// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"

/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};

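/*
 * Check whether a file extent item in @eb references the wanted extent
 * offset: unless @ignore_offset is set (or the extent is compressed,
 * encrypted or otherwise encoded), extents whose file range does not
 * cover @extent_item_pos are skipped. On a match, a new
 * extent_inode_elem is prepended to the @eie list.
 *
 * Returns 0 on a match, 1 if the offset lies outside the extent's
 * range, and -ENOMEM on allocation failure.
 */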
static int check_extent_in_eb(const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie,
			      bool ignore_offset)
{
	u64 offset = 0;
	struct extent_inode_elem *e;

	if (!ignore_offset &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;
		u64 data_len;

		data_offset = btrfs_file_extent_offset(eb, fi);
		data_len = btrfs_file_extent_num_bytes(eb, fi);

		if (extent_item_pos < data_offset ||
		    extent_item_pos >= data_offset + data_len)
			return 1;
		offset = extent_item_pos - data_offset;
	}

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + offset;
	*eie = e;

	return 0;
}

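/* Free a whole list of extent_inode_elem entries. */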
static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

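/*
 * Walk all items of the leaf @eb and collect, via check_extent_in_eb(),
 * the inode/offset pairs of every file extent item that points at
 * @wanted_disk_byte. Results are accumulated in the @eie list.
 */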
static int find_extent_in_eb(const struct extent_buffer *eb,
			     u64 wanted_disk_byte, u64 extent_item_pos,
			     struct extent_inode_elem **eie,
			     bool ignore_offset)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look into all items and see whether we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
		if (ret < 0)
			return ret;
	}

	return 0;
}

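/*
 * A tree of preliminary references: a cached rbtree of prelim_ref
 * nodes, kept sorted by prelim_ref_compare() so that identical
 * references can be merged on insertion.
 */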
struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	u64 root_objectid;
	u64 inum;
	int share_count;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

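/*
 * Keep sc->share_count in sync with a ref's count transition: a ref
 * whose count goes from <1 to >0 adds one sharer, a ref whose count
 * drops back below 1 removes one. A NULL @sc or a no-op transition is
 * ignored.
 */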
static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode)
		free_pref(ref);

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

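/*
 * Search the direct preftree for a shared data backref whose parent
 * equals @bytenr, i.e. check whether this leaf is directly referenced
 * by a BTRFS_SHARED_DATA_REF. Returns 1 on a hit, 0 otherwise.
 */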
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

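/*
 * For an indirect ref that resolved to a leaf (level 0), walk the file
 * extent items matching ref->key_for_search and collect every leaf
 * (parent) holding a reference to ref->wanted_disk_byte into the
 * @parents ulist; for higher levels the node already found in the path
 * is the single parent.
 */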
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level, u64 time_seq, const u64 *extent_item_pos,
			   bool ignore_offset)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (time_seq == SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (extent_item_pos) {
				ret = check_extent_in_eb(&key, eb, fi,
						*extent_item_pos,
						&eie, ignore_offset);
				if (ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && extent_item_pos) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (time_seq == SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, time_seq);
	}

	if (ret > 0)
		ret = 0;
	else if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				struct btrfs_path *path, u64 time_seq,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents,
				const u64 *extent_item_pos, bool ignore_offset)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups does backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (time_seq == SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (time_seq == SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

	btrfs_debug(fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(root, path, parents, preftrees, ref, level,
			      time_seq, extent_item_pos, ignore_offset);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

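/* The aux field of a ulist node carries the head of an inode-elem list. */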
static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				 struct btrfs_path *path, u64 time_seq,
				 struct preftrees *preftrees,
				 const u64 *extent_item_pos,
				 struct share_check *sc, bool ignore_offset)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && sc->root_objectid &&
		    ref->root_id != sc->root_objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
					   ref, parents, extent_item_pos,
					   ignore_offset);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch
		 * the error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	ulist_free(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
				     ref->level - 1, NULL);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		} else if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}
		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct btrfs_key key;
	struct btrfs_key tmp_op_key;
	struct rb_node *n;
	int count;
	int ret = 0;

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &tmp_op_key, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
			 */
			if (sc && sc->inum && ref->objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);
out:
	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path, u64 bytenr,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(fs_info, preftrees,
					     *info_level + 1, offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(fs_info, preftrees, 0, offset,
					     bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_fs_info *fs_info,
			  struct btrfs_path *path, u64 bytenr,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && sc->inum && key.objectid != sc->inum) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = add_indirect_ref(fs_info, preftrees, root,
					       &key, 0, bytenr, count,
					       sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behaves
 * much like the trans == NULL case; the only difference is that it will not
 * search the commit root.
 * The special case is for qgroup to search roots in commit_transaction().
 *
 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
 * shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * If ignore_offset is set to false, only extent refs whose offsets match
 * extent_item_pos are returned. If true, every extent ref is returned
 * and extent_item_pos is ignored.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 time_seq, struct ulist *refs,
			     struct ulist *roots, const u64 *extent_item_pos,
			     struct share_check *sc, bool ignore_offset)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	key.objectid = bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (time_seq == SEQ_LAST)
		path->skip_locking = 1;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	head = NULL;

	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
	if (trans && likely(trans->type != __TRANS_DUMMY) &&
	    time_seq != SEQ_LAST) {
#else
	if (trans && time_seq != SEQ_LAST) {
#endif
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(fs_info, head, time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(fs_info, path, bytenr,
					      &info_level, &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
				    extent_item_pos, sc, ignore_offset);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (roots && ref->count && ref->root_id && ref->parent == 0) {
			if (sc && sc->root_objectid &&
			    ref->root_id != sc->root_objectid) {
				ret = BACKREF_FOUND_SHARED;
				goto out;
			}

			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (extent_item_pos && !ref->inode_list &&
			    ref->level == 0) {
				struct extent_buffer *eb;

				eb = read_tree_block(fs_info, ref->parent, 0,
						     ref->level, NULL);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				} else if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking) {
					btrfs_tree_read_lock(eb);
					btrfs_set_lock_blocking_read(eb);
				}
				ret = find_extent_in_eb(eb, bytenr,
					*extent_item_pos, &eie, ignore_offset);
				if (!path->skip_locking)
					btrfs_tree_read_unlock_blocking(eb);
				free_extent_buffer(eb);
				if (ret < 0)
					goto out;
				ref->inode_list = eie;
			}
			ret = ulist_add_merge_ptr(refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && extent_item_pos) {
				/*
				 * we've recorded that parent, so we must extend
				 * its inode list here
				 */
				BUG_ON(!eie);
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

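/*
 * Free the inode-elem list attached to each node's aux field, then
 * release the ulist itself.
 */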
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		if (!node->aux)
			continue;
		eie = unode_aux_to_inode_list(node);
		free_inode_elem_list(eie);
		node->aux = 0;
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **leafs,
			 const u64 *extent_item_pos, bool ignore_offset)
{
	int ret;

	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs)
		return -ENOMEM;

	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, u64 bytenr,
				     u64 time_seq, struct ulist **roots,
				     bool ignore_offset)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
					tmp, *roots, NULL, NULL, ignore_offset);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			*roots = NULL;
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		cond_resched();
	}

	ulist_free(tmp);
	return 0;
}

int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 time_seq, struct ulist **roots,
			 bool ignore_offset)
{
	int ret;

	if (!trans)
		down_read(&fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
					time_seq, roots, ignore_offset);
	if (!trans)
		up_read(&fs_info->commit_root_sem);
	return ret;
}

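/*
 * A minimal usage sketch for btrfs_find_all_roots() (a hypothetical
 * caller, not part of this file): with no transaction handle the walk
 * runs against the commit roots (commit_root_sem is taken internally),
 * and the resulting ulist must be freed by the caller.
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *unode;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, SEQ_LAST,
 *				   &roots, false);
 *	if (!ret) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((unode = ulist_next(roots, &uiter)))
 *			pr_info("extent referenced by root %llu\n",
 *				unode->val);
 *		ulist_free(roots);
 *	}
 */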
/**
 * btrfs_check_shared - tell us whether an extent is shared
 *
 * btrfs_check_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
		       struct ulist *roots, struct ulist *tmp)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct seq_list elem = SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.root_objectid = root->root_key.objectid,
		.inum = inum,
		.share_count = 0,
	};

	ulist_init(roots);
	ulist_init(tmp);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
					roots, NULL, &shared, false);
		if (ret == BACKREF_FOUND_SHARED) {
			/* this is the only condition under which we return 1 */
			ret = 1;
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		shared.share_count = 0;
		cond_resched();
	}

	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(roots);
	ulist_release(tmp);
	return ret;
}

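/*
 * Find the first extended inode ref for @inode_objectid at or after
 * @start_off in @root. On success the path points at the item, a
 * pointer into the leaf is returned in @ret_extref and the item's key
 * offset in @found_off; -ENOENT is returned when no further extref
 * exists.
 */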
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_REF_KEY_V2 item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}

48a3b636
ES
1649/*
1650 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1651 * Elements of the path are separated by '/' and the path is guaranteed to be
1652 * 0-terminated. the path is only given within the current file system.
1653 * Therefore, it never starts with a '/'. the caller is responsible to provide
1654 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1655 * the start point of the resulting string is returned. this pointer is within
1656 * dest, normally.
1657 * in case the path buffer would overflow, the pointer is decremented further
1658 * as if output was written to the buffer, though no more output is actually
1659 * generated. that way, the caller can determine how much space would be
1660 * required for the path to fit into the buffer. in that case, the returned
1661 * value will be smaller than dest. callers must check this!
1662 */
96b5bd77
JS
1663char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1664 u32 name_len, unsigned long name_off,
1665 struct extent_buffer *eb_in, u64 parent,
1666 char *dest, u32 size)
a542ad1b 1667{
a542ad1b
JS
1668 int slot;
1669 u64 next_inum;
1670 int ret;
661bec6b 1671 s64 bytes_left = ((s64)size) - 1;
a542ad1b
JS
1672 struct extent_buffer *eb = eb_in;
1673 struct btrfs_key found_key;
b916a59a 1674 int leave_spinning = path->leave_spinning;
d24bec3a 1675 struct btrfs_inode_ref *iref;
a542ad1b
JS
1676
1677 if (bytes_left >= 0)
1678 dest[bytes_left] = '\0';
1679
b916a59a 1680 path->leave_spinning = 1;
a542ad1b 1681 while (1) {
d24bec3a 1682 bytes_left -= name_len;
a542ad1b
JS
1683 if (bytes_left >= 0)
1684 read_extent_buffer(eb, dest + bytes_left,
d24bec3a 1685 name_off, name_len);
b916a59a 1686 if (eb != eb_in) {
0c0fe3b0
FM
1687 if (!path->skip_locking)
1688 btrfs_tree_read_unlock_blocking(eb);
a542ad1b 1689 free_extent_buffer(eb);
b916a59a 1690 }
c234a24d
DS
1691 ret = btrfs_find_item(fs_root, path, parent, 0,
1692 BTRFS_INODE_REF_KEY, &found_key);
8f24b496
JS
1693 if (ret > 0)
1694 ret = -ENOENT;
a542ad1b
JS
1695 if (ret)
1696 break;
d24bec3a 1697
a542ad1b
JS
1698 next_inum = found_key.offset;
1699
1700 /* regular exit ahead */
1701 if (parent == next_inum)
1702 break;
1703
1704 slot = path->slots[0];
1705 eb = path->nodes[0];
1706 /* make sure we can use eb after releasing the path */
b916a59a 1707 if (eb != eb_in) {
0c0fe3b0 1708 if (!path->skip_locking)
300aa896 1709 btrfs_set_lock_blocking_read(eb);
0c0fe3b0
FM
1710 path->nodes[0] = NULL;
1711 path->locks[0] = 0;
b916a59a 1712 }
a542ad1b 1713 btrfs_release_path(path);
a542ad1b 1714 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
d24bec3a
MF
1715
1716 name_len = btrfs_inode_ref_name_len(eb, iref);
1717 name_off = (unsigned long)(iref + 1);
1718
a542ad1b
JS
1719 parent = next_inum;
1720 --bytes_left;
1721 if (bytes_left >= 0)
1722 dest[bytes_left] = '/';
1723 }
1724
1725 btrfs_release_path(path);
b916a59a 1726 path->leave_spinning = leave_spinning;
a542ad1b
JS
1727
1728 if (ret)
1729 return ERR_PTR(ret);
1730
1731 return dest + bytes_left;
1732}
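
/*
 * A minimal sketch of a hypothetical caller (not part of this file):
 * resolve a single INODE_REF name to a path and detect truncation by
 * comparing the returned pointer against the buffer start, as the
 * comment above demands.
 */
static int example_ref_to_path(struct btrfs_root *fs_root,
			       struct btrfs_path *path,
			       struct extent_buffer *eb,
			       struct btrfs_inode_ref *iref,
			       u64 parent, char *buf, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path,
				  btrfs_inode_ref_name_len(eb, iref),
				  (unsigned long)(iref + 1),
				  eb, parent, buf, size);
	if (IS_ERR(start))
		return PTR_ERR(start);
	/* start < buf means (buf - start) more bytes would have been needed */
	if (start < buf)
		return -ENAMETOOLONG;
	/* start now points at the 0-terminated path inside buf */
	return 0;
}
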
1733
1734/*
1735 * This makes the path point to (logical EXTENT_ITEM *).
1736 * Returns BTRFS_EXTENT_FLAG_DATA for data extents, BTRFS_EXTENT_FLAG_TREE_BLOCK
1737 * for tree blocks, and <0 on error.
1738 */
1739int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
69917e43
LB
1740 struct btrfs_path *path, struct btrfs_key *found_key,
1741 u64 *flags_ret)
a542ad1b
JS
1742{
1743 int ret;
1744 u64 flags;
261c84b6 1745 u64 size = 0;
a542ad1b 1746 u32 item_size;
73980bec 1747 const struct extent_buffer *eb;
a542ad1b
JS
1748 struct btrfs_extent_item *ei;
1749 struct btrfs_key key;
1750
261c84b6
JB
1751 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1752 key.type = BTRFS_METADATA_ITEM_KEY;
1753 else
1754 key.type = BTRFS_EXTENT_ITEM_KEY;
a542ad1b
JS
1755 key.objectid = logical;
1756 key.offset = (u64)-1;
1757
1758 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1759 if (ret < 0)
1760 return ret;
a542ad1b 1761
850a8cdf
WS
1762 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1763 if (ret) {
1764 if (ret > 0)
1765 ret = -ENOENT;
1766 return ret;
580f0a67 1767 }
850a8cdf 1768 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
261c84b6 1769 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
da17066c 1770 size = fs_info->nodesize;
261c84b6
JB
1771 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1772 size = found_key->offset;
1773
580f0a67 1774 if (found_key->objectid > logical ||
261c84b6 1775 found_key->objectid + size <= logical) {
ab8d0fc4
JM
1776 btrfs_debug(fs_info,
1777 "logical %llu is not within any extent", logical);
a542ad1b 1778 return -ENOENT;
4692cf58 1779 }
a542ad1b
JS
1780
1781 eb = path->nodes[0];
1782 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1783 BUG_ON(item_size < sizeof(*ei));
1784
1785 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1786 flags = btrfs_extent_flags(eb, ei);
1787
ab8d0fc4
JM
1788 btrfs_debug(fs_info,
1789 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
c1c9ff7c
GU
1790 logical, logical - found_key->objectid, found_key->objectid,
1791 found_key->offset, flags, item_size);
69917e43
LB
1792
1793 WARN_ON(!flags_ret);
1794 if (flags_ret) {
1795 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1796 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1797 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1798 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1799 else
290342f6 1800 BUG();
69917e43
LB
1801 return 0;
1802 }
a542ad1b
JS
1803
1804 return -EIO;
1805}
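
/*
 * Minimal sketch of a hypothetical caller: map a logical byte address to
 * its extent item and tell data extents apart from tree blocks via the
 * returned flags.
 */
static int example_classify_logical(struct btrfs_fs_info *fs_info, u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_free_path(path);
	if (ret < 0)
		return ret;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		btrfs_debug(fs_info, "%llu belongs to a tree block", logical);
	else
		btrfs_debug(fs_info, "%llu belongs to a data extent", logical);
	return 0;
}
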
1806
1807/*
1808 * Helper function to iterate extent inline refs. ptr must point to a 0 value
1809 * for the first call and may be modified; it is used to track state.
1810 * If more refs exist, 0 is returned and the next call to
e0c476b1 1811 * get_extent_inline_ref must pass the modified ptr parameter to get the
a542ad1b
JS
1812 * next ref. After the last ref was processed, 1 is returned.
1813 * Returns <0 on error.
1814 */
e0c476b1
JM
1815static int get_extent_inline_ref(unsigned long *ptr,
1816 const struct extent_buffer *eb,
1817 const struct btrfs_key *key,
1818 const struct btrfs_extent_item *ei,
1819 u32 item_size,
1820 struct btrfs_extent_inline_ref **out_eiref,
1821 int *out_type)
a542ad1b
JS
1822{
1823 unsigned long end;
1824 u64 flags;
1825 struct btrfs_tree_block_info *info;
1826
1827 if (!*ptr) {
1828 /* first call */
1829 flags = btrfs_extent_flags(eb, ei);
1830 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
6eda71d0
LB
1831 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1832 /* a skinny metadata extent */
1833 *out_eiref =
1834 (struct btrfs_extent_inline_ref *)(ei + 1);
1835 } else {
1836 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1837 info = (struct btrfs_tree_block_info *)(ei + 1);
1838 *out_eiref =
1839 (struct btrfs_extent_inline_ref *)(info + 1);
1840 }
a542ad1b
JS
1841 } else {
1842 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1843 }
1844 *ptr = (unsigned long)*out_eiref;
cd857dd6 1845 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
a542ad1b
JS
1846 return -ENOENT;
1847 }
1848
1849 end = (unsigned long)ei + item_size;
6eda71d0 1850 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
3de28d57
LB
1851 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1852 BTRFS_REF_TYPE_ANY);
1853 if (*out_type == BTRFS_REF_TYPE_INVALID)
af431dcb 1854 return -EUCLEAN;
a542ad1b
JS
1855
1856 *ptr += btrfs_extent_inline_ref_size(*out_type);
1857 WARN_ON(*ptr > end);
1858 if (*ptr == end)
1859 return 1; /* last */
1860
1861 return 0;
1862}
1863
1864/*
1865 * Reads the tree block backref for an extent. Tree level and root are returned
1866 * through out_level and out_root. ptr must point to a 0 value for the first
e0c476b1 1867 * call and may be modified (see the get_extent_inline_ref comment).
a542ad1b
JS
1868 * Returns 0 if data was provided, 1 if there was no more data to provide, or
1869 * <0 on error.
1870 */
1871int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
6eda71d0
LB
1872 struct btrfs_key *key, struct btrfs_extent_item *ei,
1873 u32 item_size, u64 *out_root, u8 *out_level)
a542ad1b
JS
1874{
1875 int ret;
1876 int type;
a542ad1b
JS
1877 struct btrfs_extent_inline_ref *eiref;
1878
1879 if (*ptr == (unsigned long)-1)
1880 return 1;
1881
1882 while (1) {
e0c476b1 1883 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
6eda71d0 1884 &eiref, &type);
a542ad1b
JS
1885 if (ret < 0)
1886 return ret;
1887
1888 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1889 type == BTRFS_SHARED_BLOCK_REF_KEY)
1890 break;
1891
1892 if (ret == 1)
1893 return 1;
1894 }
1895
1896 /* we can treat both ref types equally here */
a542ad1b 1897 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
a1317f45
FM
1898
1899 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1900 struct btrfs_tree_block_info *info;
1901
1902 info = (struct btrfs_tree_block_info *)(ei + 1);
1903 *out_level = btrfs_tree_block_level(eb, info);
1904 } else {
1905 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1906 *out_level = (u8)key->offset;
1907 }
a542ad1b
JS
1908
1909 if (ret == 1)
1910 *ptr = (unsigned long)-1;
1911
1912 return 0;
1913}
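
/*
 * A sketch of the caller pattern (hypothetical helper; compare how scrub
 * reports tree-block errors): ptr starts at 0, each successful call yields
 * one (root, level) pair, and a return value of 1 ends the walk.
 */
static int example_print_tree_refs(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *eb,
				   struct btrfs_key *key,
				   struct btrfs_extent_item *ei, u32 item_size)
{
	unsigned long ptr = 0;	/* must be 0 for the first call */
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret > 0)
			break;	/* no more tree backrefs */
		btrfs_debug(fs_info, "referenced from root %llu at level %d",
			    root, level);
	}
	return 0;
}
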
1914
ab8d0fc4
JM
1915static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1916 struct extent_inode_elem *inode_list,
1917 u64 root, u64 extent_item_objectid,
1918 iterate_extent_inodes_t *iterate, void *ctx)
a542ad1b 1919{
976b1908 1920 struct extent_inode_elem *eie;
4692cf58 1921 int ret = 0;
4692cf58 1922
976b1908 1923 for (eie = inode_list; eie; eie = eie->next) {
ab8d0fc4
JM
1924 btrfs_debug(fs_info,
1925 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1926 extent_item_objectid, eie->inum,
1927 eie->offset, root);
976b1908 1928 ret = iterate(eie->inum, eie->offset, root, ctx);
4692cf58 1929 if (ret) {
ab8d0fc4
JM
1930 btrfs_debug(fs_info,
1931 "stopping iteration for %llu due to ret=%d",
1932 extent_item_objectid, ret);
4692cf58
JS
1933 break;
1934 }
a542ad1b
JS
1935 }
1936
a542ad1b
JS
1937 return ret;
1938}
1939
1940/*
1941 * Calls iterate() for every inode that references the extent identified by
4692cf58 1942 * the given parameters.
a542ad1b
JS
1943 * When the iterator function returns a non-zero value, iteration stops.
1944 */
1945int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
4692cf58 1946 u64 extent_item_objectid, u64 extent_item_pos,
7a3ae2f8 1947 int search_commit_root,
c995ab3c
ZB
1948 iterate_extent_inodes_t *iterate, void *ctx,
1949 bool ignore_offset)
a542ad1b 1950{
a542ad1b 1951 int ret;
da61d31a 1952 struct btrfs_trans_handle *trans = NULL;
7a3ae2f8
JS
1953 struct ulist *refs = NULL;
1954 struct ulist *roots = NULL;
4692cf58
JS
1955 struct ulist_node *ref_node = NULL;
1956 struct ulist_node *root_node = NULL;
3284da7b 1957 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
cd1b413c
JS
1958 struct ulist_iterator ref_uiter;
1959 struct ulist_iterator root_uiter;
a542ad1b 1960
ab8d0fc4 1961 btrfs_debug(fs_info, "resolving all inodes for extent %llu",
4692cf58 1962 extent_item_objectid);
a542ad1b 1963
da61d31a 1964 if (!search_commit_root) {
bfc61c36
FM
1965 trans = btrfs_attach_transaction(fs_info->extent_root);
1966 if (IS_ERR(trans)) {
1967 if (PTR_ERR(trans) != -ENOENT &&
1968 PTR_ERR(trans) != -EROFS)
1969 return PTR_ERR(trans);
1970 trans = NULL;
1971 }
1972 }
1973
1974 if (trans)
8445f61c 1975 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
bfc61c36 1976 else
9e351cc8 1977 down_read(&fs_info->commit_root_sem);
a542ad1b 1978
4692cf58 1979 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
097b8a7c 1980 tree_mod_seq_elem.seq, &refs,
c995ab3c 1981 &extent_item_pos, ignore_offset);
4692cf58
JS
1982 if (ret)
1983 goto out;
a542ad1b 1984
cd1b413c
JS
1985 ULIST_ITER_INIT(&ref_uiter);
1986 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
e0c476b1 1987 ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
c995ab3c
ZB
1988 tree_mod_seq_elem.seq, &roots,
1989 ignore_offset);
4692cf58
JS
1990 if (ret)
1991 break;
cd1b413c
JS
1992 ULIST_ITER_INIT(&root_uiter);
1993 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
ab8d0fc4
JM
1994 btrfs_debug(fs_info,
1995 "root %llu references leaf %llu, data list %#llx",
1996 root_node->val, ref_node->val,
1997 ref_node->aux);
1998 ret = iterate_leaf_refs(fs_info,
1999 (struct extent_inode_elem *)
995e01b7
JS
2000 (uintptr_t)ref_node->aux,
2001 root_node->val,
2002 extent_item_objectid,
2003 iterate, ctx);
4692cf58 2004 }
976b1908 2005 ulist_free(roots);
a542ad1b
JS
2006 }
2007
976b1908 2008 free_leaf_list(refs);
4692cf58 2009out:
bfc61c36 2010 if (trans) {
8445f61c 2011 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
3a45bb20 2012 btrfs_end_transaction(trans);
9e351cc8
JB
2013 } else {
2014 up_read(&fs_info->commit_root_sem);
7a3ae2f8
JS
2015 }
2016
a542ad1b
JS
2017 return ret;
2018}
2019
2020int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2021 struct btrfs_path *path,
c995ab3c
ZB
2022 iterate_extent_inodes_t *iterate, void *ctx,
2023 bool ignore_offset)
a542ad1b
JS
2024{
2025 int ret;
4692cf58 2026 u64 extent_item_pos;
69917e43 2027 u64 flags = 0;
a542ad1b 2028 struct btrfs_key found_key;
7a3ae2f8 2029 int search_commit_root = path->search_commit_root;
a542ad1b 2030
69917e43 2031 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
4692cf58 2032 btrfs_release_path(path);
a542ad1b
JS
2033 if (ret < 0)
2034 return ret;
69917e43 2035 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
3627bf45 2036 return -EINVAL;
a542ad1b 2037
4692cf58 2038 extent_item_pos = logical - found_key.objectid;
7a3ae2f8
JS
2039 ret = iterate_extent_inodes(fs_info, found_key.objectid,
2040 extent_item_pos, search_commit_root,
c995ab3c 2041 iterate, ctx, ignore_offset);
a542ad1b
JS
2042
2043 return ret;
2044}
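
/*
 * A toy callback matching the iterate_extent_inodes_t shape implied by
 * iterate_leaf_refs() above (hypothetical, not part of this file): count
 * every (inode, offset, root) tuple referencing the extent; returning a
 * non-zero value would stop the iteration early.
 */
static int example_count_refs(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	return 0;
}

/*
 * Usage, e.g.:
 *	u64 n = 0;
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *					  example_count_refs, &n, false);
 */
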
2045
d24bec3a
MF
2046typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2047 struct extent_buffer *eb, void *ctx);
2048
2049static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2050 struct btrfs_path *path,
2051 iterate_irefs_t *iterate, void *ctx)
a542ad1b 2052{
aefc1eb1 2053 int ret = 0;
a542ad1b
JS
2054 int slot;
2055 u32 cur;
2056 u32 len;
2057 u32 name_len;
2058 u64 parent = 0;
2059 int found = 0;
2060 struct extent_buffer *eb;
2061 struct btrfs_item *item;
2062 struct btrfs_inode_ref *iref;
2063 struct btrfs_key found_key;
2064
aefc1eb1 2065 while (!ret) {
c234a24d
DS
2066 ret = btrfs_find_item(fs_root, path, inum,
2067 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2068 &found_key);
2069
a542ad1b
JS
2070 if (ret < 0)
2071 break;
2072 if (ret) {
2073 ret = found ? 0 : -ENOENT;
2074 break;
2075 }
2076 ++found;
2077
2078 parent = found_key.offset;
2079 slot = path->slots[0];
3fe81ce2
FDBM
2080 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2081 if (!eb) {
2082 ret = -ENOMEM;
2083 break;
2084 }
a542ad1b
JS
2085 btrfs_release_path(path);
2086
dd3cc16b 2087 item = btrfs_item_nr(slot);
a542ad1b
JS
2088 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2089
2090 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2091 name_len = btrfs_inode_ref_name_len(eb, iref);
2092 /* path must be released before calling iterate()! */
ab8d0fc4
JM
2093 btrfs_debug(fs_root->fs_info,
2094 "following ref at offset %u for inode %llu in tree %llu",
4fd786e6
MT
2095 cur, found_key.objectid,
2096 fs_root->root_key.objectid);
d24bec3a
MF
2097 ret = iterate(parent, name_len,
2098 (unsigned long)(iref + 1), eb, ctx);
aefc1eb1 2099 if (ret)
a542ad1b 2100 break;
a542ad1b
JS
2101 len = sizeof(*iref) + name_len;
2102 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2103 }
2104 free_extent_buffer(eb);
2105 }
2106
2107 btrfs_release_path(path);
2108
2109 return ret;
2110}
2111
d24bec3a
MF
2112static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2113 struct btrfs_path *path,
2114 iterate_irefs_t *iterate, void *ctx)
2115{
2116 int ret;
2117 int slot;
2118 u64 offset = 0;
2119 u64 parent;
2120 int found = 0;
2121 struct extent_buffer *eb;
2122 struct btrfs_inode_extref *extref;
d24bec3a
MF
2123 u32 item_size;
2124 u32 cur_offset;
2125 unsigned long ptr;
2126
2127 while (1) {
2128 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2129 &offset);
2130 if (ret < 0)
2131 break;
2132 if (ret) {
2133 ret = found ? 0 : -ENOENT;
2134 break;
2135 }
2136 ++found;
2137
2138 slot = path->slots[0];
3fe81ce2
FDBM
2139 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2140 if (!eb) {
2141 ret = -ENOMEM;
2142 break;
2143 }
d24bec3a
MF
2144 btrfs_release_path(path);
2145
2849a854
CM
2146 item_size = btrfs_item_size_nr(eb, slot);
2147 ptr = btrfs_item_ptr_offset(eb, slot);
d24bec3a
MF
2148 cur_offset = 0;
2149
2150 while (cur_offset < item_size) {
2151 u32 name_len;
2152
2153 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2154 parent = btrfs_inode_extref_parent(eb, extref);
2155 name_len = btrfs_inode_extref_name_len(eb, extref);
2156 ret = iterate(parent, name_len,
2157 (unsigned long)&extref->name, eb, ctx);
2158 if (ret)
2159 break;
2160
2849a854 2161 cur_offset += btrfs_inode_extref_name_len(eb, extref);
d24bec3a
MF
2162 cur_offset += sizeof(*extref);
2163 }
d24bec3a
MF
2164 free_extent_buffer(eb);
2165
2166 offset++;
2167 }
2168
2169 btrfs_release_path(path);
2170
2171 return ret;
2172}
2173
2174static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2175 struct btrfs_path *path, iterate_irefs_t *iterate,
2176 void *ctx)
2177{
2178 int ret;
2179 int found_refs = 0;
2180
2181 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2182 if (!ret)
2183 ++found_refs;
2184 else if (ret != -ENOENT)
2185 return ret;
2186
2187 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2188 if (ret == -ENOENT && found_refs)
2189 return 0;
2190
2191 return ret;
2192}
2193
a542ad1b
JS
2194/*
2195 * Returns 0 if the path could be dumped (possibly truncated).
2196 * Returns <0 in case of an error.
2197 */
d24bec3a
MF
2198static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2199 struct extent_buffer *eb, void *ctx)
a542ad1b
JS
2200{
2201 struct inode_fs_paths *ipath = ctx;
2202 char *fspath;
2203 char *fspath_min;
2204 int i = ipath->fspath->elem_cnt;
2205 const int s_ptr = sizeof(char *);
2206 u32 bytes_left;
2207
2208 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2209 ipath->fspath->bytes_left - s_ptr : 0;
2210
740c3d22 2211 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
96b5bd77
JS
2212 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2213 name_off, eb, inum, fspath_min, bytes_left);
a542ad1b
JS
2214 if (IS_ERR(fspath))
2215 return PTR_ERR(fspath);
2216
2217 if (fspath > fspath_min) {
745c4d8e 2218 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
a542ad1b
JS
2219 ++ipath->fspath->elem_cnt;
2220 ipath->fspath->bytes_left = fspath - fspath_min;
2221 } else {
2222 ++ipath->fspath->elem_missed;
2223 ipath->fspath->bytes_missing += fspath_min - fspath;
2224 ipath->fspath->bytes_left = 0;
2225 }
2226
2227 return 0;
2228}
2229
2230/*
2231 * This dumps all filesystem paths to the inode into the ipath struct, provided
2232 * it has been created large enough. Each path is zero-terminated and accessed
740c3d22 2233 * from ipath->fspath->val[i].
a542ad1b 2234 * When it returns, there are ipath->fspath->elem_cnt paths available
740c3d22 2235 * in ipath->fspath->val[]. When the allocated space wasn't sufficient, the
01327610 2236 * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise,
a542ad1b
JS
2237 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2238 * have been needed to return all paths.
2239 */
2240int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2241{
2242 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
d24bec3a 2243 inode_to_path, ipath);
a542ad1b
JS
2244}
2245
a542ad1b
JS
2246struct btrfs_data_container *init_data_container(u32 total_bytes)
2247{
2248 struct btrfs_data_container *data;
2249 size_t alloc_bytes;
2250
2251 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
f54de068 2252 data = kvmalloc(alloc_bytes, GFP_KERNEL);
a542ad1b
JS
2253 if (!data)
2254 return ERR_PTR(-ENOMEM);
2255
2256 if (total_bytes >= sizeof(*data)) {
2257 data->bytes_left = total_bytes - sizeof(*data);
2258 data->bytes_missing = 0;
2259 } else {
2260 data->bytes_missing = sizeof(*data) - total_bytes;
2261 data->bytes_left = 0;
2262 }
2263
2264 data->elem_cnt = 0;
2265 data->elem_missed = 0;
2266
2267 return data;
2268}
2269
2270/*
2271 * Allocates space to return multiple filesystem paths for an inode. The
2272 * total_bytes to allocate is passed; note that the space usable for actual path
2273 * information will be total_bytes - sizeof(struct btrfs_data_container).
2274 * The returned pointer must be freed with free_ipath() in the end.
2275 */
2276struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2277 struct btrfs_path *path)
2278{
2279 struct inode_fs_paths *ifp;
2280 struct btrfs_data_container *fspath;
2281
2282 fspath = init_data_container(total_bytes);
2283 if (IS_ERR(fspath))
afc6961f 2284 return ERR_CAST(fspath);
a542ad1b 2285
f54de068 2286 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
a542ad1b 2287 if (!ifp) {
f54de068 2288 kvfree(fspath);
a542ad1b
JS
2289 return ERR_PTR(-ENOMEM);
2290 }
2291
2292 ifp->btrfs_path = path;
2293 ifp->fspath = fspath;
2294 ifp->fs_root = fs_root;
2295
2296 return ifp;
2297}
2298
2299void free_ipath(struct inode_fs_paths *ipath)
2300{
4735fb28
JJ
2301 if (!ipath)
2302 return;
f54de068 2303 kvfree(ipath->fspath);
a542ad1b
JS
2304 kfree(ipath);
2305}
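
/*
 * Putting init_ipath()/paths_from_inode()/free_ipath() together, roughly
 * the way the INO_PATHS ioctl does (hypothetical sketch, fixed 4KiB
 * container, error handling trimmed):
 */
static int example_dump_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret) {
		/* val[i] holds a pointer to a 0-terminated path, see above */
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_info("path %d: %s\n", i, (char *)(unsigned long)
				ipath->fspath->val[i]);
	}

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}
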
a37f232b
QW
2306
2307struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2308 struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2309{
2310 struct btrfs_backref_iter *ret;
2311
2312 ret = kzalloc(sizeof(*ret), gfp_flag);
2313 if (!ret)
2314 return NULL;
2315
2316 ret->path = btrfs_alloc_path();
c15c2ec0 2317 if (!ret->path) {
a37f232b
QW
2318 kfree(ret);
2319 return NULL;
2320 }
2321
2322 /* Current backref iterator only supports iteration in commit root */
2323 ret->path->search_commit_root = 1;
2324 ret->path->skip_locking = 1;
2325 ret->fs_info = fs_info;
2326
2327 return ret;
2328}
2329
2330int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2331{
2332 struct btrfs_fs_info *fs_info = iter->fs_info;
2333 struct btrfs_path *path = iter->path;
2334 struct btrfs_extent_item *ei;
2335 struct btrfs_key key;
2336 int ret;
2337
2338 key.objectid = bytenr;
2339 key.type = BTRFS_METADATA_ITEM_KEY;
2340 key.offset = (u64)-1;
2341 iter->bytenr = bytenr;
2342
2343 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2344 if (ret < 0)
2345 return ret;
2346 if (ret == 0) {
2347 ret = -EUCLEAN;
2348 goto release;
2349 }
2350 if (path->slots[0] == 0) {
2351 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2352 ret = -EUCLEAN;
2353 goto release;
2354 }
2355 path->slots[0]--;
2356
2357 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2358 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2359 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2360 ret = -ENOENT;
2361 goto release;
2362 }
2363 memcpy(&iter->cur_key, &key, sizeof(key));
2364 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2365 path->slots[0]);
2366 iter->end_ptr = (u32)(iter->item_ptr +
2367 btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2368 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2369 struct btrfs_extent_item);
2370
2371 /*
2372 * Only iteration on tree backrefs is supported for now.
2373 *
2374 * This is an extra precaution for non-skinny-metadata filesystems,
2375 * where EXTENT_ITEM is also used for tree blocks, so the extent
2376 * flags are the only way to determine whether it's a tree block.
2377 */
2378 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2379 ret = -ENOTSUPP;
2380 goto release;
2381 }
2382 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2383
2384 /* If there is no inline backref, go search for keyed backref */
2385 if (iter->cur_ptr >= iter->end_ptr) {
2386 ret = btrfs_next_item(fs_info->extent_root, path);
2387
2388 /* No inline nor keyed ref */
2389 if (ret > 0) {
2390 ret = -ENOENT;
2391 goto release;
2392 }
2393 if (ret < 0)
2394 goto release;
2395
2396 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2397 path->slots[0]);
2398 if (iter->cur_key.objectid != bytenr ||
2399 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2400 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2401 ret = -ENOENT;
2402 goto release;
2403 }
2404 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2405 path->slots[0]);
2406 iter->item_ptr = iter->cur_ptr;
2407 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2408 path->nodes[0], path->slots[0]));
2409 }
2410
2411 return 0;
2412release:
2413 btrfs_backref_iter_release(iter);
2414 return ret;
2415}
c39c2ddc
QW
2416
2417/*
2418 * Go to the next backref item of the current bytenr; it can be either
2419 * inlined or keyed.
2420 *
2421 * The caller needs to check whether it's an inline ref or not via iter->cur_key.
2422 *
2423 * Return 0 if we got the next backref without problem.
2424 * Return >0 if there is no extra backref for this bytenr.
2425 * Return <0 if something went wrong.
2426 */
2427int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2428{
2429 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2430 struct btrfs_path *path = iter->path;
2431 struct btrfs_extent_inline_ref *iref;
2432 int ret;
2433 u32 size;
2434
2435 if (btrfs_backref_iter_is_inline_ref(iter)) {
2436 /* We're still inside the inline refs */
2437 ASSERT(iter->cur_ptr < iter->end_ptr);
2438
2439 if (btrfs_backref_has_tree_block_info(iter)) {
2440 /* First tree block info */
2441 size = sizeof(struct btrfs_tree_block_info);
2442 } else {
2443 /* Use inline ref type to determine the size */
2444 int type;
2445
2446 iref = (struct btrfs_extent_inline_ref *)
2447 ((unsigned long)iter->cur_ptr);
2448 type = btrfs_extent_inline_ref_type(eb, iref);
2449
2450 size = btrfs_extent_inline_ref_size(type);
2451 }
2452 iter->cur_ptr += size;
2453 if (iter->cur_ptr < iter->end_ptr)
2454 return 0;
2455
2456 /* All inline items iterated, fall through */
2457 }
2458
2459 /* We're at keyed items, there is no inline item, go to the next one */
2460 ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2461 if (ret)
2462 return ret;
2463
2464 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2465 if (iter->cur_key.objectid != iter->bytenr ||
2466 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2467 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2468 return 1;
2469 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2470 path->slots[0]);
2471 iter->cur_ptr = iter->item_ptr;
2472 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2473 path->slots[0]);
2474 return 0;
2475}
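
/*
 * A minimal walk over all tree backrefs of one tree block, sketched under
 * the assumption that btrfs_backref_iter_free() from backref.h pairs with
 * btrfs_backref_iter_alloc() above (hypothetical helper, not in this file):
 */
static int example_walk_tree_backrefs(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	while (ret == 0) {
		/* cur_key.type is SHARED_BLOCK_REF or TREE_BLOCK_REF here */
		btrfs_debug(fs_info, "backref type %u offset %llu",
			    iter->cur_key.type, iter->cur_key.offset);
		ret = btrfs_backref_iter_next(iter);
	}
	if (ret > 0)	/* past the last backref: normal termination */
		ret = 0;
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret;
}
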
584fb121
QW
2476
2477void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2478 struct btrfs_backref_cache *cache, int is_reloc)
2479{
2480 int i;
2481
2482 cache->rb_root = RB_ROOT;
2483 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2484 INIT_LIST_HEAD(&cache->pending[i]);
2485 INIT_LIST_HEAD(&cache->changed);
2486 INIT_LIST_HEAD(&cache->detached);
2487 INIT_LIST_HEAD(&cache->leaves);
2488 INIT_LIST_HEAD(&cache->pending_edge);
2489 INIT_LIST_HEAD(&cache->useless_node);
2490 cache->fs_info = fs_info;
2491 cache->is_reloc = is_reloc;
2492}
b1818dab
QW
2493
2494struct btrfs_backref_node *btrfs_backref_alloc_node(
2495 struct btrfs_backref_cache *cache, u64 bytenr, int level)
2496{
2497 struct btrfs_backref_node *node;
2498
2499 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2500 node = kzalloc(sizeof(*node), GFP_NOFS);
2501 if (!node)
2502 return node;
2503
2504 INIT_LIST_HEAD(&node->list);
2505 INIT_LIST_HEAD(&node->upper);
2506 INIT_LIST_HEAD(&node->lower);
2507 RB_CLEAR_NODE(&node->rb_node);
2508 cache->nr_nodes++;
2509 node->level = level;
2510 node->bytenr = bytenr;
2511
2512 return node;
2513}
47254d07
QW
2514
2515struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2516 struct btrfs_backref_cache *cache)
2517{
2518 struct btrfs_backref_edge *edge;
2519
2520 edge = kzalloc(sizeof(*edge), GFP_NOFS);
2521 if (edge)
2522 cache->nr_edges++;
2523 return edge;
2524}
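
/*
 * Hypothetical sketch, assuming the NULL-safe btrfs_backref_free_node()
 * and the btrfs_backref_link_edge()/LINK_LOWER helpers from backref.h:
 * allocate a child and a parent node and wire them with one edge, the
 * same shape handle_direct_tree_backref() below builds.
 */
static int example_link_nodes(struct btrfs_backref_cache *cache,
			      u64 child_bytenr, u64 parent_bytenr, int level)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	lower = btrfs_backref_alloc_node(cache, child_bytenr, level);
	upper = btrfs_backref_alloc_node(cache, parent_bytenr, level + 1);
	edge = btrfs_backref_alloc_edge(cache);
	if (!lower || !upper || !edge) {
		btrfs_backref_free_edge(cache, edge);
		btrfs_backref_free_node(cache, upper);
		btrfs_backref_free_node(cache, lower);
		return -ENOMEM;
	}

	/* LINK_LOWER hooks the edge onto lower->upper only; the UPPER side
	 * is linked later, e.g. by btrfs_backref_finish_upper_links(). */
	btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
	return 0;
}
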
023acb07
QW
2525
2526/*
2527 * Drop the backref node from cache, also cleaning up all its
2528 * upper edges and any uncached nodes in the path.
2529 *
2530 * This cleanup happens bottom up, thus the node should either
2531 * be the lowest node in the cache or a detached node.
2532 */
2533void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2534 struct btrfs_backref_node *node)
2535{
2536 struct btrfs_backref_node *upper;
2537 struct btrfs_backref_edge *edge;
2538
2539 if (!node)
2540 return;
2541
2542 BUG_ON(!node->lowest && !node->detached);
2543 while (!list_empty(&node->upper)) {
2544 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2545 list[LOWER]);
2546 upper = edge->node[UPPER];
2547 list_del(&edge->list[LOWER]);
2548 list_del(&edge->list[UPPER]);
2549 btrfs_backref_free_edge(cache, edge);
2550
2551 if (RB_EMPTY_NODE(&upper->rb_node)) {
2552 BUG_ON(!list_empty(&node->upper));
2553 btrfs_backref_drop_node(cache, node);
2554 node = upper;
2555 node->lowest = 1;
2556 continue;
2557 }
2558 /*
2559 * Add the node to leaf node list if no other child block
2560 * cached.
2561 */
2562 if (list_empty(&upper->lower)) {
2563 list_add_tail(&upper->lower, &cache->leaves);
2564 upper->lowest = 1;
2565 }
2566 }
2567
2568 btrfs_backref_drop_node(cache, node);
2569}
13fe1bdb
QW
2570
2571/*
2572 * Release all nodes/edges from current cache
2573 */
2574void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2575{
2576 struct btrfs_backref_node *node;
2577 int i;
2578
2579 while (!list_empty(&cache->detached)) {
2580 node = list_entry(cache->detached.next,
2581 struct btrfs_backref_node, list);
2582 btrfs_backref_cleanup_node(cache, node);
2583 }
2584
2585 while (!list_empty(&cache->leaves)) {
2586 node = list_entry(cache->leaves.next,
2587 struct btrfs_backref_node, lower);
2588 btrfs_backref_cleanup_node(cache, node);
2589 }
2590
2591 cache->last_trans = 0;
2592
2593 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2594 ASSERT(list_empty(&cache->pending[i]));
2595 ASSERT(list_empty(&cache->pending_edge));
2596 ASSERT(list_empty(&cache->useless_node));
2597 ASSERT(list_empty(&cache->changed));
2598 ASSERT(list_empty(&cache->detached));
2599 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2600 ASSERT(!cache->nr_nodes);
2601 ASSERT(!cache->nr_edges);
2602}
1b60d2ec
QW
2603
2604/*
2605 * Handle direct tree backref
2606 *
2607 * A direct tree backref means the backref item records its parent bytenr
2608 * directly. This is for SHARED_BLOCK_REF backrefs (keyed or inlined).
2609 *
2610 * @ref_key: The converted backref key.
2611 * For keyed backref, it's the item key.
2612 * For inlined backref, objectid is the bytenr,
2613 * type is btrfs_inline_ref_type, offset is
2614 * btrfs_inline_ref_offset.
2615 */
2616static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2617 struct btrfs_key *ref_key,
2618 struct btrfs_backref_node *cur)
2619{
2620 struct btrfs_backref_edge *edge;
2621 struct btrfs_backref_node *upper;
2622 struct rb_node *rb_node;
2623
2624 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2625
2626 /* Only reloc root uses backref pointing to itself */
2627 if (ref_key->objectid == ref_key->offset) {
2628 struct btrfs_root *root;
2629
2630 cur->is_reloc_root = 1;
2631 /* Only reloc backref cache cares about a specific root */
2632 if (cache->is_reloc) {
2633 root = find_reloc_root(cache->fs_info, cur->bytenr);
2634 if (WARN_ON(!root))
2635 return -ENOENT;
2636 cur->root = root;
2637 } else {
2638 /*
2639 * For generic purpose backref cache, reloc root node
2640 * is useless.
2641 */
2642 list_add(&cur->list, &cache->useless_node);
2643 }
2644 return 0;
2645 }
2646
2647 edge = btrfs_backref_alloc_edge(cache);
2648 if (!edge)
2649 return -ENOMEM;
2650
2651 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2652 if (!rb_node) {
2653 /* Parent node not yet cached */
2654 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2655 cur->level + 1);
2656 if (!upper) {
2657 btrfs_backref_free_edge(cache, edge);
2658 return -ENOMEM;
2659 }
2660
2661 /*
2662 * Backrefs for the upper level block isn't cached, add the
2663 * block to pending list
2664 */
2665 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2666 } else {
2667 /* Parent node already cached */
2668 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2669 ASSERT(upper->checked);
2670 INIT_LIST_HEAD(&edge->list[UPPER]);
2671 }
2672 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2673 return 0;
2674}
2675
2676/*
2677 * Handle indirect tree backref
2678 *
2679 * An indirect tree backref means we only know which tree the node belongs to.
2680 * We still need to do a tree search to find out the parents. This is for
2681 * TREE_BLOCK_REF backrefs (keyed or inlined).
2682 *
2683 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
2684 * @tree_key: The first key of this tree block.
2685 * @path: A clean (released) path, to avoid allocating a path every time
2686 * the function gets called.
2687 */
2688static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2689 struct btrfs_path *path,
2690 struct btrfs_key *ref_key,
2691 struct btrfs_key *tree_key,
2692 struct btrfs_backref_node *cur)
2693{
2694 struct btrfs_fs_info *fs_info = cache->fs_info;
2695 struct btrfs_backref_node *upper;
2696 struct btrfs_backref_node *lower;
2697 struct btrfs_backref_edge *edge;
2698 struct extent_buffer *eb;
2699 struct btrfs_root *root;
1b60d2ec
QW
2700 struct rb_node *rb_node;
2701 int level;
2702 bool need_check = true;
2703 int ret;
2704
56e9357a 2705 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
1b60d2ec
QW
2706 if (IS_ERR(root))
2707 return PTR_ERR(root);
92a7cc42 2708 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
1b60d2ec
QW
2709 cur->cowonly = 1;
2710
2711 if (btrfs_root_level(&root->root_item) == cur->level) {
2712 /* Tree root */
2713 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
876de781
QW
2714 /*
2715 * For the reloc backref cache, we may ignore the reloc root. But
2716 * for a general purpose backref cache, we can't rely on
2717 * btrfs_should_ignore_reloc_root() as it may conflict with the
2718 * currently running relocation and lead to a missing root.
2719 *
2720 * For a general purpose backref cache, reloc root detection relies
2721 * completely on the direct backref (key->offset is the parent
2722 * bytenr), thus we only do such a check for the reloc cache.
2723 */
2724 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
1b60d2ec
QW
2725 btrfs_put_root(root);
2726 list_add(&cur->list, &cache->useless_node);
2727 } else {
2728 cur->root = root;
2729 }
2730 return 0;
2731 }
2732
2733 level = cur->level + 1;
2734
2735 /* Search the tree to find parent blocks referring to the block */
2736 path->search_commit_root = 1;
2737 path->skip_locking = 1;
2738 path->lowest_level = level;
2739 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2740 path->lowest_level = 0;
2741 if (ret < 0) {
2742 btrfs_put_root(root);
2743 return ret;
2744 }
2745 if (ret > 0 && path->slots[level] > 0)
2746 path->slots[level]--;
2747
2748 eb = path->nodes[level];
2749 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2750 btrfs_err(fs_info,
2751"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2752 cur->bytenr, level - 1, root->root_key.objectid,
2753 tree_key->objectid, tree_key->type, tree_key->offset);
2754 btrfs_put_root(root);
2755 ret = -ENOENT;
2756 goto out;
2757 }
2758 lower = cur;
2759
2760 /* Add all nodes and edges in the path */
2761 for (; level < BTRFS_MAX_LEVEL; level++) {
2762 if (!path->nodes[level]) {
2763 ASSERT(btrfs_root_bytenr(&root->root_item) ==
2764 lower->bytenr);
876de781
QW
2765 /* Same as previous should_ignore_reloc_root() call */
2766 if (btrfs_should_ignore_reloc_root(root) &&
2767 cache->is_reloc) {
1b60d2ec
QW
2768 btrfs_put_root(root);
2769 list_add(&lower->list, &cache->useless_node);
2770 } else {
2771 lower->root = root;
2772 }
2773 break;
2774 }
2775
2776 edge = btrfs_backref_alloc_edge(cache);
2777 if (!edge) {
2778 btrfs_put_root(root);
2779 ret = -ENOMEM;
2780 goto out;
2781 }
2782
2783 eb = path->nodes[level];
2784 rb_node = rb_simple_search(&cache->rb_root, eb->start);
2785 if (!rb_node) {
2786 upper = btrfs_backref_alloc_node(cache, eb->start,
2787 lower->level + 1);
2788 if (!upper) {
2789 btrfs_put_root(root);
2790 btrfs_backref_free_edge(cache, edge);
2791 ret = -ENOMEM;
2792 goto out;
2793 }
2794 upper->owner = btrfs_header_owner(eb);
92a7cc42 2795 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
1b60d2ec
QW
2796 upper->cowonly = 1;
2797
2798 /*
2799 * If we know the block isn't shared we can avoid
2800 * checking its backrefs.
2801 */
2802 if (btrfs_block_can_be_shared(root, eb))
2803 upper->checked = 0;
2804 else
2805 upper->checked = 1;
2806
2807 /*
2808 * Add the block to pending list if we need to check its
2809 * backrefs, we only do this once while walking up a
2810 * tree as we will catch anything else later on.
2811 */
2812 if (!upper->checked && need_check) {
2813 need_check = false;
2814 list_add_tail(&edge->list[UPPER],
2815 &cache->pending_edge);
2816 } else {
2817 if (upper->checked)
2818 need_check = true;
2819 INIT_LIST_HEAD(&edge->list[UPPER]);
2820 }
2821 } else {
2822 upper = rb_entry(rb_node, struct btrfs_backref_node,
2823 rb_node);
2824 ASSERT(upper->checked);
2825 INIT_LIST_HEAD(&edge->list[UPPER]);
2826 if (!upper->owner)
2827 upper->owner = btrfs_header_owner(eb);
2828 }
2829 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2830
2831 if (rb_node) {
2832 btrfs_put_root(root);
2833 break;
2834 }
2835 lower = upper;
2836 upper = NULL;
2837 }
2838out:
2839 btrfs_release_path(path);
2840 return ret;
2841}
2842
2843/*
2844 * Add backref node @cur into @cache.
2845 *
2846 * NOTE: Even if the function returns 0, @cur is not yet cached, as its upper
2847 * links aren't bi-directional yet.
fc997ed0 2848 * Use btrfs_backref_finish_upper_links() to finish the linkage.
1b60d2ec
QW
2849 *
2850 * @path: Released path for indirect tree backref lookup
2851 * @iter: Released backref iter for extent tree search
2852 * @node_key: The first key of the tree block
2853 */
2854int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2855 struct btrfs_path *path,
2856 struct btrfs_backref_iter *iter,
2857 struct btrfs_key *node_key,
2858 struct btrfs_backref_node *cur)
2859{
2860 struct btrfs_fs_info *fs_info = cache->fs_info;
2861 struct btrfs_backref_edge *edge;
2862 struct btrfs_backref_node *exist;
2863 int ret;
2864
2865 ret = btrfs_backref_iter_start(iter, cur->bytenr);
2866 if (ret < 0)
2867 return ret;
2868 /*
2869 * We skip the first btrfs_tree_block_info, as we don't use the key
2870 * stored in it, but fetch it from the tree block
2871 */
2872 if (btrfs_backref_has_tree_block_info(iter)) {
2873 ret = btrfs_backref_iter_next(iter);
2874 if (ret < 0)
2875 goto out;
2876 /* No extra backref? This means the tree block is corrupted */
2877 if (ret > 0) {
2878 ret = -EUCLEAN;
2879 goto out;
2880 }
2881 }
2882 WARN_ON(cur->checked);
2883 if (!list_empty(&cur->upper)) {
2884 /*
2885 * The backref was added previously when processing backref of
2886 * type BTRFS_TREE_BLOCK_REF_KEY
2887 */
2888 ASSERT(list_is_singular(&cur->upper));
2889 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2890 list[LOWER]);
2891 ASSERT(list_empty(&edge->list[UPPER]));
2892 exist = edge->node[UPPER];
2893 /*
2894 * Add the upper level block to the pending list if we need to
2895 * check its backrefs
2896 */
2897 if (!exist->checked)
2898 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2899 } else {
2900 exist = NULL;
2901 }
2902
2903 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2904 struct extent_buffer *eb;
2905 struct btrfs_key key;
2906 int type;
2907
2908 cond_resched();
2909 eb = btrfs_backref_get_eb(iter);
2910
2911 key.objectid = iter->bytenr;
2912 if (btrfs_backref_iter_is_inline_ref(iter)) {
2913 struct btrfs_extent_inline_ref *iref;
2914
2915 /* Update key for inline backref */
2916 iref = (struct btrfs_extent_inline_ref *)
2917 ((unsigned long)iter->cur_ptr);
2918 type = btrfs_get_extent_inline_ref_type(eb, iref,
2919 BTRFS_REF_TYPE_BLOCK);
2920 if (type == BTRFS_REF_TYPE_INVALID) {
2921 ret = -EUCLEAN;
2922 goto out;
2923 }
2924 key.type = type;
2925 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2926 } else {
2927 key.type = iter->cur_key.type;
2928 key.offset = iter->cur_key.offset;
2929 }
2930
2931 /*
2932 * Parent node found and matches current inline ref, no need to
2933 * rebuild this node for this inline ref
2934 */
2935 if (exist &&
2936 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2937 exist->owner == key.offset) ||
2938 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2939 exist->bytenr == key.offset))) {
2940 exist = NULL;
2941 continue;
2942 }
2943
2944 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2945 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2946 ret = handle_direct_tree_backref(cache, &key, cur);
2947 if (ret < 0)
2948 goto out;
2949 continue;
2950 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2951 ret = -EINVAL;
2952 btrfs_print_v0_err(fs_info);
2953 btrfs_handle_fs_error(fs_info, ret, NULL);
2954 goto out;
2955 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2956 continue;
2957 }
2958
2959 /*
2960 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
2961 * means the root objectid. We need to search the tree to get
2962 * its parent bytenr.
2963 */
2964 ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2965 cur);
2966 if (ret < 0)
2967 goto out;
2968 }
2969 ret = 0;
2970 cur->checked = 1;
2971 WARN_ON(exist);
2972out:
2973 btrfs_backref_iter_release(iter);
2974 return ret;
2975}
fc997ed0
QW
2976
2977/*
2978 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2979 */
2980int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2981 struct btrfs_backref_node *start)
2982{
2983 struct list_head *useless_node = &cache->useless_node;
2984 struct btrfs_backref_edge *edge;
2985 struct rb_node *rb_node;
2986 LIST_HEAD(pending_edge);
2987
2988 ASSERT(start->checked);
2989
2990 /* Insert this node to cache if it's not COW-only */
2991 if (!start->cowonly) {
2992 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2993 &start->rb_node);
2994 if (rb_node)
2995 btrfs_backref_panic(cache->fs_info, start->bytenr,
2996 -EEXIST);
2997 list_add_tail(&start->lower, &cache->leaves);
2998 }
2999
3000 /*
3001 * Use breadth first search to iterate all related edges.
3002 *
3003 * The starting points are all the edges of this node
3004 */
3005 list_for_each_entry(edge, &start->upper, list[LOWER])
3006 list_add_tail(&edge->list[UPPER], &pending_edge);
3007
3008 while (!list_empty(&pending_edge)) {
3009 struct btrfs_backref_node *upper;
3010 struct btrfs_backref_node *lower;
fc997ed0
QW
3011
3012 edge = list_first_entry(&pending_edge,
3013 struct btrfs_backref_edge, list[UPPER]);
3014 list_del_init(&edge->list[UPPER]);
3015 upper = edge->node[UPPER];
3016 lower = edge->node[LOWER];
3017
3018 /* Parent is detached, no need to keep any edges */
3019 if (upper->detached) {
3020 list_del(&edge->list[LOWER]);
3021 btrfs_backref_free_edge(cache, edge);
3022
3023 /* Lower node is orphan, queue for cleanup */
3024 if (list_empty(&lower->upper))
3025 list_add(&lower->list, useless_node);
3026 continue;
3027 }
3028
3029 /*
3030 * All new nodes added in current build_backref_tree() haven't
3031 * been linked to the cache rb tree.
3032 * So if we have upper->rb_node populated, this means a cache
3033 * hit. We only need to link the edge, as @upper and all its
3034 * parents have already been linked.
3035 */
3036 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3037 if (upper->lowest) {
3038 list_del_init(&upper->lower);
3039 upper->lowest = 0;
3040 }
3041
3042 list_add_tail(&edge->list[UPPER], &upper->lower);
3043 continue;
3044 }
3045
3046 /* Sanity check, we shouldn't have any unchecked nodes */
3047 if (!upper->checked) {
3048 ASSERT(0);
3049 return -EUCLEAN;
3050 }
3051
3052 /* Sanity check, COW-only node has non-COW-only parent */
3053 if (start->cowonly != upper->cowonly) {
3054 ASSERT(0);
3055 return -EUCLEAN;
3056 }
3057
3058 /* Only cache non-COW-only (subvolume trees) tree blocks */
3059 if (!upper->cowonly) {
3060 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3061 &upper->rb_node);
3062 if (rb_node) {
3063 btrfs_backref_panic(cache->fs_info,
3064 upper->bytenr, -EEXIST);
3065 return -EUCLEAN;
3066 }
3067 }
3068
3069 list_add_tail(&edge->list[UPPER], &upper->lower);
3070
3071 /*
3072 * Also queue all the parent edges of this uncached node
3073 * to finish the upper linkage
3074 */
3075 list_for_each_entry(edge, &upper->upper, list[LOWER])
3076 list_add_tail(&edge->list[UPPER], &pending_edge);
3077 }
3078 return 0;
3079}
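
/*
 * Simplified sketch of tying the pieces together, patterned on relocation's
 * build_backref_tree() (hypothetical helper; a real caller must also drain
 * cache->pending_edge, calling btrfs_backref_add_tree_node() for every
 * uncached upper node, before finishing the links):
 */
static struct btrfs_backref_node *example_build_node(
		struct btrfs_backref_cache *cache, struct btrfs_path *path,
		struct btrfs_backref_iter *iter, struct btrfs_key *node_key,
		int level, u64 bytenr)
{
	struct btrfs_backref_node *node;
	int ret;

	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node)
		return ERR_PTR(-ENOMEM);
	node->lowest = 1;

	/* Resolve all backrefs of this block; creates upward links only */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
	if (ret < 0)
		goto error;

	/* Make the upward links bi-directional and insert into the cache */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0)
		goto error;

	return node;
error:
	btrfs_backref_error_cleanup(cache, node);
	return ERR_PTR(ret);
}
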
1b23ea18
QW
3080
3081void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3082 struct btrfs_backref_node *node)
3083{
3084 struct btrfs_backref_node *lower;
3085 struct btrfs_backref_node *upper;
3086 struct btrfs_backref_edge *edge;
3087
3088 while (!list_empty(&cache->useless_node)) {
3089 lower = list_first_entry(&cache->useless_node,
3090 struct btrfs_backref_node, list);
3091 list_del_init(&lower->list);
3092 }
3093 while (!list_empty(&cache->pending_edge)) {
3094 edge = list_first_entry(&cache->pending_edge,
3095 struct btrfs_backref_edge, list[UPPER]);
3096 list_del(&edge->list[UPPER]);
3097 list_del(&edge->list[LOWER]);
3098 lower = edge->node[LOWER];
3099 upper = edge->node[UPPER];
3100 btrfs_backref_free_edge(cache, edge);
3101
3102 /*
3103 * Lower is no longer linked to any upper backref nodes and
3104 * isn't in the cache, we can free it ourselves.
3105 */
3106 if (list_empty(&lower->upper) &&
3107 RB_EMPTY_NODE(&lower->rb_node))
3108 list_add(&lower->list, &cache->useless_node);
3109
3110 if (!RB_EMPTY_NODE(&upper->rb_node))
3111 continue;
3112
3113 /* Add this guy's upper edges to the list to process */
3114 list_for_each_entry(edge, &upper->upper, list[LOWER])
3115 list_add_tail(&edge->list[UPPER],
3116 &cache->pending_edge);
3117 if (list_empty(&upper->upper))
3118 list_add(&upper->list, &cache->useless_node);
3119 }
3120
3121 while (!list_empty(&cache->useless_node)) {
3122 lower = list_first_entry(&cache->useless_node,
3123 struct btrfs_backref_node, list);
3124 list_del_init(&lower->list);
3125 if (lower == node)
3126 node = NULL;
3127 btrfs_backref_free_node(cache, lower);
3128 }
3129
3130 btrfs_backref_cleanup_node(cache, node);
3131 ASSERT(list_empty(&cache->useless_node) &&
3132 list_empty(&cache->pending_edge));
3133}