// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "btrfs_inode.h"
#include "async-thread.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "qgroup.h"
#include "print-tree.h"
/*
 * backref_node, mapping_node and tree_block start with this
 */
struct tree_entry {
	struct rb_node rb_node;
	u64 bytenr;
};

/*
 * present a tree block in the backref cache
 */
struct backref_node {
	struct rb_node rb_node;
	u64 bytenr;

	u64 new_bytenr;
	/* objectid of tree block owner, may not be up to date */
	u64 owner;
	/* link to pending, changed or detached list */
	struct list_head list;
	/* list of upper level blocks that reference this block */
	struct list_head upper;
	/* list of child blocks in the cache */
	struct list_head lower;
	/* NULL if this node is not a tree root */
	struct btrfs_root *root;
	/* extent buffer got by COWing the block */
	struct extent_buffer *eb;
	/* level of tree block */
	unsigned int level:8;
	/* is the block in a non-reference counted tree */
	unsigned int cowonly:1;
	/* 1 if no child node is in the cache */
	unsigned int lowest:1;
	/* is the extent buffer locked */
	unsigned int locked:1;
	/* has the block been processed */
	unsigned int processed:1;
	/* have backrefs of this block been checked */
	unsigned int checked:1;
	/*
	 * 1 if corresponding block has been COWed but some upper
	 * level block pointers may not point to the new location
	 */
	unsigned int pending:1;
	/*
	 * 1 if the backref node isn't connected to any other
	 * backref node
	 */
	unsigned int detached:1;
};
/*
 * present a block pointer in the backref cache
 */
struct backref_edge {
	struct list_head list[2];
	struct backref_node *node[2];
};

#define LOWER	0
#define UPPER	1
#define RELOCATION_RESERVED_NODES	256

struct backref_cache {
	/* red black tree of all backref nodes in the cache */
	struct rb_root rb_root;
	/* for passing backref nodes to btrfs_reloc_cow_block */
	struct backref_node *path[BTRFS_MAX_LEVEL];
	/*
	 * list of blocks that have been COWed but some block
	 * pointers in upper level blocks may not reflect the
	 * new location
	 */
	struct list_head pending[BTRFS_MAX_LEVEL];
	/* list of backref nodes with no child node */
	struct list_head leaves;
	/* list of blocks that have been COWed in current transaction */
	struct list_head changed;
	/* list of detached backref nodes */
	struct list_head detached;

	u64 last_trans;

	int nr_nodes;
	int nr_edges;
};
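/*
 * A backref_edge connects a lower level block to the upper level block
 * that references it: edge->node[LOWER] is the child and edge->node[UPPER]
 * is the parent.  edge->list[LOWER] is linked into the child's ->upper
 * list and edge->list[UPPER] into the parent's ->lower list, so the same
 * edge can be reached from either end (see build_backref_tree()).
 */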
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct rb_node rb_node;
	u64 bytenr;
	void *data;
};

struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};

/*
 * present a tree block to process
 */
struct tree_block {
	struct rb_node rb_node;
	u64 bytenr;
	struct btrfs_key key;
	unsigned int level:8;
	unsigned int key_ready:1;
};

#define MAX_EXTENTS 128

struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};

struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group_cache *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	struct btrfs_block_rsv *block_rsv;

	struct backref_cache backref_cache;

	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	u64 search_start;
	u64 extents_found;

	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};

/* stages of data relocation */
#define MOVE_DATA_EXTENTS	0
#define UPDATE_DATA_PTRS	1
static void remove_backref_node(struct backref_cache *cache,
				struct backref_node *node);
static void __mark_block_processed(struct reloc_control *rc,
				   struct backref_node *node);

static void mapping_tree_init(struct mapping_tree *tree)
{
	tree->rb_root = RB_ROOT;
	spin_lock_init(&tree->lock);
}

static void backref_cache_init(struct backref_cache *cache)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
}
static void backref_cache_cleanup(struct backref_cache *cache)
{
	struct backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct backref_node, list);
		remove_backref_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct backref_node, lower);
		remove_backref_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}
237 static struct backref_node *alloc_backref_node(struct backref_cache *cache)
239 struct backref_node *node;
241 node = kzalloc(sizeof(*node), GFP_NOFS);
243 INIT_LIST_HEAD(&node->list);
244 INIT_LIST_HEAD(&node->upper);
245 INIT_LIST_HEAD(&node->lower);
246 RB_CLEAR_NODE(&node->rb_node);
252 static void free_backref_node(struct backref_cache *cache,
253 struct backref_node *node)
261 static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
263 struct backref_edge *edge;
265 edge = kzalloc(sizeof(*edge), GFP_NOFS);
271 static void free_backref_edge(struct backref_cache *cache,
272 struct backref_edge *edge)
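/*
 * insert @node into the rb-tree keyed by bytenr.  Returns NULL on success,
 * or the conflicting rb_node if an entry with the same bytenr already
 * exists (callers treat that as -EEXIST, see backref_tree_panic()).
 */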
280 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
281 struct rb_node *node)
283 struct rb_node **p = &root->rb_node;
284 struct rb_node *parent = NULL;
285 struct tree_entry *entry;
289 entry = rb_entry(parent, struct tree_entry, rb_node);
291 if (bytenr < entry->bytenr)
293 else if (bytenr > entry->bytenr)
299 rb_link_node(node, parent, p);
300 rb_insert_color(node, root);
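/*
 * find the tree_entry with the given bytenr, or return NULL if it is not
 * in the tree.
 */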
304 static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
306 struct rb_node *n = root->rb_node;
307 struct tree_entry *entry;
310 entry = rb_entry(n, struct tree_entry, rb_node);
312 if (bytenr < entry->bytenr)
314 else if (bytenr > entry->bytenr)
322 static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
325 struct btrfs_fs_info *fs_info = NULL;
326 struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
329 fs_info = bnode->root->fs_info;
330 btrfs_panic(fs_info, errno,
331 "Inconsistency in backref cache found at offset %llu",
/* walk up backref nodes until we reach the node that presents the tree root */
338 static struct backref_node *walk_up_backref(struct backref_node *node,
339 struct backref_edge *edges[],
342 struct backref_edge *edge;
345 while (!list_empty(&node->upper)) {
346 edge = list_entry(node->upper.next,
347 struct backref_edge, list[LOWER]);
349 node = edge->node[UPPER];
351 BUG_ON(node->detached);
357 * walk down backref nodes to find start of next reference path
359 static struct backref_node *walk_down_backref(struct backref_edge *edges[],
362 struct backref_edge *edge;
363 struct backref_node *lower;
367 edge = edges[idx - 1];
368 lower = edge->node[LOWER];
369 if (list_is_last(&edge->list[LOWER], &lower->upper)) {
373 edge = list_entry(edge->list[LOWER].next,
374 struct backref_edge, list[LOWER]);
375 edges[idx - 1] = edge;
377 return edge->node[UPPER];
383 static void unlock_node_buffer(struct backref_node *node)
386 btrfs_tree_unlock(node->eb);
391 static void drop_node_buffer(struct backref_node *node)
394 unlock_node_buffer(node);
395 free_extent_buffer(node->eb);
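/*
 * drop @node from the cache: the node must have no upper level references
 * left; its extent buffer is released and the node is unlinked from all
 * lists and the rb-tree before being freed.
 */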
400 static void drop_backref_node(struct backref_cache *tree,
401 struct backref_node *node)
403 BUG_ON(!list_empty(&node->upper));
405 drop_node_buffer(node);
406 list_del(&node->list);
407 list_del(&node->lower);
408 if (!RB_EMPTY_NODE(&node->rb_node))
409 rb_erase(&node->rb_node, &tree->rb_root);
410 free_backref_node(tree, node);
414 * remove a backref node from the backref cache
416 static void remove_backref_node(struct backref_cache *cache,
417 struct backref_node *node)
419 struct backref_node *upper;
420 struct backref_edge *edge;
425 BUG_ON(!node->lowest && !node->detached);
426 while (!list_empty(&node->upper)) {
427 edge = list_entry(node->upper.next, struct backref_edge,
429 upper = edge->node[UPPER];
430 list_del(&edge->list[LOWER]);
431 list_del(&edge->list[UPPER]);
432 free_backref_edge(cache, edge);
434 if (RB_EMPTY_NODE(&upper->rb_node)) {
435 BUG_ON(!list_empty(&node->upper));
436 drop_backref_node(cache, node);
		/*
		 * add the node to the leaf node list if no other
		 * child block is cached.
		 */
445 if (list_empty(&upper->lower)) {
446 list_add_tail(&upper->lower, &cache->leaves);
451 drop_backref_node(cache, node);
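/* re-key @node in the cache rb-tree after the block has moved to @bytenr */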
454 static void update_backref_node(struct backref_cache *cache,
455 struct backref_node *node, u64 bytenr)
457 struct rb_node *rb_node;
458 rb_erase(&node->rb_node, &cache->rb_root);
459 node->bytenr = bytenr;
460 rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
462 backref_tree_panic(rb_node, -EEXIST, bytenr);
466 * update backref cache after a transaction commit
468 static int update_backref_cache(struct btrfs_trans_handle *trans,
469 struct backref_cache *cache)
471 struct backref_node *node;
474 if (cache->last_trans == 0) {
475 cache->last_trans = trans->transid;
479 if (cache->last_trans == trans->transid)
	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree, so
	 * the detached nodes are no longer useful.
	 */
487 while (!list_empty(&cache->detached)) {
488 node = list_entry(cache->detached.next,
489 struct backref_node, list);
490 remove_backref_node(cache, node);
493 while (!list_empty(&cache->changed)) {
494 node = list_entry(cache->changed.next,
495 struct backref_node, list);
496 list_del_init(&node->list);
497 BUG_ON(node->pending);
498 update_backref_node(cache, node, node->new_bytenr);
502 * some nodes can be left in the pending list if there were
503 * errors during processing the pending nodes.
505 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
506 list_for_each_entry(node, &cache->pending[level], list) {
507 BUG_ON(!node->pending);
508 if (node->bytenr == node->new_bytenr)
510 update_backref_node(cache, node, node->new_bytenr);
514 cache->last_trans = 0;
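/*
 * return 1 if backref lookup for @root can be skipped because its reloc
 * tree (created in an earlier transaction) already covers it, 0 otherwise.
 */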
519 static int should_ignore_root(struct btrfs_root *root)
521 struct btrfs_root *reloc_root;
523 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
526 reloc_root = root->reloc_root;
530 if (btrfs_root_last_snapshot(&reloc_root->root_item) ==
531 root->fs_info->running_transaction->transid - 1)
	/*
	 * if there is a reloc tree and it was created in the previous
	 * transaction, backref lookup can find the reloc tree, so the
	 * backref node for the fs tree root is useless for relocation.
	 */
542 * find reloc tree by address of tree root
544 static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
547 struct rb_node *rb_node;
548 struct mapping_node *node;
549 struct btrfs_root *root = NULL;
551 spin_lock(&rc->reloc_root_tree.lock);
552 rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
554 node = rb_entry(rb_node, struct mapping_node, rb_node);
555 root = (struct btrfs_root *)node->data;
557 spin_unlock(&rc->reloc_root_tree.lock);
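/* return 1 for tree roots that are never reference counted (COW-only trees) */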
561 static int is_cowonly_root(u64 root_objectid)
563 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
564 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
565 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
566 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
567 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
568 root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
569 root_objectid == BTRFS_UUID_TREE_OBJECTID ||
570 root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
571 root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
576 static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
579 struct btrfs_key key;
581 key.objectid = root_objectid;
582 key.type = BTRFS_ROOT_ITEM_KEY;
583 if (is_cowonly_root(root_objectid))
586 key.offset = (u64)-1;
588 return btrfs_get_fs_root(fs_info, &key, false);
591 static noinline_for_stack
592 int find_inline_backref(struct extent_buffer *leaf, int slot,
593 unsigned long *ptr, unsigned long *end)
595 struct btrfs_key key;
596 struct btrfs_extent_item *ei;
597 struct btrfs_tree_block_info *bi;
600 btrfs_item_key_to_cpu(leaf, &key, slot);
602 item_size = btrfs_item_size_nr(leaf, slot);
603 if (item_size < sizeof(*ei)) {
604 btrfs_print_v0_err(leaf->fs_info);
605 btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
608 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
609 WARN_ON(!(btrfs_extent_flags(leaf, ei) &
610 BTRFS_EXTENT_FLAG_TREE_BLOCK));
612 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
613 item_size <= sizeof(*ei) + sizeof(*bi)) {
614 WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
617 if (key.type == BTRFS_METADATA_ITEM_KEY &&
618 item_size <= sizeof(*ei)) {
619 WARN_ON(item_size < sizeof(*ei));
623 if (key.type == BTRFS_EXTENT_ITEM_KEY) {
624 bi = (struct btrfs_tree_block_info *)(ei + 1);
625 *ptr = (unsigned long)(bi + 1);
627 *ptr = (unsigned long)(ei + 1);
629 *end = (unsigned long)ei + item_size;
/*
 * build backref tree for a given tree block. root of the backref tree
 * corresponds to the tree block, leaves of the backref tree correspond to
 * roots of b-trees that reference the tree block.
 *
 * the basic idea of this function is to check backrefs of a given block
 * to find upper level blocks that reference the block, and then check
 * backrefs of these upper level blocks recursively. the recursion stops
 * when the tree root is reached or backrefs for the block are cached.
 *
 * NOTE: if we find that backrefs for a block are cached, we know backrefs
 * for all upper level blocks that directly/indirectly reference the
 * block are also cached.
 */
647 static noinline_for_stack
648 struct backref_node *build_backref_tree(struct reloc_control *rc,
649 struct btrfs_key *node_key,
650 int level, u64 bytenr)
652 struct backref_cache *cache = &rc->backref_cache;
653 struct btrfs_path *path1; /* For searching extent root */
654 struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
655 struct extent_buffer *eb;
656 struct btrfs_root *root;
657 struct backref_node *cur;
658 struct backref_node *upper;
659 struct backref_node *lower;
660 struct backref_node *node = NULL;
661 struct backref_node *exist = NULL;
662 struct backref_edge *edge;
663 struct rb_node *rb_node;
664 struct btrfs_key key;
667 LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
672 bool need_check = true;
674 path1 = btrfs_alloc_path();
675 path2 = btrfs_alloc_path();
676 if (!path1 || !path2) {
680 path1->reada = READA_FORWARD;
681 path2->reada = READA_FORWARD;
683 node = alloc_backref_node(cache);
689 node->bytenr = bytenr;
696 key.objectid = cur->bytenr;
697 key.type = BTRFS_METADATA_ITEM_KEY;
698 key.offset = (u64)-1;
700 path1->search_commit_root = 1;
701 path1->skip_locking = 1;
702 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
709 ASSERT(path1->slots[0]);
713 WARN_ON(cur->checked);
714 if (!list_empty(&cur->upper)) {
716 * the backref was added previously when processing
717 * backref of type BTRFS_TREE_BLOCK_REF_KEY
719 ASSERT(list_is_singular(&cur->upper));
720 edge = list_entry(cur->upper.next, struct backref_edge,
722 ASSERT(list_empty(&edge->list[UPPER]));
723 exist = edge->node[UPPER];
		/* add the upper level block to the pending list if we need to check its backrefs */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &list);
736 eb = path1->nodes[0];
739 if (path1->slots[0] >= btrfs_header_nritems(eb)) {
740 ret = btrfs_next_leaf(rc->extent_root, path1);
747 eb = path1->nodes[0];
750 btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
751 if (key.objectid != cur->bytenr) {
756 if (key.type == BTRFS_EXTENT_ITEM_KEY ||
757 key.type == BTRFS_METADATA_ITEM_KEY) {
758 ret = find_inline_backref(eb, path1->slots[0],
766 /* update key for inline back ref */
767 struct btrfs_extent_inline_ref *iref;
769 iref = (struct btrfs_extent_inline_ref *)ptr;
770 type = btrfs_get_extent_inline_ref_type(eb, iref,
771 BTRFS_REF_TYPE_BLOCK);
772 if (type == BTRFS_REF_TYPE_INVALID) {
777 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
779 WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
780 key.type != BTRFS_SHARED_BLOCK_REF_KEY);
784 * Parent node found and matches current inline ref, no need to
785 * rebuild this node for this inline ref.
788 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
789 exist->owner == key.offset) ||
790 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
791 exist->bytenr == key.offset))) {
796 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
797 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
798 if (key.objectid == key.offset) {
800 * Only root blocks of reloc trees use backref
801 * pointing to itself.
803 root = find_reloc_root(rc, cur->bytenr);
809 edge = alloc_backref_edge(cache);
814 rb_node = tree_search(&cache->rb_root, key.offset);
816 upper = alloc_backref_node(cache);
818 free_backref_edge(cache, edge);
822 upper->bytenr = key.offset;
823 upper->level = cur->level + 1;
825 * backrefs for the upper level block isn't
826 * cached, add the block to pending list
828 list_add_tail(&edge->list[UPPER], &list);
830 upper = rb_entry(rb_node, struct backref_node,
832 ASSERT(upper->checked);
833 INIT_LIST_HEAD(&edge->list[UPPER]);
835 list_add_tail(&edge->list[LOWER], &cur->upper);
836 edge->node[LOWER] = cur;
837 edge->node[UPPER] = upper;
840 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
842 btrfs_print_v0_err(rc->extent_root->fs_info);
843 btrfs_handle_fs_error(rc->extent_root->fs_info, err,
846 } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset means
		 * the root objectid. We need to search the tree to get its
		 * parent bytenr.
		 */
855 root = read_fs_root(rc->extent_root->fs_info, key.offset);
861 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
864 if (btrfs_root_level(&root->root_item) == cur->level) {
866 ASSERT(btrfs_root_bytenr(&root->root_item) ==
868 if (should_ignore_root(root))
869 list_add(&cur->list, &useless);
875 level = cur->level + 1;
877 /* Search the tree to find parent blocks referring the block. */
878 path2->search_commit_root = 1;
879 path2->skip_locking = 1;
880 path2->lowest_level = level;
881 ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
882 path2->lowest_level = 0;
887 if (ret > 0 && path2->slots[level] > 0)
888 path2->slots[level]--;
890 eb = path2->nodes[level];
891 if (btrfs_node_blockptr(eb, path2->slots[level]) !=
893 btrfs_err(root->fs_info,
894 "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
895 cur->bytenr, level - 1,
896 root->root_key.objectid,
897 node_key->objectid, node_key->type,
905 /* Add all nodes and edges in the path */
906 for (; level < BTRFS_MAX_LEVEL; level++) {
907 if (!path2->nodes[level]) {
908 ASSERT(btrfs_root_bytenr(&root->root_item) ==
910 if (should_ignore_root(root))
911 list_add(&lower->list, &useless);
917 edge = alloc_backref_edge(cache);
923 eb = path2->nodes[level];
924 rb_node = tree_search(&cache->rb_root, eb->start);
926 upper = alloc_backref_node(cache);
928 free_backref_edge(cache, edge);
932 upper->bytenr = eb->start;
933 upper->owner = btrfs_header_owner(eb);
934 upper->level = lower->level + 1;
935 if (!test_bit(BTRFS_ROOT_REF_COWS,
				/*
				 * if we know the block isn't shared we can
				 * avoid checking its backrefs.
				 */
943 if (btrfs_block_can_be_shared(root, eb))
				/*
				 * add the block to the pending list if we need
				 * to check its backrefs. we only do this once
				 * while walking up a tree as we will catch
				 * anything else later on.
				 */
954 if (!upper->checked && need_check) {
956 list_add_tail(&edge->list[UPPER],
961 INIT_LIST_HEAD(&edge->list[UPPER]);
964 upper = rb_entry(rb_node, struct backref_node,
966 ASSERT(upper->checked);
967 INIT_LIST_HEAD(&edge->list[UPPER]);
969 upper->owner = btrfs_header_owner(eb);
971 list_add_tail(&edge->list[LOWER], &lower->upper);
972 edge->node[LOWER] = lower;
973 edge->node[UPPER] = upper;
980 btrfs_release_path(path2);
983 ptr += btrfs_extent_inline_ref_size(key.type);
993 btrfs_release_path(path1);
998 /* the pending list isn't empty, take the first block to process */
999 if (!list_empty(&list)) {
1000 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1001 list_del_init(&edge->list[UPPER]);
1002 cur = edge->node[UPPER];
	/* everything goes well, connect backref nodes and insert them into the cache */
1010 ASSERT(node->checked);
1011 cowonly = node->cowonly;
1013 rb_node = tree_insert(&cache->rb_root, node->bytenr,
1016 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1017 list_add_tail(&node->lower, &cache->leaves);
1020 list_for_each_entry(edge, &node->upper, list[LOWER])
1021 list_add_tail(&edge->list[UPPER], &list);
1023 while (!list_empty(&list)) {
1024 edge = list_entry(list.next, struct backref_edge, list[UPPER]);
1025 list_del_init(&edge->list[UPPER]);
1026 upper = edge->node[UPPER];
1027 if (upper->detached) {
1028 list_del(&edge->list[LOWER]);
1029 lower = edge->node[LOWER];
1030 free_backref_edge(cache, edge);
1031 if (list_empty(&lower->upper))
1032 list_add(&lower->list, &useless);
1036 if (!RB_EMPTY_NODE(&upper->rb_node)) {
1037 if (upper->lowest) {
1038 list_del_init(&upper->lower);
1042 list_add_tail(&edge->list[UPPER], &upper->lower);
1046 if (!upper->checked) {
			/* Still want to blow up for developers since this is a logic bug. */
1055 if (cowonly != upper->cowonly) {
1062 rb_node = tree_insert(&cache->rb_root, upper->bytenr,
1065 backref_tree_panic(rb_node, -EEXIST,
1069 list_add_tail(&edge->list[UPPER], &upper->lower);
1071 list_for_each_entry(edge, &upper->upper, list[LOWER])
1072 list_add_tail(&edge->list[UPPER], &list);
	/*
	 * process useless backref nodes. backref nodes for tree leaves
	 * are deleted from the cache. backref nodes for upper level
	 * tree blocks are left in the cache to avoid unnecessary backref
	 * lookup.
	 */
1080 while (!list_empty(&useless)) {
1081 upper = list_entry(useless.next, struct backref_node, list);
1082 list_del_init(&upper->list);
1083 ASSERT(list_empty(&upper->upper));
1086 if (upper->lowest) {
1087 list_del_init(&upper->lower);
1090 while (!list_empty(&upper->lower)) {
1091 edge = list_entry(upper->lower.next,
1092 struct backref_edge, list[UPPER]);
1093 list_del(&edge->list[UPPER]);
1094 list_del(&edge->list[LOWER]);
1095 lower = edge->node[LOWER];
1096 free_backref_edge(cache, edge);
1098 if (list_empty(&lower->upper))
1099 list_add(&lower->list, &useless);
1101 __mark_block_processed(rc, upper);
1102 if (upper->level > 0) {
1103 list_add(&upper->list, &cache->detached);
1104 upper->detached = 1;
1106 rb_erase(&upper->rb_node, &cache->rb_root);
1107 free_backref_node(cache, upper);
1111 btrfs_free_path(path1);
1112 btrfs_free_path(path2);
1114 while (!list_empty(&useless)) {
1115 lower = list_entry(useless.next,
1116 struct backref_node, list);
1117 list_del_init(&lower->list);
1119 while (!list_empty(&list)) {
1120 edge = list_first_entry(&list, struct backref_edge,
1122 list_del(&edge->list[UPPER]);
1123 list_del(&edge->list[LOWER]);
1124 lower = edge->node[LOWER];
1125 upper = edge->node[UPPER];
1126 free_backref_edge(cache, edge);
1129 * Lower is no longer linked to any upper backref nodes
1130 * and isn't in the cache, we can free it ourselves.
1132 if (list_empty(&lower->upper) &&
1133 RB_EMPTY_NODE(&lower->rb_node))
1134 list_add(&lower->list, &useless);
1136 if (!RB_EMPTY_NODE(&upper->rb_node))
1139 /* Add this guy's upper edges to the list to process */
1140 list_for_each_entry(edge, &upper->upper, list[LOWER])
1141 list_add_tail(&edge->list[UPPER], &list);
1142 if (list_empty(&upper->upper))
1143 list_add(&upper->list, &useless);
1146 while (!list_empty(&useless)) {
1147 lower = list_entry(useless.next,
1148 struct backref_node, list);
1149 list_del_init(&lower->list);
1152 free_backref_node(cache, lower);
1155 free_backref_node(cache, node);
1156 return ERR_PTR(err);
1158 ASSERT(!node || !node->detached);
1163 * helper to add backref node for the newly created snapshot.
1164 * the backref node is created by cloning backref node that
1165 * corresponds to root of source tree
1167 static int clone_backref_node(struct btrfs_trans_handle *trans,
1168 struct reloc_control *rc,
1169 struct btrfs_root *src,
1170 struct btrfs_root *dest)
1172 struct btrfs_root *reloc_root = src->reloc_root;
1173 struct backref_cache *cache = &rc->backref_cache;
1174 struct backref_node *node = NULL;
1175 struct backref_node *new_node;
1176 struct backref_edge *edge;
1177 struct backref_edge *new_edge;
1178 struct rb_node *rb_node;
1180 if (cache->last_trans > 0)
1181 update_backref_cache(trans, cache);
1183 rb_node = tree_search(&cache->rb_root, src->commit_root->start);
1185 node = rb_entry(rb_node, struct backref_node, rb_node);
1189 BUG_ON(node->new_bytenr != reloc_root->node->start);
1193 rb_node = tree_search(&cache->rb_root,
1194 reloc_root->commit_root->start);
1196 node = rb_entry(rb_node, struct backref_node,
1198 BUG_ON(node->detached);
1205 new_node = alloc_backref_node(cache);
1209 new_node->bytenr = dest->node->start;
1210 new_node->level = node->level;
1211 new_node->lowest = node->lowest;
1212 new_node->checked = 1;
1213 new_node->root = dest;
1215 if (!node->lowest) {
1216 list_for_each_entry(edge, &node->lower, list[UPPER]) {
1217 new_edge = alloc_backref_edge(cache);
1221 new_edge->node[UPPER] = new_node;
1222 new_edge->node[LOWER] = edge->node[LOWER];
1223 list_add_tail(&new_edge->list[UPPER],
1227 list_add_tail(&new_node->lower, &cache->leaves);
1230 rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
1231 &new_node->rb_node);
1233 backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
1235 if (!new_node->lowest) {
1236 list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
1237 list_add_tail(&new_edge->list[LOWER],
1238 &new_edge->node[LOWER]->upper);
1243 while (!list_empty(&new_node->lower)) {
1244 new_edge = list_entry(new_node->lower.next,
1245 struct backref_edge, list[UPPER]);
1246 list_del(&new_edge->list[UPPER]);
1247 free_backref_edge(cache, new_edge);
1249 free_backref_node(cache, new_node);
1254 * helper to add 'address of tree root -> reloc tree' mapping
1256 static int __must_check __add_reloc_root(struct btrfs_root *root)
1258 struct btrfs_fs_info *fs_info = root->fs_info;
1259 struct rb_node *rb_node;
1260 struct mapping_node *node;
1261 struct reloc_control *rc = fs_info->reloc_ctl;
1263 node = kmalloc(sizeof(*node), GFP_NOFS);
1267 node->bytenr = root->node->start;
1270 spin_lock(&rc->reloc_root_tree.lock);
1271 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1272 node->bytenr, &node->rb_node);
1273 spin_unlock(&rc->reloc_root_tree.lock);
1275 btrfs_panic(fs_info, -EEXIST,
1276 "Duplicate root found for start=%llu while inserting into relocation tree",
1280 list_add_tail(&root->root_list, &rc->reloc_roots);
/* helper to delete the 'address of tree root -> reloc tree' mapping */
1288 static void __del_reloc_root(struct btrfs_root *root)
1290 struct btrfs_fs_info *fs_info = root->fs_info;
1291 struct rb_node *rb_node;
1292 struct mapping_node *node = NULL;
1293 struct reloc_control *rc = fs_info->reloc_ctl;
1295 if (rc && root->node) {
1296 spin_lock(&rc->reloc_root_tree.lock);
1297 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1300 node = rb_entry(rb_node, struct mapping_node, rb_node);
1301 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1303 spin_unlock(&rc->reloc_root_tree.lock);
1306 BUG_ON((struct btrfs_root *)node->data != root);
1309 spin_lock(&fs_info->trans_lock);
1310 list_del_init(&root->root_list);
1311 spin_unlock(&fs_info->trans_lock);
/* helper to update the 'address of tree root -> reloc tree' mapping */
1319 static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
1321 struct btrfs_fs_info *fs_info = root->fs_info;
1322 struct rb_node *rb_node;
1323 struct mapping_node *node = NULL;
1324 struct reloc_control *rc = fs_info->reloc_ctl;
1326 spin_lock(&rc->reloc_root_tree.lock);
1327 rb_node = tree_search(&rc->reloc_root_tree.rb_root,
1330 node = rb_entry(rb_node, struct mapping_node, rb_node);
1331 rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
1333 spin_unlock(&rc->reloc_root_tree.lock);
1337 BUG_ON((struct btrfs_root *)node->data != root);
1339 spin_lock(&rc->reloc_root_tree.lock);
1340 node->bytenr = new_bytenr;
1341 rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
1342 node->bytenr, &node->rb_node);
1343 spin_unlock(&rc->reloc_root_tree.lock);
1345 backref_tree_panic(rb_node, -EEXIST, node->bytenr);
1349 static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
1350 struct btrfs_root *root, u64 objectid)
1352 struct btrfs_fs_info *fs_info = root->fs_info;
1353 struct btrfs_root *reloc_root;
1354 struct extent_buffer *eb;
1355 struct btrfs_root_item *root_item;
1356 struct btrfs_key root_key;
1359 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
1362 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
1363 root_key.type = BTRFS_ROOT_ITEM_KEY;
1364 root_key.offset = objectid;
1366 if (root->root_key.objectid == objectid) {
1367 u64 commit_root_gen;
1369 /* called by btrfs_init_reloc_root */
1370 ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
1371 BTRFS_TREE_RELOC_OBJECTID);
		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) or when
		 * it's created before the transaction commit is started.
		 */
1381 commit_root_gen = btrfs_header_generation(root->commit_root);
1382 btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
1385 * called by btrfs_reloc_post_snapshot_hook.
1386 * the source tree is a reloc tree, all tree blocks
1387 * modified after it was created have RELOC flag
1388 * set in their headers. so it's OK to not update
1389 * the 'last_snapshot'.
1391 ret = btrfs_copy_root(trans, root, root->node, &eb,
1392 BTRFS_TREE_RELOC_OBJECTID);
1396 memcpy(root_item, &root->root_item, sizeof(*root_item));
1397 btrfs_set_root_bytenr(root_item, eb->start);
1398 btrfs_set_root_level(root_item, btrfs_header_level(eb));
1399 btrfs_set_root_generation(root_item, trans->transid);
1401 if (root->root_key.objectid == objectid) {
1402 btrfs_set_root_refs(root_item, 0);
1403 memset(&root_item->drop_progress, 0,
1404 sizeof(struct btrfs_disk_key));
1405 root_item->drop_level = 0;
1408 btrfs_tree_unlock(eb);
1409 free_extent_buffer(eb);
1411 ret = btrfs_insert_root(trans, fs_info->tree_root,
1412 &root_key, root_item);
1416 reloc_root = btrfs_read_fs_root(fs_info->tree_root, &root_key);
1417 BUG_ON(IS_ERR(reloc_root));
1418 reloc_root->last_trans = trans->transid;
1423 * create reloc tree for a given fs tree. reloc tree is just a
1424 * snapshot of the fs tree with special root objectid.
1426 int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
1427 struct btrfs_root *root)
1429 struct btrfs_fs_info *fs_info = root->fs_info;
1430 struct btrfs_root *reloc_root;
1431 struct reloc_control *rc = fs_info->reloc_ctl;
1432 struct btrfs_block_rsv *rsv;
1436 if (root->reloc_root) {
1437 reloc_root = root->reloc_root;
1438 reloc_root->last_trans = trans->transid;
1442 if (!rc || !rc->create_reloc_tree ||
1443 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1446 if (!trans->reloc_reserved) {
1447 rsv = trans->block_rsv;
1448 trans->block_rsv = rc->block_rsv;
1451 reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
1453 trans->block_rsv = rsv;
1455 ret = __add_reloc_root(reloc_root);
1457 root->reloc_root = reloc_root;
1462 * update root item of reloc tree
1464 int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1465 struct btrfs_root *root)
1467 struct btrfs_fs_info *fs_info = root->fs_info;
1468 struct btrfs_root *reloc_root;
1469 struct btrfs_root_item *root_item;
1472 if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state) ||
1476 reloc_root = root->reloc_root;
1477 root_item = &reloc_root->root_item;
	/* root->reloc_root will stay until the current relocation is finished */
1480 if (fs_info->reloc_ctl->merge_reloc_tree &&
1481 btrfs_root_refs(root_item) == 0) {
1482 set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
1483 __del_reloc_root(reloc_root);
1486 if (reloc_root->commit_root != reloc_root->node) {
1487 btrfs_set_root_node(root_item, reloc_root->node);
1488 free_extent_buffer(reloc_root->commit_root);
1489 reloc_root->commit_root = btrfs_root_node(reloc_root);
1492 ret = btrfs_update_root(trans, fs_info->tree_root,
1493 &reloc_root->root_key, root_item);
1501 * helper to find first cached inode with inode number >= objectid
1504 static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
1506 struct rb_node *node;
1507 struct rb_node *prev;
1508 struct btrfs_inode *entry;
1509 struct inode *inode;
1511 spin_lock(&root->inode_lock);
1513 node = root->inode_tree.rb_node;
1517 entry = rb_entry(node, struct btrfs_inode, rb_node);
1519 if (objectid < btrfs_ino(entry))
1520 node = node->rb_left;
1521 else if (objectid > btrfs_ino(entry))
1522 node = node->rb_right;
1528 entry = rb_entry(prev, struct btrfs_inode, rb_node);
1529 if (objectid <= btrfs_ino(entry)) {
1533 prev = rb_next(prev);
1537 entry = rb_entry(node, struct btrfs_inode, rb_node);
1538 inode = igrab(&entry->vfs_inode);
1540 spin_unlock(&root->inode_lock);
1544 objectid = btrfs_ino(entry) + 1;
1545 if (cond_resched_lock(&root->inode_lock))
1548 node = rb_next(node);
1550 spin_unlock(&root->inode_lock);
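/* return 1 if @bytenr falls inside @block_group, 0 otherwise */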
1554 static int in_block_group(u64 bytenr,
1555 struct btrfs_block_group_cache *block_group)
1557 if (bytenr >= block_group->key.objectid &&
1558 bytenr < block_group->key.objectid + block_group->key.offset)
1564 * get new location of data
1566 static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
1567 u64 bytenr, u64 num_bytes)
1569 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
1570 struct btrfs_path *path;
1571 struct btrfs_file_extent_item *fi;
1572 struct extent_buffer *leaf;
1575 path = btrfs_alloc_path();
1579 bytenr -= BTRFS_I(reloc_inode)->index_cnt;
1580 ret = btrfs_lookup_file_extent(NULL, root, path,
1581 btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
1589 leaf = path->nodes[0];
1590 fi = btrfs_item_ptr(leaf, path->slots[0],
1591 struct btrfs_file_extent_item);
1593 BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
1594 btrfs_file_extent_compression(leaf, fi) ||
1595 btrfs_file_extent_encryption(leaf, fi) ||
1596 btrfs_file_extent_other_encoding(leaf, fi));
1598 if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
1603 *new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1606 btrfs_free_path(path);
1611 * update file extent items in the tree leaf to point to
1612 * the new locations.
1614 static noinline_for_stack
1615 int replace_file_extents(struct btrfs_trans_handle *trans,
1616 struct reloc_control *rc,
1617 struct btrfs_root *root,
1618 struct extent_buffer *leaf)
1620 struct btrfs_fs_info *fs_info = root->fs_info;
1621 struct btrfs_key key;
1622 struct btrfs_file_extent_item *fi;
1623 struct inode *inode = NULL;
1635 if (rc->stage != UPDATE_DATA_PTRS)
1638 /* reloc trees always use full backref */
1639 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1640 parent = leaf->start;
1644 nritems = btrfs_header_nritems(leaf);
1645 for (i = 0; i < nritems; i++) {
1647 btrfs_item_key_to_cpu(leaf, &key, i);
1648 if (key.type != BTRFS_EXTENT_DATA_KEY)
1650 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
1651 if (btrfs_file_extent_type(leaf, fi) ==
1652 BTRFS_FILE_EXTENT_INLINE)
1654 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1655 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
1658 if (!in_block_group(bytenr, rc->block_group))
1662 * if we are modifying block in fs tree, wait for readpage
1663 * to complete and drop the extent cache
1665 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1667 inode = find_next_inode(root, key.objectid);
1669 } else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
1670 btrfs_add_delayed_iput(inode);
1671 inode = find_next_inode(root, key.objectid);
1673 if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
1675 btrfs_file_extent_num_bytes(leaf, fi);
1676 WARN_ON(!IS_ALIGNED(key.offset,
1677 fs_info->sectorsize));
1678 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
1680 ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
1685 btrfs_drop_extent_cache(BTRFS_I(inode),
1686 key.offset, end, 1);
1687 unlock_extent(&BTRFS_I(inode)->io_tree,
1692 ret = get_new_location(rc->data_inode, &new_bytenr,
1696 * Don't have to abort since we've not changed anything
1697 * in the file extent yet.
1702 btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
1705 key.offset -= btrfs_file_extent_offset(leaf, fi);
1706 ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
1708 btrfs_header_owner(leaf),
1709 key.objectid, key.offset);
1711 btrfs_abort_transaction(trans, ret);
1715 ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
1716 parent, btrfs_header_owner(leaf),
1717 key.objectid, key.offset);
1719 btrfs_abort_transaction(trans, ret);
1724 btrfs_mark_buffer_dirty(leaf);
1726 btrfs_add_delayed_iput(inode);
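/*
 * compare the key at @slot of @eb with the key the path currently points
 * to at @level; returns 0 if they are equal.
 */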
1730 static noinline_for_stack
1731 int memcmp_node_keys(struct extent_buffer *eb, int slot,
1732 struct btrfs_path *path, int level)
1734 struct btrfs_disk_key key1;
1735 struct btrfs_disk_key key2;
1736 btrfs_node_key(eb, &key1, slot);
1737 btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1738 return memcmp(&key1, &key2, sizeof(key1));
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks that haven't been modified since the
 * reloc tree was created can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block was replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 */
1750 static noinline_for_stack
1751 int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
1752 struct btrfs_root *dest, struct btrfs_root *src,
1753 struct btrfs_path *path, struct btrfs_key *next_key,
1754 int lowest_level, int max_level)
1756 struct btrfs_fs_info *fs_info = dest->fs_info;
1757 struct extent_buffer *eb;
1758 struct extent_buffer *parent;
1759 struct btrfs_key key;
1771 BUG_ON(src->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
1772 BUG_ON(dest->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
1774 last_snapshot = btrfs_root_last_snapshot(&src->root_item);
1776 slot = path->slots[lowest_level];
1777 btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
1779 eb = btrfs_lock_root_node(dest);
1780 btrfs_set_lock_blocking_write(eb);
1781 level = btrfs_header_level(eb);
1783 if (level < lowest_level) {
1784 btrfs_tree_unlock(eb);
1785 free_extent_buffer(eb);
1790 ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb);
1793 btrfs_set_lock_blocking_write(eb);
1796 next_key->objectid = (u64)-1;
1797 next_key->type = (u8)-1;
1798 next_key->offset = (u64)-1;
1803 struct btrfs_key first_key;
1805 level = btrfs_header_level(parent);
1806 BUG_ON(level < lowest_level);
1808 ret = btrfs_bin_search(parent, &key, level, &slot);
1811 if (ret && slot > 0)
1814 if (next_key && slot + 1 < btrfs_header_nritems(parent))
1815 btrfs_node_key_to_cpu(parent, next_key, slot + 1);
1817 old_bytenr = btrfs_node_blockptr(parent, slot);
1818 blocksize = fs_info->nodesize;
1819 old_ptr_gen = btrfs_node_ptr_generation(parent, slot);
1820 btrfs_node_key_to_cpu(parent, &first_key, slot);
1822 if (level <= max_level) {
1823 eb = path->nodes[level];
1824 new_bytenr = btrfs_node_blockptr(eb,
1825 path->slots[level]);
1826 new_ptr_gen = btrfs_node_ptr_generation(eb,
1827 path->slots[level]);
1833 if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
1838 if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
1839 memcmp_node_keys(parent, slot, path, level)) {
1840 if (level <= lowest_level) {
1845 eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen,
1846 level - 1, &first_key);
1850 } else if (!extent_buffer_uptodate(eb)) {
1852 free_extent_buffer(eb);
1855 btrfs_tree_lock(eb);
1857 ret = btrfs_cow_block(trans, dest, eb, parent,
1861 btrfs_set_lock_blocking_write(eb);
1863 btrfs_tree_unlock(parent);
1864 free_extent_buffer(parent);
1871 btrfs_tree_unlock(parent);
1872 free_extent_buffer(parent);
1877 btrfs_node_key_to_cpu(path->nodes[level], &key,
1878 path->slots[level]);
1879 btrfs_release_path(path);
1881 path->lowest_level = level;
1882 ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
1883 path->lowest_level = 0;
		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
1900 ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
1901 rc->block_group, parent, slot,
1902 path->nodes[level], path->slots[level],
1907 * swap blocks in fs tree and reloc tree.
1909 btrfs_set_node_blockptr(parent, slot, new_bytenr);
1910 btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
1911 btrfs_mark_buffer_dirty(parent);
1913 btrfs_set_node_blockptr(path->nodes[level],
1914 path->slots[level], old_bytenr);
1915 btrfs_set_node_ptr_generation(path->nodes[level],
1916 path->slots[level], old_ptr_gen);
1917 btrfs_mark_buffer_dirty(path->nodes[level]);
1919 ret = btrfs_inc_extent_ref(trans, src, old_bytenr,
1920 blocksize, path->nodes[level]->start,
1921 src->root_key.objectid, level - 1, 0);
1923 ret = btrfs_inc_extent_ref(trans, dest, new_bytenr,
1924 blocksize, 0, dest->root_key.objectid,
1928 ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
1929 path->nodes[level]->start,
1930 src->root_key.objectid, level - 1, 0);
1933 ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
1934 0, dest->root_key.objectid, level - 1,
1938 btrfs_unlock_up_safe(path, 0);
1943 btrfs_tree_unlock(parent);
1944 free_extent_buffer(parent);
1949 * helper to find next relocated block in reloc tree
1951 static noinline_for_stack
1952 int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1955 struct extent_buffer *eb;
1960 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1962 for (i = 0; i < *level; i++) {
1963 free_extent_buffer(path->nodes[i]);
1964 path->nodes[i] = NULL;
1967 for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
1968 eb = path->nodes[i];
1969 nritems = btrfs_header_nritems(eb);
1970 while (path->slots[i] + 1 < nritems) {
1972 if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
1979 free_extent_buffer(path->nodes[i]);
1980 path->nodes[i] = NULL;
1986 * walk down reloc tree to find relocated block of lowest level
1988 static noinline_for_stack
1989 int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
1992 struct btrfs_fs_info *fs_info = root->fs_info;
1993 struct extent_buffer *eb = NULL;
2000 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
2002 for (i = *level; i > 0; i--) {
2003 struct btrfs_key first_key;
2005 eb = path->nodes[i];
2006 nritems = btrfs_header_nritems(eb);
2007 while (path->slots[i] < nritems) {
2008 ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
2009 if (ptr_gen > last_snapshot)
2013 if (path->slots[i] >= nritems) {
2024 bytenr = btrfs_node_blockptr(eb, path->slots[i]);
2025 btrfs_node_key_to_cpu(eb, &first_key, path->slots[i]);
2026 eb = read_tree_block(fs_info, bytenr, ptr_gen, i - 1,
2030 } else if (!extent_buffer_uptodate(eb)) {
2031 free_extent_buffer(eb);
2034 BUG_ON(btrfs_header_level(eb) != i - 1);
2035 path->nodes[i - 1] = eb;
2036 path->slots[i - 1] = 0;
/*
 * invalidate extent cache for file extents whose key is in the range of
 * [min_key, max_key)
 */
2045 static int invalidate_extent_cache(struct btrfs_root *root,
2046 struct btrfs_key *min_key,
2047 struct btrfs_key *max_key)
2049 struct btrfs_fs_info *fs_info = root->fs_info;
2050 struct inode *inode = NULL;
2055 objectid = min_key->objectid;
2060 if (objectid > max_key->objectid)
2063 inode = find_next_inode(root, objectid);
2066 ino = btrfs_ino(BTRFS_I(inode));
2068 if (ino > max_key->objectid) {
2074 if (!S_ISREG(inode->i_mode))
2077 if (unlikely(min_key->objectid == ino)) {
2078 if (min_key->type > BTRFS_EXTENT_DATA_KEY)
2080 if (min_key->type < BTRFS_EXTENT_DATA_KEY)
2083 start = min_key->offset;
2084 WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
2090 if (unlikely(max_key->objectid == ino)) {
2091 if (max_key->type < BTRFS_EXTENT_DATA_KEY)
2093 if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
2096 if (max_key->offset == 0)
2098 end = max_key->offset;
2099 WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
2106 /* the lock_extent waits for readpage to complete */
2107 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
2108 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1);
2109 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
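/*
 * starting at @level, find the lowest level in the path that still has a
 * next slot and return its key in @key; returns 0 on success, 1 if there
 * is no next key.
 */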
2114 static int find_next_key(struct btrfs_path *path, int level,
2115 struct btrfs_key *key)
2118 while (level < BTRFS_MAX_LEVEL) {
2119 if (!path->nodes[level])
2121 if (path->slots[level] + 1 <
2122 btrfs_header_nritems(path->nodes[level])) {
2123 btrfs_node_key_to_cpu(path->nodes[level], key,
2124 path->slots[level] + 1);
2133 * Insert current subvolume into reloc_control::dirty_subvol_roots
2135 static void insert_dirty_subvol(struct btrfs_trans_handle *trans,
2136 struct reloc_control *rc,
2137 struct btrfs_root *root)
2139 struct btrfs_root *reloc_root = root->reloc_root;
2140 struct btrfs_root_item *reloc_root_item;
2142 /* @root must be a subvolume tree root with a valid reloc tree */
2143 ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
2146 reloc_root_item = &reloc_root->root_item;
2147 memset(&reloc_root_item->drop_progress, 0,
2148 sizeof(reloc_root_item->drop_progress));
2149 reloc_root_item->drop_level = 0;
2150 btrfs_set_root_refs(reloc_root_item, 0);
2151 btrfs_update_reloc_root(trans, root);
2153 if (list_empty(&root->reloc_dirty_list)) {
2154 btrfs_grab_fs_root(root);
2155 list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
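/*
 * drop the reloc trees of all subvolumes queued on dirty_subvol_roots and
 * release the root references taken by insert_dirty_subvol().
 */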
2159 static int clean_dirty_subvols(struct reloc_control *rc)
2161 struct btrfs_root *root;
2162 struct btrfs_root *next;
2165 list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
2167 struct btrfs_root *reloc_root = root->reloc_root;
2169 clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
2170 list_del_init(&root->reloc_dirty_list);
2171 root->reloc_root = NULL;
2175 ret2 = btrfs_drop_snapshot(reloc_root, NULL, 0, 1);
2176 if (ret2 < 0 && !ret)
2179 btrfs_put_fs_root(root);
/* merge the relocated tree blocks in a reloc tree with the corresponding fs tree */
2188 static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2189 struct btrfs_root *root)
2191 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2192 struct btrfs_key key;
2193 struct btrfs_key next_key;
2194 struct btrfs_trans_handle *trans = NULL;
2195 struct btrfs_root *reloc_root;
2196 struct btrfs_root_item *root_item;
2197 struct btrfs_path *path;
2198 struct extent_buffer *leaf;
2206 path = btrfs_alloc_path();
2209 path->reada = READA_FORWARD;
2211 reloc_root = root->reloc_root;
2212 root_item = &reloc_root->root_item;
2214 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2215 level = btrfs_root_level(root_item);
2216 extent_buffer_get(reloc_root->node);
2217 path->nodes[level] = reloc_root->node;
2218 path->slots[level] = 0;
2220 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2222 level = root_item->drop_level;
2224 path->lowest_level = level;
2225 ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
2226 path->lowest_level = 0;
2228 btrfs_free_path(path);
2232 btrfs_node_key_to_cpu(path->nodes[level], &next_key,
2233 path->slots[level]);
2234 WARN_ON(memcmp(&key, &next_key, sizeof(key)));
2236 btrfs_unlock_up_safe(path, 0);
2239 min_reserved = fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2240 memset(&next_key, 0, sizeof(next_key));
2243 ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved,
2244 BTRFS_RESERVE_FLUSH_ALL);
2249 trans = btrfs_start_transaction(root, 0);
2250 if (IS_ERR(trans)) {
2251 err = PTR_ERR(trans);
2255 trans->block_rsv = rc->block_rsv;
2260 ret = walk_down_reloc_tree(reloc_root, path, &level);
2268 if (!find_next_key(path, level, &key) &&
2269 btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
2272 ret = replace_path(trans, rc, root, reloc_root, path,
2273 &next_key, level, max_level);
2282 btrfs_node_key_to_cpu(path->nodes[level], &key,
2283 path->slots[level]);
2287 ret = walk_up_reloc_tree(reloc_root, path, &level);
2293 * save the merging progress in the drop_progress.
2294 * this is OK since root refs == 1 in this case.
2296 btrfs_node_key(path->nodes[level], &root_item->drop_progress,
2297 path->slots[level]);
2298 root_item->drop_level = level;
2300 btrfs_end_transaction_throttle(trans);
2303 btrfs_btree_balance_dirty(fs_info);
2305 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2306 invalidate_extent_cache(root, &key, &next_key);
	/*
	 * handle the case where only one block in the fs tree needs to be
	 * relocated and the block is the tree root.
	 */
2313 leaf = btrfs_lock_root_node(root);
2314 ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf);
2315 btrfs_tree_unlock(leaf);
2316 free_extent_buffer(leaf);
2320 btrfs_free_path(path);
2323 insert_dirty_subvol(trans, rc, root);
2326 btrfs_end_transaction_throttle(trans);
2328 btrfs_btree_balance_dirty(fs_info);
2330 if (replaced && rc->stage == UPDATE_DATA_PTRS)
2331 invalidate_extent_cache(root, &key, &next_key);
2336 static noinline_for_stack
2337 int prepare_to_merge(struct reloc_control *rc, int err)
2339 struct btrfs_root *root = rc->extent_root;
2340 struct btrfs_fs_info *fs_info = root->fs_info;
2341 struct btrfs_root *reloc_root;
2342 struct btrfs_trans_handle *trans;
2343 LIST_HEAD(reloc_roots);
2347 mutex_lock(&fs_info->reloc_mutex);
2348 rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2349 rc->merging_rsv_size += rc->nodes_relocated * 2;
2350 mutex_unlock(&fs_info->reloc_mutex);
2354 num_bytes = rc->merging_rsv_size;
2355 ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes,
2356 BTRFS_RESERVE_FLUSH_ALL);
2361 trans = btrfs_join_transaction(rc->extent_root);
2362 if (IS_ERR(trans)) {
2364 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2366 return PTR_ERR(trans);
2370 if (num_bytes != rc->merging_rsv_size) {
2371 btrfs_end_transaction(trans);
2372 btrfs_block_rsv_release(fs_info, rc->block_rsv,
2378 rc->merge_reloc_tree = 1;
2380 while (!list_empty(&rc->reloc_roots)) {
2381 reloc_root = list_entry(rc->reloc_roots.next,
2382 struct btrfs_root, root_list);
2383 list_del_init(&reloc_root->root_list);
2385 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2386 BUG_ON(IS_ERR(root));
2387 BUG_ON(root->reloc_root != reloc_root);
		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resume merging
		 */
2394 btrfs_set_root_refs(&reloc_root->root_item, 1);
2395 btrfs_update_reloc_root(trans, root);
2397 list_add(&reloc_root->root_list, &reloc_roots);
2400 list_splice(&reloc_roots, &rc->reloc_roots);
2403 btrfs_commit_transaction(trans);
2405 btrfs_end_transaction(trans);
2409 static noinline_for_stack
2410 void free_reloc_roots(struct list_head *list)
2412 struct btrfs_root *reloc_root;
2414 while (!list_empty(list)) {
2415 reloc_root = list_entry(list->next, struct btrfs_root,
2417 __del_reloc_root(reloc_root);
2418 free_extent_buffer(reloc_root->node);
2419 free_extent_buffer(reloc_root->commit_root);
2420 reloc_root->node = NULL;
2421 reloc_root->commit_root = NULL;
2425 static noinline_for_stack
2426 void merge_reloc_roots(struct reloc_control *rc)
2428 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2429 struct btrfs_root *root;
2430 struct btrfs_root *reloc_root;
2431 LIST_HEAD(reloc_roots);
2435 root = rc->extent_root;
	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
2444 list_splice_init(&rc->reloc_roots, &reloc_roots);
2445 mutex_unlock(&fs_info->reloc_mutex);
2447 while (!list_empty(&reloc_roots)) {
2449 reloc_root = list_entry(reloc_roots.next,
2450 struct btrfs_root, root_list);
2452 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
2453 root = read_fs_root(fs_info,
2454 reloc_root->root_key.offset);
2455 BUG_ON(IS_ERR(root));
2456 BUG_ON(root->reloc_root != reloc_root);
2458 ret = merge_reloc_root(rc, root);
2460 if (list_empty(&reloc_root->root_list))
2461 list_add_tail(&reloc_root->root_list,
2466 list_del_init(&reloc_root->root_list);
2476 btrfs_handle_fs_error(fs_info, ret, NULL);
2477 if (!list_empty(&reloc_roots))
2478 free_reloc_roots(&reloc_roots);
2480 /* new reloc root may be added */
2481 mutex_lock(&fs_info->reloc_mutex);
2482 list_splice_init(&rc->reloc_roots, &reloc_roots);
2483 mutex_unlock(&fs_info->reloc_mutex);
2484 if (!list_empty(&reloc_roots))
2485 free_reloc_roots(&reloc_roots);
2488 BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
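/* free every tree_block queued in @blocks and empty the rb-tree */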
2491 static void free_block_list(struct rb_root *blocks)
2493 struct tree_block *block;
2494 struct rb_node *rb_node;
2495 while ((rb_node = rb_first(blocks))) {
2496 block = rb_entry(rb_node, struct tree_block, rb_node);
2497 rb_erase(rb_node, blocks);
2502 static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
2503 struct btrfs_root *reloc_root)
2505 struct btrfs_fs_info *fs_info = reloc_root->fs_info;
2506 struct btrfs_root *root;
2508 if (reloc_root->last_trans == trans->transid)
2511 root = read_fs_root(fs_info, reloc_root->root_key.offset);
2512 BUG_ON(IS_ERR(root));
2513 BUG_ON(root->reloc_root != reloc_root);
2515 return btrfs_record_root_in_trans(trans, root);
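/*
 * walk up the backref cache from @node to the tree root it is reachable
 * from, make sure that root is recorded in the current transaction, and
 * return its reloc root.  The traversed nodes are stored in
 * rc->backref_cache.path[] for btrfs_reloc_cow_block().
 */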
2518 static noinline_for_stack
2519 struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
2520 struct reloc_control *rc,
2521 struct backref_node *node,
2522 struct backref_edge *edges[])
2524 struct backref_node *next;
2525 struct btrfs_root *root;
2531 next = walk_up_backref(next, edges, &index);
2534 BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
2536 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
2537 record_reloc_root_in_trans(trans, root);
2541 btrfs_record_root_in_trans(trans, root);
2542 root = root->reloc_root;
2544 if (next->new_bytenr != root->node->start) {
2545 BUG_ON(next->new_bytenr);
2546 BUG_ON(!list_empty(&next->list));
2547 next->new_bytenr = root->node->start;
2549 list_add_tail(&next->list,
2550 &rc->backref_cache.changed);
2551 __mark_block_processed(rc, next);
2557 next = walk_down_backref(edges, &index);
2558 if (!next || next->level <= node->level)
2565 /* setup backref node path for btrfs_reloc_cow_block */
2567 rc->backref_cache.path[next->level] = next;
2570 next = edges[index]->node[UPPER];
2576 * select a tree root for relocation. return NULL if the block
2577 * is reference counted. we should use do_relocation() in this
2578 * case. return a tree root pointer if the block isn't reference
2579 * counted. return -ENOENT if the block is root of reloc tree.
2581 static noinline_for_stack
2582 struct btrfs_root *select_one_root(struct backref_node *node)
2584 struct backref_node *next;
2585 struct btrfs_root *root;
2586 struct btrfs_root *fs_root = NULL;
2587 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2593 next = walk_up_backref(next, edges, &index);
		/* no other choice for a non-reference counted tree */
2598 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
2601 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2607 next = walk_down_backref(edges, &index);
2608 if (!next || next->level <= node->level)
2613 return ERR_PTR(-ENOENT);
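/*
 * estimate the metadata space needed to relocate @node: every unprocessed
 * upper level block reachable from it contributes one tree node of space.
 */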
2617 static noinline_for_stack
2618 u64 calcu_metadata_size(struct reloc_control *rc,
2619 struct backref_node *node, int reserve)
2621 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2622 struct backref_node *next = node;
2623 struct backref_edge *edge;
2624 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2628 BUG_ON(reserve && node->processed);
2633 if (next->processed && (reserve || next != node))
2636 num_bytes += fs_info->nodesize;
2638 if (list_empty(&next->upper))
2641 edge = list_entry(next->upper.next,
2642 struct backref_edge, list[LOWER]);
2643 edges[index++] = edge;
2644 next = edge->node[UPPER];
2646 next = walk_down_backref(edges, &index);
2651 static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2652 struct reloc_control *rc,
2653 struct backref_node *node)
2655 struct btrfs_root *root = rc->extent_root;
2656 struct btrfs_fs_info *fs_info = root->fs_info;
2661 num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2663 trans->block_rsv = rc->block_rsv;
2664 rc->reserved_bytes += num_bytes;
2667 * We are under a transaction here, so we can only do limited flushing.
2668 * If we hit ENOSPC, just kick back -EAGAIN so the caller knows to drop
2669 * the transaction and retry the refill once a full flush is possible.
2671 ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
2672 BTRFS_RESERVE_FLUSH_LIMIT);
2674 tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2675 while (tmp <= rc->reserved_bytes)
2678 * only one thread can access block_rsv at this point,
2679 * so we don't need to hold a lock to protect it.
2680 * we expand the reservation size here to allow enough
2681 * space for relocation, and we will return early in the enospc case.
2684 rc->block_rsv->size = tmp + fs_info->nodesize *
2685 RELOCATION_RESERVED_NODES;
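/*
 * Reservation sizing, roughly: calcu_metadata_size() walks every backref
 * path from the node being relocated up to the tree roots and charges one
 * nodesize for each block that has not been processed yet; the result is
 * then doubled, presumably to cover both the destination tree and the
 * reloc tree copies.  The block_rsv target only grows in
 * RELOCATION_RESERVED_NODES-sized steps, so one oversized refill can serve
 * many relocated nodes.
 */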
2693 * relocate a tree block, and then update the pointers in upper level
2694 * blocks that reference it so they point to the new location.
2696 * if called by link_to_upper, the block has already been relocated.
2697 * in that case this function just updates pointers.
2699 static int do_relocation(struct btrfs_trans_handle *trans,
2700 struct reloc_control *rc,
2701 struct backref_node *node,
2702 struct btrfs_key *key,
2703 struct btrfs_path *path, int lowest)
2705 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
2706 struct backref_node *upper;
2707 struct backref_edge *edge;
2708 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2709 struct btrfs_root *root;
2710 struct extent_buffer *eb;
2718 BUG_ON(lowest && node->eb);
2720 path->lowest_level = node->level + 1;
2721 rc->backref_cache.path[node->level] = node;
2722 list_for_each_entry(edge, &node->upper, list[LOWER]) {
2723 struct btrfs_key first_key;
2727 upper = edge->node[UPPER];
2728 root = select_reloc_root(trans, rc, upper, edges);
2731 if (upper->eb && !upper->locked) {
2733 ret = btrfs_bin_search(upper->eb, key,
2734 upper->level, &slot);
2740 bytenr = btrfs_node_blockptr(upper->eb, slot);
2741 if (node->eb->start == bytenr)
2744 drop_node_buffer(upper);
2748 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
2755 btrfs_release_path(path);
2760 upper->eb = path->nodes[upper->level];
2761 path->nodes[upper->level] = NULL;
2763 BUG_ON(upper->eb != path->nodes[upper->level]);
2767 path->locks[upper->level] = 0;
2769 slot = path->slots[upper->level];
2770 btrfs_release_path(path);
2772 ret = btrfs_bin_search(upper->eb, key, upper->level,
2781 bytenr = btrfs_node_blockptr(upper->eb, slot);
2783 if (bytenr != node->bytenr) {
2784 btrfs_err(root->fs_info,
2785 "lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
2786 bytenr, node->bytenr, slot,
2792 if (node->eb->start == bytenr)
2796 blocksize = root->fs_info->nodesize;
2797 generation = btrfs_node_ptr_generation(upper->eb, slot);
2798 btrfs_node_key_to_cpu(upper->eb, &first_key, slot);
2799 eb = read_tree_block(fs_info, bytenr, generation,
2800 upper->level - 1, &first_key);
2804 } else if (!extent_buffer_uptodate(eb)) {
2805 free_extent_buffer(eb);
2809 btrfs_tree_lock(eb);
2810 btrfs_set_lock_blocking_write(eb);
2813 ret = btrfs_cow_block(trans, root, eb, upper->eb,
2815 btrfs_tree_unlock(eb);
2816 free_extent_buffer(eb);
2821 BUG_ON(node->eb != eb);
2823 btrfs_set_node_blockptr(upper->eb, slot,
2825 btrfs_set_node_ptr_generation(upper->eb, slot,
2827 btrfs_mark_buffer_dirty(upper->eb);
2829 ret = btrfs_inc_extent_ref(trans, root,
2830 node->eb->start, blocksize,
2832 btrfs_header_owner(upper->eb),
2836 ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
2840 if (!upper->pending)
2841 drop_node_buffer(upper);
2843 unlock_node_buffer(upper);
2848 if (!err && node->pending) {
2849 drop_node_buffer(node);
2850 list_move_tail(&node->list, &rc->backref_cache.changed);
2854 path->lowest_level = 0;
2855 BUG_ON(err == -ENOSPC);
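/*
 * In rough terms: for each cached upper block that points at @node, if the
 * node has not been COWed yet (node->eb unset, the 'lowest' case) the old
 * child is read, locked and COWed into its new location via
 * btrfs_cow_block(); if it already has, only the parent is fixed up - the
 * slot's block pointer and generation are rewritten, a ref is added for
 * the new location and btrfs_drop_subtree() releases the path through the
 * old one.
 */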
2859 static int link_to_upper(struct btrfs_trans_handle *trans,
2860 struct reloc_control *rc,
2861 struct backref_node *node,
2862 struct btrfs_path *path)
2864 struct btrfs_key key;
2866 btrfs_node_key_to_cpu(node->eb, &key, 0);
2867 return do_relocation(trans, rc, node, &key, path, 0);
2870 static int finish_pending_nodes(struct btrfs_trans_handle *trans,
2871 struct reloc_control *rc,
2872 struct btrfs_path *path, int err)
2875 struct backref_cache *cache = &rc->backref_cache;
2876 struct backref_node *node;
2880 for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
2881 while (!list_empty(&cache->pending[level])) {
2882 node = list_entry(cache->pending[level].next,
2883 struct backref_node, list);
2884 list_move_tail(&node->list, &list);
2885 BUG_ON(!node->pending);
2888 ret = link_to_upper(trans, rc, node, path);
2893 list_splice_init(&list, &cache->pending[level]);
2898 static void mark_block_processed(struct reloc_control *rc,
2899 u64 bytenr, u32 blocksize)
2901 set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
2905 static void __mark_block_processed(struct reloc_control *rc,
2906 struct backref_node *node)
2909 if (node->level == 0 ||
2910 in_block_group(node->bytenr, rc->block_group)) {
2911 blocksize = rc->extent_root->fs_info->nodesize;
2912 mark_block_processed(rc, node->bytenr, blocksize);
2914 node->processed = 1;
2918 * mark a block, and all blocks that directly/indirectly reference it, as processed
2921 static void update_processed_blocks(struct reloc_control *rc,
2922 struct backref_node *node)
2924 struct backref_node *next = node;
2925 struct backref_edge *edge;
2926 struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2932 if (next->processed)
2935 __mark_block_processed(rc, next);
2937 if (list_empty(&next->upper))
2940 edge = list_entry(next->upper.next,
2941 struct backref_edge, list[LOWER]);
2942 edges[index++] = edge;
2943 next = edge->node[UPPER];
2945 next = walk_down_backref(edges, &index);
2949 static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2951 u32 blocksize = rc->extent_root->fs_info->nodesize;
2953 if (test_range_bit(&rc->processed_blocks, bytenr,
2954 bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
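/*
 * rc->processed_blocks is an extent_io_tree used purely as a bitmap here:
 * mark_block_processed() sets EXTENT_DIRTY over a block's byte range and
 * tree_block_processed() tests it, so blocks shared by several backref
 * paths are only handled once per pass.
 */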
2959 static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2960 struct tree_block *block)
2962 struct extent_buffer *eb;
2964 BUG_ON(block->key_ready);
2965 eb = read_tree_block(fs_info, block->bytenr, block->key.offset,
2966 block->level, NULL);
2969 } else if (!extent_buffer_uptodate(eb)) {
2970 free_extent_buffer(eb);
2973 if (block->level == 0)
2974 btrfs_item_key_to_cpu(eb, &block->key, 0);
2976 btrfs_node_key_to_cpu(eb, &block->key, 0);
2977 free_extent_buffer(eb);
2978 block->key_ready = 1;
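/*
 * Until the block is actually read, block->key is borrowed as scratch
 * space: add_tree_block() stashes the expected generation in key.offset
 * (and the block size in key.objectid), and that generation is what
 * read_tree_block() is given here as the expected transid.  Once the
 * buffer has been read, the block's real first key replaces it and
 * key_ready is set.
 */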
2983 * helper function to relocate a tree block
2985 static int relocate_tree_block(struct btrfs_trans_handle *trans,
2986 struct reloc_control *rc,
2987 struct backref_node *node,
2988 struct btrfs_key *key,
2989 struct btrfs_path *path)
2991 struct btrfs_root *root;
2997 BUG_ON(node->processed);
2998 root = select_one_root(node);
2999 if (root == ERR_PTR(-ENOENT)) {
3000 update_processed_blocks(rc, node);
3004 if (!root || test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
3005 ret = reserve_metadata_space(trans, rc, node);
3011 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
3012 BUG_ON(node->new_bytenr);
3013 BUG_ON(!list_empty(&node->list));
3014 btrfs_record_root_in_trans(trans, root);
3015 root = root->reloc_root;
3016 node->new_bytenr = root->node->start;
3018 list_add_tail(&node->list, &rc->backref_cache.changed);
3020 path->lowest_level = node->level;
3021 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
3022 btrfs_release_path(path);
3027 update_processed_blocks(rc, node);
3029 ret = do_relocation(trans, rc, node, key, path, 1);
3032 if (ret || node->level == 0 || node->cowonly)
3033 remove_backref_node(&rc->backref_cache, node);
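/*
 * Rough summary of the cases above: a block that only lives in a reloc
 * tree (-ENOENT from select_one_root()) is just marked processed; when a
 * refcounted tree root is involved, the reloc root is swapped in and its
 * root node becomes the new location; a block owned by a non-refcounted
 * tree is COWed in place by btrfs_search_slot() with path->lowest_level
 * raised; everything else goes through do_relocation().
 */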
3038 * relocate a list of blocks
3040 static noinline_for_stack
3041 int relocate_tree_blocks(struct btrfs_trans_handle *trans,
3042 struct reloc_control *rc, struct rb_root *blocks)
3044 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3045 struct backref_node *node;
3046 struct btrfs_path *path;
3047 struct tree_block *block;
3048 struct tree_block *next;
3052 path = btrfs_alloc_path();
3055 goto out_free_blocks;
3058 /* Kick in readahead for tree blocks with missing keys */
3059 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3060 if (!block->key_ready)
3061 readahead_tree_block(fs_info, block->bytenr);
3064 /* Get first keys */
3065 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3066 if (!block->key_ready) {
3067 err = get_tree_block_key(fs_info, block);
3073 /* Do tree relocation */
3074 rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
3075 node = build_backref_tree(rc, &block->key,
3076 block->level, block->bytenr);
3078 err = PTR_ERR(node);
3082 ret = relocate_tree_block(trans, rc, node, &block->key,
3085 if (ret != -EAGAIN || &block->rb_node == rb_first(blocks))
3091 err = finish_pending_nodes(trans, rc, path, err);
3094 btrfs_free_path(path);
3096 free_block_list(blocks);
3100 static noinline_for_stack
3101 int prealloc_file_extent_cluster(struct inode *inode,
3102 struct file_extent_cluster *cluster)
3107 u64 offset = BTRFS_I(inode)->index_cnt;
3111 u64 prealloc_start = cluster->start - offset;
3112 u64 prealloc_end = cluster->end - offset;
3114 struct extent_changeset *data_reserved = NULL;
3116 BUG_ON(cluster->start != cluster->boundary[0]);
3119 ret = btrfs_check_data_free_space(inode, &data_reserved, prealloc_start,
3120 prealloc_end + 1 - prealloc_start);
3124 cur_offset = prealloc_start;
3125 while (nr < cluster->nr) {
3126 start = cluster->boundary[nr] - offset;
3127 if (nr + 1 < cluster->nr)
3128 end = cluster->boundary[nr + 1] - 1 - offset;
3130 end = cluster->end - offset;
3132 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3133 num_bytes = end + 1 - start;
3134 if (cur_offset < start)
3135 btrfs_free_reserved_data_space(inode, data_reserved,
3136 cur_offset, start - cur_offset);
3137 ret = btrfs_prealloc_file_range(inode, 0, start,
3138 num_bytes, num_bytes,
3139 end + 1, &alloc_hint);
3140 cur_offset = end + 1;
3141 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
3146 if (cur_offset < prealloc_end)
3147 btrfs_free_reserved_data_space(inode, data_reserved,
3148 cur_offset, prealloc_end + 1 - cur_offset);
3150 inode_unlock(inode);
3151 extent_changeset_free(data_reserved);
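/*
 * The data inode addresses the block group directly: index_cnt holds the
 * block group's start bytenr (see create_reloc_inode()), so an extent at
 * bytenr B maps to file offset B - index_cnt.  As a made-up example, a
 * cluster covering bytenrs [1G + 64K, 1G + 192K) in a block group that
 * starts at 1G is preallocated at file offsets [64K, 192K).  Any part of
 * the data reservation that ends up unused is released again above.
 */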
3155 static noinline_for_stack
3156 int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
3159 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3160 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
3161 struct extent_map *em;
3164 em = alloc_extent_map();
3169 em->len = end + 1 - start;
3170 em->block_len = em->len;
3171 em->block_start = block_start;
3172 em->bdev = fs_info->fs_devices->latest_bdev;
3173 set_bit(EXTENT_FLAG_PINNED, &em->flags);
3175 lock_extent(&BTRFS_I(inode)->io_tree, start, end);
3177 write_lock(&em_tree->lock);
3178 ret = add_extent_mapping(em_tree, em, 0);
3179 write_unlock(&em_tree->lock);
3180 if (ret != -EEXIST) {
3181 free_extent_map(em);
3184 btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);
3186 unlock_extent(&BTRFS_I(inode)->io_tree, start, end);
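/*
 * Roughly speaking, the pinned extent map inserted here points the
 * cluster's file range at the *old* data (block_start is the cluster's
 * original bytenr), so the readpage calls in
 * relocate_file_extent_cluster() pull in the existing contents; once the
 * pages are dirtied as delalloc, writeback copies them into the space
 * preallocated outside the block group.
 */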
3190 static int relocate_file_extent_cluster(struct inode *inode,
3191 struct file_extent_cluster *cluster)
3193 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
3196 u64 offset = BTRFS_I(inode)->index_cnt;
3197 unsigned long index;
3198 unsigned long last_index;
3200 struct file_ra_state *ra;
3201 gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
3208 ra = kzalloc(sizeof(*ra), GFP_NOFS);
3212 ret = prealloc_file_extent_cluster(inode, cluster);
3216 file_ra_state_init(ra, inode->i_mapping);
3218 ret = setup_extent_mapping(inode, cluster->start - offset,
3219 cluster->end - offset, cluster->start);
3223 index = (cluster->start - offset) >> PAGE_SHIFT;
3224 last_index = (cluster->end - offset) >> PAGE_SHIFT;
3225 while (index <= last_index) {
3226 ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
3231 page = find_lock_page(inode->i_mapping, index);
3233 page_cache_sync_readahead(inode->i_mapping,
3235 last_index + 1 - index);
3236 page = find_or_create_page(inode->i_mapping, index,
3239 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3246 if (PageReadahead(page)) {
3247 page_cache_async_readahead(inode->i_mapping,
3248 ra, NULL, page, index,
3249 last_index + 1 - index);
3252 if (!PageUptodate(page)) {
3253 btrfs_readpage(NULL, page);
3255 if (!PageUptodate(page)) {
3258 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3260 btrfs_delalloc_release_extents(BTRFS_I(inode),
3267 page_start = page_offset(page);
3268 page_end = page_start + PAGE_SIZE - 1;
3270 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
3272 set_page_extent_mapped(page);
3274 if (nr < cluster->nr &&
3275 page_start + offset == cluster->boundary[nr]) {
3276 set_extent_bits(&BTRFS_I(inode)->io_tree,
3277 page_start, page_end,
3282 ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
3287 btrfs_delalloc_release_metadata(BTRFS_I(inode),
3289 btrfs_delalloc_release_extents(BTRFS_I(inode),
3292 clear_extent_bits(&BTRFS_I(inode)->io_tree,
3293 page_start, page_end,
3294 EXTENT_LOCKED | EXTENT_BOUNDARY);
3298 set_page_dirty(page);
3300 unlock_extent(&BTRFS_I(inode)->io_tree,
3301 page_start, page_end);
3306 btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
3308 balance_dirty_pages_ratelimited(inode->i_mapping);
3309 btrfs_throttle(fs_info);
3311 WARN_ON(nr != cluster->nr);
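/*
 * Per-page flow above, roughly: reserve delalloc metadata, fault the page
 * in (with readahead), mark it delalloc, and tag the first page of every
 * cluster boundary with EXTENT_BOUNDARY so that later writeback splits
 * the copy into extents that mirror the original layout.
 */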
3317 static noinline_for_stack
3318 int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3319 struct file_extent_cluster *cluster)
3323 if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3324 ret = relocate_file_extent_cluster(inode, cluster);
3331 cluster->start = extent_key->objectid;
3333 BUG_ON(cluster->nr >= MAX_EXTENTS);
3334 cluster->end = extent_key->objectid + extent_key->offset - 1;
3335 cluster->boundary[cluster->nr] = extent_key->objectid;
3338 if (cluster->nr >= MAX_EXTENTS) {
3339 ret = relocate_file_extent_cluster(inode, cluster);
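/*
 * Clusters batch the data copy: extents are appended while they stay
 * physically contiguous (each new extent_key->objectid must equal
 * cluster->end + 1) and boundary[] records where each one starts; the
 * cluster is flushed through relocate_file_extent_cluster() when a gap is
 * hit or MAX_EXTENTS boundaries have accumulated.
 */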
3348 * helper to add a tree block to the list.
3349 * the major work is getting the generation and level of the block
3351 static int add_tree_block(struct reloc_control *rc,
3352 struct btrfs_key *extent_key,
3353 struct btrfs_path *path,
3354 struct rb_root *blocks)
3356 struct extent_buffer *eb;
3357 struct btrfs_extent_item *ei;
3358 struct btrfs_tree_block_info *bi;
3359 struct tree_block *block;
3360 struct rb_node *rb_node;
3365 eb = path->nodes[0];
3366 item_size = btrfs_item_size_nr(eb, path->slots[0]);
3368 if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3369 item_size >= sizeof(*ei) + sizeof(*bi)) {
3370 ei = btrfs_item_ptr(eb, path->slots[0],
3371 struct btrfs_extent_item);
3372 if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3373 bi = (struct btrfs_tree_block_info *)(ei + 1);
3374 level = btrfs_tree_block_level(eb, bi);
3376 level = (int)extent_key->offset;
3378 generation = btrfs_extent_generation(eb, ei);
3379 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3380 btrfs_print_v0_err(eb->fs_info);
3381 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3387 btrfs_release_path(path);
3389 BUG_ON(level == -1);
3391 block = kmalloc(sizeof(*block), GFP_NOFS);
3395 block->bytenr = extent_key->objectid;
3396 block->key.objectid = rc->extent_root->fs_info->nodesize;
3397 block->key.offset = generation;
3398 block->level = level;
3399 block->key_ready = 0;
3401 rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
3403 backref_tree_panic(rb_node, -EEXIST, block->bytenr);
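/*
 * Two extent item layouts are handled above (sketch, assuming the usual
 * on-disk formats):
 *
 *   (bytenr, BTRFS_METADATA_ITEM_KEY, level)       - skinny metadata, the
 *        level is carried in the key offset;
 *   (bytenr, BTRFS_EXTENT_ITEM_KEY, num_bytes)     - classic form, a
 *        btrfs_tree_block_info following the extent item holds the level.
 *
 * Either way only bytenr, generation and level are kept in the tree_block;
 * the real key is filled in later by get_tree_block_key().
 */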
3409 * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3411 static int __add_tree_block(struct reloc_control *rc,
3412 u64 bytenr, u32 blocksize,
3413 struct rb_root *blocks)
3415 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3416 struct btrfs_path *path;
3417 struct btrfs_key key;
3419 bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
3421 if (tree_block_processed(bytenr, rc))
3424 if (tree_search(blocks, bytenr))
3427 path = btrfs_alloc_path();
3431 key.objectid = bytenr;
3433 key.type = BTRFS_METADATA_ITEM_KEY;
3434 key.offset = (u64)-1;
3436 key.type = BTRFS_EXTENT_ITEM_KEY;
3437 key.offset = blocksize;
3440 path->search_commit_root = 1;
3441 path->skip_locking = 1;
3442 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
3446 if (ret > 0 && skinny) {
3447 if (path->slots[0]) {
3449 btrfs_item_key_to_cpu(path->nodes[0], &key,
3451 if (key.objectid == bytenr &&
3452 (key.type == BTRFS_METADATA_ITEM_KEY ||
3453 (key.type == BTRFS_EXTENT_ITEM_KEY &&
3454 key.offset == blocksize)))
3460 btrfs_release_path(path);
3466 btrfs_print_leaf(path->nodes[0]);
3468 "tree block extent item (%llu) is not found in extent tree",
3475 ret = add_tree_block(rc, &key, path, blocks);
3477 btrfs_free_path(path);
3482 * helper to check if the block uses full backrefs for the pointers in it
3484 static int block_use_full_backref(struct reloc_control *rc,
3485 struct extent_buffer *eb)
3490 if (btrfs_header_flag(eb, BTRFS_HEADER_FLAG_RELOC) ||
3491 btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
3494 ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
3495 eb->start, btrfs_header_level(eb), 1,
3499 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
3506 static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3507 struct btrfs_block_group_cache *block_group,
3508 struct inode *inode,
3511 struct btrfs_key key;
3512 struct btrfs_root *root = fs_info->tree_root;
3513 struct btrfs_trans_handle *trans;
3520 key.type = BTRFS_INODE_ITEM_KEY;
3523 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
3528 ret = btrfs_check_trunc_cache_free_space(fs_info,
3529 &fs_info->global_block_rsv);
3533 trans = btrfs_join_transaction(root);
3534 if (IS_ERR(trans)) {
3535 ret = PTR_ERR(trans);
3539 ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3541 btrfs_end_transaction(trans);
3542 btrfs_btree_balance_dirty(fs_info);
3549 * helper to add tree blocks for backref of type BTRFS_EXTENT_DATA_REF_KEY
3550 * this function scans the fs tree to find blocks that reference the data extent
3552 static int find_data_references(struct reloc_control *rc,
3553 struct btrfs_key *extent_key,
3554 struct extent_buffer *leaf,
3555 struct btrfs_extent_data_ref *ref,
3556 struct rb_root *blocks)
3558 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3559 struct btrfs_path *path;
3560 struct tree_block *block;
3561 struct btrfs_root *root;
3562 struct btrfs_file_extent_item *fi;
3563 struct rb_node *rb_node;
3564 struct btrfs_key key;
3575 ref_root = btrfs_extent_data_ref_root(leaf, ref);
3576 ref_objectid = btrfs_extent_data_ref_objectid(leaf, ref);
3577 ref_offset = btrfs_extent_data_ref_offset(leaf, ref);
3578 ref_count = btrfs_extent_data_ref_count(leaf, ref);
3581 * This is an extent belonging to the free space cache, let's just delete
3582 * it and redo the search.
3584 if (ref_root == BTRFS_ROOT_TREE_OBJECTID) {
3585 ret = delete_block_group_cache(fs_info, rc->block_group,
3586 NULL, ref_objectid);
3592 path = btrfs_alloc_path();
3595 path->reada = READA_FORWARD;
3597 root = read_fs_root(fs_info, ref_root);
3599 err = PTR_ERR(root);
3603 key.objectid = ref_objectid;
3604 key.type = BTRFS_EXTENT_DATA_KEY;
3605 if (ref_offset > ((u64)-1 << 32))
3608 key.offset = ref_offset;
3610 path->search_commit_root = 1;
3611 path->skip_locking = 1;
3612 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3618 leaf = path->nodes[0];
3619 nritems = btrfs_header_nritems(leaf);
3621 * references from tree blocks that use full backrefs
3622 * are not counted in this data ref's count
3624 if (block_use_full_backref(rc, leaf))
3628 rb_node = tree_search(blocks, leaf->start);
3633 path->slots[0] = nritems;
3636 while (ref_count > 0) {
3637 while (path->slots[0] >= nritems) {
3638 ret = btrfs_next_leaf(root, path);
3643 if (WARN_ON(ret > 0))
3646 leaf = path->nodes[0];
3647 nritems = btrfs_header_nritems(leaf);
3650 if (block_use_full_backref(rc, leaf))
3654 rb_node = tree_search(blocks, leaf->start);
3659 path->slots[0] = nritems;
3663 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3664 if (WARN_ON(key.objectid != ref_objectid ||
3665 key.type != BTRFS_EXTENT_DATA_KEY))
3668 fi = btrfs_item_ptr(leaf, path->slots[0],
3669 struct btrfs_file_extent_item);
3671 if (btrfs_file_extent_type(leaf, fi) ==
3672 BTRFS_FILE_EXTENT_INLINE)
3675 if (btrfs_file_extent_disk_bytenr(leaf, fi) !=
3676 extent_key->objectid)
3679 key.offset -= btrfs_file_extent_offset(leaf, fi);
3680 if (key.offset != ref_offset)
3688 if (!tree_block_processed(leaf->start, rc)) {
3689 block = kmalloc(sizeof(*block), GFP_NOFS);
3694 block->bytenr = leaf->start;
3695 btrfs_item_key_to_cpu(leaf, &block->key, 0);
3697 block->key_ready = 1;
3698 rb_node = tree_insert(blocks, block->bytenr,
3701 backref_tree_panic(rb_node, -EEXIST,
3707 path->slots[0] = nritems;
3713 btrfs_free_path(path);
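/*
 * In short: the fs tree named by the data ref is walked (in the commit
 * root, unlocked) looking for EXTENT_DATA items whose disk bytenr matches
 * the extent being relocated; every leaf holding such an item is queued in
 * @blocks with key_ready = 1, since a leaf's first key is already known.
 * Leaves that use full backrefs are skipped here - they are accounted via
 * shared data refs and added through __add_tree_block() instead.
 */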
3718 * helper to find all tree blocks that reference a given data extent
3720 static noinline_for_stack
3721 int add_data_references(struct reloc_control *rc,
3722 struct btrfs_key *extent_key,
3723 struct btrfs_path *path,
3724 struct rb_root *blocks)
3726 struct btrfs_key key;
3727 struct extent_buffer *eb;
3728 struct btrfs_extent_data_ref *dref;
3729 struct btrfs_extent_inline_ref *iref;
3732 u32 blocksize = rc->extent_root->fs_info->nodesize;
3736 eb = path->nodes[0];
3737 ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
3738 end = ptr + btrfs_item_size_nr(eb, path->slots[0]);
3739 ptr += sizeof(struct btrfs_extent_item);
3742 iref = (struct btrfs_extent_inline_ref *)ptr;
3743 key.type = btrfs_get_extent_inline_ref_type(eb, iref,
3744 BTRFS_REF_TYPE_DATA);
3745 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3746 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3747 ret = __add_tree_block(rc, key.offset, blocksize,
3749 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3750 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
3751 ret = find_data_references(rc, extent_key,
3755 btrfs_err(rc->extent_root->fs_info,
3756 "extent %llu slot %d has an invalid inline ref type",
3757 eb->start, path->slots[0]);
3763 ptr += btrfs_extent_inline_ref_size(key.type);
3769 eb = path->nodes[0];
3770 if (path->slots[0] >= btrfs_header_nritems(eb)) {
3771 ret = btrfs_next_leaf(rc->extent_root, path);
3778 eb = path->nodes[0];
3781 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
3782 if (key.objectid != extent_key->objectid)
3785 if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
3786 ret = __add_tree_block(rc, key.offset, blocksize,
3788 } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
3789 dref = btrfs_item_ptr(eb, path->slots[0],
3790 struct btrfs_extent_data_ref);
3791 ret = find_data_references(rc, extent_key,
3793 } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
3794 btrfs_print_v0_err(eb->fs_info);
3795 btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3807 btrfs_release_path(path);
3809 free_block_list(blocks);
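/*
 * Data extent backrefs come in two flavours and both are handled above:
 * inline refs packed after the extent item, then keyed items that follow
 * the extent item in the extent tree.  SHARED_DATA_REF entries name the
 * referencing tree block directly (key.offset is its bytenr), while
 * EXTENT_DATA_REF entries only name root/objectid/offset and need the fs
 * tree scan done by find_data_references().
 */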
3814 * helper to find next unprocessed extent
3816 static noinline_for_stack
3817 int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
3818 struct btrfs_key *extent_key)
3820 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3821 struct btrfs_key key;
3822 struct extent_buffer *leaf;
3823 u64 start, end, last;
3826 last = rc->block_group->key.objectid + rc->block_group->key.offset;
3829 if (rc->search_start >= last) {
3834 key.objectid = rc->search_start;
3835 key.type = BTRFS_EXTENT_ITEM_KEY;
3838 path->search_commit_root = 1;
3839 path->skip_locking = 1;
3840 ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
3845 leaf = path->nodes[0];
3846 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3847 ret = btrfs_next_leaf(rc->extent_root, path);
3850 leaf = path->nodes[0];
3853 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3854 if (key.objectid >= last) {
3859 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3860 key.type != BTRFS_METADATA_ITEM_KEY) {
3865 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3866 key.objectid + key.offset <= rc->search_start) {
3871 if (key.type == BTRFS_METADATA_ITEM_KEY &&
3872 key.objectid + fs_info->nodesize <=
3878 ret = find_first_extent_bit(&rc->processed_blocks,
3879 key.objectid, &start, &end,
3880 EXTENT_DIRTY, NULL);
3882 if (ret == 0 && start <= key.objectid) {
3883 btrfs_release_path(path);
3884 rc->search_start = end + 1;
3886 if (key.type == BTRFS_EXTENT_ITEM_KEY)
3887 rc->search_start = key.objectid + key.offset;
3889 rc->search_start = key.objectid +
3891 memcpy(extent_key, &key, sizeof(key));
3895 btrfs_release_path(path);
3899 static void set_reloc_control(struct reloc_control *rc)
3901 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3903 mutex_lock(&fs_info->reloc_mutex);
3904 fs_info->reloc_ctl = rc;
3905 mutex_unlock(&fs_info->reloc_mutex);
3908 static void unset_reloc_control(struct reloc_control *rc)
3910 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3912 mutex_lock(&fs_info->reloc_mutex);
3913 fs_info->reloc_ctl = NULL;
3914 mutex_unlock(&fs_info->reloc_mutex);
3917 static int check_extent_flags(u64 flags)
3919 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3920 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3922 if (!(flags & BTRFS_EXTENT_FLAG_DATA) &&
3923 !(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK))
3925 if ((flags & BTRFS_EXTENT_FLAG_DATA) &&
3926 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
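/*
 * Valid flag combinations per the checks above; any other combination
 * fails the check:
 *
 *   DATA                          - plain data extent
 *   TREE_BLOCK                    - tree block with indirect backrefs
 *   TREE_BLOCK | FULL_BACKREF     - tree block with full backrefs
 */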
3931 static noinline_for_stack
3932 int prepare_to_relocate(struct reloc_control *rc)
3934 struct btrfs_trans_handle *trans;
3937 rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3938 BTRFS_BLOCK_RSV_TEMP);
3942 memset(&rc->cluster, 0, sizeof(rc->cluster));
3943 rc->search_start = rc->block_group->key.objectid;
3944 rc->extents_found = 0;
3945 rc->nodes_relocated = 0;
3946 rc->merging_rsv_size = 0;
3947 rc->reserved_bytes = 0;
3948 rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3949 RELOCATION_RESERVED_NODES;
3950 ret = btrfs_block_rsv_refill(rc->extent_root,
3951 rc->block_rsv, rc->block_rsv->size,
3952 BTRFS_RESERVE_FLUSH_ALL);
3956 rc->create_reloc_tree = 1;
3957 set_reloc_control(rc);
3959 trans = btrfs_join_transaction(rc->extent_root);
3960 if (IS_ERR(trans)) {
3961 unset_reloc_control(rc);
3963 * extent tree is not a ref_cow tree and has no reloc_root to
3964 * cleanup. And callers are responsible for freeing the above block rsv.
3967 return PTR_ERR(trans);
3969 btrfs_commit_transaction(trans);
3973 static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3975 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3976 struct rb_root blocks = RB_ROOT;
3977 struct btrfs_key key;
3978 struct btrfs_trans_handle *trans = NULL;
3979 struct btrfs_path *path;
3980 struct btrfs_extent_item *ei;
3987 path = btrfs_alloc_path();
3990 path->reada = READA_FORWARD;
3992 ret = prepare_to_relocate(rc);
3999 rc->reserved_bytes = 0;
4000 ret = btrfs_block_rsv_refill(rc->extent_root,
4001 rc->block_rsv, rc->block_rsv->size,
4002 BTRFS_RESERVE_FLUSH_ALL);
4008 trans = btrfs_start_transaction(rc->extent_root, 0);
4009 if (IS_ERR(trans)) {
4010 err = PTR_ERR(trans);
4015 if (update_backref_cache(trans, &rc->backref_cache)) {
4016 btrfs_end_transaction(trans);
4021 ret = find_next_extent(rc, path, &key);
4027 rc->extents_found++;
4029 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
4030 struct btrfs_extent_item);
4031 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
4032 if (item_size >= sizeof(*ei)) {
4033 flags = btrfs_extent_flags(path->nodes[0], ei);
4034 ret = check_extent_flags(flags);
4036 } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
4038 btrfs_print_v0_err(trans->fs_info);
4039 btrfs_abort_transaction(trans, err);
4045 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
4046 ret = add_tree_block(rc, &key, path, &blocks);
4047 } else if (rc->stage == UPDATE_DATA_PTRS &&
4048 (flags & BTRFS_EXTENT_FLAG_DATA)) {
4049 ret = add_data_references(rc, &key, path, &blocks);
4051 btrfs_release_path(path);
4059 if (!RB_EMPTY_ROOT(&blocks)) {
4060 ret = relocate_tree_blocks(trans, rc, &blocks);
4063 * if we fail to relocate tree blocks, force an update of the
4064 * backref cache when committing the transaction.
4066 rc->backref_cache.last_trans = trans->transid - 1;
4068 if (ret != -EAGAIN) {
4072 rc->extents_found--;
4073 rc->search_start = key.objectid;
4077 btrfs_end_transaction_throttle(trans);
4078 btrfs_btree_balance_dirty(fs_info);
4081 if (rc->stage == MOVE_DATA_EXTENTS &&
4082 (flags & BTRFS_EXTENT_FLAG_DATA)) {
4083 rc->found_file_extent = 1;
4084 ret = relocate_data_extent(rc->data_inode,
4085 &key, &rc->cluster);
4092 if (trans && progress && err == -ENOSPC) {
4093 ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
4101 btrfs_release_path(path);
4102 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
4105 btrfs_end_transaction_throttle(trans);
4106 btrfs_btree_balance_dirty(fs_info);
4110 ret = relocate_file_extent_cluster(rc->data_inode,
4116 rc->create_reloc_tree = 0;
4117 set_reloc_control(rc);
4119 backref_cache_cleanup(&rc->backref_cache);
4120 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
4122 err = prepare_to_merge(rc, err);
4124 merge_reloc_roots(rc);
4126 rc->merge_reloc_tree = 0;
4127 unset_reloc_control(rc);
4128 btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
4130 /* get rid of pinned extents */
4131 trans = btrfs_join_transaction(rc->extent_root);
4132 if (IS_ERR(trans)) {
4133 err = PTR_ERR(trans);
4136 btrfs_commit_transaction(trans);
4137 ret = clean_dirty_subvols(rc);
4138 if (ret < 0 && !err)
4141 btrfs_free_block_rsv(fs_info, rc->block_rsv);
4142 btrfs_free_path(path);
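/*
 * Bigger picture for the loop above, roughly: each iteration refills the
 * block reservation, starts a transaction, picks the next unprocessed
 * extent in the block group and either queues its tree blocks (via
 * add_tree_block()/add_data_references()) or batches its data into the
 * current cluster.  Relocation runs in two passes per block group:
 * MOVE_DATA_EXTENTS copies the file data, then UPDATE_DATA_PTRS rewrites
 * the tree blocks that reference it; finally the reloc trees are merged
 * back into their fs trees by merge_reloc_roots().
 */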
4146 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
4147 struct btrfs_root *root, u64 objectid)
4149 struct btrfs_path *path;
4150 struct btrfs_inode_item *item;
4151 struct extent_buffer *leaf;
4154 path = btrfs_alloc_path();
4158 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
4162 leaf = path->nodes[0];
4163 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
4164 memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
4165 btrfs_set_inode_generation(leaf, item, 1);
4166 btrfs_set_inode_size(leaf, item, 0);
4167 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
4168 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
4169 BTRFS_INODE_PREALLOC);
4170 btrfs_mark_buffer_dirty(leaf);
4172 btrfs_free_path(path);
4177 * helper to create inode for data relocation.
4178 * the inode lives in the data relocation tree and its link count is 0
4180 static noinline_for_stack
4181 struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
4182 struct btrfs_block_group_cache *group)
4184 struct inode *inode = NULL;
4185 struct btrfs_trans_handle *trans;
4186 struct btrfs_root *root;
4187 struct btrfs_key key;
4191 root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4193 return ERR_CAST(root);
4195 trans = btrfs_start_transaction(root, 6);
4197 return ERR_CAST(trans);
4199 err = btrfs_find_free_objectid(root, &objectid);
4203 err = __insert_orphan_inode(trans, root, objectid);
4206 key.objectid = objectid;
4207 key.type = BTRFS_INODE_ITEM_KEY;
4209 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4210 BUG_ON(IS_ERR(inode));
4211 BTRFS_I(inode)->index_cnt = group->key.objectid;
4213 err = btrfs_orphan_add(trans, BTRFS_I(inode));
4215 btrfs_end_transaction(trans);
4216 btrfs_btree_balance_dirty(fs_info);
4220 inode = ERR_PTR(err);
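/*
 * The relocation inode is deliberately an orphan (link count 0) in the
 * data reloc tree, so it is cleaned up automatically if we crash.  Its
 * index_cnt is reused to remember the block group's start bytenr, which
 * is what makes the bytenr <-> file offset mapping in the cluster code
 * work.
 */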
4225 static struct reloc_control *alloc_reloc_control(void)
4227 struct reloc_control *rc;
4229 rc = kzalloc(sizeof(*rc), GFP_NOFS);
4233 INIT_LIST_HEAD(&rc->reloc_roots);
4234 INIT_LIST_HEAD(&rc->dirty_subvol_roots);
4235 backref_cache_init(&rc->backref_cache);
4236 mapping_tree_init(&rc->reloc_root_tree);
4237 extent_io_tree_init(&rc->processed_blocks, NULL);
4242 * Print the block group being relocated
4244 static void describe_relocation(struct btrfs_fs_info *fs_info,
4245 struct btrfs_block_group_cache *block_group)
4247 char buf[128] = {'\0'};
4249 btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
4252 "relocating block group %llu flags %s",
4253 block_group->key.objectid, buf);
4257 * function to relocate all extents in a block group.
4259 int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
4261 struct btrfs_block_group_cache *bg;
4262 struct btrfs_root *extent_root = fs_info->extent_root;
4263 struct reloc_control *rc;
4264 struct inode *inode;
4265 struct btrfs_path *path;
4270 bg = btrfs_lookup_block_group(fs_info, group_start);
4274 if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4275 btrfs_put_block_group(bg);
4279 rc = alloc_reloc_control();
4281 btrfs_put_block_group(bg);
4285 rc->extent_root = extent_root;
4286 rc->block_group = bg;
4288 ret = btrfs_inc_block_group_ro(rc->block_group);
4295 path = btrfs_alloc_path();
4301 inode = lookup_free_space_inode(fs_info, rc->block_group, path);
4302 btrfs_free_path(path);
4305 ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4307 ret = PTR_ERR(inode);
4309 if (ret && ret != -ENOENT) {
4314 rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4315 if (IS_ERR(rc->data_inode)) {
4316 err = PTR_ERR(rc->data_inode);
4317 rc->data_inode = NULL;
4321 describe_relocation(fs_info, rc->block_group);
4323 btrfs_wait_block_group_reservations(rc->block_group);
4324 btrfs_wait_nocow_writers(rc->block_group);
4325 btrfs_wait_ordered_roots(fs_info, U64_MAX,
4326 rc->block_group->key.objectid,
4327 rc->block_group->key.offset);
4330 mutex_lock(&fs_info->cleaner_mutex);
4331 ret = relocate_block_group(rc);
4332 mutex_unlock(&fs_info->cleaner_mutex);
4338 if (rc->extents_found == 0)
4341 btrfs_info(fs_info, "found %llu extents", rc->extents_found);
4343 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4344 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4350 invalidate_mapping_pages(rc->data_inode->i_mapping,
4352 rc->stage = UPDATE_DATA_PTRS;
4356 WARN_ON(rc->block_group->pinned > 0);
4357 WARN_ON(rc->block_group->reserved > 0);
4358 WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
4361 btrfs_dec_block_group_ro(rc->block_group);
4362 iput(rc->data_inode);
4363 btrfs_put_block_group(rc->block_group);
4368 static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4370 struct btrfs_fs_info *fs_info = root->fs_info;
4371 struct btrfs_trans_handle *trans;
4374 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4376 return PTR_ERR(trans);
4378 memset(&root->root_item.drop_progress, 0,
4379 sizeof(root->root_item.drop_progress));
4380 root->root_item.drop_level = 0;
4381 btrfs_set_root_refs(&root->root_item, 0);
4382 ret = btrfs_update_root(trans, fs_info->tree_root,
4383 &root->root_key, &root->root_item);
4385 err = btrfs_end_transaction(trans);
4392 * recover relocation interrupted by system crash.
4394 * this function resumes merging reloc trees with corresponding fs trees.
4395 * this is important for keeping the sharing of tree blocks
4397 int btrfs_recover_relocation(struct btrfs_root *root)
4399 struct btrfs_fs_info *fs_info = root->fs_info;
4400 LIST_HEAD(reloc_roots);
4401 struct btrfs_key key;
4402 struct btrfs_root *fs_root;
4403 struct btrfs_root *reloc_root;
4404 struct btrfs_path *path;
4405 struct extent_buffer *leaf;
4406 struct reloc_control *rc = NULL;
4407 struct btrfs_trans_handle *trans;
4411 path = btrfs_alloc_path();
4414 path->reada = READA_BACK;
4416 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4417 key.type = BTRFS_ROOT_ITEM_KEY;
4418 key.offset = (u64)-1;
4421 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
4428 if (path->slots[0] == 0)
4432 leaf = path->nodes[0];
4433 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4434 btrfs_release_path(path);
4436 if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
4437 key.type != BTRFS_ROOT_ITEM_KEY)
4440 reloc_root = btrfs_read_fs_root(root, &key);
4441 if (IS_ERR(reloc_root)) {
4442 err = PTR_ERR(reloc_root);
4446 list_add(&reloc_root->root_list, &reloc_roots);
4448 if (btrfs_root_refs(&reloc_root->root_item) > 0) {
4449 fs_root = read_fs_root(fs_info,
4450 reloc_root->root_key.offset);
4451 if (IS_ERR(fs_root)) {
4452 ret = PTR_ERR(fs_root);
4453 if (ret != -ENOENT) {
4457 ret = mark_garbage_root(reloc_root);
4465 if (key.offset == 0)
4470 btrfs_release_path(path);
4472 if (list_empty(&reloc_roots))
4475 rc = alloc_reloc_control();
4481 rc->extent_root = fs_info->extent_root;
4483 set_reloc_control(rc);
4485 trans = btrfs_join_transaction(rc->extent_root);
4486 if (IS_ERR(trans)) {
4487 unset_reloc_control(rc);
4488 err = PTR_ERR(trans);
4492 rc->merge_reloc_tree = 1;
4494 while (!list_empty(&reloc_roots)) {
4495 reloc_root = list_entry(reloc_roots.next,
4496 struct btrfs_root, root_list);
4497 list_del(&reloc_root->root_list);
4499 if (btrfs_root_refs(&reloc_root->root_item) == 0) {
4500 list_add_tail(&reloc_root->root_list,
4505 fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
4506 if (IS_ERR(fs_root)) {
4507 err = PTR_ERR(fs_root);
4511 err = __add_reloc_root(reloc_root);
4512 BUG_ON(err < 0); /* -ENOMEM or logic error */
4513 fs_root->reloc_root = reloc_root;
4516 err = btrfs_commit_transaction(trans);
4520 merge_reloc_roots(rc);
4522 unset_reloc_control(rc);
4524 trans = btrfs_join_transaction(rc->extent_root);
4525 if (IS_ERR(trans)) {
4526 err = PTR_ERR(trans);
4529 err = btrfs_commit_transaction(trans);
4531 ret = clean_dirty_subvols(rc);
4532 if (ret < 0 && !err)
4537 if (!list_empty(&reloc_roots))
4538 free_reloc_roots(&reloc_roots);
4540 btrfs_free_path(path);
4543 /* cleanup orphan inode in data relocation tree */
4544 fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
4545 if (IS_ERR(fs_root))
4546 err = PTR_ERR(fs_root);
4548 err = btrfs_orphan_cleanup(fs_root);
4554 * helper to add ordered checksum for data relocation.
4556 * cloning the checksums properly handles nodatasum extents, and it
4557 * also saves the CPU time of re-calculating the checksums.
4559 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
4561 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4562 struct btrfs_ordered_sum *sums;
4563 struct btrfs_ordered_extent *ordered;
4569 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
4570 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
4572 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
4573 ret = btrfs_lookup_csums_range(fs_info->csum_root, disk_bytenr,
4574 disk_bytenr + len - 1, &list, 0);
4578 while (!list_empty(&list)) {
4579 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
4580 list_del_init(&sums->list);
4583 * We need to offset the new_bytenr based on where the csum is.
4584 * We need to do this because we will read in entire prealloc
4585 * extents but we may have written to say the middle of the
4586 * prealloc extent, so we need to make sure the csum goes with
4587 * the right disk offset.
4589 * We can do this because the data reloc inode refers strictly
4590 * to the on disk bytes, so we don't have to worry about
4591 * disk_len vs real len like with real inodes, since it's all on-disk length.
4594 new_bytenr = ordered->start + (sums->bytenr - disk_bytenr);
4595 sums->bytenr = new_bytenr;
4597 btrfs_add_ordered_sum(inode, ordered, sums);
4600 btrfs_put_ordered_extent(ordered);
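/*
 * A made-up example of the offset math above: if the ordered extent
 * covers disk_bytenr 100M..100M+1M but a csum entry was found for bytes
 * starting at 100M+256K, then sums->bytenr - disk_bytenr == 256K and the
 * csums are re-homed at ordered->start + 256K inside the new extent.
 */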
4604 int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
4605 struct btrfs_root *root, struct extent_buffer *buf,
4606 struct extent_buffer *cow)
4608 struct btrfs_fs_info *fs_info = root->fs_info;
4609 struct reloc_control *rc;
4610 struct backref_node *node;
4615 rc = fs_info->reloc_ctl;
4619 BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
4620 root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
4622 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
4623 if (buf == root->node)
4624 __update_reloc_root(root, cow->start);
4627 level = btrfs_header_level(buf);
4628 if (btrfs_header_generation(buf) <=
4629 btrfs_root_last_snapshot(&root->root_item))
4632 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
4633 rc->create_reloc_tree) {
4634 WARN_ON(!first_cow && level == 0);
4636 node = rc->backref_cache.path[level];
4637 BUG_ON(node->bytenr != buf->start &&
4638 node->new_bytenr != buf->start);
4640 drop_node_buffer(node);
4641 extent_buffer_get(cow);
4643 node->new_bytenr = cow->start;
4645 if (!node->pending) {
4646 list_move_tail(&node->list,
4647 &rc->backref_cache.pending[level]);
4652 __mark_block_processed(rc, node);
4654 if (first_cow && level > 0)
4655 rc->nodes_relocated += buf->len;
4658 if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
4659 ret = replace_file_extents(trans, rc, root, cow);
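/*
 * This COW hook, in short: when a reloc tree root itself is COWed, the
 * mapping tree is updated; while reloc trees are being built, the backref
 * cache node for the COWed block is pointed at the new buffer and queued
 * on the pending list so link_to_upper()/do_relocation() can fix up its
 * parents later; and in the UPDATE_DATA_PTRS stage, COWing a leaf triggers
 * replace_file_extents() to rewrite its file extent items.
 */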
4664 * called before creating a snapshot. it calculates the metadata reservation
4665 * required for relocating the tree blocks in the snapshot
4667 void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4668 u64 *bytes_to_reserve)
4670 struct btrfs_root *root;
4671 struct reloc_control *rc;
4673 root = pending->root;
4674 if (!root->reloc_root)
4677 rc = root->fs_info->reloc_ctl;
4678 if (!rc->merge_reloc_tree)
4681 root = root->reloc_root;
4682 BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4684 * relocation is in the stage of merging trees. the space
4685 * used by merging a reloc tree is twice the size of
4686 * relocated tree nodes in the worst case. half for cowing
4687 * the reloc tree, half for cowing the fs tree. the space
4688 * used by cowing the reloc tree will be freed after the
4689 * tree is dropped. if we create snapshot, cowing the fs
4690 * tree may use more space than it frees. so we need to
4691 * reserve extra space.
4693 *bytes_to_reserve += rc->nodes_relocated;
4697 * called after the snapshot is created. it migrates the block reservation
4698 * and creates a reloc root for the newly created snapshot
4700 int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4701 struct btrfs_pending_snapshot *pending)
4703 struct btrfs_root *root = pending->root;
4704 struct btrfs_root *reloc_root;
4705 struct btrfs_root *new_root;
4706 struct reloc_control *rc;
4709 if (!root->reloc_root)
4712 rc = root->fs_info->reloc_ctl;
4713 rc->merging_rsv_size += rc->nodes_relocated;
4715 if (rc->merge_reloc_tree) {
4716 ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4718 rc->nodes_relocated, true);
4723 new_root = pending->snap;
4724 reloc_root = create_reloc_root(trans, root->reloc_root,
4725 new_root->root_key.objectid);
4726 if (IS_ERR(reloc_root))
4727 return PTR_ERR(reloc_root);
4729 ret = __add_reloc_root(reloc_root);
4731 new_root->reloc_root = reloc_root;
4733 if (rc->create_reloc_tree)
4734 ret = clone_backref_node(trans, rc, root, reloc_root);