1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2007,2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/rbtree.h>
12 #include "transaction.h"
13 #include "print-tree.h"
18 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
19 *root, struct btrfs_path *path, int level);
20 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
21 const struct btrfs_key *ins_key, struct btrfs_path *path,
22 int data_size, int extend);
23 static int push_node_left(struct btrfs_trans_handle *trans,
24 struct extent_buffer *dst,
25 struct extent_buffer *src, int empty);
26 static int balance_node_right(struct btrfs_trans_handle *trans,
27 struct extent_buffer *dst_buf,
28 struct extent_buffer *src_buf);
29 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
32 static const struct btrfs_csums {
35 const char driver[12];
37 [BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
38 [BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
39 [BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
40 [BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
41 .driver = "blake2b-256" },
44 int btrfs_super_csum_size(const struct btrfs_super_block *s)
46 u16 t = btrfs_super_csum_type(s);
48 * csum type is validated at mount time
50 return btrfs_csums[t].size;
53 const char *btrfs_super_csum_name(u16 csum_type)
55 /* csum type is validated at mount time */
56 return btrfs_csums[csum_type].name;
60 * Return driver name if defined, otherwise the name that's also a valid driver
63 const char *btrfs_super_csum_driver(u16 csum_type)
65 /* csum type is validated at mount time */
66 return btrfs_csums[csum_type].driver[0] ?
67 btrfs_csums[csum_type].driver :
68 btrfs_csums[csum_type].name;
71 size_t __attribute_const__ btrfs_get_num_csums(void)
73 return ARRAY_SIZE(btrfs_csums);
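/*
 * Example (illustrative sketch, not part of the original file): how the
 * btrfs_csums table above is consumed. A checksum type read from the super
 * block indexes the table to get the digest size and crypto API name, and
 * btrfs_super_csum_driver() falls back to .name when no explicit .driver
 * is set (only blake2b defines one):
 *
 *	u16 csum_type = btrfs_super_csum_type(sb);
 *
 *	int size = btrfs_super_csum_size(sb);		// e.g. 32 for sha256
 *	const char *name = btrfs_super_csum_name(csum_type);
 *	const char *drv = btrfs_super_csum_driver(csum_type);
 *	// crc32c/xxhash64/sha256 -> drv == name; blake2b -> "blake2b-256"
 */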
76 struct btrfs_path *btrfs_alloc_path(void)
78 return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
81 /* this also releases the path */
82 void btrfs_free_path(struct btrfs_path *p)
86 btrfs_release_path(p);
87 kmem_cache_free(btrfs_path_cachep, p);
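/*
 * Example (illustrative sketch): the usual lifecycle of a path. A path can
 * be reused across searches after btrfs_release_path(), and btrfs_free_path()
 * releases it before freeing, so no separate release is needed at the end:
 *
 *	struct btrfs_path *path;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	// ... examine path->nodes[0] / path->slots[0] ...
 * out:
 *	btrfs_free_path(path);	// also drops locks and extent buffer refs
 *	return ret;
 */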
91 * path release drops references on the extent buffers in the path
92 * and it drops any locks held by this path
94 * It is safe to call this on paths that hold no locks or extent buffers.
96 noinline void btrfs_release_path(struct btrfs_path *p)
100 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
105 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
108 free_extent_buffer(p->nodes[i]);
114 * safely gets a reference on the root node of a tree. A lock
115 * is not taken, so a concurrent writer may put a different node
116 * at the root of the tree. See btrfs_lock_root_node for the
119 * The extent buffer returned by this has a reference taken, so
120 * it won't disappear. It may stop being the root of the tree
121 * at any time because there are no locks held.
123 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
125 struct extent_buffer *eb;
129 eb = rcu_dereference(root->node);
132 * RCU really hurts here: we could free up the root node because
133 * it was COWed, but we may not get the new root node yet, so do
134 * the inc_not_zero dance and, if it doesn't work, then
135 * synchronize_rcu and try again.
137 if (atomic_inc_not_zero(&eb->refs)) {
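/*
 * Sketch of the retry loop the comment above describes (assuming the elided
 * remainder follows the usual RCU + refcount pattern): keep the buffer only
 * if its refcount could be raised from non-zero, otherwise wait for the
 * grace period and look at the (possibly new) root again:
 *
 *	while (1) {
 *		rcu_read_lock();
 *		eb = rcu_dereference(root->node);
 *		if (atomic_inc_not_zero(&eb->refs)) {
 *			rcu_read_unlock();
 *			break;			// reference taken, eb is ours
 *		}
 *		rcu_read_unlock();
 *		synchronize_rcu();		// buffer was dying, retry
 *	}
 *	return eb;
 */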
148 * Cowonly roots (not-shareable trees, i.e. everything that is not a subvolume
149 * or reloc root) just get put onto a simple dirty list. Transaction commit
150 * walks this list to make sure they get properly updated on disk.
152 static void add_root_to_dirty_list(struct btrfs_root *root)
154 struct btrfs_fs_info *fs_info = root->fs_info;
156 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
157 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
160 spin_lock(&fs_info->trans_lock);
161 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
162 /* Want the extent tree to be the last on the list */
163 if (root->root_key.objectid == BTRFS_EXTENT_TREE_OBJECTID)
164 list_move_tail(&root->dirty_list,
165 &fs_info->dirty_cowonly_roots);
167 list_move(&root->dirty_list,
168 &fs_info->dirty_cowonly_roots);
170 spin_unlock(&fs_info->trans_lock);
174 * used by snapshot creation to make a copy of a root for a tree with
175 * a given objectid. The buffer with the new root node is returned in
176 * cow_ret, and this function returns zero on success or a negative error code.
178 int btrfs_copy_root(struct btrfs_trans_handle *trans,
179 struct btrfs_root *root,
180 struct extent_buffer *buf,
181 struct extent_buffer **cow_ret, u64 new_root_objectid)
183 struct btrfs_fs_info *fs_info = root->fs_info;
184 struct extent_buffer *cow;
187 struct btrfs_disk_key disk_key;
189 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
190 trans->transid != fs_info->running_transaction->transid);
191 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
192 trans->transid != root->last_trans);
194 level = btrfs_header_level(buf);
196 btrfs_item_key(buf, &disk_key, 0);
198 btrfs_node_key(buf, &disk_key, 0);
200 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
201 &disk_key, level, buf->start, 0);
205 copy_extent_buffer_full(cow, buf);
206 btrfs_set_header_bytenr(cow, cow->start);
207 btrfs_set_header_generation(cow, trans->transid);
208 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
209 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
210 BTRFS_HEADER_FLAG_RELOC);
211 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
212 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
214 btrfs_set_header_owner(cow, new_root_objectid);
216 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
218 WARN_ON(btrfs_header_generation(buf) > trans->transid);
219 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
220 ret = btrfs_inc_ref(trans, root, cow, 1);
222 ret = btrfs_inc_ref(trans, root, cow, 0);
227 btrfs_mark_buffer_dirty(cow);
236 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
237 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
239 MOD_LOG_ROOT_REPLACE,
242 struct tree_mod_root {
247 struct tree_mod_elem {
253 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
256 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
259 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
260 struct btrfs_disk_key key;
263 /* this is used for op == MOD_LOG_MOVE_KEYS */
269 /* this is used for op == MOD_LOG_ROOT_REPLACE */
270 struct tree_mod_root old_root;
274 * Pull a new tree mod seq number for our operation.
276 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
278 return atomic64_inc_return(&fs_info->tree_mod_seq);
282 * This adds a new blocker to the tree mod log's blocker list if the @elem
283 * passed does not already have a sequence number set. So when a caller expects
284 * to record tree modifications, it should make sure to set elem->seq to zero
285 * before calling btrfs_get_tree_mod_seq.
286 * Returns a fresh, unused tree log modification sequence number, even if no new
287 * blocker was added.
289 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
290 struct seq_list *elem)
292 write_lock(&fs_info->tree_mod_log_lock);
294 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
295 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
297 write_unlock(&fs_info->tree_mod_log_lock);
302 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
303 struct seq_list *elem)
305 struct rb_root *tm_root;
306 struct rb_node *node;
307 struct rb_node *next;
308 struct tree_mod_elem *tm;
309 u64 min_seq = (u64)-1;
310 u64 seq_putting = elem->seq;
315 write_lock(&fs_info->tree_mod_log_lock);
316 list_del(&elem->list);
319 if (!list_empty(&fs_info->tree_mod_seq_list)) {
320 struct seq_list *first;
322 first = list_first_entry(&fs_info->tree_mod_seq_list,
323 struct seq_list, list);
324 if (seq_putting > first->seq) {
326 * Blocker with lower sequence number exists, we
327 * cannot remove anything from the log.
329 write_unlock(&fs_info->tree_mod_log_lock);
332 min_seq = first->seq;
336 * anything that's lower than the lowest existing (read: blocked)
337 * sequence number can be removed from the tree.
339 tm_root = &fs_info->tree_mod_log;
340 for (node = rb_first(tm_root); node; node = next) {
341 next = rb_next(node);
342 tm = rb_entry(node, struct tree_mod_elem, node);
343 if (tm->seq >= min_seq)
345 rb_erase(node, tm_root);
348 write_unlock(&fs_info->tree_mod_log_lock);
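/*
 * Example (illustrative sketch, assuming the SEQ_LIST_INIT initializer from
 * ctree.h): a reader that needs a stable view of past tree states registers
 * a blocker around its work, which keeps the pruning above from deleting log
 * entries at or above its sequence number:
 *
 *	struct seq_list elem = SEQ_LIST_INIT(elem);	// elem.seq starts at 0
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	// ... replay tree state as of sequence number elem.seq ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);	// pruning may now catch up
 */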
352 * key order of the log:
353 * node/leaf start address -> sequence
355 * The 'start address' is the logical address of the *new* root node
356 * for root replace operations, or the logical address of the affected
357 * block for all other operations.
360 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
362 struct rb_root *tm_root;
363 struct rb_node **new;
364 struct rb_node *parent = NULL;
365 struct tree_mod_elem *cur;
367 lockdep_assert_held_write(&fs_info->tree_mod_log_lock);
369 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
371 tm_root = &fs_info->tree_mod_log;
372 new = &tm_root->rb_node;
374 cur = rb_entry(*new, struct tree_mod_elem, node);
376 if (cur->logical < tm->logical)
377 new = &((*new)->rb_left);
378 else if (cur->logical > tm->logical)
379 new = &((*new)->rb_right);
380 else if (cur->seq < tm->seq)
381 new = &((*new)->rb_left);
382 else if (cur->seq > tm->seq)
383 new = &((*new)->rb_right);
388 rb_link_node(&tm->node, parent, new);
389 rb_insert_color(&tm->node, tm_root);
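/*
 * Equivalent comparator for the ordering used above (illustrative sketch):
 * entries are keyed by (logical, seq), and the branches place larger keys in
 * the left subtree, so an in-order walk visits entries in descending
 * (logical, seq) order. The search helpers below follow the same convention.
 *
 *	static int tree_mod_elem_cmp(const struct tree_mod_elem *a,
 *				     const struct tree_mod_elem *b)
 *	{
 *		if (a->logical != b->logical)
 *			return a->logical > b->logical ? -1 : 1;
 *		if (a->seq != b->seq)
 *			return a->seq > b->seq ? -1 : 1;
 *		return 0;	// same (logical, seq): a duplicate entry
 *	}
 */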
394 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
395 * returns zero with the tree_mod_log_lock acquired. The caller must hold the
396 * lock until all tree mod log insertions are recorded in the rb tree and then
397 * write unlock fs_info::tree_mod_log_lock.
399 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
400 struct extent_buffer *eb) {
402 if (list_empty(&fs_info->tree_mod_seq_list))
404 if (eb && btrfs_header_level(eb) == 0)
407 write_lock(&fs_info->tree_mod_log_lock);
408 if (list_empty(&fs_info->tree_mod_seq_list)) {
409 write_unlock(&fs_info->tree_mod_log_lock);
416 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
417 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
418 struct extent_buffer *eb)
421 if (list_empty(&fs_info->tree_mod_seq_list))
423 if (eb && btrfs_header_level(eb) == 0)
429 static struct tree_mod_elem *
430 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
431 enum mod_log_op op, gfp_t flags)
433 struct tree_mod_elem *tm;
435 tm = kzalloc(sizeof(*tm), flags);
439 tm->logical = eb->start;
440 if (op != MOD_LOG_KEY_ADD) {
441 btrfs_node_key(eb, &tm->key, slot);
442 tm->blockptr = btrfs_node_blockptr(eb, slot);
446 tm->generation = btrfs_node_ptr_generation(eb, slot);
447 RB_CLEAR_NODE(&tm->node);
452 static noinline int tree_mod_log_insert_key(struct extent_buffer *eb, int slot,
453 enum mod_log_op op, gfp_t flags)
455 struct tree_mod_elem *tm;
458 if (!tree_mod_need_log(eb->fs_info, eb))
461 tm = alloc_tree_mod_elem(eb, slot, op, flags);
465 if (tree_mod_dont_log(eb->fs_info, eb)) {
470 ret = __tree_mod_log_insert(eb->fs_info, tm);
471 write_unlock(&eb->fs_info->tree_mod_log_lock);
478 static noinline int tree_mod_log_insert_move(struct extent_buffer *eb,
479 int dst_slot, int src_slot, int nr_items)
481 struct tree_mod_elem *tm = NULL;
482 struct tree_mod_elem **tm_list = NULL;
487 if (!tree_mod_need_log(eb->fs_info, eb))
490 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
494 tm = kzalloc(sizeof(*tm), GFP_NOFS);
500 tm->logical = eb->start;
502 tm->move.dst_slot = dst_slot;
503 tm->move.nr_items = nr_items;
504 tm->op = MOD_LOG_MOVE_KEYS;
506 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
507 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
508 MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
515 if (tree_mod_dont_log(eb->fs_info, eb))
520 * When we overwrite something during the move, we log these removals.
521 * This can only happen when we move towards the beginning of the
522 * buffer, i.e. dst_slot < src_slot.
524 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
525 ret = __tree_mod_log_insert(eb->fs_info, tm_list[i]);
530 ret = __tree_mod_log_insert(eb->fs_info, tm);
533 write_unlock(&eb->fs_info->tree_mod_log_lock);
538 for (i = 0; i < nr_items; i++) {
539 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
540 rb_erase(&tm_list[i]->node, &eb->fs_info->tree_mod_log);
544 write_unlock(&eb->fs_info->tree_mod_log_lock);
552 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
553 struct tree_mod_elem **tm_list,
559 for (i = nritems - 1; i >= 0; i--) {
560 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
562 for (j = nritems - 1; j > i; j--)
563 rb_erase(&tm_list[j]->node,
564 &fs_info->tree_mod_log);
572 static noinline int tree_mod_log_insert_root(struct extent_buffer *old_root,
573 struct extent_buffer *new_root, int log_removal)
575 struct btrfs_fs_info *fs_info = old_root->fs_info;
576 struct tree_mod_elem *tm = NULL;
577 struct tree_mod_elem **tm_list = NULL;
582 if (!tree_mod_need_log(fs_info, NULL))
585 if (log_removal && btrfs_header_level(old_root) > 0) {
586 nritems = btrfs_header_nritems(old_root);
587 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
593 for (i = 0; i < nritems; i++) {
594 tm_list[i] = alloc_tree_mod_elem(old_root, i,
595 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
603 tm = kzalloc(sizeof(*tm), GFP_NOFS);
609 tm->logical = new_root->start;
610 tm->old_root.logical = old_root->start;
611 tm->old_root.level = btrfs_header_level(old_root);
612 tm->generation = btrfs_header_generation(old_root);
613 tm->op = MOD_LOG_ROOT_REPLACE;
615 if (tree_mod_dont_log(fs_info, NULL))
619 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
621 ret = __tree_mod_log_insert(fs_info, tm);
623 write_unlock(&fs_info->tree_mod_log_lock);
632 for (i = 0; i < nritems; i++)
641 static struct tree_mod_elem *
642 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
645 struct rb_root *tm_root;
646 struct rb_node *node;
647 struct tree_mod_elem *cur = NULL;
648 struct tree_mod_elem *found = NULL;
650 read_lock(&fs_info->tree_mod_log_lock);
651 tm_root = &fs_info->tree_mod_log;
652 node = tm_root->rb_node;
654 cur = rb_entry(node, struct tree_mod_elem, node);
655 if (cur->logical < start) {
656 node = node->rb_left;
657 } else if (cur->logical > start) {
658 node = node->rb_right;
659 } else if (cur->seq < min_seq) {
660 node = node->rb_left;
661 } else if (!smallest) {
662 /* we want the node with the highest seq */
664 BUG_ON(found->seq > cur->seq);
666 node = node->rb_left;
667 } else if (cur->seq > min_seq) {
668 /* we want the node with the smallest seq */
670 BUG_ON(found->seq < cur->seq);
672 node = node->rb_right;
678 read_unlock(&fs_info->tree_mod_log_lock);
684 * this returns the element from the log with the smallest time sequence
685 * value that's in the log (the oldest log item). any element with a time
686 * sequence lower than min_seq will be ignored.
688 static struct tree_mod_elem *
689 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
692 return __tree_mod_log_search(fs_info, start, min_seq, 1);
696 * this returns the element from the log with the largest time sequence
697 * value that's in the log (the most recent log item). any element with
698 * a time sequence lower than min_seq will be ignored.
700 static struct tree_mod_elem *
701 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
703 return __tree_mod_log_search(fs_info, start, min_seq, 0);
706 static noinline int tree_mod_log_eb_copy(struct extent_buffer *dst,
707 struct extent_buffer *src, unsigned long dst_offset,
708 unsigned long src_offset, int nr_items)
710 struct btrfs_fs_info *fs_info = dst->fs_info;
712 struct tree_mod_elem **tm_list = NULL;
713 struct tree_mod_elem **tm_list_add, **tm_list_rem;
717 if (!tree_mod_need_log(fs_info, NULL))
720 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
723 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
728 tm_list_add = tm_list;
729 tm_list_rem = tm_list + nr_items;
730 for (i = 0; i < nr_items; i++) {
731 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
732 MOD_LOG_KEY_REMOVE, GFP_NOFS);
733 if (!tm_list_rem[i]) {
738 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
739 MOD_LOG_KEY_ADD, GFP_NOFS);
740 if (!tm_list_add[i]) {
746 if (tree_mod_dont_log(fs_info, NULL))
750 for (i = 0; i < nr_items; i++) {
751 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
754 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
759 write_unlock(&fs_info->tree_mod_log_lock);
765 for (i = 0; i < nr_items * 2; i++) {
766 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
767 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
771 write_unlock(&fs_info->tree_mod_log_lock);
777 static noinline int tree_mod_log_free_eb(struct extent_buffer *eb)
779 struct tree_mod_elem **tm_list = NULL;
784 if (btrfs_header_level(eb) == 0)
787 if (!tree_mod_need_log(eb->fs_info, NULL))
790 nritems = btrfs_header_nritems(eb);
791 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
795 for (i = 0; i < nritems; i++) {
796 tm_list[i] = alloc_tree_mod_elem(eb, i,
797 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
804 if (tree_mod_dont_log(eb->fs_info, eb))
807 ret = __tree_mod_log_free_eb(eb->fs_info, tm_list, nritems);
808 write_unlock(&eb->fs_info->tree_mod_log_lock);
816 for (i = 0; i < nritems; i++)
824 * check if the tree block can be shared by multiple trees
826 int btrfs_block_can_be_shared(struct btrfs_root *root,
827 struct extent_buffer *buf)
830 * Tree blocks not in shareable trees and tree roots are never shared.
831 * If a block was allocated after the last snapshot and the block was
832 * not allocated by tree relocation, we know the block is not shared.
834 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
835 buf != root->node && buf != root->commit_root &&
836 (btrfs_header_generation(buf) <=
837 btrfs_root_last_snapshot(&root->root_item) ||
838 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
844 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
845 struct btrfs_root *root,
846 struct extent_buffer *buf,
847 struct extent_buffer *cow,
850 struct btrfs_fs_info *fs_info = root->fs_info;
858 * Backrefs update rules:
860 * Always use full backrefs for extent pointers in tree block
861 * allocated by tree relocation.
863 * If a shared tree block is no longer referenced by its owner
864 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
865 * use full backrefs for extent pointers in tree block.
867 * If a tree block is being relocated
868 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
869 * use full backrefs for extent pointers in tree block.
870 * The reason for this is that some operations (such as dropping a tree)
871 * are only allowed on blocks that use full backrefs.
874 if (btrfs_block_can_be_shared(root, buf)) {
875 ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
876 btrfs_header_level(buf), 1,
882 btrfs_handle_fs_error(fs_info, ret, NULL);
887 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
888 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
889 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
894 owner = btrfs_header_owner(buf);
895 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
896 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
899 if ((owner == root->root_key.objectid ||
900 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
901 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
902 ret = btrfs_inc_ref(trans, root, buf, 1);
906 if (root->root_key.objectid ==
907 BTRFS_TREE_RELOC_OBJECTID) {
908 ret = btrfs_dec_ref(trans, root, buf, 0);
911 ret = btrfs_inc_ref(trans, root, cow, 1);
915 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
918 if (root->root_key.objectid ==
919 BTRFS_TREE_RELOC_OBJECTID)
920 ret = btrfs_inc_ref(trans, root, cow, 1);
922 ret = btrfs_inc_ref(trans, root, cow, 0);
926 if (new_flags != 0) {
927 int level = btrfs_header_level(buf);
929 ret = btrfs_set_disk_extent_flags(trans, buf,
930 new_flags, level, 0);
935 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
936 if (root->root_key.objectid ==
937 BTRFS_TREE_RELOC_OBJECTID)
938 ret = btrfs_inc_ref(trans, root, cow, 1);
940 ret = btrfs_inc_ref(trans, root, cow, 0);
943 ret = btrfs_dec_ref(trans, root, buf, 1);
947 btrfs_clean_tree_block(buf);
953 static struct extent_buffer *alloc_tree_block_no_bg_flush(
954 struct btrfs_trans_handle *trans,
955 struct btrfs_root *root,
957 const struct btrfs_disk_key *disk_key,
962 struct btrfs_fs_info *fs_info = root->fs_info;
963 struct extent_buffer *ret;
966 * If we are COWing a node/leaf from the extent, chunk, device or free
967 * space trees, make sure that we do not finish block group creation of
968 * pending block groups. We do this to avoid a deadlock.
969 * COWing can result in allocation of a new chunk, and flushing pending
970 * block groups (btrfs_create_pending_block_groups()) can be triggered
971 * when finishing allocation of a new chunk. Creation of a pending block
972 * group modifies the extent, chunk, device and free space trees,
973 * therefore we could deadlock with ourselves since we are holding a
974 * lock on an extent buffer that btrfs_create_pending_block_groups() may
975 * try to COW later.
976 * For similar reasons, we also need to delay flushing pending block
977 * groups when splitting a leaf or node, from one of those trees, since
978 * we are holding a write lock on it and its parent or when inserting a
979 * new root node for one of those trees.
981 if (root == fs_info->extent_root ||
982 root == fs_info->chunk_root ||
983 root == fs_info->dev_root ||
984 root == fs_info->free_space_root)
985 trans->can_flush_pending_bgs = false;
987 ret = btrfs_alloc_tree_block(trans, root, parent_start,
988 root->root_key.objectid, disk_key, level,
990 trans->can_flush_pending_bgs = true;
996 * does the dirty work in cow of a single block. The parent block (if
997 * supplied) is updated to point to the new cow copy. The new buffer is marked
998 * dirty and returned locked. If you modify the block it needs to be marked
999 * dirty again.
1001 * search_start -- an allocation hint for the new block
1003 * empty_size -- a hint that you plan on doing more cow. This is the size in
1004 * bytes the allocator should try to find free next to the block it returns.
1005 * This is just a hint and may be ignored by the allocator.
1007 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1008 struct btrfs_root *root,
1009 struct extent_buffer *buf,
1010 struct extent_buffer *parent, int parent_slot,
1011 struct extent_buffer **cow_ret,
1012 u64 search_start, u64 empty_size)
1014 struct btrfs_fs_info *fs_info = root->fs_info;
1015 struct btrfs_disk_key disk_key;
1016 struct extent_buffer *cow;
1019 int unlock_orig = 0;
1020 u64 parent_start = 0;
1022 if (*cow_ret == buf)
1025 btrfs_assert_tree_locked(buf);
1027 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
1028 trans->transid != fs_info->running_transaction->transid);
1029 WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
1030 trans->transid != root->last_trans);
1032 level = btrfs_header_level(buf);
1035 btrfs_item_key(buf, &disk_key, 0);
1037 btrfs_node_key(buf, &disk_key, 0);
1039 if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
1040 parent_start = parent->start;
1042 cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key,
1043 level, search_start, empty_size);
1045 return PTR_ERR(cow);
1047 /* cow is set to blocking by btrfs_init_new_buffer */
1049 copy_extent_buffer_full(cow, buf);
1050 btrfs_set_header_bytenr(cow, cow->start);
1051 btrfs_set_header_generation(cow, trans->transid);
1052 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1053 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1054 BTRFS_HEADER_FLAG_RELOC);
1055 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1056 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1058 btrfs_set_header_owner(cow, root->root_key.objectid);
1060 write_extent_buffer_fsid(cow, fs_info->fs_devices->metadata_uuid);
1062 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1064 btrfs_abort_transaction(trans, ret);
1068 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
1069 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1071 btrfs_abort_transaction(trans, ret);
1076 if (buf == root->node) {
1077 WARN_ON(parent && parent != buf);
1078 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1079 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1080 parent_start = buf->start;
1082 atomic_inc(&cow->refs);
1083 ret = tree_mod_log_insert_root(root->node, cow, 1);
1085 rcu_assign_pointer(root->node, cow);
1087 btrfs_free_tree_block(trans, root, buf, parent_start,
1089 free_extent_buffer(buf);
1090 add_root_to_dirty_list(root);
1092 WARN_ON(trans->transid != btrfs_header_generation(parent));
1093 tree_mod_log_insert_key(parent, parent_slot,
1094 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1095 btrfs_set_node_blockptr(parent, parent_slot,
1097 btrfs_set_node_ptr_generation(parent, parent_slot,
1099 btrfs_mark_buffer_dirty(parent);
1101 ret = tree_mod_log_free_eb(buf);
1103 btrfs_abort_transaction(trans, ret);
1107 btrfs_free_tree_block(trans, root, buf, parent_start,
1111 btrfs_tree_unlock(buf);
1112 free_extent_buffer_stale(buf);
1113 btrfs_mark_buffer_dirty(cow);
1119 * returns the tree mod log element recording the oldest predecessor of the given root.
1120 * entries older than time_seq are ignored.
1122 static struct tree_mod_elem *__tree_mod_log_oldest_root(
1123 struct extent_buffer *eb_root, u64 time_seq)
1125 struct tree_mod_elem *tm;
1126 struct tree_mod_elem *found = NULL;
1127 u64 root_logical = eb_root->start;
1134 * the very last operation that's logged for a root is the
1135 * replacement operation (if it is replaced at all). this has
1136 * the logical address of the *new* root, making it the very
1137 * first operation that's logged for this root.
1140 tm = tree_mod_log_search_oldest(eb_root->fs_info, root_logical,
1145 * if there are no tree operations for the oldest root, we simply
1146 * return it. this should only happen if that (old) root is at
1147 * level 0.
1153 * if there's an operation that's not a root replacement, we
1154 * found the oldest version of our root. normally, we'll find a
1155 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1157 if (tm->op != MOD_LOG_ROOT_REPLACE)
1161 root_logical = tm->old_root.logical;
1165 /* if there's no old root to return, return what we found instead */
1173 * tm is a pointer to the first operation to rewind within eb. then, all
1174 * previous operations will be rewound (until we reach something older than
1175 * time_seq).
1178 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1179 u64 time_seq, struct tree_mod_elem *first_tm)
1182 struct rb_node *next;
1183 struct tree_mod_elem *tm = first_tm;
1184 unsigned long o_dst;
1185 unsigned long o_src;
1186 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1188 n = btrfs_header_nritems(eb);
1189 read_lock(&fs_info->tree_mod_log_lock);
1190 while (tm && tm->seq >= time_seq) {
1192 * all the operations are recorded with the operator used for
1193 * the modification. as we're going backwards, we do the
1194 * opposite of each operation here.
1197 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1198 BUG_ON(tm->slot < n);
1200 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1201 case MOD_LOG_KEY_REMOVE:
1202 btrfs_set_node_key(eb, &tm->key, tm->slot);
1203 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1204 btrfs_set_node_ptr_generation(eb, tm->slot,
1208 case MOD_LOG_KEY_REPLACE:
1209 BUG_ON(tm->slot >= n);
1210 btrfs_set_node_key(eb, &tm->key, tm->slot);
1211 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1212 btrfs_set_node_ptr_generation(eb, tm->slot,
1215 case MOD_LOG_KEY_ADD:
1216 /* if a move operation is needed it's in the log */
1219 case MOD_LOG_MOVE_KEYS:
1220 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1221 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1222 memmove_extent_buffer(eb, o_dst, o_src,
1223 tm->move.nr_items * p_size);
1225 case MOD_LOG_ROOT_REPLACE:
1227 * this operation is special. for roots, this must be
1228 * handled explicitly before rewinding.
1229 * for non-roots, this operation may exist if the node
1230 * was a root: root A -> child B; then A gets empty and
1231 * B is promoted to the new root. in the mod log, we'll
1232 * have a root-replace operation for B, a tree block
1233 * that is no longer a root. we simply ignore that operation.
1237 next = rb_next(&tm->node);
1240 tm = rb_entry(next, struct tree_mod_elem, node);
1241 if (tm->logical != first_tm->logical)
1244 read_unlock(&fs_info->tree_mod_log_lock);
1245 btrfs_set_header_nritems(eb, n);
1249 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1250 * is returned. If rewind operations happen, a fresh buffer is returned. The
1251 * returned buffer is always read-locked. If the returned buffer is not the
1252 * input buffer, the lock on the input buffer is released and the input buffer
1253 * is freed (its refcount is decremented).
1255 static struct extent_buffer *
1256 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1257 struct extent_buffer *eb, u64 time_seq)
1259 struct extent_buffer *eb_rewin;
1260 struct tree_mod_elem *tm;
1265 if (btrfs_header_level(eb) == 0)
1268 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1272 btrfs_set_path_blocking(path);
1273 btrfs_set_lock_blocking_read(eb);
1275 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1276 BUG_ON(tm->slot != 0);
1277 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1279 btrfs_tree_read_unlock_blocking(eb);
1280 free_extent_buffer(eb);
1283 btrfs_set_header_bytenr(eb_rewin, eb->start);
1284 btrfs_set_header_backref_rev(eb_rewin,
1285 btrfs_header_backref_rev(eb));
1286 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1287 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1289 eb_rewin = btrfs_clone_extent_buffer(eb);
1291 btrfs_tree_read_unlock_blocking(eb);
1292 free_extent_buffer(eb);
1297 btrfs_tree_read_unlock_blocking(eb);
1298 free_extent_buffer(eb);
1300 btrfs_tree_read_lock(eb_rewin);
1301 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1302 WARN_ON(btrfs_header_nritems(eb_rewin) >
1303 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1309 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1310 * value. If there are no changes, the current root->node is returned. If
1311 * anything changed in between, there's a fresh buffer allocated on which the
1312 * rewind operations are done. In any case, the returned buffer is read locked.
1313 * Returns NULL on error (with no locks held).
1315 static inline struct extent_buffer *
1316 get_old_root(struct btrfs_root *root, u64 time_seq)
1318 struct btrfs_fs_info *fs_info = root->fs_info;
1319 struct tree_mod_elem *tm;
1320 struct extent_buffer *eb = NULL;
1321 struct extent_buffer *eb_root;
1322 u64 eb_root_owner = 0;
1323 struct extent_buffer *old;
1324 struct tree_mod_root *old_root = NULL;
1325 u64 old_generation = 0;
1329 eb_root = btrfs_read_lock_root_node(root);
1330 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1334 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1335 old_root = &tm->old_root;
1336 old_generation = tm->generation;
1337 logical = old_root->logical;
1338 level = old_root->level;
1340 logical = eb_root->start;
1341 level = btrfs_header_level(eb_root);
1344 tm = tree_mod_log_search(fs_info, logical, time_seq);
1345 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1346 btrfs_tree_read_unlock(eb_root);
1347 free_extent_buffer(eb_root);
1348 old = read_tree_block(fs_info, logical, 0, level, NULL);
1349 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1351 free_extent_buffer(old);
1353 "failed to read tree block %llu from get_old_root",
1356 eb = btrfs_clone_extent_buffer(old);
1357 free_extent_buffer(old);
1359 } else if (old_root) {
1360 eb_root_owner = btrfs_header_owner(eb_root);
1361 btrfs_tree_read_unlock(eb_root);
1362 free_extent_buffer(eb_root);
1363 eb = alloc_dummy_extent_buffer(fs_info, logical);
1365 btrfs_set_lock_blocking_read(eb_root);
1366 eb = btrfs_clone_extent_buffer(eb_root);
1367 btrfs_tree_read_unlock_blocking(eb_root);
1368 free_extent_buffer(eb_root);
1373 btrfs_tree_read_lock(eb);
1375 btrfs_set_header_bytenr(eb, eb->start);
1376 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1377 btrfs_set_header_owner(eb, eb_root_owner);
1378 btrfs_set_header_level(eb, old_root->level);
1379 btrfs_set_header_generation(eb, old_generation);
1382 __tree_mod_log_rewind(fs_info, eb, time_seq, tm);
1384 WARN_ON(btrfs_header_level(eb) != 0);
1385 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1390 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1392 struct tree_mod_elem *tm;
1394 struct extent_buffer *eb_root = btrfs_root_node(root);
1396 tm = __tree_mod_log_oldest_root(eb_root, time_seq);
1397 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1398 level = tm->old_root.level;
1400 level = btrfs_header_level(eb_root);
1402 free_extent_buffer(eb_root);
1407 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1408 struct btrfs_root *root,
1409 struct extent_buffer *buf)
1411 if (btrfs_is_testing(root->fs_info))
1414 /* Ensure we can see the FORCE_COW bit */
1415 smp_mb__before_atomic();
1418 * We do not need to cow a block if
1419 * 1) this block is not created or changed in this transaction;
1420 * 2) this block does not belong to TREE_RELOC tree;
1421 * 3) the root is not forced COW.
1423 * What is forced COW:
1424 * when we create a snapshot while committing the transaction,
1425 * after we've finished copying the src root, we must COW the shared
1426 * block to ensure metadata consistency.
1428 if (btrfs_header_generation(buf) == trans->transid &&
1429 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1430 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1431 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1432 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1438 * cows a single block, see __btrfs_cow_block for the real work.
1439 * This version of it has extra checks so that a block isn't COWed more than
1440 * once per transaction, as long as it hasn't been written yet
1442 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1443 struct btrfs_root *root, struct extent_buffer *buf,
1444 struct extent_buffer *parent, int parent_slot,
1445 struct extent_buffer **cow_ret)
1447 struct btrfs_fs_info *fs_info = root->fs_info;
1451 if (test_bit(BTRFS_ROOT_DELETING, &root->state))
1453 "COW'ing blocks on a fs root that's being dropped");
1455 if (trans->transaction != fs_info->running_transaction)
1456 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1458 fs_info->running_transaction->transid);
1460 if (trans->transid != fs_info->generation)
1461 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1462 trans->transid, fs_info->generation);
1464 if (!should_cow_block(trans, root, buf)) {
1465 trans->dirty = true;
1470 search_start = buf->start & ~((u64)SZ_1G - 1);
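/*
 * Worked example for the mask above: SZ_1G is 0x40000000, so the expression
 * rounds buf->start down to the containing 1GiB boundary, e.g. a buf->start
 * of 0x67890000 yields a search_start of 0x40000000. This hints the
 * allocator to place the COWed copy near the original block.
 */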
1473 btrfs_set_lock_blocking_write(parent);
1474 btrfs_set_lock_blocking_write(buf);
1477 * Before COWing this block for later modification, check if it's
1478 * the subtree root and do the delayed subtree trace if needed.
1480 * Also, we don't care about the error, as it's handled internally.
1482 btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
1483 ret = __btrfs_cow_block(trans, root, buf, parent,
1484 parent_slot, cow_ret, search_start, 0);
1486 trace_btrfs_cow_block(root, buf, *cow_ret);
1492 * helper function for defrag to decide if two blocks pointed to by a
1493 * node are actually close by
1495 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1497 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1499 if (blocknr > other && blocknr - (other + blocksize) < 32768)
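/*
 * Worked example (illustrative): with a 16KiB nodesize, a block at 0 and a
 * neighbour at 49151 are close (49151 - (0 + 16384) = 32767 < 32768), while
 * a neighbour at 49152 is not (gap of exactly 32768). Defrag uses this
 * ~32KiB window to decide which leaves are worth relocating.
 */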
1504 #ifdef __LITTLE_ENDIAN
1507 * Compare two keys, on little-endian the disk order is same as CPU order and
1508 * we can avoid the conversion.
1510 static int comp_keys(const struct btrfs_disk_key *disk_key,
1511 const struct btrfs_key *k2)
1513 const struct btrfs_key *k1 = (const struct btrfs_key *)disk_key;
1515 return btrfs_comp_cpu_keys(k1, k2);
1521 * compare two keys in a memcmp fashion
1523 static int comp_keys(const struct btrfs_disk_key *disk,
1524 const struct btrfs_key *k2)
1526 struct btrfs_key k1;
1528 btrfs_disk_key_to_cpu(&k1, disk);
1530 return btrfs_comp_cpu_keys(&k1, k2);
1535 * same as comp_keys, only with two btrfs_keys
1537 int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
1539 if (k1->objectid > k2->objectid)
1541 if (k1->objectid < k2->objectid)
1543 if (k1->type > k2->type)
1545 if (k1->type < k2->type)
1547 if (k1->offset > k2->offset)
1549 if (k1->offset < k2->offset)
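/*
 * Worked example of the ordering above: keys compare by objectid, then type,
 * then offset, so for the same objectid
 *
 *	(256, BTRFS_INODE_ITEM_KEY, 0)		// type 1
 *
 * sorts before
 *
 *	(256, BTRFS_INODE_REF_KEY, 256)		// type 12
 *
 * purely because of the type, regardless of the offsets.
 */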
1555 * this is used by the defrag code to go through all the
1556 * leaves pointed to by a node and reallocate them so that
1557 * disk order is close to key order
1559 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1560 struct btrfs_root *root, struct extent_buffer *parent,
1561 int start_slot, u64 *last_ret,
1562 struct btrfs_key *progress)
1564 struct btrfs_fs_info *fs_info = root->fs_info;
1565 struct extent_buffer *cur;
1568 u64 search_start = *last_ret;
1578 int progress_passed = 0;
1579 struct btrfs_disk_key disk_key;
1581 parent_level = btrfs_header_level(parent);
1583 WARN_ON(trans->transaction != fs_info->running_transaction);
1584 WARN_ON(trans->transid != fs_info->generation);
1586 parent_nritems = btrfs_header_nritems(parent);
1587 blocksize = fs_info->nodesize;
1588 end_slot = parent_nritems - 1;
1590 if (parent_nritems <= 1)
1593 btrfs_set_lock_blocking_write(parent);
1595 for (i = start_slot; i <= end_slot; i++) {
1596 struct btrfs_key first_key;
1599 btrfs_node_key(parent, &disk_key, i);
1600 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1603 progress_passed = 1;
1604 blocknr = btrfs_node_blockptr(parent, i);
1605 gen = btrfs_node_ptr_generation(parent, i);
1606 btrfs_node_key_to_cpu(parent, &first_key, i);
1607 if (last_block == 0)
1608 last_block = blocknr;
1611 other = btrfs_node_blockptr(parent, i - 1);
1612 close = close_blocks(blocknr, other, blocksize);
1614 if (!close && i < end_slot) {
1615 other = btrfs_node_blockptr(parent, i + 1);
1616 close = close_blocks(blocknr, other, blocksize);
1619 last_block = blocknr;
1623 cur = find_extent_buffer(fs_info, blocknr);
1625 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1628 if (!cur || !uptodate) {
1630 cur = read_tree_block(fs_info, blocknr, gen,
1634 return PTR_ERR(cur);
1635 } else if (!extent_buffer_uptodate(cur)) {
1636 free_extent_buffer(cur);
1639 } else if (!uptodate) {
1640 err = btrfs_read_buffer(cur, gen,
1641 parent_level - 1,&first_key);
1643 free_extent_buffer(cur);
1648 if (search_start == 0)
1649 search_start = last_block;
1651 btrfs_tree_lock(cur);
1652 btrfs_set_lock_blocking_write(cur);
1653 err = __btrfs_cow_block(trans, root, cur, parent, i,
1656 (end_slot - i) * blocksize));
1658 btrfs_tree_unlock(cur);
1659 free_extent_buffer(cur);
1662 search_start = cur->start;
1663 last_block = cur->start;
1664 *last_ret = search_start;
1665 btrfs_tree_unlock(cur);
1666 free_extent_buffer(cur);
1672 * search for key in the extent_buffer. The items start at offset p,
1673 * and they are item_size apart. There are 'max' items in p.
1675 * the slot in the array is returned via slot, and it points to
1676 * the place where you would insert key if it is not found in
1677 * the array.
1679 * slot may point to max if the key is bigger than all of the keys
1681 static noinline int generic_bin_search(struct extent_buffer *eb,
1682 unsigned long p, int item_size,
1683 const struct btrfs_key *key,
1689 const int key_size = sizeof(struct btrfs_disk_key);
1692 btrfs_err(eb->fs_info,
1693 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1694 __func__, low, high, eb->start,
1695 btrfs_header_owner(eb), btrfs_header_level(eb));
1699 while (low < high) {
1701 unsigned long offset;
1702 struct btrfs_disk_key *tmp;
1703 struct btrfs_disk_key unaligned;
1706 mid = (low + high) / 2;
1707 offset = p + mid * item_size;
1708 oip = offset_in_page(offset);
1710 if (oip + key_size <= PAGE_SIZE) {
1711 const unsigned long idx = offset >> PAGE_SHIFT;
1712 char *kaddr = page_address(eb->pages[idx]);
1714 tmp = (struct btrfs_disk_key *)(kaddr + oip);
1716 read_extent_buffer(eb, &unaligned, offset, key_size);
1720 ret = comp_keys(tmp, key);
1736 * simple bin_search frontend that does the right thing for
1739 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1742 if (btrfs_header_level(eb) == 0)
1743 return generic_bin_search(eb,
1744 offsetof(struct btrfs_leaf, items),
1745 sizeof(struct btrfs_item),
1746 key, btrfs_header_nritems(eb),
1749 return generic_bin_search(eb,
1750 offsetof(struct btrfs_node, ptrs),
1751 sizeof(struct btrfs_key_ptr),
1752 key, btrfs_header_nritems(eb),
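/*
 * Example (illustrative sketch) of the slot contract: with item keys
 * { 3, 7, 9 } (comparing on objectid only for brevity),
 *
 *	search for 7  -> returns 0 (found),     *slot == 1
 *	search for 8  -> returns 1 (not found), *slot == 2 (insert position)
 *	search for 10 -> returns 1 (not found), *slot == 3 (== nritems)
 *
 * so inserters can use *slot directly, while readers must check the return
 * value first.
 */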
1756 static void root_add_used(struct btrfs_root *root, u32 size)
1758 spin_lock(&root->accounting_lock);
1759 btrfs_set_root_used(&root->root_item,
1760 btrfs_root_used(&root->root_item) + size);
1761 spin_unlock(&root->accounting_lock);
1764 static void root_sub_used(struct btrfs_root *root, u32 size)
1766 spin_lock(&root->accounting_lock);
1767 btrfs_set_root_used(&root->root_item,
1768 btrfs_root_used(&root->root_item) - size);
1769 spin_unlock(&root->accounting_lock);
1772 /* given a node and slot number, this reads the block it points to. The
1773 * extent buffer is returned with a reference taken (but unlocked).
1775 struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
1778 int level = btrfs_header_level(parent);
1779 struct extent_buffer *eb;
1780 struct btrfs_key first_key;
1782 if (slot < 0 || slot >= btrfs_header_nritems(parent))
1783 return ERR_PTR(-ENOENT);
1787 btrfs_node_key_to_cpu(parent, &first_key, slot);
1788 eb = read_tree_block(parent->fs_info, btrfs_node_blockptr(parent, slot),
1789 btrfs_node_ptr_generation(parent, slot),
1790 level - 1, &first_key);
1791 if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1792 free_extent_buffer(eb);
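/*
 * Example (illustrative sketch): iterating the children of a node with the
 * helper above. The returned buffer carries a reference but no lock, so the
 * caller must drop the reference (and lock the buffer itself if needed):
 *
 *	for (slot = 0; slot < btrfs_header_nritems(node); slot++) {
 *		struct extent_buffer *child;
 *
 *		child = btrfs_read_node_slot(node, slot);
 *		if (IS_ERR(child))
 *			return PTR_ERR(child);
 *		// ... inspect child ...
 *		free_extent_buffer(child);
 *	}
 */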
1800 * node level balancing, used to make sure nodes are in proper order for
1801 * item deletion. We balance from the top down, so we have to make sure
1802 * that a deletion won't leave a node completely empty later on.
1804 static noinline int balance_level(struct btrfs_trans_handle *trans,
1805 struct btrfs_root *root,
1806 struct btrfs_path *path, int level)
1808 struct btrfs_fs_info *fs_info = root->fs_info;
1809 struct extent_buffer *right = NULL;
1810 struct extent_buffer *mid;
1811 struct extent_buffer *left = NULL;
1812 struct extent_buffer *parent = NULL;
1816 int orig_slot = path->slots[level];
1821 mid = path->nodes[level];
1823 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1824 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1825 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1827 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1829 if (level < BTRFS_MAX_LEVEL - 1) {
1830 parent = path->nodes[level + 1];
1831 pslot = path->slots[level + 1];
1835 * deal with the case where there is only one pointer in the root
1836 * by promoting the node below to a root
1839 struct extent_buffer *child;
1841 if (btrfs_header_nritems(mid) != 1)
1844 /* promote the child to a root */
1845 child = btrfs_read_node_slot(mid, 0);
1846 if (IS_ERR(child)) {
1847 ret = PTR_ERR(child);
1848 btrfs_handle_fs_error(fs_info, ret, NULL);
1852 btrfs_tree_lock(child);
1853 btrfs_set_lock_blocking_write(child);
1854 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1856 btrfs_tree_unlock(child);
1857 free_extent_buffer(child);
1861 ret = tree_mod_log_insert_root(root->node, child, 1);
1863 rcu_assign_pointer(root->node, child);
1865 add_root_to_dirty_list(root);
1866 btrfs_tree_unlock(child);
1868 path->locks[level] = 0;
1869 path->nodes[level] = NULL;
1870 btrfs_clean_tree_block(mid);
1871 btrfs_tree_unlock(mid);
1872 /* once for the path */
1873 free_extent_buffer(mid);
1875 root_sub_used(root, mid->len);
1876 btrfs_free_tree_block(trans, root, mid, 0, 1);
1877 /* once for the root ptr */
1878 free_extent_buffer_stale(mid);
1881 if (btrfs_header_nritems(mid) >
1882 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1885 left = btrfs_read_node_slot(parent, pslot - 1);
1890 btrfs_tree_lock(left);
1891 btrfs_set_lock_blocking_write(left);
1892 wret = btrfs_cow_block(trans, root, left,
1893 parent, pslot - 1, &left);
1900 right = btrfs_read_node_slot(parent, pslot + 1);
1905 btrfs_tree_lock(right);
1906 btrfs_set_lock_blocking_write(right);
1907 wret = btrfs_cow_block(trans, root, right,
1908 parent, pslot + 1, &right);
1915 /* first, try to make some room in the middle buffer */
1917 orig_slot += btrfs_header_nritems(left);
1918 wret = push_node_left(trans, left, mid, 1);
1924 * then try to empty the rightmost buffer into the middle
1927 wret = push_node_left(trans, mid, right, 1);
1928 if (wret < 0 && wret != -ENOSPC)
1930 if (btrfs_header_nritems(right) == 0) {
1931 btrfs_clean_tree_block(right);
1932 btrfs_tree_unlock(right);
1933 del_ptr(root, path, level + 1, pslot + 1);
1934 root_sub_used(root, right->len);
1935 btrfs_free_tree_block(trans, root, right, 0, 1);
1936 free_extent_buffer_stale(right);
1939 struct btrfs_disk_key right_key;
1940 btrfs_node_key(right, &right_key, 0);
1941 ret = tree_mod_log_insert_key(parent, pslot + 1,
1942 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1944 btrfs_set_node_key(parent, &right_key, pslot + 1);
1945 btrfs_mark_buffer_dirty(parent);
1948 if (btrfs_header_nritems(mid) == 1) {
1950 * we're not allowed to leave a node with one item in the
1951 * tree during a delete. A deletion from lower in the tree
1952 * could try to delete the only pointer in this node.
1953 * So, pull some keys from the left.
1954 * There has to be a left pointer at this point because
1955 * otherwise we would have pulled some pointers from the
1956 * right.
1960 btrfs_handle_fs_error(fs_info, ret, NULL);
1963 wret = balance_node_right(trans, mid, left);
1969 wret = push_node_left(trans, left, mid, 1);
1975 if (btrfs_header_nritems(mid) == 0) {
1976 btrfs_clean_tree_block(mid);
1977 btrfs_tree_unlock(mid);
1978 del_ptr(root, path, level + 1, pslot);
1979 root_sub_used(root, mid->len);
1980 btrfs_free_tree_block(trans, root, mid, 0, 1);
1981 free_extent_buffer_stale(mid);
1984 /* update the parent key to reflect our changes */
1985 struct btrfs_disk_key mid_key;
1986 btrfs_node_key(mid, &mid_key, 0);
1987 ret = tree_mod_log_insert_key(parent, pslot,
1988 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1990 btrfs_set_node_key(parent, &mid_key, pslot);
1991 btrfs_mark_buffer_dirty(parent);
1994 /* update the path */
1996 if (btrfs_header_nritems(left) > orig_slot) {
1997 atomic_inc(&left->refs);
1998 /* left was locked after cow */
1999 path->nodes[level] = left;
2000 path->slots[level + 1] -= 1;
2001 path->slots[level] = orig_slot;
2003 btrfs_tree_unlock(mid);
2004 free_extent_buffer(mid);
2007 orig_slot -= btrfs_header_nritems(left);
2008 path->slots[level] = orig_slot;
2011 /* double check we haven't messed things up */
2013 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2017 btrfs_tree_unlock(right);
2018 free_extent_buffer(right);
2021 if (path->nodes[level] != left)
2022 btrfs_tree_unlock(left);
2023 free_extent_buffer(left);
2028 /* Node balancing for insertion. Here we only split or push nodes around
2029 * when they are completely full. This is also done top down, so we
2030 * have to be pessimistic.
2032 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2033 struct btrfs_root *root,
2034 struct btrfs_path *path, int level)
2036 struct btrfs_fs_info *fs_info = root->fs_info;
2037 struct extent_buffer *right = NULL;
2038 struct extent_buffer *mid;
2039 struct extent_buffer *left = NULL;
2040 struct extent_buffer *parent = NULL;
2044 int orig_slot = path->slots[level];
2049 mid = path->nodes[level];
2050 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2052 if (level < BTRFS_MAX_LEVEL - 1) {
2053 parent = path->nodes[level + 1];
2054 pslot = path->slots[level + 1];
2060 left = btrfs_read_node_slot(parent, pslot - 1);
2064 /* first, try to make some room in the middle buffer */
2068 btrfs_tree_lock(left);
2069 btrfs_set_lock_blocking_write(left);
2071 left_nr = btrfs_header_nritems(left);
2072 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2075 ret = btrfs_cow_block(trans, root, left, parent,
2080 wret = push_node_left(trans, left, mid, 0);
2086 struct btrfs_disk_key disk_key;
2087 orig_slot += left_nr;
2088 btrfs_node_key(mid, &disk_key, 0);
2089 ret = tree_mod_log_insert_key(parent, pslot,
2090 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2092 btrfs_set_node_key(parent, &disk_key, pslot);
2093 btrfs_mark_buffer_dirty(parent);
2094 if (btrfs_header_nritems(left) > orig_slot) {
2095 path->nodes[level] = left;
2096 path->slots[level + 1] -= 1;
2097 path->slots[level] = orig_slot;
2098 btrfs_tree_unlock(mid);
2099 free_extent_buffer(mid);
2102 btrfs_header_nritems(left);
2103 path->slots[level] = orig_slot;
2104 btrfs_tree_unlock(left);
2105 free_extent_buffer(left);
2109 btrfs_tree_unlock(left);
2110 free_extent_buffer(left);
2112 right = btrfs_read_node_slot(parent, pslot + 1);
2117 * then try to empty the rightmost buffer into the middle
2122 btrfs_tree_lock(right);
2123 btrfs_set_lock_blocking_write(right);
2125 right_nr = btrfs_header_nritems(right);
2126 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2129 ret = btrfs_cow_block(trans, root, right,
2135 wret = balance_node_right(trans, right, mid);
2141 struct btrfs_disk_key disk_key;
2143 btrfs_node_key(right, &disk_key, 0);
2144 ret = tree_mod_log_insert_key(parent, pslot + 1,
2145 MOD_LOG_KEY_REPLACE, GFP_NOFS);
2147 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2148 btrfs_mark_buffer_dirty(parent);
2150 if (btrfs_header_nritems(mid) <= orig_slot) {
2151 path->nodes[level] = right;
2152 path->slots[level + 1] += 1;
2153 path->slots[level] = orig_slot -
2154 btrfs_header_nritems(mid);
2155 btrfs_tree_unlock(mid);
2156 free_extent_buffer(mid);
2158 btrfs_tree_unlock(right);
2159 free_extent_buffer(right);
2163 btrfs_tree_unlock(right);
2164 free_extent_buffer(right);
2170 * readahead one full node of leaves, finding things that are close
2171 * to the block in 'slot', and triggering readahead on them.
2173 static void reada_for_search(struct btrfs_fs_info *fs_info,
2174 struct btrfs_path *path,
2175 int level, int slot, u64 objectid)
2177 struct extent_buffer *node;
2178 struct btrfs_disk_key disk_key;
2183 struct extent_buffer *eb;
2191 if (!path->nodes[level])
2194 node = path->nodes[level];
2196 search = btrfs_node_blockptr(node, slot);
2197 blocksize = fs_info->nodesize;
2198 eb = find_extent_buffer(fs_info, search);
2200 free_extent_buffer(eb);
2206 nritems = btrfs_header_nritems(node);
2210 if (path->reada == READA_BACK) {
2214 } else if (path->reada == READA_FORWARD) {
2219 if (path->reada == READA_BACK && objectid) {
2220 btrfs_node_key(node, &disk_key, nr);
2221 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2224 search = btrfs_node_blockptr(node, nr);
2225 if ((search <= target && target - search <= 65536) ||
2226 (search > target && search - target <= 65536)) {
2227 readahead_tree_block(fs_info, search);
2231 if ((nread > 65536 || nscan > 32))
2236 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2237 struct btrfs_path *path, int level)
2241 struct extent_buffer *parent;
2242 struct extent_buffer *eb;
2247 parent = path->nodes[level + 1];
2251 nritems = btrfs_header_nritems(parent);
2252 slot = path->slots[level + 1];
2255 block1 = btrfs_node_blockptr(parent, slot - 1);
2256 gen = btrfs_node_ptr_generation(parent, slot - 1);
2257 eb = find_extent_buffer(fs_info, block1);
2259 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2260 * don't want to return -EAGAIN here. That will loop
2263 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2265 free_extent_buffer(eb);
2267 if (slot + 1 < nritems) {
2268 block2 = btrfs_node_blockptr(parent, slot + 1);
2269 gen = btrfs_node_ptr_generation(parent, slot + 1);
2270 eb = find_extent_buffer(fs_info, block2);
2271 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2273 free_extent_buffer(eb);
2277 readahead_tree_block(fs_info, block1);
2279 readahead_tree_block(fs_info, block2);
2284 * when we walk down the tree, it is usually safe to unlock the higher layers
2285 * in the tree. The exceptions are when our path goes through slot 0, because
2286 * operations on the tree might require changing key pointers higher up in the
2289 * callers might also have set path->keep_locks, which tells this code to keep
2290 * the lock if the path points to the last slot in the block. This is part of
2291 * walking through the tree, and selecting the next slot in the higher block.
2293 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2294 * if lowest_unlock is 1, level 0 won't be unlocked
2296 static noinline void unlock_up(struct btrfs_path *path, int level,
2297 int lowest_unlock, int min_write_lock_level,
2298 int *write_lock_level)
2301 int skip_level = level;
2303 struct extent_buffer *t;
2305 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2306 if (!path->nodes[i])
2308 if (!path->locks[i])
2310 if (!no_skips && path->slots[i] == 0) {
2314 if (!no_skips && path->keep_locks) {
2317 nritems = btrfs_header_nritems(t);
2318 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2323 if (skip_level < i && i >= lowest_unlock)
2327 if (i >= lowest_unlock && i > skip_level) {
2328 btrfs_tree_unlock_rw(t, path->locks[i]);
2330 if (write_lock_level &&
2331 i > min_write_lock_level &&
2332 i <= *write_lock_level) {
2333 *write_lock_level = i - 1;
2340 * helper function for btrfs_search_slot. The goal is to find a block
2341 * in cache without setting the path to blocking. If we find the block
2342 * we return zero and the path is unchanged.
2344 * If we can't find the block, we set the path blocking and do some
2345 * reada. -EAGAIN is returned and the search must be repeated.
2348 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2349 struct extent_buffer **eb_ret, int level, int slot,
2350 const struct btrfs_key *key)
2352 struct btrfs_fs_info *fs_info = root->fs_info;
2355 struct extent_buffer *tmp;
2356 struct btrfs_key first_key;
2360 blocknr = btrfs_node_blockptr(*eb_ret, slot);
2361 gen = btrfs_node_ptr_generation(*eb_ret, slot);
2362 parent_level = btrfs_header_level(*eb_ret);
2363 btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
2365 tmp = find_extent_buffer(fs_info, blocknr);
2367 /* first we do an atomic uptodate check */
2368 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2370 * Do an extra check for first_key: eb can be stale due to
2371 * being cached, read from scrub, or having multiple
2372 * parents (shared tree blocks).
2374 if (btrfs_verify_level_key(tmp,
2375 parent_level - 1, &first_key, gen)) {
2376 free_extent_buffer(tmp);
2383 /* the pages were up to date, but we failed
2384 * the generation number check. Do a full
2385 * read for the generation number that is correct.
2386 * We must do this without dropping locks so
2387 * we can trust our generation number
2389 btrfs_set_path_blocking(p);
2391 /* now we're allowed to do a blocking uptodate check */
2392 ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
2397 free_extent_buffer(tmp);
2398 btrfs_release_path(p);
2403 * reduce lock contention at high levels
2404 * of the btree by dropping locks before
2405 * we read. Don't release the lock on the current
2406 * level because we need to walk this node to figure
2407 * out which blocks to read.
2409 btrfs_unlock_up_safe(p, level + 1);
2410 btrfs_set_path_blocking(p);
2412 if (p->reada != READA_NONE)
2413 reada_for_search(fs_info, p, level, slot, key->objectid);
2416 tmp = read_tree_block(fs_info, blocknr, gen, parent_level - 1,
2420 * If the read above didn't mark this buffer up to date,
2421 * it will never end up being up to date. Set ret to EIO now
2422 * and give up so that our caller doesn't loop forever
2425 if (!extent_buffer_uptodate(tmp))
2427 free_extent_buffer(tmp);
2432 btrfs_release_path(p);
2437 * helper function for btrfs_search_slot. This does all of the checks
2438 * for node-level blocks and does any balancing required based on
2441 * If no extra work was required, zero is returned. If we had to
2442 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2446 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2447 struct btrfs_root *root, struct btrfs_path *p,
2448 struct extent_buffer *b, int level, int ins_len,
2449 int *write_lock_level)
2451 struct btrfs_fs_info *fs_info = root->fs_info;
2454 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2455 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2458 if (*write_lock_level < level + 1) {
2459 *write_lock_level = level + 1;
2460 btrfs_release_path(p);
2464 btrfs_set_path_blocking(p);
2465 reada_for_balance(fs_info, p, level);
2466 sret = split_node(trans, root, p, level);
2473 b = p->nodes[level];
2474 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2475 BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2478 if (*write_lock_level < level + 1) {
2479 *write_lock_level = level + 1;
2480 btrfs_release_path(p);
2484 btrfs_set_path_blocking(p);
2485 reada_for_balance(fs_info, p, level);
2486 sret = balance_level(trans, root, p, level);
2492 b = p->nodes[level];
2494 btrfs_release_path(p);
2497 BUG_ON(btrfs_header_nritems(b) == 1);
2507 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2508 u64 iobjectid, u64 ioff, u8 key_type,
2509 struct btrfs_key *found_key)
2512 struct btrfs_key key;
2513 struct extent_buffer *eb;
2518 key.type = key_type;
2519 key.objectid = iobjectid;
2522 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2526 eb = path->nodes[0];
2527 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2528 ret = btrfs_next_leaf(fs_root, path);
2531 eb = path->nodes[0];
2534 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2535 if (found_key->type != key.type ||
2536 found_key->objectid != key.objectid)
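/*
 * Editor's example (not part of the original source): a minimal sketch of
 * calling btrfs_find_item() above to locate an inode item. It uses only
 * declarations this file already includes; the objectid is a hypothetical
 * placeholder.
 */
static int __maybe_unused example_find_inode_item(struct btrfs_root *root,
						  u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	/* ioff is 0 because inode items always use offset 0 */
	ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_ITEM_KEY,
			      &found_key);
	btrfs_free_path(path);
	return ret;
}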
2542 static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
2543 struct btrfs_path *p,
2544 int write_lock_level)
2546 struct btrfs_fs_info *fs_info = root->fs_info;
2547 struct extent_buffer *b;
2551 /* We try very hard to do read locks on the root */
2552 root_lock = BTRFS_READ_LOCK;
2554 if (p->search_commit_root) {
2556 * The commit roots are read only, so we always use read locks,
2557 * and we must always hold the commit_root_sem when searching
2558 * them. The only exception is send, where we don't want to
2559 * block transaction commits for a long time, so we need to
2560 * clone the commit root in order to avoid races with
2561 * transaction commits that create a snapshot of one of the
2562 * roots used by a send operation.
2564 if (p->need_commit_sem) {
2565 down_read(&fs_info->commit_root_sem);
2566 b = btrfs_clone_extent_buffer(root->commit_root);
2567 up_read(&fs_info->commit_root_sem);
2569 return ERR_PTR(-ENOMEM);
2572 b = root->commit_root;
2573 atomic_inc(&b->refs);
2575 level = btrfs_header_level(b);
2577 * Ensure that all callers have set skip_locking when
2578 * p->search_commit_root = 1.
2580 ASSERT(p->skip_locking == 1);
2585 if (p->skip_locking) {
2586 b = btrfs_root_node(root);
2587 level = btrfs_header_level(b);
2592 * If the level is set to maximum, we can skip trying to get the read lock.
2595 if (write_lock_level < BTRFS_MAX_LEVEL) {
2597 * We don't know the level of the root node until we actually
2598 * have it read locked
2600 b = btrfs_read_lock_root_node(root);
2601 level = btrfs_header_level(b);
2602 if (level > write_lock_level)
2605 /* Whoops, must trade for write lock */
2606 btrfs_tree_read_unlock(b);
2607 free_extent_buffer(b);
2610 b = btrfs_lock_root_node(root);
2611 root_lock = BTRFS_WRITE_LOCK;
2613 /* The level might have changed, check again */
2614 level = btrfs_header_level(b);
2617 p->nodes[level] = b;
2618 if (!p->skip_locking)
2619 p->locks[level] = root_lock;
2621 * Callers are responsible for dropping b's references.
2628 * btrfs_search_slot - look for a key in a tree and perform necessary
2629 * modifications to preserve tree invariants.
2631 * @trans: Handle of transaction, used when modifying the tree
2632 * @p: Holds all btree nodes along the search path
2633 * @root: The root node of the tree
2634 * @key: The key we are looking for
2635 * @ins_len: Indicates purpose of search: for inserts it is 1, for
2636 * deletions it's -1, 0 for plain searches
2637 * @cow: boolean indicating whether CoW operations should be performed.
2638 * Must always be 1 when modifying the tree.
2640 * If @ins_len > 0, nodes and leaves will be split as we walk down the tree.
2641 * If @ins_len < 0, nodes will be merged as we walk down the tree (if possible)
2643 * If @key is found, 0 is returned and you can find the item in the leaf level
2644 * of the path (level 0)
2646 * If @key isn't found, 1 is returned and the leaf level of the path (level 0)
2647 * points to the slot where it should be inserted
2649 * If an error is encountered while searching the tree, a negative error number is returned.
2652 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2653 const struct btrfs_key *key, struct btrfs_path *p,
2654 int ins_len, int cow)
2656 struct extent_buffer *b;
2661 int lowest_unlock = 1;
2662 /* everything at write_lock_level or lower must be write locked */
2663 int write_lock_level = 0;
2664 u8 lowest_level = 0;
2665 int min_write_lock_level;
2668 lowest_level = p->lowest_level;
2669 WARN_ON(lowest_level && ins_len > 0);
2670 WARN_ON(p->nodes[0] != NULL);
2671 BUG_ON(!cow && ins_len);
2676 /* when we are removing items, we might have to go up to level
2677 * two as we update tree pointers. Make sure we keep write
2678 * locks on those levels as well
2680 write_lock_level = 2;
2681 } else if (ins_len > 0) {
2683 * for inserting items, make sure we have a write lock on
2684 * level 1 so we can update keys
2686 write_lock_level = 1;
2690 write_lock_level = -1;
2692 if (cow && (p->keep_locks || p->lowest_level))
2693 write_lock_level = BTRFS_MAX_LEVEL;
2695 min_write_lock_level = write_lock_level;
2699 b = btrfs_search_slot_get_root(root, p, write_lock_level);
2708 level = btrfs_header_level(b);
2711 bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2714 * if we don't really need to cow this block
2715 * then we don't want to set the path blocking,
2716 * so we test it here
2718 if (!should_cow_block(trans, root, b)) {
2719 trans->dirty = true;
2724 * must have write locks on this node and the parent.
2727 if (level > write_lock_level ||
2728 (level + 1 > write_lock_level &&
2729 level + 1 < BTRFS_MAX_LEVEL &&
2730 p->nodes[level + 1])) {
2731 write_lock_level = level + 1;
2732 btrfs_release_path(p);
2736 btrfs_set_path_blocking(p);
2738 err = btrfs_cow_block(trans, root, b, NULL, 0,
2741 err = btrfs_cow_block(trans, root, b,
2742 p->nodes[level + 1],
2743 p->slots[level + 1], &b);
2750 p->nodes[level] = b;
2752 * Leave the path with blocking locks to avoid a massive
2753 * number of lock context switches; this is done on purpose.
2757 * we have a lock on b and as long as we aren't changing
2758 * the tree, there is no way for the items in b to change.
2759 * It is safe to drop the lock on our parent before we
2760 * go through the expensive btree search on b.
2762 * If we're inserting or deleting (ins_len != 0), then we might
2763 * be changing slot zero, which may require changing the parent.
2764 * So, we can't drop the lock until after we know which slot
2765 * we're operating on.
2767 if (!ins_len && !p->keep_locks) {
2770 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2771 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2777 * If btrfs_bin_search returns an exact match (prev_cmp == 0)
2778 * we can safely assume the target key will always be in slot 0
2779 * on lower levels due to the invariants BTRFS' btree provides,
2780 * namely that a btrfs_key_ptr entry always points to the
2781 * lowest key in the child node, thus we can skip searching lower levels.
2784 if (prev_cmp == 0) {
2788 ret = btrfs_bin_search(b, key, &slot);
2795 p->slots[level] = slot;
2797 btrfs_leaf_free_space(b) < ins_len) {
2798 if (write_lock_level < 1) {
2799 write_lock_level = 1;
2800 btrfs_release_path(p);
2804 btrfs_set_path_blocking(p);
2805 err = split_leaf(trans, root, key,
2806 p, ins_len, ret == 0);
2814 if (!p->search_for_split)
2815 unlock_up(p, level, lowest_unlock,
2816 min_write_lock_level, NULL);
2819 if (ret && slot > 0) {
2823 p->slots[level] = slot;
2824 err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
2832 b = p->nodes[level];
2833 slot = p->slots[level];
2836 * Slot 0 is special, if we change the key we have to update
2837 * the parent pointer which means we must have a write lock on
2840 if (slot == 0 && ins_len && write_lock_level < level + 1) {
2841 write_lock_level = level + 1;
2842 btrfs_release_path(p);
2846 unlock_up(p, level, lowest_unlock, min_write_lock_level,
2849 if (level == lowest_level) {
2855 err = read_block_for_search(root, p, &b, level, slot, key);
2863 if (!p->skip_locking) {
2864 level = btrfs_header_level(b);
2865 if (level <= write_lock_level) {
2866 if (!btrfs_try_tree_write_lock(b)) {
2867 btrfs_set_path_blocking(p);
2870 p->locks[level] = BTRFS_WRITE_LOCK;
2872 if (!btrfs_tree_read_lock_atomic(b)) {
2873 btrfs_set_path_blocking(p);
2874 btrfs_tree_read_lock(b);
2876 p->locks[level] = BTRFS_READ_LOCK;
2878 p->nodes[level] = b;
2884 * we don't really know what they plan on doing with the path
2885 * from here on, so for now just mark it as blocking
2887 if (!p->leave_spinning)
2888 btrfs_set_path_blocking(p);
2889 if (ret < 0 && !p->skip_release_on_error)
2890 btrfs_release_path(p);
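/*
 * Editor's example (not part of the original source): the common read-only
 * lookup pattern built on btrfs_search_slot(). Passing a NULL transaction
 * with ins_len == 0 and cow == 0 performs a plain search; the key values
 * are hypothetical placeholders.
 */
static int __maybe_unused example_plain_lookup(struct btrfs_root *root,
					       u64 objectid, u8 type,
					       u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	/* ret == 0: exact match at path->nodes[0], path->slots[0] */
	if (ret > 0) {
		/* not found: the slot is where the key would be inserted */
		ret = -ENOENT;
	}
	btrfs_free_path(path);
	return ret;
}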
2895 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2896 * current state of the tree together with the operations recorded in the tree
2897 * modification log to search for the key in a previous version of this tree, as
2898 * denoted by the time_seq parameter.
2900 * Naturally, there is no support for insert, delete or cow operations.
2902 * The resulting path and return value will be set up as if we called
2903 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2905 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2906 struct btrfs_path *p, u64 time_seq)
2908 struct btrfs_fs_info *fs_info = root->fs_info;
2909 struct extent_buffer *b;
2914 int lowest_unlock = 1;
2915 u8 lowest_level = 0;
2917 lowest_level = p->lowest_level;
2918 WARN_ON(p->nodes[0] != NULL);
2920 if (p->search_commit_root) {
2922 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2926 b = get_old_root(root, time_seq);
2931 level = btrfs_header_level(b);
2932 p->locks[level] = BTRFS_READ_LOCK;
2937 level = btrfs_header_level(b);
2938 p->nodes[level] = b;
2941 * we have a lock on b and as long as we aren't changing
2942 * the tree, there is no way for the items in b to change.
2943 * It is safe to drop the lock on our parent before we
2944 * go through the expensive btree search on b.
2946 btrfs_unlock_up_safe(p, level + 1);
2948 ret = btrfs_bin_search(b, key, &slot);
2953 p->slots[level] = slot;
2954 unlock_up(p, level, lowest_unlock, 0, NULL);
2958 if (ret && slot > 0) {
2962 p->slots[level] = slot;
2963 unlock_up(p, level, lowest_unlock, 0, NULL);
2965 if (level == lowest_level) {
2971 err = read_block_for_search(root, p, &b, level, slot, key);
2979 level = btrfs_header_level(b);
2980 if (!btrfs_tree_read_lock_atomic(b)) {
2981 btrfs_set_path_blocking(p);
2982 btrfs_tree_read_lock(b);
2984 b = tree_mod_log_rewind(fs_info, p, b, time_seq);
2989 p->locks[level] = BTRFS_READ_LOCK;
2990 p->nodes[level] = b;
2994 if (!p->leave_spinning)
2995 btrfs_set_path_blocking(p);
2997 btrfs_release_path(p);
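/*
 * Editor's example (not part of the original source): a sketch of a
 * historical lookup with btrfs_search_old_slot(). In real use, time_seq
 * would come from the tree modification log (btrfs_get_tree_mod_seq());
 * here it is simply supplied by the caller.
 */
static int __maybe_unused example_old_lookup(struct btrfs_root *root,
					     const struct btrfs_key *key,
					     struct btrfs_path *path,
					     u64 time_seq)
{
	/* read only: old searches support no insert, delete or cow */
	return btrfs_search_old_slot(root, key, path, time_seq);
}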
3003 * helper to use instead of btrfs_search_slot if no exact match is needed but
3004 * instead the next or previous item should be returned.
3005 * When find_higher is true, the next higher item is returned, the next lower otherwise.
3007 * When return_any and find_higher are both true, and no higher item is found,
3008 * return the next lower instead.
3009 * When return_any is true and find_higher is false, and no lower item is found,
3010 * return the next higher instead.
3011 * It returns 0 if any item is found, 1 if none is found (tree empty), and < 0 on error.
3014 int btrfs_search_slot_for_read(struct btrfs_root *root,
3015 const struct btrfs_key *key,
3016 struct btrfs_path *p, int find_higher,
3020 struct extent_buffer *leaf;
3023 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3027 * a return value of 1 means the path is at the position where the
3028 * item should be inserted. Normally this is the next bigger item,
3029 * but in case the previous item is the last in a leaf, path points
3030 * to the first free slot in the previous leaf, i.e. at an invalid slot.
3036 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3037 ret = btrfs_next_leaf(root, p);
3043 * no higher item found, return the next lower instead
3048 btrfs_release_path(p);
3052 if (p->slots[0] == 0) {
3053 ret = btrfs_prev_leaf(root, p);
3058 if (p->slots[0] == btrfs_header_nritems(leaf))
3065 * no lower item found, return the next higher instead
3070 btrfs_release_path(p);
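/*
 * Editor's example (not part of the original source): finding the item at
 * or after a key, falling back to the next lower item when nothing higher
 * exists (find_higher == 1, return_any == 1).
 */
static int __maybe_unused example_nearest_lookup(struct btrfs_root *root,
						 const struct btrfs_key *key,
						 struct btrfs_path *path)
{
	int ret;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 1);
	/* ret == 0: an item was found at path->slots[0]; 1: tree empty */
	return ret;
}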
3080 * adjust the pointers going up the tree, starting at level,
3081 * making sure the right key of each node points to 'key'.
3082 * This is used after shifting pointers to the left, so it stops
3083 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels
3087 static void fixup_low_keys(struct btrfs_path *path,
3088 struct btrfs_disk_key *key, int level)
3091 struct extent_buffer *t;
3094 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3095 int tslot = path->slots[i];
3097 if (!path->nodes[i])
3100 ret = tree_mod_log_insert_key(t, tslot, MOD_LOG_KEY_REPLACE,
3103 btrfs_set_node_key(t, key, tslot);
3104 btrfs_mark_buffer_dirty(path->nodes[i]);
3113 * This function isn't completely safe. It's the caller's responsibility
3114 * to ensure that the new key won't break the ordering
3116 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3117 struct btrfs_path *path,
3118 const struct btrfs_key *new_key)
3120 struct btrfs_disk_key disk_key;
3121 struct extent_buffer *eb;
3124 eb = path->nodes[0];
3125 slot = path->slots[0];
3127 btrfs_item_key(eb, &disk_key, slot - 1);
3128 if (unlikely(comp_keys(&disk_key, new_key) >= 0)) {
3130 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3131 slot, btrfs_disk_key_objectid(&disk_key),
3132 btrfs_disk_key_type(&disk_key),
3133 btrfs_disk_key_offset(&disk_key),
3134 new_key->objectid, new_key->type,
3136 btrfs_print_leaf(eb);
3140 if (slot < btrfs_header_nritems(eb) - 1) {
3141 btrfs_item_key(eb, &disk_key, slot + 1);
3142 if (unlikely(comp_keys(&disk_key, new_key) <= 0)) {
3144 "slot %u key (%llu %u %llu) new key (%llu %u %llu)",
3145 slot, btrfs_disk_key_objectid(&disk_key),
3146 btrfs_disk_key_type(&disk_key),
3147 btrfs_disk_key_offset(&disk_key),
3148 new_key->objectid, new_key->type,
3150 btrfs_print_leaf(eb);
3155 btrfs_cpu_key_to_disk(&disk_key, new_key);
3156 btrfs_set_item_key(eb, &disk_key, slot);
3157 btrfs_mark_buffer_dirty(eb);
3159 fixup_low_keys(path, &disk_key, 1);
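/*
 * Editor's example (not part of the original source): bumping the offset
 * of the item the path points at, e.g. after trimming the front of a file
 * extent. The caller must hold the leaf locked and must not break key
 * ordering; new_offset is a hypothetical value.
 */
static void __maybe_unused example_rekey_item(struct btrfs_fs_info *fs_info,
					      struct btrfs_path *path,
					      u64 new_offset)
{
	struct btrfs_key new_key;

	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;
	btrfs_set_item_key_safe(fs_info, path, &new_key);
}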
3163 * try to push data from one node into the next node left in the tree.
3166 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3167 * error, and > 0 if there was no room in the left hand block.
3169 static int push_node_left(struct btrfs_trans_handle *trans,
3170 struct extent_buffer *dst,
3171 struct extent_buffer *src, int empty)
3173 struct btrfs_fs_info *fs_info = trans->fs_info;
3179 src_nritems = btrfs_header_nritems(src);
3180 dst_nritems = btrfs_header_nritems(dst);
3181 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3182 WARN_ON(btrfs_header_generation(src) != trans->transid);
3183 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3185 if (!empty && src_nritems <= 8)
3188 if (push_items <= 0)
3192 push_items = min(src_nritems, push_items);
3193 if (push_items < src_nritems) {
3194 /* leave at least 8 pointers in the node if
3195 * we aren't going to empty it
3197 if (src_nritems - push_items < 8) {
3198 if (push_items <= 8)
3204 push_items = min(src_nritems - 8, push_items);
3206 ret = tree_mod_log_eb_copy(dst, src, dst_nritems, 0, push_items);
3208 btrfs_abort_transaction(trans, ret);
3211 copy_extent_buffer(dst, src,
3212 btrfs_node_key_ptr_offset(dst_nritems),
3213 btrfs_node_key_ptr_offset(0),
3214 push_items * sizeof(struct btrfs_key_ptr));
3216 if (push_items < src_nritems) {
3218 * Don't call tree_mod_log_insert_move here, key removal was
3219 * already fully logged by tree_mod_log_eb_copy above.
3221 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3222 btrfs_node_key_ptr_offset(push_items),
3223 (src_nritems - push_items) *
3224 sizeof(struct btrfs_key_ptr));
3226 btrfs_set_header_nritems(src, src_nritems - push_items);
3227 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3228 btrfs_mark_buffer_dirty(src);
3229 btrfs_mark_buffer_dirty(dst);
3235 * try to push data from one node into the next node right in the tree.
3238 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3239 * error, and > 0 if there was no room in the right hand block.
3241 * this will only push up to 1/2 the contents of the left node over to the right node.
3243 static int balance_node_right(struct btrfs_trans_handle *trans,
3244 struct extent_buffer *dst,
3245 struct extent_buffer *src)
3247 struct btrfs_fs_info *fs_info = trans->fs_info;
3254 WARN_ON(btrfs_header_generation(src) != trans->transid);
3255 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3257 src_nritems = btrfs_header_nritems(src);
3258 dst_nritems = btrfs_header_nritems(dst);
3259 push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3260 if (push_items <= 0)
3263 if (src_nritems < 4)
3266 max_push = src_nritems / 2 + 1;
3267 /* don't try to empty the node */
3268 if (max_push >= src_nritems)
3271 if (max_push < push_items)
3272 push_items = max_push;
3274 ret = tree_mod_log_insert_move(dst, push_items, 0, dst_nritems);
3276 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3277 btrfs_node_key_ptr_offset(0),
3279 sizeof(struct btrfs_key_ptr));
3281 ret = tree_mod_log_eb_copy(dst, src, 0, src_nritems - push_items,
3284 btrfs_abort_transaction(trans, ret);
3287 copy_extent_buffer(dst, src,
3288 btrfs_node_key_ptr_offset(0),
3289 btrfs_node_key_ptr_offset(src_nritems - push_items),
3290 push_items * sizeof(struct btrfs_key_ptr));
3292 btrfs_set_header_nritems(src, src_nritems - push_items);
3293 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3295 btrfs_mark_buffer_dirty(src);
3296 btrfs_mark_buffer_dirty(dst);
3302 * helper function to insert a new root level in the tree.
3303 * A new node is allocated, and a single item is inserted to
3304 * point to the existing root
3306 * returns zero on success or < 0 on failure.
3308 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3309 struct btrfs_root *root,
3310 struct btrfs_path *path, int level)
3312 struct btrfs_fs_info *fs_info = root->fs_info;
3314 struct extent_buffer *lower;
3315 struct extent_buffer *c;
3316 struct extent_buffer *old;
3317 struct btrfs_disk_key lower_key;
3320 BUG_ON(path->nodes[level]);
3321 BUG_ON(path->nodes[level-1] != root->node);
3323 lower = path->nodes[level-1];
3325 btrfs_item_key(lower, &lower_key, 0);
3327 btrfs_node_key(lower, &lower_key, 0);
3329 c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level,
3330 root->node->start, 0);
3334 root_add_used(root, fs_info->nodesize);
3336 btrfs_set_header_nritems(c, 1);
3337 btrfs_set_node_key(c, &lower_key, 0);
3338 btrfs_set_node_blockptr(c, 0, lower->start);
3339 lower_gen = btrfs_header_generation(lower);
3340 WARN_ON(lower_gen != trans->transid);
3342 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3344 btrfs_mark_buffer_dirty(c);
3347 ret = tree_mod_log_insert_root(root->node, c, 0);
3349 rcu_assign_pointer(root->node, c);
3351 /* the super has an extra ref to root->node */
3352 free_extent_buffer(old);
3354 add_root_to_dirty_list(root);
3355 atomic_inc(&c->refs);
3356 path->nodes[level] = c;
3357 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3358 path->slots[level] = 0;
3363 * worker function to insert a single pointer in a node.
3364 * the node should have enough room for the pointer already
3366 * slot and level indicate where you want the key to go, and
3367 * blocknr is the block the key points to.
3369 static void insert_ptr(struct btrfs_trans_handle *trans,
3370 struct btrfs_path *path,
3371 struct btrfs_disk_key *key, u64 bytenr,
3372 int slot, int level)
3374 struct extent_buffer *lower;
3378 BUG_ON(!path->nodes[level]);
3379 btrfs_assert_tree_locked(path->nodes[level]);
3380 lower = path->nodes[level];
3381 nritems = btrfs_header_nritems(lower);
3382 BUG_ON(slot > nritems);
3383 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(trans->fs_info));
3384 if (slot != nritems) {
3386 ret = tree_mod_log_insert_move(lower, slot + 1, slot,
3390 memmove_extent_buffer(lower,
3391 btrfs_node_key_ptr_offset(slot + 1),
3392 btrfs_node_key_ptr_offset(slot),
3393 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3396 ret = tree_mod_log_insert_key(lower, slot, MOD_LOG_KEY_ADD,
3400 btrfs_set_node_key(lower, key, slot);
3401 btrfs_set_node_blockptr(lower, slot, bytenr);
3402 WARN_ON(trans->transid == 0);
3403 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3404 btrfs_set_header_nritems(lower, nritems + 1);
3405 btrfs_mark_buffer_dirty(lower);
3409 * split the node at the specified level in path in two.
3410 * The path is corrected to point to the appropriate node after the split
3412 * Before splitting this tries to make some room in the node by pushing
3413 * left and right, if either one works, it returns right away.
3415 * returns 0 on success and < 0 on failure
3417 static noinline int split_node(struct btrfs_trans_handle *trans,
3418 struct btrfs_root *root,
3419 struct btrfs_path *path, int level)
3421 struct btrfs_fs_info *fs_info = root->fs_info;
3422 struct extent_buffer *c;
3423 struct extent_buffer *split;
3424 struct btrfs_disk_key disk_key;
3429 c = path->nodes[level];
3430 WARN_ON(btrfs_header_generation(c) != trans->transid);
3431 if (c == root->node) {
3433 * trying to split the root, let's make a new one
3435 * tree mod log: We don't log the removal of the old root in
3436 * insert_new_root, because that root buffer will be kept as a
3437 * normal node. We are going to log removal of half of the
3438 * elements below with tree_mod_log_eb_copy. We're holding a
3439 * tree lock on the buffer, which is why we cannot race with
3440 * other tree_mod_log users.
3442 ret = insert_new_root(trans, root, path, level + 1);
3446 ret = push_nodes_for_insert(trans, root, path, level);
3447 c = path->nodes[level];
3448 if (!ret && btrfs_header_nritems(c) <
3449 BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3455 c_nritems = btrfs_header_nritems(c);
3456 mid = (c_nritems + 1) / 2;
3457 btrfs_node_key(c, &disk_key, mid);
3459 split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level,
3462 return PTR_ERR(split);
3464 root_add_used(root, fs_info->nodesize);
3465 ASSERT(btrfs_header_level(c) == level);
3467 ret = tree_mod_log_eb_copy(split, c, 0, mid, c_nritems - mid);
3469 btrfs_abort_transaction(trans, ret);
3472 copy_extent_buffer(split, c,
3473 btrfs_node_key_ptr_offset(0),
3474 btrfs_node_key_ptr_offset(mid),
3475 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3476 btrfs_set_header_nritems(split, c_nritems - mid);
3477 btrfs_set_header_nritems(c, mid);
3480 btrfs_mark_buffer_dirty(c);
3481 btrfs_mark_buffer_dirty(split);
3483 insert_ptr(trans, path, &disk_key, split->start,
3484 path->slots[level + 1] + 1, level + 1);
3486 if (path->slots[level] >= mid) {
3487 path->slots[level] -= mid;
3488 btrfs_tree_unlock(c);
3489 free_extent_buffer(c);
3490 path->nodes[level] = split;
3491 path->slots[level + 1] += 1;
3493 btrfs_tree_unlock(split);
3494 free_extent_buffer(split);
3500 * how many bytes are required to store the items in a leaf. start
3501 * and nr indicate which items in the leaf to check. This totals up the
3502 * space used both by the item structs and the item data
3504 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3506 struct btrfs_item *start_item;
3507 struct btrfs_item *end_item;
3509 int nritems = btrfs_header_nritems(l);
3510 int end = min(nritems, start + nr) - 1;
3514 start_item = btrfs_item_nr(start);
3515 end_item = btrfs_item_nr(end);
3516 data_len = btrfs_item_offset(l, start_item) +
3517 btrfs_item_size(l, start_item);
3518 data_len = data_len - btrfs_item_offset(l, end_item);
3519 data_len += sizeof(struct btrfs_item) * nr;
3520 WARN_ON(data_len < 0);
3525 * The space between the end of the leaf items and
3526 * the start of the leaf data. IOW, how much room
3527 * the leaf has left for both items and data
3529 noinline int btrfs_leaf_free_space(struct extent_buffer *leaf)
3531 struct btrfs_fs_info *fs_info = leaf->fs_info;
3532 int nritems = btrfs_header_nritems(leaf);
3535 ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3538 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3540 (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3541 leaf_space_used(leaf, 0, nritems), nritems);
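/*
 * Editor's example (not part of the original source): the check an insert
 * path makes before placing an item of data_len bytes into a leaf; each
 * item costs its data plus one struct btrfs_item header.
 */
static bool __maybe_unused example_item_fits(struct extent_buffer *leaf,
					     u32 data_len)
{
	return btrfs_leaf_free_space(leaf) >=
	       (int)(data_len + sizeof(struct btrfs_item));
}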
3547 * min slot controls the lowest index we're willing to push to the
3548 * right. We'll push up to and including min_slot, but no lower
3550 static noinline int __push_leaf_right(struct btrfs_path *path,
3551 int data_size, int empty,
3552 struct extent_buffer *right,
3553 int free_space, u32 left_nritems,
3556 struct btrfs_fs_info *fs_info = right->fs_info;
3557 struct extent_buffer *left = path->nodes[0];
3558 struct extent_buffer *upper = path->nodes[1];
3559 struct btrfs_map_token token;
3560 struct btrfs_disk_key disk_key;
3565 struct btrfs_item *item;
3574 nr = max_t(u32, 1, min_slot);
3576 if (path->slots[0] >= left_nritems)
3577 push_space += data_size;
3579 slot = path->slots[1];
3580 i = left_nritems - 1;
3582 item = btrfs_item_nr(i);
3584 if (!empty && push_items > 0) {
3585 if (path->slots[0] > i)
3587 if (path->slots[0] == i) {
3588 int space = btrfs_leaf_free_space(left);
3590 if (space + push_space * 2 > free_space)
3595 if (path->slots[0] == i)
3596 push_space += data_size;
3598 this_item_size = btrfs_item_size(left, item);
3599 if (this_item_size + sizeof(*item) + push_space > free_space)
3603 push_space += this_item_size + sizeof(*item);
3609 if (push_items == 0)
3612 WARN_ON(!empty && push_items == left_nritems);
3614 /* push left to right */
3615 right_nritems = btrfs_header_nritems(right);
3617 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3618 push_space -= leaf_data_end(left);
3620 /* make room in the right data area */
3621 data_end = leaf_data_end(right);
3622 memmove_extent_buffer(right,
3623 BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3624 BTRFS_LEAF_DATA_OFFSET + data_end,
3625 BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3627 /* copy from the left data area */
3628 copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3629 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3630 BTRFS_LEAF_DATA_OFFSET + leaf_data_end(left),
3633 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3634 btrfs_item_nr_offset(0),
3635 right_nritems * sizeof(struct btrfs_item));
3637 /* copy the items from left to right */
3638 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3639 btrfs_item_nr_offset(left_nritems - push_items),
3640 push_items * sizeof(struct btrfs_item));
3642 /* update the item pointers */
3643 btrfs_init_map_token(&token, right);
3644 right_nritems += push_items;
3645 btrfs_set_header_nritems(right, right_nritems);
3646 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3647 for (i = 0; i < right_nritems; i++) {
3648 item = btrfs_item_nr(i);
3649 push_space -= btrfs_token_item_size(&token, item);
3650 btrfs_set_token_item_offset(&token, item, push_space);
3653 left_nritems -= push_items;
3654 btrfs_set_header_nritems(left, left_nritems);
3657 btrfs_mark_buffer_dirty(left);
3659 btrfs_clean_tree_block(left);
3661 btrfs_mark_buffer_dirty(right);
3663 btrfs_item_key(right, &disk_key, 0);
3664 btrfs_set_node_key(upper, &disk_key, slot + 1);
3665 btrfs_mark_buffer_dirty(upper);
3667 /* then fixup the leaf pointer in the path */
3668 if (path->slots[0] >= left_nritems) {
3669 path->slots[0] -= left_nritems;
3670 if (btrfs_header_nritems(path->nodes[0]) == 0)
3671 btrfs_clean_tree_block(path->nodes[0]);
3672 btrfs_tree_unlock(path->nodes[0]);
3673 free_extent_buffer(path->nodes[0]);
3674 path->nodes[0] = right;
3675 path->slots[1] += 1;
3677 btrfs_tree_unlock(right);
3678 free_extent_buffer(right);
3683 btrfs_tree_unlock(right);
3684 free_extent_buffer(right);
3689 * push some data in the path leaf to the right, trying to free up at
3690 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3692 * returns 1 if the push failed because the other node didn't have enough
3693 * room, 0 if everything worked out and < 0 if there were major errors.
3695 * this will push starting from min_slot to the end of the leaf. It won't
3696 * push any slot lower than min_slot
3698 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3699 *root, struct btrfs_path *path,
3700 int min_data_size, int data_size,
3701 int empty, u32 min_slot)
3703 struct extent_buffer *left = path->nodes[0];
3704 struct extent_buffer *right;
3705 struct extent_buffer *upper;
3711 if (!path->nodes[1])
3714 slot = path->slots[1];
3715 upper = path->nodes[1];
3716 if (slot >= btrfs_header_nritems(upper) - 1)
3719 btrfs_assert_tree_locked(path->nodes[1]);
3721 right = btrfs_read_node_slot(upper, slot + 1);
3723 * slot + 1 is not valid or we fail to read the right node,
3724 * no big deal, just return.
3729 btrfs_tree_lock(right);
3730 btrfs_set_lock_blocking_write(right);
3732 free_space = btrfs_leaf_free_space(right);
3733 if (free_space < data_size)
3736 /* cow and double check */
3737 ret = btrfs_cow_block(trans, root, right, upper,
3742 free_space = btrfs_leaf_free_space(right);
3743 if (free_space < data_size)
3746 left_nritems = btrfs_header_nritems(left);
3747 if (left_nritems == 0)
3750 if (path->slots[0] == left_nritems && !empty) {
3751 /* Key greater than all keys in the leaf, right neighbor has
3752 * enough room for it and we're not emptying our leaf to delete
3753 * it, therefore use right neighbor to insert the new item and
3754 * no need to touch/dirty our left leaf. */
3755 btrfs_tree_unlock(left);
3756 free_extent_buffer(left);
3757 path->nodes[0] = right;
3763 return __push_leaf_right(path, min_data_size, empty,
3764 right, free_space, left_nritems, min_slot);
3766 btrfs_tree_unlock(right);
3767 free_extent_buffer(right);
3772 * push some data in the path leaf to the left, trying to free up at
3773 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3775 * max_slot can put a limit on how far into the leaf we'll push items. The
3776 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the items.
3779 static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
3780 int empty, struct extent_buffer *left,
3781 int free_space, u32 right_nritems,
3784 struct btrfs_fs_info *fs_info = left->fs_info;
3785 struct btrfs_disk_key disk_key;
3786 struct extent_buffer *right = path->nodes[0];
3790 struct btrfs_item *item;
3791 u32 old_left_nritems;
3795 u32 old_left_item_size;
3796 struct btrfs_map_token token;
3799 nr = min(right_nritems, max_slot);
3801 nr = min(right_nritems - 1, max_slot);
3803 for (i = 0; i < nr; i++) {
3804 item = btrfs_item_nr(i);
3806 if (!empty && push_items > 0) {
3807 if (path->slots[0] < i)
3809 if (path->slots[0] == i) {
3810 int space = btrfs_leaf_free_space(right);
3812 if (space + push_space * 2 > free_space)
3817 if (path->slots[0] == i)
3818 push_space += data_size;
3820 this_item_size = btrfs_item_size(right, item);
3821 if (this_item_size + sizeof(*item) + push_space > free_space)
3825 push_space += this_item_size + sizeof(*item);
3828 if (push_items == 0) {
3832 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3834 /* push data from right to left */
3835 copy_extent_buffer(left, right,
3836 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3837 btrfs_item_nr_offset(0),
3838 push_items * sizeof(struct btrfs_item));
3840 push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3841 btrfs_item_offset_nr(right, push_items - 1);
3843 copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3844 leaf_data_end(left) - push_space,
3845 BTRFS_LEAF_DATA_OFFSET +
3846 btrfs_item_offset_nr(right, push_items - 1),
3848 old_left_nritems = btrfs_header_nritems(left);
3849 BUG_ON(old_left_nritems <= 0);
3851 btrfs_init_map_token(&token, left);
3852 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3853 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3856 item = btrfs_item_nr(i);
3858 ioff = btrfs_token_item_offset(&token, item);
3859 btrfs_set_token_item_offset(&token, item,
3860 ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
3862 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3864 /* fixup right node */
3865 if (push_items > right_nritems)
3866 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3869 if (push_items < right_nritems) {
3870 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3871 leaf_data_end(right);
3872 memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3873 BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3874 BTRFS_LEAF_DATA_OFFSET +
3875 leaf_data_end(right), push_space);
3877 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3878 btrfs_item_nr_offset(push_items),
3879 (btrfs_header_nritems(right) - push_items) *
3880 sizeof(struct btrfs_item));
3883 btrfs_init_map_token(&token, right);
3884 right_nritems -= push_items;
3885 btrfs_set_header_nritems(right, right_nritems);
3886 push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3887 for (i = 0; i < right_nritems; i++) {
3888 item = btrfs_item_nr(i);
3890 push_space = push_space - btrfs_token_item_size(&token, item);
3891 btrfs_set_token_item_offset(&token, item, push_space);
3894 btrfs_mark_buffer_dirty(left);
3896 btrfs_mark_buffer_dirty(right);
3898 btrfs_clean_tree_block(right);
3900 btrfs_item_key(right, &disk_key, 0);
3901 fixup_low_keys(path, &disk_key, 1);
3903 /* then fixup the leaf pointer in the path */
3904 if (path->slots[0] < push_items) {
3905 path->slots[0] += old_left_nritems;
3906 btrfs_tree_unlock(path->nodes[0]);
3907 free_extent_buffer(path->nodes[0]);
3908 path->nodes[0] = left;
3909 path->slots[1] -= 1;
3911 btrfs_tree_unlock(left);
3912 free_extent_buffer(left);
3913 path->slots[0] -= push_items;
3915 BUG_ON(path->slots[0] < 0);
3918 btrfs_tree_unlock(left);
3919 free_extent_buffer(left);
3924 * push some data in the path leaf to the left, trying to free up at
3925 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3927 * max_slot can put a limit on how far into the leaf we'll push items. The
3928 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the items.
3931 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3932 *root, struct btrfs_path *path, int min_data_size,
3933 int data_size, int empty, u32 max_slot)
3935 struct extent_buffer *right = path->nodes[0];
3936 struct extent_buffer *left;
3942 slot = path->slots[1];
3945 if (!path->nodes[1])
3948 right_nritems = btrfs_header_nritems(right);
3949 if (right_nritems == 0)
3952 btrfs_assert_tree_locked(path->nodes[1]);
3954 left = btrfs_read_node_slot(path->nodes[1], slot - 1);
3956 * slot - 1 is not valid or we fail to read the left node,
3957 * no big deal, just return.
3962 btrfs_tree_lock(left);
3963 btrfs_set_lock_blocking_write(left);
3965 free_space = btrfs_leaf_free_space(left);
3966 if (free_space < data_size) {
3971 /* cow and double check */
3972 ret = btrfs_cow_block(trans, root, left,
3973 path->nodes[1], slot - 1, &left);
3975 /* we hit -ENOSPC, but it isn't fatal here */
3981 free_space = btrfs_leaf_free_space(left);
3982 if (free_space < data_size) {
3987 return __push_leaf_left(path, min_data_size,
3988 empty, left, free_space, right_nritems,
3991 btrfs_tree_unlock(left);
3992 free_extent_buffer(left);
3997 * split the path's leaf in two, making sure there is at least data_size
3998 * available for the resulting leaf level of the path.
4000 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4001 struct btrfs_path *path,
4002 struct extent_buffer *l,
4003 struct extent_buffer *right,
4004 int slot, int mid, int nritems)
4006 struct btrfs_fs_info *fs_info = trans->fs_info;
4010 struct btrfs_disk_key disk_key;
4011 struct btrfs_map_token token;
4013 nritems = nritems - mid;
4014 btrfs_set_header_nritems(right, nritems);
4015 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(l);
4017 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4018 btrfs_item_nr_offset(mid),
4019 nritems * sizeof(struct btrfs_item));
4021 copy_extent_buffer(right, l,
4022 BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4023 data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4024 leaf_data_end(l), data_copy_size);
4026 rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4028 btrfs_init_map_token(&token, right);
4029 for (i = 0; i < nritems; i++) {
4030 struct btrfs_item *item = btrfs_item_nr(i);
4033 ioff = btrfs_token_item_offset(&token, item);
4034 btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
4037 btrfs_set_header_nritems(l, mid);
4038 btrfs_item_key(right, &disk_key, 0);
4039 insert_ptr(trans, path, &disk_key, right->start, path->slots[1] + 1, 1);
4041 btrfs_mark_buffer_dirty(right);
4042 btrfs_mark_buffer_dirty(l);
4043 BUG_ON(path->slots[0] != slot);
4046 btrfs_tree_unlock(path->nodes[0]);
4047 free_extent_buffer(path->nodes[0]);
4048 path->nodes[0] = right;
4049 path->slots[0] -= mid;
4050 path->slots[1] += 1;
4052 btrfs_tree_unlock(right);
4053 free_extent_buffer(right);
4056 BUG_ON(path->slots[0] < 0);
4060 * double splits happen when we need to insert a big item in the middle
4061 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4062 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4065 * We avoid this by trying to push the items on either side of our target
4066 * into the adjacent leaves. If all goes well we can avoid the double split
4069 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4070 struct btrfs_root *root,
4071 struct btrfs_path *path,
4078 int space_needed = data_size;
4080 slot = path->slots[0];
4081 if (slot < btrfs_header_nritems(path->nodes[0]))
4082 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4085 * try to push all the items after our slot into the right leaf.
4088 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4095 nritems = btrfs_header_nritems(path->nodes[0]);
4097 * our goal is to get our slot at the start or end of a leaf. If
4098 * we've done so we're done
4100 if (path->slots[0] == 0 || path->slots[0] == nritems)
4103 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4106 /* try to push all the items before our slot into the next leaf */
4107 slot = path->slots[0];
4108 space_needed = data_size;
4110 space_needed -= btrfs_leaf_free_space(path->nodes[0]);
4111 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4124 * split the path's leaf in two, making sure there is at least data_size
4125 * available for the resulting leaf level of the path.
4127 * returns 0 if all went well and < 0 on failure.
4129 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4130 struct btrfs_root *root,
4131 const struct btrfs_key *ins_key,
4132 struct btrfs_path *path, int data_size,
4135 struct btrfs_disk_key disk_key;
4136 struct extent_buffer *l;
4140 struct extent_buffer *right;
4141 struct btrfs_fs_info *fs_info = root->fs_info;
4145 int num_doubles = 0;
4146 int tried_avoid_double = 0;
4149 slot = path->slots[0];
4150 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4151 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4154 /* first try to make some room by pushing left and right */
4155 if (data_size && path->nodes[1]) {
4156 int space_needed = data_size;
4158 if (slot < btrfs_header_nritems(l))
4159 space_needed -= btrfs_leaf_free_space(l);
4161 wret = push_leaf_right(trans, root, path, space_needed,
4162 space_needed, 0, 0);
4166 space_needed = data_size;
4168 space_needed -= btrfs_leaf_free_space(l);
4169 wret = push_leaf_left(trans, root, path, space_needed,
4170 space_needed, 0, (u32)-1);
4176 /* did the pushes work? */
4177 if (btrfs_leaf_free_space(l) >= data_size)
4181 if (!path->nodes[1]) {
4182 ret = insert_new_root(trans, root, path, 1);
4189 slot = path->slots[0];
4190 nritems = btrfs_header_nritems(l);
4191 mid = (nritems + 1) / 2;
4195 leaf_space_used(l, mid, nritems - mid) + data_size >
4196 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4197 if (slot >= nritems) {
4201 if (mid != nritems &&
4202 leaf_space_used(l, mid, nritems - mid) +
4203 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4204 if (data_size && !tried_avoid_double)
4205 goto push_for_double;
4211 if (leaf_space_used(l, 0, mid) + data_size >
4212 BTRFS_LEAF_DATA_SIZE(fs_info)) {
4213 if (!extend && data_size && slot == 0) {
4215 } else if ((extend || !data_size) && slot == 0) {
4219 if (mid != nritems &&
4220 leaf_space_used(l, mid, nritems - mid) +
4221 data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4222 if (data_size && !tried_avoid_double)
4223 goto push_for_double;
4231 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4233 btrfs_item_key(l, &disk_key, mid);
4235 right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0,
4238 return PTR_ERR(right);
4240 root_add_used(root, fs_info->nodesize);
4244 btrfs_set_header_nritems(right, 0);
4245 insert_ptr(trans, path, &disk_key,
4246 right->start, path->slots[1] + 1, 1);
4247 btrfs_tree_unlock(path->nodes[0]);
4248 free_extent_buffer(path->nodes[0]);
4249 path->nodes[0] = right;
4251 path->slots[1] += 1;
4253 btrfs_set_header_nritems(right, 0);
4254 insert_ptr(trans, path, &disk_key,
4255 right->start, path->slots[1], 1);
4256 btrfs_tree_unlock(path->nodes[0]);
4257 free_extent_buffer(path->nodes[0]);
4258 path->nodes[0] = right;
4260 if (path->slots[1] == 0)
4261 fixup_low_keys(path, &disk_key, 1);
4264 * We create a new leaf 'right' for the required ins_len and
4265 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4266 * the content of ins_len to 'right'.
4271 copy_for_split(trans, path, l, right, slot, mid, nritems);
4274 BUG_ON(num_doubles != 0);
4282 push_for_double_split(trans, root, path, data_size);
4283 tried_avoid_double = 1;
4284 if (btrfs_leaf_free_space(path->nodes[0]) >= data_size)
4289 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4290 struct btrfs_root *root,
4291 struct btrfs_path *path, int ins_len)
4293 struct btrfs_key key;
4294 struct extent_buffer *leaf;
4295 struct btrfs_file_extent_item *fi;
4300 leaf = path->nodes[0];
4301 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4303 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4304 key.type != BTRFS_EXTENT_CSUM_KEY);
4306 if (btrfs_leaf_free_space(leaf) >= ins_len)
4309 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4310 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4311 fi = btrfs_item_ptr(leaf, path->slots[0],
4312 struct btrfs_file_extent_item);
4313 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4315 btrfs_release_path(path);
4317 path->keep_locks = 1;
4318 path->search_for_split = 1;
4319 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4320 path->search_for_split = 0;
4327 leaf = path->nodes[0];
4328 /* if our item isn't there, return now */
4329 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4332 /* the leaf has changed, it now has room. return now */
4333 if (btrfs_leaf_free_space(path->nodes[0]) >= ins_len)
4336 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4337 fi = btrfs_item_ptr(leaf, path->slots[0],
4338 struct btrfs_file_extent_item);
4339 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4343 btrfs_set_path_blocking(path);
4344 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4348 path->keep_locks = 0;
4349 btrfs_unlock_up_safe(path, 1);
4352 path->keep_locks = 0;
4356 static noinline int split_item(struct btrfs_path *path,
4357 const struct btrfs_key *new_key,
4358 unsigned long split_offset)
4360 struct extent_buffer *leaf;
4361 struct btrfs_item *item;
4362 struct btrfs_item *new_item;
4368 struct btrfs_disk_key disk_key;
4370 leaf = path->nodes[0];
4371 BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
4373 btrfs_set_path_blocking(path);
4375 item = btrfs_item_nr(path->slots[0]);
4376 orig_offset = btrfs_item_offset(leaf, item);
4377 item_size = btrfs_item_size(leaf, item);
4379 buf = kmalloc(item_size, GFP_NOFS);
4383 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4384 path->slots[0]), item_size);
4386 slot = path->slots[0] + 1;
4387 nritems = btrfs_header_nritems(leaf);
4388 if (slot != nritems) {
4389 /* shift the items */
4390 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4391 btrfs_item_nr_offset(slot),
4392 (nritems - slot) * sizeof(struct btrfs_item));
4395 btrfs_cpu_key_to_disk(&disk_key, new_key);
4396 btrfs_set_item_key(leaf, &disk_key, slot);
4398 new_item = btrfs_item_nr(slot);
4400 btrfs_set_item_offset(leaf, new_item, orig_offset);
4401 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4403 btrfs_set_item_offset(leaf, item,
4404 orig_offset + item_size - split_offset);
4405 btrfs_set_item_size(leaf, item, split_offset);
4407 btrfs_set_header_nritems(leaf, nritems + 1);
4409 /* write the data for the start of the original item */
4410 write_extent_buffer(leaf, buf,
4411 btrfs_item_ptr_offset(leaf, path->slots[0]),
4414 /* write the data for the new item */
4415 write_extent_buffer(leaf, buf + split_offset,
4416 btrfs_item_ptr_offset(leaf, slot),
4417 item_size - split_offset);
4418 btrfs_mark_buffer_dirty(leaf);
4420 BUG_ON(btrfs_leaf_free_space(leaf) < 0);
4426 * This function splits a single item into two items,
4427 * giving 'new_key' to the new item and splitting the
4428 * old one at split_offset (from the start of the item).
4430 * The path may be released by this operation. After
4431 * the split, the path is pointing to the old item. The
4432 * new item is going to be in the same node as the old one.
4434 * Note, the item being split must be small enough to live alone on
4435 * a tree block with room for one extra struct btrfs_item
4437 * This allows us to split the item in place, keeping a lock on the
4438 * leaf the entire time.
4440 int btrfs_split_item(struct btrfs_trans_handle *trans,
4441 struct btrfs_root *root,
4442 struct btrfs_path *path,
4443 const struct btrfs_key *new_key,
4444 unsigned long split_offset)
4447 ret = setup_leaf_for_split(trans, root, path,
4448 sizeof(struct btrfs_item));
4452 ret = split_item(path, new_key, split_offset);
4457 * This function duplicates an item, giving 'new_key' to the new item.
4458 * It guarantees both items live in the same tree leaf and the new item
4459 * is contiguous with the original item.
4461 * This allows us to split a file extent in place, keeping a lock on the
4462 * leaf the entire time.
4464 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4465 struct btrfs_root *root,
4466 struct btrfs_path *path,
4467 const struct btrfs_key *new_key)
4469 struct extent_buffer *leaf;
4473 leaf = path->nodes[0];
4474 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4475 ret = setup_leaf_for_split(trans, root, path,
4476 item_size + sizeof(struct btrfs_item));
4481 setup_items_for_insert(root, path, new_key, &item_size,
4482 item_size, item_size +
4483 sizeof(struct btrfs_item), 1);
4484 leaf = path->nodes[0];
4485 memcpy_extent_buffer(leaf,
4486 btrfs_item_ptr_offset(leaf, path->slots[0]),
4487 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4493 * make the item pointed to by the path smaller. new_size indicates
4494 * how small to make it, and from_end tells us if we just chop bytes
4495 * off the end of the item or if we shift the item to chop bytes off the front.
4498 void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
4501 struct extent_buffer *leaf;
4502 struct btrfs_item *item;
4504 unsigned int data_end;
4505 unsigned int old_data_start;
4506 unsigned int old_size;
4507 unsigned int size_diff;
4509 struct btrfs_map_token token;
4511 leaf = path->nodes[0];
4512 slot = path->slots[0];
4514 old_size = btrfs_item_size_nr(leaf, slot);
4515 if (old_size == new_size)
4518 nritems = btrfs_header_nritems(leaf);
4519 data_end = leaf_data_end(leaf);
4521 old_data_start = btrfs_item_offset_nr(leaf, slot);
4523 size_diff = old_size - new_size;
4526 BUG_ON(slot >= nritems);
4529 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4531 /* first correct the data pointers */
4532 btrfs_init_map_token(&token, leaf);
4533 for (i = slot; i < nritems; i++) {
4535 item = btrfs_item_nr(i);
4537 ioff = btrfs_token_item_offset(&token, item);
4538 btrfs_set_token_item_offset(&token, item, ioff + size_diff);
4541 /* shift the data */
4543 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4544 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4545 data_end, old_data_start + new_size - data_end);
4547 struct btrfs_disk_key disk_key;
4550 btrfs_item_key(leaf, &disk_key, slot);
4552 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4554 struct btrfs_file_extent_item *fi;
4556 fi = btrfs_item_ptr(leaf, slot,
4557 struct btrfs_file_extent_item);
4558 fi = (struct btrfs_file_extent_item *)(
4559 (unsigned long)fi - size_diff);
4561 if (btrfs_file_extent_type(leaf, fi) ==
4562 BTRFS_FILE_EXTENT_INLINE) {
4563 ptr = btrfs_item_ptr_offset(leaf, slot);
4564 memmove_extent_buffer(leaf, ptr,
4566 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4570 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4571 data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4572 data_end, old_data_start - data_end);
4574 offset = btrfs_disk_key_offset(&disk_key);
4575 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4576 btrfs_set_item_key(leaf, &disk_key, slot);
4578 fixup_low_keys(path, &disk_key, 1);
4581 item = btrfs_item_nr(slot);
4582 btrfs_set_item_size(leaf, item, new_size);
4583 btrfs_mark_buffer_dirty(leaf);
4585 if (btrfs_leaf_free_space(leaf) < 0) {
4586 btrfs_print_leaf(leaf);
4592 * make the item pointed to by the path bigger, data_size is the added size.
4594 void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
4597 struct extent_buffer *leaf;
4598 struct btrfs_item *item;
4600 unsigned int data_end;
4601 unsigned int old_data;
4602 unsigned int old_size;
4604 struct btrfs_map_token token;
4606 leaf = path->nodes[0];
4608 nritems = btrfs_header_nritems(leaf);
4609 data_end = leaf_data_end(leaf);
4611 if (btrfs_leaf_free_space(leaf) < data_size) {
4612 btrfs_print_leaf(leaf);
4615 slot = path->slots[0];
4616 old_data = btrfs_item_end_nr(leaf, slot);
4619 if (slot >= nritems) {
4620 btrfs_print_leaf(leaf);
4621 btrfs_crit(leaf->fs_info, "slot %d too large, nritems %d",
4627 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4629 /* first correct the data pointers */
4630 btrfs_init_map_token(&token, leaf);
4631 for (i = slot; i < nritems; i++) {
4633 item = btrfs_item_nr(i);
4635 ioff = btrfs_token_item_offset(&token, item);
4636 btrfs_set_token_item_offset(&token, item, ioff - data_size);
4639 /* shift the data */
4640 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4641 data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4642 data_end, old_data - data_end);
4644 data_end = old_data;
4645 old_size = btrfs_item_size_nr(leaf, slot);
4646 item = btrfs_item_nr(slot);
4647 btrfs_set_item_size(leaf, item, old_size + data_size);
4648 btrfs_mark_buffer_dirty(leaf);
4650 if (btrfs_leaf_free_space(leaf) < 0) {
4651 btrfs_print_leaf(leaf);
4657 * this is a helper for btrfs_insert_empty_items, the main goal here is
4658 * to save stack depth by doing the bulk of the work in a function
4659 * that doesn't call btrfs_search_slot
4661 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4662 const struct btrfs_key *cpu_key, u32 *data_size,
4663 u32 total_data, u32 total_size, int nr)
4665 struct btrfs_fs_info *fs_info = root->fs_info;
4666 struct btrfs_item *item;
4669 unsigned int data_end;
4670 struct btrfs_disk_key disk_key;
4671 struct extent_buffer *leaf;
4673 struct btrfs_map_token token;
4675 if (path->slots[0] == 0) {
4676 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4677 fixup_low_keys(path, &disk_key, 1);
4679 btrfs_unlock_up_safe(path, 1);
4681 leaf = path->nodes[0];
4682 slot = path->slots[0];
4684 nritems = btrfs_header_nritems(leaf);
4685 data_end = leaf_data_end(leaf);
4687 if (btrfs_leaf_free_space(leaf) < total_size) {
4688 btrfs_print_leaf(leaf);
4689 btrfs_crit(fs_info, "not enough freespace need %u have %d",
4690 total_size, btrfs_leaf_free_space(leaf));
4694 btrfs_init_map_token(&token, leaf);
4695 if (slot != nritems) {
4696 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4698 if (old_data < data_end) {
4699 btrfs_print_leaf(leaf);
4700 btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4701 slot, old_data, data_end);
4705 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4707 /* first correct the data pointers */
4708 for (i = slot; i < nritems; i++) {
4711 item = btrfs_item_nr(i);
4712 ioff = btrfs_token_item_offset(&token, item);
4713 btrfs_set_token_item_offset(&token, item,
4716 /* shift the items */
4717 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4718 btrfs_item_nr_offset(slot),
4719 (nritems - slot) * sizeof(struct btrfs_item));
4721 /* shift the data */
4722 memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4723 data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4724 data_end, old_data - data_end);
4725 data_end = old_data;
4728 /* setup the item for the new data */
4729 for (i = 0; i < nr; i++) {
4730 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4731 btrfs_set_item_key(leaf, &disk_key, slot + i);
4732 item = btrfs_item_nr(slot + i);
4733 btrfs_set_token_item_offset(&token, item, data_end - data_size[i]);
4734 data_end -= data_size[i];
4735 btrfs_set_token_item_size(&token, item, data_size[i]);
4738 btrfs_set_header_nritems(leaf, nritems + nr);
4739 btrfs_mark_buffer_dirty(leaf);
4741 if (btrfs_leaf_free_space(leaf) < 0) {
4742 btrfs_print_leaf(leaf);
4748 * Given a key and some data, insert items into the tree.
4749 * This does all the path init required, making room in the tree if needed.
4751 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4752 struct btrfs_root *root,
4753 struct btrfs_path *path,
4754 const struct btrfs_key *cpu_key, u32 *data_size,
4763 for (i = 0; i < nr; i++)
4764 total_data += data_size[i];
4766 total_size = total_data + (nr * sizeof(struct btrfs_item));
4767 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4773 slot = path->slots[0];
4776 setup_items_for_insert(root, path, cpu_key, data_size,
4777 total_data, total_size, nr);
4782 * Given a key and some data, insert an item into the tree.
4783 * This does all the path init required, making room in the tree if needed.
4785 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4786 const struct btrfs_key *cpu_key, void *data,
4790 struct btrfs_path *path;
4791 struct extent_buffer *leaf;
4794 path = btrfs_alloc_path();
4797 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4799 leaf = path->nodes[0];
4800 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4801 write_extent_buffer(leaf, data, ptr, data_size);
4802 btrfs_mark_buffer_dirty(leaf);
4804 btrfs_free_path(path);
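/*
 * Editor's example (not part of the original source): inserting a small
 * fixed-size payload under a caller-chosen key inside an open transaction,
 * layered on btrfs_insert_item() above. The key and payload are
 * hypothetical.
 */
static int __maybe_unused example_insert_u64(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     const struct btrfs_key *key,
					     u64 value)
{
	return btrfs_insert_item(trans, root, key, &value, sizeof(value));
}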
4809 * delete the pointer from a given node.
4811 * the tree should have been previously balanced so the deletion does not empty a node.
4814 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4815 int level, int slot)
4817 struct extent_buffer *parent = path->nodes[level];
4821 nritems = btrfs_header_nritems(parent);
4822 if (slot != nritems - 1) {
4824 ret = tree_mod_log_insert_move(parent, slot, slot + 1,
4825 nritems - slot - 1);
4828 memmove_extent_buffer(parent,
4829 btrfs_node_key_ptr_offset(slot),
4830 btrfs_node_key_ptr_offset(slot + 1),
4831 sizeof(struct btrfs_key_ptr) *
4832 (nritems - slot - 1));
4834 ret = tree_mod_log_insert_key(parent, slot, MOD_LOG_KEY_REMOVE,
4840 btrfs_set_header_nritems(parent, nritems);
4841 if (nritems == 0 && parent == root->node) {
4842 BUG_ON(btrfs_header_level(root->node) != 1);
4843 /* just turn the root into a leaf and break */
4844 btrfs_set_header_level(root->node, 0);
4845 } else if (slot == 0) {
4846 struct btrfs_disk_key disk_key;
4848 btrfs_node_key(parent, &disk_key, 0);
4849 fixup_low_keys(path, &disk_key, level + 1);
4851 btrfs_mark_buffer_dirty(parent);
4855 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
4858 * This deletes the pointer in path->nodes[1] and frees the leaf
4859 * block extent. zero is returned if it all worked out, < 0 otherwise.
4861 * The path must have already been setup for deleting the leaf, including
4862 * all the proper balancing. path->nodes[1] must be locked.
4864 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4865 struct btrfs_root *root,
4866 struct btrfs_path *path,
4867 struct extent_buffer *leaf)
4869 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4870 del_ptr(root, path, 1, path->slots[1]);
4873 * btrfs_free_extent is expensive, we want to make sure we
4874 * aren't holding any locks when we call it
4876 btrfs_unlock_up_safe(path, 0);
4878 root_sub_used(root, leaf->len);
4880 atomic_inc(&leaf->refs);
4881 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4882 free_extent_buffer_stale(leaf);

/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(leaf);
		struct btrfs_map_token token;

		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
			      data_end + dsize,
			      BTRFS_LEAF_DATA_OFFSET + data_end,
			      last_off - data_end);

		btrfs_init_map_token(&token, leaf);
		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(&token, item);
			btrfs_set_token_item_offset(&token, item, ioff + dsize);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			btrfs_clean_tree_block(leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			atomic_inc(&leaf->refs);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
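
/*
 * Editor's illustration, not part of the original file: deleting a single
 * item by exact key, the pattern behind the btrfs_del_item() wrapper in
 * ctree.h.  The helper name is hypothetical.
 */
static int __maybe_unused example_delete_one(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len == -1 and cow == 1: search for deletion, COWing the leaf */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not present */

	btrfs_free_path(path);
	return ret;
}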

/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path.  And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance.  Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
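
/*
 * Editor's illustration, not part of the original file: stepping backwards
 * leaf by leaf with btrfs_prev_leaf().  The path is assumed to already
 * point at a valid leaf; locks may be dropped on every step, as the
 * comment above warns.  The helper name is hypothetical.
 */
static int __maybe_unused example_count_prev_leaves(struct btrfs_root *root,
						    struct btrfs_path *path,
						    u64 *leaves)
{
	int ret;

	*leaves = 0;
	while (1) {
		ret = btrfs_prev_leaf(root, path);
		if (ret < 0)
			return ret;	/* io error */
		if (ret > 0)
			return 0;	/* no lesser leaves remain */
		(*leaves)++;
	}
}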

/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = btrfs_bin_search(cur, min_key, &slot);
		if (sret < 0) {
			ret = sret;
			goto out;
		}

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameter.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = btrfs_read_node_slot(cur, slot);
		if (IS_ERR(cur)) {
			ret = PTR_ERR(cur);
			goto out;
		}

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
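
/*
 * Editor's illustration, not part of the original file: the scan pattern
 * btrfs_search_forward() exists for - visit every key stored in a block
 * newer than min_trans, restarting just past the last key returned.  The
 * helper name is hypothetical; the advance logic mirrors the tree-log and
 * defrag callers.
 */
static int __maybe_unused example_scan_newer_than(struct btrfs_root *root,
						  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret != 0)
			break;	/* < 0 on error, 1 when nothing newer is left */

		/* ... process the item at path->nodes[0], path->slots[0] ... */

		btrfs_release_path(path);

		/* advance min_key one past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ret > 0)
		ret = 0;
	btrfs_free_path(path);
	return ret;
}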

/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameter.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks && !path->skip_locking);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1] || path->skip_locking) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
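
/*
 * Editor's illustration, not part of the original file: the canonical
 * forward iteration over every item of a tree using btrfs_next_leaf().
 * The helper name and the start-at-zero key are hypothetical.
 */
static int __maybe_unused example_iterate_all_items(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* < 0 on error, 1 at end of tree */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* ... process the item with key 'key' here ... */

		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}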

int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later (especially in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check:
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next_rw_lock = path->locks[level];
		ret = read_block_for_search(root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
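
/*
 * Editor's note, not part of the original file: most callers do not use
 * btrfs_next_old_leaf() directly but go through the btrfs_next_item() /
 * btrfs_next_old_item() wrappers in ctree.h, which only fall back to the
 * next leaf once the current one is exhausted.  A sketch of that logic:
 */
#if 0	/* illustrative sketch; the real helpers live in ctree.h */
static inline int btrfs_next_old_item(struct btrfs_root *root,
				      struct btrfs_path *p, u64 time_seq)
{
	++p->slots[0];
	if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
		return btrfs_next_old_leaf(root, p, time_seq);
	return 0;
}
#endif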

/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
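
/*
 * Editor's illustration, not part of the original file: positioning a path
 * past everything for an objectid and walking back to the closest
 * preceding item of a wanted type.  The helper name is hypothetical.
 */
static int __maybe_unused example_find_previous(struct btrfs_root *root,
						u64 objectid, int type)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start just past every possible item for this objectid */
	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* 0 leaves the path on the match, 1 means no such item */
	ret = btrfs_previous_item(root, path, objectid, type);
out:
	btrfs_free_path(path);
	return ret;
}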

/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
			break;
	}
	return 1;
}
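
/*
 * Editor's illustration, not part of the original file: a sketch of how a
 * scrub-style caller might land on the closest extent record starting at
 * or before a logical address.  Passing 0 as min_objectid lets the walk
 * cross below 'bytenr'; the helper name is hypothetical.
 */
static int __maybe_unused example_prev_extent(struct btrfs_root *extent_root,
					      struct btrfs_path *path,
					      u64 bytenr)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	/* step back to the nearest EXTENT_ITEM or METADATA_ITEM */
	return btrfs_previous_extent_item(extent_root, path, 0);
}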