2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
23 #include "transaction.h"
24 #include "print-tree.h"
27 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
28 *root, struct btrfs_path *path, int level);
29 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
30 *root, struct btrfs_key *ins_key,
31 struct btrfs_path *path, int data_size, int extend);
32 static int push_node_left(struct btrfs_trans_handle *trans,
33 struct btrfs_root *root, struct extent_buffer *dst,
34 struct extent_buffer *src, int empty);
35 static int balance_node_right(struct btrfs_trans_handle *trans,
36 struct btrfs_root *root,
37 struct extent_buffer *dst_buf,
38 struct extent_buffer *src_buf);
39 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot);
41 static int setup_items_for_insert(struct btrfs_trans_handle *trans,
42 struct btrfs_root *root, struct btrfs_path *path,
43 struct btrfs_key *cpu_key, u32 *data_size,
44 u32 total_data, u32 total_size, int nr);
47 struct btrfs_path *btrfs_alloc_path(void)
49 struct btrfs_path *path;
50 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
57 * set all locked nodes in the path to blocking locks. This should
58 * be done before scheduling
60 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
63 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
64 if (p->nodes[i] && p->locks[i])
65 btrfs_set_lock_blocking(p->nodes[i]);
70 * reset all the locked nodes in the path to spinning locks.
72 * held is used to keep lockdep happy: when lockdep is enabled
73 * we set held to a blocking lock before we go around and
74 * retake all the spinlocks in the path. You can safely use NULL for held.
77 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
78 struct extent_buffer *held)
82 #ifdef CONFIG_DEBUG_LOCK_ALLOC
83 /* lockdep really cares that we take all of these spinlocks
84 * in the right order. If any of the locks in the path are not
85 * currently blocking, it is going to complain. So, make really
86 * really sure by forcing the path to blocking before we clear the blocking state.
90 btrfs_set_lock_blocking(held);
91 btrfs_set_path_blocking(p);
94 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
95 if (p->nodes[i] && p->locks[i])
96 btrfs_clear_lock_blocking(p->nodes[i]);
99 #ifdef CONFIG_DEBUG_LOCK_ALLOC
101 btrfs_clear_lock_blocking(held);
105 /* this also releases the path */
106 void btrfs_free_path(struct btrfs_path *p)
108 btrfs_release_path(NULL, p);
109 kmem_cache_free(btrfs_path_cachep, p);
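/*
 * Illustrative sketch, not part of the original file: the typical
 * lifecycle of a btrfs_path.  Callers allocate a path, search with it,
 * read items through path->nodes[0] / path->slots[0], and then free it
 * (btrfs_free_path releases any held locks and buffer references first).
 * The function name, key values and the read-only search flags below are
 * example assumptions, not taken from this file.
 */
static int __maybe_unused example_path_lifecycle(struct btrfs_root *root,
						 u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	/* trans == NULL, ins_len == 0, cow == 0: a read-only lookup */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	/* ... inspect path->nodes[0] at path->slots[0] here ... */

	btrfs_free_path(path);
	return ret;
}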
113 * path release drops references on the extent buffers in the path
114 * and it drops any locks held by this path
116 * It is safe to call this on paths that have no locks or extent buffers held.
118 noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
122 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
127 btrfs_tree_unlock(p->nodes[i]);
130 free_extent_buffer(p->nodes[i]);
136 * safely gets a reference on the root node of a tree. A lock
137 * is not taken, so a concurrent writer may put a different node
138 * at the root of the tree. See btrfs_lock_root_node for the looping required.
141 * The extent buffer returned by this has a reference taken, so
142 * it won't disappear. It may stop being the root of the tree
143 * at any time because there are no locks held.
145 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
147 struct extent_buffer *eb;
148 spin_lock(&root->node_lock);
150 extent_buffer_get(eb);
151 spin_unlock(&root->node_lock);
155 /* loop around taking references on and locking the root node of the
156 * tree until you end up with a lock on the root. A locked buffer
157 * is returned, with a reference held.
159 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
161 struct extent_buffer *eb;
164 eb = btrfs_root_node(root);
167 spin_lock(&root->node_lock);
168 if (eb == root->node) {
169 spin_unlock(&root->node_lock);
172 spin_unlock(&root->node_lock);
174 btrfs_tree_unlock(eb);
175 free_extent_buffer(eb);
180 /* cowonly roots (everything that is not a reference counted cow subvolume) just get
181 * put onto a simple dirty list. transaction.c walks this list to make sure they
182 * get properly updated on disk.
184 static void add_root_to_dirty_list(struct btrfs_root *root)
186 if (root->track_dirty && list_empty(&root->dirty_list)) {
187 list_add(&root->dirty_list,
188 &root->fs_info->dirty_cowonly_roots);
193 * used by snapshot creation to make a copy of a root for a tree with
194 * a given objectid. The buffer with the new root node is returned in
195 * cow_ret, and this func returns zero on success or a negative error code.
197 int btrfs_copy_root(struct btrfs_trans_handle *trans,
198 struct btrfs_root *root,
199 struct extent_buffer *buf,
200 struct extent_buffer **cow_ret, u64 new_root_objectid)
202 struct extent_buffer *cow;
206 struct btrfs_disk_key disk_key;
208 WARN_ON(root->ref_cows && trans->transid !=
209 root->fs_info->running_transaction->transid);
210 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
212 level = btrfs_header_level(buf);
213 nritems = btrfs_header_nritems(buf);
215 btrfs_item_key(buf, &disk_key, 0);
217 btrfs_node_key(buf, &disk_key, 0);
219 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
220 new_root_objectid, &disk_key, level,
225 copy_extent_buffer(cow, buf, 0, 0, cow->len);
226 btrfs_set_header_bytenr(cow, cow->start);
227 btrfs_set_header_generation(cow, trans->transid);
228 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
229 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
230 BTRFS_HEADER_FLAG_RELOC);
231 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
232 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
234 btrfs_set_header_owner(cow, new_root_objectid);
236 write_extent_buffer(cow, root->fs_info->fsid,
237 (unsigned long)btrfs_header_fsid(cow),
240 WARN_ON(btrfs_header_generation(buf) > trans->transid);
241 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
242 ret = btrfs_inc_ref(trans, root, cow, 1);
244 ret = btrfs_inc_ref(trans, root, cow, 0);
249 btrfs_mark_buffer_dirty(cow);
255 * check if the tree block can be shared by multiple trees
257 int btrfs_block_can_be_shared(struct btrfs_root *root,
258 struct extent_buffer *buf)
261 * Tree blocks not in reference counted trees and tree roots
262 * are never shared. If a block was allocated after the last
263 * snapshot and the block was not allocated by tree relocation,
264 * we know the block is not shared.
266 if (root->ref_cows &&
267 buf != root->node && buf != root->commit_root &&
268 (btrfs_header_generation(buf) <=
269 btrfs_root_last_snapshot(&root->root_item) ||
270 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
272 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
273 if (root->ref_cows &&
274 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
280 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
281 struct btrfs_root *root,
282 struct extent_buffer *buf,
283 struct extent_buffer *cow,
293 * Backrefs update rules:
295 * Always use full backrefs for extent pointers in tree blocks
296 * allocated by tree relocation.
298 * If a shared tree block is no longer referenced by its owner
299 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
300 * use full backrefs for extent pointers in the tree block.
302 * If a tree block is being relocated
303 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
304 * use full backrefs for extent pointers in the tree block.
305 * The reason for this is that some operations (such as dropping a tree)
306 * are only allowed on blocks that use full backrefs.
309 if (btrfs_block_can_be_shared(root, buf)) {
310 ret = btrfs_lookup_extent_info(trans, root, buf->start,
311 buf->len, &refs, &flags);
316 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
317 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
318 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
323 owner = btrfs_header_owner(buf);
324 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
325 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
328 if ((owner == root->root_key.objectid ||
329 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
330 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
331 ret = btrfs_inc_ref(trans, root, buf, 1);
334 if (root->root_key.objectid ==
335 BTRFS_TREE_RELOC_OBJECTID) {
336 ret = btrfs_dec_ref(trans, root, buf, 0);
338 ret = btrfs_inc_ref(trans, root, cow, 1);
341 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
344 if (root->root_key.objectid ==
345 BTRFS_TREE_RELOC_OBJECTID)
346 ret = btrfs_inc_ref(trans, root, cow, 1);
348 ret = btrfs_inc_ref(trans, root, cow, 0);
351 if (new_flags != 0) {
352 ret = btrfs_set_disk_extent_flags(trans, root,
359 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
360 if (root->root_key.objectid ==
361 BTRFS_TREE_RELOC_OBJECTID)
362 ret = btrfs_inc_ref(trans, root, cow, 1);
364 ret = btrfs_inc_ref(trans, root, cow, 0);
366 ret = btrfs_dec_ref(trans, root, buf, 1);
369 clean_tree_block(trans, root, buf);
376 * does the dirty work in cow of a single block. The parent block (if
377 * supplied) is updated to point to the new cow copy. The new buffer is marked
378 * dirty and returned locked. If you modify the block it needs to be marked dirty again.
381 * search_start -- an allocation hint for the new block
383 * empty_size -- a hint that you plan on doing more cow. This is the size in
384 * bytes the allocator should try to find free next to the block it returns.
385 * This is just a hint and may be ignored by the allocator.
387 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
388 struct btrfs_root *root,
389 struct extent_buffer *buf,
390 struct extent_buffer *parent, int parent_slot,
391 struct extent_buffer **cow_ret,
392 u64 search_start, u64 empty_size)
394 struct btrfs_disk_key disk_key;
395 struct extent_buffer *cow;
404 btrfs_assert_tree_locked(buf);
406 WARN_ON(root->ref_cows && trans->transid !=
407 root->fs_info->running_transaction->transid);
408 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
410 level = btrfs_header_level(buf);
413 btrfs_item_key(buf, &disk_key, 0);
415 btrfs_node_key(buf, &disk_key, 0);
417 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
419 parent_start = parent->start;
425 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
426 root->root_key.objectid, &disk_key,
427 level, search_start, empty_size);
431 /* cow is set to blocking by btrfs_init_new_buffer */
433 copy_extent_buffer(cow, buf, 0, 0, cow->len);
434 btrfs_set_header_bytenr(cow, cow->start);
435 btrfs_set_header_generation(cow, trans->transid);
436 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
437 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
438 BTRFS_HEADER_FLAG_RELOC);
439 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
440 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
442 btrfs_set_header_owner(cow, root->root_key.objectid);
444 write_extent_buffer(cow, root->fs_info->fsid,
445 (unsigned long)btrfs_header_fsid(cow),
448 update_ref_for_cow(trans, root, buf, cow, &last_ref);
451 btrfs_reloc_cow_block(trans, root, buf, cow);
453 if (buf == root->node) {
454 WARN_ON(parent && parent != buf);
455 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
456 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
457 parent_start = buf->start;
461 spin_lock(&root->node_lock);
463 extent_buffer_get(cow);
464 spin_unlock(&root->node_lock);
466 btrfs_free_tree_block(trans, root, buf, parent_start,
468 free_extent_buffer(buf);
469 add_root_to_dirty_list(root);
471 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
472 parent_start = parent->start;
476 WARN_ON(trans->transid != btrfs_header_generation(parent));
477 btrfs_set_node_blockptr(parent, parent_slot,
479 btrfs_set_node_ptr_generation(parent, parent_slot,
481 btrfs_mark_buffer_dirty(parent);
482 btrfs_free_tree_block(trans, root, buf, parent_start,
486 btrfs_tree_unlock(buf);
487 free_extent_buffer(buf);
488 btrfs_mark_buffer_dirty(cow);
493 static inline int should_cow_block(struct btrfs_trans_handle *trans,
494 struct btrfs_root *root,
495 struct extent_buffer *buf)
497 if (btrfs_header_generation(buf) == trans->transid &&
498 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
499 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
500 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
506 * cows a single block, see __btrfs_cow_block for the real work.
507 * This version of it has extra checks so that a block isn't cow'd more than
508 * once per transaction, as long as it hasn't been written yet
510 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
511 struct btrfs_root *root, struct extent_buffer *buf,
512 struct extent_buffer *parent, int parent_slot,
513 struct extent_buffer **cow_ret)
518 if (trans->transaction != root->fs_info->running_transaction) {
519 printk(KERN_CRIT "trans %llu running %llu\n",
520 (unsigned long long)trans->transid,
522 root->fs_info->running_transaction->transid);
525 if (trans->transid != root->fs_info->generation) {
526 printk(KERN_CRIT "trans %llu running %llu\n",
527 (unsigned long long)trans->transid,
528 (unsigned long long)root->fs_info->generation);
532 if (!should_cow_block(trans, root, buf)) {
537 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
540 btrfs_set_lock_blocking(parent);
541 btrfs_set_lock_blocking(buf);
543 ret = __btrfs_cow_block(trans, root, buf, parent,
544 parent_slot, cow_ret, search_start, 0);
549 * helper function for defrag to decide if two blocks pointed to by a
550 * node are actually close by
552 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
554 if (blocknr < other && other - (blocknr + blocksize) < 32768)
556 if (blocknr > other && blocknr - (other + blocksize) < 32768)
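/*
 * Illustrative example, not in the original source: with a 4K blocksize,
 * blocknr 0x10000 and other 0x14000 are "close" because the gap
 * 0x14000 - (0x10000 + 0x1000) = 12K is under the 32K threshold, while
 * other 0x20000 (a 60K gap) would not be.
 */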
562 * compare two keys in a memcmp fashion
564 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
568 btrfs_disk_key_to_cpu(&k1, disk);
570 return btrfs_comp_cpu_keys(&k1, k2);
574 * same as comp_keys, only with two struct btrfs_key arguments
576 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
578 if (k1->objectid > k2->objectid)
580 if (k1->objectid < k2->objectid)
582 if (k1->type > k2->type)
584 if (k1->type < k2->type)
586 if (k1->offset > k2->offset)
588 if (k1->offset < k2->offset)
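/*
 * Illustrative sketch, not part of the original file: keys compare by
 * objectid first, then type, then offset, so type and offset are never
 * consulted once the objectids differ.  The function name and key values
 * here are made up for the example.
 */
static int __maybe_unused example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 257, .type = 0, .offset = 0 };

	/* a sorts before b, so this returns -1 */
	return btrfs_comp_cpu_keys(&a, &b);
}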
594 * this is used by the defrag code to go through all the
595 * leaves pointed to by a node and reallocate them so that
596 * disk order is close to key order
598 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
599 struct btrfs_root *root, struct extent_buffer *parent,
600 int start_slot, int cache_only, u64 *last_ret,
601 struct btrfs_key *progress)
603 struct extent_buffer *cur;
606 u64 search_start = *last_ret;
616 int progress_passed = 0;
617 struct btrfs_disk_key disk_key;
619 parent_level = btrfs_header_level(parent);
620 if (cache_only && parent_level != 1)
623 if (trans->transaction != root->fs_info->running_transaction)
625 if (trans->transid != root->fs_info->generation)
628 parent_nritems = btrfs_header_nritems(parent);
629 blocksize = btrfs_level_size(root, parent_level - 1);
630 end_slot = parent_nritems;
632 if (parent_nritems == 1)
635 btrfs_set_lock_blocking(parent);
637 for (i = start_slot; i < end_slot; i++) {
640 if (!parent->map_token) {
641 map_extent_buffer(parent,
642 btrfs_node_key_ptr_offset(i),
643 sizeof(struct btrfs_key_ptr),
644 &parent->map_token, &parent->kaddr,
645 &parent->map_start, &parent->map_len,
648 btrfs_node_key(parent, &disk_key, i);
649 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
653 blocknr = btrfs_node_blockptr(parent, i);
654 gen = btrfs_node_ptr_generation(parent, i);
656 last_block = blocknr;
659 other = btrfs_node_blockptr(parent, i - 1);
660 close = close_blocks(blocknr, other, blocksize);
662 if (!close && i < end_slot - 2) {
663 other = btrfs_node_blockptr(parent, i + 1);
664 close = close_blocks(blocknr, other, blocksize);
667 last_block = blocknr;
670 if (parent->map_token) {
671 unmap_extent_buffer(parent, parent->map_token,
673 parent->map_token = NULL;
676 cur = btrfs_find_tree_block(root, blocknr, blocksize);
678 uptodate = btrfs_buffer_uptodate(cur, gen);
681 if (!cur || !uptodate) {
683 free_extent_buffer(cur);
687 cur = read_tree_block(root, blocknr,
689 } else if (!uptodate) {
690 btrfs_read_buffer(cur, gen);
693 if (search_start == 0)
694 search_start = last_block;
696 btrfs_tree_lock(cur);
697 btrfs_set_lock_blocking(cur);
698 err = __btrfs_cow_block(trans, root, cur, parent, i,
701 (end_slot - i) * blocksize));
703 btrfs_tree_unlock(cur);
704 free_extent_buffer(cur);
707 search_start = cur->start;
708 last_block = cur->start;
709 *last_ret = search_start;
710 btrfs_tree_unlock(cur);
711 free_extent_buffer(cur);
713 if (parent->map_token) {
714 unmap_extent_buffer(parent, parent->map_token,
716 parent->map_token = NULL;
722 * The leaf data grows from end-to-front in the node.
723 * This returns the offset of the start of the last item's data,
724 * which is where the leaf data stack stops.
726 static inline unsigned int leaf_data_end(struct btrfs_root *root,
727 struct extent_buffer *leaf)
729 u32 nr = btrfs_header_nritems(leaf);
731 return BTRFS_LEAF_DATA_SIZE(root);
732 return btrfs_item_offset_nr(leaf, nr - 1);
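/*
 * Illustrative diagram, not in the original source, of the leaf layout
 * leaf_data_end() relies on: struct btrfs_item headers grow forward from
 * the end of the leaf header, item data grows backward from the end of
 * the block, and the free space sits in between.  Offsets below are
 * relative to the end of the leaf header.
 *
 *   | header | item 0 | item 1 | ... free space ... | data 1 | data 0 |
 *            ^offset 0                              ^                 ^
 *                                     leaf_data_end()    BTRFS_LEAF_DATA_SIZE
 *
 * i.e. leaf_data_end() is the offset of the last item's data, the low
 * edge of the used data area.
 */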
736 * extra debugging checks to make sure all the keys in a node are
737 * well formed and in the proper order
739 static int check_node(struct btrfs_root *root, struct btrfs_path *path,
742 struct extent_buffer *parent = NULL;
743 struct extent_buffer *node = path->nodes[level];
744 struct btrfs_disk_key parent_key;
745 struct btrfs_disk_key node_key;
748 struct btrfs_key cpukey;
749 u32 nritems = btrfs_header_nritems(node);
751 if (path->nodes[level + 1])
752 parent = path->nodes[level + 1];
754 slot = path->slots[level];
755 BUG_ON(nritems == 0);
757 parent_slot = path->slots[level + 1];
758 btrfs_node_key(parent, &parent_key, parent_slot);
759 btrfs_node_key(node, &node_key, 0);
760 BUG_ON(memcmp(&parent_key, &node_key,
761 sizeof(struct btrfs_disk_key)));
762 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
763 btrfs_header_bytenr(node));
765 BUG_ON(nritems > BTRFS_NODEPTRS_PER_BLOCK(root));
767 btrfs_node_key_to_cpu(node, &cpukey, slot - 1);
768 btrfs_node_key(node, &node_key, slot);
769 BUG_ON(comp_keys(&node_key, &cpukey) <= 0);
771 if (slot < nritems - 1) {
772 btrfs_node_key_to_cpu(node, &cpukey, slot + 1);
773 btrfs_node_key(node, &node_key, slot);
774 BUG_ON(comp_keys(&node_key, &cpukey) >= 0);
780 * extra checking to make sure all the items in a leaf are
781 * well formed and in the proper order
783 static int check_leaf(struct btrfs_root *root, struct btrfs_path *path,
786 struct extent_buffer *leaf = path->nodes[level];
787 struct extent_buffer *parent = NULL;
789 struct btrfs_key cpukey;
790 struct btrfs_disk_key parent_key;
791 struct btrfs_disk_key leaf_key;
792 int slot = path->slots[0];
794 u32 nritems = btrfs_header_nritems(leaf);
796 if (path->nodes[level + 1])
797 parent = path->nodes[level + 1];
803 parent_slot = path->slots[level + 1];
804 btrfs_node_key(parent, &parent_key, parent_slot);
805 btrfs_item_key(leaf, &leaf_key, 0);
807 BUG_ON(memcmp(&parent_key, &leaf_key,
808 sizeof(struct btrfs_disk_key)));
809 BUG_ON(btrfs_node_blockptr(parent, parent_slot) !=
810 btrfs_header_bytenr(leaf));
812 if (slot != 0 && slot < nritems - 1) {
813 btrfs_item_key(leaf, &leaf_key, slot);
814 btrfs_item_key_to_cpu(leaf, &cpukey, slot - 1);
815 if (comp_keys(&leaf_key, &cpukey) <= 0) {
816 btrfs_print_leaf(root, leaf);
817 printk(KERN_CRIT "slot %d offset bad key\n", slot);
820 if (btrfs_item_offset_nr(leaf, slot - 1) !=
821 btrfs_item_end_nr(leaf, slot)) {
822 btrfs_print_leaf(root, leaf);
823 printk(KERN_CRIT "slot %d offset bad\n", slot);
827 if (slot < nritems - 1) {
828 btrfs_item_key(leaf, &leaf_key, slot);
829 btrfs_item_key_to_cpu(leaf, &cpukey, slot + 1);
830 BUG_ON(comp_keys(&leaf_key, &cpukey) >= 0);
831 if (btrfs_item_offset_nr(leaf, slot) !=
832 btrfs_item_end_nr(leaf, slot + 1)) {
833 btrfs_print_leaf(root, leaf);
834 printk(KERN_CRIT "slot %d offset bad\n", slot);
838 BUG_ON(btrfs_item_offset_nr(leaf, 0) +
839 btrfs_item_size_nr(leaf, 0) != BTRFS_LEAF_DATA_SIZE(root));
843 static noinline int check_block(struct btrfs_root *root,
844 struct btrfs_path *path, int level)
848 return check_leaf(root, path, level);
849 return check_node(root, path, level);
853 * search for key in the extent_buffer. The items start at offset p,
854 * and they are item_size apart. There are 'max' items in p.
856 * the slot in the array is returned via slot, and it points to
857 * the place where you would insert key if it is not found in the array.
860 * slot may point to max if the key is bigger than all of the keys in the array.
862 static noinline int generic_bin_search(struct extent_buffer *eb,
864 int item_size, struct btrfs_key *key,
871 struct btrfs_disk_key *tmp = NULL;
872 struct btrfs_disk_key unaligned;
873 unsigned long offset;
874 char *map_token = NULL;
876 unsigned long map_start = 0;
877 unsigned long map_len = 0;
881 mid = (low + high) / 2;
882 offset = p + mid * item_size;
884 if (!map_token || offset < map_start ||
885 (offset + sizeof(struct btrfs_disk_key)) >
886 map_start + map_len) {
888 unmap_extent_buffer(eb, map_token, KM_USER0);
892 err = map_private_extent_buffer(eb, offset,
893 sizeof(struct btrfs_disk_key),
895 &map_start, &map_len, KM_USER0);
898 tmp = (struct btrfs_disk_key *)(kaddr + offset -
901 read_extent_buffer(eb, &unaligned,
902 offset, sizeof(unaligned));
907 tmp = (struct btrfs_disk_key *)(kaddr + offset -
910 ret = comp_keys(tmp, key);
919 unmap_extent_buffer(eb, map_token, KM_USER0);
925 unmap_extent_buffer(eb, map_token, KM_USER0);
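/*
 * Illustrative sketch, not part of the original file, of the algorithm
 * generic_bin_search() implements, with the extent_buffer page mapping and
 * unaligned-key handling stripped out.  The name bin_search_sketch and the
 * 'cmp' callback (standing in for comp_keys()) are assumptions for the
 * example; the record layout (fixed-size records starting at a byte offset)
 * matches the description above.
 */
static int __maybe_unused bin_search_sketch(const char *base, int item_size,
					    int max, const void *key,
					    int (*cmp)(const void *rec,
						       const void *key),
					    int *slot)
{
	int low = 0, high = max;

	while (low < high) {
		int mid = (low + high) / 2;
		int ret = cmp(base + mid * item_size, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;	/* exact match */
		}
	}
	*slot = low;			/* insertion point, may equal max */
	return 1;			/* not found */
}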
930 * simple bin_search frontend that does the right thing for leaves vs. nodes
933 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
934 int level, int *slot)
937 return generic_bin_search(eb,
938 offsetof(struct btrfs_leaf, items),
939 sizeof(struct btrfs_item),
940 key, btrfs_header_nritems(eb),
943 return generic_bin_search(eb,
944 offsetof(struct btrfs_node, ptrs),
945 sizeof(struct btrfs_key_ptr),
946 key, btrfs_header_nritems(eb),
952 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
953 int level, int *slot)
955 return bin_search(eb, key, level, slot);
958 static void root_add_used(struct btrfs_root *root, u32 size)
960 spin_lock(&root->accounting_lock);
961 btrfs_set_root_used(&root->root_item,
962 btrfs_root_used(&root->root_item) + size);
963 spin_unlock(&root->accounting_lock);
966 static void root_sub_used(struct btrfs_root *root, u32 size)
968 spin_lock(&root->accounting_lock);
969 btrfs_set_root_used(&root->root_item,
970 btrfs_root_used(&root->root_item) - size);
971 spin_unlock(&root->accounting_lock);
974 /* given a node and slot number, this reads the block it points to. The
975 * extent buffer is returned with a reference taken (but unlocked).
976 * NULL is returned on error.
978 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
979 struct extent_buffer *parent, int slot)
981 int level = btrfs_header_level(parent);
984 if (slot >= btrfs_header_nritems(parent))
989 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
990 btrfs_level_size(root, level - 1),
991 btrfs_node_ptr_generation(parent, slot));
995 * node level balancing, used to make sure nodes are in proper order for
996 * item deletion. We balance from the top down, so we have to make sure
997 * that a deletion won't leave a node completely empty later on.
999 static noinline int balance_level(struct btrfs_trans_handle *trans,
1000 struct btrfs_root *root,
1001 struct btrfs_path *path, int level)
1003 struct extent_buffer *right = NULL;
1004 struct extent_buffer *mid;
1005 struct extent_buffer *left = NULL;
1006 struct extent_buffer *parent = NULL;
1010 int orig_slot = path->slots[level];
1011 int err_on_enospc = 0;
1017 mid = path->nodes[level];
1019 WARN_ON(!path->locks[level]);
1020 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1022 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1024 if (level < BTRFS_MAX_LEVEL - 1)
1025 parent = path->nodes[level + 1];
1026 pslot = path->slots[level + 1];
1029 * deal with the case where there is only one pointer in the root
1030 * by promoting the node below to a root
1033 struct extent_buffer *child;
1035 if (btrfs_header_nritems(mid) != 1)
1038 /* promote the child to a root */
1039 child = read_node_slot(root, mid, 0);
1041 btrfs_tree_lock(child);
1042 btrfs_set_lock_blocking(child);
1043 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1045 btrfs_tree_unlock(child);
1046 free_extent_buffer(child);
1050 spin_lock(&root->node_lock);
1052 spin_unlock(&root->node_lock);
1054 add_root_to_dirty_list(root);
1055 btrfs_tree_unlock(child);
1057 path->locks[level] = 0;
1058 path->nodes[level] = NULL;
1059 clean_tree_block(trans, root, mid);
1060 btrfs_tree_unlock(mid);
1061 /* once for the path */
1062 free_extent_buffer(mid);
1064 root_sub_used(root, mid->len);
1065 btrfs_free_tree_block(trans, root, mid, 0, 1);
1066 /* once for the root ptr */
1067 free_extent_buffer(mid);
1070 if (btrfs_header_nritems(mid) >
1071 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1074 if (btrfs_header_nritems(mid) < 2)
1077 left = read_node_slot(root, parent, pslot - 1);
1079 btrfs_tree_lock(left);
1080 btrfs_set_lock_blocking(left);
1081 wret = btrfs_cow_block(trans, root, left,
1082 parent, pslot - 1, &left);
1088 right = read_node_slot(root, parent, pslot + 1);
1090 btrfs_tree_lock(right);
1091 btrfs_set_lock_blocking(right);
1092 wret = btrfs_cow_block(trans, root, right,
1093 parent, pslot + 1, &right);
1100 /* first, try to make some room in the middle buffer */
1102 orig_slot += btrfs_header_nritems(left);
1103 wret = push_node_left(trans, root, left, mid, 1);
1106 if (btrfs_header_nritems(mid) < 2)
1111 * then try to empty the right most buffer into the middle
1114 wret = push_node_left(trans, root, mid, right, 1);
1115 if (wret < 0 && wret != -ENOSPC)
1117 if (btrfs_header_nritems(right) == 0) {
1118 clean_tree_block(trans, root, right);
1119 btrfs_tree_unlock(right);
1120 wret = del_ptr(trans, root, path, level + 1, pslot +
1124 root_sub_used(root, right->len);
1125 btrfs_free_tree_block(trans, root, right, 0, 1);
1126 free_extent_buffer(right);
1129 struct btrfs_disk_key right_key;
1130 btrfs_node_key(right, &right_key, 0);
1131 btrfs_set_node_key(parent, &right_key, pslot + 1);
1132 btrfs_mark_buffer_dirty(parent);
1135 if (btrfs_header_nritems(mid) == 1) {
1137 * we're not allowed to leave a node with one item in the
1138 * tree during a delete. A deletion from lower in the tree
1139 * could try to delete the only pointer in this node.
1140 * So, pull some keys from the left.
1141 * There has to be a left pointer at this point because
1142 * otherwise we would have pulled some pointers from the right.
1146 wret = balance_node_right(trans, root, mid, left);
1152 wret = push_node_left(trans, root, left, mid, 1);
1158 if (btrfs_header_nritems(mid) == 0) {
1159 clean_tree_block(trans, root, mid);
1160 btrfs_tree_unlock(mid);
1161 wret = del_ptr(trans, root, path, level + 1, pslot);
1164 root_sub_used(root, mid->len);
1165 btrfs_free_tree_block(trans, root, mid, 0, 1);
1166 free_extent_buffer(mid);
1169 /* update the parent key to reflect our changes */
1170 struct btrfs_disk_key mid_key;
1171 btrfs_node_key(mid, &mid_key, 0);
1172 btrfs_set_node_key(parent, &mid_key, pslot);
1173 btrfs_mark_buffer_dirty(parent);
1176 /* update the path */
1178 if (btrfs_header_nritems(left) > orig_slot) {
1179 extent_buffer_get(left);
1180 /* left was locked after cow */
1181 path->nodes[level] = left;
1182 path->slots[level + 1] -= 1;
1183 path->slots[level] = orig_slot;
1185 btrfs_tree_unlock(mid);
1186 free_extent_buffer(mid);
1189 orig_slot -= btrfs_header_nritems(left);
1190 path->slots[level] = orig_slot;
1193 /* double check we haven't messed things up */
1194 check_block(root, path, level);
1196 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1200 btrfs_tree_unlock(right);
1201 free_extent_buffer(right);
1204 if (path->nodes[level] != left)
1205 btrfs_tree_unlock(left);
1206 free_extent_buffer(left);
1211 /* Node balancing for insertion. Here we only split or push nodes around
1212 * when they are completely full. This is also done top down, so we
1213 * have to be pessimistic.
1215 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1216 struct btrfs_root *root,
1217 struct btrfs_path *path, int level)
1219 struct extent_buffer *right = NULL;
1220 struct extent_buffer *mid;
1221 struct extent_buffer *left = NULL;
1222 struct extent_buffer *parent = NULL;
1226 int orig_slot = path->slots[level];
1232 mid = path->nodes[level];
1233 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1234 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1236 if (level < BTRFS_MAX_LEVEL - 1)
1237 parent = path->nodes[level + 1];
1238 pslot = path->slots[level + 1];
1243 left = read_node_slot(root, parent, pslot - 1);
1245 /* first, try to make some room in the middle buffer */
1249 btrfs_tree_lock(left);
1250 btrfs_set_lock_blocking(left);
1252 left_nr = btrfs_header_nritems(left);
1253 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1256 ret = btrfs_cow_block(trans, root, left, parent,
1261 wret = push_node_left(trans, root,
1268 struct btrfs_disk_key disk_key;
1269 orig_slot += left_nr;
1270 btrfs_node_key(mid, &disk_key, 0);
1271 btrfs_set_node_key(parent, &disk_key, pslot);
1272 btrfs_mark_buffer_dirty(parent);
1273 if (btrfs_header_nritems(left) > orig_slot) {
1274 path->nodes[level] = left;
1275 path->slots[level + 1] -= 1;
1276 path->slots[level] = orig_slot;
1277 btrfs_tree_unlock(mid);
1278 free_extent_buffer(mid);
1281 btrfs_header_nritems(left);
1282 path->slots[level] = orig_slot;
1283 btrfs_tree_unlock(left);
1284 free_extent_buffer(left);
1288 btrfs_tree_unlock(left);
1289 free_extent_buffer(left);
1291 right = read_node_slot(root, parent, pslot + 1);
1294 * then try to empty the right most buffer into the middle
1299 btrfs_tree_lock(right);
1300 btrfs_set_lock_blocking(right);
1302 right_nr = btrfs_header_nritems(right);
1303 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1306 ret = btrfs_cow_block(trans, root, right,
1312 wret = balance_node_right(trans, root,
1319 struct btrfs_disk_key disk_key;
1321 btrfs_node_key(right, &disk_key, 0);
1322 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1323 btrfs_mark_buffer_dirty(parent);
1325 if (btrfs_header_nritems(mid) <= orig_slot) {
1326 path->nodes[level] = right;
1327 path->slots[level + 1] += 1;
1328 path->slots[level] = orig_slot -
1329 btrfs_header_nritems(mid);
1330 btrfs_tree_unlock(mid);
1331 free_extent_buffer(mid);
1333 btrfs_tree_unlock(right);
1334 free_extent_buffer(right);
1338 btrfs_tree_unlock(right);
1339 free_extent_buffer(right);
1345 * readahead one full node of leaves, finding things that are close
1346 * to the block in 'slot', and triggering ra on them.
1348 static void reada_for_search(struct btrfs_root *root,
1349 struct btrfs_path *path,
1350 int level, int slot, u64 objectid)
1352 struct extent_buffer *node;
1353 struct btrfs_disk_key disk_key;
1358 int direction = path->reada;
1359 struct extent_buffer *eb;
1367 if (!path->nodes[level])
1370 node = path->nodes[level];
1372 search = btrfs_node_blockptr(node, slot);
1373 blocksize = btrfs_level_size(root, level - 1);
1374 eb = btrfs_find_tree_block(root, search, blocksize);
1376 free_extent_buffer(eb);
1382 nritems = btrfs_header_nritems(node);
1385 if (direction < 0) {
1389 } else if (direction > 0) {
1394 if (path->reada < 0 && objectid) {
1395 btrfs_node_key(node, &disk_key, nr);
1396 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1399 search = btrfs_node_blockptr(node, nr);
1400 if ((search <= target && target - search <= 65536) ||
1401 (search > target && search - target <= 65536)) {
1402 readahead_tree_block(root, search, blocksize,
1403 btrfs_node_ptr_generation(node, nr));
1407 if ((nread > 65536 || nscan > 32))
1413 * returns -EAGAIN if it had to drop the path, or zero if everything was in cache.
1416 static noinline int reada_for_balance(struct btrfs_root *root,
1417 struct btrfs_path *path, int level)
1421 struct extent_buffer *parent;
1422 struct extent_buffer *eb;
1429 parent = path->nodes[level + 1];
1433 nritems = btrfs_header_nritems(parent);
1434 slot = path->slots[level + 1];
1435 blocksize = btrfs_level_size(root, level);
1438 block1 = btrfs_node_blockptr(parent, slot - 1);
1439 gen = btrfs_node_ptr_generation(parent, slot - 1);
1440 eb = btrfs_find_tree_block(root, block1, blocksize);
1441 if (eb && btrfs_buffer_uptodate(eb, gen))
1443 free_extent_buffer(eb);
1445 if (slot + 1 < nritems) {
1446 block2 = btrfs_node_blockptr(parent, slot + 1);
1447 gen = btrfs_node_ptr_generation(parent, slot + 1);
1448 eb = btrfs_find_tree_block(root, block2, blocksize);
1449 if (eb && btrfs_buffer_uptodate(eb, gen))
1451 free_extent_buffer(eb);
1453 if (block1 || block2) {
1456 /* release the whole path */
1457 btrfs_release_path(root, path);
1459 /* read the blocks */
1461 readahead_tree_block(root, block1, blocksize, 0);
1463 readahead_tree_block(root, block2, blocksize, 0);
1466 eb = read_tree_block(root, block1, blocksize, 0);
1467 free_extent_buffer(eb);
1470 eb = read_tree_block(root, block2, blocksize, 0);
1471 free_extent_buffer(eb);
1479 * when we walk down the tree, it is usually safe to unlock the higher layers
1480 * in the tree. The exceptions are when our path goes through slot 0, because
1481 * operations on the tree might require changing key pointers higher up in the tree.
1484 * callers might also have set path->keep_locks, which tells this code to keep
1485 * the lock if the path points to the last slot in the block. This is part of
1486 * walking through the tree, and selecting the next slot in the higher block.
1488 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1489 * if lowest_unlock is 1, level 0 won't be unlocked
1491 static noinline void unlock_up(struct btrfs_path *path, int level,
1495 int skip_level = level;
1497 struct extent_buffer *t;
1499 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1500 if (!path->nodes[i])
1502 if (!path->locks[i])
1504 if (!no_skips && path->slots[i] == 0) {
1508 if (!no_skips && path->keep_locks) {
1511 nritems = btrfs_header_nritems(t);
1512 if (nritems < 1 || path->slots[i] >= nritems - 1) {
1517 if (skip_level < i && i >= lowest_unlock)
1521 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
1522 btrfs_tree_unlock(t);
1529 * This releases any locks held in the path starting at level and
1530 * going all the way up to the root.
1532 * btrfs_search_slot will keep the lock held on higher nodes in a few
1533 * corner cases, such as COW of the block at slot zero in the node. This
1534 * ignores those rules, and it should only be called when there are no
1535 * more updates to be done higher up in the tree.
1537 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1541 if (path->keep_locks)
1544 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1545 if (!path->nodes[i])
1547 if (!path->locks[i])
1549 btrfs_tree_unlock(path->nodes[i]);
1555 * helper function for btrfs_search_slot. The goal is to find a block
1556 * in cache without setting the path to blocking. If we find the block
1557 * we return zero and the path is unchanged.
1559 * If we can't find the block, we set the path blocking and do some
1560 * reada. -EAGAIN is returned and the search must be repeated.
1563 read_block_for_search(struct btrfs_trans_handle *trans,
1564 struct btrfs_root *root, struct btrfs_path *p,
1565 struct extent_buffer **eb_ret, int level, int slot,
1566 struct btrfs_key *key)
1571 struct extent_buffer *b = *eb_ret;
1572 struct extent_buffer *tmp;
1575 blocknr = btrfs_node_blockptr(b, slot);
1576 gen = btrfs_node_ptr_generation(b, slot);
1577 blocksize = btrfs_level_size(root, level - 1);
1579 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
1580 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
1582 * we found an up to date block without sleeping, return right away.
1590 * reduce lock contention at high levels
1591 * of the btree by dropping locks before
1592 * we read. Don't release the lock on the current
1593 * level because we need to walk this node to figure
1594 * out which blocks to read.
1596 btrfs_unlock_up_safe(p, level + 1);
1597 btrfs_set_path_blocking(p);
1600 free_extent_buffer(tmp);
1602 reada_for_search(root, p, level, slot, key->objectid);
1604 btrfs_release_path(NULL, p);
1607 tmp = read_tree_block(root, blocknr, blocksize, 0);
1610 * If the read above didn't mark this buffer up to date,
1611 * it will never end up being up to date. Set ret to -EIO now
1612 * and give up so that our caller doesn't loop forever
1615 if (!btrfs_buffer_uptodate(tmp, 0))
1617 free_extent_buffer(tmp);
1623 * helper function for btrfs_search_slot. This does all of the checks
1624 * for node-level blocks and does any balancing required based on ins_len.
1627 * If no extra work was required, zero is returned. If we had to
1628 * drop the path, -EAGAIN is returned and btrfs_search_slot must repeat the search.
1632 setup_nodes_for_search(struct btrfs_trans_handle *trans,
1633 struct btrfs_root *root, struct btrfs_path *p,
1634 struct extent_buffer *b, int level, int ins_len)
1637 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1638 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1641 sret = reada_for_balance(root, p, level);
1645 btrfs_set_path_blocking(p);
1646 sret = split_node(trans, root, p, level);
1647 btrfs_clear_path_blocking(p, NULL);
1654 b = p->nodes[level];
1655 } else if (ins_len < 0 && btrfs_header_nritems(b) <
1656 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
1659 sret = reada_for_balance(root, p, level);
1663 btrfs_set_path_blocking(p);
1664 sret = balance_level(trans, root, p, level);
1665 btrfs_clear_path_blocking(p, NULL);
1671 b = p->nodes[level];
1673 btrfs_release_path(NULL, p);
1676 BUG_ON(btrfs_header_nritems(b) == 1);
1687 * look for key in the tree. path is filled in with nodes along the way.
1688 * If key is found, we return zero and you can find the item in the leaf
1689 * level of the path (level 0)
1691 * If the key isn't found, the path points to the slot where it should
1692 * be inserted, and 1 is returned. If there are other errors during the
1693 * search a negative error number is returned.
1695 * if ins_len > 0, nodes and leaves will be split as we walk down the
1696 * tree. If ins_len < 0, nodes will be merged as we walk down the tree (if possible).
1699 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1700 *root, struct btrfs_key *key, struct btrfs_path *p, int
1703 struct extent_buffer *b;
1708 int lowest_unlock = 1;
1709 u8 lowest_level = 0;
1711 lowest_level = p->lowest_level;
1712 WARN_ON(lowest_level && ins_len > 0);
1713 WARN_ON(p->nodes[0] != NULL);
1719 if (p->search_commit_root) {
1720 b = root->commit_root;
1721 extent_buffer_get(b);
1722 if (!p->skip_locking)
1725 if (p->skip_locking)
1726 b = btrfs_root_node(root);
1728 b = btrfs_lock_root_node(root);
1732 level = btrfs_header_level(b);
1735 * setup the path here so we can release it under lock
1736 * contention with the cow code
1738 p->nodes[level] = b;
1739 if (!p->skip_locking)
1740 p->locks[level] = 1;
1744 * if we don't really need to cow this block
1745 * then we don't want to set the path blocking,
1746 * so we test it here
1748 if (!should_cow_block(trans, root, b))
1751 btrfs_set_path_blocking(p);
1753 err = btrfs_cow_block(trans, root, b,
1754 p->nodes[level + 1],
1755 p->slots[level + 1], &b);
1762 BUG_ON(!cow && ins_len);
1763 if (level != btrfs_header_level(b))
1765 level = btrfs_header_level(b);
1767 p->nodes[level] = b;
1768 if (!p->skip_locking)
1769 p->locks[level] = 1;
1771 btrfs_clear_path_blocking(p, NULL);
1774 * we have a lock on b and as long as we aren't changing
1775 * the tree, there is no way for the items in b to change. It is safe
1776 * It is safe to drop the lock on our parent before we
1777 * go through the expensive btree search on b.
1779 * If cow is true, then we might be changing slot zero,
1780 * which may require changing the parent. So, we can't
1781 * drop the lock until after we know which slot we're operating on.
1785 btrfs_unlock_up_safe(p, level + 1);
1787 ret = check_block(root, p, level);
1793 ret = bin_search(b, key, level, &slot);
1797 if (ret && slot > 0) {
1801 p->slots[level] = slot;
1802 err = setup_nodes_for_search(trans, root, p, b, level,
1810 b = p->nodes[level];
1811 slot = p->slots[level];
1813 unlock_up(p, level, lowest_unlock);
1815 if (level == lowest_level) {
1821 err = read_block_for_search(trans, root, p,
1822 &b, level, slot, key);
1830 if (!p->skip_locking) {
1831 btrfs_clear_path_blocking(p, NULL);
1832 err = btrfs_try_spin_lock(b);
1835 btrfs_set_path_blocking(p);
1837 btrfs_clear_path_blocking(p, b);
1841 p->slots[level] = slot;
1843 btrfs_leaf_free_space(root, b) < ins_len) {
1844 btrfs_set_path_blocking(p);
1845 err = split_leaf(trans, root, key,
1846 p, ins_len, ret == 0);
1847 btrfs_clear_path_blocking(p, NULL);
1855 if (!p->search_for_split)
1856 unlock_up(p, level, lowest_unlock);
1863 * we don't really know what they plan on doing with the path
1864 * from here on, so for now just mark it as blocking
1866 if (!p->leave_spinning)
1867 btrfs_set_path_blocking(p);
1869 btrfs_release_path(root, p);
1874 * adjust the pointers going up the tree, starting at level
1875 * making sure the right key of each node points to 'key'.
1876 * This is used after shifting pointers to the left, so it stops
1877 * fixing up pointers when a given leaf/node is not in slot 0 of the higher levels.
1880 * If this fails to write a tree block, it returns -1, but continues
1881 * fixing up the blocks in ram so the tree is consistent.
1883 static int fixup_low_keys(struct btrfs_trans_handle *trans,
1884 struct btrfs_root *root, struct btrfs_path *path,
1885 struct btrfs_disk_key *key, int level)
1889 struct extent_buffer *t;
1891 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1892 int tslot = path->slots[i];
1893 if (!path->nodes[i])
1896 btrfs_set_node_key(t, key, tslot);
1897 btrfs_mark_buffer_dirty(path->nodes[i]);
1907 * This function isn't completely safe. It's the caller's responsibility
1908 * to make sure the new key won't break the sort order
1910 int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1911 struct btrfs_root *root, struct btrfs_path *path,
1912 struct btrfs_key *new_key)
1914 struct btrfs_disk_key disk_key;
1915 struct extent_buffer *eb;
1918 eb = path->nodes[0];
1919 slot = path->slots[0];
1921 btrfs_item_key(eb, &disk_key, slot - 1);
1922 if (comp_keys(&disk_key, new_key) >= 0)
1925 if (slot < btrfs_header_nritems(eb) - 1) {
1926 btrfs_item_key(eb, &disk_key, slot + 1);
1927 if (comp_keys(&disk_key, new_key) <= 0)
1931 btrfs_cpu_key_to_disk(&disk_key, new_key);
1932 btrfs_set_item_key(eb, &disk_key, slot);
1933 btrfs_mark_buffer_dirty(eb);
1935 fixup_low_keys(trans, root, path, &disk_key, 1);
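/*
 * Illustrative note, not in the original source: a typical caller changes
 * only part of an existing key, e.g. the offset of a file extent key when
 * an extent is split, so the checks above only need to verify that the new
 * key still sorts after the key in slot - 1 and before the key in slot + 1.
 */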
1940 * try to push data from one node into the next node left in the tree.
1943 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1944 * error, and > 0 if there was no room in the left hand block.
1946 static int push_node_left(struct btrfs_trans_handle *trans,
1947 struct btrfs_root *root, struct extent_buffer *dst,
1948 struct extent_buffer *src, int empty)
1955 src_nritems = btrfs_header_nritems(src);
1956 dst_nritems = btrfs_header_nritems(dst);
1957 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
1958 WARN_ON(btrfs_header_generation(src) != trans->transid);
1959 WARN_ON(btrfs_header_generation(dst) != trans->transid);
1961 if (!empty && src_nritems <= 8)
1964 if (push_items <= 0)
1968 push_items = min(src_nritems, push_items);
1969 if (push_items < src_nritems) {
1970 /* leave at least 8 pointers in the node if
1971 * we aren't going to empty it
1973 if (src_nritems - push_items < 8) {
1974 if (push_items <= 8)
1980 push_items = min(src_nritems - 8, push_items);
1982 copy_extent_buffer(dst, src,
1983 btrfs_node_key_ptr_offset(dst_nritems),
1984 btrfs_node_key_ptr_offset(0),
1985 push_items * sizeof(struct btrfs_key_ptr));
1987 if (push_items < src_nritems) {
1988 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
1989 btrfs_node_key_ptr_offset(push_items),
1990 (src_nritems - push_items) *
1991 sizeof(struct btrfs_key_ptr));
1993 btrfs_set_header_nritems(src, src_nritems - push_items);
1994 btrfs_set_header_nritems(dst, dst_nritems + push_items);
1995 btrfs_mark_buffer_dirty(src);
1996 btrfs_mark_buffer_dirty(dst);
2002 * try to push data from one node into the next node right in the tree.
2005 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2006 * error, and > 0 if there was no room in the right hand block.
2008 * this will only push up to 1/2 the contents of the left node over
2010 static int balance_node_right(struct btrfs_trans_handle *trans,
2011 struct btrfs_root *root,
2012 struct extent_buffer *dst,
2013 struct extent_buffer *src)
2021 WARN_ON(btrfs_header_generation(src) != trans->transid);
2022 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2024 src_nritems = btrfs_header_nritems(src);
2025 dst_nritems = btrfs_header_nritems(dst);
2026 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2027 if (push_items <= 0)
2030 if (src_nritems < 4)
2033 max_push = src_nritems / 2 + 1;
2034 /* don't try to empty the node */
2035 if (max_push >= src_nritems)
2038 if (max_push < push_items)
2039 push_items = max_push;
2041 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2042 btrfs_node_key_ptr_offset(0),
2044 sizeof(struct btrfs_key_ptr));
2046 copy_extent_buffer(dst, src,
2047 btrfs_node_key_ptr_offset(0),
2048 btrfs_node_key_ptr_offset(src_nritems - push_items),
2049 push_items * sizeof(struct btrfs_key_ptr));
2051 btrfs_set_header_nritems(src, src_nritems - push_items);
2052 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2054 btrfs_mark_buffer_dirty(src);
2055 btrfs_mark_buffer_dirty(dst);
2061 * helper function to insert a new root level in the tree.
2062 * A new node is allocated, and a single item is inserted to
2063 * point to the existing root
2065 * returns zero on success or < 0 on failure.
2067 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2068 struct btrfs_root *root,
2069 struct btrfs_path *path, int level)
2072 struct extent_buffer *lower;
2073 struct extent_buffer *c;
2074 struct extent_buffer *old;
2075 struct btrfs_disk_key lower_key;
2077 BUG_ON(path->nodes[level]);
2078 BUG_ON(path->nodes[level-1] != root->node);
2080 lower = path->nodes[level-1];
2082 btrfs_item_key(lower, &lower_key, 0);
2084 btrfs_node_key(lower, &lower_key, 0);
2086 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2087 root->root_key.objectid, &lower_key,
2088 level, root->node->start, 0);
2092 root_add_used(root, root->nodesize);
2094 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
2095 btrfs_set_header_nritems(c, 1);
2096 btrfs_set_header_level(c, level);
2097 btrfs_set_header_bytenr(c, c->start);
2098 btrfs_set_header_generation(c, trans->transid);
2099 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
2100 btrfs_set_header_owner(c, root->root_key.objectid);
2102 write_extent_buffer(c, root->fs_info->fsid,
2103 (unsigned long)btrfs_header_fsid(c),
2106 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2107 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2110 btrfs_set_node_key(c, &lower_key, 0);
2111 btrfs_set_node_blockptr(c, 0, lower->start);
2112 lower_gen = btrfs_header_generation(lower);
2113 WARN_ON(lower_gen != trans->transid);
2115 btrfs_set_node_ptr_generation(c, 0, lower_gen);
2117 btrfs_mark_buffer_dirty(c);
2119 spin_lock(&root->node_lock);
2122 spin_unlock(&root->node_lock);
2124 /* the super has an extra ref to root->node */
2125 free_extent_buffer(old);
2127 add_root_to_dirty_list(root);
2128 extent_buffer_get(c);
2129 path->nodes[level] = c;
2130 path->locks[level] = 1;
2131 path->slots[level] = 0;
2136 * worker function to insert a single pointer in a node.
2137 * the node should have enough room for the pointer already
2139 * slot and level indicate where you want the key to go, and
2140 * blocknr is the block the key points to.
2142 * returns zero on success and < 0 on any error
2144 static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
2145 *root, struct btrfs_path *path, struct btrfs_disk_key
2146 *key, u64 bytenr, int slot, int level)
2148 struct extent_buffer *lower;
2151 BUG_ON(!path->nodes[level]);
2152 btrfs_assert_tree_locked(path->nodes[level]);
2153 lower = path->nodes[level];
2154 nritems = btrfs_header_nritems(lower);
2155 BUG_ON(slot > nritems);
2156 if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
2158 if (slot != nritems) {
2159 memmove_extent_buffer(lower,
2160 btrfs_node_key_ptr_offset(slot + 1),
2161 btrfs_node_key_ptr_offset(slot),
2162 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2164 btrfs_set_node_key(lower, key, slot);
2165 btrfs_set_node_blockptr(lower, slot, bytenr);
2166 WARN_ON(trans->transid == 0);
2167 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
2168 btrfs_set_header_nritems(lower, nritems + 1);
2169 btrfs_mark_buffer_dirty(lower);
2174 * split the node at the specified level in path in two.
2175 * The path is corrected to point to the appropriate node after the split
2177 * Before splitting this tries to make some room in the node by pushing
2178 * left and right; if either one works, it returns right away.
2180 * returns 0 on success and < 0 on failure
2182 static noinline int split_node(struct btrfs_trans_handle *trans,
2183 struct btrfs_root *root,
2184 struct btrfs_path *path, int level)
2186 struct extent_buffer *c;
2187 struct extent_buffer *split;
2188 struct btrfs_disk_key disk_key;
2194 c = path->nodes[level];
2195 WARN_ON(btrfs_header_generation(c) != trans->transid);
2196 if (c == root->node) {
2197 /* trying to split the root, let's make a new one */
2198 ret = insert_new_root(trans, root, path, level + 1);
2202 ret = push_nodes_for_insert(trans, root, path, level);
2203 c = path->nodes[level];
2204 if (!ret && btrfs_header_nritems(c) <
2205 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
2211 c_nritems = btrfs_header_nritems(c);
2212 mid = (c_nritems + 1) / 2;
2213 btrfs_node_key(c, &disk_key, mid);
2215 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2216 root->root_key.objectid,
2217 &disk_key, level, c->start, 0);
2219 return PTR_ERR(split);
2221 root_add_used(root, root->nodesize);
2223 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
2224 btrfs_set_header_level(split, btrfs_header_level(c));
2225 btrfs_set_header_bytenr(split, split->start);
2226 btrfs_set_header_generation(split, trans->transid);
2227 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
2228 btrfs_set_header_owner(split, root->root_key.objectid);
2229 write_extent_buffer(split, root->fs_info->fsid,
2230 (unsigned long)btrfs_header_fsid(split),
2232 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2233 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2237 copy_extent_buffer(split, c,
2238 btrfs_node_key_ptr_offset(0),
2239 btrfs_node_key_ptr_offset(mid),
2240 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2241 btrfs_set_header_nritems(split, c_nritems - mid);
2242 btrfs_set_header_nritems(c, mid);
2245 btrfs_mark_buffer_dirty(c);
2246 btrfs_mark_buffer_dirty(split);
2248 wret = insert_ptr(trans, root, path, &disk_key, split->start,
2249 path->slots[level + 1] + 1,
2254 if (path->slots[level] >= mid) {
2255 path->slots[level] -= mid;
2256 btrfs_tree_unlock(c);
2257 free_extent_buffer(c);
2258 path->nodes[level] = split;
2259 path->slots[level + 1] += 1;
2261 btrfs_tree_unlock(split);
2262 free_extent_buffer(split);
2268 * how many bytes are required to store the items in a leaf. start
2269 * and nr indicate which items in the leaf to check. This totals up the
2270 * space used both by the item structs and the item data
2272 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
2275 int nritems = btrfs_header_nritems(l);
2276 int end = min(nritems, start + nr) - 1;
2280 data_len = btrfs_item_end_nr(l, start);
2281 data_len = data_len - btrfs_item_offset_nr(l, end);
2282 data_len += sizeof(struct btrfs_item) * nr;
2283 WARN_ON(data_len < 0);
2288 * The space between the end of the leaf items and
2289 * the start of the leaf data. IOW, how much room
2290 * the leaf has left for both items and data
2292 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
2293 struct extent_buffer *leaf)
2295 int nritems = btrfs_header_nritems(leaf);
2297 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2299 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2300 "used %d nritems %d\n",
2301 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
2302 leaf_space_used(leaf, 0, nritems), nritems);
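/*
 * Illustrative example, not in the original source: with the default 4K
 * leaves, BTRFS_LEAF_DATA_SIZE(root) is 4096 minus the leaf header.  A
 * leaf holding two items with 100 and 60 bytes of payload uses
 * 100 + 60 + 2 * sizeof(struct btrfs_item) bytes, and
 * btrfs_leaf_free_space() reports whatever is left of the data area.
 */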
2307 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2308 struct btrfs_root *root,
2309 struct btrfs_path *path,
2310 int data_size, int empty,
2311 struct extent_buffer *right,
2312 int free_space, u32 left_nritems)
2314 struct extent_buffer *left = path->nodes[0];
2315 struct extent_buffer *upper = path->nodes[1];
2316 struct btrfs_disk_key disk_key;
2321 struct btrfs_item *item;
2332 if (path->slots[0] >= left_nritems)
2333 push_space += data_size;
2335 slot = path->slots[1];
2336 i = left_nritems - 1;
2338 item = btrfs_item_nr(left, i);
2340 if (!empty && push_items > 0) {
2341 if (path->slots[0] > i)
2343 if (path->slots[0] == i) {
2344 int space = btrfs_leaf_free_space(root, left);
2345 if (space + push_space * 2 > free_space)
2350 if (path->slots[0] == i)
2351 push_space += data_size;
2353 if (!left->map_token) {
2354 map_extent_buffer(left, (unsigned long)item,
2355 sizeof(struct btrfs_item),
2356 &left->map_token, &left->kaddr,
2357 &left->map_start, &left->map_len,
2361 this_item_size = btrfs_item_size(left, item);
2362 if (this_item_size + sizeof(*item) + push_space > free_space)
2366 push_space += this_item_size + sizeof(*item);
2371 if (left->map_token) {
2372 unmap_extent_buffer(left, left->map_token, KM_USER1);
2373 left->map_token = NULL;
2376 if (push_items == 0)
2379 if (!empty && push_items == left_nritems)
2382 /* push left to right */
2383 right_nritems = btrfs_header_nritems(right);
2385 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
2386 push_space -= leaf_data_end(root, left);
2388 /* make room in the right data area */
2389 data_end = leaf_data_end(root, right);
2390 memmove_extent_buffer(right,
2391 btrfs_leaf_data(right) + data_end - push_space,
2392 btrfs_leaf_data(right) + data_end,
2393 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2395 /* copy from the left data area */
2396 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
2397 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2398 btrfs_leaf_data(left) + leaf_data_end(root, left),
2401 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2402 btrfs_item_nr_offset(0),
2403 right_nritems * sizeof(struct btrfs_item));
2405 /* copy the items from left to right */
2406 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2407 btrfs_item_nr_offset(left_nritems - push_items),
2408 push_items * sizeof(struct btrfs_item));
2410 /* update the item pointers */
2411 right_nritems += push_items;
2412 btrfs_set_header_nritems(right, right_nritems);
2413 push_space = BTRFS_LEAF_DATA_SIZE(root);
2414 for (i = 0; i < right_nritems; i++) {
2415 item = btrfs_item_nr(right, i);
2416 if (!right->map_token) {
2417 map_extent_buffer(right, (unsigned long)item,
2418 sizeof(struct btrfs_item),
2419 &right->map_token, &right->kaddr,
2420 &right->map_start, &right->map_len,
2423 push_space -= btrfs_item_size(right, item);
2424 btrfs_set_item_offset(right, item, push_space);
2427 if (right->map_token) {
2428 unmap_extent_buffer(right, right->map_token, KM_USER1);
2429 right->map_token = NULL;
2431 left_nritems -= push_items;
2432 btrfs_set_header_nritems(left, left_nritems);
2435 btrfs_mark_buffer_dirty(left);
2437 clean_tree_block(trans, root, left);
2439 btrfs_mark_buffer_dirty(right);
2441 btrfs_item_key(right, &disk_key, 0);
2442 btrfs_set_node_key(upper, &disk_key, slot + 1);
2443 btrfs_mark_buffer_dirty(upper);
2445 /* then fixup the leaf pointer in the path */
2446 if (path->slots[0] >= left_nritems) {
2447 path->slots[0] -= left_nritems;
2448 if (btrfs_header_nritems(path->nodes[0]) == 0)
2449 clean_tree_block(trans, root, path->nodes[0]);
2450 btrfs_tree_unlock(path->nodes[0]);
2451 free_extent_buffer(path->nodes[0]);
2452 path->nodes[0] = right;
2453 path->slots[1] += 1;
2455 btrfs_tree_unlock(right);
2456 free_extent_buffer(right);
2461 btrfs_tree_unlock(right);
2462 free_extent_buffer(right);
2467 * push some data in the path leaf to the right, trying to free up at
2468 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2470 * returns 1 if the push failed because the other node didn't have enough
2471 * room, 0 if everything worked out and < 0 if there were major errors.
2473 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
2474 *root, struct btrfs_path *path, int data_size,
2477 struct extent_buffer *left = path->nodes[0];
2478 struct extent_buffer *right;
2479 struct extent_buffer *upper;
2485 if (!path->nodes[1])
2488 slot = path->slots[1];
2489 upper = path->nodes[1];
2490 if (slot >= btrfs_header_nritems(upper) - 1)
2493 btrfs_assert_tree_locked(path->nodes[1]);
2495 right = read_node_slot(root, upper, slot + 1);
2496 btrfs_tree_lock(right);
2497 btrfs_set_lock_blocking(right);
2499 free_space = btrfs_leaf_free_space(root, right);
2500 if (free_space < data_size)
2503 /* cow and double check */
2504 ret = btrfs_cow_block(trans, root, right, upper,
2509 free_space = btrfs_leaf_free_space(root, right);
2510 if (free_space < data_size)
2513 left_nritems = btrfs_header_nritems(left);
2514 if (left_nritems == 0)
2517 return __push_leaf_right(trans, root, path, data_size, empty,
2518 right, free_space, left_nritems);
2520 btrfs_tree_unlock(right);
2521 free_extent_buffer(right);
2526 * push some data in the path leaf to the left, trying to free up at
2527 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2529 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2530 struct btrfs_root *root,
2531 struct btrfs_path *path, int data_size,
2532 int empty, struct extent_buffer *left,
2533 int free_space, int right_nritems)
2535 struct btrfs_disk_key disk_key;
2536 struct extent_buffer *right = path->nodes[0];
2541 struct btrfs_item *item;
2542 u32 old_left_nritems;
2547 u32 old_left_item_size;
2549 slot = path->slots[1];
2554 nr = right_nritems - 1;
2556 for (i = 0; i < nr; i++) {
2557 item = btrfs_item_nr(right, i);
2558 if (!right->map_token) {
2559 map_extent_buffer(right, (unsigned long)item,
2560 sizeof(struct btrfs_item),
2561 &right->map_token, &right->kaddr,
2562 &right->map_start, &right->map_len,
2566 if (!empty && push_items > 0) {
2567 if (path->slots[0] < i)
2569 if (path->slots[0] == i) {
2570 int space = btrfs_leaf_free_space(root, right);
2571 if (space + push_space * 2 > free_space)
2576 if (path->slots[0] == i)
2577 push_space += data_size;
2579 this_item_size = btrfs_item_size(right, item);
2580 if (this_item_size + sizeof(*item) + push_space > free_space)
2584 push_space += this_item_size + sizeof(*item);
2587 if (right->map_token) {
2588 unmap_extent_buffer(right, right->map_token, KM_USER1);
2589 right->map_token = NULL;
2592 if (push_items == 0) {
2596 if (!empty && push_items == btrfs_header_nritems(right))
2599 /* push data from right to left */
2600 copy_extent_buffer(left, right,
2601 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2602 btrfs_item_nr_offset(0),
2603 push_items * sizeof(struct btrfs_item));
2605 push_space = BTRFS_LEAF_DATA_SIZE(root) -
2606 btrfs_item_offset_nr(right, push_items - 1);
2608 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
2609 leaf_data_end(root, left) - push_space,
2610 btrfs_leaf_data(right) +
2611 btrfs_item_offset_nr(right, push_items - 1),
2613 old_left_nritems = btrfs_header_nritems(left);
2614 BUG_ON(old_left_nritems <= 0);
2616 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
2617 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
2620 item = btrfs_item_nr(left, i);
2621 if (!left->map_token) {
2622 map_extent_buffer(left, (unsigned long)item,
2623 sizeof(struct btrfs_item),
2624 &left->map_token, &left->kaddr,
2625 &left->map_start, &left->map_len,
2629 ioff = btrfs_item_offset(left, item);
2630 btrfs_set_item_offset(left, item,
2631 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size));
2633 btrfs_set_header_nritems(left, old_left_nritems + push_items);
2634 if (left->map_token) {
2635 unmap_extent_buffer(left, left->map_token, KM_USER1);
2636 left->map_token = NULL;
2639 /* fixup right node */
2640 if (push_items > right_nritems) {
2641 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2646 if (push_items < right_nritems) {
2647 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2648 leaf_data_end(root, right);
2649 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2650 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2651 btrfs_leaf_data(right) +
2652 leaf_data_end(root, right), push_space);
2654 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
2655 btrfs_item_nr_offset(push_items),
2656 (btrfs_header_nritems(right) - push_items) *
2657 sizeof(struct btrfs_item));
2659 right_nritems -= push_items;
2660 btrfs_set_header_nritems(right, right_nritems);
2661 push_space = BTRFS_LEAF_DATA_SIZE(root);
2662 for (i = 0; i < right_nritems; i++) {
2663 item = btrfs_item_nr(right, i);
2665 if (!right->map_token) {
2666 map_extent_buffer(right, (unsigned long)item,
2667 sizeof(struct btrfs_item),
2668 &right->map_token, &right->kaddr,
2669 &right->map_start, &right->map_len,
2673 push_space = push_space - btrfs_item_size(right, item);
2674 btrfs_set_item_offset(right, item, push_space);
2676 if (right->map_token) {
2677 unmap_extent_buffer(right, right->map_token, KM_USER1);
2678 right->map_token = NULL;
2681 btrfs_mark_buffer_dirty(left);
2683 btrfs_mark_buffer_dirty(right);
2685 clean_tree_block(trans, root, right);
2687 btrfs_item_key(right, &disk_key, 0);
2688 wret = fixup_low_keys(trans, root, path, &disk_key, 1);
2692 /* then fixup the leaf pointer in the path */
2693 if (path->slots[0] < push_items) {
2694 path->slots[0] += old_left_nritems;
2695 btrfs_tree_unlock(path->nodes[0]);
2696 free_extent_buffer(path->nodes[0]);
2697 path->nodes[0] = left;
2698 path->slots[1] -= 1;
2700 btrfs_tree_unlock(left);
2701 free_extent_buffer(left);
2702 path->slots[0] -= push_items;
2704 BUG_ON(path->slots[0] < 0);
2707 btrfs_tree_unlock(left);
2708 free_extent_buffer(left);
2713 * push some data in the path leaf to the left, trying to free up at
2714 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2716 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
2717 *root, struct btrfs_path *path, int data_size,
2720 struct extent_buffer *right = path->nodes[0];
2721 struct extent_buffer *left;
2727 slot = path->slots[1];
2730 if (!path->nodes[1])
2733 right_nritems = btrfs_header_nritems(right);
2734 if (right_nritems == 0)
2737 btrfs_assert_tree_locked(path->nodes[1]);
2739 left = read_node_slot(root, path->nodes[1], slot - 1);
2740 btrfs_tree_lock(left);
2741 btrfs_set_lock_blocking(left);
2743 free_space = btrfs_leaf_free_space(root, left);
2744 if (free_space < data_size) {
2749 /* cow and double check */
2750 ret = btrfs_cow_block(trans, root, left,
2751 path->nodes[1], slot - 1, &left);
2753 /* we hit -ENOSPC, but it isn't fatal here */
2758 free_space = btrfs_leaf_free_space(root, left);
2759 if (free_space < data_size) {
2764 return __push_leaf_left(trans, root, path, data_size,
2765 empty, left, free_space, right_nritems);
2767 btrfs_tree_unlock(left);
2768 free_extent_buffer(left);
2773 * helper for split_leaf(): copy the items from 'mid' onward out of leaf
2774 * 'l' into the freshly allocated leaf 'right', and fix up the path.
2776 * returns 0 if all went well and < 0 on failure.
2778 static noinline int copy_for_split(struct btrfs_trans_handle *trans,
2779 struct btrfs_root *root,
2780 struct btrfs_path *path,
2781 struct extent_buffer *l,
2782 struct extent_buffer *right,
2783 int slot, int mid, int nritems)
2790 struct btrfs_disk_key disk_key;
2792 nritems = nritems - mid;
2793 btrfs_set_header_nritems(right, nritems);
2794 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2796 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2797 btrfs_item_nr_offset(mid),
2798 nritems * sizeof(struct btrfs_item));
2800 copy_extent_buffer(right, l,
2801 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2802 data_copy_size, btrfs_leaf_data(l) +
2803 leaf_data_end(root, l), data_copy_size);
2805 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2806 btrfs_item_end_nr(l, mid);
2808 for (i = 0; i < nritems; i++) {
2809 struct btrfs_item *item = btrfs_item_nr(right, i);
2812 if (!right->map_token) {
2813 map_extent_buffer(right, (unsigned long)item,
2814 sizeof(struct btrfs_item),
2815 &right->map_token, &right->kaddr,
2816 &right->map_start, &right->map_len,
2820 ioff = btrfs_item_offset(right, item);
2821 btrfs_set_item_offset(right, item, ioff + rt_data_off);
2824 if (right->map_token) {
2825 unmap_extent_buffer(right, right->map_token, KM_USER1);
2826 right->map_token = NULL;
2829 btrfs_set_header_nritems(l, mid);
2831 btrfs_item_key(right, &disk_key, 0);
2832 wret = insert_ptr(trans, root, path, &disk_key, right->start,
2833 path->slots[1] + 1, 1);
2837 btrfs_mark_buffer_dirty(right);
2838 btrfs_mark_buffer_dirty(l);
2839 BUG_ON(path->slots[0] != slot);
2842 btrfs_tree_unlock(path->nodes[0]);
2843 free_extent_buffer(path->nodes[0]);
2844 path->nodes[0] = right;
2845 path->slots[0] -= mid;
2846 path->slots[1] += 1;
2848 btrfs_tree_unlock(right);
2849 free_extent_buffer(right);
2852 BUG_ON(path->slots[0] < 0);
2858 * split the path's leaf in two, making sure there is at least data_size
2859 * available for the resulting leaf level of the path.
2861 * returns 0 if all went well and < 0 on failure.
2863 static noinline int split_leaf(struct btrfs_trans_handle *trans,
2864 struct btrfs_root *root,
2865 struct btrfs_key *ins_key,
2866 struct btrfs_path *path, int data_size,
2869 struct btrfs_disk_key disk_key;
2870 struct extent_buffer *l;
2874 struct extent_buffer *right;
2878 int num_doubles = 0;
2881 slot = path->slots[0];
2882 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2883 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2886 /* first try to make some room by pushing left and right */
2887 if (data_size && ins_key->type != BTRFS_DIR_ITEM_KEY) {
2888 wret = push_leaf_right(trans, root, path, data_size, 0);
2892 wret = push_leaf_left(trans, root, path, data_size, 0);
2898 /* did the pushes work? */
2899 if (btrfs_leaf_free_space(root, l) >= data_size)
2903 if (!path->nodes[1]) {
2904 ret = insert_new_root(trans, root, path, 1);
2911 slot = path->slots[0];
2912 nritems = btrfs_header_nritems(l);
2913 mid = (nritems + 1) / 2;
2917 leaf_space_used(l, mid, nritems - mid) + data_size >
2918 BTRFS_LEAF_DATA_SIZE(root)) {
2919 if (slot >= nritems) {
2923 if (mid != nritems &&
2924 leaf_space_used(l, mid, nritems - mid) +
2925 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2931 if (leaf_space_used(l, 0, mid) + data_size >
2932 BTRFS_LEAF_DATA_SIZE(root)) {
2933 if (!extend && data_size && slot == 0) {
2935 } else if ((extend || !data_size) && slot == 0) {
2939 if (mid != nritems &&
2940 leaf_space_used(l, mid, nritems - mid) +
2941 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
2949 btrfs_cpu_key_to_disk(&disk_key, ins_key);
2951 btrfs_item_key(l, &disk_key, mid);
2953 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
2954 root->root_key.objectid,
2955 &disk_key, 0, l->start, 0);
2957 return PTR_ERR(right);
2959 root_add_used(root, root->leafsize);
2961 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
2962 btrfs_set_header_bytenr(right, right->start);
2963 btrfs_set_header_generation(right, trans->transid);
2964 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
2965 btrfs_set_header_owner(right, root->root_key.objectid);
2966 btrfs_set_header_level(right, 0);
2967 write_extent_buffer(right, root->fs_info->fsid,
2968 (unsigned long)btrfs_header_fsid(right),
2971 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
2972 (unsigned long)btrfs_header_chunk_tree_uuid(right),
2977 btrfs_set_header_nritems(right, 0);
2978 wret = insert_ptr(trans, root, path,
2979 &disk_key, right->start,
2980 path->slots[1] + 1, 1);
2984 btrfs_tree_unlock(path->nodes[0]);
2985 free_extent_buffer(path->nodes[0]);
2986 path->nodes[0] = right;
2988 path->slots[1] += 1;
2990 btrfs_set_header_nritems(right, 0);
2991 wret = insert_ptr(trans, root, path,
2997 btrfs_tree_unlock(path->nodes[0]);
2998 free_extent_buffer(path->nodes[0]);
2999 path->nodes[0] = right;
3001 if (path->slots[1] == 0) {
3002 wret = fixup_low_keys(trans, root,
3003 path, &disk_key, 1);
3008 btrfs_mark_buffer_dirty(right);
3012 ret = copy_for_split(trans, root, path, l, right, slot, mid, nritems);
3016 BUG_ON(num_doubles != 0);
3024 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3025 struct btrfs_root *root,
3026 struct btrfs_path *path, int ins_len)
3028 struct btrfs_key key;
3029 struct extent_buffer *leaf;
3030 struct btrfs_file_extent_item *fi;
3035 leaf = path->nodes[0];
3036 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3038 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3039 key.type != BTRFS_EXTENT_CSUM_KEY);
3041 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3044 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3045 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3046 fi = btrfs_item_ptr(leaf, path->slots[0],
3047 struct btrfs_file_extent_item);
3048 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3050 btrfs_release_path(root, path);
3052 path->keep_locks = 1;
3053 path->search_for_split = 1;
3054 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
3055 path->search_for_split = 0;
3060 leaf = path->nodes[0];
3061 /* if our item isn't there or got smaller, return now */
3062 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3065 /* the leaf has changed, it now has room. return now */
3066 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3069 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3070 fi = btrfs_item_ptr(leaf, path->slots[0],
3071 struct btrfs_file_extent_item);
3072 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3076 btrfs_set_path_blocking(path);
3077 ret = split_leaf(trans, root, &key, path, ins_len, 1);
3081 path->keep_locks = 0;
3082 btrfs_unlock_up_safe(path, 1);
3085 path->keep_locks = 0;
3089 static noinline int split_item(struct btrfs_trans_handle *trans,
3090 struct btrfs_root *root,
3091 struct btrfs_path *path,
3092 struct btrfs_key *new_key,
3093 unsigned long split_offset)
3095 struct extent_buffer *leaf;
3096 struct btrfs_item *item;
3097 struct btrfs_item *new_item;
3103 struct btrfs_disk_key disk_key;
3105 leaf = path->nodes[0];
3106 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3108 btrfs_set_path_blocking(path);
3110 item = btrfs_item_nr(leaf, path->slots[0]);
3111 orig_offset = btrfs_item_offset(leaf, item);
3112 item_size = btrfs_item_size(leaf, item);
3114 buf = kmalloc(item_size, GFP_NOFS);
3118 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3119 path->slots[0]), item_size);
3121 slot = path->slots[0] + 1;
3122 nritems = btrfs_header_nritems(leaf);
3123 if (slot != nritems) {
3124 /* shift the items */
3125 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
3126 btrfs_item_nr_offset(slot),
3127 (nritems - slot) * sizeof(struct btrfs_item));
3130 btrfs_cpu_key_to_disk(&disk_key, new_key);
3131 btrfs_set_item_key(leaf, &disk_key, slot);
3133 new_item = btrfs_item_nr(leaf, slot);
3135 btrfs_set_item_offset(leaf, new_item, orig_offset);
3136 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3138 btrfs_set_item_offset(leaf, item,
3139 orig_offset + item_size - split_offset);
3140 btrfs_set_item_size(leaf, item, split_offset);
3142 btrfs_set_header_nritems(leaf, nritems + 1);
3144 /* write the data for the start of the original item */
3145 write_extent_buffer(leaf, buf,
3146 btrfs_item_ptr_offset(leaf, path->slots[0]),
3149 /* write the data for the new item */
3150 write_extent_buffer(leaf, buf + split_offset,
3151 btrfs_item_ptr_offset(leaf, slot),
3152 item_size - split_offset);
3153 btrfs_mark_buffer_dirty(leaf);
3155 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
3161 * This function splits a single item into two items,
3162 * giving 'new_key' to the new item and splitting the
3163 * old one at split_offset (from the start of the item).
3165 * The path may be released by this operation. After
3166 * the split, the path is pointing to the old item. The
3167 * new item is going to be in the same node as the old one.
3169 * Note: the item being split must be small enough to live alone on
3170 * a tree block with room for one extra struct btrfs_item.
3172 * This allows us to split the item in place, keeping a lock on the
3173 * leaf the entire time.
3175 int btrfs_split_item(struct btrfs_trans_handle *trans,
3176 struct btrfs_root *root,
3177 struct btrfs_path *path,
3178 struct btrfs_key *new_key,
3179 unsigned long split_offset)
3182 ret = setup_leaf_for_split(trans, root, path,
3183 sizeof(struct btrfs_item));
3187 ret = split_item(trans, root, path, new_key, split_offset);
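/*
 * Illustrative sketch, not part of the original source: one way a caller
 * might use btrfs_split_item().  The helper name, key values and offset
 * below are hypothetical.  Note that setup_leaf_for_split() above only
 * accepts EXTENT_DATA and EXTENT_CSUM keys, so the item looked up here is
 * assumed to be one of those.
 */
static int __maybe_unused example_split_item(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_key *key,
					     struct btrfs_key *new_key,
					     unsigned long split_offset)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* point the path at the existing item; cow so the leaf is writable */
	ret = btrfs_search_slot(trans, root, key, path, 0, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	/*
	 * the bytes from split_offset to the end of the item become a new
	 * item under new_key; the original item keeps the first
	 * split_offset bytes
	 */
	ret = btrfs_split_item(trans, root, path, new_key, split_offset);
out:
	btrfs_free_path(path);
	return ret;
}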
3192 * This function duplicates an item, giving 'new_key' to the new item.
3193 * It guarantees both items live in the same tree leaf and the new item
3194 * is contiguous with the original item.
3196 * This allows us to split a file extent in place, keeping a lock on the
3197 * leaf the entire time.
3199 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3200 struct btrfs_root *root,
3201 struct btrfs_path *path,
3202 struct btrfs_key *new_key)
3204 struct extent_buffer *leaf;
3208 leaf = path->nodes[0];
3209 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3210 ret = setup_leaf_for_split(trans, root, path,
3211 item_size + sizeof(struct btrfs_item));
3216 ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
3217 item_size, item_size +
3218 sizeof(struct btrfs_item), 1);
3221 leaf = path->nodes[0];
3222 memcpy_extent_buffer(leaf,
3223 btrfs_item_ptr_offset(leaf, path->slots[0]),
3224 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
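/*
 * Illustrative sketch, not part of the original source: duplicating a file
 * extent item so the copy can describe the second half of a split extent.
 * The names are hypothetical, and the path is assumed to come from a
 * btrfs_search_slot() call with cow == 1, positioned on the extent item.
 */
static int __maybe_unused example_duplicate_extent(struct btrfs_trans_handle *trans,
						   struct btrfs_root *root,
						   struct btrfs_path *path,
						   u64 new_offset)
{
	struct btrfs_key new_key;

	/* same objectid and type as the original, but a new file offset */
	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
	new_key.offset = new_offset;

	/*
	 * on success path->slots[0] points at the new copy and slot - 1
	 * still holds the original; both live in the same leaf
	 */
	return btrfs_duplicate_item(trans, root, path, &new_key);
}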
3230 * make the item pointed to by the path smaller. new_size indicates
3231 * how small to make it, and from_end tells us if we just chop bytes
3232 * off the end of the item or if we shift the item to chop bytes off the front.
3235 int btrfs_truncate_item(struct btrfs_trans_handle *trans,
3236 struct btrfs_root *root,
3237 struct btrfs_path *path,
3238 u32 new_size, int from_end)
3243 struct extent_buffer *leaf;
3244 struct btrfs_item *item;
3246 unsigned int data_end;
3247 unsigned int old_data_start;
3248 unsigned int old_size;
3249 unsigned int size_diff;
3252 slot_orig = path->slots[0];
3253 leaf = path->nodes[0];
3254 slot = path->slots[0];
3256 old_size = btrfs_item_size_nr(leaf, slot);
3257 if (old_size == new_size)
3260 nritems = btrfs_header_nritems(leaf);
3261 data_end = leaf_data_end(root, leaf);
3263 old_data_start = btrfs_item_offset_nr(leaf, slot);
3265 size_diff = old_size - new_size;
3268 BUG_ON(slot >= nritems);
3271 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3273 /* first correct the data pointers */
3274 for (i = slot; i < nritems; i++) {
3276 item = btrfs_item_nr(leaf, i);
3278 if (!leaf->map_token) {
3279 map_extent_buffer(leaf, (unsigned long)item,
3280 sizeof(struct btrfs_item),
3281 &leaf->map_token, &leaf->kaddr,
3282 &leaf->map_start, &leaf->map_len,
3286 ioff = btrfs_item_offset(leaf, item);
3287 btrfs_set_item_offset(leaf, item, ioff + size_diff);
3290 if (leaf->map_token) {
3291 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3292 leaf->map_token = NULL;
3295 /* shift the data */
3297 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3298 data_end + size_diff, btrfs_leaf_data(leaf) +
3299 data_end, old_data_start + new_size - data_end);
3301 struct btrfs_disk_key disk_key;
3304 btrfs_item_key(leaf, &disk_key, slot);
3306 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3308 struct btrfs_file_extent_item *fi;
3310 fi = btrfs_item_ptr(leaf, slot,
3311 struct btrfs_file_extent_item);
3312 fi = (struct btrfs_file_extent_item *)(
3313 (unsigned long)fi - size_diff);
3315 if (btrfs_file_extent_type(leaf, fi) ==
3316 BTRFS_FILE_EXTENT_INLINE) {
3317 ptr = btrfs_item_ptr_offset(leaf, slot);
3318 memmove_extent_buffer(leaf, ptr,
3320 offsetof(struct btrfs_file_extent_item,
3325 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3326 data_end + size_diff, btrfs_leaf_data(leaf) +
3327 data_end, old_data_start - data_end);
3329 offset = btrfs_disk_key_offset(&disk_key);
3330 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3331 btrfs_set_item_key(leaf, &disk_key, slot);
3333 fixup_low_keys(trans, root, path, &disk_key, 1);
3336 item = btrfs_item_nr(leaf, slot);
3337 btrfs_set_item_size(leaf, item, new_size);
3338 btrfs_mark_buffer_dirty(leaf);
3341 if (btrfs_leaf_free_space(root, leaf) < 0) {
3342 btrfs_print_leaf(root, leaf);
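/*
 * Illustrative sketch, not part of the original source: shrinking the item
 * the path points at so that only 'new_size' bytes of payload remain, with
 * the tail of the data chopped off.  The helper name is hypothetical.
 */
static int __maybe_unused example_shrink_item(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      struct btrfs_path *path,
					      u32 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (new_size >= old_size)
		return 0;	/* nothing to chop */

	/* from_end == 1: drop bytes from the end, keep the item's start */
	return btrfs_truncate_item(trans, root, path, new_size, 1);
}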
3349 * make the item pointed to by the path bigger; data_size is the number of bytes to add.
3351 int btrfs_extend_item(struct btrfs_trans_handle *trans,
3352 struct btrfs_root *root, struct btrfs_path *path,
3358 struct extent_buffer *leaf;
3359 struct btrfs_item *item;
3361 unsigned int data_end;
3362 unsigned int old_data;
3363 unsigned int old_size;
3366 slot_orig = path->slots[0];
3367 leaf = path->nodes[0];
3369 nritems = btrfs_header_nritems(leaf);
3370 data_end = leaf_data_end(root, leaf);
3372 if (btrfs_leaf_free_space(root, leaf) < data_size) {
3373 btrfs_print_leaf(root, leaf);
3376 slot = path->slots[0];
3377 old_data = btrfs_item_end_nr(leaf, slot);
3380 if (slot >= nritems) {
3381 btrfs_print_leaf(root, leaf);
3382 printk(KERN_CRIT "slot %d too large, nritems %d\n",
3388 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3390 /* first correct the data pointers */
3391 for (i = slot; i < nritems; i++) {
3393 item = btrfs_item_nr(leaf, i);
3395 if (!leaf->map_token) {
3396 map_extent_buffer(leaf, (unsigned long)item,
3397 sizeof(struct btrfs_item),
3398 &leaf->map_token, &leaf->kaddr,
3399 &leaf->map_start, &leaf->map_len,
3402 ioff = btrfs_item_offset(leaf, item);
3403 btrfs_set_item_offset(leaf, item, ioff - data_size);
3406 if (leaf->map_token) {
3407 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3408 leaf->map_token = NULL;
3411 /* shift the data */
3412 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3413 data_end - data_size, btrfs_leaf_data(leaf) +
3414 data_end, old_data - data_end);
3416 data_end = old_data;
3417 old_size = btrfs_item_size_nr(leaf, slot);
3418 item = btrfs_item_nr(leaf, slot);
3419 btrfs_set_item_size(leaf, item, old_size + data_size);
3420 btrfs_mark_buffer_dirty(leaf);
3423 if (btrfs_leaf_free_space(root, leaf) < 0) {
3424 btrfs_print_leaf(root, leaf);
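/*
 * Illustrative sketch, not part of the original source: growing the item
 * the path points at by 'extra' bytes and zeroing the newly added tail.
 * The helper name is hypothetical, and the caller is assumed to have
 * searched with an ins_len large enough that the leaf has room.
 */
static int __maybe_unused example_grow_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path,
					    u32 extra)
{
	struct extent_buffer *leaf = path->nodes[0];
	unsigned long ptr;
	u32 old_size;
	int ret;

	old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = btrfs_extend_item(trans, root, path, extra);
	if (ret)
		return ret;

	/* the new bytes sit at the end of the enlarged item; clear them */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memset_extent_buffer(leaf, 0, ptr + old_size, extra);
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}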
3431 * Given a key and some data, insert items into the tree.
3432 * This does all the path init required, making room in the tree if needed.
3433 * Returns the number of keys that were inserted.
3435 int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
3436 struct btrfs_root *root,
3437 struct btrfs_path *path,
3438 struct btrfs_key *cpu_key, u32 *data_size,
3441 struct extent_buffer *leaf;
3442 struct btrfs_item *item;
3449 unsigned int data_end;
3450 struct btrfs_disk_key disk_key;
3451 struct btrfs_key found_key;
3453 for (i = 0; i < nr; i++) {
3454 if (total_size + data_size[i] + sizeof(struct btrfs_item) >
3455 BTRFS_LEAF_DATA_SIZE(root)) {
3459 total_data += data_size[i];
3460 total_size += data_size[i] + sizeof(struct btrfs_item);
3464 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3470 leaf = path->nodes[0];
3472 nritems = btrfs_header_nritems(leaf);
3473 data_end = leaf_data_end(root, leaf);
3475 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3476 for (i = nr; i >= 0; i--) {
3477 total_data -= data_size[i];
3478 total_size -= data_size[i] + sizeof(struct btrfs_item);
3479 if (total_size < btrfs_leaf_free_space(root, leaf))
3485 slot = path->slots[0];
3488 if (slot != nritems) {
3489 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3491 item = btrfs_item_nr(leaf, slot);
3492 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3494 /* figure out how many keys we can insert in here */
3495 total_data = data_size[0];
3496 for (i = 1; i < nr; i++) {
3497 if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
3499 total_data += data_size[i];
3503 if (old_data < data_end) {
3504 btrfs_print_leaf(root, leaf);
3505 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3506 slot, old_data, data_end);
3510 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3512 /* first correct the data pointers */
3513 WARN_ON(leaf->map_token);
3514 for (i = slot; i < nritems; i++) {
3517 item = btrfs_item_nr(leaf, i);
3518 if (!leaf->map_token) {
3519 map_extent_buffer(leaf, (unsigned long)item,
3520 sizeof(struct btrfs_item),
3521 &leaf->map_token, &leaf->kaddr,
3522 &leaf->map_start, &leaf->map_len,
3526 ioff = btrfs_item_offset(leaf, item);
3527 btrfs_set_item_offset(leaf, item, ioff - total_data);
3529 if (leaf->map_token) {
3530 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3531 leaf->map_token = NULL;
3534 /* shift the items */
3535 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3536 btrfs_item_nr_offset(slot),
3537 (nritems - slot) * sizeof(struct btrfs_item));
3539 /* shift the data */
3540 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3541 data_end - total_data, btrfs_leaf_data(leaf) +
3542 data_end, old_data - data_end);
3543 data_end = old_data;
3546 * this sucks but it has to be done: if we are inserting at
3547 * the end of the leaf, only insert 1 of the items, since we
3548 * have no way of knowing what's on the next leaf and we'd have
3549 * to drop our current locks to figure it out
3554 /* setup the item for the new data */
3555 for (i = 0; i < nr; i++) {
3556 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3557 btrfs_set_item_key(leaf, &disk_key, slot + i);
3558 item = btrfs_item_nr(leaf, slot + i);
3559 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3560 data_end -= data_size[i];
3561 btrfs_set_item_size(leaf, item, data_size[i]);
3563 btrfs_set_header_nritems(leaf, nritems + nr);
3564 btrfs_mark_buffer_dirty(leaf);
3568 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3569 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3572 if (btrfs_leaf_free_space(root, leaf) < 0) {
3573 btrfs_print_leaf(root, leaf);
3583 * this is a helper for btrfs_insert_empty_items; the main goal here is
3584 * to save stack depth by doing the bulk of the work in a function
3585 * that doesn't call btrfs_search_slot
3587 static noinline_for_stack int
3588 setup_items_for_insert(struct btrfs_trans_handle *trans,
3589 struct btrfs_root *root, struct btrfs_path *path,
3590 struct btrfs_key *cpu_key, u32 *data_size,
3591 u32 total_data, u32 total_size, int nr)
3593 struct btrfs_item *item;
3596 unsigned int data_end;
3597 struct btrfs_disk_key disk_key;
3599 struct extent_buffer *leaf;
3602 leaf = path->nodes[0];
3603 slot = path->slots[0];
3605 nritems = btrfs_header_nritems(leaf);
3606 data_end = leaf_data_end(root, leaf);
3608 if (btrfs_leaf_free_space(root, leaf) < total_size) {
3609 btrfs_print_leaf(root, leaf);
3610 printk(KERN_CRIT "not enough freespace need %u have %d\n",
3611 total_size, btrfs_leaf_free_space(root, leaf));
3615 if (slot != nritems) {
3616 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
3618 if (old_data < data_end) {
3619 btrfs_print_leaf(root, leaf);
3620 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
3621 slot, old_data, data_end);
3625 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3627 /* first correct the data pointers */
3628 WARN_ON(leaf->map_token);
3629 for (i = slot; i < nritems; i++) {
3632 item = btrfs_item_nr(leaf, i);
3633 if (!leaf->map_token) {
3634 map_extent_buffer(leaf, (unsigned long)item,
3635 sizeof(struct btrfs_item),
3636 &leaf->map_token, &leaf->kaddr,
3637 &leaf->map_start, &leaf->map_len,
3641 ioff = btrfs_item_offset(leaf, item);
3642 btrfs_set_item_offset(leaf, item, ioff - total_data);
3644 if (leaf->map_token) {
3645 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3646 leaf->map_token = NULL;
3649 /* shift the items */
3650 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
3651 btrfs_item_nr_offset(slot),
3652 (nritems - slot) * sizeof(struct btrfs_item));
3654 /* shift the data */
3655 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3656 data_end - total_data, btrfs_leaf_data(leaf) +
3657 data_end, old_data - data_end);
3658 data_end = old_data;
3661 /* setup the item for the new data */
3662 for (i = 0; i < nr; i++) {
3663 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
3664 btrfs_set_item_key(leaf, &disk_key, slot + i);
3665 item = btrfs_item_nr(leaf, slot + i);
3666 btrfs_set_item_offset(leaf, item, data_end - data_size[i]);
3667 data_end -= data_size[i];
3668 btrfs_set_item_size(leaf, item, data_size[i]);
3671 btrfs_set_header_nritems(leaf, nritems + nr);
3675 struct btrfs_disk_key disk_key;
3676 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
3677 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
3679 btrfs_unlock_up_safe(path, 1);
3680 btrfs_mark_buffer_dirty(leaf);
3682 if (btrfs_leaf_free_space(root, leaf) < 0) {
3683 btrfs_print_leaf(root, leaf);
3690 * Given a key and some data, insert items into the tree.
3691 * This does all the path init required, making room in the tree if needed.
3693 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
3694 struct btrfs_root *root,
3695 struct btrfs_path *path,
3696 struct btrfs_key *cpu_key, u32 *data_size,
3699 struct extent_buffer *leaf;
3706 for (i = 0; i < nr; i++)
3707 total_data += data_size[i];
3709 total_size = total_data + (nr * sizeof(struct btrfs_item));
3710 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
3716 leaf = path->nodes[0];
3717 slot = path->slots[0];
3720 ret = setup_items_for_insert(trans, root, path, cpu_key, data_size,
3721 total_data, total_size, nr);
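/*
 * Illustrative sketch, not part of the original source: inserting two
 * empty, adjacent items with a single call and filling them afterwards.
 * The keys, sizes and payloads are hypothetical; the two keys are assumed
 * to be in ascending order with no existing key sorting between them, so
 * they end up in consecutive slots of the same leaf.
 */
static int __maybe_unused example_insert_two(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_key *keys,
					     void *data[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int ret;
	int i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	/* path->slots[0] points at the first of the new items */
	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, data[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}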
3728 * Given a key and some data, insert an item into the tree.
3729 * This does all the path init required, making room in the tree if needed.
3731 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3732 *root, struct btrfs_key *cpu_key, void *data, u32
3736 struct btrfs_path *path;
3737 struct extent_buffer *leaf;
3740 path = btrfs_alloc_path();
3742 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
3744 leaf = path->nodes[0];
3745 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
3746 write_extent_buffer(leaf, data, ptr, data_size);
3747 btrfs_mark_buffer_dirty(leaf);
3749 btrfs_free_path(path);
3754 * delete the pointer from a given node.
3756 * the tree should have been previously balanced so the deletion does not empty a node.
3759 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3760 struct btrfs_path *path, int level, int slot)
3762 struct extent_buffer *parent = path->nodes[level];
3767 nritems = btrfs_header_nritems(parent);
3768 if (slot != nritems - 1) {
3769 memmove_extent_buffer(parent,
3770 btrfs_node_key_ptr_offset(slot),
3771 btrfs_node_key_ptr_offset(slot + 1),
3772 sizeof(struct btrfs_key_ptr) *
3773 (nritems - slot - 1));
3776 btrfs_set_header_nritems(parent, nritems);
3777 if (nritems == 0 && parent == root->node) {
3778 BUG_ON(btrfs_header_level(root->node) != 1);
3779 /* just turn the root into a leaf and break */
3780 btrfs_set_header_level(root->node, 0);
3781 } else if (slot == 0) {
3782 struct btrfs_disk_key disk_key;
3784 btrfs_node_key(parent, &disk_key, 0);
3785 wret = fixup_low_keys(trans, root, path, &disk_key, level + 1);
3789 btrfs_mark_buffer_dirty(parent);
3794 * a helper function to delete the leaf pointed to by path->slots[1] and path->nodes[1].
3797 * This deletes the pointer in path->nodes[1] and frees the leaf
3798 * block extent. zero is returned if it all worked out, < 0 otherwise.
3800 * The path must have already been setup for deleting the leaf, including
3801 * all the proper balancing. path->nodes[1] must be locked.
3803 static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
3804 struct btrfs_root *root,
3805 struct btrfs_path *path,
3806 struct extent_buffer *leaf)
3810 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3811 ret = del_ptr(trans, root, path, 1, path->slots[1]);
3816 * btrfs_free_extent is expensive, so we want to make sure we
3817 * aren't holding any locks when we call it
3819 btrfs_unlock_up_safe(path, 0);
3821 root_sub_used(root, leaf->len);
3823 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3827 * delete the item at the leaf level in path. If that empties
3828 * the leaf, remove it from the tree
3830 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3831 struct btrfs_path *path, int slot, int nr)
3833 struct extent_buffer *leaf;
3834 struct btrfs_item *item;
3842 leaf = path->nodes[0];
3843 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
3845 for (i = 0; i < nr; i++)
3846 dsize += btrfs_item_size_nr(leaf, slot + i);
3848 nritems = btrfs_header_nritems(leaf);
3850 if (slot + nr != nritems) {
3851 int data_end = leaf_data_end(root, leaf);
3853 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3855 btrfs_leaf_data(leaf) + data_end,
3856 last_off - data_end);
3858 for (i = slot + nr; i < nritems; i++) {
3861 item = btrfs_item_nr(leaf, i);
3862 if (!leaf->map_token) {
3863 map_extent_buffer(leaf, (unsigned long)item,
3864 sizeof(struct btrfs_item),
3865 &leaf->map_token, &leaf->kaddr,
3866 &leaf->map_start, &leaf->map_len,
3869 ioff = btrfs_item_offset(leaf, item);
3870 btrfs_set_item_offset(leaf, item, ioff + dsize);
3873 if (leaf->map_token) {
3874 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
3875 leaf->map_token = NULL;
3878 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
3879 btrfs_item_nr_offset(slot + nr),
3880 sizeof(struct btrfs_item) *
3881 (nritems - slot - nr));
3883 btrfs_set_header_nritems(leaf, nritems - nr);
3886 /* delete the leaf if we've emptied it */
3888 if (leaf == root->node) {
3889 btrfs_set_header_level(leaf, 0);
3891 btrfs_set_path_blocking(path);
3892 clean_tree_block(trans, root, leaf);
3893 ret = btrfs_del_leaf(trans, root, path, leaf);
3897 int used = leaf_space_used(leaf, 0, nritems);
3899 struct btrfs_disk_key disk_key;
3901 btrfs_item_key(leaf, &disk_key, 0);
3902 wret = fixup_low_keys(trans, root, path,
3908 /* delete the leaf if it is mostly empty */
3909 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
3910 /* push_leaf_left fixes the path.
3911 * make sure the path still points to our leaf
3912 * for a possible call to del_ptr below
3914 slot = path->slots[1];
3915 extent_buffer_get(leaf);
3917 btrfs_set_path_blocking(path);
3918 wret = push_leaf_left(trans, root, path, 1, 1);
3919 if (wret < 0 && wret != -ENOSPC)
3922 if (path->nodes[0] == leaf &&
3923 btrfs_header_nritems(leaf)) {
3924 wret = push_leaf_right(trans, root, path, 1, 1);
3925 if (wret < 0 && wret != -ENOSPC)
3929 if (btrfs_header_nritems(leaf) == 0) {
3930 path->slots[1] = slot;
3931 ret = btrfs_del_leaf(trans, root, path, leaf);
3933 free_extent_buffer(leaf);
3935 /* if we're still in the path, make sure
3936 * we're dirty. Otherwise, one of the
3937 * push_leaf functions must have already
3938 * dirtied this buffer
3940 if (path->nodes[0] == leaf)
3941 btrfs_mark_buffer_dirty(leaf);
3942 free_extent_buffer(leaf);
3945 btrfs_mark_buffer_dirty(leaf);
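/*
 * Illustrative sketch, not part of the original source: looking up a
 * single item by key and deleting it.  The helper name is hypothetical
 * and the key is assumed to exist in 'root'.
 */
static int __maybe_unused example_delete_one(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len of -1 tells the search we intend to delete from the leaf */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
	btrfs_free_path(path);
	return ret;
}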
3952 * search the tree again to find a leaf with lesser keys
3953 * returns 0 if it found something or 1 if there are no lesser leaves.
3954 * returns < 0 on io errors.
3956 * This may release the path, and so you may lose any locks held at the time you call it.
3959 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
3961 struct btrfs_key key;
3962 struct btrfs_disk_key found_key;
3965 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
3969 else if (key.type > 0)
3971 else if (key.objectid > 0)
3976 btrfs_release_path(root, path);
3977 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3980 btrfs_item_key(path->nodes[0], &found_key, 0);
3981 ret = comp_keys(&found_key, &key);
3988 * A helper function to walk down the tree starting at min_key, and looking
3989 * for nodes or leaves that are either in cache or have a minimum
3990 * transaction id. This is used by the btree defrag code, and tree logging
3992 * This does not cow, but it does stuff the starting key it finds back
3993 * into min_key, so you can call btrfs_search_slot with cow=1 on the
3994 * key and get a writable path.
3996 * This does lock as it descends, and path->keep_locks should be set
3997 * to 1 by the caller.
3999 * This honors path->lowest_level to prevent descent past a given level
4002 * min_trans indicates the oldest transaction that you are interested
4003 * in walking through. Any nodes or leaves older than min_trans are
4004 * skipped over (without reading them).
4006 * returns zero if something useful was found, < 0 on error and 1 if there
4007 * was nothing in the tree that matched the search criteria.
4009 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4010 struct btrfs_key *max_key,
4011 struct btrfs_path *path, int cache_only,
4014 struct extent_buffer *cur;
4015 struct btrfs_key found_key;
4022 WARN_ON(!path->keep_locks);
4024 cur = btrfs_lock_root_node(root);
4025 level = btrfs_header_level(cur);
4026 WARN_ON(path->nodes[level]);
4027 path->nodes[level] = cur;
4028 path->locks[level] = 1;
4030 if (btrfs_header_generation(cur) < min_trans) {
4035 nritems = btrfs_header_nritems(cur);
4036 level = btrfs_header_level(cur);
4037 sret = bin_search(cur, min_key, level, &slot);
4039 /* at the lowest level, we're done, setup the path and exit */
4040 if (level == path->lowest_level) {
4041 if (slot >= nritems)
4044 path->slots[level] = slot;
4045 btrfs_item_key_to_cpu(cur, &found_key, slot);
4048 if (sret && slot > 0)
4051 * check this node pointer against the cache_only and
4052 * min_trans parameters. If it isn't in cache or is too
4053 * old, skip to the next one.
4055 while (slot < nritems) {
4058 struct extent_buffer *tmp;
4059 struct btrfs_disk_key disk_key;
4061 blockptr = btrfs_node_blockptr(cur, slot);
4062 gen = btrfs_node_ptr_generation(cur, slot);
4063 if (gen < min_trans) {
4071 btrfs_node_key(cur, &disk_key, slot);
4072 if (comp_keys(&disk_key, max_key) >= 0) {
4078 tmp = btrfs_find_tree_block(root, blockptr,
4079 btrfs_level_size(root, level - 1));
4081 if (tmp && btrfs_buffer_uptodate(tmp, gen)) {
4082 free_extent_buffer(tmp);
4086 free_extent_buffer(tmp);
4091 * we didn't find a candidate key in this node, walk forward
4092 * and find another one
4094 if (slot >= nritems) {
4095 path->slots[level] = slot;
4096 btrfs_set_path_blocking(path);
4097 sret = btrfs_find_next_key(root, path, min_key, level,
4098 cache_only, min_trans);
4100 btrfs_release_path(root, path);
4106 /* save our key so it can be returned to the caller */
4107 btrfs_node_key_to_cpu(cur, &found_key, slot);
4108 path->slots[level] = slot;
4109 if (level == path->lowest_level) {
4111 unlock_up(path, level, 1);
4114 btrfs_set_path_blocking(path);
4115 cur = read_node_slot(root, cur, slot);
4117 btrfs_tree_lock(cur);
4119 path->locks[level - 1] = 1;
4120 path->nodes[level - 1] = cur;
4121 unlock_up(path, level, 1);
4122 btrfs_clear_path_blocking(path, NULL);
4126 memcpy(min_key, &found_key, sizeof(found_key));
4127 btrfs_set_path_blocking(path);
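/*
 * Illustrative sketch, not part of the original source: scanning for keys
 * in blocks no older than 'min_trans', in the style of the defrag and
 * tree-log callers mentioned above.  The helper name and the empty
 * per-key processing step are hypothetical.
 */
static int __maybe_unused example_scan_newer(struct btrfs_root *root,
					     u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* required by btrfs_search_forward */

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	while (1) {
		/* cache_only == 0: read blocks from disk as needed */
		ret = btrfs_search_forward(root, &min_key, &max_key, path,
					   0, min_trans);
		if (ret != 0)
			break;

		/* min_key now holds the key that was found */
		/* ... process the item/node the path points at here ... */

		btrfs_release_path(root, path);

		/* step just past the key we processed and go around again */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}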
4132 * this is similar to btrfs_next_leaf, but does not try to preserve
4133 * and fixup the path. It looks for and returns the next key in the
4134 * tree based on the current path and the cache_only and min_trans parameters.
4137 * 0 is returned if another key is found, < 0 if there are any errors
4138 * and 1 is returned if there are no higher keys in the tree
4140 * path->keep_locks should be set to 1 on the search made before
4141 * calling this function.
4143 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
4144 struct btrfs_key *key, int level,
4145 int cache_only, u64 min_trans)
4148 struct extent_buffer *c;
4150 WARN_ON(!path->keep_locks);
4151 while (level < BTRFS_MAX_LEVEL) {
4152 if (!path->nodes[level])
4155 slot = path->slots[level] + 1;
4156 c = path->nodes[level];
4158 if (slot >= btrfs_header_nritems(c)) {
4161 struct btrfs_key cur_key;
4162 if (level + 1 >= BTRFS_MAX_LEVEL ||
4163 !path->nodes[level + 1])
4166 if (path->locks[level + 1]) {
4171 slot = btrfs_header_nritems(c) - 1;
4173 btrfs_item_key_to_cpu(c, &cur_key, slot);
4175 btrfs_node_key_to_cpu(c, &cur_key, slot);
4177 orig_lowest = path->lowest_level;
4178 btrfs_release_path(root, path);
4179 path->lowest_level = level;
4180 ret = btrfs_search_slot(NULL, root, &cur_key, path,
4182 path->lowest_level = orig_lowest;
4186 c = path->nodes[level];
4187 slot = path->slots[level];
4194 btrfs_item_key_to_cpu(c, key, slot);
4196 u64 blockptr = btrfs_node_blockptr(c, slot);
4197 u64 gen = btrfs_node_ptr_generation(c, slot);
4200 struct extent_buffer *cur;
4201 cur = btrfs_find_tree_block(root, blockptr,
4202 btrfs_level_size(root, level - 1));
4203 if (!cur || !btrfs_buffer_uptodate(cur, gen)) {
4206 free_extent_buffer(cur);
4209 free_extent_buffer(cur);
4211 if (gen < min_trans) {
4215 btrfs_node_key_to_cpu(c, key, slot);
4223 * search the tree again to find a leaf with greater keys
4224 * returns 0 if it found something or 1 if there are no greater leaves.
4225 * returns < 0 on io errors.
4227 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4231 struct extent_buffer *c;
4232 struct extent_buffer *next;
4233 struct btrfs_key key;
4236 int old_spinning = path->leave_spinning;
4237 int force_blocking = 0;
4239 nritems = btrfs_header_nritems(path->nodes[0]);
4244 * we take the blocks in an order that upsets lockdep. Using
4245 * blocking mode is the only way around it.
4247 #ifdef CONFIG_DEBUG_LOCK_ALLOC
4251 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
4255 btrfs_release_path(root, path);
4257 path->keep_locks = 1;
4259 if (!force_blocking)
4260 path->leave_spinning = 1;
4262 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4263 path->keep_locks = 0;
4268 nritems = btrfs_header_nritems(path->nodes[0]);
4270 * by releasing the path above we dropped all our locks. A balance
4271 * could have added more items next to the key that used to be
4272 * at the very end of the block. So, check again here and
4273 * advance the path if there are now more items available.
4275 if (nritems > 0 && path->slots[0] < nritems - 1) {
4282 while (level < BTRFS_MAX_LEVEL) {
4283 if (!path->nodes[level]) {
4288 slot = path->slots[level] + 1;
4289 c = path->nodes[level];
4290 if (slot >= btrfs_header_nritems(c)) {
4292 if (level == BTRFS_MAX_LEVEL) {
4300 btrfs_tree_unlock(next);
4301 free_extent_buffer(next);
4305 ret = read_block_for_search(NULL, root, path, &next, level,
4311 btrfs_release_path(root, path);
4315 if (!path->skip_locking) {
4316 ret = btrfs_try_spin_lock(next);
4318 btrfs_set_path_blocking(path);
4319 btrfs_tree_lock(next);
4320 if (!force_blocking)
4321 btrfs_clear_path_blocking(path, next);
4324 btrfs_set_lock_blocking(next);
4328 path->slots[level] = slot;
4331 c = path->nodes[level];
4332 if (path->locks[level])
4333 btrfs_tree_unlock(c);
4335 free_extent_buffer(c);
4336 path->nodes[level] = next;
4337 path->slots[level] = 0;
4338 if (!path->skip_locking)
4339 path->locks[level] = 1;
4344 ret = read_block_for_search(NULL, root, path, &next, level,
4350 btrfs_release_path(root, path);
4354 if (!path->skip_locking) {
4355 btrfs_assert_tree_locked(path->nodes[level]);
4356 ret = btrfs_try_spin_lock(next);
4358 btrfs_set_path_blocking(path);
4359 btrfs_tree_lock(next);
4360 if (!force_blocking)
4361 btrfs_clear_path_blocking(path, next);
4364 btrfs_set_lock_blocking(next);
4369 unlock_up(path, 0, 1);
4370 path->leave_spinning = old_spinning;
4372 btrfs_set_path_blocking(path);
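/*
 * Illustrative sketch, not part of the original source: the usual pattern
 * for walking every item of one objectid, calling btrfs_next_leaf() when
 * the current leaf is exhausted.  The helper name and the empty per-item
 * step are hypothetical.
 */
static int __maybe_unused example_walk_objectid(struct btrfs_root *root,
						u64 objectid)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	/* read-only search: no transaction, no cow */
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				break;	/* 1 means there are no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid)
			break;

		/* ... look at the item in path->slots[0] here ... */

		path->slots[0]++;
	}
	ret = ret < 0 ? ret : 0;
out:
	btrfs_free_path(path);
	return ret;
}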
4378 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
4379 * searching until it gets past min_objectid or finds an item of 'type'
4381 * returns 0 if something is found, 1 if nothing was found and < 0 on error
4383 int btrfs_previous_item(struct btrfs_root *root,
4384 struct btrfs_path *path, u64 min_objectid,
4387 struct btrfs_key found_key;
4388 struct extent_buffer *leaf;
4393 if (path->slots[0] == 0) {
4394 btrfs_set_path_blocking(path);
4395 ret = btrfs_prev_leaf(root, path);
4401 leaf = path->nodes[0];
4402 nritems = btrfs_header_nritems(leaf);
4405 if (path->slots[0] == nritems)
4408 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4409 if (found_key.objectid < min_objectid)
4411 if (found_key.type == type)
4413 if (found_key.objectid == min_objectid &&
4414 found_key.type < type)