/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
23 #include "delayed-ref.h"
24 #include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

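/*
 * Rough lifecycle (a sketch, not a contract): callers queue updates through
 * btrfs_add_delayed_tree_ref(), btrfs_add_delayed_data_ref() and
 * btrfs_add_delayed_extent_op() below, duplicate updates against the same
 * extent are merged in the rb tree, and the accumulated modifications are
 * applied to the extent allocation tree later, when the transaction code
 * runs the delayed refs (btrfs_run_delayed_refs() in extent-tree.c).
 */
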
/*
 * compare two delayed tree backrefs with the same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with the same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

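/*
 * To summarize the ordering implemented above: nodes sort by (bytenr,
 * is_head, type, seq, per-type content), and the head node for a bytenr
 * sorts after all of the regular refs for that same bytenr.
 */
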
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

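/*
 * Lock the mutex on a delayed ref head without deadlocking against whoever
 * is currently running it.  Must be called with the delayed ref spinlock
 * held; if the trylock fails, the spinlock is dropped while we sleep on
 * the mutex and is re-taken afterwards.  Returns -EAGAIN if the head was
 * run and removed from the tree in the meantime, in which case the caller
 * must look it up again.
 */
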
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

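/*
 * Check whether a delayed ref with sequence number 'seq' must be postponed.
 * Entries on seq_head are registered by code that needs a stable view of
 * the backrefs (the backref walker); while the oldest registered sequence
 * is <= seq, running this ref could change the backrefs underneath that
 * reader.  Returns 1 to hold the ref back, 0 if it is safe to process.
 */
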
int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}

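/*
 * Collect a cluster of up to 32 delayed ref heads for processing, starting
 * at 'start' and wrapping around to the beginning of the rb tree if the
 * end is reached.  Returns 0 if at least one head was added to 'cluster',
 * 1 if no usable head was found.
 */
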
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

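/*
 * The heads collected above stay on 'cluster' while they are run; in this
 * version the consumer is btrfs_run_delayed_refs()/run_clustered_refs() in
 * extent-tree.c, which locks each head via btrfs_delayed_ref_lock() before
 * processing it.
 */
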
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

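/*
 * Example of the cancellation path above (a hypothetical sequence): a
 * delayed ADD with ref_mod == 1 followed by a DROP of the same ref brings
 * ref_mod to 0 and the node is erased, without the extent allocation tree
 * ever being touched.
 */
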
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

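/*
 * Note on ownership: update_existing_head_ref() either steals the update's
 * extent_op or merges it into the existing one and kfree()s it, so the
 * caller must not touch update->extent_op afterwards.
 */
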
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (is_fstree(ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (is_fstree(ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	if (!is_fstree(ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

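/*
 * A typical call, sketched from the allocation path (hypothetical values,
 * see the real callers in extent-tree.c):
 *
 *	ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, ins.objectid,
 *					 ins.offset, parent, root_objectid,
 *					 level, BTRFS_ADD_DELAYED_EXTENT,
 *					 extent_op, for_cow);
 *
 * This queues the backref update for a newly allocated tree block instead
 * of modifying the extent allocation tree in the middle of the allocation.
 */
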
/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	if (!is_fstree(ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}