/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
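/*
 * Try to take the head ref mutex without giving up the delayed_refs
 * spinlock.  If the trylock fails, pin the head with a reference,
 * drop the spinlock, and sleep on the mutex before retaking the lock.
 * Returns 0 with the mutex held, or -EAGAIN if the head was run and
 * removed from the tree while we slept.
 */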
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
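/*
 * Remove a single delayed ref from its head and release it, updating
 * the entry counts.  Head refs are erased from the href rbtree; normal
 * refs are unlinked from the head's ref_list under head->lock.
 */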
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
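/*
 * Walk the head's ref_list and fold any ref that matches @ref (same
 * type and comparison keys, below @seq) into it, summing the ref_mods
 * and dropping refs that cancel out to zero.  Returns true once @ref
 * itself has been consumed and the caller must restart from the head
 * of the list.
 */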
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}
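/*
 * Merge all mergeable refs queued on a head before the head is run.
 * Tree block refs added before the oldest outstanding tree_mod_seq
 * must be left alone so tree mod log users still see them.
 */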
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}
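/*
 * Return 1 if the given seq is at or after the oldest entry on the
 * tree mod seq list, meaning the delayed ref must be held back until
 * the blocker is gone, and 0 if it is safe to process now.
 */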
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
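/*
 * Pick the next delayed ref head to run, scanning from
 * run_delayed_start and wrapping around once so every head gets a
 * turn.  Heads already marked as processing are skipped.  Called with
 * the delayed_refs spinlock held; returns NULL when nothing is ready.
 */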
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *qexisting;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
							     qrecord);
		if (qexisting)
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated.
	 * But bad practice is everywhere; follow it for now.  Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}
/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}
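/*
 * Record the qgroup reservation made by @ref_root against the existing
 * delayed ref head for @bytenr, so it can be accounted when the head
 * is run.  Returns -ENOENT if no head is queued for that extent.
 */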
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 ref_root, u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *ref_head;
	int ret = 0;

	if (!fs_info->quota_enabled || !is_fstree(ref_root))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;

	spin_lock(&delayed_refs->lock);
	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
	if (!ref_head) {
		ret = -ENOENT;
		goto out;
	}
	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
	ref_head->qgroup_ref_root = ref_root;
	ref_head->qgroup_reserved = num_bytes;
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
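/*
 * Queue a head-only update (BTRFS_UPDATE_DELAYED_HEAD) carrying an
 * extent_op, used to set flags or a key on an extent item without
 * changing its reference count.
 */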
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
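/* destroy the slab caches, tolerating any that were never created */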
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
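/* create the slab caches used by all of the delayed ref structures */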
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}