/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
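
/*
 * Illustrative lifecycle sketch (editorial addition, not upstream code).
 * A modification path queues a ref under a running transaction, and the
 * accumulated mods are applied to the extent tree later, e.g. at commit:
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *				   parent, ref_root, level,
 *				   BTRFS_ADD_DELAYED_REF, NULL);
 *	...
 *	btrfs_run_delayed_refs(trans, fs_info, count);
 */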
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
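
/*
 * Editorial note: callers below only consume the zero/nonzero result of
 * these comparators, e.g.
 *
 *	if (comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
 *			   btrfs_delayed_node_to_data_ref(ref)))
 *		goto add_tail;
 *
 * Any nonzero value means "different backref"; the -1/1 distinction
 * merely orders refs by root/parent (tree) or (root, objectid, offset)
 * (data).
 */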
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found (wrapping around to the first entry if needed).
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
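
/*
 * Example (editorial): with heads at bytenr 4096 and 8192,
 * find_ref_head(root, 4096, 0) returns the 4096 head,
 * find_ref_head(root, 5000, 0) returns NULL, and
 * find_ref_head(root, 5000, 1) returns the 8192 head.  Past the last
 * head the lookup wraps to rb_first(), which lets
 * btrfs_select_ref_head() below scan the heads round-robin.
 */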
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
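
/*
 * Illustrative caller pattern (editorial sketch, simplified from the
 * delayed-ref run loop in extent-tree.c): -EAGAIN means the head was
 * run and removed while we slept on the mutex, so the caller moves on
 * to another head instead of retrying this one:
 *
 *	ret = btrfs_delayed_ref_lock(trans, locked_ref);
 *	if (ret == -EAGAIN)
 *		continue;	and select a fresh head
 */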
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);
		if (next == ref)
			goto next;
		if (seq && next->seq >= seq)
			goto next;
		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}
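
/*
 * Worked example (editorial): if ref is an ADD with ref_mod == 2 and
 * next is a matching DROP with ref_mod == 1, the actions differ and
 * ref->ref_mod is not smaller, so mod == -1: next is dropped and ref
 * survives with ref_mod == 1.  Had both mods been equal, ref->ref_mod
 * would reach zero and both nodes would be dropped.
 */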
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			btrfs_debug(fs_info,
				"holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)",
				(u32)(seq >> 32), (u32)seq,
				(u32)(elem->seq >> 32), (u32)elem->seq,
				delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
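
/*
 * Example (editorial): if a tree mod log user registered seq 100 in
 * tree_mod_seq_list, any delayed ref with seq >= 100 makes this return 1
 * and is held back, so backref walks relying on the mod log keep a
 * consistent view of the tree.
 */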
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;
again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
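
/*
 * Illustrative consumer loop (editorial sketch, simplified from the
 * delayed-ref run loop in extent-tree.c, with delayed_refs->lock held):
 *
 *	while ((head = btrfs_select_ref_head(trans))) {
 *		... lock the head, merge and run its refs ...
 *	}
 *
 * run_delayed_start makes successive calls walk the heads in bytenr
 * order instead of returning the same head repeatedly.
 */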
/*
 * Helper to insert the ref_node at the tail, or merge it with the tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
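
/*
 * Worked example (editorial): if the tail is an ADD with ref_mod == 1
 * and a matching DROP with ref_mod == 1 arrives, the actions differ and
 * exist->ref_mod is not smaller, so mod == -1: exist->ref_mod drops to
 * zero, the tail is removed entirely, and we return 1 without ever
 * inserting the new node.
 */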
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/*
		 * if the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = true;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}
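
/*
 * Worked example (editorial): a data head with total_ref_mod == 1 that
 * receives an update with ref_mod == -2 goes from positive to negative,
 * so pending_csums grows by num_bytes: the extent is now expected to be
 * freed and its csum items deleted along with it.
 */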
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;
	int qrecord_inserted = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		if (btrfs_qgroup_trace_extent_nolock(fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(fs_info, ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;
	return head_ref;
}
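
/*
 * Editorial summary of count_mod above: BTRFS_ADD_DELAYED_REF and
 * BTRFS_ADD_DELAYED_EXTENT contribute +1 to the head's sum,
 * BTRFS_DROP_DELAYED_REF contributes -1, and BTRFS_UPDATE_DELAYED_HEAD
 * contributes 0, since it only carries an extent_op rather than a
 * reference count change.
 */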
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: ideally this memory would be freed at the same level that
	 * allocated it; until that cleanup happens, free a merged ref here.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	INIT_LIST_HEAD(&ref->list);
	INIT_LIST_HEAD(&ref->add_list);

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(fs_info, ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0,
					&qrecord_inserted);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}
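
/*
 * Hypothetical usage (editorial): dropping a shared tree block at
 * 'bytenr' that is referenced through its parent block could be queued
 * as
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, nodesize,
 *				   parent, root_objectid, level,
 *				   BTRFS_DROP_DELAYED_REF, NULL);
 */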
/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;

	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1, &qrecord_inserted);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(fs_info, record);
	return 0;
}
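
/*
 * Hypothetical usage (editorial): adding a ref for a file extent of
 * inode 'ino' at file offset 'offset' in subvolume 'root_objectid':
 *
 *	btrfs_add_delayed_data_ref(fs_info, trans, bytenr, num_bytes,
 *				   0, root_objectid, ino, offset, 0,
 *				   BTRFS_ADD_DELAYED_REF);
 */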
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data, NULL);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
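
/*
 * Usage sketch (editorial, modeled on btrfs_set_disk_extent_flags in
 * extent-tree.c):
 *
 *	extent_op = btrfs_alloc_delayed_extent_op();
 *	if (!extent_op)
 *		return -ENOMEM;
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = true;
 *	extent_op->update_key = false;
 *	extent_op->is_data = is_data;
 *	extent_op->level = level;
 *	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
 *					  num_bytes, extent_op);
 */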
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
void btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
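
/*
 * Editorial note: btrfs_delayed_ref_init() is meant to run once at
 * module load, paired with btrfs_delayed_ref_exit() on unload.
 * kmem_cache_destroy() ignores a NULL cache, which is why the
 * partial-failure path above can simply call btrfs_delayed_ref_exit().
 */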