// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * Delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
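
/*
 * Illustrative overview (a sketch, not compiled code): a metadata ref
 * update typically enters this file via the helpers below and is flushed
 * later by btrfs_run_delayed_refs() in extent-tree.c.  The helper names
 * and exact signatures (btrfs_init_generic_ref(), btrfs_init_tree_ref()
 * in delayed-ref.h) vary between kernel versions:
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
 *			       num_bytes, parent);
 *	btrfs_init_tree_ref(&ref, level, root_id, root_id, false);
 *	ret = btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */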

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}
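
/*
 * Usage sketch (illustrative; the real throttling policy lives with the
 * callers in the transaction code): a true return means the reservations
 * no longer cover the queued updates, so callers back off, e.g.
 *
 *	if (btrfs_check_space_for_delayed_refs(fs_info))
 *		return btrfs_commit_transaction(trans);
 */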

/*
 * Release a ref head's reservation.
 *
 * @fs_info: the filesystem
 * @nr:      number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	const u64 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr);
	u64 released;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called any time we may have adjusted
 * trans->delayed_ref_updates; it calculates the additional size and adds it
 * to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_delayed_ref_bytes(fs_info,
						 trans->delayed_ref_updates);

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}
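
/*
 * Illustrative pairing (sketch): code that queues N new ref updates bumps
 * the transaction counter and then lets the helper above grow the rsv:
 *
 *	trans->delayed_ref_updates += nr_new_updates;
 *	btrfs_update_delayed_refs_rsv(trans);
 */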

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
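
/*
 * Call-site sketch (illustrative and simplified; the real caller is the
 * transaction start path, and the btrfs_block_rsv_add() arguments follow
 * that era's API): reserve bytes for the expected ref updates, then move
 * them here so they are not tied to a single transaction handle:
 *
 *	ret = btrfs_block_rsv_add(fs_info, rsv, delayed_refs_bytes, flush);
 *	if (!ret)
 *		btrfs_migrate_to_delayed_refs_rsv(fs_info, rsv,
 *						  delayed_refs_bytes);
 */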

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, false);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}
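
/*
 * Example (sketch): callers choose the flush mode for the refill based on
 * context, e.g. a best-effort, non-blocking top-up:
 *
 *	ret = btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_NO_FLUSH);
 */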

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
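
/*
 * Note (descriptive, not upstream text): the resulting sort key within a
 * head's ref_tree is (type, per-type fields, seq).  tree_insert() below
 * compares with check_seq == true so nodes differing only in seq get
 * distinct slots, while merge_ref() compares with check_seq == false so
 * such nodes are considered mergeable.
 */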

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}
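
/*
 * Both insert helpers above share a convention worth calling out: a NULL
 * return means the new node was linked into the tree, while a non-NULL
 * return is the pre-existing entry that compared equal, which the caller
 * merges into instead of inserting a duplicate.
 */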

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr.  This returns the delayed ref head if
 * it was able to find one, or NULL if nothing was in that spot.  If
 * return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
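
/*
 * Caller pattern (sketch): -EAGAIN means the head was processed and
 * removed from the rbtree while we slept on the mutex, so the caller
 * simply moves on to another head:
 *
 *	ret = btrfs_delayed_ref_lock(delayed_refs, head);
 *	if (ret == -EAGAIN)
 *		continue;	// head went away, select a new one
 */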

static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(delayed_refs, head, ref, seq))
			goto again;
	}
}
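
/*
 * Worked example (illustrative): if a head holds two mergeable tree-block
 * mods, (BTRFS_ADD_DELAYED_REF, ref_mod 1) and (BTRFS_DROP_DELAYED_REF,
 * ref_mod 1), merge_ref() folds them to a net ref_mod of 0 and drops both
 * nodes, so nothing needs to be written to the extent tree for that pair.
 */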

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
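
/*
 * Consumer-loop sketch (illustrative and simplified; the real loop lives
 * in the delayed ref running code in extent-tree.c):
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_select_ref_head(delayed_refs);
 *	if (head && !btrfs_delayed_ref_lock(delayed_refs, head)) {
 *		spin_unlock(&delayed_refs->lock);
 *		// ... run the head's ref mods, then unlock the head ...
 *	}
 */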

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}
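
/*
 * Usage note (see btrfs_add_delayed_tree_ref() and
 * btrfs_add_delayed_data_ref() below): a return of 1 means @ref was merged
 * into an existing node and never linked into the tree, so the caller must
 * free it:
 *
 *	ret = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
 *	...
 *	if (ret > 0)
 *		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
 */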

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_delayed_ref_head *existing,
			 struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}
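
/*
 * Worked example (illustrative): a data head with total_ref_mod -1 has its
 * checksum bytes counted in pending_csums.  If an ADD with ref_mod +1 then
 * arrives, total_ref_mod becomes 0 (>= 0), the extent will survive, so the
 * csum deletion reservation is released again via
 * btrfs_delayed_refs_rsv_release().
 */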

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
					delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:	The structure which is going to be initialized.
 *
 * @bytenr:	The logical address of the extent for which a modification is
 *		going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:	The id of the root where this modification has originated, this
 *		can be either one of the well-known metadata trees or the
 *		subvolume id which references this extent.
 *
 * @action:	Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *		BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:	Holds the type of the extent which is being recorded, can be
 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}
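
/*
 * Caller sketch (illustrative, simplified from the backref walking code):
 * look the head up under the spinlock and pin it before dropping the lock:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 *	if (head)
 *		refcount_inc(&head->refs);
 *	spin_unlock(&delayed_refs->lock);
 */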

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}