fs/btrfs/delayed-ref.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

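/*
 * Return true if the delayed refs rsv needs more space than is currently
 * reserved in it plus the global reserve, i.e. it is time for callers to
 * start throttling.
 */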
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
        bool ret = false;
        u64 reserved;

        spin_lock(&global_rsv->lock);
        reserved = global_rsv->reserved;
        spin_unlock(&global_rsv->lock);

        /*
         * Since the global reserve is just kind of magic we don't really want
         * to rely on it to save our bacon, so if our size is more than the
         * delayed_refs_rsv and the global rsv then it's time to think about
         * bailing.
         */
        spin_lock(&delayed_refs_rsv->lock);
        reserved += delayed_refs_rsv->reserved;
        if (delayed_refs_rsv->size >= reserved)
                ret = true;
        spin_unlock(&delayed_refs_rsv->lock);
        return ret;
}

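/*
 * Estimate the total runtime of the currently queued delayed refs from the
 * average ref runtime.  Returns 1 if it exceeds one second, 2 if it exceeds
 * half a second, and otherwise falls back to the delayed refs rsv space
 * check above.
 */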
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
        u64 num_entries =
                atomic_read(&trans->transaction->delayed_refs.num_entries);
        u64 avg_runtime;
        u64 val;

        smp_mb();
        avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
        val = num_entries * avg_runtime;
        if (val >= NSEC_PER_SEC)
                return 1;
        if (val >= NSEC_PER_SEC / 2)
                return 2;

        return btrfs_check_space_for_delayed_refs(trans->fs_info);
}

/*
 * Release a ref head's reservation.
 *
 * @fs_info:  the filesystem
 * @nr:       number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
        u64 released = 0;

        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
        if (released)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, released, 0);
}

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called anytime we may have adjusted trans->delayed_ref_updates;
 * it calculates the additional size and adds it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
        u64 num_bytes;

        if (!trans->delayed_ref_updates)
                return;

        num_bytes = btrfs_calc_insert_metadata_size(fs_info,
                                                    trans->delayed_ref_updates);
        /*
         * We have to check the mount option here because we could be enabling
         * the free space tree for the first time and don't have the compat_ro
         * option set yet.
         *
         * We need extra reservations if we have the free space tree because
         * we'll have to modify that tree as well.
         */
        if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
                num_bytes *= 2;

        spin_lock(&delayed_rsv->lock);
        delayed_rsv->size += num_bytes;
        delayed_rsv->full = false;
        spin_unlock(&delayed_rsv->lock);
        trans->delayed_ref_updates = 0;
}

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to num_bytes from the src rsv to the delayed_refs_rsv.
 * Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
                                       struct btrfs_block_rsv *src,
                                       u64 num_bytes)
{
        struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
        u64 to_free = 0;

        spin_lock(&src->lock);
        src->reserved -= num_bytes;
        src->size -= num_bytes;
        spin_unlock(&src->lock);

        spin_lock(&delayed_refs_rsv->lock);
        if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
                u64 delta = delayed_refs_rsv->size -
                        delayed_refs_rsv->reserved;
                if (num_bytes > delta) {
                        to_free = num_bytes - delta;
                        num_bytes = delta;
                }
        } else {
                to_free = num_bytes;
                num_bytes = 0;
        }

        if (num_bytes)
                delayed_refs_rsv->reserved += num_bytes;
        if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
                delayed_refs_rsv->full = true;
        spin_unlock(&delayed_refs_rsv->lock);

        if (num_bytes)
                trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                              0, num_bytes, 1);
        if (to_free)
                btrfs_space_info_free_bytes_may_use(fs_info,
                                delayed_refs_rsv->space_info, to_free);
}

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to one item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
                                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
        u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
        u64 num_bytes = 0;
        int ret = -ENOSPC;

        spin_lock(&block_rsv->lock);
        if (block_rsv->reserved < block_rsv->size) {
                num_bytes = block_rsv->size - block_rsv->reserved;
                num_bytes = min(num_bytes, limit);
        }
        spin_unlock(&block_rsv->lock);

        if (!num_bytes)
                return 0;

        ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
        if (ret)
                return ret;
        btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
        trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
                                      0, num_bytes, 1);
        return 0;
}

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
                          struct btrfs_delayed_tree_ref *ref2)
{
        if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
                          struct btrfs_delayed_data_ref *ref2)
{
        if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (ref1->root < ref2->root)
                        return -1;
                if (ref1->root > ref2->root)
                        return 1;
                if (ref1->objectid < ref2->objectid)
                        return -1;
                if (ref1->objectid > ref2->objectid)
                        return 1;
                if (ref1->offset < ref2->offset)
                        return -1;
                if (ref1->offset > ref2->offset)
                        return 1;
        } else {
                if (ref1->parent < ref2->parent)
                        return -1;
                if (ref1->parent > ref2->parent)
                        return 1;
        }
        return 0;
}

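/*
 * Compare two delayed ref nodes, ordering first by type and then by the
 * type specific comparison.  When check_seq is set, the sequence number is
 * used as the final tie breaker.
 */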
static int comp_refs(struct btrfs_delayed_ref_node *ref1,
                     struct btrfs_delayed_ref_node *ref2,
                     bool check_seq)
{
        int ret = 0;

        if (ref1->type < ref2->type)
                return -1;
        if (ref1->type > ref2->type)
                return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
                ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
                                     btrfs_delayed_node_to_tree_ref(ref2));
        else
                ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
                                     btrfs_delayed_node_to_data_ref(ref2));
        if (ret)
                return ret;
        if (check_seq) {
                if (ref1->seq < ref2->seq)
                        return -1;
                if (ref1->seq > ref2->seq)
                        return 1;
        }
        return 0;
}

/* insert a new ref head into the head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
                                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_head *entry;
        struct btrfs_delayed_ref_head *ins;
        u64 bytenr;
        bool leftmost = true;

        ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
        bytenr = ins->bytenr;
        while (*p) {
                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
                                 href_node);

                if (bytenr < entry->bytenr) {
                        p = &(*p)->rb_left;
                } else if (bytenr > entry->bytenr) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

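/*
 * Insert a delayed ref node into a head's ref tree.  Returns NULL when the
 * node was inserted, or the existing node when an equal ref (including seq)
 * is already present.
 */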
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
                struct btrfs_delayed_ref_node *ins)
{
        struct rb_node **p = &root->rb_root.rb_node;
        struct rb_node *node = &ins->ref_node;
        struct rb_node *parent_node = NULL;
        struct btrfs_delayed_ref_node *entry;
        bool leftmost = true;

        while (*p) {
                int comp;

                parent_node = *p;
                entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
                                 ref_node);
                comp = comp_refs(ins, entry, true);
                if (comp < 0) {
                        p = &(*p)->rb_left;
                } else if (comp > 0) {
                        p = &(*p)->rb_right;
                        leftmost = false;
                } else {
                        return entry;
                }
        }

        rb_link_node(node, parent_node, p);
        rb_insert_color_cached(node, root, leftmost);
        return NULL;
}

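/*
 * Return the ref head with the smallest bytenr, or NULL if the tree is
 * empty.
 */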
static struct btrfs_delayed_ref_head *find_first_ref_head(
                struct btrfs_delayed_ref_root *dr)
{
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = rb_first_cached(&dr->href_root);
        if (!n)
                return NULL;

        entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

        return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
                struct btrfs_delayed_ref_root *dr, u64 bytenr,
                bool return_bigger)
{
        struct rb_root *root = &dr->href_root.rb_root;
        struct rb_node *n;
        struct btrfs_delayed_ref_head *entry;

        n = root->rb_node;
        entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

                if (bytenr < entry->bytenr)
                        n = n->rb_left;
                else if (bytenr > entry->bytenr)
                        n = n->rb_right;
                else
                        return entry;
        }
        if (entry && return_bigger) {
                if (bytenr > entry->bytenr) {
                        n = rb_next(&entry->href_node);
                        if (!n)
                                return NULL;
                        entry = rb_entry(n, struct btrfs_delayed_ref_head,
                                         href_node);
                }
                return entry;
        }
        return NULL;
}

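/*
 * Take a head's mutex without deadlocking against delayed_refs->lock: if the
 * trylock fails, drop the spinlock, sleep on the mutex and retake the
 * spinlock.  Returns -EAGAIN if the head was removed from the rbtree while we
 * slept, in which case the caller must look the head up again.
 */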
int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        if (mutex_trylock(&head->mutex))
                return 0;

        refcount_inc(&head->refs);
        spin_unlock(&delayed_refs->lock);

        mutex_lock(&head->mutex);
        spin_lock(&delayed_refs->lock);
        if (RB_EMPTY_NODE(&head->href_node)) {
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref_head(head);
                return -EAGAIN;
        }
        btrfs_put_delayed_ref_head(head);
        return 0;
}

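/*
 * Remove a ref from its head's ref tree and add list, drop the ref's
 * reference count and update the delayed ref counters.  The head's lock must
 * be held.
 */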
static inline void drop_delayed_ref(struct btrfs_delayed_ref_root *delayed_refs,
                                    struct btrfs_delayed_ref_head *head,
                                    struct btrfs_delayed_ref_node *ref)
{
        lockdep_assert_held(&head->lock);
        rb_erase_cached(&ref->ref_node, &head->ref_tree);
        RB_CLEAR_NODE(&ref->ref_node);
        if (!list_empty(&ref->add_list))
                list_del(&ref->add_list);
        ref->in_tree = 0;
        btrfs_put_delayed_ref(ref);
        atomic_dec(&delayed_refs->num_entries);
}

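/*
 * Try to merge @ref with the refs that follow it in the head's ref tree.
 * Opposing actions cancel each other out; refs with a sequence number at or
 * past @seq are left alone so the tree mod log still sees them.  Returns
 * true when @ref itself was dropped or swapped, in which case the caller
 * must restart its scan of the tree.
 */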
static bool merge_ref(struct btrfs_delayed_ref_root *delayed_refs,
                      struct btrfs_delayed_ref_head *head,
                      struct btrfs_delayed_ref_node *ref,
                      u64 seq)
{
        struct btrfs_delayed_ref_node *next;
        struct rb_node *node = rb_next(&ref->ref_node);
        bool done = false;

        while (!done && node) {
                int mod;

                next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                node = rb_next(node);
                if (seq && next->seq >= seq)
                        break;
                if (comp_refs(ref, next, false))
                        break;

                if (ref->action == next->action) {
                        mod = next->ref_mod;
                } else {
                        if (ref->ref_mod < next->ref_mod) {
                                swap(ref, next);
                                done = true;
                        }
                        mod = -next->ref_mod;
                }

                drop_delayed_ref(delayed_refs, head, next);
                ref->ref_mod += mod;
                if (ref->ref_mod == 0) {
                        drop_delayed_ref(delayed_refs, head, ref);
                        done = true;
                } else {
                        /*
                         * Can't have multiples of the same ref on a tree block.
                         */
                        WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
                                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
                }
        }

        return done;
}

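/*
 * Merge all mergeable refs in a head's ref tree.  Only metadata heads are
 * considered, and refs newer than the lowest tree mod log sequence number
 * are left untouched.
 */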
void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
                              struct btrfs_delayed_ref_root *delayed_refs,
                              struct btrfs_delayed_ref_head *head)
{
        struct btrfs_delayed_ref_node *ref;
        struct rb_node *node;
        u64 seq = 0;

        lockdep_assert_held(&head->lock);

        if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
                return;

        /* We don't have too many refs to merge for data. */
        if (head->is_data)
                return;

        seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
        for (node = rb_first_cached(&head->ref_tree); node;
             node = rb_next(node)) {
                ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
                if (seq && ref->seq >= seq)
                        continue;
                if (merge_ref(delayed_refs, head, ref, seq))
                        goto again;
        }
}

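/*
 * Return 1 if a delayed ref with sequence number @seq must be held back
 * because a tree mod log user still needs to see the state at or before
 * that sequence number.
 */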
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
        int ret = 0;
        u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

        if (min_seq != 0 && seq >= min_seq) {
                btrfs_debug(fs_info,
                            "holding back delayed_ref %llu, lowest is %llu",
                            seq, min_seq);
                ret = 1;
        }

        return ret;
}

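/*
 * Select the next ref head to process, starting at run_delayed_start and
 * wrapping around to the beginning of the tree at most once.  The returned
 * head is marked as processing.  Returns NULL when no unprocessed head is
 * left.
 */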
struct btrfs_delayed_ref_head *btrfs_select_ref_head(
                struct btrfs_delayed_ref_root *delayed_refs)
{
        struct btrfs_delayed_ref_head *head;

again:
        head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
                             true);
        if (!head && delayed_refs->run_delayed_start != 0) {
                delayed_refs->run_delayed_start = 0;
                head = find_first_ref_head(delayed_refs);
        }
        if (!head)
                return NULL;

        while (head->processing) {
                struct rb_node *node;

                node = rb_next(&head->href_node);
                if (!node) {
                        if (delayed_refs->run_delayed_start == 0)
                                return NULL;
                        delayed_refs->run_delayed_start = 0;
                        goto again;
                }
                head = rb_entry(node, struct btrfs_delayed_ref_head,
                                href_node);
        }

        head->processing = 1;
        WARN_ON(delayed_refs->num_heads_ready == 0);
        delayed_refs->num_heads_ready--;
        delayed_refs->run_delayed_start = head->bytenr +
                head->num_bytes;
        return head;
}

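/*
 * Remove a head from the delayed refs rbtree and update the counters.  Both
 * delayed_refs->lock and the head's lock must be held.
 */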
void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
                           struct btrfs_delayed_ref_head *head)
{
        lockdep_assert_held(&delayed_refs->lock);
        lockdep_assert_held(&head->lock);

        rb_erase_cached(&head->href_node, &delayed_refs->href_root);
        RB_CLEAR_NODE(&head->href_node);
        atomic_dec(&delayed_refs->num_entries);
        delayed_refs->num_heads--;
        if (head->processing == 0)
                delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the head's ref tree, or merge it with
 * an existing ref that compares equal.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_delayed_ref_root *root,
                              struct btrfs_delayed_ref_head *href,
                              struct btrfs_delayed_ref_node *ref)
{
        struct btrfs_delayed_ref_node *exist;
        int mod;
        int ret = 0;

        spin_lock(&href->lock);
        exist = tree_insert(&href->ref_tree, ref);
        if (!exist)
                goto inserted;

        /* Now we are sure we can merge */
        ret = 1;
        if (exist->action == ref->action) {
                mod = ref->ref_mod;
        } else {
                /* Need to change action */
                if (exist->ref_mod < ref->ref_mod) {
                        exist->action = ref->action;
                        mod = -exist->ref_mod;
                        exist->ref_mod = ref->ref_mod;
                        if (ref->action == BTRFS_ADD_DELAYED_REF)
                                list_add_tail(&exist->add_list,
                                              &href->ref_add_list);
                        else if (ref->action == BTRFS_DROP_DELAYED_REF) {
                                ASSERT(!list_empty(&exist->add_list));
                                list_del(&exist->add_list);
                        } else {
                                ASSERT(0);
                        }
                } else
                        mod = -ref->ref_mod;
        }
        exist->ref_mod += mod;

        /* remove the existing ref if its ref_mod drops to zero */
        if (exist->ref_mod == 0)
                drop_delayed_ref(root, href, exist);
        spin_unlock(&href->lock);
        return ret;
inserted:
        if (ref->action == BTRFS_ADD_DELAYED_REF)
                list_add_tail(&ref->add_list, &href->ref_add_list);
        atomic_inc(&root->num_entries);
        spin_unlock(&href->lock);
        return ret;
}

/*
 * helper function to update the accounting in the head ref.
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_delayed_ref_head *existing,
                         struct btrfs_delayed_ref_head *update)
{
        struct btrfs_delayed_ref_root *delayed_refs =
                &trans->transaction->delayed_refs;
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int old_ref_mod;

        BUG_ON(existing->is_data != update->is_data);

        spin_lock(&existing->lock);
        if (update->must_insert_reserved) {
                /*
                 * If the extent was freed and then reallocated before the
                 * delayed ref entries were processed, we can end up with an
                 * existing head ref without the must_insert_reserved flag set.
                 * Set it again here.
                 */
                existing->must_insert_reserved = update->must_insert_reserved;

                /*
                 * update the num_bytes so we make sure the accounting
                 * is done correctly
                 */
                existing->num_bytes = update->num_bytes;
        }

        if (update->extent_op) {
                if (!existing->extent_op) {
                        existing->extent_op = update->extent_op;
                } else {
                        if (update->extent_op->update_key) {
                                memcpy(&existing->extent_op->key,
                                       &update->extent_op->key,
                                       sizeof(update->extent_op->key));
                                existing->extent_op->update_key = true;
                        }
                        if (update->extent_op->update_flags) {
                                existing->extent_op->flags_to_set |=
                                        update->extent_op->flags_to_set;
                                existing->extent_op->update_flags = true;
                        }
                        btrfs_free_delayed_extent_op(update->extent_op);
                }
        }
        /*
         * Update the reference mod on the head to reflect this new operation.
         * We only need the lock for this case because we could be processing
         * it currently; for refs we just added we know we're a-ok.
         */
        old_ref_mod = existing->total_ref_mod;
        existing->ref_mod += update->ref_mod;
        existing->total_ref_mod += update->ref_mod;

        /*
         * If we are going from a positive ref mod to a negative or vice
         * versa we need to make sure to adjust pending_csums accordingly.
         */
        if (existing->is_data) {
                u64 csum_leaves =
                        btrfs_csum_bytes_to_leaves(fs_info,
                                                   existing->num_bytes);

                if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
                        delayed_refs->pending_csums -= existing->num_bytes;
                        btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
                }
                if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
                        delayed_refs->pending_csums += existing->num_bytes;
                        trans->delayed_ref_updates += csum_leaves;
                }
        }

        spin_unlock(&existing->lock);
}

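/*
 * Initialize a newly allocated head ref and, when a qgroup record is
 * supplied, fill it in so the extent can be traced for qgroups.  count_mod
 * reflects how this update changes the head's total ref count: 0 for a pure
 * head update, -1 for a drop and 1 for everything else.
 */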
static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
                                  struct btrfs_qgroup_extent_record *qrecord,
                                  u64 bytenr, u64 num_bytes, u64 ref_root,
                                  u64 reserved, int action, bool is_data,
                                  bool is_system)
{
        int count_mod = 1;
        int must_insert_reserved = 0;

        /* If reserved is provided, it must be a data extent. */
        BUG_ON(!is_data && reserved);

        /*
         * The head node stores the sum of all the mods, so dropping a ref
         * should drop the sum in the head node by one.
         */
        if (action == BTRFS_UPDATE_DELAYED_HEAD)
                count_mod = 0;
        else if (action == BTRFS_DROP_DELAYED_REF)
                count_mod = -1;

        /*
         * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
         * accounting when the extent is finally added, or if a later
         * modification deletes the delayed ref without ever inserting the
         * extent into the extent allocation tree.  ref->must_insert_reserved
         * is the flag used to record that accounting mods are required.
         *
         * Once we record must_insert_reserved, switch the action to
         * BTRFS_ADD_DELAYED_REF because other special casing is not required.
         */
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                must_insert_reserved = 1;
        else
                must_insert_reserved = 0;

        refcount_set(&head_ref->refs, 1);
        head_ref->bytenr = bytenr;
        head_ref->num_bytes = num_bytes;
        head_ref->ref_mod = count_mod;
        head_ref->must_insert_reserved = must_insert_reserved;
        head_ref->is_data = is_data;
        head_ref->is_system = is_system;
        head_ref->ref_tree = RB_ROOT_CACHED;
        INIT_LIST_HEAD(&head_ref->ref_add_list);
        RB_CLEAR_NODE(&head_ref->href_node);
        head_ref->processing = 0;
        head_ref->total_ref_mod = count_mod;
        spin_lock_init(&head_ref->lock);
        mutex_init(&head_ref->mutex);

        if (qrecord) {
                if (ref_root && reserved) {
                        qrecord->data_rsv = reserved;
                        qrecord->data_rsv_refroot = ref_root;
                }
                qrecord->bytenr = bytenr;
                qrecord->num_bytes = num_bytes;
                qrecord->old_roots = NULL;
        }
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
                     struct btrfs_delayed_ref_head *head_ref,
                     struct btrfs_qgroup_extent_record *qrecord,
                     int action, int *qrecord_inserted_ret)
{
        struct btrfs_delayed_ref_head *existing;
        struct btrfs_delayed_ref_root *delayed_refs;
        int qrecord_inserted = 0;

        delayed_refs = &trans->transaction->delayed_refs;

        /* Record qgroup extent info if provided */
        if (qrecord) {
                if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
                                        delayed_refs, qrecord))
                        kfree(qrecord);
                else
                        qrecord_inserted = 1;
        }

        trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

        existing = htree_insert(&delayed_refs->href_root,
                                &head_ref->href_node);
        if (existing) {
                update_existing_head_ref(trans, existing, head_ref);
                /*
                 * we've updated the existing ref, free the newly
                 * allocated ref
                 */
                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                head_ref = existing;
        } else {
                if (head_ref->is_data && head_ref->ref_mod < 0) {
                        delayed_refs->pending_csums += head_ref->num_bytes;
                        trans->delayed_ref_updates +=
                                btrfs_csum_bytes_to_leaves(trans->fs_info,
                                                           head_ref->num_bytes);
                }
                delayed_refs->num_heads++;
                delayed_refs->num_heads_ready++;
                atomic_inc(&delayed_refs->num_entries);
                trans->delayed_ref_updates++;
        }
        if (qrecord_inserted_ret)
                *qrecord_inserted_ret = qrecord_inserted;

        return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *                           modification to an extent.
 *
 * @fs_info:    Internal to the mounted filesystem mount structure.
 *
 * @ref:        The structure which is going to be initialized.
 *
 * @bytenr:     The logical address of the extent for which a modification is
 *              going to be recorded.
 *
 * @num_bytes:  Size of the extent whose modification is being recorded.
 *
 * @ref_root:   The id of the root where this modification has originated;
 *              this can be either one of the well-known metadata trees or
 *              the subvolume id which references this extent.
 *
 * @action:     Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *              BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:   Holds the type of the extent which is being recorded, can be
 *              one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *              when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *              BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
                                    struct btrfs_delayed_ref_node *ref,
                                    u64 bytenr, u64 num_bytes, u64 ref_root,
                                    int action, u8 ref_type)
{
        u64 seq = 0;

        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;

        if (is_fstree(ref_root))
                seq = atomic64_read(&fs_info->tree_mod_seq);

        refcount_set(&ref->refs, 1);
        ref->bytenr = bytenr;
        ref->num_bytes = num_bytes;
        ref->ref_mod = 1;
        ref->action = action;
        ref->is_head = 0;
        ref->in_tree = 1;
        ref->seq = seq;
        ref->type = ref_type;
        RB_CLEAR_NODE(&ref->ref_node);
        INIT_LIST_HEAD(&ref->add_list);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        bool is_system;
        int action = generic_ref->action;
        int level = generic_ref->tree_ref.level;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u8 ref_type;

        is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

        ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
                        return -ENOMEM;
                }
        }

        if (parent)
                ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
        else
                ref_type = BTRFS_TREE_BLOCK_REF_KEY;

        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                generic_ref->tree_ref.owning_root, action,
                                ref_type);
        ref->root = generic_ref->tree_ref.owning_root;
        ref->parent = parent;
        ref->level = level;

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
                              generic_ref->tree_ref.owning_root, 0, action,
                              false, is_system);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

        if (qrecord_inserted)
                btrfs_qgroup_trace_extent_post(trans, record);

        return 0;
}

/*
 * add a delayed data ref.  it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
                               struct btrfs_ref *generic_ref,
                               u64 reserved)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_qgroup_extent_record *record = NULL;
        int qrecord_inserted;
        int action = generic_ref->action;
        int ret;
        u64 bytenr = generic_ref->bytenr;
        u64 num_bytes = generic_ref->len;
        u64 parent = generic_ref->parent;
        u64 ref_root = generic_ref->data_ref.owning_root;
        u64 owner = generic_ref->data_ref.ino;
        u64 offset = generic_ref->data_ref.offset;
        u8 ref_type;

        ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
        if (!ref)
                return -ENOMEM;

        if (parent)
                ref_type = BTRFS_SHARED_DATA_REF_KEY;
        else
                ref_type = BTRFS_EXTENT_DATA_REF_KEY;
        init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
                                ref_root, action, ref_type);
        ref->root = ref_root;
        ref->parent = parent;
        ref->objectid = owner;
        ref->offset = offset;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref) {
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                return -ENOMEM;
        }

        if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
            !generic_ref->skip_qgroup) {
                record = kzalloc(sizeof(*record), GFP_NOFS);
                if (!record) {
                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                        kmem_cache_free(btrfs_delayed_ref_head_cachep,
                                        head_ref);
                        return -ENOMEM;
                }
        }

        init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
                              reserved, action, true, false);
        head_ref->extent_op = NULL;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        /*
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
        head_ref = add_delayed_ref_head(trans, head_ref, record,
                                        action, &qrecord_inserted);

        ret = insert_delayed_ref(delayed_refs, head_ref, &ref->node);
        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);

        trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
                                   action == BTRFS_ADD_DELAYED_EXTENT ?
                                   BTRFS_ADD_DELAYED_REF : action);
        if (ret > 0)
                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

        if (qrecord_inserted)
                return btrfs_qgroup_trace_extent_post(trans, record);
        return 0;
}

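/*
 * Record an extent op (key and/or flags update) for an extent without adding
 * or dropping any references.  This queues a head ref with the
 * BTRFS_UPDATE_DELAYED_HEAD action, which leaves the ref count unchanged.
 */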
int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_delayed_ref_head *head_ref;
        struct btrfs_delayed_ref_root *delayed_refs;

        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
        if (!head_ref)
                return -ENOMEM;

        init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
                              BTRFS_UPDATE_DELAYED_HEAD, false, false);
        head_ref->extent_op = extent_op;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);

        add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
                             NULL);

        spin_unlock(&delayed_refs->lock);

        /*
         * Need to update the delayed_refs_rsv with any changes we may have
         * made.
         */
        btrfs_update_delayed_refs_rsv(trans);
        return 0;
}

/*
 * This does a simple search for the head node for a given extent.  Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
        lockdep_assert_held(&delayed_refs->lock);

        return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
        kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
        kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
        kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
        btrfs_delayed_ref_head_cachep = kmem_cache_create(
                                "btrfs_delayed_ref_head",
                                sizeof(struct btrfs_delayed_ref_head), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_ref_head_cachep)
                goto fail;

        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_tree_ref",
                                sizeof(struct btrfs_delayed_tree_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_tree_ref_cachep)
                goto fail;

        btrfs_delayed_data_ref_cachep = kmem_cache_create(
                                "btrfs_delayed_data_ref",
                                sizeof(struct btrfs_delayed_data_ref), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_data_ref_cachep)
                goto fail;

        btrfs_delayed_extent_op_cachep = kmem_cache_create(
                                "btrfs_delayed_extent_op",
                                sizeof(struct btrfs_delayed_extent_op), 0,
                                SLAB_MEM_SPREAD, NULL);
        if (!btrfs_delayed_extent_op_cachep)
                goto fail;

        return 0;
fail:
        btrfs_delayed_ref_exit();
        return -ENOMEM;
}