// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code. This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info. This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field. Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation. There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's. These are basically buckets for every different type of
 *   metadata reservation we have. You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size. These are the worst case calculations we use based
 *   on the number of items we will want to modify. We have one for changing
 *   items, and one for inserting new items. Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
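 *
 *   As a rough sketch (the authoritative helpers are declared in the headers,
 *   ctree.h at the time of writing), both boil down to a worst case walk of
 *   the tree per item:
 *
 *     btrfs_calc_insert_metadata_size(fs_info, n) ~= nodesize * BTRFS_MAX_LEVEL * 2 * n
 *     btrfs_calc_metadata_size(fs_info, n)        ~= nodesize * BTRFS_MAX_LEVEL * n
 *
 *   Inserting is 2x because an insert may split every level of the tree,
 *   while a plain modification only needs to COW each level once.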
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
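 *
 *   As a hedged illustration (signatures as declared later in this file,
 *   error handling elided), a metadata caller typically does:
 *
 *     ret = btrfs_reserve_metadata_bytes(root, block_rsv, num_bytes,
 *                                        BTRFS_RESERVE_FLUSH_ALL);
 *     if (ret == -ENOSPC)
 *             return ret;  (nothing more could be reserved or flushed)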
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough metadata space. In this case we will make a reservation as follows:
 *
 *   ->reserve
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes. This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed. If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check ->bytes: if it is 0 we got our reservation and we can carry on,
 *     if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs. Each of these delayed
 *   things however holds reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode. Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes. We keep these delayed items to coalesce these operations
 *     into a single operation done on demand. These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation. We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation. Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items. Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     may_commit_transaction() is the ultimate arbiter on whether we commit the
 *     transaction or not. In order to avoid constantly churning we do all the
 *     above flushing first and then commit the transaction as the last resort.
 *     However we need to take into account things like pinned space that would
 *     be freed, plus any delayed work we may not have gotten rid of in the case
 *     of metadata.
 *
 *   FORCE_COMMIT_TRANS
 *     For use by the preemptive flusher. We use this to bypass the ticketing
 *     checks in may_commit_transaction, as we have more information about the
 *     overall state of the system and may want to commit the transaction ahead
 *     of actual ENOSPC conditions.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space. This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space. If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */
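
/*
 * A condensed, informal sketch of the overcommit logic implemented by
 * calc_available_free_space() and btrfs_can_overcommit() below:
 *
 *   avail = free_chunk_space / raid_factor;
 *   avail >>= (flush == BTRFS_RESERVE_FLUSH_ALL) ? 3 : 1;
 *   can_overcommit = used + bytes < total_bytes + avail;
 *
 * i.e. when a caller can flush everything we only overcommit by up to 1/8 of
 * the unallocated space, otherwise by up to 1/2 of it.
 */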

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * After adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}
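
/*
 * Allocate and initialize a new space_info for the given block group @flags
 * and link it into fs_info->space_info.
 */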
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	ret = percpu_counter_init(&space_info->total_bytes_pinned, 0,
				  GFP_KERNEL);
	if (ret) {
		kfree(space_info);
		return ret;
	}

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
			     u64 total_bytes, u64 bytes_used,
			     u64 bytes_readonly, u64 bytes_zone_unusable,
			     struct btrfs_space_info **space_info)
{
	struct btrfs_space_info *found;
	int factor;

	factor = btrfs_bg_type_to_factor(flags);

	found = btrfs_find_space_info(info, flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += total_bytes;
	found->disk_total += total_bytes * factor;
	found->bytes_used += bytes_used;
	found->disk_used += bytes_used * factor;
	found->bytes_readonly += bytes_readonly;
	found->bytes_zone_unusable += bytes_zone_unusable;
	if (total_bytes > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);
	*space_info = found;
}
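
/*
 * Find the space_info tracking the given block group type @flags; returns
 * NULL if none exists.
 */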
struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info,
			  enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable. For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math.
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < space_info->total_bytes + avail)
		return 1;
	return 0;
}
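
/*
 * Remove a ticket from its space_info list and update the reclaim_size
 * accounting that the flushers use to size their reclaim work.
 */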
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= space_info->total_bytes) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
		   info->flags,
		   info->total_bytes - btrfs_space_info_used(info, true),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly, info->bytes_zone_unusable);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->zone_unusable,
			cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}
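
/*
 * Convert a byte count we want reclaimed into a conservative number of
 * metadata items to flush, using the worst case insertion size of one item.
 */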
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * Shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	/* Calc the number of pages we need to flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly, so increase the amount to reclaim by 2x in order to
		 * make sure we're flushing enough delalloc to hopefully reclaim
		 * some metadata reservations.
		 */
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
		to_reclaim = items * EXTENT_SIZE_PER_ITEM;
	}

	trans = (struct btrfs_trans_handle *)current->journal_info;

	delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Possibly commit the transaction if it's safe to do so
 *
 * @fs_info:    the filesystem
 * @space_info: space_info we are checking for commit, either data or metadata
 *
 * This will check to make sure that committing the transaction will actually
 * get us somewhere and then commit the transaction if it does. Otherwise it
 * will return -ENOSPC.
 */
static int may_commit_transaction(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket = NULL;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
	struct btrfs_trans_handle *trans;
	u64 reclaim_bytes = 0;
	u64 bytes_needed = 0;
	u64 cur_free_bytes = 0;

	trans = (struct btrfs_trans_handle *)current->journal_info;
	if (trans)
		return -EAGAIN;

	spin_lock(&space_info->lock);
	cur_free_bytes = btrfs_space_info_used(space_info, true);
	if (cur_free_bytes < space_info->total_bytes)
		cur_free_bytes = space_info->total_bytes - cur_free_bytes;
	else
		cur_free_bytes = 0;

	if (!list_empty(&space_info->priority_tickets))
		ticket = list_first_entry(&space_info->priority_tickets,
					  struct reserve_ticket, list);
	else if (!list_empty(&space_info->tickets))
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);
	if (ticket)
		bytes_needed = ticket->bytes;

	if (bytes_needed > cur_free_bytes)
		bytes_needed -= cur_free_bytes;
	else
		bytes_needed = 0;
	spin_unlock(&space_info->lock);

	if (!bytes_needed)
		return 0;

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	/*
	 * See if there is enough pinned space to make this reservation, or if
	 * we have block groups that are going to be freed, allowing us to
	 * possibly do a chunk allocation the next loop through.
	 */
	if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags) ||
	    __percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) >= 0)
		goto commit;

	/*
	 * See if there is some space in the delayed insertion reserve for this
	 * reservation. If the space_info's don't match (like for DATA or
	 * SYSTEM) then just go enospc, reclaiming this space won't recover any
	 * space to satisfy those reservations.
	 */
	if (space_info != delayed_rsv->space_info)
		goto enospc;

	spin_lock(&delayed_rsv->lock);
	reclaim_bytes += delayed_rsv->reserved;
	spin_unlock(&delayed_rsv->lock);

	spin_lock(&delayed_refs_rsv->lock);
	reclaim_bytes += delayed_refs_rsv->reserved;
	spin_unlock(&delayed_refs_rsv->lock);

	spin_lock(&trans_rsv->lock);
	reclaim_bytes += trans_rsv->reserved;
	spin_unlock(&trans_rsv->lock);

	if (reclaim_bytes >= bytes_needed)
		goto commit;
	bytes_needed -= reclaim_bytes;

	if (__percpu_counter_compare(&space_info->total_bytes_pinned,
				     bytes_needed,
				     BTRFS_TOTAL_BYTES_PINNED_BATCH) < 0)
		goto enospc;

commit:
	return btrfs_commit_transaction(trans);
enospc:
	btrfs_end_transaction(trans);
	return -ENOSPC;
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
			struct btrfs_space_info *space_info, u64 num_bytes,
			enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
		shrink_delalloc(fs_info, space_info, num_bytes,
				state == FLUSH_DELALLOC_WAIT, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);
		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ret = may_commit_transaction(fs_info, space_info);
		break;
	case FORCE_COMMIT_TRANS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

static u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space. If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	if (space_info->total_bytes + avail < used)
		to_reclaim += used - (space_info->total_bytes + avail);

	return to_reclaim;
}

static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
	u64 used;

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     do something;
	 *
	 * because this doesn't quite work how we want. If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher. Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8. Practically speaking that means
	 * the following:
	 *
	 * Amount of RAM        Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                  128GiB
	 *        128GiB                   512MiB                   64GiB
	 *         64GiB                   256MiB                   32GiB
	 *         32GiB                   128MiB                   16GiB
	 *         16GiB                    64MiB                    8GiB
	 *
	 * This is the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */
	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < space_info->total_bytes)
		thresh += space_info->total_bytes - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around. Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish. In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from. In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing. In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing. This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
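
/*
 * Try to satisfy the ticket's reservation out of the global block reserve,
 * refusing to take the global reserve below 1/10th of its size. Returns
 * true if the ticket was satisfied this way.
 */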
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets. The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets. This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	u64 first_ticket_bytes = 0;

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (ticket->steal &&
		    steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		/*
		 * may_commit_transaction will avoid committing the transaction
		 * if it doesn't feel like the space reclaimed by the commit
		 * would result in the ticket succeeding. However if we have a
		 * smaller ticket in the queue it may be small enough to be
		 * satisfied by committing the transaction, so if any
		 * subsequent ticket is smaller than the first ticket go ahead
		 * and send us back for another loop through the enospc flushing
		 * code.
		 */
		if (first_ticket_bytes == 0)
			first_ticket_bytes = ticket->bytes;
		else if (first_ticket_bytes > ticket->bytes)
			return true;

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to. We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space. Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim. We would rather use that than possibly create an
		 * underutilized metadata chunk. So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction. If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets. The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use. If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;
		spin_unlock(&space_info->lock);

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch. Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = FORCE_COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4. If it takes us down to 0,
		 * reclaim one item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier. Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent. This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space. But again this space is not
 *   immediately reusable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * FLUSH_DELAYED_REFS
 *   The above two cases generate delayed refs that will affect
 *   ->total_bytes_pinned. However this counter can be inconsistent with
 *   reality if there are outstanding delayed refs. This is because we adjust
 *   the counter based solely on the current set of delayed refs and disregard
 *   any on-disk state which might include more refs. So for example, if we
 *   have an extent with 2 references, but we only drop 1, we'll see that there
 *   is a negative delayed ref count for the extent and assume that the space
 *   will be freed, and thus increase ->total_bytes_pinned.
 *
 *   Running the delayed refs gives us the actual real view of what will be
 *   freed at the transaction commit time. This stage will not actually free
 *   space for us, it just makes sure that may_commit_transaction() has all of
 *   the information it needs to make the right decision.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by the previous
 *   two stages. We will not commit the transaction if we don't think we're
 *   likely to satisfy our request, which means if our current free space +
 *   total_bytes_pinned < reservation we will not commit. This is why the
 *   previous states are actually important, to make sure we know for sure
 *   whether committing the transaction will allow us to make progress.
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_WAIT,
	RUN_DELAYED_IPUTS,
	FLUSH_DELAYED_REFS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}
		}
		spin_unlock(&space_info->lock);
	}
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		spin_unlock(&space_info->lock);
		return;
	}
	spin_unlock(&space_info->lock);

	flush_state = 0;
	do {
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	} while (flush_state < states_nr);
}

static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
		spin_unlock(&space_info->lock);
	}
}

static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	spin_lock(&space_info->lock);
	ret = ticket->error;
	if (ticket->bytes || ticket->error) {
		/*
		 * We were a priority ticket, so we need to delete ourselves
		 * from the list. Because we could have other priority tickets
		 * behind us that require less space, run
		 * btrfs_try_granting_tickets() to see if their reservations can
		 * now be made.
		 */
		if (!list_empty(&ticket->list)) {
			remove_ticket(space_info, ticket);
			btrfs_try_granting_tickets(fs_info, space_info);
		}

		if (!ret)
			ret = -ENOSPC;
	}
	spin_unlock(&space_info->lock);
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * states.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
	       (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us. We
	 * need to clamp specifically to keep up with dirtying buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes. If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}

/**
 * Try to reserve bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump everybody, they can
	 * generally handle ENOSPC in a different way, so treat them the same as
	 * normal flushers when it comes to skipping pending tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit) OR call
	 * can_overcommit() to ensure we can overcommit to continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= space_info->total_bytes) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up. Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @root:       the root we're allocating for
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
 * regain reservations will be made and this will fail if there is not enough
 * space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_root *root,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC &&
	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
		if (block_rsv != global_rsv &&
		    !btrfs_block_rsv_use_bytes(global_rsv, orig_bytes))
			ret = 0;
	}
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}

/**
 * Try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info. If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}