1 // SPDX-License-Identifier: GPL-2.0
5 #include "space-info.h"
8 #include "free-space-cache.h"
9 #include "ordered-data.h"
10 #include "transaction.h"
11 #include "block-group.h"
15 * HOW DOES SPACE RESERVATION WORK
17 * If you want to know about delalloc specifically, there is a separate comment
18 * for that with the delalloc code. This comment is about how the whole system works generally.
23 * 1) space_info. This is the ultimate arbiter of how much space we can use.
24 * There's a description of the bytes_ fields with the struct declaration,
25 * refer to that for specifics on each field. Suffice it to say that for
26 * reservations we care about total_bytes - SUM(space_info->bytes_) when
27 * determining if there is space to make an allocation. There is a space_info
28 * for METADATA, SYSTEM, and DATA areas.
30 * 2) block_rsv's. These are basically buckets for every different type of
31 * metadata reservation we have. You can see the comment in the block_rsv
32 * code on the rules for each type, but generally block_rsv->reserved is how
33 * much space is accounted for in space_info->bytes_may_use.
35 * 3) btrfs_calc*_size. These are the worst case calculations we use, based
36 * on the number of items we will want to modify. We have one for changing
37 * items, and one for inserting new items. Generally we use these helpers to
38 * determine the size of the block reserves, and then use the actual bytes
39 * values to adjust the space_info counters.
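*
* As a rough worked example (assuming the common 16KiB nodesize): inserting a
* single item is currently costed at nodesize * BTRFS_MAX_LEVEL * 2, i.e.
* 16KiB * 8 * 2 = 256KiB of worst case metadata, which is what
* btrfs_calc_insert_metadata_size(fs_info, 1) comes out to.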
41 * MAKING RESERVATIONS, THE NORMAL CASE
43 * We call into either btrfs_reserve_data_bytes() or
44 * btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
45 * num_bytes we want to reserve.
48 * space_info->bytes_may_use += num_bytes
51 * Call btrfs_add_reserved_bytes() which does
52 * space_info->bytes_may_use -= num_bytes
53 * space_info->bytes_reserved += extent_bytes
56 * Call btrfs_update_block_group() which does
57 * space_info->bytes_reserved -= extent_bytes
58 * space_info->bytes_used += extent_bytes
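*
* A minimal caller-side sketch of the normal case (hypothetical caller, error
* handling and the block_rsv bookkeeping elided; every name below is used
* elsewhere in this file):
*
*   u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
*
*   ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes,
*                                      BTRFS_RESERVE_FLUSH_ALL);
*   if (ret)
*           return ret;
*
* If this fails nothing was accounted; if it succeeds, num_bytes is sitting in
* space_info->bytes_may_use and the later extent allocation and block group
* update move it to ->bytes_reserved and then ->bytes_used as described above.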
60 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
62 * Assume we are unable to simply make the reservation because we do not have enough metadata space.
66 * create a reserve_ticket with ->bytes set to our reservation, add it to
67 * the tail of space_info->tickets, kick async flush thread
69 * ->handle_reserve_ticket
70 * wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set on the ticket
73 * -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
74 * Flushes various things attempting to free up space.
76 * -> btrfs_try_granting_tickets()
77 * This is called by anything that either subtracts space from
78 * space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
79 * space_info->total_bytes. This loops through the ->priority_tickets and
80 * then the ->tickets list checking to see if the reservation can be
81 * completed. If it can the space is added to space_info->bytes_may_use and
82 * the ticket is woken up.
85 * Check if ->bytes == 0, if it is we got our reservation and we can carry
86 * on, if not return the appropriate error (ENOSPC, but can be EINTR if we were interrupted).
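*
* A rough sketch of the other half, the path that returns space and grants
* tickets (this is a schematic pattern, not a literal call site; it is
* essentially what btrfs_space_info_free_bytes_may_use() in space-info.h does):
*
*   spin_lock(&space_info->lock);
*   btrfs_space_info_update_bytes_may_use(fs_info, space_info, -num_bytes);
*   btrfs_try_granting_tickets(fs_info, space_info);
*   spin_unlock(&space_info->lock);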
89 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
91 * Same as the above, except we add ourselves to the
92 * space_info->priority_tickets, and we do not use ticket->wait, we simply
93 * call flush_space() ourselves for the states that are safe for us to call
94 * without deadlocking and hope for the best.
98 * Generally speaking we will have two cases for each state, a "nice" state
99 * and an "ALL THE THINGS" state. In btrfs we delay a lot of work in order to
100 * reduce the locking overhead on the various trees, and even to keep from
101 * doing any work at all in the case of delayed refs. Each of these delayed
102 * things however hold reservations, and so letting them run allows us to
103 * reclaim space so we can make new reservations.
105 * FLUSH_DELAYED_ITEMS
106 * Every inode has a delayed item to update the inode. Take a simple write
107 * for example, we would update the inode item at write time to update the
108 * mtime, and then again at finish_ordered_io() time in order to update the
109 * isize or bytes. We keep these delayed items to coalesce these operations
110 * into a single operation done on demand. These are an easy way to reclaim metadata space.
114 * Look at the delalloc comment to get an idea of how much space is reserved
115 * for delayed allocation. We can reclaim some of this space simply by
116 * running delalloc, but usually we need to wait for ordered extents to
117 * reclaim the bulk of this space.
120 * We have a block reserve for the outstanding delayed refs space, and every
121 * delayed ref operation holds a reservation. Running these is a quick way
122 * to reclaim space, but we want to hold this until the end because COW can
123 * churn a lot and we can avoid making some extent tree modifications if we
124 * are able to delay for as long as possible.
127 * We will skip this the first time through space reservation, because of
128 * overcommit, and because we don't want to have a lot of useless metadata space when
129 * our worst case reservations will likely never come true.
132 * If we're freeing inodes we're likely freeing checksums, file extent
133 * items, and extent tree items. Loads of space could be freed up by these
134 * operations, however they won't be usable until the transaction commits.
137 * This will commit the transaction. Historically we had a lot of logic
138 * surrounding whether or not we'd commit the transaction, but this was born
139 * out of a pre-tickets era where we could end up committing the transaction
140 * thousands of times in a row without making progress. Now thanks to our
141 * ticketing system we know if we're not making progress and can error
142 * everybody out after a few commits rather than burning the disk hoping for
143 * a different answer.
147 * Because we hold so many reservations for metadata we will allow you to
148 * reserve more space than is currently free in the currently allocated
149 * metadata space. This only happens with metadata, data does not allow overcommitting.
152 * You can see the current logic for when we allow overcommit in
153 * btrfs_can_overcommit(), but it only applies to unallocated space. If there
154 * is no unallocated space to be had, all reservations are kept within the
155 * free space in the allocated metadata chunks.
157 * Because of overcommitting, you generally want to use the
158 * btrfs_can_overcommit() logic for metadata allocations, as it does the right
159 * thing with or without extra unallocated space.
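*
* As an illustrative example (made up numbers, a single device profile so the
* factor is 1, and BTRFS_RESERVE_FLUSH_ALL so only 1/8th of the unallocated
* space is considered): with 10GiB of allocated metadata chunks, 9GiB already
* accounted in the bytes_ counters and 16GiB of unallocated device space, a
* 2GiB reservation is still allowed because 9GiB + 2GiB < 10GiB + 16GiB/8.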
162 u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
163 bool may_use_included)
166 return s_info->bytes_used + s_info->bytes_reserved +
167 s_info->bytes_pinned + s_info->bytes_readonly +
168 s_info->bytes_zone_unusable +
169 (may_use_included ? s_info->bytes_may_use : 0);
173 * after adding space to the filesystem, we need to clear the full flags
174 * on all the space infos.
176 void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
178 struct list_head *head = &info->space_info;
179 struct btrfs_space_info *found;
181 list_for_each_entry(found, head, list)
186 * Block groups with more than this percentage of unusable space will be
187 * scheduled for background reclaim.
189 #define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH (75)
192 * Calculate chunk size depending on volume type (regular or zoned).
194 static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
196 if (btrfs_is_zoned(fs_info))
197 return fs_info->zone_size;
199 ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
201 if (flags & BTRFS_BLOCK_GROUP_DATA)
203 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
206 /* Handle BTRFS_BLOCK_GROUP_METADATA */
207 if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
214 * Update default chunk size.
216 void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
219 WRITE_ONCE(space_info->chunk_size, chunk_size);
222 static int create_space_info(struct btrfs_fs_info *info, u64 flags)
225 struct btrfs_space_info *space_info;
229 space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
233 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
234 INIT_LIST_HEAD(&space_info->block_groups[i]);
235 init_rwsem(&space_info->groups_sem);
236 spin_lock_init(&space_info->lock);
237 space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
238 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
239 INIT_LIST_HEAD(&space_info->ro_bgs);
240 INIT_LIST_HEAD(&space_info->tickets);
241 INIT_LIST_HEAD(&space_info->priority_tickets);
242 space_info->clamp = 1;
243 btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));
245 if (btrfs_is_zoned(info))
246 space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;
248 ret = btrfs_sysfs_add_space_info_type(info, space_info);
252 list_add(&space_info->list, &info->space_info);
253 if (flags & BTRFS_BLOCK_GROUP_DATA)
254 info->data_sinfo = space_info;
259 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
261 struct btrfs_super_block *disk_super;
267 disk_super = fs_info->super_copy;
268 if (!btrfs_super_root(disk_super))
271 features = btrfs_super_incompat_flags(disk_super);
272 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
275 flags = BTRFS_BLOCK_GROUP_SYSTEM;
276 ret = create_space_info(fs_info, flags);
281 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
282 ret = create_space_info(fs_info, flags);
284 flags = BTRFS_BLOCK_GROUP_METADATA;
285 ret = create_space_info(fs_info, flags);
289 flags = BTRFS_BLOCK_GROUP_DATA;
290 ret = create_space_info(fs_info, flags);
296 void btrfs_update_space_info(struct btrfs_fs_info *info, u64 flags,
297 u64 total_bytes, u64 bytes_used,
298 u64 bytes_readonly, u64 bytes_zone_unusable,
299 bool active, struct btrfs_space_info **space_info)
301 struct btrfs_space_info *found;
304 factor = btrfs_bg_type_to_factor(flags);
306 found = btrfs_find_space_info(info, flags);
308 spin_lock(&found->lock);
309 found->total_bytes += total_bytes;
311 found->active_total_bytes += total_bytes;
312 found->disk_total += total_bytes * factor;
313 found->bytes_used += bytes_used;
314 found->disk_used += bytes_used * factor;
315 found->bytes_readonly += bytes_readonly;
316 found->bytes_zone_unusable += bytes_zone_unusable;
319 btrfs_try_granting_tickets(info, found);
320 spin_unlock(&found->lock);
324 struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
327 struct list_head *head = &info->space_info;
328 struct btrfs_space_info *found;
330 flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
332 list_for_each_entry(found, head, list) {
333 if (found->flags & flags)
339 static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
340 struct btrfs_space_info *space_info,
341 enum btrfs_reserve_flush_enum flush)
347 if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
348 profile = btrfs_system_alloc_profile(fs_info);
350 profile = btrfs_metadata_alloc_profile(fs_info);
352 avail = atomic64_read(&fs_info->free_chunk_space);
355 * If we have dup, raid1 or raid10 then only half of the free
356 * space is actually usable. For raid56, the space info used
357 * doesn't include the parity drive, so we don't have to account for it.
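*
* Illustrative example (made up numbers): with a RAID1 metadata profile the
* factor is 2, so 100GiB of remaining free_chunk_space only counts as 50GiB
* of usable space here.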
360 factor = btrfs_bg_type_to_factor(profile);
361 avail = div_u64(avail, factor);
364 * If we aren't flushing all things, let us overcommit up to
365 * half of the space. If we can flush, don't let us overcommit
366 * too much, let it overcommit up to 1/8 of the space.
368 if (flush == BTRFS_RESERVE_FLUSH_ALL)
375 static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
376 struct btrfs_space_info *space_info)
379 * On a regular filesystem, all total_bytes are always writable. On a zoned
380 * filesystem, there may be a limitation imposed by max_active_zones.
381 * For metadata allocation, we cannot finish an existing active block
382 * group to avoid a deadlock. Thus, we need to consider only the active
383 * groups to be writable for metadata space.
385 if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
386 return space_info->total_bytes;
388 return space_info->active_total_bytes;
391 int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
392 struct btrfs_space_info *space_info, u64 bytes,
393 enum btrfs_reserve_flush_enum flush)
398 /* Don't overcommit when in mixed mode */
399 if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
402 used = btrfs_space_info_used(space_info, true);
403 if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
406 avail = calc_available_free_space(fs_info, space_info, flush);
408 if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
413 static void remove_ticket(struct btrfs_space_info *space_info,
414 struct reserve_ticket *ticket)
416 if (!list_empty(&ticket->list)) {
417 list_del_init(&ticket->list);
418 ASSERT(space_info->reclaim_size >= ticket->bytes);
419 space_info->reclaim_size -= ticket->bytes;
424 * This is for space we already have accounted in space_info->bytes_may_use, so
425 * basically when we're returning space from block_rsv's.
427 void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
428 struct btrfs_space_info *space_info)
430 struct list_head *head;
431 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
433 lockdep_assert_held(&space_info->lock);
435 head = &space_info->priority_tickets;
437 while (!list_empty(head)) {
438 struct reserve_ticket *ticket;
439 u64 used = btrfs_space_info_used(space_info, true);
441 ticket = list_first_entry(head, struct reserve_ticket, list);
443 /* Check and see if our ticket can be satisfied now. */
444 if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
445 btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
447 btrfs_space_info_update_bytes_may_use(fs_info,
450 remove_ticket(space_info, ticket);
452 space_info->tickets_id++;
453 wake_up(&ticket->wait);
459 if (head == &space_info->priority_tickets) {
460 head = &space_info->tickets;
461 flush = BTRFS_RESERVE_FLUSH_ALL;
466 #define DUMP_BLOCK_RSV(fs_info, rsv_name) \
468 struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name; \
469 spin_lock(&__rsv->lock); \
470 btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu", \
471 __rsv->size, __rsv->reserved); \
472 spin_unlock(&__rsv->lock); \
475 static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
476 struct btrfs_space_info *info)
478 lockdep_assert_held(&info->lock);
480 /* The free space could be negative in case of overcommit */
481 btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
483 (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
484 info->full ? "" : "not ");
486 "space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
487 info->total_bytes, info->bytes_used, info->bytes_pinned,
488 info->bytes_reserved, info->bytes_may_use,
489 info->bytes_readonly, info->bytes_zone_unusable);
491 DUMP_BLOCK_RSV(fs_info, global_block_rsv);
492 DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
493 DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
494 DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
495 DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);
499 void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
500 struct btrfs_space_info *info, u64 bytes,
501 int dump_block_groups)
503 struct btrfs_block_group *cache;
506 spin_lock(&info->lock);
507 __btrfs_dump_space_info(fs_info, info);
508 spin_unlock(&info->lock);
510 if (!dump_block_groups)
513 down_read(&info->groups_sem);
515 list_for_each_entry(cache, &info->block_groups[index], list) {
516 spin_lock(&cache->lock);
518 "block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
519 cache->start, cache->length, cache->used, cache->pinned,
520 cache->reserved, cache->zone_unusable,
521 cache->ro ? "[readonly]" : "");
522 spin_unlock(&cache->lock);
523 btrfs_dump_free_space(cache, bytes);
525 if (++index < BTRFS_NR_RAID_TYPES)
527 up_read(&info->groups_sem);
530 static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
536 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
537 nr = div64_u64(to_reclaim, bytes);
543 #define EXTENT_SIZE_PER_ITEM SZ_256K
546 * shrink metadata reservation for delalloc
548 static void shrink_delalloc(struct btrfs_fs_info *fs_info,
549 struct btrfs_space_info *space_info,
550 u64 to_reclaim, bool wait_ordered,
553 struct btrfs_trans_handle *trans;
560 delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
561 ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
562 if (delalloc_bytes == 0 && ordered_bytes == 0)
565 /* Calc the number of pages we need to flush for space reservation */
566 if (to_reclaim == U64_MAX) {
570 * to_reclaim is set to however much metadata we need to
571 * reclaim, but reclaiming that much data doesn't really track
572 * exactly. What we really want to do is reclaim a full inode's
573 * worth of reservations, however that's not available to us
574 * here. We will take a fraction of the delalloc bytes for our
575 * flushing loops and hope for the best. Delalloc will expand
576 * the amount we write to cover an entire dirty extent, which
577 * will reclaim the metadata reservation for that range. If
578 * it's not enough, subsequent flush stages will be more aggressive.
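*
* Illustrative example (made up numbers): if there is 1GiB of outstanding
* delalloc and we were asked to reclaim 16MiB, to_reclaim is bumped to
* max(16MiB, 1GiB >> 3) = 128MiB, and both the page count we kick off per
* loop and the number of ordered extents we wait on are sized from that.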
581 to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
582 items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
585 trans = current->journal_info;
588 * If we are doing more ordered than delalloc we need to just wait on
589 * ordered extents, otherwise we'll waste time trying to flush delalloc
590 * that likely won't give us the space back we need.
592 if (ordered_bytes > delalloc_bytes && !for_preempt)
596 while ((delalloc_bytes || ordered_bytes) && loops < 3) {
597 u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
598 long nr_pages = min_t(u64, temp, LONG_MAX);
601 btrfs_start_delalloc_roots(fs_info, nr_pages, true);
604 * We need to make sure any outstanding async pages are now
605 * processed before we continue. This is because things like
606 * sync_inode() try to be smart and skip writing if the inode is
607 * marked clean. We don't use filemap_fdatawrite() for flushing
608 * because we want to control how many pages we write out at a
609 * time, thus this is the only safe way to make sure we've
610 * waited for outstanding compressed workers to have started
611 * their jobs and thus have ordered extents set up properly.
613 * This exists because we do not want to wait for each
614 * individual inode to finish its async work, we simply want to
615 * start the IO on everybody, and then come back here and wait
616 * for all of the async work to catch up. Once we're done with
617 * that we know we'll have ordered extents for everything and we
618 * can decide if we wait for that or not.
620 * If we choose to replace this in the future, make absolutely
621 * sure that the proper waiting is being done in the async case,
622 * as there have been bugs in that area before.
624 async_pages = atomic_read(&fs_info->async_delalloc_pages);
629 * We don't want to wait forever, if we wrote fewer pages in this
630 * loop than we have outstanding, only wait for that number of
631 * pages, otherwise we can wait for all async pages to finish before continuing.
634 if (async_pages > nr_pages)
635 async_pages -= nr_pages;
638 wait_event(fs_info->async_submit_wait,
639 atomic_read(&fs_info->async_delalloc_pages) <=
643 if (wait_ordered && !trans) {
644 btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
646 time_left = schedule_timeout_killable(1);
652 * If we are flushing for preemption we just want a one-shot of delalloc
653 * flushing so we can stop flushing if we decide we don't need it anymore.
659 spin_lock(&space_info->lock);
660 if (list_empty(&space_info->tickets) &&
661 list_empty(&space_info->priority_tickets)) {
662 spin_unlock(&space_info->lock);
665 spin_unlock(&space_info->lock);
667 delalloc_bytes = percpu_counter_sum_positive(
668 &fs_info->delalloc_bytes);
669 ordered_bytes = percpu_counter_sum_positive(
670 &fs_info->ordered_bytes);
675 * Try to flush some data based on policy set by @state. This is only advisory
676 * and may fail for various reasons. The caller is supposed to examine the
677 * state of @space_info to detect the outcome.
679 static void flush_space(struct btrfs_fs_info *fs_info,
680 struct btrfs_space_info *space_info, u64 num_bytes,
681 enum btrfs_flush_state state, bool for_preempt)
683 struct btrfs_root *root = fs_info->tree_root;
684 struct btrfs_trans_handle *trans;
689 case FLUSH_DELAYED_ITEMS_NR:
690 case FLUSH_DELAYED_ITEMS:
691 if (state == FLUSH_DELAYED_ITEMS_NR)
692 nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
696 trans = btrfs_join_transaction(root);
698 ret = PTR_ERR(trans);
701 ret = btrfs_run_delayed_items_nr(trans, nr);
702 btrfs_end_transaction(trans);
705 case FLUSH_DELALLOC_WAIT:
706 case FLUSH_DELALLOC_FULL:
707 if (state == FLUSH_DELALLOC_FULL)
709 shrink_delalloc(fs_info, space_info, num_bytes,
710 state != FLUSH_DELALLOC, for_preempt);
712 case FLUSH_DELAYED_REFS_NR:
713 case FLUSH_DELAYED_REFS:
714 trans = btrfs_join_transaction(root);
716 ret = PTR_ERR(trans);
719 if (state == FLUSH_DELAYED_REFS_NR)
720 nr = calc_reclaim_items_nr(fs_info, num_bytes);
723 btrfs_run_delayed_refs(trans, nr);
724 btrfs_end_transaction(trans);
727 case ALLOC_CHUNK_FORCE:
729 * For metadata space on a zoned filesystem, reaching here means we
730 * don't have enough space left in active_total_bytes. Try to
731 * activate a block group first, because we may already have an
732 * inactive block group allocated.
734 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
740 trans = btrfs_join_transaction(root);
742 ret = PTR_ERR(trans);
745 ret = btrfs_chunk_alloc(trans,
746 btrfs_get_alloc_profile(fs_info, space_info->flags),
747 (state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
749 btrfs_end_transaction(trans);
752 * For metadata space on a zoned filesystem, allocating a new chunk
753 * is not enough. We still need to activate the block group.
754 * Activate the newly allocated block group by (maybe) finishing a zone.
758 ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
760 * Revert to the original ret regardless of whether we could finish
761 * one block group or not.
767 if (ret > 0 || ret == -ENOSPC)
770 case RUN_DELAYED_IPUTS:
772 * If we have pending delayed iputs then we could free up a
773 * bunch of pinned space, so make sure we run the iputs before
774 * we do our pinned bytes check below.
776 btrfs_run_delayed_iputs(fs_info);
777 btrfs_wait_on_delayed_iputs(fs_info);
780 ASSERT(current->journal_info == NULL);
781 trans = btrfs_join_transaction(root);
783 ret = PTR_ERR(trans);
786 ret = btrfs_commit_transaction(trans);
793 trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
799 btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
800 struct btrfs_space_info *space_info)
805 u64 to_reclaim = space_info->reclaim_size;
807 lockdep_assert_held(&space_info->lock);
809 avail = calc_available_free_space(fs_info, space_info,
810 BTRFS_RESERVE_FLUSH_ALL);
811 used = btrfs_space_info_used(space_info, true);
814 * We may be flushing because suddenly we have less space than we had
815 * before, and now we're well over-committed based on our current free
816 * space. If that's the case add in our overage so we make sure to put
817 * appropriate pressure on the flushing state machine.
819 total = writable_total_bytes(fs_info, space_info);
820 if (total + avail < used)
821 to_reclaim += used - (total + avail);
826 static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
827 struct btrfs_space_info *space_info)
829 u64 global_rsv_size = fs_info->global_block_rsv.reserved;
830 u64 ordered, delalloc;
831 u64 total = writable_total_bytes(fs_info, space_info);
835 thresh = div_factor_fine(total, 90);
837 lockdep_assert_held(&space_info->lock);
839 /* If we're just plain full then async reclaim just slows us down. */
840 if ((space_info->bytes_used + space_info->bytes_reserved +
841 global_rsv_size) >= thresh)
844 used = space_info->bytes_may_use + space_info->bytes_pinned;
846 /* The total flushable belongs to the global rsv, don't flush. */
847 if (global_rsv_size >= used)
851 * 128MiB is 1/4 of the maximum global rsv size. If we have less than
852 * that devoted to other reservations then there's no sense in flushing,
853 * we don't have a lot of things that need flushing.
855 if (used - global_rsv_size <= SZ_128M)
859 * We have tickets queued, bail so we don't compete with the async flushers.
862 if (space_info->reclaim_size)
866 * If we have over half of the free space occupied by reservations or
867 * pinned then we want to start flushing.
869 * We do not do the traditional thing here, which is to say
871 * if (used >= ((total_bytes + avail) / 2))
874 * because this doesn't quite work how we want. If we had more than 50%
875 * of the space_info used by bytes_used and we had 0 available we'd just
876 * constantly run the background flusher. Instead we want it to kick in
877 * if our reclaimable space exceeds our clamped free space.
879 * Our clamping range is 2^1 -> 2^8. Practically speaking that means the following:
882 * Amount of RAM           Minimum threshold       Maximum threshold
885 * 128GiB                  512MiB                  64GiB
890 * These are the range our thresholds will fall in, corresponding to how
891 * much delalloc we need for the background flusher to kick in.
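*
* Illustrative example (made up numbers): if the unallocated space usable for
* overcommit plus the unused portion of the allocated metadata chunks comes
* to 64GiB and ->clamp is currently 3, preemptive flushing keeps running
* until the reclaimable reservations drop below 64GiB >> 3 = 8GiB.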
894 thresh = calc_available_free_space(fs_info, space_info,
895 BTRFS_RESERVE_FLUSH_ALL);
896 used = space_info->bytes_used + space_info->bytes_reserved +
897 space_info->bytes_readonly + global_rsv_size;
899 thresh += total - used;
900 thresh >>= space_info->clamp;
902 used = space_info->bytes_pinned;
905 * If we have more ordered bytes than delalloc bytes then we're either
906 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
907 * around. Preemptive flushing is only useful in that it can free up
908 * space before tickets need to wait for things to finish. In the case
909 * of ordered extents, preemptively waiting on ordered extents gets us
910 * nothing, if our reservations are tied up in ordered extents we'll
911 * simply have to slow down writers by forcing them to wait on ordered extents.
914 * In the case that ordered is larger than delalloc, only include the
915 * block reserves that we would actually be able to directly reclaim
916 * from. In this case if we're heavy on metadata operations this will
917 * clearly be heavy enough to warrant preemptive flushing. In the case
918 * of heavy DIO or ordered reservations, preemptive flushing will just
919 * waste time and cause us to slow down.
921 * We want to make sure we truly are maxed out on ordered however, so
922 * cut ordered in half, and if it's still higher than delalloc then we
923 * can keep flushing. This is to avoid the case where we start
924 * flushing, and now delalloc == ordered and we stop preemptively
925 * flushing when we could still have several gigs of delalloc to flush.
927 ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
928 delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
929 if (ordered >= delalloc)
930 used += fs_info->delayed_refs_rsv.reserved +
931 fs_info->delayed_block_rsv.reserved;
933 used += space_info->bytes_may_use - global_rsv_size;
935 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
936 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
939 static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
940 struct btrfs_space_info *space_info,
941 struct reserve_ticket *ticket)
943 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
949 if (global_rsv->space_info != space_info)
952 spin_lock(&global_rsv->lock);
953 min_bytes = div_factor(global_rsv->size, 1);
954 if (global_rsv->reserved < min_bytes + ticket->bytes) {
955 spin_unlock(&global_rsv->lock);
958 global_rsv->reserved -= ticket->bytes;
959 remove_ticket(space_info, ticket);
961 wake_up(&ticket->wait);
962 space_info->tickets_id++;
963 if (global_rsv->reserved < global_rsv->size)
964 global_rsv->full = 0;
965 spin_unlock(&global_rsv->lock);
971 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
972 * @fs_info: fs_info for this fs
973 * @space_info: the space info we were flushing
975 * We call this when we've exhausted our flushing ability and haven't made
976 * progress in satisfying tickets. The reservation code handles tickets in
977 * order, so if there is a large ticket first and then smaller ones we could
978 * very well satisfy the smaller tickets. This will attempt to wake up any
979 * tickets in the list to catch this case.
981 * This function returns true if it was able to make progress by clearing out
982 * other tickets, or if it stumbles across a ticket that was smaller than the first ticket.
985 static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
986 struct btrfs_space_info *space_info)
988 struct reserve_ticket *ticket;
989 u64 tickets_id = space_info->tickets_id;
990 const bool aborted = BTRFS_FS_ERROR(fs_info);
992 trace_btrfs_fail_all_tickets(fs_info, space_info);
994 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
995 btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
996 __btrfs_dump_space_info(fs_info, space_info);
999 while (!list_empty(&space_info->tickets) &&
1000 tickets_id == space_info->tickets_id) {
1001 ticket = list_first_entry(&space_info->tickets,
1002 struct reserve_ticket, list);
1004 if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
1007 if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1008 btrfs_info(fs_info, "failing ticket with %llu bytes",
1011 remove_ticket(space_info, ticket);
1013 ticket->error = -EIO;
1015 ticket->error = -ENOSPC;
1016 wake_up(&ticket->wait);
1019 * We're just throwing tickets away, so more flushing may not
1020 * trip over btrfs_try_granting_tickets, so we need to call it
1021 * here to see if we can make progress with the next ticket in the list.
1025 btrfs_try_granting_tickets(fs_info, space_info);
1027 return (tickets_id != space_info->tickets_id);
1031 * This is for normal flushers, we can wait all goddamned day if we want to. We
1032 * will loop and continuously try to flush as long as we are making progress.
1033 * We count progress as clearing off tickets each time we have to loop.
1035 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
1037 struct btrfs_fs_info *fs_info;
1038 struct btrfs_space_info *space_info;
1040 enum btrfs_flush_state flush_state;
1041 int commit_cycles = 0;
1042 u64 last_tickets_id;
1044 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
1045 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1047 spin_lock(&space_info->lock);
1048 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1050 space_info->flush = 0;
1051 spin_unlock(&space_info->lock);
1054 last_tickets_id = space_info->tickets_id;
1055 spin_unlock(&space_info->lock);
1057 flush_state = FLUSH_DELAYED_ITEMS_NR;
1059 flush_space(fs_info, space_info, to_reclaim, flush_state, false);
1060 spin_lock(&space_info->lock);
1061 if (list_empty(&space_info->tickets)) {
1062 space_info->flush = 0;
1063 spin_unlock(&space_info->lock);
1066 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
1068 if (last_tickets_id == space_info->tickets_id) {
1071 last_tickets_id = space_info->tickets_id;
1072 flush_state = FLUSH_DELAYED_ITEMS_NR;
1078 * We do not want to empty the system of delalloc unless we're
1079 * under heavy pressure, so allow one trip through the flushing
1080 * logic before we start doing a FLUSH_DELALLOC_FULL.
1082 if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
1086 * We don't want to force a chunk allocation until we've tried
1087 * pretty hard to reclaim space. Think of the case where we
1088 * freed up a bunch of space and so have a lot of pinned space
1089 * to reclaim. We would rather use that than possibly create an
1090 * underutilized metadata chunk. So if this is our first run
1091 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
1092 * commit the transaction. If nothing has changed the next go
1093 * around then we can force a chunk allocation.
1095 if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
1098 if (flush_state > COMMIT_TRANS) {
1100 if (commit_cycles > 2) {
1101 if (maybe_fail_all_tickets(fs_info, space_info)) {
1102 flush_state = FLUSH_DELAYED_ITEMS_NR;
1105 space_info->flush = 0;
1108 flush_state = FLUSH_DELAYED_ITEMS_NR;
1111 spin_unlock(&space_info->lock);
1112 } while (flush_state <= COMMIT_TRANS);
1116 * This handles pre-flushing of metadata space before we get to the point that
1117 * we need to start blocking threads on tickets. The logic here is different
1118 * from the other flush paths because it doesn't rely on tickets to tell us how
1119 * much we need to flush, instead it attempts to keep us below the 80% full
1120 * watermark of space by flushing whichever reservation pool is currently the
1123 static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
1125 struct btrfs_fs_info *fs_info;
1126 struct btrfs_space_info *space_info;
1127 struct btrfs_block_rsv *delayed_block_rsv;
1128 struct btrfs_block_rsv *delayed_refs_rsv;
1129 struct btrfs_block_rsv *global_rsv;
1130 struct btrfs_block_rsv *trans_rsv;
1133 fs_info = container_of(work, struct btrfs_fs_info,
1134 preempt_reclaim_work);
1135 space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
1136 delayed_block_rsv = &fs_info->delayed_block_rsv;
1137 delayed_refs_rsv = &fs_info->delayed_refs_rsv;
1138 global_rsv = &fs_info->global_block_rsv;
1139 trans_rsv = &fs_info->trans_block_rsv;
1141 spin_lock(&space_info->lock);
1142 while (need_preemptive_reclaim(fs_info, space_info)) {
1143 enum btrfs_flush_state flush;
1144 u64 delalloc_size = 0;
1145 u64 to_reclaim, block_rsv_size;
1146 u64 global_rsv_size = global_rsv->reserved;
1151 * We don't have a precise counter for the metadata being
1152 * reserved for delalloc, so we'll approximate it by subtracting
1153 * out the block rsv's space from the bytes_may_use. If that
1154 * amount is higher than the individual reserves, then we can
1155 * assume it's tied up in delalloc reservations.
1157 block_rsv_size = global_rsv_size +
1158 delayed_block_rsv->reserved +
1159 delayed_refs_rsv->reserved +
1160 trans_rsv->reserved;
1161 if (block_rsv_size < space_info->bytes_may_use)
1162 delalloc_size = space_info->bytes_may_use - block_rsv_size;
1165 * We don't want to include the global_rsv in our calculation,
1166 * because that's space we can't touch. Subtract it from the
1167 * block_rsv_size for the next checks.
1169 block_rsv_size -= global_rsv_size;
1172 * We really want to avoid flushing delalloc too much, as it
1173 * could result in poor allocation patterns, so only flush it if
1174 * it's larger than the rest of the pools combined.
1176 if (delalloc_size > block_rsv_size) {
1177 to_reclaim = delalloc_size;
1178 flush = FLUSH_DELALLOC;
1179 } else if (space_info->bytes_pinned >
1180 (delayed_block_rsv->reserved +
1181 delayed_refs_rsv->reserved)) {
1182 to_reclaim = space_info->bytes_pinned;
1183 flush = COMMIT_TRANS;
1184 } else if (delayed_block_rsv->reserved >
1185 delayed_refs_rsv->reserved) {
1186 to_reclaim = delayed_block_rsv->reserved;
1187 flush = FLUSH_DELAYED_ITEMS_NR;
1189 to_reclaim = delayed_refs_rsv->reserved;
1190 flush = FLUSH_DELAYED_REFS_NR;
1193 spin_unlock(&space_info->lock);
1196 * We don't want to reclaim everything, just a portion, so scale
1197 * down the to_reclaim by 1/4. If it takes us down to 0,
1198 * reclaim one item's worth.
1202 to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
1203 flush_space(fs_info, space_info, to_reclaim, flush, true);
1205 spin_lock(&space_info->lock);
1208 /* We only went through once, back off our clamping. */
1209 if (loops == 1 && !space_info->reclaim_size)
1210 space_info->clamp = max(1, space_info->clamp - 1);
1211 trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
1212 spin_unlock(&space_info->lock);
1216 * FLUSH_DELALLOC_WAIT:
1217 * Space is freed from flushing delalloc in one of two ways.
1219 * 1) compression is on and we allocate less space than we reserved
1220 * 2) we are overwriting existing space
1222 * For #1 that extra space is reclaimed as soon as the delalloc pages are
1223 * COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
1224 * length to ->bytes_reserved, and subtracts the reserved space from ->bytes_may_use.
1227 * For #2 this is trickier. Once the ordered extent runs we will drop the
1228 * extent in the range we are overwriting, which creates a delayed ref for
1229 * that freed extent. This however is not reclaimed until the transaction
1230 * commits, thus the next stages.
1233 * If we are freeing inodes, we want to make sure all delayed iputs have
1234 * completed, because they could have been on an inode with i_nlink == 0, and
1235 * thus have been truncated and freed up space. But again this space is not
1236 * immediately re-usable, it comes in the form of a delayed ref, which must be
1237 * run and then the transaction must be committed.
1240 * This is where we reclaim all of the pinned space generated by running the delayed iputs.
1244 * For data we start with alloc chunk force, however we could have been full
1245 * before, and then the transaction commit could have freed new block groups,
1246 * so if we now have space to allocate do the force chunk allocation.
1248 static const enum btrfs_flush_state data_flush_states[] = {
1249 FLUSH_DELALLOC_FULL,
1255 static void btrfs_async_reclaim_data_space(struct work_struct *work)
1257 struct btrfs_fs_info *fs_info;
1258 struct btrfs_space_info *space_info;
1259 u64 last_tickets_id;
1260 enum btrfs_flush_state flush_state = 0;
1262 fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
1263 space_info = fs_info->data_sinfo;
1265 spin_lock(&space_info->lock);
1266 if (list_empty(&space_info->tickets)) {
1267 space_info->flush = 0;
1268 spin_unlock(&space_info->lock);
1271 last_tickets_id = space_info->tickets_id;
1272 spin_unlock(&space_info->lock);
1274 while (!space_info->full) {
1275 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1276 spin_lock(&space_info->lock);
1277 if (list_empty(&space_info->tickets)) {
1278 space_info->flush = 0;
1279 spin_unlock(&space_info->lock);
1283 /* Something happened, fail everything and bail. */
1284 if (BTRFS_FS_ERROR(fs_info))
1286 last_tickets_id = space_info->tickets_id;
1287 spin_unlock(&space_info->lock);
1290 while (flush_state < ARRAY_SIZE(data_flush_states)) {
1291 flush_space(fs_info, space_info, U64_MAX,
1292 data_flush_states[flush_state], false);
1293 spin_lock(&space_info->lock);
1294 if (list_empty(&space_info->tickets)) {
1295 space_info->flush = 0;
1296 spin_unlock(&space_info->lock);
1300 if (last_tickets_id == space_info->tickets_id) {
1303 last_tickets_id = space_info->tickets_id;
1307 if (flush_state >= ARRAY_SIZE(data_flush_states)) {
1308 if (space_info->full) {
1309 if (maybe_fail_all_tickets(fs_info, space_info))
1312 space_info->flush = 0;
1317 /* Something happened, fail everything and bail. */
1318 if (BTRFS_FS_ERROR(fs_info))
1322 spin_unlock(&space_info->lock);
1327 maybe_fail_all_tickets(fs_info, space_info);
1328 space_info->flush = 0;
1329 spin_unlock(&space_info->lock);
1332 void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
1334 INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
1335 INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
1336 INIT_WORK(&fs_info->preempt_reclaim_work,
1337 btrfs_preempt_reclaim_metadata_space);
1340 static const enum btrfs_flush_state priority_flush_states[] = {
1341 FLUSH_DELAYED_ITEMS_NR,
1342 FLUSH_DELAYED_ITEMS,
1346 static const enum btrfs_flush_state evict_flush_states[] = {
1347 FLUSH_DELAYED_ITEMS_NR,
1348 FLUSH_DELAYED_ITEMS,
1349 FLUSH_DELAYED_REFS_NR,
1352 FLUSH_DELALLOC_WAIT,
1353 FLUSH_DELALLOC_FULL,
1358 static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
1359 struct btrfs_space_info *space_info,
1360 struct reserve_ticket *ticket,
1361 const enum btrfs_flush_state *states,
1365 int flush_state = 0;
1367 spin_lock(&space_info->lock);
1368 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
1370 * This is the priority reclaim path, so to_reclaim could be >0 still
1371 * because we may have only satisfied the priority tickets and still
1372 * left non-priority tickets on the list. We would then have
1373 * to_reclaim but ->bytes == 0.
1375 if (ticket->bytes == 0) {
1376 spin_unlock(&space_info->lock);
1380 while (flush_state < states_nr) {
1381 spin_unlock(&space_info->lock);
1382 flush_space(fs_info, space_info, to_reclaim, states[flush_state],
1385 spin_lock(&space_info->lock);
1386 if (ticket->bytes == 0) {
1387 spin_unlock(&space_info->lock);
1392 /* Attempt to steal from the global rsv if we can. */
1393 if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
1394 ticket->error = -ENOSPC;
1395 remove_ticket(space_info, ticket);
1399 * We must run try_granting_tickets here because we could be a large
1400 * ticket in front of a smaller ticket that can now be satisfied with
1401 * the available space.
1403 btrfs_try_granting_tickets(fs_info, space_info);
1404 spin_unlock(&space_info->lock);
1407 static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
1408 struct btrfs_space_info *space_info,
1409 struct reserve_ticket *ticket)
1411 spin_lock(&space_info->lock);
1413 /* We could have been granted before we got here. */
1414 if (ticket->bytes == 0) {
1415 spin_unlock(&space_info->lock);
1419 while (!space_info->full) {
1420 spin_unlock(&space_info->lock);
1421 flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
1422 spin_lock(&space_info->lock);
1423 if (ticket->bytes == 0) {
1424 spin_unlock(&space_info->lock);
1429 ticket->error = -ENOSPC;
1430 remove_ticket(space_info, ticket);
1431 btrfs_try_granting_tickets(fs_info, space_info);
1432 spin_unlock(&space_info->lock);
1435 static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
1436 struct btrfs_space_info *space_info,
1437 struct reserve_ticket *ticket)
1443 spin_lock(&space_info->lock);
1444 while (ticket->bytes > 0 && ticket->error == 0) {
1445 ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
1448 * Delete us from the list. After we unlock the space
1449 * info, we don't want the async reclaim job to reserve
1450 * space for this ticket. If that would happen, then the
1451 * ticket's task would not know that space was reserved
1452 * despite getting an error, resulting in a space leak
1453 * (bytes_may_use counter of our space_info).
1455 remove_ticket(space_info, ticket);
1456 ticket->error = -EINTR;
1459 spin_unlock(&space_info->lock);
1463 finish_wait(&ticket->wait, &wait);
1464 spin_lock(&space_info->lock);
1466 spin_unlock(&space_info->lock);
1470 * Do the appropriate flushing and waiting for a ticket
1472 * @fs_info: the filesystem
1473 * @space_info: space info for the reservation
1474 * @ticket: ticket for the reservation
1475 * @start_ns: timestamp when the reservation started
1476 * @orig_bytes: amount of bytes originally reserved
1477 * @flush: how much we can flush
1479 * This does the work of figuring out how to flush for the ticket, waiting for
1480 * the reservation, and returning the appropriate error if there is one.
1482 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
1483 struct btrfs_space_info *space_info,
1484 struct reserve_ticket *ticket,
1485 u64 start_ns, u64 orig_bytes,
1486 enum btrfs_reserve_flush_enum flush)
1491 case BTRFS_RESERVE_FLUSH_DATA:
1492 case BTRFS_RESERVE_FLUSH_ALL:
1493 case BTRFS_RESERVE_FLUSH_ALL_STEAL:
1494 wait_reserve_ticket(fs_info, space_info, ticket);
1496 case BTRFS_RESERVE_FLUSH_LIMIT:
1497 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1498 priority_flush_states,
1499 ARRAY_SIZE(priority_flush_states));
1501 case BTRFS_RESERVE_FLUSH_EVICT:
1502 priority_reclaim_metadata_space(fs_info, space_info, ticket,
1504 ARRAY_SIZE(evict_flush_states));
1506 case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
1507 priority_reclaim_data_space(fs_info, space_info, ticket);
1514 ret = ticket->error;
1515 ASSERT(list_empty(&ticket->list));
1517 * Check that we can't have an error set if the reservation succeeded,
1518 * as that would confuse tasks and lead them to error out without
1519 * releasing reserved space (if an error happens the expectation is that
1520 * space wasn't reserved at all).
1522 ASSERT(!(ticket->bytes == 0 && ticket->error));
1523 trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
1524 start_ns, flush, ticket->error);
1530 * This returns true if this flush state will go through the ordinary flushing states.
1532 static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
1534 return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
1535 (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
1538 static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
1539 struct btrfs_space_info *space_info)
1541 u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
1542 u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
1545 * If we're heavy on ordered operations then clamping won't help us. We
1546 * need to clamp specifically to keep up with dirtying buffered
1547 * writers, because there's not a 1:1 correlation of writing delalloc
1548 * and freeing space, like there is with flushing delayed refs or
1549 * delayed nodes. If we're already more ordered than delalloc then
1550 * we're keeping up, otherwise we aren't and should probably clamp.
1552 if (ordered < delalloc)
1553 space_info->clamp = min(space_info->clamp + 1, 8);
1556 static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
1558 return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1559 flush == BTRFS_RESERVE_FLUSH_EVICT);
1563 * Try to reserve bytes from the block_rsv's space
1565 * @fs_info: the filesystem
1566 * @space_info: space info we want to allocate from
1567 * @orig_bytes: number of bytes we want
1568 * @flush: whether or not we can flush to make our reservation
1570 * This will reserve orig_bytes number of bytes from the space info associated
1571 * with the block_rsv. If there is not enough space it will make an attempt to
1572 * flush out space to make room. It will do this by flushing delalloc if
1573 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH then no
1574 * attempts to regain reservations will be made and this will fail if there is not enough space already.
1577 static int __reserve_bytes(struct btrfs_fs_info *fs_info,
1578 struct btrfs_space_info *space_info, u64 orig_bytes,
1579 enum btrfs_reserve_flush_enum flush)
1581 struct work_struct *async_work;
1582 struct reserve_ticket ticket;
1586 bool pending_tickets;
1589 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
1591 if (flush == BTRFS_RESERVE_FLUSH_DATA)
1592 async_work = &fs_info->async_data_reclaim_work;
1594 async_work = &fs_info->async_reclaim_work;
1596 spin_lock(&space_info->lock);
1598 used = btrfs_space_info_used(space_info, true);
1601 * We don't want NO_FLUSH allocations to jump everybody, they can
1602 * generally handle ENOSPC in a different way, so treat them the same as
1603 * normal flushers when it comes to skipping pending tickets.
1605 if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
1606 pending_tickets = !list_empty(&space_info->tickets) ||
1607 !list_empty(&space_info->priority_tickets);
1609 pending_tickets = !list_empty(&space_info->priority_tickets);
1612 * Carry on if we have enough space (short-circuit) OR call
1613 * can_overcommit() to ensure we can overcommit to continue.
1615 if (!pending_tickets &&
1616 ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
1617 btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
1618 btrfs_space_info_update_bytes_may_use(fs_info, space_info,
1624 * If we couldn't make a reservation then setup our reservation ticket
1625 * and kick the async worker if it's not already running.
1627 * If we are a priority flusher then we just need to add our ticket to
1628 * the list and we will do our own flushing further down.
1630 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
1631 ticket.bytes = orig_bytes;
1633 space_info->reclaim_size += ticket.bytes;
1634 init_waitqueue_head(&ticket.wait);
1635 ticket.steal = can_steal(flush);
1636 if (trace_btrfs_reserve_ticket_enabled())
1637 start_ns = ktime_get_ns();
1639 if (flush == BTRFS_RESERVE_FLUSH_ALL ||
1640 flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
1641 flush == BTRFS_RESERVE_FLUSH_DATA) {
1642 list_add_tail(&ticket.list, &space_info->tickets);
1643 if (!space_info->flush) {
1645 * We were forced to add a reserve ticket, so
1646 * our preemptive flushing is unable to keep
1647 * up. Clamp down on the threshold for the
1648 * preemptive flushing in order to keep up with the workload.
1651 maybe_clamp_preempt(fs_info, space_info);
1653 space_info->flush = 1;
1654 trace_btrfs_trigger_flush(fs_info,
1658 queue_work(system_unbound_wq, async_work);
1661 list_add_tail(&ticket.list,
1662 &space_info->priority_tickets);
1664 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
1667 * We will do the space reservation dance during log replay,
1668 * which means we won't have fs_info->fs_root set, so don't do
1669 * the async reclaim as we will panic.
1671 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
1672 !work_busy(&fs_info->preempt_reclaim_work) &&
1673 need_preemptive_reclaim(fs_info, space_info)) {
1674 trace_btrfs_trigger_flush(fs_info, space_info->flags,
1675 orig_bytes, flush, "preempt");
1676 queue_work(system_unbound_wq,
1677 &fs_info->preempt_reclaim_work);
1680 spin_unlock(&space_info->lock);
1681 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
1684 return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
1689 * Try to reserve metadata bytes from the block_rsv's space
1691 * @fs_info: the filesystem
1692 * @block_rsv: block_rsv we're allocating for
1693 * @orig_bytes: number of bytes we want
1694 * @flush: whether or not we can flush to make our reservation
1696 * This will reserve orig_bytes number of bytes from the space info associated
1697 * with the block_rsv. If there is not enough space it will make an attempt to
1698 * flush out space to make room. It will do this by flushing delalloc if
1699 * possible or committing the transaction. If @flush is BTRFS_RESERVE_NO_FLUSH then no
1700 * attempts to regain reservations will be made and this will fail if there is not enough space already.
1703 int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
1704 struct btrfs_block_rsv *block_rsv,
1706 enum btrfs_reserve_flush_enum flush)
1710 ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
1711 if (ret == -ENOSPC) {
1712 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1713 block_rsv->space_info->flags,
1716 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1717 btrfs_dump_space_info(fs_info, block_rsv->space_info,
1724 * Try to reserve data bytes for an allocation
1726 * @fs_info: the filesystem
1727 * @bytes: number of bytes we need
1728 * @flush: how we are allowed to flush
1730 * This will reserve bytes from the data space info. If there is not enough
1731 * space then we will attempt to flush space as specified by flush.
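*
* A minimal usage sketch (hypothetical caller, error handling elided):
*
*   ret = btrfs_reserve_data_bytes(fs_info, SZ_1M, BTRFS_RESERVE_FLUSH_DATA);
*
* On failure this returns -ENOSPC (or -EINTR if the wait was interrupted) and
* nothing is accounted; on success the 1MiB is accounted in
* data_sinfo->bytes_may_use until it is either consumed by an allocation or
* released by the caller.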
1733 int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
1734 enum btrfs_reserve_flush_enum flush)
1736 struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
1739 ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
1740 flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
1741 ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);
1743 ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
1744 if (ret == -ENOSPC) {
1745 trace_btrfs_space_reservation(fs_info, "space_info:enospc",
1746 data_sinfo->flags, bytes, 1);
1747 if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
1748 btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);