// SPDX-License-Identifier: GPL-2.0

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "block-group.h"
#include "discard.h"
#include "free-space-cache.h"

/*
 * This contains the logic to handle async discard.
 *
 * Async discard manages trimming of free space outside of transaction commit.
 * Discarding is done by managing the block_groups on an LRU list based on free
 * space recency.  Two passes are used to first prioritize discarding extents
 * and then allow for trimming in the bitmap the best opportunity to coalesce.
 * The block_groups are maintained on multiple lists to allow for multiple
 * passes with different discard filter requirements.  A delayed work item is
 * used to manage discarding with a timeout determined by a max of the delay
 * incurred by the iops rate limit, the byte rate limit, and the max delay of
 * BTRFS_DISCARD_MAX_DELAY_MSEC.
 *
 * Note, this only keeps track of block_groups that are explicitly for data.
 * Mixed block_groups are not supported.
 *
 * The first list is special to manage discarding of fully free block groups.
 * This is necessary because we issue a final trim for a full free block group
 * after forgetting it.  When a block group becomes unused, instead of directly
 * being added to the unused_bgs list, we add it to this first list.  Then
 * from there, if it becomes fully discarded, we place it onto the unused_bgs
 * list.
 *
 * The in-memory free space cache serves as the backing state for discard.
 * Consequently this means there is no persistence.  We opt to load all the
 * block groups in as not discarded, so the mount case degenerates to the
 * crashing case.
 *
 * As the free space cache uses bitmaps, there exists a tradeoff between
 * ease/efficiency for find_free_extent() and the accuracy of discard state.
 * Here we opt to let untrimmed regions merge with everything while only letting
 * trimmed regions merge with other trimmed regions.  This can cause
 * overtrimming, but the coalescing benefit seems to be worth it.  Additionally,
 * bitmap state is tracked as a whole.  If we're able to fully trim a bitmap,
 * the trimmed flag is set on the bitmap.  Otherwise, if an allocation comes in,
 * this resets the state and we will retry trimming the whole bitmap.  This is a
 * tradeoff between discard state accuracy and the cost of accounting.
 */
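
/*
 * Roughly, the lifecycle looks like this (illustrative summary of the design
 * described above; details live in the functions below):
 *
 *   block group becomes unused ----> list BTRFS_DISCARD_INDEX_UNUSED (10s delay)
 *   free space returned to cache --> list BTRFS_DISCARD_INDEX_START (120s delay)
 *   pass completes on list i ------> list i + 1, until the group falls off
 *
 * Within a pass, a block group walks BTRFS_DISCARD_EXTENTS first and then
 * BTRFS_DISCARD_BITMAPS before btrfs_finish_discard_pass() decides its fate.
 */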

/* This is an initial delay to give some chance for block reuse */
#define BTRFS_DISCARD_DELAY		(120ULL * NSEC_PER_SEC)
#define BTRFS_DISCARD_UNUSED_DELAY	(10ULL * NSEC_PER_SEC)

/* Target completion latency of discarding all discardable extents */
#define BTRFS_DISCARD_TARGET_MSEC	(6 * 60 * 60UL * MSEC_PER_SEC)
#define BTRFS_DISCARD_MIN_DELAY_MSEC	(1UL)
#define BTRFS_DISCARD_MAX_DELAY_MSEC	(1000UL)
#define BTRFS_DISCARD_MAX_IOPS		(10U)

/* Monotonically decreasing minimum length filters after index 0 */
static int discard_minlen[BTRFS_NR_DISCARD_LISTS] = {
	0,
	BTRFS_ASYNC_DISCARD_MAX_FILTER,
	BTRFS_ASYNC_DISCARD_MIN_FILTER
};
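
/*
 * Index 0 is the unused block group list and is not size filtered.  The
 * remaining indices hold progressively smaller minimum length filters (see
 * discard.h for the byte values of BTRFS_ASYNC_DISCARD_MAX_FILTER and
 * BTRFS_ASYNC_DISCARD_MIN_FILTER).  As a rough example, a freed region at
 * least as large as the MAX filter is queued on index 1 and, once that pass
 * completes, ages onto index 2 where only the smaller MIN filter applies.
 */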

static struct list_head *get_discard_list(struct btrfs_discard_ctl *discard_ctl,
					  struct btrfs_block_group *block_group)
{
	return &discard_ctl->discard_list[block_group->discard_index];
}

static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				  struct btrfs_block_group *block_group)
{
	if (!btrfs_run_discard_work(discard_ctl))
		return;

	if (list_empty(&block_group->discard_list) ||
	    block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED)
			block_group->discard_index = BTRFS_DISCARD_INDEX_START;
		block_group->discard_eligible_time = (ktime_get_ns() +
						      BTRFS_DISCARD_DELAY);
		block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	}

	list_move_tail(&block_group->discard_list,
		       get_discard_list(discard_ctl, block_group));
}

static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl,
				struct btrfs_block_group *block_group)
{
	if (!btrfs_is_block_group_data_only(block_group))
		return;

	spin_lock(&discard_ctl->lock);
	__add_to_discard_list(discard_ctl, block_group);
	spin_unlock(&discard_ctl->lock);
}

static void add_to_discard_unused_list(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	spin_lock(&discard_ctl->lock);

	if (!btrfs_run_discard_work(discard_ctl)) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	list_del_init(&block_group->discard_list);

	block_group->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
	block_group->discard_eligible_time = (ktime_get_ns() +
					      BTRFS_DISCARD_UNUSED_DELAY);
	block_group->discard_state = BTRFS_DISCARD_RESET_CURSOR;
	list_add_tail(&block_group->discard_list,
		      &discard_ctl->discard_list[BTRFS_DISCARD_INDEX_UNUSED]);

	spin_unlock(&discard_ctl->lock);
}

static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl,
				     struct btrfs_block_group *block_group)
{
	bool running = false;

	spin_lock(&discard_ctl->lock);

	if (block_group == discard_ctl->block_group) {
		running = true;
		discard_ctl->block_group = NULL;
	}

	block_group->discard_eligible_time = 0;
	list_del_init(&block_group->discard_list);

	spin_unlock(&discard_ctl->lock);

	return running;
}

/*
 * Find block_group that's up next for discarding.
 *
 * @discard_ctl: discard control
 * @now:         current time
 *
 * Iterate over the discard lists to find the next block_group up for
 * discarding checking the discard_eligible_time of block_group.
 */
static struct btrfs_block_group *find_next_block_group(
					  struct btrfs_discard_ctl *discard_ctl,
					  u64 now)
{
	struct btrfs_block_group *ret_block_group = NULL, *block_group;
	int i;

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		struct list_head *discard_list = &discard_ctl->discard_list[i];

		if (!list_empty(discard_list)) {
			block_group = list_first_entry(discard_list,
						       struct btrfs_block_group,
						       discard_list);

			if (!ret_block_group)
				ret_block_group = block_group;

			if (ret_block_group->discard_eligible_time < now)
				break;

			if (ret_block_group->discard_eligible_time >
			    block_group->discard_eligible_time)
				ret_block_group = block_group;
		}
	}

	return ret_block_group;
}
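
/*
 * Roughly, the loop above keeps the head of each LRU list as a candidate and
 * returns the one with the earliest discard_eligible_time, bailing out early
 * as soon as the current best is already eligible.  For example (illustrative
 * numbers only): with list heads eligible at t = 50, 30 and 70 and now = 40,
 * the list 1 head (t = 30) wins; with now = 60 the loop stops at list 0
 * because t = 50 is already in the past.
 */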

/*
 * Look up next block group and set it for use.
 *
 * @discard_ctl:   discard control
 * @discard_state: the discard_state of the block_group after state management
 * @discard_index: the discard_index of the block_group after state management
 * @now:           time when discard was invoked, in ns
 *
 * Wrap find_next_block_group() and set the block_group to be in use.
 * @discard_state's control flow is managed here.  Variables related to
 * @discard_state are reset here as needed (e.g. @discard_cursor).  @discard_state
 * and @discard_index are remembered as they may change while we're discarding,
 * but we want the discard to execute in the context determined here.
 */
static struct btrfs_block_group *peek_discard_list(
					   struct btrfs_discard_ctl *discard_ctl,
					   enum btrfs_discard_state *discard_state,
					   int *discard_index, u64 now)
{
	struct btrfs_block_group *block_group;

	spin_lock(&discard_ctl->lock);
again:
	block_group = find_next_block_group(discard_ctl, now);

	if (block_group && now >= block_group->discard_eligible_time) {
		if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
		    block_group->used != 0) {
			if (btrfs_is_block_group_data_only(block_group))
				__add_to_discard_list(discard_ctl, block_group);
			else
				list_del_init(&block_group->discard_list);
			goto again;
		}
		if (block_group->discard_state == BTRFS_DISCARD_RESET_CURSOR) {
			block_group->discard_cursor = block_group->start;
			block_group->discard_state = BTRFS_DISCARD_EXTENTS;
		}
		discard_ctl->block_group = block_group;
	}
	if (block_group) {
		*discard_state = block_group->discard_state;
		*discard_index = block_group->discard_index;
	}

	spin_unlock(&discard_ctl->lock);

	return block_group;
}

/*
 * Update a block group's filters.
 *
 * @block_group: block group of interest
 * @bytes:       recently freed region size after coalescing
 *
 * Async discard maintains multiple lists with progressively smaller filters
 * to prioritize discarding based on size.  Should a free space that matches
 * a larger filter be returned to the free_space_cache, prioritize that discard
 * by moving @block_group to the proper filter.
 */
void btrfs_discard_check_filter(struct btrfs_block_group *block_group,
				u64 bytes)
{
	struct btrfs_discard_ctl *discard_ctl;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	discard_ctl = &block_group->fs_info->discard_ctl;

	if (block_group->discard_index > BTRFS_DISCARD_INDEX_START &&
	    bytes >= discard_minlen[block_group->discard_index - 1]) {
		int i;

		remove_from_discard_list(discard_ctl, block_group);

		for (i = BTRFS_DISCARD_INDEX_START; i < BTRFS_NR_DISCARD_LISTS;
		     i++) {
			if (bytes >= discard_minlen[i]) {
				block_group->discard_index = i;
				add_to_discard_list(discard_ctl, block_group);
				break;
			}
		}
	}
}

/*
 * Move a block group along the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Increment @block_group's discard_index.  If it falls off the end of the
 * lists, let it be.  Otherwise add it back to the appropriate list.
 */
static void btrfs_update_discard_index(struct btrfs_discard_ctl *discard_ctl,
				       struct btrfs_block_group *block_group)
{
	block_group->discard_index++;
	if (block_group->discard_index == BTRFS_NR_DISCARD_LISTS) {
		block_group->discard_index = 1;
		return;
	}

	add_to_discard_list(discard_ctl, block_group);
}
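
/*
 * Illustrative walk-through of the aging above: a block group on the first
 * size-filtered list (index 1) moves to index 2 after a full extents +
 * bitmaps pass; once it finishes a pass on the last list, the index wraps
 * back to 1 but the group is not requeued, so it simply falls off until
 * later frees queue it again (e.g. via btrfs_discard_queue_work() or
 * btrfs_discard_check_filter()).
 */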

/*
 * Remove a block_group from the discard lists.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Remove @block_group from the discard lists.  If necessary, wait on the
 * current work and then reschedule the delayed work.
 */
void btrfs_discard_cancel_work(struct btrfs_discard_ctl *discard_ctl,
			       struct btrfs_block_group *block_group)
{
	if (remove_from_discard_list(discard_ctl, block_group)) {
		cancel_delayed_work_sync(&discard_ctl->work);
		btrfs_discard_schedule_work(discard_ctl, true);
	}
}

/*
 * Handles queuing the block_groups.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Maintain the LRU order of the discard lists.
 */
void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
			      struct btrfs_block_group *block_group)
{
	if (!block_group || !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
		return;

	if (block_group->used == 0)
		add_to_discard_unused_list(discard_ctl, block_group);
	else
		add_to_discard_list(discard_ctl, block_group);

	if (!delayed_work_pending(&discard_ctl->work))
		btrfs_discard_schedule_work(discard_ctl, false);
}

static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
					  u64 now, bool override)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_run_discard_work(discard_ctl))
		return;
	if (!override && delayed_work_pending(&discard_ctl->work))
		return;

	block_group = find_next_block_group(discard_ctl, now);
	if (block_group) {
		u64 delay = discard_ctl->delay_ms * NSEC_PER_MSEC;
		u32 kbps_limit = READ_ONCE(discard_ctl->kbps_limit);

		/*
		 * A single delayed workqueue item is responsible for
		 * discarding, so we can manage the bytes rate limit by keeping
		 * track of the previous discard.
		 */
		if (kbps_limit && discard_ctl->prev_discard) {
			u64 bps_limit = ((u64)kbps_limit) * SZ_1K;
			u64 bps_delay = div64_u64(discard_ctl->prev_discard *
						  NSEC_PER_SEC, bps_limit);

			delay = max(delay, bps_delay);
		}

		/*
		 * This timeout is to hopefully prevent immediate discarding
		 * in a recently allocated block group.
		 */
		if (now < block_group->discard_eligible_time) {
			u64 bg_timeout = block_group->discard_eligible_time - now;

			delay = max(delay, bg_timeout);
		}

		if (override && discard_ctl->prev_discard) {
			u64 elapsed = now - discard_ctl->prev_discard_time;

			if (delay > elapsed)
				delay -= elapsed;
			else
				delay = 0;
		}

		mod_delayed_work(discard_ctl->discard_workers,
				 &discard_ctl->work, nsecs_to_jiffies(delay));
	}
}
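
/*
 * Rough worked example of the delay selection above (illustrative numbers,
 * not taken from the original source): with delay_ms = 100 (e.g. from a
 * 10 iops limit), kbps_limit = 102400 (100 MiB/s) and a previous discard of
 * 64 MiB, bps_delay = 64 MiB * NSEC_PER_SEC / 100 MiB/s = 640 ms, so the byte
 * rate limit wins and delay becomes 640 ms.  If the chosen block group is not
 * yet eligible, its remaining discard_eligible_time can push the delay out
 * even further, and @override subtracts whatever time has already elapsed
 * since the previous discard.
 */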

/*
 * Responsible for scheduling the discard work.
 *
 * @discard_ctl: discard control
 * @override:    override the current timer
 *
 * Discards are issued by a delayed workqueue item.  @override is used to
 * update the current delay as the baseline delay interval is reevaluated on
 * transaction commit.  This is also maxed with any other rate limit.
 */
void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
				 bool override)
{
	const u64 now = ktime_get_ns();

	spin_lock(&discard_ctl->lock);
	__btrfs_discard_schedule_work(discard_ctl, now, override);
	spin_unlock(&discard_ctl->lock);
}

/*
 * Determine next step of a block_group.
 *
 * @discard_ctl: discard control
 * @block_group: block_group of interest
 *
 * Determine the next step for a block group after it's finished going through
 * a pass on a discard list.  If it is unused and fully trimmed, we can mark it
 * unused and send it to the unused_bgs path.  Otherwise, pass it onto the
 * appropriate filter list or let it fall off.
 */
static void btrfs_finish_discard_pass(struct btrfs_discard_ctl *discard_ctl,
				      struct btrfs_block_group *block_group)
{
	remove_from_discard_list(discard_ctl, block_group);

	if (block_group->used == 0) {
		if (btrfs_is_free_space_trimmed(block_group))
			btrfs_mark_bg_unused(block_group);
		else
			add_to_discard_unused_list(discard_ctl, block_group);
	} else {
		btrfs_update_discard_index(discard_ctl, block_group);
	}
}

/*
 * Discard work queue callback
 *
 * @work: work
 *
 * Find the next block_group to start discarding and then discard a single
 * region.  It does this in a two-pass fashion: first extents and second
 * bitmaps.  Completely discarded block groups are sent to the unused_bgs path.
 */
static void btrfs_discard_workfn(struct work_struct *work)
{
	struct btrfs_discard_ctl *discard_ctl;
	struct btrfs_block_group *block_group;
	enum btrfs_discard_state discard_state;
	int discard_index = 0;
	u64 trimmed = 0;
	u64 minlen = 0;
	u64 now = ktime_get_ns();

	discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);

	block_group = peek_discard_list(discard_ctl, &discard_state,
					&discard_index, now);
	if (!block_group || !btrfs_run_discard_work(discard_ctl))
		return;
	if (now < block_group->discard_eligible_time) {
		btrfs_discard_schedule_work(discard_ctl, false);
		return;
	}

	/* Perform discarding */
	minlen = discard_minlen[discard_index];

	if (discard_state == BTRFS_DISCARD_BITMAPS) {
		u64 maxlen = 0;

		/*
		 * Use the previous level's minimum discard length as the max
		 * length filter.  In the case something is added to make a
		 * region go beyond the max filter, the entire bitmap is set
		 * back to BTRFS_TRIM_STATE_UNTRIMMED.
		 */
		if (discard_index != BTRFS_DISCARD_INDEX_UNUSED)
			maxlen = discard_minlen[discard_index - 1];

		btrfs_trim_block_group_bitmaps(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       minlen, maxlen, true);
		discard_ctl->discard_bitmap_bytes += trimmed;
	} else {
		btrfs_trim_block_group_extents(block_group, &trimmed,
				       block_group->discard_cursor,
				       btrfs_block_group_end(block_group),
				       minlen, true);
		discard_ctl->discard_extent_bytes += trimmed;
	}

	/* Determine next steps for a block_group */
	if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
		if (discard_state == BTRFS_DISCARD_BITMAPS) {
			btrfs_finish_discard_pass(discard_ctl, block_group);
		} else {
			block_group->discard_cursor = block_group->start;
			spin_lock(&discard_ctl->lock);
			if (block_group->discard_state !=
			    BTRFS_DISCARD_RESET_CURSOR)
				block_group->discard_state =
							BTRFS_DISCARD_BITMAPS;
			spin_unlock(&discard_ctl->lock);
		}
	}

	now = ktime_get_ns();
	spin_lock(&discard_ctl->lock);
	discard_ctl->prev_discard = trimmed;
	discard_ctl->prev_discard_time = now;
	discard_ctl->block_group = NULL;
	__btrfs_discard_schedule_work(discard_ctl, now, false);
	spin_unlock(&discard_ctl->lock);
}
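
/*
 * Rough sketch of the per block group state machine driven by the workfn
 * above (the names are the existing enum values; the flow is a summary, not
 * new behaviour):
 *
 *   BTRFS_DISCARD_RESET_CURSOR -> BTRFS_DISCARD_EXTENTS -> BTRFS_DISCARD_BITMAPS
 *
 * Each invocation trims a single region and advances discard_cursor; only
 * when the cursor reaches btrfs_block_group_end() does the state move on, and
 * only after the bitmaps pass does btrfs_finish_discard_pass() decide whether
 * the group goes to unused_bgs or onto the next filter list.
 */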

/*
 * Determine if async discard should be running.
 *
 * @discard_ctl: discard control
 *
 * Check if the file system is writeable and BTRFS_FS_DISCARD_RUNNING is set.
 */
bool btrfs_run_discard_work(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_fs_info *fs_info = container_of(discard_ctl,
						     struct btrfs_fs_info,
						     discard_ctl);

	return (!(fs_info->sb->s_flags & SB_RDONLY) &&
		test_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags));
}

/*
 * Recalculate the base delay.
 *
 * @discard_ctl: discard control
 *
 * Recalculate the base delay which is based off the total number of
 * discardable_extents.  Clamp this between the lower_limit (iops_limit or 1ms)
 * and the upper_limit (BTRFS_DISCARD_MAX_DELAY_MSEC).
 */
void btrfs_discard_calc_delay(struct btrfs_discard_ctl *discard_ctl)
{
	s32 discardable_extents;
	s64 discardable_bytes;
	u32 iops_limit;
	unsigned long delay;

	discardable_extents = atomic_read(&discard_ctl->discardable_extents);
	if (!discardable_extents)
		return;

	spin_lock(&discard_ctl->lock);

	/*
	 * The following is to fix a potential -1 discrepancy that we're not
	 * sure how to reproduce.  But given that this is the only place that
	 * utilizes these numbers and this is only called from
	 * btrfs_finish_extent_commit() which is synchronized, we can correct
	 * any negative discrepancies when locked.
	 */
	if (discardable_extents < 0)
		atomic_add(-discardable_extents,
			   &discard_ctl->discardable_extents);

	discardable_bytes = atomic64_read(&discard_ctl->discardable_bytes);
	if (discardable_bytes < 0)
		atomic64_add(-discardable_bytes,
			     &discard_ctl->discardable_bytes);

	if (discardable_extents <= 0) {
		spin_unlock(&discard_ctl->lock);
		return;
	}

	iops_limit = READ_ONCE(discard_ctl->iops_limit);
	if (iops_limit)
		delay = MSEC_PER_SEC / iops_limit;
	else
		delay = BTRFS_DISCARD_TARGET_MSEC / discardable_extents;

	delay = clamp(delay, BTRFS_DISCARD_MIN_DELAY_MSEC,
		      BTRFS_DISCARD_MAX_DELAY_MSEC);
	discard_ctl->delay_ms = delay;

	spin_unlock(&discard_ctl->lock);
}
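
/*
 * Worked example of the calculation above (illustrative numbers only): with
 * the default iops_limit of 10, delay = MSEC_PER_SEC / 10 = 100ms.  With
 * iops_limit = 0 and one million discardable extents, delay =
 * BTRFS_DISCARD_TARGET_MSEC / 1000000 = 21600000 / 1000000 = 21ms, which
 * already sits inside the [1ms, 1000ms] clamp.
 */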

/*
 * Propagate discard counters.
 *
 * @block_group: block_group of interest
 *
 * Propagate deltas of counters up to the discard_ctl.  It maintains a current
 * counter and a previous counter passing the delta up to the global stat.
 * Then the current counter value becomes the previous counter value.
 */
void btrfs_discard_update_discardable(struct btrfs_block_group *block_group)
{
	struct btrfs_free_space_ctl *ctl;
	struct btrfs_discard_ctl *discard_ctl;
	s32 extents_delta;
	s64 bytes_delta;

	if (!block_group ||
	    !btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC) ||
	    !btrfs_is_block_group_data_only(block_group))
		return;

	ctl = block_group->free_space_ctl;
	discard_ctl = &block_group->fs_info->discard_ctl;

	lockdep_assert_held(&ctl->tree_lock);
	extents_delta = ctl->discardable_extents[BTRFS_STAT_CURR] -
			ctl->discardable_extents[BTRFS_STAT_PREV];
	if (extents_delta) {
		atomic_add(extents_delta, &discard_ctl->discardable_extents);
		ctl->discardable_extents[BTRFS_STAT_PREV] =
			ctl->discardable_extents[BTRFS_STAT_CURR];
	}

	bytes_delta = ctl->discardable_bytes[BTRFS_STAT_CURR] -
		      ctl->discardable_bytes[BTRFS_STAT_PREV];
	if (bytes_delta) {
		atomic64_add(bytes_delta, &discard_ctl->discardable_bytes);
		ctl->discardable_bytes[BTRFS_STAT_PREV] =
			ctl->discardable_bytes[BTRFS_STAT_CURR];
	}
}
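
/*
 * Illustrative example of the CURR/PREV propagation above (numbers invented):
 * if a block group's ctl->discardable_extents[BTRFS_STAT_CURR] grows from 10
 * to 12 while PREV is still 10, extents_delta = +2 is added to the global
 * discard_ctl->discardable_extents and PREV is snapped to 12, so the next
 * call only reports changes made after this point.
 */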

/*
 * Punt unused_bgs list to discard lists.
 *
 * @fs_info: fs_info of interest
 *
 * The unused_bgs list needs to be punted to the discard lists because the
 * order of operations is changed.  In the normal synchronous discard path, the
 * block groups are trimmed via a single large trim in transaction commit.  This
 * is ultimately what we are trying to avoid with asynchronous discard.  Thus,
 * it must be done before going down the unused_bgs path.
 */
void btrfs_discard_punt_unused_bgs_list(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group, *next;

	spin_lock(&fs_info->unused_bgs_lock);
	/* We enabled async discard, so punt all to the queue */
	list_for_each_entry_safe(block_group, next, &fs_info->unused_bgs,
				 bg_list) {
		list_del_init(&block_group->bg_list);
		btrfs_put_block_group(block_group);
		btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}

/*
 * Purge discard lists.
 *
 * @discard_ctl: discard control
 *
 * If we are disabling async discard, we may have intercepted block groups that
 * are completely free and ready for the unused_bgs path.  As discarding will
 * now happen in transaction commit or not at all, we can safely mark the
 * corresponding block groups as unused and they will be sent on their merry
 * way to the unused_bgs list.
 */
static void btrfs_discard_purge_list(struct btrfs_discard_ctl *discard_ctl)
{
	struct btrfs_block_group *block_group, *next;
	int i;

	spin_lock(&discard_ctl->lock);
	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++) {
		list_for_each_entry_safe(block_group, next,
					 &discard_ctl->discard_list[i],
					 discard_list) {
			list_del_init(&block_group->discard_list);
			spin_unlock(&discard_ctl->lock);
			if (block_group->used == 0)
				btrfs_mark_bg_unused(block_group);
			spin_lock(&discard_ctl->lock);
		}
	}
	spin_unlock(&discard_ctl->lock);
}

void btrfs_discard_resume(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_test_opt(fs_info, DISCARD_ASYNC)) {
		btrfs_discard_cleanup(fs_info);
		return;
	}

	btrfs_discard_punt_unused_bgs_list(fs_info);

	set_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_stop(struct btrfs_fs_info *fs_info)
{
	clear_bit(BTRFS_FS_DISCARD_RUNNING, &fs_info->flags);
}

void btrfs_discard_init(struct btrfs_fs_info *fs_info)
{
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	int i;

	spin_lock_init(&discard_ctl->lock);
	INIT_DELAYED_WORK(&discard_ctl->work, btrfs_discard_workfn);

	for (i = 0; i < BTRFS_NR_DISCARD_LISTS; i++)
		INIT_LIST_HEAD(&discard_ctl->discard_list[i]);

	discard_ctl->prev_discard = 0;
	discard_ctl->prev_discard_time = 0;
	atomic_set(&discard_ctl->discardable_extents, 0);
	atomic64_set(&discard_ctl->discardable_bytes, 0);
	discard_ctl->max_discard_size = BTRFS_ASYNC_DISCARD_DEFAULT_MAX_SIZE;
	discard_ctl->delay_ms = BTRFS_DISCARD_MAX_DELAY_MSEC;
	discard_ctl->iops_limit = BTRFS_DISCARD_MAX_IOPS;
	discard_ctl->kbps_limit = 0;
	discard_ctl->discard_extent_bytes = 0;
	discard_ctl->discard_bitmap_bytes = 0;
	atomic64_set(&discard_ctl->discard_bytes_saved, 0);
}

void btrfs_discard_cleanup(struct btrfs_fs_info *fs_info)
{
	btrfs_discard_stop(fs_info);
	cancel_delayed_work_sync(&fs_info->discard_ctl.work);
	btrfs_discard_purge_list(&fs_info->discard_ctl);
}