// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;
	struct f2fs_gc_control gc_control = {
		.victim_segno = NULL_SEGNO,
		.should_migrate_blocks = false,
		.err_gc_skipped = false };

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_freezable_timeout(*wq,
				kthread_should_stop() ||
					waitqueue_active(fggc_wq) ||
					gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = false;

		if (f2fs_readonly(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT))
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		gc_control.one_time = false;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. The IO subsystem is idle, judged by the number of
		 *    writeback pages.
		 * 3. The IO subsystem is idle, judged by the number of
		 *    requests in the bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait some time to
		 * let dirty segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (f2fs_sb_has_blkzoned(sbi)) {
			if (has_enough_free_blocks(sbi,
					gc_th->no_zoned_gc_percent)) {
				wait_ms = gc_th->no_gc_sleep_time;
				f2fs_up_write(&sbi->gc_lock);
				goto next;
			}
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->max_sleep_time;
		}

		if (need_to_boost_gc(sbi)) {
			decrease_sleep_time(gc_th, &wait_ms);
			if (f2fs_sb_has_blkzoned(sbi))
				gc_control.one_time = true;
		} else {
			increase_sleep_time(gc_th, &wait_ms);
		}
do_gc:
		stat_inc_gc_call_count(sbi, foreground ?
					FOREGROUND : BACKGROUND);

		sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
				gc_control.one_time;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
		gc_control.no_bg_gc = foreground;
		gc_control.nr_free_secs = foreground ? 1 : 0;

		/* if the return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, &gc_control)) {
			/* don't bother wait_ms by foreground gc */
			if (!foreground)
				wait_ms = gc_th->no_gc_sleep_time;
		} else {
			/* reset wait_ms to the default sleep time */
			if (wait_ms == gc_th->no_gc_sleep_time)
				wait_ms = gc_th->min_sleep_time;
		}

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode != GC_NORMAL) {
			spin_lock(&sbi->gc_remaining_trials_lock);
			if (sbi->gc_remaining_trials) {
				sbi->gc_remaining_trials--;
				if (!sbi->gc_remaining_trials)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_remaining_trials_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
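/*
 * Illustration of the adaptive sleep above, assuming the default non-zoned
 * tunables from gc.h at the time of writing (min/max/no-GC sleep of
 * 30s/60s/300s and an urgent sleep of 500ms):
 *   - disk idle, victim found:  wait_ms decays toward min_sleep_time (30s)
 *   - disk busy (!is_idle):     wait_ms grows toward max_sleep_time (60s)
 *   - no victim selected:       wait_ms jumps to no_gc_sleep_time (300s)
 *   - GC_URGENT_HIGH/MID:       wait_ms is pinned to urgent_sleep_time (0.5s)
 */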
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th)
		return -ENOMEM;

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->valid_thresh_ratio = DEF_GC_THREAD_VALID_THRESH_RATIO;

	if (f2fs_sb_has_blkzoned(sbi)) {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
		gc_th->no_zoned_gc_percent = LIMIT_NO_ZONED_GC;
		gc_th->boost_zoned_gc_percent = LIMIT_BOOST_ZONED_GC;
	} else {
		gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
		gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
		gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
		gc_th->no_zoned_gc_percent = 0;
		gc_th->boost_zoned_gc_percent = 0;
	}

	gc_th->gc_wake = false;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		int err = PTR_ERR(gc_th->f2fs_gc_task);

		kfree(gc_th);
		sbi->gc_thread = NULL;
		return err;
	}

	return 0;
}
void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}
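/*
 * In short: background GC prefers cost-benefit (GC_CB), or age-threshold
 * (GC_AT) when ATGC is enabled, to pick cold victims cheaply; foreground GC
 * uses greedy (GC_GREEDY) to free space as fast as possible; and the sysfs
 * gc_mode knob (GC_IDLE_CB/GREEDY/AT, GC_URGENT_HIGH) overrides both.
 */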
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = SEGS_PER_SEC(sbi);
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range; all dirty segments should be
	 * considered for foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first. */
	if (f2fs_need_rand_seg(sbi))
		p->offset = get_random_u32_below(MAIN_SECS(sbi) *
						SEGS_PER_SEC(sbi));
	else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return BLKS_PER_SEG(sbi);
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
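/*
 * Example, assuming the common geometry of 4KB blocks and 2MB segments
 * (512 blocks) with one-segment sections: the greedy search starts from
 * min_cost = SEGS_TO_BLKS(sbi, 2) = 1024, so any dirty section (at most
 * 512 valid blocks) can beat it; CB/AT costs count down from UINT_MAX,
 * so those searches start from UINT_MAX instead.
 */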
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before. Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi);

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);
	vblocks = get_valid_blocks(sbi, segno, true);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = BLKS_TO_SEGS(sbi, vblocks * 100);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
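/*
 * Worked example of the cost-benefit formula above: a section that is 20%
 * utilized (u = 20) and sits at 80% of the observed mtime range (age = 80)
 * gets
 *	cost = UINT_MAX - (100 * (100 - 20) * 80) / (100 + 20)
 *	     = UINT_MAX - 5333,
 * while an 80%-utilized section of the same age gets UINT_MAX - 888, i.e.
 * a higher cost; since the lowest cost wins, the cold, mostly-invalid
 * section is preferred.
 */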
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	if (p->one_time_gc && (get_valid_blocks(sbi, segno, true) >=
			CAP_BLKS_PER_SEC(sbi) * sbi->gc_thread->valid_thresh_ratio /
			100))
		return UINT_MAX;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}
static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct rb_node *cur = rb_first_cached(root), *next;
	struct victim_entry *cur_ve, *next_ve;

	while (cur) {
		next = rb_next(cur);
		if (!next)
			return true;

		cur_ve = rb_entry(cur, struct victim_entry, rb_node);
		next_ve = rb_entry(next, struct victim_entry, rb_node);

		if (cur_ve->mtime > next_ve->mtime) {
			f2fs_info(sbi, "broken victim_rbtree, "
				"cur_mtime(%llu) next_mtime(%llu)",
				cur_ve->mtime, next_ve->mtime);
			return false;
		}
		cur = next;
	}
#endif
	return true;
}
static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
					unsigned long long mtime)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *node = am->root.rb_root.rb_node;
	struct victim_entry *ve = NULL;

	while (node) {
		ve = rb_entry(node, struct victim_entry, rb_node);

		if (mtime < ve->mtime)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return ve;
}
static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
		unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	list_add_tail(&ve->list, &am->victim_list);
	am->victim_count++;

	return ve;
}
static void __insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct victim_entry *ve;
	bool left_most = true;

	/* look up the rb tree to find the parent node */
	while (*p) {
		parent = *p;
		ve = rb_entry(parent, struct victim_entry, rb_node);

		if (mtime < ve->mtime) {
			p = &(*p)->rb_left;
		} else {
			p = &(*p)->rb_right;
			left_most = false;
		}
	}

	ve = __create_victim_entry(sbi, mtime, segno);

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, root, left_most);
}
static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long long mtime = 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	mtime = f2fs_get_section_mtime(sbi, segno);
	f2fs_bug_on(sbi, mtime == INVALID_MTIME);

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	__insert_victim_entry(sbi, mtime, segno);
}
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	ve = rb_entry_safe(node, struct victim_entry, rb_node);
	if (!ve)
		return;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}
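/*
 * Worked example for the weighting above, assuming accu is capped at
 * DEFAULT_ACCURACY_CLASS (10000 in gc.h at the time of writing) and the
 * default age_weight of 60: a candidate written near min_mtime (90% of the
 * age span) holding 30% valid blocks scores
 *	age = 10000 * 90% * 60 = 540000,  u = 10000 * 70% * 40 = 280000,
 * so cost = UINT_MAX - 820000. Both terms are bounded by 10000 * 100, so
 * age + u can never overflow the UINT_MAX base.
 */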
/*
 * select candidates around the source section in the range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost, iter;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	iter = 0;
	ve = __lookup_victim_entry(sbi, p->age);
next_node:
	if (!ve) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == BLKS_PER_SEG(sbi))
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
					rb_next(&ve->rb_node),
					struct victim_entry, rb_node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}
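/*
 * Note on the two-stage walk above: stage 0 moves towards older entries
 * (rb_prev) from the target age and stage 1 towards newer ones (rb_next),
 * so up to dirty_threshold candidates on each side of the target age are
 * considered.
 */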
static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}
static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called during GC, it just gets a victim segment and does not
 * remove it from the dirty seglist.
 * When called from SSR segment selection, it finds a segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
			int gc_type, int type, char alloc_mode,
			unsigned long long age, bool one_time)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;
	p.one_time_gc = one_time;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * Skip a segno that previously failed the block validity
		 * check during GC, to avoid an endless GC loop in such
		 * cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim should have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which can be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
			break;
		}
	}

	/* get a victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;

	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares the node address in the summary with that in the
 * NAT. If the node is valid, it is migrated with cold status; otherwise
 * (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* the block may become invalid during f2fs_get_node_page() */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}
/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block only. Passing an offset that refers to another type of node
 * block (indirect or double indirect) is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}
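/*
 * Example: node_ofs == 1 (the first direct node) gives bidx = 0, i.e. its
 * data starts right after the inode's own ADDRS_PER_INODE() blocks;
 * node_ofs == 4 (the first direct node under the first indirect node, one
 * slot past the two direct nodes and the indirect node itself) gives
 * bidx = 4 - 2 - 0 = 2, i.e. a start block of
 * 2 * ADDRS_PER_BLOCK() + ADDRS_PER_INODE().
 */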
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_read_extent_cache_block(inode, index,
						&dn.data_blkaddr)) {
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}

got_it:
	/* read the page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid a race between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}
/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct address_space *mapping = f2fs_is_cow_file(inode) ?
				F2FS_I(inode)->atomic_inode->i_mapping :
				inode->i_mapping;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid a race between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read the page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read the source block into mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
							F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
							F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate a block address */
	err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);
	if (err) {
		f2fs_put_page(mpage, 1);
		/* the filesystem should shut down, no need to recover the block */
		goto up_out;
	}

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write the target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);

	f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);

	f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);

	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}
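/*
 * The flow above, in short: read the source block into a META_MAPPING
 * (meta inode) page, allocate a new LBA via f2fs_allocate_data_block(),
 * copy the bytes into a meta page for the new address and write it out,
 * then repoint the dnode at newaddr. This path is used when the plain
 * page cache cannot be rewritten in place (see
 * f2fs_meta_inode_gc_required(), e.g. encrypted files), because the
 * on-disk bytes must be moved verbatim.
 */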
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (folio_test_writeback(page_folio(page))) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				memalloc_retry_wait(GFP_NOFS);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}
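/*
 * Unlike move_data_block(), this path goes through the regular page cache:
 * background GC only redirties the page and tags it with the gcing flag so
 * that writeback migrates it later, while foreground GC writes it out
 * synchronously (REQ_SYNC) via f2fs_do_write_data_page().
 */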
/*
 * This function tries to get the parent node of a victim data block and
 * checks the data block's validity. If the block is valid, it is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address differs, the
 * victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections, or stop
		 * GC if the segment became fully valid due to a race with
		 * SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							CAP_BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino while checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			if (is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				iput(inode);
				continue;
			}

			if (f2fs_has_inline_data(inode)) {
				iput(inode);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				f2fs_err_ratelimited(sbi,
					"inode %lx has both inline_data flag and "
					"data block, nid=%u, ofs_in_node=%u",
					inode->i_ino, dni.nid, ofs_in_node);
				continue;
			}

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_meta_inode_gc_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode, start_bidx,
							REQ_RAHEAD, true, NULL);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
					continue;
				}
				locked = true;

				/* wait for all in-flight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_meta_inode_gc_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_meta_inode_gc_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}
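/*
 * The five phases above pipeline the work for one victim segment:
 *   0: readahead the NAT blocks for all summary nids
 *   1: readahead the dnode pages themselves
 *   2: check liveness via is_alive() and readahead the owning inodes
 *   3: f2fs_iget() the inodes and readahead their data blocks
 *   4: actually migrate each block via move_data_block()/move_data_page()
 */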
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type, bool one_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE,
				LFS, 0, one_time);
	up_write(&sit_i->sentry_lock);
	return ret;
}
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate, bool one_time)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
	unsigned int sec_end_segno;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
	int submitted = 0;

	if (__is_large_section(sbi)) {
		sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));

		/*
		 * The zone capacity can be less than the zone size on zoned
		 * devices, resulting in fewer usable segments in the zone
		 * than expected, so calculate the end segno of the zone that
		 * can actually be garbage collected.
		 */
		if (f2fs_sb_has_blkzoned(sbi))
			sec_end_segno -= SEGS_PER_SEC(sbi) -
					f2fs_usable_segs_in_sec(sbi);

		if (gc_type == BG_GC || one_time) {
			unsigned int window_granularity =
				sbi->migration_window_granularity;

			if (f2fs_sb_has_blkzoned(sbi) &&
					!has_enough_free_blocks(sbi,
					sbi->gc_thread->boost_zoned_gc_percent))
				window_granularity *=
					BOOST_GC_MULTIPLE;

			end_segno = start_segno + window_granularity;
		}

		if (end_segno > sec_end_segno)
			end_segno = sec_end_segno;
	}

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				/* drop both the f2fs_get_sum_page() and the
				 * find_get_page() references */
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find the segment summary of the victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_gc_seg_count(sbi, data_type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < sec_end_segno) ?
					segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi, data_type);

	blk_finish_plug(&plug);

	if (migrated)
		stat_inc_gc_sec_count(sbi, data_type, gc_type);

	return seg_freed;
}
int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
{
	int gc_type = gc_control->init_gc_type;
	unsigned int segno = gc_control->victim_segno;
	int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned int skipped_round = 0, round = 0;
	unsigned int upper_secs;

	trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
				gc_control->nr_free_secs,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	sbi->skipped_gc_rwsem = 0;
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	/* Let's run FG_GC, if we don't have enough space. */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		gc_type = FG_GC;

		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which don't need FG_GC any
		 * more.
		 */
		if (prefree_segments(sbi)) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
			/* Reset due to checkpoint */
			sec_freed = 0;
		}
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
	if (gc_type == BG_GC && gc_control->no_bg_gc) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type, gc_control->one_time);
	if (ret) {
		/* allow searching for victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
				gc_control->should_migrate_blocks,
				gc_control->one_time);
	if (seg_freed < 0)
		goto stop;

	total_freed += seg_freed;

	if (seg_freed == f2fs_usable_segs_in_sec(sbi)) {
		sec_freed++;
		total_sec_freed++;
	}

	if (gc_control->one_time)
		goto stop;

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;

		if (has_enough_free_secs(sbi, sec_freed, 0)) {
			if (!gc_control->no_bg_gc &&
			    total_sec_freed < gc_control->nr_free_secs)
				goto go_gc_more;
			goto stop;
		}
		if (sbi->skipped_gc_rwsem)
			skipped_round++;
		round++;
		if (skipped_round > MAX_SKIP_GC_COUNT &&
				skipped_round * 2 >= round) {
			stat_inc_cp_call_count(sbi, TOTAL_CALL);
			ret = f2fs_write_checkpoint(sbi, &cpc);
			goto stop;
		}
	} else if (has_enough_free_secs(sbi, 0, 0)) {
		goto stop;
	}

	__get_secs_required(sbi, NULL, &upper_secs, NULL);

	/*
	 * Write a checkpoint to reclaim prefree segments.
	 * We need three more sections for the writer's data/node/dentry.
	 */
	if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
				prefree_segments(sbi)) {
		stat_inc_cp_call_count(sbi, TOTAL_CALL);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		if (ret)
			goto stop;
		/* Reset due to checkpoint */
		sec_freed = 0;
	}
go_gc_more:
	segno = NULL_SEGNO;
	goto gc_more;

stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (gc_control->err_gc_skipped && !ret)
		ret = total_sec_freed ? 0 : -EAGAIN;
	return ret;
}
int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	return victim_entry_slab ? 0 : -ENOMEM;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* hand the warm/cold data area over from the slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}
int f2fs_gc_range(struct f2fs_sb_info *sbi,
		unsigned int start_seg, unsigned int end_seg,
		bool dry_run, unsigned int dry_run_sections)
{
	unsigned int segno;
	unsigned int gc_secs = dry_run_sections;

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
		put_gc_inode(&gc_list);

		if (!dry_run && get_valid_blocks(sbi, segno, true))
			return -EAGAIN;
		if (dry_run && dry_run_sections &&
		    !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
			break;

		if (fatal_signal_pending(current))
			return -ERESTARTSYS;
	}

	return 0;
}
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool dry_run)
{
	unsigned int next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move cursegs out of the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
		err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
		if (err)
			goto out;
	}

	/* do GC to move valid blocks out of the range */
	err = f2fs_gc_range(sbi, start, end, dry_run, 0);
	if (err || dry_run)
		goto out;

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
		err = -EAGAIN;
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * SEGS_PER_SEC(sbi);

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)SEGS_TO_BLKS(sbi, segs));
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * SEGS_PER_SEC(sbi);
	long long blks = SEGS_TO_BLKS(sbi, segs);
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
					div_u64(blks, sbi->blocks_per_blkz);
#endif
	}
}
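/*
 * Example of the resize bookkeeping, assuming 2MB segments (512 4KB blocks)
 * and one-segment sections: shrinking by 4 sections calls this helper with
 * secs = -4, so segment/section counts drop by 4 and user_block_count drops
 * by SEGS_TO_BLKS(sbi, 4) = 2048 blocks; the same helper is called with
 * +secs to roll the change back if the checkpoint fails.
 */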
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
								old_block_count)
			return -EINVAL;
	}

	/* the new fs size should align to the section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

	err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
		if (err)
			return err;
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	stat_inc_cp_call_count(sbi, TOTAL_CALL);
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
	return err;
}