// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));
39 /* give it a try one time */
43 if (try_to_freeze()) {
44 stat_other_skip_bggc_count(sbi);
47 if (kthread_should_stop())
50 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
51 increase_sleep_time(gc_th, &wait_ms);
52 stat_other_skip_bggc_count(sbi);
56 if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
57 f2fs_show_injection_info(FAULT_CHECKPOINT);
58 f2fs_stop_checkpoint(sbi, false);
61 if (!sb_start_write_trylock(sbi->sb)) {
62 stat_other_skip_bggc_count(sbi);
67 * [GC triggering condition]
68 * 0. GC is not conducted currently.
69 * 1. There are enough dirty segments.
70 * 2. IO subsystem is idle by checking the # of writeback pages.
71 * 3. IO subsystem is idle by checking the # of requests in
72 * bdev's request list.
74 * Note) We have to avoid triggering GCs frequently.
75 * Because it is possible that some segments can be
76 * invalidated soon after by user update or deletion.
77 * So, I'd like to wait some time to collect dirty segments.
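		/*
		 * A short summary of the adaptive back-off implemented below:
		 *  - GC_URGENT mode sleeps urgent_sleep_time and takes
		 *    gc_mutex unconditionally;
		 *  - a frozen superblock or busy I/O makes increase_sleep_time()
		 *    stretch the next wait;
		 *  - has_enough_invalid_blocks() shortens the wait via
		 *    decrease_sleep_time(), otherwise it grows;
		 *  - when f2fs_gc() finds no victim, the thread backs off to
		 *    no_gc_sleep_time.
		 * The default times are set up in f2fs_start_gc_thread().
		 */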
79 if (sbi->gc_mode == GC_URGENT) {
80 wait_ms = gc_th->urgent_sleep_time;
81 mutex_lock(&sbi->gc_mutex);
85 if (!mutex_trylock(&sbi->gc_mutex)) {
86 stat_other_skip_bggc_count(sbi);
90 if (!is_idle(sbi, GC_TIME)) {
91 increase_sleep_time(gc_th, &wait_ms);
92 mutex_unlock(&sbi->gc_mutex);
93 stat_io_skip_bggc_count(sbi);
97 if (has_enough_invalid_blocks(sbi))
98 decrease_sleep_time(gc_th, &wait_ms);
100 increase_sleep_time(gc_th, &wait_ms);
102 stat_inc_bggc_count(sbi);
104 /* if return value is not zero, no victim was selected */
105 if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
106 wait_ms = gc_th->no_gc_sleep_time;
108 trace_f2fs_background_gc(sbi->sb, wait_ms,
109 prefree_segments(sbi), free_segments(sbi));
111 /* balancing f2fs's metadata periodically */
112 f2fs_balance_fs_bg(sbi);
114 sb_end_write(sbi->sb);
116 } while (!kthread_should_stop());
120 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
122 struct f2fs_gc_kthread *gc_th;
123 dev_t dev = sbi->sb->s_bdev->bd_dev;
126 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
132 gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
133 gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
134 gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
135 gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
139 sbi->gc_thread = gc_th;
140 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
141 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
142 "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
143 if (IS_ERR(gc_th->f2fs_gc_task)) {
144 err = PTR_ERR(gc_th->f2fs_gc_task);
146 sbi->gc_thread = NULL;
152 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
154 struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
157 kthread_stop(gc_th->f2fs_gc_task);
159 sbi->gc_thread = NULL;
162 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
164 int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
166 switch (sbi->gc_mode) {
178 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
179 int type, struct victim_sel_policy *p)
181 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
183 if (p->alloc_mode == SSR) {
184 p->gc_mode = GC_GREEDY;
185 p->dirty_segmap = dirty_i->dirty_segmap[type];
186 p->max_search = dirty_i->nr_dirty[type];
189 p->gc_mode = select_gc_type(sbi, gc_type);
190 p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
191 p->max_search = dirty_i->nr_dirty[DIRTY];
192 p->ofs_unit = sbi->segs_per_sec;
	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;
	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
			(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}
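/*
 * In short, select_policy() picks SSR victims greedily from the per-type
 * dirty segment map, while LFS cleaning victims come from the global DIRTY
 * map in section units with the cost model chosen by select_gc_type().
 * max_search caps how many candidates a single call may scan (except for
 * FG_GC and GC_URGENT), and p->offset resumes the scan from the last_victim
 * cursor, or from the beginning of the space when NOHEAP is set for hot
 * data / node segments.
 */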
209 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
210 struct victim_sel_policy *p)
212 /* SSR allocates in a segment unit */
213 if (p->alloc_mode == SSR)
214 return sbi->blocks_per_seg;
215 if (p->gc_mode == GC_GREEDY)
216 return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
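	/*
	 * This maximum is only used to seed p->min_cost in
	 * get_victim_by_default(): it is the largest cost a candidate of the
	 * given policy can have, so the first segment scanned always becomes
	 * the provisional victim.
	 */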
223 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
225 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
233 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
234 if (sec_usage_check(sbi, secno))
236 clear_bit(secno, dirty_i->victim_secmap);
237 return GET_SEG_FROM_SEC(sbi, secno);
242 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
244 struct sit_info *sit_i = SIT_I(sbi);
245 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
246 unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
247 unsigned long long mtime = 0;
248 unsigned int vblocks;
249 unsigned char age = 0;
253 for (i = 0; i < sbi->segs_per_sec; i++)
254 mtime += get_seg_entry(sbi, start + i)->mtime;
255 vblocks = get_valid_blocks(sbi, segno, true);
257 mtime = div_u64(mtime, sbi->segs_per_sec);
258 vblocks = div_u64(vblocks, sbi->segs_per_sec);
260 u = (vblocks * 100) >> sbi->log_blocks_per_seg;
	/* Handle the case where the system time has been changed by the user */
263 if (mtime < sit_i->min_mtime)
264 sit_i->min_mtime = mtime;
265 if (mtime > sit_i->max_mtime)
266 sit_i->max_mtime = mtime;
267 if (sit_i->max_mtime != sit_i->min_mtime)
268 age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
269 sit_i->max_mtime - sit_i->min_mtime);
271 return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
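	/*
	 * Worked example for the cost-benefit value above: with u = 20 (20%
	 * of the section still valid) and age = 80, the subtracted term is
	 * (100 * 80 * 80) / 120 = 5333, while a full and young section
	 * (u = 90, age = 10) only subtracts (100 * 10 * 10) / 190 = 52.
	 * Since the victim with the smallest cost wins, mostly-invalid and
	 * long-unmodified sections are preferred, trading copy overhead (u)
	 * against the chance that the data is invalidated soon anyway (age).
	 */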
274 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
275 unsigned int segno, struct victim_sel_policy *p)
277 if (p->alloc_mode == SSR)
278 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
280 /* alloc_mode == LFS */
281 if (p->gc_mode == GC_GREEDY)
282 return get_valid_blocks(sbi, segno, true);
284 return get_cb_cost(sbi, segno);
287 static unsigned int count_bits(const unsigned long *addr,
288 unsigned int offset, unsigned int len)
290 unsigned int end = offset + len, sum = 0;
292 while (offset < end) {
293 if (test_bit(offset++, addr))
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has the minimum number of valid blocks and removes it from
 * the dirty seglist.
 */
307 static int get_victim_by_default(struct f2fs_sb_info *sbi,
308 unsigned int *result, int gc_type, int type, char alloc_mode)
310 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
311 struct sit_info *sm = SIT_I(sbi);
312 struct victim_sel_policy p;
313 unsigned int secno, last_victim;
314 unsigned int last_segment = MAIN_SEGS(sbi);
315 unsigned int nsearched = 0;
317 mutex_lock(&dirty_i->seglist_lock);
319 p.alloc_mode = alloc_mode;
320 select_policy(sbi, gc_type, type, &p);
322 p.min_segno = NULL_SEGNO;
323 p.min_cost = get_max_cost(sbi, &p);
325 if (*result != NULL_SEGNO) {
326 if (get_valid_blocks(sbi, *result, false) &&
327 !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
328 p.min_segno = *result;
332 if (p.max_search == 0)
335 if (__is_large_section(sbi) && p.alloc_mode == LFS) {
336 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
337 p.min_segno = sbi->next_victim_seg[BG_GC];
338 *result = p.min_segno;
339 sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
342 if (gc_type == FG_GC &&
343 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
344 p.min_segno = sbi->next_victim_seg[FG_GC];
345 *result = p.min_segno;
346 sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
351 last_victim = sm->last_victim[p.gc_mode];
352 if (p.alloc_mode == LFS && gc_type == FG_GC) {
353 p.min_segno = check_bg_victims(sbi);
354 if (p.min_segno != NULL_SEGNO)
362 segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
374 p.offset = segno + p.ofs_unit;
375 if (p.ofs_unit > 1) {
376 p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
384 secno = GET_SEC_FROM_SEG(sbi, segno);
386 if (sec_usage_check(sbi, secno))
388 /* Don't touch checkpointed data */
389 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
390 get_ckpt_valid_blocks(sbi, segno)))
392 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
395 cost = get_gc_cost(sbi, segno, &p);
397 if (p.min_cost > cost) {
402 if (nsearched >= p.max_search) {
403 if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
404 sm->last_victim[p.gc_mode] = last_victim + 1;
406 sm->last_victim[p.gc_mode] = segno + 1;
407 sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
411 if (p.min_segno != NULL_SEGNO) {
413 *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
415 if (p.alloc_mode == LFS) {
416 secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
417 if (gc_type == FG_GC)
418 sbi->cur_victim_sec = secno;
420 set_bit(secno, dirty_i->victim_secmap);
425 if (p.min_segno != NULL_SEGNO)
426 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
428 prefree_segments(sbi), free_segments(sbi));
429 mutex_unlock(&dirty_i->seglist_lock);
431 return (p.min_segno == NULL_SEGNO) ? 0 : 1;
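	/*
	 * Note on the search loop above: sm->last_victim[] is a circular
	 * cursor so consecutive calls resume where the previous scan stopped,
	 * and nsearched >= p.max_search bounds the work done per call.  The
	 * return value is 1 when a victim was found (stored through *result)
	 * and 0 when the dirty map yielded no acceptable candidate.
	 */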
434 static const struct victim_selection default_v_ops = {
435 .get_victim = get_victim_by_default,
438 static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
440 struct inode_entry *ie;
442 ie = radix_tree_lookup(&gc_list->iroot, ino);
448 static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
450 struct inode_entry *new_ie;
452 if (inode == find_gc_inode(gc_list, inode->i_ino)) {
456 new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
457 new_ie->inode = inode;
459 f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
460 list_add_tail(&new_ie->list, &gc_list->ilist);
463 static void put_gc_inode(struct gc_inode_list *gc_list)
465 struct inode_entry *ie, *next_ie;
466 list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
467 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
470 kmem_cache_free(f2fs_inode_entry_slab, ie);
474 static int check_valid_map(struct f2fs_sb_info *sbi,
475 unsigned int segno, int offset)
477 struct sit_info *sit_i = SIT_I(sbi);
478 struct seg_entry *sentry;
481 down_read(&sit_i->sentry_lock);
482 sentry = get_seg_entry(sbi, segno);
483 ret = f2fs_test_bit(offset, sentry->cur_valid_map);
484 up_read(&sit_i->sentry_lock);
/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT; if they still match, the node is migrated with cold status.
 */
493 static int gc_node_segment(struct f2fs_sb_info *sbi,
494 struct f2fs_summary *sum, unsigned int segno, int gc_type)
496 struct f2fs_summary *entry;
500 bool fggc = (gc_type == FG_GC);
503 start_addr = START_BLOCK(sbi, segno);
508 if (fggc && phase == 2)
509 atomic_inc(&sbi->wb_sync_req[NODE]);
511 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
512 nid_t nid = le32_to_cpu(entry->nid);
513 struct page *node_page;
		/* stop BG_GC if there are not enough free sections. */
518 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
521 if (check_valid_map(sbi, segno, off) == 0)
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
531 f2fs_ra_node_page(sbi, nid);
536 node_page = f2fs_get_node_page(sbi, nid);
537 if (IS_ERR(node_page))
540 /* block may become invalid during f2fs_get_node_page */
541 if (check_valid_map(sbi, segno, off) == 0) {
542 f2fs_put_page(node_page, 1);
546 if (f2fs_get_node_info(sbi, nid, &ni)) {
547 f2fs_put_page(node_page, 1);
551 if (ni.blk_addr != start_addr + off) {
552 f2fs_put_page(node_page, 1);
556 err = f2fs_move_node_page(node_page, gc_type);
557 if (!err && gc_type == FG_GC)
559 stat_inc_node_blk_count(sbi, 1, gc_type);
566 atomic_dec(&sbi->wb_sync_req[NODE]);
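	/*
	 * The walk above is made in several passes over the summary entries:
	 * the early passes only issue readahead for NAT blocks and node
	 * pages, and the final pass re-checks validity (check_valid_map() and
	 * the NAT lookup) before f2fs_move_node_page(), since a block can be
	 * invalidated while readahead is still in flight.
	 */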
/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block. Passing the offset of any other node type (indirect or
 * double indirect node blocks) is a caller's mistake.
 */
577 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
579 unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
587 } else if (node_ofs <= indirect_blks) {
588 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
589 bidx = node_ofs - 2 - dec;
591 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
592 bidx = node_ofs - 5 - dec;
594 return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
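	/*
	 * Example for the indirect branch above (one level of indirection):
	 * the first direct node under the first indirect node has node_ofs = 4,
	 * so dec = (4 - 4) / (NIDS_PER_BLOCK + 1) = 0 and bidx = 4 - 2 - 0 = 2,
	 * i.e. it is the third data-bearing node block and its first file block
	 * index is 2 * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode).
	 * The "dec" term discounts indirect/double-indirect node blocks, which
	 * occupy node offsets but carry no data addresses themselves.
	 */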
597 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
598 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
600 struct page *node_page;
602 unsigned int ofs_in_node;
603 block_t source_blkaddr;
605 nid = le32_to_cpu(sum->nid);
606 ofs_in_node = le16_to_cpu(sum->ofs_in_node);
608 node_page = f2fs_get_node_page(sbi, nid);
609 if (IS_ERR(node_page))
612 if (f2fs_get_node_info(sbi, nid, dni)) {
613 f2fs_put_page(node_page, 1);
617 if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
621 set_sbi_flag(sbi, SBI_NEED_FSCK);
624 *nofs = ofs_of_node(node_page);
625 source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
626 f2fs_put_page(node_page, 1);
628 if (source_blkaddr != blkaddr)
633 static int ra_data_block(struct inode *inode, pgoff_t index)
635 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
636 struct address_space *mapping = inode->i_mapping;
637 struct dnode_of_data dn;
639 struct extent_info ei = {0, 0, 0};
640 struct f2fs_io_info fio = {
647 .encrypted_page = NULL,
653 page = f2fs_grab_cache_page(mapping, index, true);
657 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
658 dn.data_blkaddr = ei.blk + index - ei.fofs;
659 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
660 DATA_GENERIC_ENHANCE_READ))) {
667 set_new_dnode(&dn, inode, NULL, NULL, 0);
668 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
673 if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
677 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
678 DATA_GENERIC_ENHANCE))) {
685 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	/*
	 * don't cache encrypted data into the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
691 f2fs_wait_on_page_writeback(page, DATA, true, true);
693 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
698 if (!fio.encrypted_page) {
703 err = f2fs_submit_page_bio(&fio);
705 goto put_encrypted_page;
706 f2fs_put_page(fio.encrypted_page, 0);
707 f2fs_put_page(page, 1);
710 f2fs_put_page(fio.encrypted_page, 1);
712 f2fs_put_page(page, 1);
/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
720 static int move_data_block(struct inode *inode, block_t bidx,
721 int gc_type, unsigned int segno, int off)
723 struct f2fs_io_info fio = {
724 .sbi = F2FS_I_SB(inode),
730 .encrypted_page = NULL,
734 struct dnode_of_data dn;
735 struct f2fs_summary sum;
737 struct page *page, *mpage;
740 bool lfs_mode = test_opt(fio.sbi, LFS);
742 /* do not read out */
743 page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
747 if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
752 if (f2fs_is_atomic_file(inode)) {
753 F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
754 F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
759 if (f2fs_is_pinned_file(inode)) {
760 f2fs_pin_file_control(inode, true);
765 set_new_dnode(&dn, inode, NULL, NULL, 0);
766 err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
770 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
771 ClearPageUptodate(page);
	/*
	 * don't cache encrypted data into the meta inode until previous dirty
	 * data has been written back, to avoid racing between GC and flush.
	 */
780 f2fs_wait_on_page_writeback(page, DATA, true, true);
782 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
784 err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
788 set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
792 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
795 down_write(&fio.sbi->io_order_lock);
797 f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
798 &sum, CURSEG_COLD_DATA, NULL, false);
800 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
801 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
802 if (!fio.encrypted_page) {
807 mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
808 fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
810 bool updated = false;
812 if (PageUptodate(mpage)) {
813 memcpy(page_address(fio.encrypted_page),
814 page_address(mpage), PAGE_SIZE);
817 f2fs_put_page(mpage, 1);
818 invalidate_mapping_pages(META_MAPPING(fio.sbi),
819 fio.old_blkaddr, fio.old_blkaddr);
824 err = f2fs_submit_page_bio(&fio);
829 lock_page(fio.encrypted_page);
831 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
835 if (unlikely(!PageUptodate(fio.encrypted_page))) {
841 f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
842 set_page_dirty(fio.encrypted_page);
843 if (clear_page_dirty_for_io(fio.encrypted_page))
844 dec_page_count(fio.sbi, F2FS_DIRTY_META);
846 set_page_writeback(fio.encrypted_page);
847 ClearPageError(page);
849 /* allocate block address */
850 f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
852 fio.op = REQ_OP_WRITE;
853 fio.op_flags = REQ_SYNC;
854 fio.new_blkaddr = newaddr;
855 f2fs_submit_page_write(&fio);
858 if (PageWriteback(fio.encrypted_page))
859 end_page_writeback(fio.encrypted_page);
863 f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
865 f2fs_update_data_blkaddr(&dn, newaddr);
866 set_inode_flag(inode, FI_APPEND_WRITE);
867 if (page->index == 0)
868 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
870 f2fs_put_page(fio.encrypted_page, 1);
873 up_write(&fio.sbi->io_order_lock);
875 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
880 f2fs_put_page(page, 1);
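	/*
	 * To summarize the flow above: the victim block is migrated through
	 * META_MAPPING rather than the inode's page cache.  A new block
	 * address is reserved with f2fs_allocate_data_block(), the old
	 * block's contents are copied (or read) into the meta page keyed by
	 * the new address, that page is written out with REQ_SYNC, and the
	 * dnode entry is repointed via f2fs_update_data_blkaddr().  The error
	 * path releases the reserved address again through
	 * f2fs_do_replace_block().
	 */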
884 static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
885 unsigned int segno, int off)
890 page = f2fs_get_lock_data_page(inode, bidx, true);
892 return PTR_ERR(page);
894 if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
899 if (f2fs_is_atomic_file(inode)) {
900 F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
901 F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
905 if (f2fs_is_pinned_file(inode)) {
906 if (gc_type == FG_GC)
907 f2fs_pin_file_control(inode, true);
912 if (gc_type == BG_GC) {
913 if (PageWriteback(page)) {
917 set_page_dirty(page);
920 struct f2fs_io_info fio = {
921 .sbi = F2FS_I_SB(inode),
926 .op_flags = REQ_SYNC,
927 .old_blkaddr = NULL_ADDR,
929 .encrypted_page = NULL,
930 .need_lock = LOCK_REQ,
931 .io_type = FS_GC_DATA_IO,
933 bool is_dirty = PageDirty(page);
936 f2fs_wait_on_page_writeback(page, DATA, true, true);
938 set_page_dirty(page);
939 if (clear_page_dirty_for_io(page)) {
940 inode_dec_dirty_pages(inode);
941 f2fs_remove_dirty_inode(inode);
946 err = f2fs_do_write_data_page(&fio);
948 clear_cold_data(page);
949 if (err == -ENOMEM) {
950 congestion_wait(BLK_RW_ASYNC, HZ/50);
954 set_page_dirty(page);
958 f2fs_put_page(page, 1);
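	/*
	 * Unlike move_data_block(), this path handles files that need no
	 * post-read processing: background GC only redirties the page so the
	 * normal writeback path migrates it later, while foreground GC writes
	 * it out immediately through f2fs_do_write_data_page(), retrying on
	 * -ENOMEM after a short congestion_wait().
	 */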
/*
 * This function tries to get the parent node of the victim data block and
 * checks the validity of the data block. If the block is valid, it is copied
 * with cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
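/*
 * As in gc_node_segment(), the scan below runs in phases: readahead of NAT
 * blocks and node pages first, then is_alive() checks plus inode and data
 * page readahead, and only in the final phase are blocks actually moved,
 * under the inode's i_gc_rwsem so GC cannot race with buffered or direct
 * writes.
 */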
969 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
970 struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
972 struct super_block *sb = sbi->sb;
973 struct f2fs_summary *entry;
979 start_addr = START_BLOCK(sbi, segno);
984 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
985 struct page *data_page;
987 struct node_info dni; /* dnode info for the data */
988 unsigned int ofs_in_node, nofs;
990 nid_t nid = le32_to_cpu(entry->nid);
		/* stop BG_GC if there are not enough free sections. */
993 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
996 if (check_valid_map(sbi, segno, off) == 0)
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
1006 f2fs_ra_node_page(sbi, nid);
		/* Get an inode by ino, checking its validity */
1011 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1015 f2fs_ra_node_page(sbi, dni.ino);
1019 ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1022 inode = f2fs_iget(sb, dni.ino);
1023 if (IS_ERR(inode) || is_bad_inode(inode))
1026 if (!down_write_trylock(
1027 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1029 sbi->skipped_gc_rwsem++;
			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;
1036 if (f2fs_post_read_required(inode)) {
1037 int err = ra_data_block(inode, start_bidx);
1039 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1044 add_gc_inode(gc_list, inode);
1048 data_page = f2fs_get_read_data_page(inode,
1049 start_bidx, REQ_RAHEAD, true);
1050 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1051 if (IS_ERR(data_page)) {
1056 f2fs_put_page(data_page, 0);
1057 add_gc_inode(gc_list, inode);
1062 inode = find_gc_inode(gc_list, dni.ino);
1064 struct f2fs_inode_info *fi = F2FS_I(inode);
1065 bool locked = false;
1068 if (S_ISREG(inode->i_mode)) {
1069 if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
1071 if (!down_write_trylock(
1072 &fi->i_gc_rwsem[WRITE])) {
1073 sbi->skipped_gc_rwsem++;
1074 up_write(&fi->i_gc_rwsem[READ]);
1079 /* wait for all inflight aio data */
1080 inode_dio_wait(inode);
			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
1085 if (f2fs_post_read_required(inode))
1086 err = move_data_block(inode, start_bidx,
1087 gc_type, segno, off);
1089 err = move_data_page(inode, start_bidx, gc_type,
1092 if (!err && (gc_type == FG_GC ||
1093 f2fs_post_read_required(inode)))
1097 up_write(&fi->i_gc_rwsem[WRITE]);
1098 up_write(&fi->i_gc_rwsem[READ]);
1101 stat_inc_data_blk_count(sbi, 1, gc_type);
1111 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1114 struct sit_info *sit_i = SIT_I(sbi);
1117 down_write(&sit_i->sentry_lock);
1118 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
1119 NO_CHECK_TYPE, LFS);
1120 up_write(&sit_i->sentry_lock);
1124 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1125 unsigned int start_segno,
1126 struct gc_inode_list *gc_list, int gc_type)
1128 struct page *sum_page;
1129 struct f2fs_summary_block *sum;
1130 struct blk_plug plug;
1131 unsigned int segno = start_segno;
1132 unsigned int end_segno = start_segno + sbi->segs_per_sec;
1133 int seg_freed = 0, migrated = 0;
1134 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1135 SUM_TYPE_DATA : SUM_TYPE_NODE;
1138 if (__is_large_section(sbi))
1139 end_segno = rounddown(end_segno, sbi->segs_per_sec);
	/* readahead multiple SSA blocks that have contiguous addresses */
1142 if (__is_large_section(sbi))
1143 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1144 end_segno - segno, META_SSA, true);
	/* reference all summary pages */
1147 while (segno < end_segno) {
1148 sum_page = f2fs_get_sum_page(sbi, segno++);
1149 if (IS_ERR(sum_page)) {
1150 int err = PTR_ERR(sum_page);
1152 end_segno = segno - 1;
1153 for (segno = start_segno; segno < end_segno; segno++) {
1154 sum_page = find_get_page(META_MAPPING(sbi),
1155 GET_SUM_BLOCK(sbi, segno));
1156 f2fs_put_page(sum_page, 0);
1157 f2fs_put_page(sum_page, 0);
1161 unlock_page(sum_page);
1164 blk_start_plug(&plug);
1166 for (segno = start_segno; segno < end_segno; segno++) {
1168 /* find segment summary of victim */
1169 sum_page = find_get_page(META_MAPPING(sbi),
1170 GET_SUM_BLOCK(sbi, segno));
1171 f2fs_put_page(sum_page, 0);
1173 if (get_valid_blocks(sbi, segno, false) == 0)
1175 if (__is_large_section(sbi) &&
1176 migrated >= sbi->migration_granularity)
1178 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1181 sum = page_address(sum_page);
1182 if (type != GET_SUM_TYPE((&sum->footer))) {
1183 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
1184 "type [%d, %d] in SSA and SIT",
1185 segno, type, GET_SUM_TYPE((&sum->footer)));
1186 set_sbi_flag(sbi, SBI_NEED_FSCK);
1187 f2fs_stop_checkpoint(sbi, false);
1192 * this is to avoid deadlock:
1193 * - lock_page(sum_page) - f2fs_replace_block
1194 * - check_valid_map() - down_write(sentry_lock)
1195 * - down_read(sentry_lock) - change_curseg()
1196 * - lock_page(sum_page)
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
								segno, gc_type);
1205 stat_inc_seg_count(sbi, type, gc_type);
1208 if (gc_type == FG_GC &&
1209 get_valid_blocks(sbi, segno, false) == 0)
1213 if (__is_large_section(sbi) && segno + 1 < end_segno)
1214 sbi->next_victim_seg[gc_type] = segno + 1;
1216 f2fs_put_page(sum_page, 0);
1220 f2fs_submit_merged_write(sbi,
1221 (type == SUM_TYPE_NODE) ? NODE : DATA);
1223 blk_finish_plug(&plug);
1225 stat_inc_call_count(sbi->stat_info);
1230 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
1231 bool background, unsigned int segno)
1233 int gc_type = sync ? FG_GC : BG_GC;
1234 int sec_freed = 0, seg_freed = 0, total_freed = 0;
1236 struct cp_control cpc;
1237 unsigned int init_segno = segno;
1238 struct gc_inode_list gc_list = {
1239 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1240 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1242 unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
1243 unsigned long long first_skipped;
1244 unsigned int skipped_round = 0, round = 0;
1246 trace_f2fs_gc_begin(sbi->sb, sync, background,
1247 get_pages(sbi, F2FS_DIRTY_NODES),
1248 get_pages(sbi, F2FS_DIRTY_DENTS),
1249 get_pages(sbi, F2FS_DIRTY_IMETA),
1252 reserved_segments(sbi),
1253 prefree_segments(sbi));
1255 cpc.reason = __get_cp_reason(sbi);
1256 sbi->skipped_gc_rwsem = 0;
1257 first_skipped = last_skipped;
1259 if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1263 if (unlikely(f2fs_cp_error(sbi))) {
1268 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint. Then,
		 * we secure free segments which don't need FG_GC any more.
		 */
1274 if (prefree_segments(sbi) &&
1275 !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
1276 ret = f2fs_write_checkpoint(sbi, &cpc);
1280 if (has_not_enough_free_secs(sbi, 0, 0))
1284 /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1285 if (gc_type == BG_GC && !background) {
1289 if (!__get_victim(sbi, &segno, gc_type)) {
1294 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;
	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
				sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}
1307 if (gc_type == FG_GC)
1308 sbi->cur_victim_sec = NULL_SEGNO;
1313 if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
1314 if (skipped_round <= MAX_SKIP_GC_COUNT ||
1315 skipped_round * 2 < round) {
1320 if (first_skipped < last_skipped &&
1321 (last_skipped - first_skipped) >
1322 sbi->skipped_gc_rwsem) {
1323 f2fs_drop_inmem_pages_all(sbi, true);
1327 if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
1328 ret = f2fs_write_checkpoint(sbi, &cpc);
1331 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1332 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;
1334 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
1335 get_pages(sbi, F2FS_DIRTY_NODES),
1336 get_pages(sbi, F2FS_DIRTY_DENTS),
1337 get_pages(sbi, F2FS_DIRTY_IMETA),
1340 reserved_segments(sbi),
1341 prefree_segments(sbi));
1343 mutex_unlock(&sbi->gc_mutex);
1345 put_gc_inode(&gc_list);
1348 ret = sec_freed ? 0 : -EAGAIN;
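	/*
	 * A synchronous caller treats "nothing freed" as failure: sec_freed
	 * == 0 is mapped to -EAGAIN so that, for example, ioctl-driven
	 * cleaning can report lack of progress, while background GC simply
	 * retries on its next wakeup.
	 */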
1352 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1354 DIRTY_I(sbi)->v_ops = &default_v_ops;
1356 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
	/* give the warm/cold data area to the slower device */
1359 if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
1360 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1361 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;