f2fs: avoid fi->i_gc_rwsem[WRITE] lock in f2fs_gc
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

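/*
 * Background GC thread: wakes up periodically (or on demand via gc_wake),
 * adjusts its sleep interval based on filesystem idleness and the amount of
 * invalid blocks, and invokes f2fs_gc() in background mode.
 */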
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon after by user
		 * update or deletion. So we wait some time to let dirty
		 * segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

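/*
 * Allocate the per-superblock GC control structure, seed the sleep-time
 * parameters with their defaults, and spawn the "f2fs_gc-%u:%u" kthread.
 * Returns 0 on success or a negative errno.
 */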
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

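/*
 * Map the requested GC type and the configured gc_mode to a victim selection
 * policy: cost-benefit (GC_CB) for background GC by default, greedy
 * (GC_GREEDY) otherwise, with GC_IDLE_* / GC_URGENT overriding the default.
 */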
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

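/*
 * Fill a victim_sel_policy for this pass: SSR scans the per-type dirty
 * segment bitmap greedily one segment at a time, while LFS GC scans all
 * dirty segments a section at a time and bounds the search with
 * max_victim_search unless the GC is foreground or urgent.
 */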
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

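/*
 * Upper bound used to seed p->min_cost before scanning: any real candidate
 * must cost less than this to be selected.
 */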
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections already
	 * selected by background GC before.
	 * Those sections are guaranteed to have a small number of valid
	 * blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

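/*
 * Cost-benefit cost of a section: u is the utilization in percent and age
 * grows as the section's average mtime approaches the oldest mtime seen.
 * The benefit term 100 * (100 - u) * age / (100 + u) is subtracted from
 * UINT_MAX, so a cold, mostly-invalid section gets the lowest cost.
 * E.g. u = 20, age = 80 yields a benefit of ~5333, while u = 80, age = 80
 * yields only ~888, so the emptier section wins.
 */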
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time has changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

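/*
 * Per-candidate cost: checkpointed valid blocks for SSR, the raw valid block
 * count for greedy GC, or the cost-benefit value above. Lower is better.
 */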
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

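/* Count the bits set in addr within [offset, offset + len). */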
static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

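/*
 * The gc_inode_list helpers below pair a radix tree (for lookup by inode
 * number) with a plain list (for final iput and teardown) of inodes whose
 * data blocks are scheduled to be moved by GC.
 */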
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

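/* Test, under sentry_lock, whether block @offset of @segno is still valid. */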
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with the
 * one in the NAT. If the node is valid, it is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass a node offset that refers to the inode or
 * a direct node block. Passing an offset that points to any other node type,
 * such as an indirect or double indirect node block, is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

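/*
 * Check that the summary entry still describes a live block: look up the
 * owning node page, fill *dni and *nofs, and return true only if the block
 * address stored in that node still equals @blkaddr.
 */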
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into meta inode until previous dirty
	 * data was written back, to avoid racing between GC and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

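/*
 * Move a data page of an inode that does not need post-read processing.
 * For background GC the page is only marked dirty and cold so that normal
 * writeback relocates it; for foreground GC it is written out immediately
 * via f2fs_do_write_data_page(), retrying on -ENOMEM.
 */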
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of each victim data block and
 * checks the block's validity. If the block is valid, it is copied with cold
 * status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if inode uses special I/O path, defer it to phase 4 */
			if (f2fs_post_read_required(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode);
			data_page = f2fs_get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, gc_type,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

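/*
 * Pick a victim segment for GC through the registered victim selection
 * operations, holding sentry_lock for the duration of the search.
 */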
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

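/*
 * Collect one section: readahead and reference the SSA pages of all segments
 * in the section, then migrate node or data blocks segment by segment.
 * Returns the number of segments freed (only meaningful for FG_GC).
 */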
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks, which have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

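/*
 * Entry point for both background and foreground GC. The caller must hold
 * sbi->gc_mutex, which is released here before returning. @sync forces
 * foreground GC and makes the call fail with -EAGAIN if no section could be
 * freed; otherwise the function may loop (gc_more) until enough free
 * sections are available.
 */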
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below given
		 * threshold, we can make them free by checkpoint. Then, we
		 * secure free segments which don't need FG_GC any more.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC)
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

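/*
 * Hook up the default victim selection ops and the pinned-file GC failure
 * threshold; on multi-device setups, steer the ALLOC_NEXT victim hint past
 * the first device so warm/cold data starts on the slower device.
 */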
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}