Commit | Line | Data |
---|---|---|
7c1a000d | 1 | // SPDX-License-Identifier: GPL-2.0 |
0a8165d7 | 2 | /* |
7bc09003 JK |
3 | * fs/f2fs/gc.c |
4 | * | |
5 | * Copyright (c) 2012 Samsung Electronics Co., Ltd. | |
6 | * http://www.samsung.com/ | |
7bc09003 JK |
7 | */ |
8 | #include <linux/fs.h> | |
9 | #include <linux/module.h> | |
10 | #include <linux/backing-dev.h> | |
7bc09003 JK |
11 | #include <linux/init.h> |
12 | #include <linux/f2fs_fs.h> | |
13 | #include <linux/kthread.h> | |
14 | #include <linux/delay.h> | |
15 | #include <linux/freezer.h> | |
b4b10061 | 16 | #include <linux/sched/signal.h> |
7bc09003 JK |
17 | |
18 | #include "f2fs.h" | |
19 | #include "node.h" | |
20 | #include "segment.h" | |
21 | #include "gc.h" | |
8e46b3ed | 22 | #include <trace/events/f2fs.h> |
7bc09003 | 23 | |
093749e2 CY |
24 | static struct kmem_cache *victim_entry_slab; |
25 | ||
da52f8ad JQ |
26 | static unsigned int count_bits(const unsigned long *addr, |
27 | unsigned int offset, unsigned int len); | |
28 | ||
7bc09003 JK |
29 | static int gc_thread_func(void *data) |
30 | { | |
31 | struct f2fs_sb_info *sbi = data; | |
b59d0bae | 32 | struct f2fs_gc_kthread *gc_th = sbi->gc_thread; |
7bc09003 | 33 | wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; |
b8c502b8 | 34 | unsigned int wait_ms; |
7bc09003 | 35 | |
b59d0bae | 36 | wait_ms = gc_th->min_sleep_time; |
7bc09003 | 37 | |
1d7be270 | 38 | set_freezable(); |
7bc09003 | 39 | do { |
bbbc34fd CY |
40 | bool sync_mode; |
41 | ||
1d7be270 | 42 | wait_event_interruptible_timeout(*wq, |
d9872a69 JK |
43 | kthread_should_stop() || freezing(current) || |
44 | gc_th->gc_wake, | |
1d7be270 JK |
45 | msecs_to_jiffies(wait_ms)); |
46 | ||
d9872a69 JK |
47 | /* give it a try one time */ |
48 | if (gc_th->gc_wake) | |
49 | gc_th->gc_wake = 0; | |
50 | ||
274bd9ba CY |
51 | if (try_to_freeze()) { |
52 | stat_other_skip_bggc_count(sbi); | |
7bc09003 | 53 | continue; |
274bd9ba | 54 | } |
7bc09003 JK |
55 | if (kthread_should_stop()) |
56 | break; | |
57 | ||
d6212a5f | 58 | if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { |
88dd8934 | 59 | increase_sleep_time(gc_th, &wait_ms); |
274bd9ba | 60 | stat_other_skip_bggc_count(sbi); |
d6212a5f CL |
61 | continue; |
62 | } | |
63 | ||
55523519 | 64 | if (time_to_inject(sbi, FAULT_CHECKPOINT)) { |
c45d6002 | 65 | f2fs_show_injection_info(sbi, FAULT_CHECKPOINT); |
0f348028 | 66 | f2fs_stop_checkpoint(sbi, false); |
55523519 | 67 | } |
0f348028 | 68 | |
274bd9ba CY |
69 | if (!sb_start_write_trylock(sbi->sb)) { |
70 | stat_other_skip_bggc_count(sbi); | |
dc6febb6 | 71 | continue; |
274bd9ba | 72 | } |
dc6febb6 | 73 | |
7bc09003 JK |
74 | /* |
75 | * [GC triggering condition] | |
76 | * 0. GC is not conducted currently. | |
77 | * 1. There are enough dirty segments. | |
78 | * 2. IO subsystem is idle by checking the # of writeback pages. | |
79 | * 3. IO subsystem is idle by checking the # of requests in | |
80 | * bdev's request list. | |
81 | * | |
e1c42045 | 82 | * Note) We have to avoid triggering GCs frequently. |
7bc09003 JK |
83 | * Because it is possible that some segments can be |
84 | * invalidated soon after by user update or deletion. | |
85 | * So, I'd like to wait some time to collect dirty segments. | |
86 | */ | |
0e5e8111 | 87 | if (sbi->gc_mode == GC_URGENT_HIGH) { |
d9872a69 | 88 | wait_ms = gc_th->urgent_sleep_time; |
fb24fea7 | 89 | down_write(&sbi->gc_lock); |
d9872a69 JK |
90 | goto do_gc; |
91 | } | |
92 | ||
fb24fea7 | 93 | if (!down_write_trylock(&sbi->gc_lock)) { |
274bd9ba | 94 | stat_other_skip_bggc_count(sbi); |
69babac0 | 95 | goto next; |
274bd9ba | 96 | } |
69babac0 | 97 | |
a7d10cf3 | 98 | if (!is_idle(sbi, GC_TIME)) { |
88dd8934 | 99 | increase_sleep_time(gc_th, &wait_ms); |
fb24fea7 | 100 | up_write(&sbi->gc_lock); |
274bd9ba | 101 | stat_io_skip_bggc_count(sbi); |
dc6febb6 | 102 | goto next; |
7bc09003 JK |
103 | } |
104 | ||
105 | if (has_enough_invalid_blocks(sbi)) | |
88dd8934 | 106 | decrease_sleep_time(gc_th, &wait_ms); |
7bc09003 | 107 | else |
88dd8934 | 108 | increase_sleep_time(gc_th, &wait_ms); |
d9872a69 | 109 | do_gc: |
fc7100ea | 110 | stat_inc_bggc_count(sbi->stat_info); |
7bc09003 | 111 | |
bbbc34fd CY |
112 | sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC; |
113 | ||
43727527 | 114 | /* if return value is not zero, no victim was selected */ |
bbbc34fd | 115 | if (f2fs_gc(sbi, sync_mode, true, NULL_SEGNO)) |
b59d0bae | 116 | wait_ms = gc_th->no_gc_sleep_time; |
81eb8d6e | 117 | |
84e4214f JK |
118 | trace_f2fs_background_gc(sbi->sb, wait_ms, |
119 | prefree_segments(sbi), free_segments(sbi)); | |
120 | ||
4660f9c0 | 121 | /* balancing f2fs's metadata periodically */ |
7bcd0cfa | 122 | f2fs_balance_fs_bg(sbi, true); |
dc6febb6 CY |
123 | next: |
124 | sb_end_write(sbi->sb); | |
81eb8d6e | 125 | |
7bc09003 JK |
126 | } while (!kthread_should_stop()); |
127 | return 0; | |
128 | } | |
129 | ||
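The loop above steers `wait_ms` between the gc_th sleep bounds: it backs off when the filesystem is frozen or not idle, speeds up when invalid blocks accumulate, and falls back to `no_gc_sleep_time` when no victim is found. The `increase_sleep_time()`/`decrease_sleep_time()` helpers live in gc.h and are not shown in this listing; the user-space sketch below only illustrates the same clamped back-off idea with hypothetical bounds, it is not the f2fs implementation.

```c
#include <stdio.h>

/* Hypothetical bounds, loosely modeled on DEF_GC_THREAD_*_SLEEP_TIME. */
#define MIN_SLEEP_MS   30000U
#define MAX_SLEEP_MS   60000U
#define NO_GC_SLEEP_MS 300000U

/* Sleep longer when there is little to do, but never beyond the cap. */
static unsigned int increase_sleep(unsigned int ms)
{
	ms += MIN_SLEEP_MS;
	return ms > MAX_SLEEP_MS ? MAX_SLEEP_MS : ms;
}

/* Sleep less when invalid blocks pile up, but never below the floor. */
static unsigned int decrease_sleep(unsigned int ms)
{
	return ms <= MIN_SLEEP_MS * 2 ? MIN_SLEEP_MS : ms - MIN_SLEEP_MS;
}

int main(void)
{
	unsigned int wait_ms = MIN_SLEEP_MS;

	wait_ms = increase_sleep(wait_ms);	/* idle: back off */
	wait_ms = decrease_sleep(wait_ms);	/* dirty segments piling up: speed up */
	printf("next sleep: %u ms (no-victim case would use %u ms)\n",
	       wait_ms, NO_GC_SLEEP_MS);
	return 0;
}
```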
4d57b86d | 130 | int f2fs_start_gc_thread(struct f2fs_sb_info *sbi) |
7bc09003 | 131 | { |
1042d60f | 132 | struct f2fs_gc_kthread *gc_th; |
ec7b1f2d | 133 | dev_t dev = sbi->sb->s_bdev->bd_dev; |
7a267f8d | 134 | int err = 0; |
7bc09003 | 135 | |
1ecc0c5c | 136 | gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL); |
7a267f8d NJ |
137 | if (!gc_th) { |
138 | err = -ENOMEM; | |
139 | goto out; | |
140 | } | |
7bc09003 | 141 | |
d9872a69 | 142 | gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME; |
b59d0bae NJ |
143 | gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME; |
144 | gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME; | |
145 | gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME; | |
146 | ||
d9872a69 | 147 | gc_th->gc_wake = 0;
d2dc095f | 148 | |
7bc09003 JK |
149 | sbi->gc_thread = gc_th; |
150 | init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); | |
151 | sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, | |
ec7b1f2d | 152 | "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev)); |
7bc09003 | 153 | if (IS_ERR(gc_th->f2fs_gc_task)) { |
7a267f8d | 154 | err = PTR_ERR(gc_th->f2fs_gc_task); |
c8eb7024 | 155 | kfree(gc_th); |
25718423 | 156 | sbi->gc_thread = NULL; |
7bc09003 | 157 | } |
7a267f8d NJ |
158 | out: |
159 | return err; | |
7bc09003 JK |
160 | } |
161 | ||
4d57b86d | 162 | void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi) |
7bc09003 JK |
163 | { |
164 | struct f2fs_gc_kthread *gc_th = sbi->gc_thread; | |
165 | if (!gc_th) | |
166 | return; | |
167 | kthread_stop(gc_th->f2fs_gc_task); | |
c8eb7024 | 168 | kfree(gc_th); |
7bc09003 JK |
169 | sbi->gc_thread = NULL; |
170 | } | |
171 | ||
5b0e9539 | 172 | static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) |
7bc09003 | 173 | { |
093749e2 CY |
174 | int gc_mode; |
175 | ||
176 | if (gc_type == BG_GC) { | |
177 | if (sbi->am.atgc_enabled) | |
178 | gc_mode = GC_AT; | |
179 | else | |
180 | gc_mode = GC_CB; | |
181 | } else { | |
182 | gc_mode = GC_GREEDY; | |
183 | } | |
d2dc095f | 184 | |
5b0e9539 JK |
185 | switch (sbi->gc_mode) { |
186 | case GC_IDLE_CB: | |
187 | gc_mode = GC_CB; | |
188 | break; | |
189 | case GC_IDLE_GREEDY: | |
0e5e8111 | 190 | case GC_URGENT_HIGH: |
b27bc809 | 191 | gc_mode = GC_GREEDY; |
5b0e9539 | 192 | break; |
093749e2 CY |
193 | case GC_IDLE_AT: |
194 | gc_mode = GC_AT; | |
195 | break; | |
5b0e9539 | 196 | } |
093749e2 | 197 | |
d2dc095f | 198 | return gc_mode; |
7bc09003 JK |
199 | } |
200 | ||
201 | static void select_policy(struct f2fs_sb_info *sbi, int gc_type, | |
202 | int type, struct victim_sel_policy *p) | |
203 | { | |
204 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | |
205 | ||
4ebefc44 | 206 | if (p->alloc_mode == SSR) { |
7bc09003 | 207 | p->gc_mode = GC_GREEDY; |
da52f8ad | 208 | p->dirty_bitmap = dirty_i->dirty_segmap[type]; |
a26b7c8a | 209 | p->max_search = dirty_i->nr_dirty[type]; |
7bc09003 | 210 | p->ofs_unit = 1; |
093749e2 CY |
211 | } else if (p->alloc_mode == AT_SSR) { |
212 | p->gc_mode = GC_GREEDY; | |
213 | p->dirty_bitmap = dirty_i->dirty_segmap[type]; | |
214 | p->max_search = dirty_i->nr_dirty[type]; | |
215 | p->ofs_unit = 1; | |
7bc09003 | 216 | } else { |
5b0e9539 | 217 | p->gc_mode = select_gc_type(sbi, gc_type); |
7bc09003 | 218 | p->ofs_unit = sbi->segs_per_sec; |
da52f8ad JQ |
219 | if (__is_large_section(sbi)) { |
220 | p->dirty_bitmap = dirty_i->dirty_secmap; | |
221 | p->max_search = count_bits(p->dirty_bitmap, | |
222 | 0, MAIN_SECS(sbi)); | |
223 | } else { | |
224 | p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY]; | |
225 | p->max_search = dirty_i->nr_dirty[DIRTY]; | |
226 | } | |
7bc09003 | 227 | } |
a26b7c8a | 228 | |
7a88ddb5 CY |
229 | /* |
230 | * adjust candidates range, should select all dirty segments for | |
231 | * foreground GC and urgent GC cases. | |
232 | */ | |
b27bc809 | 233 | if (gc_type != FG_GC && |
0e5e8111 | 234 | (sbi->gc_mode != GC_URGENT_HIGH) && |
093749e2 | 235 | (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) && |
b27bc809 | 236 | p->max_search > sbi->max_victim_search) |
b1c57c1c | 237 | p->max_search = sbi->max_victim_search; |
a26b7c8a | 238 | |
b94929d9 YS |
239 | /* let's select beginning hot/small space first in no_heap mode*/ |
240 | if (test_opt(sbi, NOHEAP) && | |
241 | (type == CURSEG_HOT_DATA || IS_NODESEG(type))) | |
7a20b8a6 JK |
242 | p->offset = 0; |
243 | else | |
e066b83c | 244 | p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; |
7bc09003 JK |
245 | } |
246 | ||
247 | static unsigned int get_max_cost(struct f2fs_sb_info *sbi, | |
248 | struct victim_sel_policy *p) | |
249 | { | |
b7250d2d JK |
250 | /* SSR allocates in a segment unit */ |
251 | if (p->alloc_mode == SSR) | |
3519e3f9 | 252 | return sbi->blocks_per_seg; |
093749e2 CY |
253 | else if (p->alloc_mode == AT_SSR) |
254 | return UINT_MAX; | |
255 | ||
256 | /* LFS */ | |
7bc09003 | 257 | if (p->gc_mode == GC_GREEDY) |
c541a51b | 258 | return 2 * sbi->blocks_per_seg * p->ofs_unit; |
7bc09003 JK |
259 | else if (p->gc_mode == GC_CB) |
260 | return UINT_MAX; | |
093749e2 CY |
261 | else if (p->gc_mode == GC_AT) |
262 | return UINT_MAX; | |
7bc09003 JK |
263 | else /* No other gc_mode */ |
264 | return 0; | |
265 | } | |
266 | ||
267 | static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) | |
268 | { | |
269 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | |
5ec4e49f | 270 | unsigned int secno; |
7bc09003 JK |
271 | |
272 | /* | |
273 | * If the gc_type is FG_GC, we can select victim segments | |
274 | * selected by background GC before. | |
275 | * Those segments guarantee they have small valid blocks. | |
276 | */ | |
7cd8558b | 277 | for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { |
5ec4e49f | 278 | if (sec_usage_check(sbi, secno)) |
b65ee148 | 279 | continue; |
5ec4e49f | 280 | clear_bit(secno, dirty_i->victim_secmap); |
4ddb1a4d | 281 | return GET_SEG_FROM_SEC(sbi, secno); |
7bc09003 JK |
282 | } |
283 | return NULL_SEGNO; | |
284 | } | |
285 | ||
286 | static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) | |
287 | { | |
288 | struct sit_info *sit_i = SIT_I(sbi); | |
4ddb1a4d JK |
289 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); |
290 | unsigned int start = GET_SEG_FROM_SEC(sbi, secno); | |
7bc09003 JK |
291 | unsigned long long mtime = 0; |
292 | unsigned int vblocks; | |
293 | unsigned char age = 0; | |
294 | unsigned char u; | |
295 | unsigned int i; | |
de881df9 | 296 | unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno); |
7bc09003 | 297 | |
de881df9 | 298 | for (i = 0; i < usable_segs_per_sec; i++) |
7bc09003 | 299 | mtime += get_seg_entry(sbi, start + i)->mtime; |
302bd348 | 300 | vblocks = get_valid_blocks(sbi, segno, true); |
7bc09003 | 301 | |
de881df9 AR |
302 | mtime = div_u64(mtime, usable_segs_per_sec); |
303 | vblocks = div_u64(vblocks, usable_segs_per_sec); | |
7bc09003 JK |
304 | |
305 | u = (vblocks * 100) >> sbi->log_blocks_per_seg; | |
306 | ||
e1c42045 | 307 | /* Handle if the system time has changed by the user */ |
7bc09003 JK |
308 | if (mtime < sit_i->min_mtime) |
309 | sit_i->min_mtime = mtime; | |
310 | if (mtime > sit_i->max_mtime) | |
311 | sit_i->max_mtime = mtime; | |
312 | if (sit_i->max_mtime != sit_i->min_mtime) | |
313 | age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime), | |
314 | sit_i->max_mtime - sit_i->min_mtime); | |
315 | ||
316 | return UINT_MAX - ((100 * (100 - u) * age) / (100 + u)); | |
317 | } | |
318 | ||
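`get_cb_cost()` is the classic cost-benefit policy: with `u` the section utilization in percent and `age` normalized to 0..100 from the mtime range, the benefit grows with the reclaimable space `(100 - u)` and with age, while the copying cost grows with `(100 + u)`; the value is subtracted from UINT_MAX so the victim scan can simply minimize it. A standalone illustration of that exact formula (the sample numbers are made up):

```c
#include <stdio.h>
#include <limits.h>

/* Cost-benefit value as computed at the end of get_cb_cost():
 * a smaller return value means a more attractive victim.
 */
static unsigned int cb_cost(unsigned int u, unsigned int age)
{
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

int main(void)
{
	/* An old, mostly-empty section beats a young, mostly-full one. */
	printf("u=10%% age=90 -> %u\n", cb_cost(10, 90));
	printf("u=80%% age=20 -> %u\n", cb_cost(80, 20));
	return 0;
}
```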
a57e564d JX |
319 | static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, |
320 | unsigned int segno, struct victim_sel_policy *p) | |
7bc09003 JK |
321 | { |
322 | if (p->alloc_mode == SSR) | |
2afce76a | 323 | return get_seg_entry(sbi, segno)->ckpt_valid_blocks; |
7bc09003 JK |
324 | |
325 | /* alloc_mode == LFS */ | |
326 | if (p->gc_mode == GC_GREEDY) | |
91f4382b | 327 | return get_valid_blocks(sbi, segno, true); |
093749e2 | 328 | else if (p->gc_mode == GC_CB) |
7bc09003 | 329 | return get_cb_cost(sbi, segno); |
093749e2 CY |
330 | |
331 | f2fs_bug_on(sbi, 1); | |
332 | return 0; | |
7bc09003 JK |
333 | } |
334 | ||
688159b6 FL |
335 | static unsigned int count_bits(const unsigned long *addr, |
336 | unsigned int offset, unsigned int len) | |
337 | { | |
338 | unsigned int end = offset + len, sum = 0; | |
339 | ||
340 | while (offset < end) { | |
341 | if (test_bit(offset++, addr)) | |
342 | ++sum; | |
343 | } | |
344 | return sum; | |
345 | } | |
346 | ||
093749e2 CY |
347 | static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi, |
348 | unsigned long long mtime, unsigned int segno, | |
349 | struct rb_node *parent, struct rb_node **p, | |
350 | bool left_most) | |
351 | { | |
352 | struct atgc_management *am = &sbi->am; | |
353 | struct victim_entry *ve; | |
354 | ||
355 | ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS); | |
356 | ||
357 | ve->mtime = mtime; | |
358 | ve->segno = segno; | |
359 | ||
360 | rb_link_node(&ve->rb_node, parent, p); | |
361 | rb_insert_color_cached(&ve->rb_node, &am->root, left_most); | |
362 | ||
363 | list_add_tail(&ve->list, &am->victim_list); | |
364 | ||
365 | am->victim_count++; | |
366 | ||
367 | return ve; | |
368 | } | |
369 | ||
370 | static void insert_victim_entry(struct f2fs_sb_info *sbi, | |
371 | unsigned long long mtime, unsigned int segno) | |
372 | { | |
373 | struct atgc_management *am = &sbi->am; | |
374 | struct rb_node **p; | |
375 | struct rb_node *parent = NULL; | |
376 | bool left_most = true; | |
377 | ||
378 | p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most); | |
379 | attach_victim_entry(sbi, mtime, segno, parent, p, left_most); | |
380 | } | |
381 | ||
382 | static void add_victim_entry(struct f2fs_sb_info *sbi, | |
383 | struct victim_sel_policy *p, unsigned int segno) | |
384 | { | |
385 | struct sit_info *sit_i = SIT_I(sbi); | |
386 | unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); | |
387 | unsigned int start = GET_SEG_FROM_SEC(sbi, secno); | |
388 | unsigned long long mtime = 0; | |
389 | unsigned int i; | |
390 | ||
391 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { | |
392 | if (p->gc_mode == GC_AT && | |
393 | get_valid_blocks(sbi, segno, true) == 0) | |
394 | return; | |
395 | ||
396 | if (p->alloc_mode == AT_SSR && | |
397 | get_seg_entry(sbi, segno)->ckpt_valid_blocks == 0) | |
398 | return; | |
399 | } | |
400 | ||
401 | for (i = 0; i < sbi->segs_per_sec; i++) | |
402 | mtime += get_seg_entry(sbi, start + i)->mtime; | |
403 | mtime = div_u64(mtime, sbi->segs_per_sec); | |
404 | ||
405 | /* Handle if the system time has changed by the user */ | |
406 | if (mtime < sit_i->min_mtime) | |
407 | sit_i->min_mtime = mtime; | |
408 | if (mtime > sit_i->max_mtime) | |
409 | sit_i->max_mtime = mtime; | |
410 | if (mtime < sit_i->dirty_min_mtime) | |
411 | sit_i->dirty_min_mtime = mtime; | |
412 | if (mtime > sit_i->dirty_max_mtime) | |
413 | sit_i->dirty_max_mtime = mtime; | |
414 | ||
415 | /* don't choose young section as candidate */ | |
416 | if (sit_i->dirty_max_mtime - mtime < p->age_threshold) | |
417 | return; | |
418 | ||
419 | insert_victim_entry(sbi, mtime, segno); | |
420 | } | |
421 | ||
422 | static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi, | |
423 | struct victim_sel_policy *p) | |
424 | { | |
425 | struct atgc_management *am = &sbi->am; | |
426 | struct rb_node *parent = NULL; | |
427 | bool left_most; | |
428 | ||
429 | f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most); | |
430 | ||
431 | return parent; | |
432 | } | |
433 | ||
434 | static void atgc_lookup_victim(struct f2fs_sb_info *sbi, | |
435 | struct victim_sel_policy *p) | |
436 | { | |
437 | struct sit_info *sit_i = SIT_I(sbi); | |
438 | struct atgc_management *am = &sbi->am; | |
439 | struct rb_root_cached *root = &am->root; | |
440 | struct rb_node *node; | |
441 | struct rb_entry *re; | |
442 | struct victim_entry *ve; | |
443 | unsigned long long total_time; | |
444 | unsigned long long age, u, accu; | |
445 | unsigned long long max_mtime = sit_i->dirty_max_mtime; | |
446 | unsigned long long min_mtime = sit_i->dirty_min_mtime; | |
447 | unsigned int sec_blocks = BLKS_PER_SEC(sbi); | |
448 | unsigned int vblocks; | |
449 | unsigned int dirty_threshold = max(am->max_candidate_count, | |
450 | am->candidate_ratio * | |
451 | am->victim_count / 100); | |
452 | unsigned int age_weight = am->age_weight; | |
453 | unsigned int cost; | |
454 | unsigned int iter = 0; | |
455 | ||
456 | if (max_mtime < min_mtime) | |
457 | return; | |
458 | ||
459 | max_mtime += 1; | |
460 | total_time = max_mtime - min_mtime; | |
461 | ||
462 | accu = div64_u64(ULLONG_MAX, total_time); | |
463 | accu = min_t(unsigned long long, div_u64(accu, 100), | |
464 | DEFAULT_ACCURACY_CLASS); | |
465 | ||
466 | node = rb_first_cached(root); | |
467 | next: | |
468 | re = rb_entry_safe(node, struct rb_entry, rb_node); | |
469 | if (!re) | |
470 | return; | |
471 | ||
472 | ve = (struct victim_entry *)re; | |
473 | ||
474 | if (ve->mtime >= max_mtime || ve->mtime < min_mtime) | |
475 | goto skip; | |
476 | ||
477 | /* age = 10000 * x% * 60 */ | |
478 | age = div64_u64(accu * (max_mtime - ve->mtime), total_time) * | |
479 | age_weight; | |
480 | ||
481 | vblocks = get_valid_blocks(sbi, ve->segno, true); | |
482 | f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks); | |
483 | ||
484 | /* u = 10000 * x% * 40 */ | |
485 | u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) * | |
486 | (100 - age_weight); | |
487 | ||
488 | f2fs_bug_on(sbi, age + u >= UINT_MAX); | |
489 | ||
490 | cost = UINT_MAX - (age + u); | |
491 | iter++; | |
492 | ||
493 | if (cost < p->min_cost || | |
494 | (cost == p->min_cost && age > p->oldest_age)) { | |
495 | p->min_cost = cost; | |
496 | p->oldest_age = age; | |
497 | p->min_segno = ve->segno; | |
498 | } | |
499 | skip: | |
500 | if (iter < dirty_threshold) { | |
501 | node = rb_next(node); | |
502 | goto next; | |
503 | } | |
504 | } | |
505 | ||
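atgc_lookup_victim() blends two normalized terms into one cost: an age term weighted by `am->age_weight` and a free-space term weighted by the remainder, both scaled by the accuracy factor `accu` derived from the dirty mtime range, then inverted into a minimizable value. The sketch below shows the same blend in user space; fixing `accu` at 10000 and `age_weight` at 60 is an assumption taken from the "10000 * x% * 60 / 40" comments above, not from the kernel headers.

```c
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

/* Simplified version of the ATGC cost in atgc_lookup_victim():
 * both inputs are fractions expressed as 0..100 percent, scaled by
 * 'accu', then weighted so age contributes age_weight% of the total
 * and free space contributes the rest. 'accu' is fixed here; the
 * kernel derives it from the dirty mtime range.
 */
static unsigned int atgc_cost(uint64_t age_frac_pct,   /* 0..100 */
			      uint64_t free_frac_pct,  /* 0..100 */
			      unsigned int age_weight)  /* e.g. 60 */
{
	const uint64_t accu = 10000;
	uint64_t age = accu * age_frac_pct / 100 * age_weight;
	uint64_t u   = accu * free_frac_pct / 100 * (100 - age_weight);

	return UINT_MAX - (unsigned int)(age + u);
}

int main(void)
{
	/* Older and emptier sections end up with a smaller cost. */
	printf("old+empty : %u\n", atgc_cost(90, 90, 60));
	printf("young+full: %u\n", atgc_cost(10, 10, 60));
	return 0;
}
```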
506 | /* | |
507 | * select candidates around source section in range of | |
508 | * [target - dirty_threshold, target + dirty_threshold] | |
509 | */ | |
510 | static void atssr_lookup_victim(struct f2fs_sb_info *sbi, | |
511 | struct victim_sel_policy *p) | |
512 | { | |
513 | struct sit_info *sit_i = SIT_I(sbi); | |
514 | struct atgc_management *am = &sbi->am; | |
515 | struct rb_node *node; | |
516 | struct rb_entry *re; | |
517 | struct victim_entry *ve; | |
518 | unsigned long long age; | |
519 | unsigned long long max_mtime = sit_i->dirty_max_mtime; | |
520 | unsigned long long min_mtime = sit_i->dirty_min_mtime; | |
521 | unsigned int seg_blocks = sbi->blocks_per_seg; | |
522 | unsigned int vblocks; | |
523 | unsigned int dirty_threshold = max(am->max_candidate_count, | |
524 | am->candidate_ratio * | |
525 | am->victim_count / 100); | |
526 | unsigned int cost; | |
527 | unsigned int iter = 0; | |
528 | int stage = 0; | |
529 | ||
530 | if (max_mtime < min_mtime) | |
531 | return; | |
532 | max_mtime += 1; | |
533 | next_stage: | |
534 | node = lookup_central_victim(sbi, p); | |
535 | next_node: | |
536 | re = rb_entry_safe(node, struct rb_entry, rb_node); | |
537 | if (!re) { | |
538 | if (stage == 0) | |
539 | goto skip_stage; | |
540 | return; | |
541 | } | |
542 | ||
543 | ve = (struct victim_entry *)re; | |
544 | ||
545 | if (ve->mtime >= max_mtime || ve->mtime < min_mtime) | |
546 | goto skip_node; | |
547 | ||
548 | age = max_mtime - ve->mtime; | |
549 | ||
550 | vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks; | |
551 | f2fs_bug_on(sbi, !vblocks); | |
552 | ||
553 | /* rare case */ | |
554 | if (vblocks == seg_blocks) | |
555 | goto skip_node; | |
556 | ||
557 | iter++; | |
558 | ||
559 | age = max_mtime - abs(p->age - age); | |
560 | cost = UINT_MAX - vblocks; | |
561 | ||
562 | if (cost < p->min_cost || | |
563 | (cost == p->min_cost && age > p->oldest_age)) { | |
564 | p->min_cost = cost; | |
565 | p->oldest_age = age; | |
566 | p->min_segno = ve->segno; | |
567 | } | |
568 | skip_node: | |
569 | if (iter < dirty_threshold) { | |
570 | if (stage == 0) | |
571 | node = rb_prev(node); | |
572 | else if (stage == 1) | |
573 | node = rb_next(node); | |
574 | goto next_node; | |
575 | } | |
576 | skip_stage: | |
577 | if (stage < 1) { | |
578 | stage++; | |
579 | iter = 0; | |
580 | goto next_stage; | |
581 | } | |
582 | } | |
583 | static void lookup_victim_by_age(struct f2fs_sb_info *sbi, | |
584 | struct victim_sel_policy *p) | |
585 | { | |
586 | f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, | |
587 | &sbi->am.root, true)); | |
588 | ||
589 | if (p->gc_mode == GC_AT) | |
590 | atgc_lookup_victim(sbi, p); | |
591 | else if (p->alloc_mode == AT_SSR) | |
592 | atssr_lookup_victim(sbi, p); | |
593 | else | |
594 | f2fs_bug_on(sbi, 1); | |
595 | } | |
596 | ||
597 | static void release_victim_entry(struct f2fs_sb_info *sbi) | |
598 | { | |
599 | struct atgc_management *am = &sbi->am; | |
600 | struct victim_entry *ve, *tmp; | |
601 | ||
602 | list_for_each_entry_safe(ve, tmp, &am->victim_list, list) { | |
603 | list_del(&ve->list); | |
604 | kmem_cache_free(victim_entry_slab, ve); | |
605 | am->victim_count--; | |
606 | } | |
607 | ||
608 | am->root = RB_ROOT_CACHED; | |
609 | ||
610 | f2fs_bug_on(sbi, am->victim_count); | |
611 | f2fs_bug_on(sbi, !list_empty(&am->victim_list)); | |
612 | } | |
613 | ||
0a8165d7 | 614 | /* |
111d2495 | 615 | * This function is called from two paths. |
7bc09003 JK |
616 | * One is garbage collection and the other is SSR segment selection. |
617 | * When it is called during GC, it just gets a victim segment | |
618 | * and it does not remove it from dirty seglist. | |
619 | * When it is called from SSR segment selection, it finds a segment | |
620 | * which has minimum valid blocks and removes it from dirty seglist. | |
621 | */ | |
622 | static int get_victim_by_default(struct f2fs_sb_info *sbi, | |
093749e2 CY |
623 | unsigned int *result, int gc_type, int type, |
624 | char alloc_mode, unsigned long long age) | |
7bc09003 JK |
625 | { |
626 | struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); | |
e066b83c | 627 | struct sit_info *sm = SIT_I(sbi); |
7bc09003 | 628 | struct victim_sel_policy p; |
3fa56503 | 629 | unsigned int secno, last_victim; |
04f0b2ea | 630 | unsigned int last_segment; |
093749e2 CY |
631 | unsigned int nsearched; |
632 | bool is_atgc; | |
97767500 | 633 | int ret = 0; |
7bc09003 | 634 | |
210f41bc | 635 | mutex_lock(&dirty_i->seglist_lock); |
04f0b2ea | 636 | last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; |
210f41bc | 637 | |
7bc09003 | 638 | p.alloc_mode = alloc_mode; |
093749e2 CY |
639 | p.age = age; |
640 | p.age_threshold = sbi->am.age_threshold; | |
7bc09003 | 641 | |
093749e2 CY |
642 | retry: |
643 | select_policy(sbi, gc_type, type, &p); | |
7bc09003 | 644 | p.min_segno = NULL_SEGNO; |
093749e2 | 645 | p.oldest_age = 0; |
3fa56503 | 646 | p.min_cost = get_max_cost(sbi, &p); |
7bc09003 | 647 | |
093749e2 CY |
648 | is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR); |
649 | nsearched = 0; | |
650 | ||
651 | if (is_atgc) | |
652 | SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX; | |
653 | ||
e066b83c | 654 | if (*result != NULL_SEGNO) { |
97767500 QZ |
655 | if (!get_valid_blocks(sbi, *result, false)) { |
656 | ret = -ENODATA; | |
657 | goto out; | |
658 | } | |
659 | ||
660 | if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) | |
661 | ret = -EBUSY; | |
662 | else | |
e066b83c JK |
663 | p.min_segno = *result; |
664 | goto out; | |
665 | } | |
666 | ||
97767500 | 667 | ret = -ENODATA; |
3342bb30 CY |
668 | if (p.max_search == 0) |
669 | goto out; | |
670 | ||
e3080b01 CY |
671 | if (__is_large_section(sbi) && p.alloc_mode == LFS) { |
672 | if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) { | |
673 | p.min_segno = sbi->next_victim_seg[BG_GC]; | |
674 | *result = p.min_segno; | |
675 | sbi->next_victim_seg[BG_GC] = NULL_SEGNO; | |
676 | goto got_result; | |
677 | } | |
678 | if (gc_type == FG_GC && | |
679 | sbi->next_victim_seg[FG_GC] != NULL_SEGNO) { | |
680 | p.min_segno = sbi->next_victim_seg[FG_GC]; | |
681 | *result = p.min_segno; | |
682 | sbi->next_victim_seg[FG_GC] = NULL_SEGNO; | |
683 | goto got_result; | |
684 | } | |
685 | } | |
686 | ||
e066b83c | 687 | last_victim = sm->last_victim[p.gc_mode]; |
7bc09003 JK |
688 | if (p.alloc_mode == LFS && gc_type == FG_GC) { |
689 | p.min_segno = check_bg_victims(sbi); | |
690 | if (p.min_segno != NULL_SEGNO) | |
691 | goto got_it; | |
692 | } | |
693 | ||
694 | while (1) { | |
da52f8ad JQ |
695 | unsigned long cost, *dirty_bitmap; |
696 | unsigned int unit_no, segno; | |
697 | ||
698 | dirty_bitmap = p.dirty_bitmap; | |
699 | unit_no = find_next_bit(dirty_bitmap, | |
700 | last_segment / p.ofs_unit, | |
701 | p.offset / p.ofs_unit); | |
702 | segno = unit_no * p.ofs_unit; | |
a43f7ec3 | 703 | if (segno >= last_segment) { |
e066b83c JK |
704 | if (sm->last_victim[p.gc_mode]) { |
705 | last_segment = | |
706 | sm->last_victim[p.gc_mode]; | |
707 | sm->last_victim[p.gc_mode] = 0; | |
7bc09003 JK |
708 | p.offset = 0; |
709 | continue; | |
710 | } | |
711 | break; | |
712 | } | |
a57e564d JX |
713 | |
714 | p.offset = segno + p.ofs_unit; | |
da52f8ad | 715 | nsearched++; |
688159b6 | 716 | |
bbf9f7d9 ST |
717 | #ifdef CONFIG_F2FS_CHECK_FS |
718 | /* | |
719 | * skip selecting the invalid segno (that is failed due to block | |
720 | * validity check failure during GC) to avoid endless GC loop in | |
721 | * such cases. | |
722 | */ | |
723 | if (test_bit(segno, sm->invalid_segmap)) | |
724 | goto next; | |
725 | #endif | |
726 | ||
4ddb1a4d | 727 | secno = GET_SEC_FROM_SEG(sbi, segno); |
7bc09003 | 728 | |
5ec4e49f | 729 | if (sec_usage_check(sbi, secno)) |
688159b6 | 730 | goto next; |
4354994f DR |
731 | /* Don't touch checkpointed data */ |
732 | if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && | |
49dd883c | 733 | get_ckpt_valid_blocks(sbi, segno) && |
093749e2 | 734 | p.alloc_mode == LFS)) |
4354994f | 735 | goto next; |
5ec4e49f | 736 | if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) |
688159b6 | 737 | goto next; |
7bc09003 | 738 | |
093749e2 CY |
739 | if (is_atgc) { |
740 | add_victim_entry(sbi, &p, segno); | |
741 | goto next; | |
742 | } | |
743 | ||
7bc09003 JK |
744 | cost = get_gc_cost(sbi, segno, &p); |
745 | ||
746 | if (p.min_cost > cost) { | |
747 | p.min_segno = segno; | |
748 | p.min_cost = cost; | |
a57e564d | 749 | } |
688159b6 FL |
750 | next: |
751 | if (nsearched >= p.max_search) { | |
e066b83c | 752 | if (!sm->last_victim[p.gc_mode] && segno <= last_victim) |
da52f8ad JQ |
753 | sm->last_victim[p.gc_mode] = |
754 | last_victim + p.ofs_unit; | |
4ce53776 | 755 | else |
da52f8ad | 756 | sm->last_victim[p.gc_mode] = segno + p.ofs_unit; |
04f0b2ea QS |
757 | sm->last_victim[p.gc_mode] %= |
758 | (MAIN_SECS(sbi) * sbi->segs_per_sec); | |
7bc09003 JK |
759 | break; |
760 | } | |
761 | } | |
093749e2 CY |
762 | |
763 | /* get victim for GC_AT/AT_SSR */ | |
764 | if (is_atgc) { | |
765 | lookup_victim_by_age(sbi, &p); | |
766 | release_victim_entry(sbi); | |
767 | } | |
768 | ||
769 | if (is_atgc && p.min_segno == NULL_SEGNO && | |
770 | sm->elapsed_time < p.age_threshold) { | |
771 | p.age_threshold = 0; | |
772 | goto retry; | |
773 | } | |
774 | ||
7bc09003 | 775 | if (p.min_segno != NULL_SEGNO) { |
b2b3460a | 776 | got_it: |
e3080b01 CY |
777 | *result = (p.min_segno / p.ofs_unit) * p.ofs_unit; |
778 | got_result: | |
7bc09003 | 779 | if (p.alloc_mode == LFS) { |
4ddb1a4d | 780 | secno = GET_SEC_FROM_SEG(sbi, p.min_segno); |
5ec4e49f JK |
781 | if (gc_type == FG_GC) |
782 | sbi->cur_victim_sec = secno; | |
783 | else | |
784 | set_bit(secno, dirty_i->victim_secmap); | |
7bc09003 | 785 | } |
97767500 | 786 | ret = 0; |
8e46b3ed | 787 | |
e3c59108 ST |
788 | } |
789 | out: | |
790 | if (p.min_segno != NULL_SEGNO) | |
8e46b3ed NJ |
791 | trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, |
792 | sbi->cur_victim_sec, | |
793 | prefree_segments(sbi), free_segments(sbi)); | |
7bc09003 JK |
794 | mutex_unlock(&dirty_i->seglist_lock); |
795 | ||
97767500 | 796 | return ret; |
7bc09003 JK |
797 | } |
798 | ||
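At its core, get_victim_by_default() is a bounded scan over the dirty bitmap that remembers the lowest-cost segment seen so far and resumes from `last_victim` on the next call. Stripped of locking, SSR, large-section and ATGC handling, the skeleton looks roughly like this user-space sketch (the per-segment valid-block counts are invented):

```c
#include <stdio.h>

#define NR_SEGS    16
#define MAX_SEARCH 8
#define NULL_SEGNO ((unsigned int)~0)

/* Hypothetical per-segment valid-block counts; 0 means "not dirty". */
static unsigned int valid_blocks[NR_SEGS] = {
	0, 37, 0, 12, 80, 0, 5, 64, 0, 0, 91, 3, 0, 44, 0, 29
};

/* Greedy policy: among up to MAX_SEARCH dirty segments starting at
 * 'offset', pick the one with the fewest valid blocks to copy.
 */
static unsigned int pick_victim(unsigned int offset)
{
	unsigned int segno, searched = 0;
	unsigned int min_cost = (unsigned int)~0, min_segno = NULL_SEGNO;

	for (segno = offset; segno < NR_SEGS && searched < MAX_SEARCH; segno++) {
		if (!valid_blocks[segno])	/* clean segment, skip */
			continue;
		searched++;
		if (valid_blocks[segno] < min_cost) {
			min_cost = valid_blocks[segno];
			min_segno = segno;
		}
	}
	return min_segno;
}

int main(void)
{
	printf("victim: %u\n", pick_victim(0));
	return 0;
}
```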
799 | static const struct victim_selection default_v_ops = { | |
800 | .get_victim = get_victim_by_default, | |
801 | }; | |
802 | ||
7dda2af8 | 803 | static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino) |
7bc09003 | 804 | { |
7bc09003 JK |
805 | struct inode_entry *ie; |
806 | ||
7dda2af8 CL |
807 | ie = radix_tree_lookup(&gc_list->iroot, ino); |
808 | if (ie) | |
809 | return ie->inode; | |
7bc09003 JK |
810 | return NULL; |
811 | } | |
812 | ||
7dda2af8 | 813 | static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode) |
7bc09003 | 814 | { |
6cc4af56 GZ |
815 | struct inode_entry *new_ie; |
816 | ||
7dda2af8 | 817 | if (inode == find_gc_inode(gc_list, inode->i_ino)) { |
6cc4af56 GZ |
818 | iput(inode); |
819 | return; | |
7bc09003 | 820 | } |
4d57b86d | 821 | new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS); |
7bc09003 | 822 | new_ie->inode = inode; |
f28e5034 CY |
823 | |
824 | f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie); | |
7dda2af8 | 825 | list_add_tail(&new_ie->list, &gc_list->ilist); |
7bc09003 JK |
826 | } |
827 | ||
7dda2af8 | 828 | static void put_gc_inode(struct gc_inode_list *gc_list) |
7bc09003 JK |
829 | { |
830 | struct inode_entry *ie, *next_ie; | |
7dda2af8 CL |
831 | list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) { |
832 | radix_tree_delete(&gc_list->iroot, ie->inode->i_ino); | |
7bc09003 JK |
833 | iput(ie->inode); |
834 | list_del(&ie->list); | |
4d57b86d | 835 | kmem_cache_free(f2fs_inode_entry_slab, ie); |
7bc09003 JK |
836 | } |
837 | } | |
838 | ||
839 | static int check_valid_map(struct f2fs_sb_info *sbi, | |
840 | unsigned int segno, int offset) | |
841 | { | |
842 | struct sit_info *sit_i = SIT_I(sbi); | |
843 | struct seg_entry *sentry; | |
844 | int ret; | |
845 | ||
3d26fa6b | 846 | down_read(&sit_i->sentry_lock); |
7bc09003 JK |
847 | sentry = get_seg_entry(sbi, segno); |
848 | ret = f2fs_test_bit(offset, sentry->cur_valid_map); | |
3d26fa6b | 849 | up_read(&sit_i->sentry_lock); |
43727527 | 850 | return ret; |
7bc09003 JK |
851 | } |
852 | ||
0a8165d7 | 853 | /* |
7bc09003 JK |
854 | * This function compares node address got in summary with that in NAT. |
855 | * On validity, copy that node with cold status, otherwise (invalid node) | |
856 | * ignore that. | |
857 | */ | |
48018b4c | 858 | static int gc_node_segment(struct f2fs_sb_info *sbi, |
7bc09003 JK |
859 | struct f2fs_summary *sum, unsigned int segno, int gc_type) |
860 | { | |
7bc09003 | 861 | struct f2fs_summary *entry; |
26d58599 | 862 | block_t start_addr; |
7bc09003 | 863 | int off; |
7ea984b0 | 864 | int phase = 0; |
c29fd0c0 | 865 | bool fggc = (gc_type == FG_GC); |
48018b4c | 866 | int submitted = 0; |
de881df9 | 867 | unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); |
7bc09003 | 868 | |
26d58599 JK |
869 | start_addr = START_BLOCK(sbi, segno); |
870 | ||
7bc09003 JK |
871 | next_step: |
872 | entry = sum; | |
c718379b | 873 | |
c29fd0c0 CY |
874 | if (fggc && phase == 2) |
875 | atomic_inc(&sbi->wb_sync_req[NODE]); | |
876 | ||
de881df9 | 877 | for (off = 0; off < usable_blks_in_seg; off++, entry++) { |
7bc09003 JK |
878 | nid_t nid = le32_to_cpu(entry->nid); |
879 | struct page *node_page; | |
26d58599 | 880 | struct node_info ni; |
48018b4c | 881 | int err; |
7bc09003 | 882 | |
43727527 | 883 | /* stop BG_GC if there is not enough free sections. */ |
7f3037a5 | 884 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) |
48018b4c | 885 | return submitted; |
7bc09003 | 886 | |
43727527 | 887 | if (check_valid_map(sbi, segno, off) == 0) |
7bc09003 JK |
888 | continue; |
889 | ||
7ea984b0 | 890 | if (phase == 0) { |
4d57b86d | 891 | f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, |
7ea984b0 CY |
892 | META_NAT, true); |
893 | continue; | |
894 | } | |
895 | ||
896 | if (phase == 1) { | |
4d57b86d | 897 | f2fs_ra_node_page(sbi, nid); |
7bc09003 JK |
898 | continue; |
899 | } | |
7ea984b0 CY |
900 | |
901 | /* phase == 2 */ | |
4d57b86d | 902 | node_page = f2fs_get_node_page(sbi, nid); |
7bc09003 JK |
903 | if (IS_ERR(node_page)) |
904 | continue; | |
905 | ||
4d57b86d | 906 | /* block may become invalid during f2fs_get_node_page */ |
9a01b56b YH |
907 | if (check_valid_map(sbi, segno, off) == 0) { |
908 | f2fs_put_page(node_page, 1); | |
909 | continue; | |
26d58599 JK |
910 | } |
911 | ||
7735730d CY |
912 | if (f2fs_get_node_info(sbi, nid, &ni)) { |
913 | f2fs_put_page(node_page, 1); | |
914 | continue; | |
915 | } | |
916 | ||
26d58599 JK |
917 | if (ni.blk_addr != start_addr + off) { |
918 | f2fs_put_page(node_page, 1); | |
919 | continue; | |
9a01b56b YH |
920 | } |
921 | ||
48018b4c CY |
922 | err = f2fs_move_node_page(node_page, gc_type); |
923 | if (!err && gc_type == FG_GC) | |
924 | submitted++; | |
e1235983 | 925 | stat_inc_node_blk_count(sbi, 1, gc_type); |
7bc09003 | 926 | } |
c718379b | 927 | |
7ea984b0 | 928 | if (++phase < 3) |
7bc09003 | 929 | goto next_step; |
c29fd0c0 CY |
930 | |
931 | if (fggc) | |
932 | atomic_dec(&sbi->wb_sync_req[NODE]); | |
48018b4c | 933 | return submitted; |
7bc09003 JK |
934 | } |
935 | ||
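gc_node_segment() walks the summary entries three times: phase 0 issues NAT readahead, phase 1 readaheads the node pages themselves, and only phase 2 migrates blocks, so the read I/O is batched before any page is actually moved. A minimal, generic illustration of that multi-pass structure (not f2fs-specific):

```c
#include <stdio.h>

#define NR_ENTRIES 4

static void prefetch_metadata(int i) { printf("phase 0: readahead metadata %d\n", i); }
static void prefetch_node(int i)     { printf("phase 1: readahead node %d\n", i); }
static void migrate_node(int i)      { printf("phase 2: move node %d\n", i); }

int main(void)
{
	int phase = 0, i;

next_step:
	for (i = 0; i < NR_ENTRIES; i++) {
		if (phase == 0) { prefetch_metadata(i); continue; }
		if (phase == 1) { prefetch_node(i); continue; }
		migrate_node(i);	/* phase == 2 */
	}
	if (++phase < 3)
		goto next_step;
	return 0;
}
```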
0a8165d7 | 936 | /* |
9af45ef5 JK |
937 | * Calculate start block index indicating the given node offset. |
938 | * Be careful, caller should give this node offset only indicating direct node | |
939 | * blocks. If any node offsets, which point the other types of node blocks such | |
940 | * as indirect or double indirect node blocks, are given, it must be a caller's | |
941 | * bug. | |
7bc09003 | 942 | */ |
4d57b86d | 943 | block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode) |
7bc09003 | 944 | { |
ce19a5d4 JK |
945 | unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4; |
946 | unsigned int bidx; | |
7bc09003 | 947 | |
ce19a5d4 JK |
948 | if (node_ofs == 0) |
949 | return 0; | |
7bc09003 | 950 | |
ce19a5d4 | 951 | if (node_ofs <= 2) { |
7bc09003 JK |
952 | bidx = node_ofs - 1; |
953 | } else if (node_ofs <= indirect_blks) { | |
ce19a5d4 | 954 | int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1); |
7bc09003 JK |
955 | bidx = node_ofs - 2 - dec; |
956 | } else { | |
ce19a5d4 | 957 | int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1); |
7bc09003 JK |
958 | bidx = node_ofs - 5 - dec; |
959 | } | |
d02a6e61 | 960 | return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode); |
7bc09003 JK |
961 | } |
962 | ||
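The arithmetic above counts how many interior node blocks (indirect and double-indirect nodes) precede a given direct node in the inode's node tree, so that `bidx` reflects only direct-node positions before it is scaled by ADDRS_PER_BLOCK(inode) and offset by ADDRS_PER_INODE(inode). The standalone rework below uses 1018/923 as illustrative values for those macros; treat the constants as assumptions.

```c
#include <stdio.h>

/* Illustrative constants (typical f2fs values; treat as assumptions). */
#define NIDS_PER_BLOCK  1018
#define ADDRS_PER_BLOCK 1018
#define ADDRS_PER_INODE  923

/* Same arithmetic as f2fs_start_bidx_of_node(), without the inode argument. */
static unsigned int start_bidx_of_node(unsigned int node_ofs)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx, dec;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

int main(void)
{
	/* node_ofs 1 is the first direct node: its data starts right after
	 * the ADDRS_PER_INODE addresses kept in the inode itself.
	 */
	printf("node_ofs=1 -> block index %u\n", start_bidx_of_node(1));
	/* node_ofs 4 is the first direct node under the first indirect node. */
	printf("node_ofs=4 -> block index %u\n", start_bidx_of_node(4));
	return 0;
}
```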
c1079892 | 963 | static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
7bc09003 JK |
964 | struct node_info *dni, block_t blkaddr, unsigned int *nofs) |
965 | { | |
966 | struct page *node_page; | |
967 | nid_t nid; | |
968 | unsigned int ofs_in_node; | |
969 | block_t source_blkaddr; | |
970 | ||
971 | nid = le32_to_cpu(sum->nid); | |
972 | ofs_in_node = le16_to_cpu(sum->ofs_in_node); | |
973 | ||
4d57b86d | 974 | node_page = f2fs_get_node_page(sbi, nid); |
7bc09003 | 975 | if (IS_ERR(node_page)) |
c1079892 | 976 | return false; |
7bc09003 | 977 | |
7735730d CY |
978 | if (f2fs_get_node_info(sbi, nid, dni)) { |
979 | f2fs_put_page(node_page, 1); | |
980 | return false; | |
981 | } | |
7bc09003 JK |
982 | |
983 | if (sum->version != dni->version) { | |
dcbb4c10 JP |
984 | f2fs_warn(sbi, "%s: valid data with mismatched node version.", |
985 | __func__); | |
c13ff37e | 986 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
7bc09003 JK |
987 | } |
988 | ||
989 | *nofs = ofs_of_node(node_page); | |
a2ced1ce | 990 | source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node); |
7bc09003 JK |
991 | f2fs_put_page(node_page, 1); |
992 | ||
bbf9f7d9 ST |
993 | if (source_blkaddr != blkaddr) { |
994 | #ifdef CONFIG_F2FS_CHECK_FS | |
995 | unsigned int segno = GET_SEGNO(sbi, blkaddr); | |
996 | unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); | |
997 | ||
998 | if (unlikely(check_valid_map(sbi, segno, offset))) { | |
999 | if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) { | |
1000 | f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n", | |
1001 | blkaddr, source_blkaddr, segno); | |
1002 | f2fs_bug_on(sbi, 1); | |
1003 | } | |
1004 | } | |
1005 | #endif | |
c1079892 | 1006 | return false; |
bbf9f7d9 | 1007 | } |
c1079892 | 1008 | return true; |
7bc09003 JK |
1009 | } |
1010 | ||
6aa58d8a CY |
1011 | static int ra_data_block(struct inode *inode, pgoff_t index) |
1012 | { | |
1013 | struct f2fs_sb_info *sbi = F2FS_I_SB(inode); | |
1014 | struct address_space *mapping = inode->i_mapping; | |
1015 | struct dnode_of_data dn; | |
1016 | struct page *page; | |
1017 | struct extent_info ei = {0, 0, 0}; | |
1018 | struct f2fs_io_info fio = { | |
1019 | .sbi = sbi, | |
1020 | .ino = inode->i_ino, | |
1021 | .type = DATA, | |
1022 | .temp = COLD, | |
1023 | .op = REQ_OP_READ, | |
1024 | .op_flags = 0, | |
1025 | .encrypted_page = NULL, | |
1026 | .in_list = false, | |
1027 | .retry = false, | |
1028 | }; | |
1029 | int err; | |
1030 | ||
1031 | page = f2fs_grab_cache_page(mapping, index, true); | |
1032 | if (!page) | |
1033 | return -ENOMEM; | |
1034 | ||
1035 | if (f2fs_lookup_extent_cache(inode, index, &ei)) { | |
1036 | dn.data_blkaddr = ei.blk + index - ei.fofs; | |
93770ab7 CY |
1037 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, |
1038 | DATA_GENERIC_ENHANCE_READ))) { | |
10f966bb | 1039 | err = -EFSCORRUPTED; |
93770ab7 CY |
1040 | goto put_page; |
1041 | } | |
6aa58d8a CY |
1042 | goto got_it; |
1043 | } | |
1044 | ||
1045 | set_new_dnode(&dn, inode, NULL, NULL, 0); | |
1046 | err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); | |
1047 | if (err) | |
1048 | goto put_page; | |
1049 | f2fs_put_dnode(&dn); | |
1050 | ||
93770ab7 CY |
1051 | if (!__is_valid_data_blkaddr(dn.data_blkaddr)) { |
1052 | err = -ENOENT; | |
1053 | goto put_page; | |
1054 | } | |
6aa58d8a | 1055 | if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, |
93770ab7 | 1056 | DATA_GENERIC_ENHANCE))) { |
10f966bb | 1057 | err = -EFSCORRUPTED; |
6aa58d8a CY |
1058 | goto put_page; |
1059 | } | |
1060 | got_it: | |
1061 | /* read page */ | |
1062 | fio.page = page; | |
1063 | fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; | |
1064 | ||
9bf1a3f7 YS |
1065 | /* |
1066 | * don't cache encrypted data into meta inode until previous dirty | |
1067 | * data were writebacked to avoid racing between GC and flush. | |
1068 | */ | |
bae0ee7a | 1069 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
9bf1a3f7 YS |
1070 | |
1071 | f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); | |
1072 | ||
6aa58d8a CY |
1073 | fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi), |
1074 | dn.data_blkaddr, | |
1075 | FGP_LOCK | FGP_CREAT, GFP_NOFS); | |
1076 | if (!fio.encrypted_page) { | |
1077 | err = -ENOMEM; | |
1078 | goto put_page; | |
1079 | } | |
1080 | ||
1081 | err = f2fs_submit_page_bio(&fio); | |
1082 | if (err) | |
1083 | goto put_encrypted_page; | |
1084 | f2fs_put_page(fio.encrypted_page, 0); | |
1085 | f2fs_put_page(page, 1); | |
8b83ac81 CY |
1086 | |
1087 | f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); | |
9c122384 | 1088 | f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); |
8b83ac81 | 1089 | |
6aa58d8a CY |
1090 | return 0; |
1091 | put_encrypted_page: | |
1092 | f2fs_put_page(fio.encrypted_page, 1); | |
1093 | put_page: | |
1094 | f2fs_put_page(page, 1); | |
1095 | return err; | |
1096 | } | |
1097 | ||
d4c759ee JK |
1098 | /* |
1099 | * Move data block via META_MAPPING while keeping locked data page. | |
1100 | * This can be used to move blocks, aka LBAs, directly on disk. | |
1101 | */ | |
48018b4c | 1102 | static int move_data_block(struct inode *inode, block_t bidx, |
2ef79ecb | 1103 | int gc_type, unsigned int segno, int off) |
4375a336 JK |
1104 | { |
1105 | struct f2fs_io_info fio = { | |
1106 | .sbi = F2FS_I_SB(inode), | |
39d787be | 1107 | .ino = inode->i_ino, |
4375a336 | 1108 | .type = DATA, |
a912b54d | 1109 | .temp = COLD, |
04d328de | 1110 | .op = REQ_OP_READ, |
70fd7614 | 1111 | .op_flags = 0, |
4375a336 | 1112 | .encrypted_page = NULL, |
fb830fc5 | 1113 | .in_list = false, |
fe16efe6 | 1114 | .retry = false, |
4375a336 JK |
1115 | }; |
1116 | struct dnode_of_data dn; | |
1117 | struct f2fs_summary sum; | |
1118 | struct node_info ni; | |
6aa58d8a | 1119 | struct page *page, *mpage; |
4356e48e | 1120 | block_t newaddr; |
48018b4c | 1121 | int err = 0; |
b0332a0f | 1122 | bool lfs_mode = f2fs_lfs_mode(fio.sbi); |
093749e2 CY |
1123 | int type = fio.sbi->am.atgc_enabled ? |
1124 | CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA; | |
4375a336 JK |
1125 | |
1126 | /* do not read out */ | |
a56c7c6f | 1127 | page = f2fs_grab_cache_page(inode->i_mapping, bidx, false); |
4375a336 | 1128 | if (!page) |
48018b4c | 1129 | return -ENOMEM; |
4375a336 | 1130 | |
48018b4c CY |
1131 | if (!check_valid_map(F2FS_I_SB(inode), segno, off)) { |
1132 | err = -ENOENT; | |
20614711 | 1133 | goto out; |
48018b4c | 1134 | } |
20614711 | 1135 | |
2ef79ecb CY |
1136 | if (f2fs_is_atomic_file(inode)) { |
1137 | F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++; | |
1138 | F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++; | |
48018b4c | 1139 | err = -EAGAIN; |
5fe45743 | 1140 | goto out; |
2ef79ecb | 1141 | } |
5fe45743 | 1142 | |
1ad71a27 JK |
1143 | if (f2fs_is_pinned_file(inode)) { |
1144 | f2fs_pin_file_control(inode, true); | |
48018b4c | 1145 | err = -EAGAIN; |
1ad71a27 JK |
1146 | goto out; |
1147 | } | |
1148 | ||
4375a336 | 1149 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
4d57b86d | 1150 | err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE); |
4375a336 JK |
1151 | if (err) |
1152 | goto out; | |
1153 | ||
08b39fbd CY |
1154 | if (unlikely(dn.data_blkaddr == NULL_ADDR)) { |
1155 | ClearPageUptodate(page); | |
48018b4c | 1156 | err = -ENOENT; |
4375a336 | 1157 | goto put_out; |
08b39fbd CY |
1158 | } |
1159 | ||
1160 | /* | |
1161 | * don't cache encrypted data into meta inode until previous dirty | |
1162 | * data were writebacked to avoid racing between GC and flush. | |
1163 | */ | |
bae0ee7a | 1164 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
4375a336 | 1165 | |
9bf1a3f7 YS |
1166 | f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); |
1167 | ||
7735730d CY |
1168 | err = f2fs_get_node_info(fio.sbi, dn.nid, &ni); |
1169 | if (err) | |
1170 | goto put_out; | |
1171 | ||
4375a336 JK |
1172 | set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version); |
1173 | ||
1174 | /* read page */ | |
1175 | fio.page = page; | |
7a9d7548 | 1176 | fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; |
4375a336 | 1177 | |
107a805d CY |
1178 | if (lfs_mode) |
1179 | down_write(&fio.sbi->io_order_lock); | |
1180 | ||
543b8c46 JK |
1181 | mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), |
1182 | fio.old_blkaddr, false); | |
d7cd3702 CY |
1183 | if (!mpage) { |
1184 | err = -ENOMEM; | |
543b8c46 | 1185 | goto up_out; |
d7cd3702 | 1186 | } |
543b8c46 JK |
1187 | |
1188 | fio.encrypted_page = mpage; | |
1189 | ||
1190 | /* read source block in mpage */ | |
1191 | if (!PageUptodate(mpage)) { | |
1192 | err = f2fs_submit_page_bio(&fio); | |
1193 | if (err) { | |
1194 | f2fs_put_page(mpage, 1); | |
1195 | goto up_out; | |
1196 | } | |
8b83ac81 CY |
1197 | |
1198 | f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE); | |
9c122384 | 1199 | f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE); |
8b83ac81 | 1200 | |
543b8c46 JK |
1201 | lock_page(mpage); |
1202 | if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || | |
1203 | !PageUptodate(mpage))) { | |
1204 | err = -EIO; | |
1205 | f2fs_put_page(mpage, 1); | |
1206 | goto up_out; | |
1207 | } | |
1208 | } | |
1209 | ||
4d57b86d | 1210 | f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, |
093749e2 | 1211 | &sum, type, NULL); |
4356e48e | 1212 | |
01eccef7 CY |
1213 | fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), |
1214 | newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); | |
4356e48e CY |
1215 | if (!fio.encrypted_page) { |
1216 | err = -ENOMEM; | |
6aa58d8a | 1217 | f2fs_put_page(mpage, 1); |
543b8c46 | 1218 | goto recover_block; |
4356e48e | 1219 | } |
548aedac | 1220 | |
543b8c46 | 1221 | /* write target block */ |
bae0ee7a | 1222 | f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true); |
543b8c46 JK |
1223 | memcpy(page_address(fio.encrypted_page), |
1224 | page_address(mpage), PAGE_SIZE); | |
1225 | f2fs_put_page(mpage, 1); | |
1226 | invalidate_mapping_pages(META_MAPPING(fio.sbi), | |
1227 | fio.old_blkaddr, fio.old_blkaddr); | |
1228 | ||
8d64d365 | 1229 | set_page_dirty(fio.encrypted_page); |
6282adbf JK |
1230 | if (clear_page_dirty_for_io(fio.encrypted_page)) |
1231 | dec_page_count(fio.sbi, F2FS_DIRTY_META); | |
1232 | ||
548aedac | 1233 | set_page_writeback(fio.encrypted_page); |
17c50035 | 1234 | ClearPageError(page); |
4375a336 JK |
1235 | |
1236 | /* allocate block address */ | |
bae0ee7a | 1237 | f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true); |
4356e48e | 1238 | |
04d328de | 1239 | fio.op = REQ_OP_WRITE; |
70fd7614 | 1240 | fio.op_flags = REQ_SYNC; |
4356e48e | 1241 | fio.new_blkaddr = newaddr; |
fe16efe6 CY |
1242 | f2fs_submit_page_write(&fio); |
1243 | if (fio.retry) { | |
48018b4c | 1244 | err = -EAGAIN; |
a9d572c7 SY |
1245 | if (PageWriteback(fio.encrypted_page)) |
1246 | end_page_writeback(fio.encrypted_page); | |
1247 | goto put_page_out; | |
1248 | } | |
4375a336 | 1249 | |
b0af6d49 CY |
1250 | f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE); |
1251 | ||
f28b3434 | 1252 | f2fs_update_data_blkaddr(&dn, newaddr); |
91942321 | 1253 | set_inode_flag(inode, FI_APPEND_WRITE); |
4375a336 | 1254 | if (page->index == 0) |
91942321 | 1255 | set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); |
548aedac | 1256 | put_page_out: |
4375a336 | 1257 | f2fs_put_page(fio.encrypted_page, 1); |
4356e48e CY |
1258 | recover_block: |
1259 | if (err) | |
4d57b86d | 1260 | f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, |
c5d02785 | 1261 | true, true, true); |
543b8c46 JK |
1262 | up_out: |
1263 | if (lfs_mode) | |
1264 | up_write(&fio.sbi->io_order_lock); | |
4375a336 JK |
1265 | put_out: |
1266 | f2fs_put_dnode(&dn); | |
1267 | out: | |
1268 | f2fs_put_page(page, 1); | |
48018b4c | 1269 | return err; |
4375a336 JK |
1270 | } |
1271 | ||
48018b4c | 1272 | static int move_data_page(struct inode *inode, block_t bidx, int gc_type, |
20614711 | 1273 | unsigned int segno, int off) |
7bc09003 | 1274 | { |
c879f90d | 1275 | struct page *page; |
48018b4c | 1276 | int err = 0; |
c879f90d | 1277 | |
4d57b86d | 1278 | page = f2fs_get_lock_data_page(inode, bidx, true); |
c879f90d | 1279 | if (IS_ERR(page)) |
48018b4c | 1280 | return PTR_ERR(page); |
63a0b7cb | 1281 | |
48018b4c CY |
1282 | if (!check_valid_map(F2FS_I_SB(inode), segno, off)) { |
1283 | err = -ENOENT; | |
20614711 | 1284 | goto out; |
48018b4c | 1285 | } |
20614711 | 1286 | |
2ef79ecb CY |
1287 | if (f2fs_is_atomic_file(inode)) { |
1288 | F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++; | |
1289 | F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++; | |
48018b4c | 1290 | err = -EAGAIN; |
5fe45743 | 1291 | goto out; |
2ef79ecb | 1292 | } |
1ad71a27 JK |
1293 | if (f2fs_is_pinned_file(inode)) { |
1294 | if (gc_type == FG_GC) | |
1295 | f2fs_pin_file_control(inode, true); | |
48018b4c | 1296 | err = -EAGAIN; |
1ad71a27 JK |
1297 | goto out; |
1298 | } | |
5fe45743 | 1299 | |
7bc09003 | 1300 | if (gc_type == BG_GC) { |
48018b4c CY |
1301 | if (PageWriteback(page)) { |
1302 | err = -EAGAIN; | |
4ebefc44 | 1303 | goto out; |
48018b4c | 1304 | } |
7bc09003 JK |
1305 | set_page_dirty(page); |
1306 | set_cold_data(page); | |
1307 | } else { | |
c879f90d JK |
1308 | struct f2fs_io_info fio = { |
1309 | .sbi = F2FS_I_SB(inode), | |
39d787be | 1310 | .ino = inode->i_ino, |
c879f90d | 1311 | .type = DATA, |
a912b54d | 1312 | .temp = COLD, |
04d328de | 1313 | .op = REQ_OP_WRITE, |
70fd7614 | 1314 | .op_flags = REQ_SYNC, |
e959c8f5 | 1315 | .old_blkaddr = NULL_ADDR, |
c879f90d | 1316 | .page = page, |
4375a336 | 1317 | .encrypted_page = NULL, |
cc15620b | 1318 | .need_lock = LOCK_REQ, |
b0af6d49 | 1319 | .io_type = FS_GC_DATA_IO, |
c879f90d | 1320 | }; |
72e1c797 | 1321 | bool is_dirty = PageDirty(page); |
72e1c797 CY |
1322 | |
1323 | retry: | |
bae0ee7a | 1324 | f2fs_wait_on_page_writeback(page, DATA, true, true); |
8d64d365 CY |
1325 | |
1326 | set_page_dirty(page); | |
933439c8 | 1327 | if (clear_page_dirty_for_io(page)) { |
a7ffdbe2 | 1328 | inode_dec_dirty_pages(inode); |
4d57b86d | 1329 | f2fs_remove_dirty_inode(inode); |
933439c8 | 1330 | } |
72e1c797 | 1331 | |
7bc09003 | 1332 | set_cold_data(page); |
72e1c797 | 1333 | |
4d57b86d | 1334 | err = f2fs_do_write_data_page(&fio); |
14a28559 CY |
1335 | if (err) { |
1336 | clear_cold_data(page); | |
1337 | if (err == -ENOMEM) { | |
5df7731f CY |
1338 | congestion_wait(BLK_RW_ASYNC, |
1339 | DEFAULT_IO_TIMEOUT); | |
14a28559 CY |
1340 | goto retry; |
1341 | } | |
1342 | if (is_dirty) | |
1343 | set_page_dirty(page); | |
72e1c797 | 1344 | } |
7bc09003 JK |
1345 | } |
1346 | out: | |
1347 | f2fs_put_page(page, 1); | |
48018b4c | 1348 | return err; |
7bc09003 JK |
1349 | } |
1350 | ||
0a8165d7 | 1351 | /* |
7bc09003 JK |
1352 | * This function tries to get parent node of victim data block, and identifies |
1353 | * data block validity. If the block is valid, copy that with cold status and | |
1354 | * modify parent node. | |
1355 | * If the parent node is not valid or the data block address is different, | |
1356 | * the victim data block is ignored. | |
1357 | */ | |
48018b4c | 1358 | static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, |
7dda2af8 | 1359 | struct gc_inode_list *gc_list, unsigned int segno, int gc_type) |
7bc09003 JK |
1360 | { |
1361 | struct super_block *sb = sbi->sb; | |
1362 | struct f2fs_summary *entry; | |
1363 | block_t start_addr; | |
43727527 | 1364 | int off; |
7bc09003 | 1365 | int phase = 0; |
48018b4c | 1366 | int submitted = 0; |
de881df9 | 1367 | unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno); |
7bc09003 JK |
1368 | |
1369 | start_addr = START_BLOCK(sbi, segno); | |
1370 | ||
1371 | next_step: | |
1372 | entry = sum; | |
c718379b | 1373 | |
de881df9 | 1374 | for (off = 0; off < usable_blks_in_seg; off++, entry++) { |
7bc09003 JK |
1375 | struct page *data_page; |
1376 | struct inode *inode; | |
1377 | struct node_info dni; /* dnode info for the data */ | |
1378 | unsigned int ofs_in_node, nofs; | |
1379 | block_t start_bidx; | |
7ea984b0 | 1380 | nid_t nid = le32_to_cpu(entry->nid); |
7bc09003 | 1381 | |
803e74be JK |
1382 | /* |
1383 | * stop BG_GC if there is not enough free sections. | |
1384 | * Or, stop GC if the segment becomes fully valid caused by | |
1385 | * race condition along with SSR block allocation. | |
1386 | */ | |
1387 | if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) || | |
2bac0763 JK |
1388 | get_valid_blocks(sbi, segno, true) == |
1389 | BLKS_PER_SEC(sbi)) | |
48018b4c | 1390 | return submitted; |
7bc09003 | 1391 | |
43727527 | 1392 | if (check_valid_map(sbi, segno, off) == 0) |
7bc09003 JK |
1393 | continue; |
1394 | ||
1395 | if (phase == 0) { | |
4d57b86d | 1396 | f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, |
7ea984b0 CY |
1397 | META_NAT, true); |
1398 | continue; | |
1399 | } | |
1400 | ||
1401 | if (phase == 1) { | |
4d57b86d | 1402 | f2fs_ra_node_page(sbi, nid); |
7bc09003 JK |
1403 | continue; |
1404 | } | |
1405 | ||
1406 | /* Get an inode by ino with checking validity */ | |
c1079892 | 1407 | if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) |
7bc09003 JK |
1408 | continue; |
1409 | ||
7ea984b0 | 1410 | if (phase == 2) { |
4d57b86d | 1411 | f2fs_ra_node_page(sbi, dni.ino); |
7bc09003 JK |
1412 | continue; |
1413 | } | |
1414 | ||
7bc09003 JK |
1415 | ofs_in_node = le16_to_cpu(entry->ofs_in_node); |
1416 | ||
7ea984b0 | 1417 | if (phase == 3) { |
d4686d56 | 1418 | inode = f2fs_iget(sb, dni.ino); |
4eea93e3 JK |
1419 | if (IS_ERR(inode) || is_bad_inode(inode)) { |
1420 | set_sbi_flag(sbi, SBI_NEED_FSCK); | |
7bc09003 | 1421 | continue; |
4eea93e3 | 1422 | } |
7bc09003 | 1423 | |
bb06664a | 1424 | if (!down_write_trylock( |
b2532c69 | 1425 | &F2FS_I(inode)->i_gc_rwsem[WRITE])) { |
bb06664a | 1426 | iput(inode); |
6f8d4455 | 1427 | sbi->skipped_gc_rwsem++; |
bb06664a CY |
1428 | continue; |
1429 | } | |
1430 | ||
6aa58d8a CY |
1431 | start_bidx = f2fs_start_bidx_of_node(nofs, inode) + |
1432 | ofs_in_node; | |
1433 | ||
1434 | if (f2fs_post_read_required(inode)) { | |
1435 | int err = ra_data_block(inode, start_bidx); | |
1436 | ||
1437 | up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); | |
1438 | if (err) { | |
1439 | iput(inode); | |
1440 | continue; | |
1441 | } | |
1442 | add_gc_inode(gc_list, inode); | |
1443 | continue; | |
1444 | } | |
1445 | ||
4d57b86d | 1446 | data_page = f2fs_get_read_data_page(inode, |
6aa58d8a | 1447 | start_bidx, REQ_RAHEAD, true); |
b2532c69 | 1448 | up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); |
31a32688 CL |
1449 | if (IS_ERR(data_page)) { |
1450 | iput(inode); | |
1451 | continue; | |
1452 | } | |
7bc09003 JK |
1453 | |
1454 | f2fs_put_page(data_page, 0); | |
7dda2af8 | 1455 | add_gc_inode(gc_list, inode); |
31a32688 CL |
1456 | continue; |
1457 | } | |
1458 | ||
7ea984b0 | 1459 | /* phase 4 */ |
7dda2af8 | 1460 | inode = find_gc_inode(gc_list, dni.ino); |
31a32688 | 1461 | if (inode) { |
82e0a5aa CY |
1462 | struct f2fs_inode_info *fi = F2FS_I(inode); |
1463 | bool locked = false; | |
48018b4c | 1464 | int err; |
82e0a5aa CY |
1465 | |
1466 | if (S_ISREG(inode->i_mode)) { | |
b2532c69 | 1467 | if (!down_write_trylock(&fi->i_gc_rwsem[READ])) |
82e0a5aa CY |
1468 | continue; |
1469 | if (!down_write_trylock( | |
b2532c69 | 1470 | &fi->i_gc_rwsem[WRITE])) { |
6f8d4455 | 1471 | sbi->skipped_gc_rwsem++; |
b2532c69 | 1472 | up_write(&fi->i_gc_rwsem[READ]); |
82e0a5aa CY |
1473 | continue; |
1474 | } | |
1475 | locked = true; | |
73ac2f4e CY |
1476 | |
1477 | /* wait for all inflight aio data */ | |
1478 | inode_dio_wait(inode); | |
82e0a5aa CY |
1479 | } |
1480 | ||
4d57b86d | 1481 | start_bidx = f2fs_start_bidx_of_node(nofs, inode) |
c879f90d | 1482 | + ofs_in_node; |
6dbb1796 | 1483 | if (f2fs_post_read_required(inode)) |
48018b4c CY |
1484 | err = move_data_block(inode, start_bidx, |
1485 | gc_type, segno, off); | |
4375a336 | 1486 | else |
48018b4c | 1487 | err = move_data_page(inode, start_bidx, gc_type, |
d4c759ee | 1488 | segno, off); |
82e0a5aa | 1489 | |
48018b4c CY |
1490 | if (!err && (gc_type == FG_GC || |
1491 | f2fs_post_read_required(inode))) | |
1492 | submitted++; | |
1493 | ||
82e0a5aa | 1494 | if (locked) { |
b2532c69 CY |
1495 | up_write(&fi->i_gc_rwsem[WRITE]); |
1496 | up_write(&fi->i_gc_rwsem[READ]); | |
82e0a5aa CY |
1497 | } |
1498 | ||
e1235983 | 1499 | stat_inc_data_blk_count(sbi, 1, gc_type); |
7bc09003 | 1500 | } |
7bc09003 | 1501 | } |
c718379b | 1502 | |
7ea984b0 | 1503 | if (++phase < 5) |
7bc09003 | 1504 | goto next_step; |
48018b4c CY |
1505 | |
1506 | return submitted; | |
7bc09003 JK |
1507 | } |
1508 | ||
1509 | static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, | |
8a2d0ace | 1510 | int gc_type) |
7bc09003 JK |
1511 | { |
1512 | struct sit_info *sit_i = SIT_I(sbi); | |
1513 | int ret; | |
8a2d0ace | 1514 | |
3d26fa6b | 1515 | down_write(&sit_i->sentry_lock); |
8a2d0ace | 1516 | ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, |
093749e2 | 1517 | NO_CHECK_TYPE, LFS, 0); |
3d26fa6b | 1518 | up_write(&sit_i->sentry_lock); |
7bc09003 JK |
1519 | return ret; |
1520 | } | |
1521 | ||
718e53fa CY |
1522 | static int do_garbage_collect(struct f2fs_sb_info *sbi, |
1523 | unsigned int start_segno, | |
7dda2af8 | 1524 | struct gc_inode_list *gc_list, int gc_type) |
7bc09003 JK |
1525 | { |
1526 | struct page *sum_page; | |
1527 | struct f2fs_summary_block *sum; | |
c718379b | 1528 | struct blk_plug plug; |
718e53fa CY |
1529 | unsigned int segno = start_segno; |
1530 | unsigned int end_segno = start_segno + sbi->segs_per_sec; | |
e3080b01 | 1531 | int seg_freed = 0, migrated = 0; |
718e53fa CY |
1532 | unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? |
1533 | SUM_TYPE_DATA : SUM_TYPE_NODE; | |
48018b4c | 1534 | int submitted = 0; |
7bc09003 | 1535 | |
e3080b01 CY |
1536 | if (__is_large_section(sbi)) |
1537 | end_segno = rounddown(end_segno, sbi->segs_per_sec); | |
1538 | ||
de881df9 AR |
1539 | /* |
1540 | * zone-capacity can be less than zone-size on zoned devices, |
1541 | * leaving fewer usable segments in the zone than expected, so |
1542 | * calculate the end segno in the zone that can be garbage collected |
1543 | */ | |
1544 | if (f2fs_sb_has_blkzoned(sbi)) | |
1545 | end_segno -= sbi->segs_per_sec - | |
1546 | f2fs_usable_segs_in_sec(sbi, segno); | |
1547 | ||
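	/*
	 * Worked example (hypothetical geometry, not taken from the source):
	 * with segs_per_sec = 8 and f2fs_usable_segs_in_sec() returning 6
	 * because the zone capacity is smaller than the zone size, end_segno
	 * is pulled back by 8 - 6 = 2, so GC never scans the two segments
	 * that lie beyond the usable capacity of the zone.
	 */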
093749e2 CY |
1548 | sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type); |
1549 | ||
718e53fa | 1550 | /* readahead multiple SSA blocks that have contiguous addresses */ |
2c70c5e3 | 1551 | if (__is_large_section(sbi)) |
4d57b86d | 1552 | f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), |
e3080b01 | 1553 | end_segno - segno, META_SSA, true); |
718e53fa CY |
1554 | |
1555 | /* reference all summary pages */ |
1556 | while (segno < end_segno) { | |
4d57b86d | 1557 | sum_page = f2fs_get_sum_page(sbi, segno++); |
edc55aaf JK |
1558 | if (IS_ERR(sum_page)) { |
1559 | int err = PTR_ERR(sum_page); | |
1560 | ||
1561 | end_segno = segno - 1; | |
1562 | for (segno = start_segno; segno < end_segno; segno++) { | |
1563 | sum_page = find_get_page(META_MAPPING(sbi), | |
1564 | GET_SUM_BLOCK(sbi, segno)); | |
1565 | f2fs_put_page(sum_page, 0); | |
1566 | f2fs_put_page(sum_page, 0); | |
1567 | } | |
1568 | return err; | |
1569 | } | |
718e53fa CY |
1570 | unlock_page(sum_page); |
1571 | } | |
7bc09003 | 1572 | |
c718379b JK |
1573 | blk_start_plug(&plug); |
1574 | ||
718e53fa | 1575 | for (segno = start_segno; segno < end_segno; segno++) { |
aa987273 | 1576 | |
718e53fa CY |
1577 | /* find the segment summary of the victim */ |
1578 | sum_page = find_get_page(META_MAPPING(sbi), | |
1579 | GET_SUM_BLOCK(sbi, segno)); | |
718e53fa | 1580 | f2fs_put_page(sum_page, 0); |
7bc09003 | 1581 | |
d6c66cd1 YS |
1582 | if (get_valid_blocks(sbi, segno, false) == 0) |
1583 | goto freed; | |
dabfbbc8 | 1584 | if (gc_type == BG_GC && __is_large_section(sbi) && |
e3080b01 CY |
1585 | migrated >= sbi->migration_granularity) |
1586 | goto skip; | |
d6c66cd1 | 1587 | if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi))) |
e3080b01 | 1588 | goto skip; |
de0dcc40 | 1589 | |
718e53fa | 1590 | sum = page_address(sum_page); |
10d255c3 | 1591 | if (type != GET_SUM_TYPE((&sum->footer))) { |
dcbb4c10 JP |
1592 | f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT", |
1593 | segno, type, GET_SUM_TYPE((&sum->footer))); | |
10d255c3 | 1594 | set_sbi_flag(sbi, SBI_NEED_FSCK); |
793ab1c8 | 1595 | f2fs_stop_checkpoint(sbi, false); |
e3080b01 | 1596 | goto skip; |
10d255c3 | 1597 | } |
718e53fa CY |
1598 | |
1599 | /* | |
1600 | * this is to avoid deadlock: | |
1601 | * - lock_page(sum_page) - f2fs_replace_block | |
3d26fa6b CY |
1602 | * - check_valid_map() - down_write(sentry_lock) |
1603 | * - down_read(sentry_lock) - change_curseg() | |
718e53fa CY |
1604 | * - lock_page(sum_page) |
1605 | */ | |
718e53fa | 1606 | if (type == SUM_TYPE_NODE) |
48018b4c | 1607 | submitted += gc_node_segment(sbi, sum->entries, segno, |
718e53fa | 1608 | gc_type); |
48018b4c CY |
1609 | else |
1610 | submitted += gc_data_segment(sbi, sum->entries, gc_list, | |
1611 | segno, gc_type); | |
718e53fa CY |
1612 | |
1613 | stat_inc_seg_count(sbi, type, gc_type); | |
8c7b9ac1 | 1614 | migrated++; |
c56f16da | 1615 | |
d6c66cd1 | 1616 | freed: |
c56f16da CY |
1617 | if (gc_type == FG_GC && |
1618 | get_valid_blocks(sbi, segno, false) == 0) | |
1619 | seg_freed++; | |
e3080b01 CY |
1620 | |
1621 | if (__is_large_section(sbi) && segno + 1 < end_segno) | |
1622 | sbi->next_victim_seg[gc_type] = segno + 1; | |
1623 | skip: | |
718e53fa CY |
1624 | f2fs_put_page(sum_page, 0); |
1625 | } | |
1626 | ||
48018b4c | 1627 | if (submitted) |
b9109b0e JK |
1628 | f2fs_submit_merged_write(sbi, |
1629 | (type == SUM_TYPE_NODE) ? NODE : DATA); | |
c718379b | 1630 | |
718e53fa | 1631 | blk_finish_plug(&plug); |
7bc09003 | 1632 | |
17d899df CY |
1633 | stat_inc_call_count(sbi->stat_info); |
1634 | ||
c56f16da | 1635 | return seg_freed; |
7bc09003 JK |
1636 | } |
1637 | ||
e066b83c JK |
1638 | int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, |
1639 | bool background, unsigned int segno) | |
7bc09003 | 1640 | { |
d530d4d8 | 1641 | int gc_type = sync ? FG_GC : BG_GC; |
c56f16da CY |
1642 | int sec_freed = 0, seg_freed = 0, total_freed = 0; |
1643 | int ret = 0; | |
d5053a34 | 1644 | struct cp_control cpc; |
e066b83c | 1645 | unsigned int init_segno = segno; |
7dda2af8 CL |
1646 | struct gc_inode_list gc_list = { |
1647 | .ilist = LIST_HEAD_INIT(gc_list.ilist), | |
f6bb2a2c | 1648 | .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), |
7dda2af8 | 1649 | }; |
2ef79ecb | 1650 | unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC]; |
6f8d4455 | 1651 | unsigned long long first_skipped; |
2ef79ecb | 1652 | unsigned int skipped_round = 0, round = 0; |
d5053a34 | 1653 | |
c56f16da CY |
1654 | trace_f2fs_gc_begin(sbi->sb, sync, background, |
1655 | get_pages(sbi, F2FS_DIRTY_NODES), | |
1656 | get_pages(sbi, F2FS_DIRTY_DENTS), | |
1657 | get_pages(sbi, F2FS_DIRTY_IMETA), | |
1658 | free_sections(sbi), | |
1659 | free_segments(sbi), | |
1660 | reserved_segments(sbi), | |
1661 | prefree_segments(sbi)); | |
1662 | ||
119ee914 | 1663 | cpc.reason = __get_cp_reason(sbi); |
6f8d4455 JK |
1664 | sbi->skipped_gc_rwsem = 0; |
1665 | first_skipped = last_skipped; | |
7bc09003 | 1666 | gc_more: |
1751e8a6 | 1667 | if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) { |
e5dbd956 | 1668 | ret = -EINVAL; |
408e9375 | 1669 | goto stop; |
e5dbd956 | 1670 | } |
6d5a1495 CY |
1671 | if (unlikely(f2fs_cp_error(sbi))) { |
1672 | ret = -EIO; | |
203681f6 | 1673 | goto stop; |
6d5a1495 | 1674 | } |
7bc09003 | 1675 | |
19f4e688 | 1676 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { |
6e17bfbc | 1677 | /* |
19f4e688 HP |
1678 | * For example, if there are many prefree_segments below the given |
1679 | * threshold, we can free them with a checkpoint. Then we | |
1680 | * secure free segments which don't need FG_GC any more. | |
6e17bfbc | 1681 | */ |
4354994f DR |
1682 | if (prefree_segments(sbi) && |
1683 | !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) { | |
4d57b86d | 1684 | ret = f2fs_write_checkpoint(sbi, &cpc); |
8fd5a37e JK |
1685 | if (ret) |
1686 | goto stop; | |
1687 | } | |
19f4e688 HP |
1688 | if (has_not_enough_free_secs(sbi, 0, 0)) |
1689 | gc_type = FG_GC; | |
d64f8047 | 1690 | } |
7bc09003 | 1691 | |
19f4e688 | 1692 | /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */ |
c56f16da CY |
1693 | if (gc_type == BG_GC && !background) { |
1694 | ret = -EINVAL; | |
19f4e688 | 1695 | goto stop; |
c56f16da | 1696 | } |
97767500 QZ |
1697 | ret = __get_victim(sbi, &segno, gc_type); |
1698 | if (ret) | |
408e9375 | 1699 | goto stop; |
7bc09003 | 1700 | |
c56f16da | 1701 | seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type); |
de881df9 AR |
1702 | if (gc_type == FG_GC && |
1703 | seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) | |
45fe8492 | 1704 | sec_freed++; |
c56f16da | 1705 | total_freed += seg_freed; |
43727527 | 1706 | |
2ef79ecb | 1707 | if (gc_type == FG_GC) { |
6f8d4455 JK |
1708 | if (sbi->skipped_atomic_files[FG_GC] > last_skipped || |
1709 | sbi->skipped_gc_rwsem) | |
2ef79ecb CY |
1710 | skipped_round++; |
1711 | last_skipped = sbi->skipped_atomic_files[FG_GC]; | |
1712 | round++; | |
1713 | } | |
1714 | ||
957fa478 | 1715 | if (gc_type == FG_GC && seg_freed) |
5ec4e49f | 1716 | sbi->cur_victim_sec = NULL_SEGNO; |
43727527 | 1717 | |
6f8d4455 JK |
1718 | if (sync) |
1719 | goto stop; | |
1720 | ||
1721 | if (has_not_enough_free_secs(sbi, sec_freed, 0)) { | |
1722 | if (skipped_round <= MAX_SKIP_GC_COUNT || | |
1723 | skipped_round * 2 < round) { | |
e066b83c | 1724 | segno = NULL_SEGNO; |
d530d4d8 | 1725 | goto gc_more; |
e066b83c | 1726 | } |
43727527 | 1727 | |
6f8d4455 JK |
1728 | if (first_skipped < last_skipped && |
1729 | (last_skipped - first_skipped) > | |
1730 | sbi->skipped_gc_rwsem) { | |
1731 | f2fs_drop_inmem_pages_all(sbi, true); | |
1732 | segno = NULL_SEGNO; | |
1733 | goto gc_more; | |
1734 | } | |
4354994f | 1735 | if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED)) |
4d57b86d | 1736 | ret = f2fs_write_checkpoint(sbi, &cpc); |
d530d4d8 | 1737 | } |
408e9375 | 1738 | stop: |
e066b83c JK |
1739 | SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; |
1740 | SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno; | |
c56f16da CY |
1741 | |
1742 | trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed, | |
1743 | get_pages(sbi, F2FS_DIRTY_NODES), | |
1744 | get_pages(sbi, F2FS_DIRTY_DENTS), | |
1745 | get_pages(sbi, F2FS_DIRTY_IMETA), | |
1746 | free_sections(sbi), | |
1747 | free_segments(sbi), | |
1748 | reserved_segments(sbi), | |
1749 | prefree_segments(sbi)); | |
1750 | ||
fb24fea7 | 1751 | up_write(&sbi->gc_lock); |
7bc09003 | 1752 | |
7dda2af8 | 1753 | put_gc_inode(&gc_list); |
d530d4d8 | 1754 | |
61f7725a | 1755 | if (sync && !ret) |
d530d4d8 | 1756 | ret = sec_freed ? 0 : -EAGAIN; |
43727527 | 1757 | return ret; |
7bc09003 JK |
1758 | } |
1759 | ||
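For context, the foreground path of f2fs_gc() can also be driven from user space through the GC ioctl. Below is a minimal, hedged sketch; the ioctl magic/number (0xf5 / 6) and the __u32 sync argument are assumptions taken from the f2fs ioctl table and should be checked against the headers of the target kernel (prefer <linux/f2fs.h> where it exists).

/* Hedged user-space sketch: ask f2fs to run one synchronous GC pass. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

/* Assumed from the f2fs ioctl table; verify against your kernel headers. */
#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)

int main(int argc, char **argv)
{
	__u32 sync = 1;	/* 1: synchronous foreground GC, 0: best-effort */
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file-or-dir-on-f2fs>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &sync);
	if (ret < 0)
		perror("F2FS_IOC_GARBAGE_COLLECT");
	close(fd);
	return ret < 0;
}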
093749e2 CY |
1760 | int __init f2fs_create_garbage_collection_cache(void) |
1761 | { | |
1762 | victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry", | |
1763 | sizeof(struct victim_entry)); | |
1764 | if (!victim_entry_slab) | |
1765 | return -ENOMEM; | |
1766 | return 0; | |
1767 | } | |
1768 | ||
1769 | void f2fs_destroy_garbage_collection_cache(void) | |
1770 | { | |
1771 | kmem_cache_destroy(victim_entry_slab); | |
1772 | } | |
1773 | ||
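The two helpers above are intended to be paired at module init/exit time. A minimal sketch of that pairing follows; the function names are hypothetical placeholders, since the real calls live in the filesystem's module init path rather than in this file.

/* Hedged sketch: pair the GC slab cache helpers in a module init/exit path. */
static int __init example_f2fs_init(void)
{
	int err;

	err = f2fs_create_garbage_collection_cache();
	if (err)
		return err;	/* -ENOMEM when the slab cannot be created */

	/* ... create other caches, register the filesystem ... */
	return 0;
}

static void __exit example_f2fs_exit(void)
{
	/* ... unregister the filesystem, drop other caches ... */
	f2fs_destroy_garbage_collection_cache();
}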
1774 | static void init_atgc_management(struct f2fs_sb_info *sbi) | |
1775 | { | |
1776 | struct atgc_management *am = &sbi->am; | |
1777 | ||
1778 | if (test_opt(sbi, ATGC) && | |
1779 | SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD) | |
1780 | am->atgc_enabled = true; | |
1781 | ||
1782 | am->root = RB_ROOT_CACHED; | |
1783 | INIT_LIST_HEAD(&am->victim_list); | |
1784 | am->victim_count = 0; | |
1785 | ||
1786 | am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO; | |
1787 | am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT; | |
1788 | am->age_weight = DEF_GC_THREAD_AGE_WEIGHT; | |
1789 | } | |
1790 | ||
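As the check above shows, ATGC is only armed when the filesystem was mounted with the atgc option and SIT_I(sbi)->elapsed_time has passed DEF_GC_THREAD_AGE_THRESHOLD. A hedged user-space sketch of enabling the option at mount time follows; the device and mount point are illustrative placeholders.

/* Hedged sketch: mount an f2fs volume with the "atgc" option enabled. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* device and mount point are placeholders for this example */
	if (mount("/dev/vdb", "/mnt/f2fs", "f2fs", 0, "atgc") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}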
4d57b86d | 1791 | void f2fs_build_gc_manager(struct f2fs_sb_info *sbi) |
7bc09003 JK |
1792 | { |
1793 | DIRTY_I(sbi)->v_ops = &default_v_ops; | |
e93b9865 | 1794 | |
1ad71a27 | 1795 | sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; |
d5793249 JK |
1796 | |
1797 | /* assign the warm/cold data area from the slower device */ | |
0916878d | 1798 | if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi)) |
d5793249 JK |
1799 | SIT_I(sbi)->last_victim[ALLOC_NEXT] = |
1800 | GET_SEGNO(sbi, FDEV(0).end_blk) + 1; | |
093749e2 CY |
1801 | |
1802 | init_atgc_management(sbi); | |
7bc09003 | 1803 | } |
04f0b2ea | 1804 | |
b4b10061 JK |
1805 | static int free_segment_range(struct f2fs_sb_info *sbi, |
1806 | unsigned int secs, bool gc_only) | |
04f0b2ea | 1807 | { |
b4b10061 JK |
1808 | unsigned int segno, next_inuse, start, end; |
1809 | struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; | |
1810 | int gc_mode, gc_type; | |
04f0b2ea | 1811 | int err = 0; |
b4b10061 JK |
1812 | int type; |
1813 | ||
1814 | /* Force block allocation for GC */ | |
1815 | MAIN_SECS(sbi) -= secs; | |
1816 | start = MAIN_SECS(sbi) * sbi->segs_per_sec; | |
1817 | end = MAIN_SEGS(sbi) - 1; | |
1818 | ||
1819 | mutex_lock(&DIRTY_I(sbi)->seglist_lock); | |
1820 | for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++) | |
1821 | if (SIT_I(sbi)->last_victim[gc_mode] >= start) | |
1822 | SIT_I(sbi)->last_victim[gc_mode] = 0; | |
1823 | ||
1824 | for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++) | |
1825 | if (sbi->next_victim_seg[gc_type] >= start) | |
1826 | sbi->next_victim_seg[gc_type] = NULL_SEGNO; | |
1827 | mutex_unlock(&DIRTY_I(sbi)->seglist_lock); | |
04f0b2ea QS |
1828 | |
1829 | /* Move out cursegs from the target range */ | |
d0b9e42a | 1830 | for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) |
0ef81833 | 1831 | f2fs_allocate_segment_for_resize(sbi, type, start, end); |
04f0b2ea QS |
1832 | |
1833 | /* do GC to move out valid blocks in the range */ | |
1834 | for (segno = start; segno <= end; segno += sbi->segs_per_sec) { | |
1835 | struct gc_inode_list gc_list = { | |
1836 | .ilist = LIST_HEAD_INIT(gc_list.ilist), | |
1837 | .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS), | |
1838 | }; | |
1839 | ||
04f0b2ea | 1840 | do_garbage_collect(sbi, segno, &gc_list, FG_GC); |
04f0b2ea QS |
1841 | put_gc_inode(&gc_list); |
1842 | ||
b4b10061 JK |
1843 | if (!gc_only && get_valid_blocks(sbi, segno, true)) { |
1844 | err = -EAGAIN; | |
1845 | goto out; | |
1846 | } | |
1847 | if (fatal_signal_pending(current)) { | |
1848 | err = -ERESTARTSYS; | |
1849 | goto out; | |
1850 | } | |
04f0b2ea | 1851 | } |
b4b10061 JK |
1852 | if (gc_only) |
1853 | goto out; | |
04f0b2ea | 1854 | |
b4b10061 | 1855 | err = f2fs_write_checkpoint(sbi, &cpc); |
04f0b2ea | 1856 | if (err) |
b4b10061 | 1857 | goto out; |
04f0b2ea QS |
1858 | |
1859 | next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start); | |
1860 | if (next_inuse <= end) { | |
dcbb4c10 JP |
1861 | f2fs_err(sbi, "segno %u should be free but still inuse!", |
1862 | next_inuse); | |
04f0b2ea QS |
1863 | f2fs_bug_on(sbi, 1); |
1864 | } | |
b4b10061 JK |
1865 | out: |
1866 | MAIN_SECS(sbi) += secs; | |
04f0b2ea QS |
1867 | return err; |
1868 | } | |
1869 | ||
1870 | static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) | |
1871 | { | |
1872 | struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi); | |
a4ba5dfc CY |
1873 | int section_count; |
1874 | int segment_count; | |
1875 | int segment_count_main; | |
1876 | long long block_count; | |
04f0b2ea QS |
1877 | int segs = secs * sbi->segs_per_sec; |
1878 | ||
a4ba5dfc CY |
1879 | down_write(&sbi->sb_lock); |
1880 | ||
1881 | section_count = le32_to_cpu(raw_sb->section_count); | |
1882 | segment_count = le32_to_cpu(raw_sb->segment_count); | |
1883 | segment_count_main = le32_to_cpu(raw_sb->segment_count_main); | |
1884 | block_count = le64_to_cpu(raw_sb->block_count); | |
1885 | ||
04f0b2ea QS |
1886 | raw_sb->section_count = cpu_to_le32(section_count + secs); |
1887 | raw_sb->segment_count = cpu_to_le32(segment_count + segs); | |
1888 | raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs); | |
1889 | raw_sb->block_count = cpu_to_le64(block_count + | |
1890 | (long long)segs * sbi->blocks_per_seg); | |
46d9ce19 QS |
1891 | if (f2fs_is_multi_device(sbi)) { |
1892 | int last_dev = sbi->s_ndevs - 1; | |
1893 | int dev_segs = | |
1894 | le32_to_cpu(raw_sb->devs[last_dev].total_segments); | |
1895 | ||
1896 | raw_sb->devs[last_dev].total_segments = | |
1897 | cpu_to_le32(dev_segs + segs); | |
1898 | } | |
a4ba5dfc CY |
1899 | |
1900 | up_write(&sbi->sb_lock); | |
04f0b2ea QS |
1901 | } |
1902 | ||
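The secs -> segs -> blocks conversion above is plain arithmetic; the hedged helper below restates it in one place. As a worked example with hypothetical geometry, segs_per_sec = 4 and blocks_per_seg = 512 mean that shrinking by 2 sections removes 8 segments and 4096 blocks from block_count.

/* Hedged helper sketch (the name is made up): the same delta computation. */
static long long resize_delta_blocks(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;

	return (long long)segs * sbi->blocks_per_seg;
}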
1903 | static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) | |
1904 | { | |
1905 | int segs = secs * sbi->segs_per_sec; | |
46d9ce19 | 1906 | long long blks = (long long)segs * sbi->blocks_per_seg; |
04f0b2ea QS |
1907 | long long user_block_count = |
1908 | le64_to_cpu(F2FS_CKPT(sbi)->user_block_count); | |
1909 | ||
1910 | SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs; | |
1911 | MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs; | |
b4b10061 | 1912 | MAIN_SECS(sbi) += secs; |
04f0b2ea QS |
1913 | FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs; |
1914 | FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs; | |
46d9ce19 QS |
1915 | F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks); |
1916 | ||
1917 | if (f2fs_is_multi_device(sbi)) { | |
1918 | int last_dev = sbi->s_ndevs - 1; | |
1919 | ||
1920 | FDEV(last_dev).total_segments = | |
1921 | (int)FDEV(last_dev).total_segments + segs; | |
1922 | FDEV(last_dev).end_blk = | |
1923 | (long long)FDEV(last_dev).end_blk + blks; | |
1924 | #ifdef CONFIG_BLK_DEV_ZONED | |
1925 | FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz + | |
1926 | (int)(blks >> sbi->log_blocks_per_blkz); | |
1927 | #endif | |
1928 | } | |
04f0b2ea QS |
1929 | } |
1930 | ||
1931 | int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) | |
1932 | { | |
1933 | __u64 old_block_count, shrunk_blocks; | |
b4b10061 | 1934 | struct cp_control cpc = { CP_RESIZE, 0, 0, 0 }; |
04f0b2ea | 1935 | unsigned int secs; |
04f0b2ea QS |
1936 | int err = 0; |
1937 | __u32 rem; | |
1938 | ||
1939 | old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count); | |
1940 | if (block_count > old_block_count) | |
1941 | return -EINVAL; | |
1942 | ||
46d9ce19 QS |
1943 | if (f2fs_is_multi_device(sbi)) { |
1944 | int last_dev = sbi->s_ndevs - 1; | |
1945 | __u64 last_segs = FDEV(last_dev).total_segments; | |
1946 | ||
1947 | if (block_count + last_segs * sbi->blocks_per_seg <= | |
1948 | old_block_count) | |
1949 | return -EINVAL; | |
1950 | } | |
1951 | ||
04f0b2ea QS |
1952 | /* the new fs size should be aligned to the section size */ |
1953 | div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem); | |
1954 | if (rem) | |
1955 | return -EINVAL; | |
1956 | ||
1957 | if (block_count == old_block_count) | |
1958 | return 0; | |
1959 | ||
1960 | if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { | |
dcbb4c10 | 1961 | f2fs_err(sbi, "Should run fsck to repair first."); |
10f966bb | 1962 | return -EFSCORRUPTED; |
04f0b2ea QS |
1963 | } |
1964 | ||
1965 | if (test_opt(sbi, DISABLE_CHECKPOINT)) { | |
dcbb4c10 | 1966 | f2fs_err(sbi, "Checkpoint should be enabled."); |
04f0b2ea QS |
1967 | return -EINVAL; |
1968 | } | |
1969 | ||
04f0b2ea QS |
1970 | shrunk_blocks = old_block_count - block_count; |
1971 | secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); | |
b4b10061 JK |
1972 | |
1973 | /* stop other GC */ | |
1974 | if (!down_write_trylock(&sbi->gc_lock)) | |
1975 | return -EAGAIN; | |
1976 | ||
1977 | /* stop CP to protect MAIN_SEC in free_segment_range */ | |
1978 | f2fs_lock_op(sbi); | |
1979 | err = free_segment_range(sbi, secs, true); | |
1980 | f2fs_unlock_op(sbi); | |
1981 | up_write(&sbi->gc_lock); | |
1982 | if (err) | |
1983 | return err; | |
1984 | ||
1985 | set_sbi_flag(sbi, SBI_IS_RESIZEFS); | |
1986 | ||
1987 | freeze_super(sbi->sb); | |
1988 | down_write(&sbi->gc_lock); | |
1989 | mutex_lock(&sbi->cp_mutex); | |
1990 | ||
04f0b2ea QS |
1991 | spin_lock(&sbi->stat_lock); |
1992 | if (shrunk_blocks + valid_user_blocks(sbi) + | |
1993 | sbi->current_reserved_blocks + sbi->unusable_block_count + | |
1994 | F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count) | |
1995 | err = -ENOSPC; | |
1996 | else | |
1997 | sbi->user_block_count -= shrunk_blocks; | |
1998 | spin_unlock(&sbi->stat_lock); | |
b4b10061 JK |
1999 | if (err) |
2000 | goto out_err; | |
04f0b2ea | 2001 | |
b4b10061 | 2002 | err = free_segment_range(sbi, secs, false); |
04f0b2ea | 2003 | if (err) |
b4b10061 | 2004 | goto recover_out; |
04f0b2ea QS |
2005 | |
2006 | update_sb_metadata(sbi, -secs); | |
2007 | ||
2008 | err = f2fs_commit_super(sbi, false); | |
2009 | if (err) { | |
2010 | update_sb_metadata(sbi, secs); | |
b4b10061 | 2011 | goto recover_out; |
04f0b2ea QS |
2012 | } |
2013 | ||
2014 | update_fs_metadata(sbi, -secs); | |
2015 | clear_sbi_flag(sbi, SBI_IS_RESIZEFS); | |
68275682 | 2016 | set_sbi_flag(sbi, SBI_IS_DIRTY); |
68275682 | 2017 | |
b4b10061 | 2018 | err = f2fs_write_checkpoint(sbi, &cpc); |
04f0b2ea QS |
2019 | if (err) { |
2020 | update_fs_metadata(sbi, secs); | |
2021 | update_sb_metadata(sbi, secs); | |
2022 | f2fs_commit_super(sbi, false); | |
2023 | } | |
b4b10061 | 2024 | recover_out: |
04f0b2ea QS |
2025 | if (err) { |
2026 | set_sbi_flag(sbi, SBI_NEED_FSCK); | |
dcbb4c10 | 2027 | f2fs_err(sbi, "resize_fs failed, should run fsck to repair!"); |
04f0b2ea | 2028 | |
04f0b2ea QS |
2029 | spin_lock(&sbi->stat_lock); |
2030 | sbi->user_block_count += shrunk_blocks; | |
2031 | spin_unlock(&sbi->stat_lock); | |
2032 | } | |
b4b10061 JK |
2033 | out_err: |
2034 | mutex_unlock(&sbi->cp_mutex); | |
2035 | up_write(&sbi->gc_lock); | |
2036 | thaw_super(sbi->sb); | |
04f0b2ea | 2037 | clear_sbi_flag(sbi, SBI_IS_RESIZEFS); |
04f0b2ea QS |
2038 | return err; |
2039 | } |
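f2fs_resize_fs() is normally reached through the resize ioctl. The sketch below is a hedged user-space example; the ioctl magic/number (0xf5 / 16) and the __u64 argument are assumptions taken from the f2fs ioctl table and should be verified against the target kernel's headers. As the checks above require, the new block count must be section-aligned and no larger than the current size.

/* Hedged user-space sketch: shrink an f2fs volume to a new block count. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>
#include <linux/types.h>

/* Assumed from the f2fs ioctl table; verify against your kernel headers. */
#define F2FS_IOCTL_MAGIC	0xf5
#define F2FS_IOC_RESIZE_FS	_IOW(F2FS_IOCTL_MAGIC, 16, __u64)

int main(int argc, char **argv)
{
	__u64 block_count;
	int fd, ret;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mountpoint> <new-block-count>\n",
			argv[0]);
		return 1;
	}
	block_count = strtoull(argv[2], NULL, 0);

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = ioctl(fd, F2FS_IOC_RESIZE_FS, &block_count);
	if (ret < 0)
		perror("F2FS_IOC_RESIZE_FS");
	close(fd);
	return ret < 0;
}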