/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

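/*
 * For orientation: each fsync'ed inode found by the scan below is tracked
 * with a struct fsync_inode_entry allocated from fsync_entry_slab. The real
 * definition lives in f2fs.h; reconstructed roughly from its usage in this
 * file (field order and comments are illustrative, not authoritative):
 *
 *	struct fsync_inode_entry {
 *		struct list_head list;	// node in the recovery inode list
 *		struct inode *inode;	// VFS inode being recovered
 *		block_t blkaddr;	// block address of the last fsync dnode
 *		block_t last_dentry;	// block address of the last dentry block
 *		block_t last_inode;	// block address of the last inode block
 *	};
 */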
static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}

static int recover_dentry(struct inode *inode, struct page *ipage)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = f2fs_iget(inode->i_sb, pino);
	if (IS_ERR(dir)) {
		err = PTR_ERR(dir);
		goto out;
	}

	if (file_enc_name(inode)) {
		iput(dir);
		return 0;
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out_err;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;

	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
	if (err)
		goto out_err;

	if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
		iput(dir);
	} else {
		add_dirty_dir_inode(dir);
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
	}

	goto out;

out_unmap_put:
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);
out_err:
	iput(dir);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

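/*
 * recover_inode() replays the inode metadata (mode, size, timestamps) from
 * the fsync'ed inode block onto the in-memory inode, covering scenario 1
 * above where the latest inode(x) would otherwise be lost.
 */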
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(page), name);
}

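/*
 * find_fsync_dnodes() walks the warm node log starting at the first free
 * block after the last checkpoint, following next_blkaddr_of_node() links,
 * and collects every inode that has an fsync-marked dnode to replay. The
 * walk stops once a block no longer carries the current checkpoint version.
 */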
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	ra_meta_pages(sbi, blkaddr, 1, META_POR);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
			if (!entry) {
				err = -ENOMEM;
				break;
			}
			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page)) {
			entry->last_inode = blkaddr;
			if (is_dent_dnode(page))
				entry->last_dentry = blkaddr;
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

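/*
 * check_index_in_prev_nodes() handles the case where the destination block
 * of a recovered index is still marked valid in the SIT: it looks up the
 * summary entry for that block, finds the older node page (possibly of a
 * different inode) that still references it, and truncates that stale index
 * so the block can be reused for the recovered data.
 */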
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
					block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		/* Deallocate previous index in the node page */
		inode = f2fs_iget(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	} else {
		inode = dn->inode;
	}

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	/*
	 * If the inode page is locked, unlock it temporarily; its reference
	 * count is still held.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
		truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}

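/*
 * do_recover_data() replays one fsync'ed dnode: it restores any xattr or
 * inline data first, then walks every data index in the node page and makes
 * the dnode under the current checkpoint point at the recovered block
 * addresses, reserving, truncating, or replacing blocks as needed.
 */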
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct node_info ni;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		/*
		 * Deprecated; xattr blocks should be found from the cold log.
		 * But we should keep this for backward compatibility.
		 */
		recover_xattr_data(inode, page, blkaddr);
		goto out;
	}

	/* step 2: recover inline data */
	if (recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = start_bidx_of_node(ofs_of_node(page), fi);
	end = start + ADDRS_PER_PAGE(page, fi);

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			continue;
		}

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			truncate_data_blocks_range(&dn, 1);
			err = reserve_new_block(&dn);
			f2fs_bug_on(sbi, err);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
							ni.version, false);
			recovered++;
		}
	}

	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

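/*
 * recover_data() is the second pass: it walks the same node log again and,
 * for every block that belongs to an inode collected in the first pass,
 * replays the inode, the dentry, and the data indices in order.
 */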
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		ra_meta_pages_cond(sbi, blkaddr);

		page = get_meta_page(sbi, blkaddr);

		if (cp_ver != cpver_of_node(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (entry->last_inode == blkaddr)
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		allocate_new_segments(sbi);
	return err;
}

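/*
 * recover_fsync_data() is the roll-forward entry point, called from the
 * mount path after the checkpoint has been loaded (and only when roll-forward
 * recovery has not been disabled by a mount option). It runs the two passes
 * above under cp_mutex and finishes with a checkpoint when anything was
 * replayed.
 */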
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	struct list_head inode_list;
	block_t blkaddr;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);

	/* truncate meta pages that were used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	if (err) {
		bool invalidate = false;

		if (discard_next_dnode(sbi, blkaddr))
			invalidate = true;

		/* Flush all the NAT/SIT pages */
		while (get_pages(sbi, F2FS_DIRTY_META))
			sync_meta_pages(sbi, META, LONG_MAX);

		/* invalidate temporary meta page */
		if (invalidate)
			invalidate_mapping_pages(META_MAPPING(sbi),
							blkaddr, blkaddr);

		set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
		mutex_unlock(&sbi->cp_mutex);
	} else if (need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		mutex_unlock(&sbi->cp_mutex);
		write_checkpoint(sbi, &cpc);
	} else {
		mutex_unlock(&sbi->cp_mutex);
	}
	return err;
}