/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x).
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */

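/*
 * In the diagrams above, F and D denote the fsync and dentry marks written
 * with each node block; they are checked below via is_fsync_dnode() and
 * is_dent_dnode(), and the scenarios are resolved by find_fsync_dnodes()
 * and recover_data().
 */
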
static struct kmem_cache *fsync_entry_slab;

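/*
 * Return true if there is enough unallocated space left to replay the
 * fsynced data without exceeding the user block count.
 */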
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
        s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

        if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
                return false;
        return true;
}

static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
                                                                nid_t ino)
{
        struct fsync_inode_entry *entry;

        list_for_each_entry(entry, head, list)
                if (entry->inode->i_ino == ino)
                        return entry;

        return NULL;
}

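/*
 * Read the inode for @ino, wrap it in a new fsync_inode_entry and append
 * the entry to @head.  Returns the entry, or an ERR_PTR() if the inode
 * cannot be read.
 */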
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
                                        struct list_head *head, nid_t ino)
{
        struct inode *inode;
        struct fsync_inode_entry *entry;

        inode = f2fs_iget_retry(sbi->sb, ino);
        if (IS_ERR(inode))
                return ERR_CAST(inode);

        entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
        entry->inode = inode;
        list_add_tail(&entry->list, head);

        return entry;
}

static void del_fsync_inode(struct fsync_inode_entry *entry)
{
        iput(entry->inode);
        list_del(&entry->list);
        kmem_cache_free(fsync_entry_slab, entry);
}

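/*
 * Re-link @inode into its parent directory using the name recorded in the
 * raw inode of @ipage.  Any existing entry with the same name that points
 * to a different inode is deleted first, with the displaced inode handled
 * as an orphan.  The parent directory inode is cached in @dir_list so it
 * can be dropped after the recovery checkpoint.
 */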
static int recover_dentry(struct inode *inode, struct page *ipage,
                                                struct list_head *dir_list)
{
        struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
        nid_t pino = le32_to_cpu(raw_inode->i_pino);
        struct f2fs_dir_entry *de;
        struct fscrypt_name fname;
        struct page *page;
        struct inode *dir, *einode;
        struct fsync_inode_entry *entry;
        int err = 0;
        char *name;

        entry = get_fsync_inode(dir_list, pino);
        if (!entry) {
                entry = add_fsync_inode(F2FS_I_SB(inode), dir_list, pino);
                if (IS_ERR(entry)) {
                        dir = ERR_CAST(entry);
                        err = PTR_ERR(entry);
                        goto out;
                }
        }

        dir = entry->inode;

        memset(&fname, 0, sizeof(struct fscrypt_name));
        fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
        fname.disk_name.name = raw_inode->i_name;

        if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
                WARN_ON(1);
                err = -ENAMETOOLONG;
                goto out;
        }
retry:
        de = __f2fs_find_entry(dir, &fname, &page);
        if (de && inode->i_ino == le32_to_cpu(de->ino))
                goto out_unmap_put;

        if (de) {
                einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
                if (IS_ERR(einode)) {
                        WARN_ON(1);
                        err = PTR_ERR(einode);
                        if (err == -ENOENT)
                                err = -EEXIST;
                        goto out_unmap_put;
                }
                err = acquire_orphan_inode(F2FS_I_SB(inode));
                if (err) {
                        iput(einode);
                        goto out_unmap_put;
                }
                f2fs_delete_entry(de, page, dir, einode);
                iput(einode);
                goto retry;
        } else if (IS_ERR(page)) {
                err = PTR_ERR(page);
        } else {
                err = __f2fs_do_add_link(dir, &fname, inode,
                                        inode->i_ino, inode->i_mode);
        }
        if (err == -ENOMEM)
                goto retry;
        goto out;

out_unmap_put:
        f2fs_dentry_kunmap(dir, page);
        f2fs_put_page(page, 0);
out:
        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = raw_inode->i_name;
        f2fs_msg(inode->i_sb, KERN_NOTICE,
                        "%s: ino = %x, name = %s, dir = %lx, err = %d",
                        __func__, ino_of_node(ipage), name,
                        IS_ERR(dir) ? 0 : dir->i_ino, err);
        return err;
}

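/*
 * Copy the metadata recorded in the fsynced node page (i_mode, i_size,
 * timestamps and i_advise) back into the in-memory inode.
 */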
static void recover_inode(struct inode *inode, struct page *page)
{
        struct f2fs_inode *raw = F2FS_INODE(page);
        char *name;

        inode->i_mode = le16_to_cpu(raw->i_mode);
        f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
        inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

        F2FS_I(inode)->i_advise = raw->i_advise;

        if (file_enc_name(inode))
                name = "<encrypted>";
        else
                name = F2FS_INODE(page)->i_name;

        f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
                        ino_of_node(page), name);
}

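/*
 * First pass of roll-forward recovery: walk the warm node chain written
 * after the last checkpoint and collect, in @head, every inode that has
 * fsync-marked dnodes to replay.
 */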
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        block_t blkaddr;
        int err = 0;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
                        return 0;

                page = get_tmp_page(sbi, blkaddr);

                if (!is_recoverable_dnode(page))
                        break;

                if (!is_fsync_dnode(page))
                        goto next;

                entry = get_fsync_inode(head, ino_of_node(page));
                if (!entry) {
                        if (IS_INODE(page) && is_dent_dnode(page)) {
                                err = recover_inode_page(sbi, page);
                                if (err)
                                        break;
                        }

                        /*
                         * CP | dnode(F) | inode(DF)
                         * For this case, we should not give up now.
                         */
                        entry = add_fsync_inode(sbi, head, ino_of_node(page));
                        if (IS_ERR(entry)) {
                                err = PTR_ERR(entry);
                                if (err == -ENOENT) {
                                        err = 0;
                                        goto next;
                                }
                                break;
                        }
                }
                entry->blkaddr = blkaddr;

                if (IS_INODE(page) && is_dent_dnode(page))
                        entry->last_dentry = blkaddr;
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);

                ra_meta_pages_cond(sbi, blkaddr);
        }
        f2fs_put_page(page, 1);
        return err;
}

static void destroy_fsync_dnodes(struct list_head *head)
{
        struct fsync_inode_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, head, list)
                del_fsync_inode(entry);
}

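/*
 * @blkaddr is about to be reused for recovered data.  If an older node
 * page still holds an index pointing at it, look the owner up through the
 * segment summary and truncate that stale index first.
 */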
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
                                block_t blkaddr, struct dnode_of_data *dn)
{
        struct seg_entry *sentry;
        unsigned int segno = GET_SEGNO(sbi, blkaddr);
        unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
        struct f2fs_summary_block *sum_node;
        struct f2fs_summary sum;
        struct page *sum_page, *node_page;
        struct dnode_of_data tdn = *dn;
        nid_t ino, nid;
        struct inode *inode;
        unsigned int offset;
        block_t bidx;
        int i;

        sentry = get_seg_entry(sbi, segno);
        if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
                return 0;

        /* Get the previous summary */
        for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
                struct curseg_info *curseg = CURSEG_I(sbi, i);
                if (curseg->segno == segno) {
                        sum = curseg->sum_blk->entries[blkoff];
                        goto got_it;
                }
        }

        sum_page = get_sum_page(sbi, segno);
        sum_node = (struct f2fs_summary_block *)page_address(sum_page);
        sum = sum_node->entries[blkoff];
        f2fs_put_page(sum_page, 1);
got_it:
        /* Use the locked dnode page and inode */
        nid = le32_to_cpu(sum.nid);
        if (dn->inode->i_ino == nid) {
                tdn.nid = nid;
                if (!dn->inode_page_locked)
                        lock_page(dn->inode_page);
                tdn.node_page = dn->inode_page;
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        } else if (dn->nid == nid) {
                tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
                goto truncate_out;
        }

        /* Get the node page */
        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return PTR_ERR(node_page);

        offset = ofs_of_node(node_page);
        ino = ino_of_node(node_page);
        f2fs_put_page(node_page, 1);

        if (ino != dn->inode->i_ino) {
                /* Deallocate previous index in the node page */
                inode = f2fs_iget_retry(sbi->sb, ino);
                if (IS_ERR(inode))
                        return PTR_ERR(inode);
        } else {
                inode = dn->inode;
        }

        bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);

        /*
         * If the inode page is locked, unlock it temporarily, but keep its
         * reference count alive.
         */
        if (ino == dn->inode->i_ino && dn->inode_page_locked)
                unlock_page(dn->inode_page);

        set_new_dnode(&tdn, inode, NULL, NULL, 0);
        if (get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
                goto out;

        if (tdn.data_blkaddr == blkaddr)
                truncate_data_blocks_range(&tdn, 1);

        f2fs_put_dnode(&tdn);
out:
        if (ino != dn->inode->i_ino)
                iput(inode);
        else if (dn->inode_page_locked)
                lock_page(dn->inode_page);
        return 0;

truncate_out:
        if (datablock_addr(tdn.node_page, tdn.ofs_in_node) == blkaddr)
                truncate_data_blocks_range(&tdn, 1);
        if (dn->inode->i_ino == nid && !dn->inode_page_locked)
                unlock_page(dn->inode_page);
        return 0;
}

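/*
 * Replay a single fsynced dnode for @inode: recover xattr and inline data
 * when present, then copy the recorded block addresses into the current
 * dnode page, reserving new blocks or invalidating stale ones as needed.
 */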
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
{
        struct dnode_of_data dn;
        struct node_info ni;
        unsigned int start, end;
        int err = 0, recovered = 0;

        /* step 1: recover xattr */
        if (IS_INODE(page)) {
                recover_inline_xattr(inode, page);
        } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
                err = recover_xattr_data(inode, page, blkaddr);
                if (!err)
                        recovered++;
                goto out;
        }

        /* step 2: recover inline data */
        if (recover_inline_data(inode, page))
                goto out;

        /* step 3: recover data indices */
        start = start_bidx_of_node(ofs_of_node(page), inode);
        end = start + ADDRS_PER_PAGE(page, inode);

        set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
        err = get_dnode_of_data(&dn, start, ALLOC_NODE);
        if (err) {
                if (err == -ENOMEM) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry_dn;
                }
                goto out;
        }

        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
        f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

        for (; start < end; start++, dn.ofs_in_node++) {
                block_t src, dest;

                src = datablock_addr(dn.node_page, dn.ofs_in_node);
                dest = datablock_addr(page, dn.ofs_in_node);

                /* skip recovering if dest is the same as src */
                if (src == dest)
                        continue;

                /* dest is invalid, just invalidate src block */
                if (dest == NULL_ADDR) {
                        truncate_data_blocks_range(&dn, 1);
                        continue;
                }

                if (!file_keep_isize(inode) &&
                        (i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
                        f2fs_i_size_write(inode,
                                (loff_t)(start + 1) << PAGE_SHIFT);

                /*
                 * dest is reserved block, invalidate src block
                 * and then reserve one new block in dnode page.
                 */
                if (dest == NEW_ADDR) {
                        truncate_data_blocks_range(&dn, 1);
                        reserve_new_block(&dn);
                        continue;
                }

                /* dest is valid block, try to recover from src to dest */
                if (is_valid_blkaddr(sbi, dest, META_POR)) {

                        if (src == NULL_ADDR) {
                                err = reserve_new_block(&dn);
#ifdef CONFIG_F2FS_FAULT_INJECTION
                                while (err)
                                        err = reserve_new_block(&dn);
#endif
                                /* We should not get -ENOSPC */
                                f2fs_bug_on(sbi, err);
                                if (err)
                                        goto err;
                        }
retry_prev:
                        /* Check the previous node page having this index */
                        err = check_index_in_prev_nodes(sbi, dest, &dn);
                        if (err) {
                                if (err == -ENOMEM) {
                                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                                        goto retry_prev;
                                }
                                goto err;
                        }

                        /* write dummy data page */
                        f2fs_replace_block(sbi, &dn, src, dest,
                                                ni.version, false, false);
                        recovered++;
                }
        }

        copy_node_footer(dn.node_page, page);
        fill_node_footer(dn.node_page, dn.nid, ni.ino,
                                        ofs_of_node(page), false);
        set_page_dirty(dn.node_page);
err:
        f2fs_put_dnode(&dn);
out:
        f2fs_msg(sbi->sb, KERN_NOTICE,
                "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
                inode->i_ino,
                file_keep_isize(inode) ? "keep" : "recover",
                recovered, err);
        return err;
}

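/*
 * Second pass of roll-forward recovery: walk the warm node chain again
 * and, for every inode collected by find_fsync_dnodes(), update the inode
 * metadata, re-link its dentry if needed and replay its dnodes.
 */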
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
                                                struct list_head *dir_list)
{
        struct curseg_info *curseg;
        struct page *page = NULL;
        int err = 0;
        block_t blkaddr;

        /* get node pages in the current segment */
        curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
        blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

        while (1) {
                struct fsync_inode_entry *entry;

                if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
                        break;

                ra_meta_pages_cond(sbi, blkaddr);

                page = get_tmp_page(sbi, blkaddr);

                if (!is_recoverable_dnode(page)) {
                        f2fs_put_page(page, 1);
                        break;
                }

                entry = get_fsync_inode(inode_list, ino_of_node(page));
                if (!entry)
                        goto next;
                /*
                 * inode(x) | CP | inode(x) | dnode(F)
                 * In this case, we can lose the latest inode(x).
                 * So, call recover_inode for the inode update.
                 */
                if (IS_INODE(page))
                        recover_inode(entry->inode, page);
                if (entry->last_dentry == blkaddr) {
                        err = recover_dentry(entry->inode, page, dir_list);
                        if (err) {
                                f2fs_put_page(page, 1);
                                break;
                        }
                }
                err = do_recover_data(sbi, entry->inode, page, blkaddr);
                if (err) {
                        f2fs_put_page(page, 1);
                        break;
                }

                if (entry->blkaddr == blkaddr)
                        del_fsync_inode(entry);
next:
                /* check next segment */
                blkaddr = next_blkaddr_of_node(page);
                f2fs_put_page(page, 1);
        }
        if (!err)
                allocate_new_segments(sbi);
        return err;
}

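/*
 * Entry point for roll-forward recovery at mount time.  Collects the
 * fsynced inodes written after the last checkpoint, replays their data,
 * and writes a recovery checkpoint on success.  When @check_only is set,
 * only reports whether there is anything to recover (returns 1 if so).
 */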
int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
        struct list_head inode_list;
        struct list_head dir_list;
        int err;
        int ret = 0;
        bool need_writecp = false;

        fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
                        sizeof(struct fsync_inode_entry));
        if (!fsync_entry_slab)
                return -ENOMEM;

        INIT_LIST_HEAD(&inode_list);
        INIT_LIST_HEAD(&dir_list);

        /* prevent checkpoint */
        mutex_lock(&sbi->cp_mutex);

        /* step #1: find fsynced inode numbers */
        err = find_fsync_dnodes(sbi, &inode_list);
        if (err || list_empty(&inode_list))
                goto out;

        if (check_only) {
                ret = 1;
                goto out;
        }

        need_writecp = true;

        /* step #2: recover data */
        err = recover_data(sbi, &inode_list, &dir_list);
        if (!err)
                f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
        destroy_fsync_dnodes(&inode_list);

        /* truncate meta pages to be used by the recovery */
        truncate_inode_pages_range(META_MAPPING(sbi),
                        (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

        if (err) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }

        clear_sbi_flag(sbi, SBI_POR_DOING);
        if (err)
                set_ckpt_flags(sbi, CP_ERROR_FLAG);
        mutex_unlock(&sbi->cp_mutex);

        /* let's drop all the directory inodes for clean checkpoint */
        destroy_fsync_dnodes(&dir_list);

        if (!err && need_writecp) {
                struct cp_control cpc = {
                        .reason = CP_RECOVERY,
                };
                err = write_checkpoint(sbi, &cpc);
        }

        kmem_cache_destroy(fsync_entry_slab);
        return ret ? ret : err;
}