// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> None
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> None
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
static struct kmem_cache *fsync_entry_slab;

bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
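/*
 * Look up @ino in the list of inodes collected for recovery. The list only
 * holds inodes that were fsynced after the last checkpoint, so a linear
 * scan is cheap.
 */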
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
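/*
 * Instantiate the inode for @ino and append a new fsync_inode_entry for it
 * to @head. When the inode page itself was just re-created from the node
 * chain (@quota_inode), the inode quota is charged here as well.
 */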
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
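/*
 * Set up an f2fs_filename for the name stored in the on-disk inode, so the
 * dentry can be searched for (and re-added) in its parent directory during
 * recovery.
 */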
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}
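/*
 * Re-link the recovered inode into its parent directory: look the name up
 * via the parent (i_pino); if a stale dentry with the same name points at a
 * different inode, delete that dentry (orphaning the old inode) and retry,
 * otherwise add the missing dentry.
 */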
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
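/*
 * If the on-disk uid/gid differ from the in-memory ones, transfer quota
 * charges to the recovered owner before recover_inode() overwrites the ids.
 */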
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
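/*
 * Roll the in-memory inode forward to the state recorded in the fsynced
 * inode page: mode, uid/gid, project id, size, timestamps and flags.
 */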
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
				le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}
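/*
 * Pass 1 of recovery: walk the warm node chain starting at the block after
 * the last checkpointed position, and collect every inode that has a dnode
 * carrying an fsync mark.
 */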
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
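/*
 * @blkaddr was written after the last checkpoint, but an older node may
 * still hold a stale index for it. Find that node through the segment
 * summary and truncate the old reference so the block is not owned twice.
 */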
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
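/*
 * Pass 2 worker: replay one fsynced dnode page onto the current tree --
 * xattrs first, then inline data, then every block address the dnode
 * covers.
 */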
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}
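/*
 * Pass 2 of recovery: walk the same node chain as find_fsync_dnodes(), and
 * for each block that belongs to a collected inode, replay the inode
 * update, the dentry, and the data indices.
 */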
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
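/*
 * Entry point for roll-forward recovery at mount time. With @check_only,
 * only report (by returning 1) whether there is fsynced data to recover;
 * otherwise run both passes and write a CP_RECOVERY checkpoint.
 */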
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}