// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"

#include <trace/events/f2fs.h>

#ifdef CONFIG_F2FS_FS_COMPRESSION
extern const struct address_space_operations f2fs_compress_aops;
#endif

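/*
 * Mark the VFS inode dirty so it gets written back later; inodes still
 * being created (FI_NEW_INODE) or already queued by f2fs_inode_dirtied()
 * are skipped.
 */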
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (is_inode_flag_set(inode, FI_NEW_INODE))
		return;

	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

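/* Propagate the on-disk F2FS_*_FL flags into the in-core S_* inode flags. */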
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & F2FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & F2FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & F2FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & F2FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & F2FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	if (file_is_encrypt(inode))
		new_fl |= S_ENCRYPTED;
	if (file_is_verity(inode))
		new_fl |= S_VERITY;
	if (flags & F2FS_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|
			S_ENCRYPTED|S_VERITY|S_CASEFOLD);
}

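/*
 * Decode the device number of a special inode (chr/blk/fifo/sock) from
 * the first data-address slots: slot 0 carries an old-format dev_t,
 * slot 1 a new-format one.
 */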
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

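/*
 * Check whether the first data block was written: returns 0 if it was,
 * 1 if the address is not yet valid, and -EFSCORRUPTED if the address
 * fails the block-address sanity check.
 */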
static int __written_first_block(struct f2fs_sb_info *sbi,
					struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (!__is_valid_data_blkaddr(addr))
		return 1;
	if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
		return -EFSCORRUPTED;
	return 0;
}

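/* Encode a chr/blk inode's device number back into the raw inode. */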
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

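/*
 * If any word of the inline data area is non-zero, restore FI_DATA_EXIST
 * in both the in-core and raw inode, and dirty the node page.
 */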
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

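/* Inode checksums apply only when the extra attribute area can hold one. */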
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_sb_has_inode_chksum(sbi))
		return false;

	if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
				i_inode_checksum))
		return false;

	return true;
}

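/*
 * Compute the inode checksum: seed with the ino and generation, then
 * checksum the raw inode while treating the checksum field itself as zero.
 */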
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

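/* Compare the stored inode checksum against a freshly computed one. */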
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
		return true;

#ifdef CONFIG_F2FS_CHECK_FS
	if (!f2fs_enable_inode_chksum(sbi, page))
#else
	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
#endif
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
			  page->index, ino_of_node(page), provided, calculated);

	return provided == calculated;
}

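/* Recompute the inode checksum and store it in the raw inode. */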
void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

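/*
 * Validate an on-disk inode before it goes live. Any inconsistency flags
 * the filesystem for fsck and makes the caller reject the inode.
 */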
static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri = F2FS_INODE(node_page);
	unsigned long long iblocks;

	iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
	if (!iblocks) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
			  __func__, inode->i_ino, iblocks);
		return false;
	}

	if (ino_of_node(node_page) != nid_of_node(node_page)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
			  __func__, inode->i_ino,
			  ino_of_node(node_page), nid_of_node(node_page));
		return false;
	}

	if (f2fs_sb_has_flexible_inline_xattr(sbi)
			&& !f2fs_has_extra_attr(inode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
			!f2fs_sb_has_extra_attr(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
			fi->i_extra_isize % sizeof(__le32)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_extra_isize,
			  F2FS_TOTAL_EXTRA_ATTR_SIZE);
		return false;
	}

	if (f2fs_has_extra_attr(inode) &&
		f2fs_sb_has_flexible_inline_xattr(sbi) &&
		f2fs_has_inline_xattr(inode) &&
		(!fi->i_inline_xattr_size ||
		fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
			  __func__, inode->i_ino, fi->i_inline_xattr_size,
			  MAX_INLINE_XATTR_SIZE);
		return false;
	}

	if (F2FS_I(inode)->extent_tree) {
		struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;

		if (ei->len &&
			(!f2fs_is_valid_blkaddr(sbi, ei->blk,
						DATA_GENERIC_ENHANCE) ||
			!f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
						DATA_GENERIC_ENHANCE))) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
				  __func__, inode->i_ino,
				  ei->blk, ei->fofs, ei->len);
			return false;
		}
	}

	if (f2fs_has_inline_data(inode) &&
			(!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
			  __func__, inode->i_ino, inode->i_mode);
		return false;
	}

	if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
			  __func__, inode->i_ino);
		return false;
	}

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
			fi->i_flags & F2FS_COMPR_FL &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
						i_log_cluster_size)) {
		if (ri->i_compress_algorithm >= COMPRESS_MAX) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"compress algorithm: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_compress_algorithm);
			return false;
		}
		if (le64_to_cpu(ri->i_compr_blocks) >
				SECTOR_TO_BLOCK(inode->i_blocks)) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has inconsistent "
				"i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
				  __func__, inode->i_ino,
				  le64_to_cpu(ri->i_compr_blocks),
				  SECTOR_TO_BLOCK(inode->i_blocks));
			return false;
		}
		if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported "
				"log cluster size: %u, run fsck to fix",
				  __func__, inode->i_ino,
				  ri->i_log_cluster_size);
			return false;
		}
	}

	return true;
}

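/* Read the on-disk inode into the in-core inode and f2fs_inode_info. */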
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;
	int err;

	/* Check if ino is within scope */
	if (f2fs_check_nid_range(sbi, inode->i_ino))
		return -EINVAL;

	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);
	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);
	if (S_ISDIR(inode->i_mode))
		fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		fi->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(ri->i_gc_failures);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	if (S_ISREG(inode->i_mode))
		fi->i_flags &= ~F2FS_PROJINHERIT_FL;
	bitmap_zero(fi->flags, FI_MAX);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	f2fs_init_extent_tree(inode, node_page);

	get_inline_info(inode, ri);
	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
		fi->i_inline_xattr_size = le16_to_cpu(ri->i_inline_xattr_size);
	} else if (f2fs_has_inline_xattr(inode) ||
				f2fs_has_inline_dentry(inode)) {
		fi->i_inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	} else {

		/*
		 * Previous inline data or directory always reserved 200 bytes
		 * in inode layout, even if inline_xattr is disabled. In order
		 * to keep inline_dentry's structure for backward compatibility,
		 * we get the space back only from inline_data.
		 */
		fi->i_inline_xattr_size = 0;
	}
	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* try to recover cold bit for non-dir inode */
	if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) {
		f2fs_wait_on_page_writeback(node_page, NODE, true, true);
		set_cold_node(node_page, false);
		set_page_dirty(node_page);
	}

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);
	if (S_ISREG(inode->i_mode)) {
		err = __written_first_block(sbi, ri);
		if (err < 0) {
			f2fs_put_page(node_page, 1);
			return err;
		}
		if (!err)
			set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
	}

	if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & F2FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);
	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		fi->i_crtime.tv_sec = le64_to_cpu(ri->i_crtime);
		fi->i_crtime.tv_nsec = le32_to_cpu(ri->i_crtime_nsec);
	}
	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_compression(sbi) &&
					(fi->i_flags & F2FS_COMPR_FL)) {
		if (F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
					i_log_cluster_size)) {
			atomic_set(&fi->i_compr_blocks,
					le64_to_cpu(ri->i_compr_blocks));
			fi->i_compress_algorithm = ri->i_compress_algorithm;
			fi->i_log_cluster_size = ri->i_log_cluster_size;
			fi->i_compress_flag = le16_to_cpu(ri->i_compress_flag);
			fi->i_cluster_size = 1 << fi->i_log_cluster_size;
			set_inode_flag(inode, FI_COMPRESSED_FILE);
		}
	}
	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);
	stat_inc_compr_inode(inode);
	stat_add_compr_blocks(inode, atomic_read(&fi->i_compr_blocks));

	return 0;
}

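/*
 * Look up the inode for @ino, reading it from disk if it is not cached,
 * and wire up the per-type inode/file/address-space operations.
 */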
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (ino == F2FS_COMPRESS_INO(sbi))
		goto make_now;
#endif

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		inode->i_mapping->a_ops = &f2fs_compress_aops;
		/*
		 * generic_error_remove_page only truncates pages of regular
		 * inode
		 */
		inode->i_mode |= S_IFREG;
#endif
		mapping_set_gfp_mask(inode->i_mapping,
			GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	} else if (S_ISLNK(inode->i_mode)) {
		if (file_is_encrypt(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);

	if (file_should_truncate(inode)) {
		ret = f2fs_truncate(inode);
		if (ret)
			goto bad_inode;
		file_dont_truncate(inode);
	}

	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	f2fs_inode_synced(inode);
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

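/* Same as f2fs_iget(), but wait and retry on transient -ENOMEM. */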
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			memalloc_retry_wait(GFP_NOFS);
			goto retry;
		}
	}
	return inode;
}

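/* Copy the in-core inode state back into its on-disk node page. */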
void f2fs_update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_wait_on_page_writeback(node_page, NODE, true, true);
	set_page_dirty(node_page);

	f2fs_inode_synced(inode);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);
	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);
	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	if (S_ISDIR(inode->i_mode))
		ri->i_current_depth =
			cpu_to_le32(F2FS_I(inode)->i_current_depth);
	else if (S_ISREG(inode->i_mode))
		ri->i_gc_failures =
			cpu_to_le16(F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN]);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_flexible_inline_xattr(F2FS_I_SB(inode)))
			ri->i_inline_xattr_size =
				cpu_to_le16(F2FS_I(inode)->i_inline_xattr_size);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}

		if (f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_crtime)) {
			ri->i_crtime =
				cpu_to_le64(F2FS_I(inode)->i_crtime.tv_sec);
			ri->i_crtime_nsec =
				cpu_to_le32(F2FS_I(inode)->i_crtime.tv_nsec);
		}

		if (f2fs_sb_has_compression(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
							i_log_cluster_size)) {
			ri->i_compr_blocks =
				cpu_to_le64(atomic_read(
					&F2FS_I(inode)->i_compr_blocks));
			ri->i_compress_algorithm =
				F2FS_I(inode)->i_compress_algorithm;
			ri->i_compress_flag =
				cpu_to_le16(F2FS_I(inode)->i_compress_flag);
			ri->i_log_cluster_size =
				F2FS_I(inode)->i_log_cluster_size;
		}
	}

	__set_inode_rdev(inode, ri);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_page_private_inline(node_page);

	F2FS_I(inode)->i_disk_time[0] = inode->i_atime;
	F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
	F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
	F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
#endif
}

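/* Grab the inode's node page and synchronize the in-core state into it. */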
void f2fs_update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
retry:
	node_page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);

		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return;
	}
	f2fs_update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
}

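/* Writeback entry point used by the VFS to persist a dirty f2fs inode. */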
int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	/*
	 * atime could be updated without dirtying f2fs inode in lazytime mode
	 */
	if (f2fs_is_time_consistent(inode) &&
			!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	if (!f2fs_is_checkpoint_ready(sbi))
		return -ENOSPC;

	/*
	 * We need to balance fs here to prevent from producing dirty node pages
	 * during the urgent cleaning time when running out of free sections.
	 */
	f2fs_update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if ((inode->i_nlink || is_bad_inode(inode)) &&
		test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
		f2fs_invalidate_compress_pages(sbi, inode->i_ino);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi) ||
			inode->i_ino == F2FS_COMPRESS_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	f2fs_remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	err = f2fs_dquot_initialize(inode);
	if (err) {
		err = 0;
		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}

	f2fs_remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(sbi, FAULT_EVICT_INODE);
		err = -EIO;
	}

	if (!err) {
		f2fs_lock_op(sbi);
		err = f2fs_remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err) {
		f2fs_update_inode_page(inode);
		if (dquot_initialize_needed(inode))
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
	}
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);
	stat_dec_compr_inode(inode);
	stat_sub_compr_blocks(inode,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));

	if (likely(!f2fs_cp_error(sbi) &&
			!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* if f2fs_new_inode() failed, .i_ino is zero; skip the invalidation */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			f2fs_add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		f2fs_alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		/*
		 * If the xattr nid is corrupted, we can reach an error
		 * condition, err & !f2fs_exist_written_data(sbi, inode->i_ino,
		 * ORPHAN_INO)). In that case, f2fs_check_nid_range() is enough
		 * to give a clue.
		 */
	}
out_clear:
	fscrypt_put_encryption_info(inode);
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void f2fs_handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;
	int err;

	/*
	 * clear nlink of inode in order to release resource of inode
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * we must call this to avoid the inode remaining dirty, resulting
	 * in a panic when flushing dirty inodes in gdirty_list.
	 */
	f2fs_update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add inode to orphan list before f2fs_unlock_op()
	 * so we can prevent losing this orphan when encountering checkpoint
	 * and following sudden power-off.
	 */
	err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
		goto out;
	}

	if (ni.blk_addr != NULL_ADDR) {
		err = f2fs_acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
		} else {
			f2fs_add_orphan_inode(inode);
		}
		f2fs_alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

out:
	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}