// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/uio.h>
#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
#include <linux/kobject.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/part_stat.h>
#include <crypto/hash.h>

#include <linux/fscrypt.h>
#include <linux/fsverity.h>
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
#else
#define f2fs_bug_on(sbi, condition)					\
	do {								\
		if (unlikely(condition)) {				\
			WARN_ON(1);					\
			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
		}							\
	} while (0)
#endif
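/*
 * Editorial note (not in the original header): f2fs_bug_on() is the
 * standard consistency assertion.  With CONFIG_F2FS_CHECK_FS it panics
 * via BUG_ON(); otherwise it only warns and flags the image for fsck,
 * e.g. (NULL_ADDR is a block-address constant from f2fs_fs.h):
 *
 *	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 */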
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)

struct f2fs_fault_info {
	atomic_t inject_ops;
	unsigned int inject_rate;
	unsigned int inject_type;
};

extern const char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type)	((fi)->inject_type & (1 << (type)))
#endif
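/*
 * Editorial sketch (not in the original header): callers pair
 * time_to_inject(), defined near the end of this file, with
 * f2fs_show_injection_info() to simulate a failure, e.g.:
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 *
 * FAULT_KMALLOC is one of the FAULT_* types counted by FAULT_MAX.
 */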
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
#define F2FS_MOUNT_INLINE_XATTR		0x00000080
#define F2FS_MOUNT_INLINE_DATA		0x00000100
#define F2FS_MOUNT_INLINE_DENTRY	0x00000200
#define F2FS_MOUNT_FLUSH_MERGE		0x00000400
#define F2FS_MOUNT_NOBARRIER		0x00000800
#define F2FS_MOUNT_FASTBOOT		0x00001000
#define F2FS_MOUNT_EXTENT_CACHE		0x00002000
#define F2FS_MOUNT_DATA_FLUSH		0x00008000
#define F2FS_MOUNT_FAULT_INJECTION	0x00010000
#define F2FS_MOUNT_USRQUOTA		0x00080000
#define F2FS_MOUNT_GRPQUOTA		0x00100000
#define F2FS_MOUNT_PRJQUOTA		0x00200000
#define F2FS_MOUNT_QUOTA		0x00400000
#define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
#define F2FS_MOUNT_RESERVE_ROOT		0x01000000
#define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
#define F2FS_MOUNT_NORECOVERY		0x04000000
#define F2FS_OPTION(sbi)	((sbi)->mount_opt)
#define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
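/*
 * Editorial example (not in the original header): the option macros
 * paste the F2FS_MOUNT_ prefix themselves, so callers use the short
 * option names, e.g.:
 *
 *	set_opt(sbi, DISCARD);
 *	if (test_opt(sbi, INLINE_DATA))
 *		clear_opt(sbi, INLINE_DATA);
 */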
#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
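/*
 * Editorial note (not in the original header): the signed subtraction
 * makes ver_after() safe across u64 wraparound, the same trick as the
 * kernel's time_after().  E.g. ver_after(2, ULLONG_MAX) is true because
 * (long long)(2 - ULLONG_MAX) == 3 > 0.
 */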
typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

#define COMPRESS_EXT_NUM		16
struct f2fs_mount_info {
	unsigned int opt;
	int write_io_size_bits;		/* Write IO size bits */
	block_t root_reserved_blocks;	/* root reserved blocks */
	kuid_t s_resuid;		/* reserved blocks for uid */
	kgid_t s_resgid;		/* reserved blocks for gid */
	int active_logs;		/* # of active logs */
	int inline_xattr_size;		/* inline xattr size */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info fault_info;	/* For fault injection */
#endif
#ifdef CONFIG_QUOTA
	/* Names of quota files with journalled quota */
	char *s_qf_names[MAXQUOTAS];
	int s_jquota_fmt;		/* Format of quota to use */
#endif
	/* For which write hints are passed down to block layer */
	int whint_mode;
	int alloc_mode;			/* segment allocation policy */
	int fsync_mode;			/* fsync policy */
	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
	int bggc_mode;			/* bggc mode: off, on or sync */
	struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */
	block_t unusable_cap;		/* Amount of space allowed to be
					 * unusable when disabling checkpoint
					 */

	/* For compression */
	unsigned char compress_algorithm;	/* algorithm type */
	unsigned compress_log_size;		/* cluster log size */
	unsigned char compress_ext_cnt;		/* extension count */
	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
};
#define F2FS_FEATURE_ENCRYPT		0x0001
#define F2FS_FEATURE_BLKZONED		0x0002
#define F2FS_FEATURE_ATOMIC_WRITE	0x0004
#define F2FS_FEATURE_EXTRA_ATTR		0x0008
#define F2FS_FEATURE_PRJQUOTA		0x0010
#define F2FS_FEATURE_INODE_CHKSUM	0x0020
#define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
#define F2FS_FEATURE_QUOTA_INO		0x0080
#define F2FS_FEATURE_INODE_CRTIME	0x0100
#define F2FS_FEATURE_LOST_FOUND		0x0200
#define F2FS_FEATURE_VERITY		0x0400
#define F2FS_FEATURE_SB_CHKSUM		0x0800
#define F2FS_FEATURE_CASEFOLD		0x1000
#define F2FS_FEATURE_COMPRESSION	0x2000

#define __F2FS_HAS_FEATURE(raw_super, mask)				\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
#define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
#define F2FS_SET_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature |= cpu_to_le32(mask))
#define F2FS_CLEAR_FEATURE(sbi, mask)					\
	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
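/*
 * Editorial example (not in the original header): feature bits live in
 * the raw superblock as a __le32, so the helpers byte-swap the mask
 * rather than the field, e.g.:
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT))
 *		(enable the fscrypt hooks)
 */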
/*
 * Default values for user and/or group using reserved blocks
 */
#define	F2FS_DEF_RESUID		0
#define	F2FS_DEF_RESGID		0
/*
 * For checkpoint manager
 */

#define	CP_UMOUNT	0x00000001
#define	CP_FASTBOOT	0x00000002
#define	CP_SYNC		0x00000004
#define	CP_RECOVERY	0x00000008
#define	CP_DISCARD	0x00000010
#define CP_TRIMMED	0x00000020
#define CP_PAUSE	0x00000040

#define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
#define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if exists */
#define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
#define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
#define DEF_CP_INTERVAL			60	/* 60 secs */
#define DEF_IDLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_INTERVAL		5	/* 5 secs */
#define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 sec */
#define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
/*
 * indicate meta/data type
 */
enum {
	META_CP,
	META_NAT,
	META_SIT,
	META_SSA,
	META_MAX,
	META_POR,
	DATA_GENERIC,		/* check range only */
	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
	DATA_GENERIC_ENHANCE_READ,	/*
					 * strong check on range and segment
					 * bitmap but no warning due to race
					 * condition of read on truncated area
					 */
	META_MAPPING,
};
/* for the list of ino */
enum {
	ORPHAN_INO,		/* for orphan ino list */
	APPEND_INO,		/* for append ino list */
	UPDATE_INO,		/* for update ino list */
	TRANS_DIR_INO,		/* for transactions dir ino list */
	FLUSH_INO,		/* for multiple device flushing */
	MAX_INO_ENTRY,		/* max. list */
};

struct ino_entry {
	struct list_head list;		/* list head */
	nid_t ino;			/* inode number */
	unsigned int dirty_device;	/* dirty device bitmap */
};
/* for the list of inodes to be GCed */
struct inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

struct fsync_node_entry {
	struct list_head list;	/* list head */
	struct page *page;	/* warm node page pointer */
	unsigned int seq_id;	/* sequence id */
};
/* for the bitmap indicating blocks to be discarded */
struct discard_entry {
	struct list_head list;	/* list head */
	block_t start_blkaddr;	/* start blockaddr of current segment */
	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
};

/* default discard granularity of inner discard thread, unit: block count */
#define DEFAULT_DISCARD_GRANULARITY		16

/* max discard pend list number */
#define MAX_PLIST_NUM		512
#define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?	\
					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
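/*
 * Editorial note (not in the original header): plist_idx() maps a
 * discard length in blocks to a pending list, clamping long requests
 * into the last slot: 1 block -> index 0, 2 -> 1, ..., and anything
 * of MAX_PLIST_NUM (512) blocks or more -> MAX_PLIST_NUM - 1 (511).
 */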
enum {
	D_PREP,			/* initial */
	D_PARTIAL,		/* partially submitted */
	D_SUBMIT,		/* all submitted */
	D_DONE,			/* finished */
};
struct discard_info {
	block_t lstart;			/* logical start address */
	block_t len;			/* length */
	block_t start;			/* actual start address in dev */
};

struct discard_cmd {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	union {
		struct {
			block_t lstart;	/* logical start address */
			block_t len;	/* length */
			block_t start;	/* actual start address in dev */
		};
		struct discard_info di;	/* discard info */
	};
	struct list_head list;		/* command list */
	struct completion wait;		/* completion */
	struct block_device *bdev;	/* bdev */
	unsigned short ref;		/* reference count */
	unsigned char state;		/* state */
	unsigned char queued;		/* queued discard */
	int error;			/* bio error */
	spinlock_t lock;		/* for state/bio_ref updating */
	unsigned short bio_ref;		/* bio reference count */
};
enum {
	DPOLICY_BG,
	DPOLICY_FORCE,
	DPOLICY_FSTRIM,
	DPOLICY_UMOUNT,
	MAX_DPOLICY,
};

struct discard_policy {
	int type;			/* type of discard */
	unsigned int min_interval;	/* used for candidates exist */
	unsigned int mid_interval;	/* used for device busy */
	unsigned int max_interval;	/* used for candidates not exist */
	unsigned int max_requests;	/* # of discards issued per round */
	unsigned int io_aware_gran;	/* minimum granularity discard not be aware of I/O */
	bool io_aware;			/* issue discard in idle time */
	bool sync;			/* submit discard with REQ_SYNC flag */
	bool ordered;			/* issue discard by lba order */
	bool timeout;			/* discard timeout for put_super */
	unsigned int granularity;	/* discard granularity */
};
struct discard_cmd_control {
	struct task_struct *f2fs_issue_discard;	/* discard thread */
	struct list_head entry_list;		/* 4KB discard entry list */
	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
	struct list_head wait_list;		/* store on-flushing entries */
	struct list_head fstrim_list;		/* in-flight discard from fstrim */
	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
	unsigned int discard_wake;		/* to wake up discard thread */
	struct mutex cmd_lock;
	unsigned int nr_discards;		/* # of discards in the list */
	unsigned int max_discards;		/* max. discards to be issued */
	unsigned int discard_granularity;	/* discard granularity */
	unsigned int undiscard_blks;		/* # of undiscard blocks */
	unsigned int next_pos;			/* next discard position */
	atomic_t issued_discard;		/* # of issued discard */
	atomic_t queued_discard;		/* # of queued discard */
	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
	struct rb_root_cached root;		/* root of discard rb-tree */
	bool rbtree_check;			/* config for consistency check */
};
/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last fsync */
	block_t last_dentry;	/* block address locating the last dentry */
};
#define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
#define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))

#define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
#define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
#define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
#define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)

#define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
#define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = nats_in_cursum(journal);

	journal->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
{
	int before = sits_in_cursum(journal);

	journal->n_sits = cpu_to_le16(before + i);
	return before;
}

static inline bool __has_cursum_space(struct f2fs_journal *journal,
							int size, int type)
{
	if (type == NAT_JOURNAL)
		return size <= MAX_NAT_JENTRIES(journal);
	return size <= MAX_SIT_JENTRIES(journal);
}
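/*
 * Editorial example (not in the original header): a typical use is to
 * check for journal room before batching one more NAT update into the
 * current summary block:
 *
 *	if (!__has_cursum_space(journal, 1, NAT_JOURNAL))
 *		(flush journalled NAT entries to the NAT area first)
 */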
/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS
#define F2FS_IOC_GETVERSION		FS_IOC_GETVERSION

#define F2FS_IOCTL_MAGIC		0xf5
#define F2FS_IOC_START_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 1)
#define F2FS_IOC_COMMIT_ATOMIC_WRITE	_IO(F2FS_IOCTL_MAGIC, 2)
#define F2FS_IOC_START_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 3)
#define F2FS_IOC_RELEASE_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 4)
#define F2FS_IOC_ABORT_VOLATILE_WRITE	_IO(F2FS_IOCTL_MAGIC, 5)
#define F2FS_IOC_GARBAGE_COLLECT	_IOW(F2FS_IOCTL_MAGIC, 6, __u32)
#define F2FS_IOC_WRITE_CHECKPOINT	_IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT		_IOWR(F2FS_IOCTL_MAGIC, 8,	\
						struct f2fs_defragment)
#define F2FS_IOC_MOVE_RANGE		_IOWR(F2FS_IOCTL_MAGIC, 9,	\
						struct f2fs_move_range)
#define F2FS_IOC_FLUSH_DEVICE		_IOW(F2FS_IOCTL_MAGIC, 10,	\
						struct f2fs_flush_device)
#define F2FS_IOC_GARBAGE_COLLECT_RANGE	_IOW(F2FS_IOCTL_MAGIC, 11,	\
						struct f2fs_gc_range)
#define F2FS_IOC_GET_FEATURES		_IOR(F2FS_IOCTL_MAGIC, 12, __u32)
#define F2FS_IOC_SET_PIN_FILE		_IOW(F2FS_IOCTL_MAGIC, 13, __u32)
#define F2FS_IOC_GET_PIN_FILE		_IOR(F2FS_IOCTL_MAGIC, 14, __u32)
#define F2FS_IOC_PRECACHE_EXTENTS	_IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS		_IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 17, __u64)

#define F2FS_IOC_GET_VOLUME_NAME	FS_IOC_GETFSLABEL
#define F2FS_IOC_SET_VOLUME_NAME	FS_IOC_SETFSLABEL

#define F2FS_IOC_SET_ENCRYPTION_POLICY	FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY	FS_IOC_GET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_PWSALT	FS_IOC_GET_ENCRYPTION_PWSALT
/*
 * should be same as XFS_IOC_GOINGDOWN.
 * Flags for going down operation used by FS_IOC_GOINGDOWN
 */
#define F2FS_IOC_SHUTDOWN	_IOR('X', 125, __u32)	/* Shutdown */
#define F2FS_GOING_DOWN_FULLSYNC	0x0	/* going down with full sync */
#define F2FS_GOING_DOWN_METASYNC	0x1	/* going down with metadata */
#define F2FS_GOING_DOWN_NOSYNC		0x2	/* going down */
#define F2FS_GOING_DOWN_METAFLUSH	0x3	/* going down with meta flush */
#define F2FS_GOING_DOWN_NEED_FSCK	0x4	/* going down to trigger fsck */
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#define F2FS_IOC32_GETVERSION		FS_IOC32_GETVERSION
#endif

#define F2FS_IOC_FSGETXATTR		FS_IOC_FSGETXATTR
#define F2FS_IOC_FSSETXATTR		FS_IOC_FSSETXATTR
struct f2fs_gc_range {
	u32 sync;
	u64 start;
	u64 len;
};

struct f2fs_defragment {
	u64 start;
	u64 len;
};

struct f2fs_move_range {
	u32 dst_fd;		/* destination fd */
	u64 pos_in;		/* start position in src_fd */
	u64 pos_out;		/* start position in dst_fd */
	u64 len;		/* size to move */
};

struct f2fs_flush_device {
	u32 dev_num;		/* device number to flush */
	u32 segments;		/* # of segments to flush */
};
/* for inline stuff */
#define DEF_INLINE_RESERVED_SIZE	1
static inline int get_extra_isize(struct inode *inode);
static inline int get_inline_xattr_addrs(struct inode *inode);
#define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
				(CUR_ADDRS_PER_INODE(inode) -		\
				get_inline_xattr_addrs(inode) -		\
				DEF_INLINE_RESERVED_SIZE))

/* for inline dir */
#define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				BITS_PER_BYTE + 1))
#define INLINE_DENTRY_BITMAP_SIZE(inode) \
	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
#define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
				NR_INLINE_DENTRY(inode) + \
				INLINE_DENTRY_BITMAP_SIZE(inode)))
/*
 * For INODE and NODE manager
 */
/* for directory operations */
struct f2fs_dentry_ptr {
	struct inode *inode;
	void *bitmap;
	struct f2fs_dir_entry *dentry;
	__u8 (*filename)[F2FS_SLOT_LEN];
	int max;
	int nr_bitmap;
};

static inline void make_dentry_ptr_block(struct inode *inode,
		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
{
	d->inode = inode;
	d->max = NR_DENTRY_IN_BLOCK;
	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
	d->bitmap = t->dentry_bitmap;
	d->dentry = t->dentry;
	d->filename = t->filename;
}

static inline void make_dentry_ptr_inline(struct inode *inode,
					struct f2fs_dentry_ptr *d, void *t)
{
	int entry_cnt = NR_INLINE_DENTRY(inode);
	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
	int reserved_size = INLINE_RESERVED_SIZE(inode);

	d->inode = inode;
	d->max = entry_cnt;
	d->nr_bitmap = bitmap_size;
	d->bitmap = t;
	d->dentry = t + bitmap_size + reserved_size;
	d->filename = t + bitmap_size + reserved_size +
					SIZE_OF_DIR_ENTRY * entry_cnt;
}
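/*
 * Editorial sketch (not in the original header) of the inline dentry
 * layout that make_dentry_ptr_inline() decodes from the inline data
 * area t:
 *
 *	[ bitmap | reserved | dentry array | filename slots ]
 *
 * The bitmap sits at t, the dentry array at t + bitmap_size +
 * reserved_size, and the name slots follow the dentry array at
 * SIZE_OF_DIR_ENTRY * entry_cnt bytes after it.
 */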
/*
 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
 * as its node offset to distinguish from index node blocks.
 * But some bits are used to mark the node block.
 */
#define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
				>> OFFSET_BIT_SHIFT)
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_data_block.
					 */
};
#define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */

/* congestion wait timeout value, default: 20ms */
#define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))

/* maximum retry quota flush count */
#define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8

#define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */

#define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */

/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */

/* number of extent info in extent cache we try to shrink */
#define EXTENT_CACHE_SHRINK_NUMBER	128
struct rb_entry {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	unsigned int ofs;		/* start offset of the entry */
	unsigned int len;		/* length of the entry */
};

struct extent_info {
	unsigned int fofs;		/* start offset in a file */
	unsigned int len;		/* length of the extent */
	u32 blk;			/* start block address of the extent */
};

struct extent_node {
	struct rb_node rb_node;		/* rb node located in rb-tree */
	struct extent_info ei;		/* extent info */
	struct list_head list;		/* node in global extent list of sbi */
	struct extent_tree *et;		/* extent tree pointer */
};

struct extent_tree {
	nid_t ino;			/* inode number */
	struct rb_root_cached root;	/* root of extent info rb-tree */
	struct extent_node *cached_en;	/* recently accessed extent node */
	struct extent_info largest;	/* largest extent info */
	struct list_head list;		/* to be used by sbi->zombie_list */
	rwlock_t lock;			/* protect extent info rb-tree */
	atomic_t node_cnt;		/* # of extent node in rb-tree */
	bool largest_updated;		/* largest extent updated */
};
/*
 * This structure is taken from ext4_map_blocks.
 *
 * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
 */
#define F2FS_MAP_NEW		(1 << BH_New)
#define F2FS_MAP_MAPPED		(1 << BH_Mapped)
#define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
#define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
				F2FS_MAP_UNWRITTEN)

struct f2fs_map_blocks {
	block_t m_pblk;
	block_t m_lblk;
	unsigned int m_len;
	unsigned int m_flags;
	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
	pgoff_t *m_next_extent;		/* point to next possible extent */
	int m_seg_type;
	bool m_may_create;		/* indicate it is from write path */
};
/* for flag in get_data_block */
enum {
	F2FS_GET_BLOCK_DEFAULT,
	F2FS_GET_BLOCK_FIEMAP,
	F2FS_GET_BLOCK_BMAP,
	F2FS_GET_BLOCK_DIO,
	F2FS_GET_BLOCK_PRE_DIO,
	F2FS_GET_BLOCK_PRE_AIO,
	F2FS_GET_BLOCK_PRECACHE,
};
/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_LOST_PINO_BIT	0x02
#define FADVISE_ENCRYPT_BIT	0x04
#define FADVISE_ENC_NAME_BIT	0x08
#define FADVISE_KEEP_SIZE_BIT	0x10
#define FADVISE_HOT_BIT		0x20
#define FADVISE_VERITY_BIT	0x40

#define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)

#define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
#define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
#define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
#define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
#define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
#define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
#define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
#define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
#define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
#define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
#define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
#define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
#define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
#define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
#define DEF_DIR_LEVEL		0

enum {
	GC_FAILURE_PIN,
	GC_FAILURE_ATOMIC,
	MAX_GC_FAILURE
};
/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_FREE_NID,		/* free allocated nid */
	FI_NO_EXTENT,		/* not to use the extent cache */
	FI_INLINE_XATTR,	/* used for inline xattr */
	FI_INLINE_DATA,		/* used for inline data */
	FI_INLINE_DENTRY,	/* used for inline dentry */
	FI_APPEND_WRITE,	/* inode has appended data */
	FI_UPDATE_WRITE,	/* inode has in-place-update data */
	FI_NEED_IPU,		/* used for ipu per file */
	FI_ATOMIC_FILE,		/* indicate atomic file */
	FI_ATOMIC_COMMIT,	/* indicate atomic commit is in progress */
	FI_VOLATILE_FILE,	/* indicate volatile file */
	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
	FI_DROP_CACHE,		/* drop dirty page cache */
	FI_DATA_EXIST,		/* indicate data exists */
	FI_INLINE_DOTS,		/* indicate inline dot dentries */
	FI_DO_DEFRAG,		/* indicate defragment is running */
	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
	FI_HOT_DATA,		/* indicate file is hot */
	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
	FI_PIN_FILE,		/* indicate file should not be gced */
	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
	FI_MMAP_FILE,		/* indicate file was mmapped */
	FI_MAX,			/* max flag, never be used */
};
struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned char i_dir_level;	/* use for dentry level for large dir */
	unsigned int i_current_depth;	/* only for directory depth */
	/* for gc failure statistic */
	unsigned int i_gc_failures[MAX_GC_FAILURE];
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
	struct rw_semaphore i_sem;	/* protect fi info */
	atomic_t dirty_pages;		/* # of dirty pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	struct task_struct *task;	/* lookup and create consistency */
	struct task_struct *cp_task;	/* separate cp/wb IO stats */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	loff_t last_disk_size;		/* lastly written file size */
	spinlock_t i_size_lock;		/* protect last_disk_size */

#ifdef CONFIG_QUOTA
	struct dquot *i_dquot[MAXQUOTAS];

	/* quota space reservation, managed internally by quota code */
	qsize_t i_reserved_quota;
#endif
	struct list_head dirty_list;	/* dirty list for dirs and files */
	struct list_head gdirty_list;	/* linked in global dirty list */
	struct list_head inmem_ilist;	/* list for inmem inodes */
	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
	struct task_struct *inmem_task;	/* store inmemory task */
	struct mutex inmem_lock;	/* lock for inmemory pages */
	struct extent_tree *extent_tree;	/* cached extent_tree entry */

	/* avoid racing between foreground op and gc */
	struct rw_semaphore i_gc_rwsem[2];
	struct rw_semaphore i_mmap_sem;
	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */

	int i_extra_isize;		/* size of extra space located in i_addr */
	kprojid_t i_projid;		/* id for project quota */
	int i_inline_xattr_size;	/* inline xattr size */
	struct timespec64 i_crtime;	/* inode creation time */
	struct timespec64 i_disk_time[4];/* inode disk times */

	/* for file compress */
	u64 i_compr_blocks;			/* # of compressed blocks */
	unsigned char i_compress_algorithm;	/* algorithm type */
	unsigned char i_log_cluster_size;	/* log of cluster size */
	unsigned int i_cluster_size;		/* cluster size */
};
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	ext->fofs = le32_to_cpu(i_ext->fofs);
	ext->blk = le32_to_cpu(i_ext->blk);
	ext->len = le32_to_cpu(i_ext->len);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk = cpu_to_le32(ext->blk);
	i_ext->len = cpu_to_le32(ext->len);
}

static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
						u32 blk, unsigned int len)
{
	ei->fofs = fofs;
	ei->blk = blk;
	ei->len = len;
}
static inline bool __is_discard_mergeable(struct discard_info *back,
			struct discard_info *front, unsigned int max_len)
{
	return (back->lstart + back->len == front->lstart) &&
		(back->len + front->len <= max_len);
}

static inline bool __is_discard_back_mergeable(struct discard_info *cur,
			struct discard_info *back, unsigned int max_len)
{
	return __is_discard_mergeable(back, cur, max_len);
}

static inline bool __is_discard_front_mergeable(struct discard_info *cur,
			struct discard_info *front, unsigned int max_len)
{
	return __is_discard_mergeable(cur, front, max_len);
}

static inline bool __is_extent_mergeable(struct extent_info *back,
						struct extent_info *front)
{
	return (back->fofs + back->len == front->fofs &&
			back->blk + back->len == front->blk);
}

static inline bool __is_back_mergeable(struct extent_info *cur,
						struct extent_info *back)
{
	return __is_extent_mergeable(back, cur);
}

static inline bool __is_front_mergeable(struct extent_info *cur,
						struct extent_info *front)
{
	return __is_extent_mergeable(cur, front);
}
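/*
 * Editorial example (not in the original header): two extents merge
 * only when both the file range and the block range are contiguous,
 * e.g. back = {fofs 0, blk 100, len 4} and front = {fofs 4, blk 104,
 * len 8} satisfy __is_extent_mergeable(back, front), while the same
 * pair with front->blk == 200 does not.
 */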
extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
static inline void __try_update_largest_extent(struct extent_tree *et,
						struct extent_node *en)
{
	if (en->ei.len > et->largest.len) {
		et->largest = en->ei;
		et->largest_updated = true;
	}
}
/*
 * For free nid management
 */
enum nid_state {
	FREE_NID,		/* newly added to free nid list */
	PREALLOC_NID,		/* it is preallocated */
	MAX_NID_STATE,
};
struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t available_nids;		/* # of available node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */
	unsigned int ram_thresh;	/* control the memory footprint */
	unsigned int ra_nid_pages;	/* # of nid pages to be readaheaded */
	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	struct radix_tree_root nat_set_root;/* root of the nat set cache */
	struct rw_semaphore nat_tree_lock;	/* protect the nat entry tree */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	spinlock_t nat_list_lock;	/* protect clean nat entry list */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	unsigned int dirty_nat_cnt;	/* total num of nat entries in set */
	unsigned int nat_blocks;	/* # of nat blocks */

	/* free node ids management */
	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
	struct list_head free_nid_list;	/* list for free nids excluding preallocated nids */
	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
	spinlock_t nid_list_lock;	/* protect nid lists ops */
	struct mutex build_lock;	/* lock for build free nids */
	unsigned char **free_nid_bitmap;
	unsigned char *nat_block_bitmap;
	unsigned short *free_nid_count;	/* free nid count of NAT block */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */

	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
	unsigned char *nat_bits;	/* NAT bits blocks */
	unsigned char *full_nat_bits;	/* full NAT pages */
	unsigned char *empty_nat_bits;	/* empty NAT pages */
#ifdef CONFIG_F2FS_CHECK_FS
	char *nat_bitmap_mir;		/* NAT bitmap mirror */
#endif
	int bitmap_size;		/* bitmap size */
};
/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	bool node_changed;		/* is node block changed */
	char cur_level;			/* level of hole node page */
	char max_level;			/* level of current page located */
	block_t data_blkaddr;		/* block address of the node block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
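/*
 * Editorial example (not in the original header): the usual lookup
 * pattern around set_new_dnode(); f2fs_get_dnode_of_data() and
 * f2fs_put_dnode(), declared later in this header, fill in and release
 * the node page for a file offset:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	(use dn.data_blkaddr)
 *	f2fs_put_dnode(&dn);
 */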
/*
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NR_CURSEG_TYPE,
	CURSEG_COLD_DATA_PINNED,/* cold data for pinned file */
};
struct flush_cmd {
	struct completion wait;
	struct llist_node llnode;
	nid_t ino;
	int ret;
};

struct flush_cmd_control {
	struct task_struct *f2fs_issue_flush;	/* flush thread */
	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
	atomic_t issued_flush;			/* # of issued flushes */
	atomic_t queued_flush;			/* # of queued flushes */
	struct llist_head issue_list;		/* list for command issue */
	struct llist_node *dispatch_list;	/* list for command dispatch */
};
struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct rw_semaphore curseg_lock;	/* for preventing curseg change */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */

	/* a threshold to reclaim prefree segments */
	unsigned int rec_prefree_segments;

	/* for batched trimming */
	unsigned int trim_sections;	/* # of sections to trim */

	struct list_head sit_entry_set;	/* sit entry set list */

	unsigned int ipu_policy;	/* in-place-update policy */
	unsigned int min_ipu_util;	/* in-place-update threshold */
	unsigned int min_fsync_blocks;	/* threshold for fsync */
	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */

	/* for flush command control */
	struct flush_cmd_control *fcc_info;

	/* for discard command control */
	struct discard_cmd_control *dcc_info;
};
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
#define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 * ...			Can only be used with META.
 */
#define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
	INMEM,		/* the below types are used by tracepoints only. */
	INMEM_DROP,
	INMEM_INVALIDATE,
	INMEM_REVOKE,
	IPU,
	OPU,
};

enum temp_type {
	HOT = 0,	/* must be zero for meta bio */
	WARM,
	COLD,
	NR_TEMP_TYPE,
};
enum need_lock_type {
	LOCK_REQ = 0,
	LOCK_DONE,
	LOCK_RETRY,
};
enum cp_reason_type {
	CP_NO_NEEDED,
	CP_NON_REGULAR,
	CP_COMPRESSED,
	CP_HARDLINK,
	CP_SB_NEED_CP,
	CP_WRONG_PINO,
	CP_NO_SPC_ROLL,
	CP_NODE_NEED_CP,
	CP_FASTBOOT_MODE,
	CP_SPEC_LOG_NUM,
	CP_RECOVER_DIR,
};

enum iostat_type {
	APP_DIRECT_IO,			/* app direct IOs */
	APP_BUFFERED_IO,		/* app buffered IOs */
	APP_WRITE_IO,			/* app write IOs */
	APP_MAPPED_IO,			/* app mapped IOs */
	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
	FS_META_IO,			/* meta IOs from kworker/reclaimer */
	FS_GC_DATA_IO,			/* data IOs from foreground gc */
	FS_GC_NODE_IO,			/* node IOs from foreground gc */
	FS_CP_DATA_IO,			/* data IOs from checkpoint */
	FS_CP_NODE_IO,			/* node IOs from checkpoint */
	FS_CP_META_IO,			/* meta IOs from checkpoint */
	FS_DISCARD,			/* discard */
	NR_IO_TYPE,
};
struct f2fs_io_info {
	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
	nid_t ino;		/* inode number */
	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
	enum temp_type temp;	/* contains HOT/WARM/COLD */
	int op;			/* contains REQ_OP_ */
	int op_flags;		/* req_flag_bits */
	block_t new_blkaddr;	/* new block address to be written */
	block_t old_blkaddr;	/* old block address before COW */
	struct page *page;	/* page to be written */
	struct page *encrypted_page;	/* encrypted page */
	struct page *compressed_page;	/* compressed page */
	struct list_head list;		/* serialize IOs */
	bool submitted;		/* indicate IO submission */
	int need_lock;		/* indicate we need to lock cp_rwsem */
	bool in_list;		/* indicate fio is in io_list */
	bool is_por;		/* indicate IO is from recovery or not */
	bool retry;		/* need to reallocate block address */
	int compr_blocks;	/* # of compressed block addresses */
	bool encrypted;		/* indicate file is encrypted */
	enum iostat_type io_type;	/* io type */
	struct writeback_control *io_wbc; /* writeback control */
	struct bio **bio;		/* bio for ipu */
	sector_t *last_block;		/* last block number in bio */
	unsigned char version;		/* version of the node */
};
struct bio_entry {
	struct bio *bio;
	struct list_head list;
};
#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
	struct f2fs_sb_info *sbi;	/* f2fs superblock */
	struct bio *bio;		/* bios to merge */
	sector_t last_block_in_bio;	/* last block number */
	struct f2fs_io_info fio;	/* store buffered io info. */
	struct rw_semaphore io_rwsem;	/* blocking op for bio */
	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
	struct list_head io_list;	/* track fios */
	struct list_head bio_list;	/* bio entry list head */
	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
};
#define FDEV(i)				(sbi->devs[i])
#define RDEV(i)				(raw_super->devs[i])
struct f2fs_dev_info {
	struct block_device *bdev;
	char path[MAX_PATH_LEN];
	unsigned int total_segments;
	block_t start_blk;
	block_t end_blk;
#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_blkz;		/* Total number of zones */
	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
#endif
};
enum inode_type {
	DIR_INODE,			/* for dirty dir inode */
	FILE_INODE,			/* for dirty regular/symlink inode */
	DIRTY_META,			/* for all dirtied inode metadata */
	ATOMIC_FILE,			/* for all atomic files */
	NR_INODE_TYPE,
};

/* for inner inode cache management */
struct inode_management {
	struct radix_tree_root ino_root;	/* ino entry array */
	spinlock_t ino_lock;			/* for ino entry lock */
	struct list_head ino_list;		/* inode list head */
	unsigned long ino_num;			/* number of entries */
};
/* For s_flag in struct f2fs_sb_info */
enum {
	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
	SBI_IS_CLOSE,				/* specify unmounting */
	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
	SBI_POR_DOING,				/* recovery is doing or not */
	SBI_NEED_SB_WRITE,			/* need to recover superblock */
	SBI_NEED_CP,				/* need to checkpoint */
	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
	SBI_IS_RECOVERED,			/* recovered orphan/data */
	SBI_CP_DISABLED,			/* CP was disabled last mount */
	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
	SBI_IS_RESIZEFS,			/* resizefs is in process */
};
enum {
	CP_TIME,
	REQ_TIME,
	DISCARD_TIME,
	GC_TIME,
	DISABLE_TIME,
	UMOUNT_DISCARD_TIMEOUT,
	MAX_TIME,
};
enum {
	BGGC_MODE_ON,		/* background gc is on */
	BGGC_MODE_OFF,		/* background gc is off */
	BGGC_MODE_SYNC,		/*
				 * background gc is on, migrating blocks
				 * like foreground gc
				 */
};
enum {
	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
	FS_MODE_LFS,		/* use lfs allocation only */
};
enum {
	WHINT_MODE_OFF,		/* not pass down write hints */
	WHINT_MODE_USER,	/* try to pass down hints given by users */
	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
};
enum {
	ALLOC_MODE_DEFAULT,	/* stay default */
	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
};
enum fsync_mode {
	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
	FSYNC_MODE_NOBARRIER,	/* fsync behaves like posix, but without
				 * issuing a barrier
				 */
};
/*
 * this value is set in page as a private data which indicates that
 * the page is atomically written, and it is in inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
#ifdef CONFIG_FS_ENCRYPTION
#define DUMMY_ENCRYPTION_ENABLED(sbi) \
	(unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL))
#else
#define DUMMY_ENCRYPTION_ENABLED(sbi) (0)
#endif
/* For compression */
enum compress_algorithm_type {
	COMPRESS_LZO,
	COMPRESS_LZ4,
	COMPRESS_ZSTD,
	COMPRESS_MAX,
};

#define COMPRESS_DATA_RESERVED_SIZE		5
struct compress_data {
	__le32 clen;			/* compressed data size */
	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
	u8 cdata[];			/* compressed data */
};

#define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))

#define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
/* compress context */
struct compress_ctx {
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	void *private;			/* payload buffer for specified compression algorithm */
	void *private2;			/* extra payload buffer */
};
/* compress context for write IO path */
struct compress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	refcount_t ref;			/* reference count of raw page */
};
/* decompress io context for read IO path */
struct decompress_io_ctx {
	u32 magic;			/* magic number to indicate page is compressed */
	struct inode *inode;		/* inode the context belongs to */
	pgoff_t cluster_idx;		/* cluster index number */
	unsigned int cluster_size;	/* page count in cluster */
	unsigned int log_cluster_size;	/* log of cluster size */
	struct page **rpages;		/* pages store raw data in cluster */
	unsigned int nr_rpages;		/* total page number in rpages */
	struct page **cpages;		/* pages store compressed data in cluster */
	unsigned int nr_cpages;		/* total page number in cpages */
	struct page **tpages;		/* temp pages to pad holes in cluster */
	void *rbuf;			/* virtual mapped address on rpages */
	struct compress_data *cbuf;	/* virtual mapped address on cpages */
	size_t rlen;			/* valid data length in rbuf */
	size_t clen;			/* valid data length in cbuf */
	refcount_t ref;			/* reference count of compressed page */
	bool failed;			/* indicate IO error during decompression */
	void *private;			/* payload buffer for specified decompression algorithm */
	void *private2;			/* extra payload buffer */
};
#define NULL_CLUSTER			((unsigned int)(~0))
#define MIN_COMPRESS_LOG_SIZE		2
#define MAX_COMPRESS_LOG_SIZE		8
#define MAX_COMPRESS_WINDOW_SIZE	((PAGE_SIZE) << MAX_COMPRESS_LOG_SIZE)
struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct proc_dir_entry *s_proc;		/* proc entry */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	struct rw_semaphore sb_lock;		/* lock for raw super block */
	int valid_super_block;			/* valid super block no */
	unsigned long s_flag;			/* flags for sbi */
	struct mutex writepages;		/* mutex for writepages() */
#ifdef CONFIG_UNICODE
	struct unicode_map *s_encoding;
	__u16 s_encoding_flags;
#endif

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
#endif

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */

	/* for bio operations */
	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
	/* keep migration IO order for LFS mode */
	struct rw_semaphore io_order_lock;
	mempool_t *write_io_dummy;		/* Dummy pages */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	int cur_cp_pack;			/* remain current cp pack */
	spinlock_t cp_lock;			/* for flag in ckpt */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
	struct rw_semaphore node_write;		/* locking node writes */
	struct rw_semaphore node_change;	/* locking node change */
	wait_queue_head_t cp_wait;
	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
	long interval_time[MAX_TIME];		/* to store thresholds */

	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */

	spinlock_t fsync_node_lock;		/* for node entry lock */
	struct list_head fsync_node_list;	/* node list head */
	unsigned int fsync_seg_id;		/* sequence id */
	unsigned int fsync_node_num;		/* number of node entries */
	/* for orphan inode, use 0'th array */
	unsigned int max_orphans;		/* max orphan inodes */

	/* for inode management */
	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
	struct mutex flush_lock;		/* for flush exclusion */

	/* for extent tree cache */
	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
	struct mutex extent_tree_lock;	/* locking extent radix tree */
	struct list_head extent_list;		/* lru list for shrinker */
	spinlock_t extent_lock;			/* locking extent lru list */
	atomic_t total_ext_tree;		/* extent tree count */
	struct list_head zombie_list;		/* extent zombie tree list */
	atomic_t total_zombie_tree;		/* extent zombie tree count */
	atomic_t total_ext_node;		/* extent info count */

	/* basic filesystem units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	struct mutex resize_mutex;		/* for resize exclusion */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	loff_t max_file_blocks;			/* max block index of file */
	int dir_level;				/* directory level */
	int readdir_ra;				/* readahead inode in readdir */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t discard_blks;			/* discard command candidates */
	block_t last_valid_block_count;		/* for recovery */
	block_t reserved_blocks;		/* configurable reserved blocks */
	block_t current_reserved_blocks;	/* current reserved blocks */

	/* Additional tracking for no checkpoint mode */
	block_t unusable_block_count;		/* # of blocks saved by last cp */

	unsigned int nquota_files;		/* # of quota sysfile */
	struct rw_semaphore quota_sem;		/* blocking cp for flags */

	/* # of pages, see count_type */
	atomic_t nr_pages[NR_COUNT_TYPE];
	/* # of allocated blocks */
	struct percpu_counter alloc_valid_block_count;

	/* writeback control */
	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */

	/* valid inode count */
	struct percpu_counter total_valid_inode_count;
	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct rw_semaphore gc_lock;		/*
						 * semaphore for GC, avoid
						 * race between GC and GC or CP
						 */
	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */
	unsigned int gc_mode;			/* current GC state */
	unsigned int next_victim_seg[2];	/* next segment in victim section */
	/* for skip statistic */
	unsigned int atomic_files;		/* # of opened atomic file */
	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
	unsigned long long skipped_gc_rwsem;		/* FG_GC only */

	/* threshold for gc trials on pinned files */
	u64 gc_pin_file_threshold;
	struct rw_semaphore pin_sem;

	/* maximum # of trials to find a victim segment for SSR and GC */
	unsigned int max_victim_search;
	/* migration granularity of garbage collection, unit: segment */
	unsigned int migration_granularity;
	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	atomic_t meta_count[META_MAX];		/* # of meta blocks */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	atomic_t inplace_count;			/* # of inplace update */
	atomic64_t total_hit_ext;		/* # of lookup extent cache */
	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
	atomic64_t read_hit_largest;		/* # of hit largest extent node */
	atomic64_t read_hit_cached;		/* # of hit cached extent node */
	atomic_t inline_xattr;			/* # of inline_xattr inodes */
	atomic_t inline_inode;			/* # of inline_data inodes */
	atomic_t inline_dir;			/* # of inline_dentry inodes */
	atomic_t compr_inode;			/* # of compressed inodes */
	atomic_t compr_blocks;			/* # of compressed blocks */
	atomic_t vw_cnt;			/* # of volatile writes */
	atomic_t max_aw_cnt;			/* max # of atomic writes */
	atomic_t max_vw_cnt;			/* max # of volatile writes */
	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
#endif
	spinlock_t stat_lock;			/* lock for stat operations */

	/* For app/fs IO statistics */
	spinlock_t iostat_lock;
	unsigned long long write_iostat[NR_IO_TYPE];
	bool iostat_enable;
	/* For sysfs support */
	struct kobject s_kobj;
	struct completion s_kobj_unregister;

	/* For shrinker support */
	struct list_head s_list;
	int s_ndevs;				/* number of devices */
	struct f2fs_dev_info *devs;		/* for device list */
	unsigned int dirty_device;		/* for checkpoint data flush */
	spinlock_t dev_lock;			/* protect dirty_device */
	struct mutex umount_mutex;
	unsigned int shrinker_run_no;

	/* For write statistics */
	u64 sectors_written_start;
	u64 kbytes_written;

	/* Reference to checksum algorithm driver via cryptoapi */
	struct crypto_shash *s_chksum_driver;

	/* Precomputed FS UUID checksum for seeding other checksums */
	__u32 s_chksum_seed;

	struct workqueue_struct *post_read_wq;	/* post read workqueue */

	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
};
struct f2fs_private_dio {
	struct inode *inode;
	void *orig_private;
	bio_end_io_t *orig_end_io;
	bool write;
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type)				\
	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
		KERN_INFO, sbi->sb->s_id,				\
		f2fs_fault_name[type],					\
		__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (!ffi->inject_rate)
		return false;

	if (!IS_FAULT_SET(ffi, type))
		return false;

	atomic_inc(&ffi->inject_ops);
	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
		atomic_set(&ffi->inject_ops, 0);
		return true;
	}
	return false;
}
#else
#define f2fs_show_injection_info(sbi, type) do { } while (0)
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
	return false;
}
#endif
/*
 * Test if the mounted volume is a multi-device volume.
 *   - For a single regular disk volume, sbi->s_ndevs is 0.
 *   - For a single zoned disk volume, sbi->s_ndevs is 1.
 *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
 */
static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
{
	return sbi->s_ndevs > 1;
}
/* For write statistics. Suppose sector size is 512 bytes,
 * and the return value is in kbytes. s is of struct f2fs_sb_info.
 */
#define BD_PART_WRITTEN(s)						\
	(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -	\
		(s)->sectors_written_start) >> 1)
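/*
 * Editorial note (not in the original header): with 512-byte sectors,
 * shifting the sector delta right by one halves it into kilobytes,
 * e.g. 2048 sectors written == 1 MiB, and (2048 >> 1) == 1024 KB.
 */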
static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
{
	unsigned long now = jiffies;

	sbi->last_time[type] = now;

	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
	if (type == REQ_TIME) {
		sbi->last_time[DISCARD_TIME] = now;
		sbi->last_time[GC_TIME] = now;
	}
}

static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;

	return time_after(jiffies, sbi->last_time[type] + interval);
}

static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
						int type)
{
	unsigned long interval = sbi->interval_time[type] * HZ;
	unsigned int wait_ms = 0;
	long delta;

	delta = (sbi->last_time[type] + interval) - jiffies;
	if (delta > 0)
		wait_ms = jiffies_to_msecs(delta);

	return wait_ms;
}
static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];
	} desc;
	int err;

	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));

	desc.shash.tfm = sbi->s_chksum_driver;
	*(u32 *)desc.ctx = crc;

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}

static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
			   unsigned int length)
{
	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
}

static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
				  void *buf, size_t buf_size)
{
	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
}

static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
			      const void *address, unsigned int length)
{
	return __f2fs_crc32(sbi, crc, address, length);
}
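/*
 * Editorial example (not in the original header): f2fs_crc32() seeds
 * the CRC with the F2FS magic, and f2fs_crc_valid() rechecks a stored
 * checksum against a buffer, e.g.:
 *
 *	__u32 crc = f2fs_crc32(sbi, buf, len);
 *	f2fs_bug_on(sbi, !f2fs_crc_valid(sbi, crc, buf, len));
 */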
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
{
	return F2FS_SB(inode->i_sb);
}

static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
{
	return F2FS_I_SB(mapping->host);
}

static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
{
	return F2FS_M_SB(page_file_mapping(page));
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}

static inline struct f2fs_inode *F2FS_INODE(struct page *page)
{
	return &((struct f2fs_node *)page_address(page))->i;
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->meta_inode->i_mapping;
}

static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
{
	return sbi->node_inode->i_mapping;
}

static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
{
	return test_bit(type, &sbi->s_flag);
}

static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	set_bit(type, &sbi->s_flag);
}

static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
{
	clear_bit(type, &sbi->s_flag);
}
1765 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1767 return le64_to_cpu(cp->checkpoint_ver);
1770 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
	if (type < F2FS_MAX_QUOTAS)
		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
	return 0;
}
1777 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
1779 size_t crc_offset = le32_to_cpu(cp->checksum_offset);
1780 return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
1783 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1785 unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1787 return ckpt_flags & f;
1790 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1792 return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1795 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
1804 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1806 unsigned long flags;
1808 spin_lock_irqsave(&sbi->cp_lock, flags);
1809 __set_ckpt_flags(F2FS_CKPT(sbi), f);
1810 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1813 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
	unsigned int ckpt_flags;

	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
1822 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1824 unsigned long flags;
1826 spin_lock_irqsave(&sbi->cp_lock, flags);
1827 __clear_ckpt_flags(F2FS_CKPT(sbi), f);
1828 spin_unlock_irqrestore(&sbi->cp_lock, flags);
1831 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
1833 unsigned long flags;
1834 unsigned char *nat_bits;
	/*
	 * To re-enable nat_bits we would have to force fsck.f2fs to run via
	 * set_sbi_flag(sbi, SBI_NEED_FSCK), which is too costly; instead rely
	 * on a regular fsck run or an unclean shutdown to rebuild them.
	 */
	if (lock)
		spin_lock_irqsave(&sbi->cp_lock, flags);
	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
	nat_bits = NM_I(sbi)->nat_bits;
	NM_I(sbi)->nat_bits = NULL;
	if (lock)
		spin_unlock_irqrestore(&sbi->cp_lock, flags);

	kvfree(nat_bits);
}
1853 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
1854 struct cp_control *cpc)
1856 bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
1858 return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
1861 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1863 down_read(&sbi->cp_rwsem);
1866 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
1868 return down_read_trylock(&sbi->cp_rwsem);
1871 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1873 up_read(&sbi->cp_rwsem);
1876 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1878 down_write(&sbi->cp_rwsem);
1881 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1883 up_write(&sbi->cp_rwsem);
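/*
 * cp_rwsem orders filesystem operations against checkpointing: ops take
 * it for read via f2fs_lock_op() while the checkpoint writer takes it
 * via f2fs_lock_all(), so a checkpoint never observes half-done metadata
 * updates. Typical caller pattern (illustrative):
 *
 *	f2fs_lock_op(sbi);
 *	... allocate/update node and data blocks ...
 *	f2fs_unlock_op(sbi);
 */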
1886 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1888 int reason = CP_SYNC;
1890 if (test_opt(sbi, FASTBOOT))
1891 reason = CP_FASTBOOT;
	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
		reason = CP_UMOUNT;

	return reason;
}
1897 static inline bool __remain_node_summaries(int reason)
1899 return (reason & (CP_UMOUNT | CP_FASTBOOT));
1902 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1904 return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1905 is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
1909 * Check whether the inode has blocks or not
1911 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
1913 block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
1915 return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
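/*
 * i_blocks is kept in 512-byte sectors, so the shift converts it to
 * filesystem blocks; an inode whose only block is its xattr node is
 * still treated as having no data blocks.
 */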
1918 static inline bool f2fs_has_xattr_block(unsigned int ofs)
1920 return ofs == XATTR_NODE_OFFSET;
static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
					struct inode *inode, bool cap)
{
	if (!inode)
		return true;
	if (!test_opt(sbi, RESERVE_ROOT))
		return false;
	if (IS_NOQUOTA(inode))
		return true;
	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
		return true;
	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
					in_group_p(F2FS_OPTION(sbi).s_resgid))
		return true;
	if (cap && capable(CAP_SYS_RESOURCE))
		return true;
	return false;
}
1942 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
1943 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
1944 struct inode *inode, blkcnt_t *count)
	blkcnt_t diff = 0, release = 0;
	block_t avail_user_block_count;
	int ret;

	ret = dquot_reserve_block(inode, *count);
	if (ret)
		return ret;

	if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		release = *count;
		goto release_quota;
	}
	/*
	 * let's increase this before the actual block count change so that
	 * f2fs_sync_file can avoid data races when deciding on a checkpoint.
	 */
1964 percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
1966 spin_lock(&sbi->stat_lock);
1967 sbi->total_valid_block_count += (block_t)(*count);
1968 avail_user_block_count = sbi->user_block_count -
1969 sbi->current_reserved_blocks;
1971 if (!__allow_reserved_blocks(sbi, inode, true))
1972 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
1973 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1974 if (avail_user_block_count > sbi->unusable_block_count)
1975 avail_user_block_count -= sbi->unusable_block_count;
		else
			avail_user_block_count = 0;
	}
	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
		diff = sbi->total_valid_block_count - avail_user_block_count;
		if (diff > *count)
			diff = *count;
		*count -= diff;
		release = diff;
		sbi->total_valid_block_count -= diff;
		if (!*count) {
			spin_unlock(&sbi->stat_lock);
			goto enospc;
		}
	}
	spin_unlock(&sbi->stat_lock);
	if (unlikely(release)) {
		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
		dquot_release_reservation_block(inode, release);
	}
	f2fs_i_blocks_write(inode, *count, true, true);
	return 0;

enospc:
	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
release_quota:
	dquot_release_reservation_block(inode, release);
	return -ENOSPC;
}
2008 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2010 #define f2fs_err(sbi, fmt, ...) \
2011 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2012 #define f2fs_warn(sbi, fmt, ...) \
2013 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2014 #define f2fs_notice(sbi, fmt, ...) \
2015 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2016 #define f2fs_info(sbi, fmt, ...) \
2017 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2018 #define f2fs_debug(sbi, fmt, ...) \
2019 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
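/*
 * Example (illustrative): f2fs_warn(sbi, "invalid nat entry, nid:%u", nid);
 * f2fs_printk() tags each message with the mounted instance, so callers
 * need not repeat it in the format string.
 */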
2021 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
					struct inode *inode,
					blkcnt_t count)
{
	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2027 spin_lock(&sbi->stat_lock);
2028 f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2029 sbi->total_valid_block_count -= (block_t)count;
2030 if (sbi->reserved_blocks &&
2031 sbi->current_reserved_blocks < sbi->reserved_blocks)
2032 sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2033 sbi->current_reserved_blocks + count);
2034 spin_unlock(&sbi->stat_lock);
2035 if (unlikely(inode->i_blocks < sectors)) {
		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
			  inode->i_ino,
			  (unsigned long long)inode->i_blocks,
			  (unsigned long long)sectors);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return;
	}
	f2fs_i_blocks_write(inode, count, false, true);
}
2046 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2048 atomic_inc(&sbi->nr_pages[count_type]);
2050 if (count_type == F2FS_DIRTY_DENTS ||
2051 count_type == F2FS_DIRTY_NODES ||
2052 count_type == F2FS_DIRTY_META ||
2053 count_type == F2FS_DIRTY_QDATA ||
2054 count_type == F2FS_DIRTY_IMETA)
2055 set_sbi_flag(sbi, SBI_IS_DIRTY);
2058 static inline void inode_inc_dirty_pages(struct inode *inode)
2060 atomic_inc(&F2FS_I(inode)->dirty_pages);
2061 inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2062 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2063 if (IS_NOQUOTA(inode))
2064 inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2067 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2069 atomic_dec(&sbi->nr_pages[count_type]);
2072 static inline void inode_dec_dirty_pages(struct inode *inode)
	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	atomic_dec(&F2FS_I(inode)->dirty_pages);
2079 dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2080 F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2081 if (IS_NOQUOTA(inode))
2082 dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2085 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2087 return atomic_read(&sbi->nr_pages[count_type]);
2090 static inline int get_dirty_pages(struct inode *inode)
2092 return atomic_read(&F2FS_I(inode)->dirty_pages);
2095 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2097 unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2098 unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2099 sbi->log_blocks_per_seg;
2101 return segs / sbi->segs_per_sec;
2104 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2106 return sbi->total_valid_block_count;
2109 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2111 return sbi->discard_blks;
2114 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2116 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2118 /* return NAT or SIT bitmap */
2119 if (flag == NAT_BITMAP)
2120 return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}
2127 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2129 return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2132 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2134 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2137 if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2138 offset = (flag == SIT_BITMAP) ?
2139 le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
		/*
		 * If the large_nat_bitmap feature is enabled, the bitmaps are
		 * prefixed by their checksum; skip it so that every NAT/SIT
		 * bitmap stays under checksum protection.
		 */
		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
	}
2147 if (__cp_payload(sbi) > 0) {
2148 if (flag == NAT_BITMAP)
2149 return &ckpt->sit_nat_version_bitmap;
2151 return (unsigned char *)ckpt + F2FS_BLKSIZE;
2153 offset = (flag == NAT_BITMAP) ?
2154 le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2155 return &ckpt->sit_nat_version_bitmap + offset;
2159 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2161 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
	if (sbi->cur_cp_pack == 2)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}
2168 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2170 block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
	if (sbi->cur_cp_pack == 1)
		start_addr += sbi->blocks_per_seg;
	return start_addr;
}
2177 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2179 sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
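/*
 * Two checkpoint packs alternate in the CP area, one segment apart:
 * cur_cp_pack selects the currently valid pack and __set_cp_next_pack()
 * flips to the other one before a new checkpoint is written.
 */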
2182 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2184 return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2187 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2188 struct inode *inode, bool is_inode)
2190 block_t valid_block_count;
	unsigned int valid_node_count, user_block_count;
	int err;

	if (is_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			return err;
	} else {
		err = dquot_reserve_block(inode, 1);
		if (err)
			return err;
	}
2206 if (time_to_inject(sbi, FAULT_BLOCK)) {
		f2fs_show_injection_info(sbi, FAULT_BLOCK);
		goto enospc;
	}
2211 spin_lock(&sbi->stat_lock);
2213 valid_block_count = sbi->total_valid_block_count +
2214 sbi->current_reserved_blocks + 1;
2216 if (!__allow_reserved_blocks(sbi, inode, false))
2217 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2218 user_block_count = sbi->user_block_count;
2219 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2220 user_block_count -= sbi->unusable_block_count;
2222 if (unlikely(valid_block_count > user_block_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}
2227 valid_node_count = sbi->total_valid_node_count + 1;
2228 if (unlikely(valid_node_count > sbi->total_node_count)) {
		spin_unlock(&sbi->stat_lock);
		goto enospc;
	}
2233 sbi->total_valid_node_count++;
2234 sbi->total_valid_block_count++;
2235 spin_unlock(&sbi->stat_lock);
	if (inode) {
		if (is_inode)
			f2fs_mark_inode_dirty_sync(inode, true);
		else
			f2fs_i_blocks_write(inode, 1, true, true);
	}

	percpu_counter_inc(&sbi->alloc_valid_block_count);
	return 0;

enospc:
	if (is_inode)
		dquot_free_inode(inode);
	else
		dquot_release_reservation_block(inode, 1);
	return -ENOSPC;
}
2257 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2258 struct inode *inode, bool is_inode)
2260 spin_lock(&sbi->stat_lock);
2262 f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2263 f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2265 sbi->total_valid_node_count--;
2266 sbi->total_valid_block_count--;
2267 if (sbi->reserved_blocks &&
2268 sbi->current_reserved_blocks < sbi->reserved_blocks)
2269 sbi->current_reserved_blocks++;
2271 spin_unlock(&sbi->stat_lock);
	if (is_inode) {
		dquot_free_inode(inode);
	} else {
		if (unlikely(inode->i_blocks == 0)) {
			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
				  inode->i_ino,
				  (unsigned long long)inode->i_blocks);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return;
		}
		f2fs_i_blocks_write(inode, 1, false, true);
	}
}
2287 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2289 return sbi->total_valid_node_count;
2292 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2294 percpu_counter_inc(&sbi->total_valid_inode_count);
2297 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2299 percpu_counter_dec(&sbi->total_valid_inode_count);
2302 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2304 return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
						pgoff_t index, bool for_write)
{
	struct page *page;

	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
		if (!for_write)
			page = find_get_page_flags(mapping, index,
							FGP_LOCK | FGP_ACCESSED);
		else
			page = find_lock_page(mapping, index);
		if (page)
			return page;

		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
			f2fs_show_injection_info(F2FS_M_SB(mapping),
							FAULT_PAGE_ALLOC);
			return NULL;
		}
	}

	if (!for_write)
		return grab_cache_page(mapping, index);
	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
}
2333 static inline struct page *f2fs_pagecache_get_page(
2334 struct address_space *mapping, pgoff_t index,
2335 int fgp_flags, gfp_t gfp_mask)
2337 if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2338 f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2342 return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
static inline void f2fs_copy_page(struct page *src, struct page *dst)
{
	char *src_kaddr = kmap(src);
	char *dst_kaddr = kmap(dst);

	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
	kunmap(dst);
	kunmap(src);
}
static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page)
		return;

	if (unlock) {
		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
		unlock_page(page);
	}
	put_page(page);
}
2367 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2370 f2fs_put_page(dn->node_page, 1);
2371 if (dn->inode_page && dn->node_page != dn->inode_page)
2372 f2fs_put_page(dn->inode_page, 0);
2373 dn->node_page = NULL;
2374 dn->inode_page = NULL;
static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size)
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
}
static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
					gfp_t flags)
{
	void *entry;

	entry = kmem_cache_alloc(cachep, flags);
	if (!entry)
		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
	return entry;
}
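/*
 * is_idle - true when no reads, writes, DIOs, discards or flushes are in
 * flight and the idle interval for @type has elapsed; GC_URGENT mode
 * reports idle unconditionally so urgent GC is never throttled.
 */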
2394 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
	if (sbi->gc_mode == GC_URGENT)
		return true;

	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
		get_pages(sbi, F2FS_WB_CP_DATA) ||
		get_pages(sbi, F2FS_DIO_READ) ||
		get_pages(sbi, F2FS_DIO_WRITE))
		return false;

	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
		return false;

	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
		return false;
2414 return f2fs_time_over(sbi, type);
2417 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2418 unsigned long index, void *item)
	while (radix_tree_insert(root, index, item))
		cond_resched();
}
2424 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
2426 static inline bool IS_INODE(struct page *page)
2428 struct f2fs_node *p = F2FS_NODE(page);
2430 return RAW_IS_INODE(p);
2433 static inline int offset_in_addr(struct f2fs_inode *i)
2435 return (i->i_inline & F2FS_EXTRA_ATTR) ?
2436 (le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2439 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2441 return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2444 static inline int f2fs_has_extra_attr(struct inode *inode);
2445 static inline block_t data_blkaddr(struct inode *inode,
2446 struct page *node_page, unsigned int offset)
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	int base = 0;
	bool is_inode = IS_INODE(node_page);

	raw_node = F2FS_NODE(node_page);

	if (is_inode) {
		if (!inode)
			/* from GC path only */
			base = offset_in_addr(&raw_node->i);
		else if (f2fs_has_extra_attr(inode))
			base = get_extra_isize(inode);
	}

	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[base + offset]);
}
2467 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2469 return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline void f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr |= mask;
}

static inline void f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr &= ~mask;
}

static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}

static inline void f2fs_change_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	*addr ^= mask;
}
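/*
 * Note: unlike the generic kernel bitops, these helpers are MSB-first
 * within each byte (bit 0 maps to mask 0x80), matching the on-disk
 * bitmap layout; e.g. f2fs_set_bit(0, addr) sets 0x80 in addr[0].
 */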
/*
 * On-disk inode flags (f2fs_inode::i_flags)
 */
2535 #define F2FS_COMPR_FL 0x00000004 /* Compress file */
2536 #define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2537 #define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2538 #define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2539 #define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2540 #define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2541 #define F2FS_NOCOMP_FL 0x00000400 /* Don't compress */
2542 #define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2543 #define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2544 #define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2545 #define F2FS_CASEFOLD_FL 0x40000000 /* Casefolded file */
2547 /* Flags that should be inherited by new inodes from their parent. */
2548 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2549 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2550 F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2552 /* Flags that are appropriate for regular files (all but dir-specific ones). */
#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
				F2FS_CASEFOLD_FL))
/* Flags that are appropriate for inodes that are neither directories nor regular files. */
2557 #define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static inline void __mark_inode_dirty_flag(struct inode *inode,
						int flag, bool set)
{
	switch (flag) {
	case FI_INLINE_XATTR:
	case FI_INLINE_DATA:
	case FI_INLINE_DENTRY:
	case FI_NEW_INODE:
		if (set)
			return;
		/* fall through */
	case FI_DATA_EXIST:
	case FI_INLINE_DOTS:
	case FI_PIN_FILE:
		f2fs_mark_inode_dirty_sync(inode, true);
	}
}
2587 static inline void set_inode_flag(struct inode *inode, int flag)
2589 test_and_set_bit(flag, F2FS_I(inode)->flags);
2590 __mark_inode_dirty_flag(inode, flag, true);
2593 static inline int is_inode_flag_set(struct inode *inode, int flag)
2595 return test_bit(flag, F2FS_I(inode)->flags);
2598 static inline void clear_inode_flag(struct inode *inode, int flag)
2600 test_and_clear_bit(flag, F2FS_I(inode)->flags);
2601 __mark_inode_dirty_flag(inode, flag, false);
2604 static inline bool f2fs_verity_in_progress(struct inode *inode)
2606 return IS_ENABLED(CONFIG_FS_VERITY) &&
2607 is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2610 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2612 F2FS_I(inode)->i_acl_mode = mode;
2613 set_inode_flag(inode, FI_ACL_MODE);
2614 f2fs_mark_inode_dirty_sync(inode, false);
static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
	if (inc)
		inc_nlink(inode);
	else
		drop_nlink(inode);

	f2fs_mark_inode_dirty_sync(inode, true);
}
2626 static inline void f2fs_i_blocks_write(struct inode *inode,
2627 block_t diff, bool add, bool claim)
2629 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2630 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
	/* add == true && claim == true must be paired with a prior dquot_reserve_block() */
	if (add) {
		if (claim)
			dquot_claim_block(inode, diff);
		else
			dquot_alloc_block_nofail(inode, diff);
	} else {
		dquot_free_block(inode, diff);
	}
2642 f2fs_mark_inode_dirty_sync(inode, true);
2643 if (clean || recover)
2644 set_inode_flag(inode, FI_AUTO_RECOVER);
2647 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2649 bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2650 bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2652 if (i_size_read(inode) == i_size)
2655 i_size_write(inode, i_size);
2656 f2fs_mark_inode_dirty_sync(inode, true);
2657 if (clean || recover)
2658 set_inode_flag(inode, FI_AUTO_RECOVER);
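/*
 * FI_AUTO_RECOVER marks an inode whose only un-synced changes (i_size or
 * block count) can be restored by roll-forward recovery; it lets
 * f2fs_skip_inode_update() elide a full inode write-back on fsync.
 */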
2661 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2663 F2FS_I(inode)->i_current_depth = depth;
2664 f2fs_mark_inode_dirty_sync(inode, true);
2667 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2670 F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2671 f2fs_mark_inode_dirty_sync(inode, true);
2674 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2676 F2FS_I(inode)->i_xattr_nid = xnid;
2677 f2fs_mark_inode_dirty_sync(inode, true);
2680 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2682 F2FS_I(inode)->i_pino = pino;
2683 f2fs_mark_inode_dirty_sync(inode, true);
2686 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2688 struct f2fs_inode_info *fi = F2FS_I(inode);
2690 if (ri->i_inline & F2FS_INLINE_XATTR)
2691 set_bit(FI_INLINE_XATTR, fi->flags);
2692 if (ri->i_inline & F2FS_INLINE_DATA)
2693 set_bit(FI_INLINE_DATA, fi->flags);
2694 if (ri->i_inline & F2FS_INLINE_DENTRY)
2695 set_bit(FI_INLINE_DENTRY, fi->flags);
2696 if (ri->i_inline & F2FS_DATA_EXIST)
2697 set_bit(FI_DATA_EXIST, fi->flags);
2698 if (ri->i_inline & F2FS_INLINE_DOTS)
2699 set_bit(FI_INLINE_DOTS, fi->flags);
2700 if (ri->i_inline & F2FS_EXTRA_ATTR)
2701 set_bit(FI_EXTRA_ATTR, fi->flags);
2702 if (ri->i_inline & F2FS_PIN_FILE)
2703 set_bit(FI_PIN_FILE, fi->flags);
static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
	ri->i_inline = 0;

2710 if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2711 ri->i_inline |= F2FS_INLINE_XATTR;
2712 if (is_inode_flag_set(inode, FI_INLINE_DATA))
2713 ri->i_inline |= F2FS_INLINE_DATA;
2714 if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2715 ri->i_inline |= F2FS_INLINE_DENTRY;
2716 if (is_inode_flag_set(inode, FI_DATA_EXIST))
2717 ri->i_inline |= F2FS_DATA_EXIST;
2718 if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2719 ri->i_inline |= F2FS_INLINE_DOTS;
2720 if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2721 ri->i_inline |= F2FS_EXTRA_ATTR;
2722 if (is_inode_flag_set(inode, FI_PIN_FILE))
2723 ri->i_inline |= F2FS_PIN_FILE;
2726 static inline int f2fs_has_extra_attr(struct inode *inode)
2728 return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2731 static inline int f2fs_has_inline_xattr(struct inode *inode)
2733 return is_inode_flag_set(inode, FI_INLINE_XATTR);
2736 static inline int f2fs_compressed_file(struct inode *inode)
2738 return S_ISREG(inode->i_mode) &&
2739 is_inode_flag_set(inode, FI_COMPRESSED_FILE);
2742 static inline unsigned int addrs_per_inode(struct inode *inode)
2744 unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
2745 get_inline_xattr_addrs(inode);
	if (!f2fs_compressed_file(inode))
		return addrs;
	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
}
2752 static inline unsigned int addrs_per_block(struct inode *inode)
2754 if (!f2fs_compressed_file(inode))
2755 return DEF_ADDRS_PER_BLOCK;
2756 return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
2759 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2761 struct f2fs_inode *ri = F2FS_INODE(page);
2763 return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
2764 get_inline_xattr_addrs(inode)]);
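/*
 * Inline xattrs occupy the tail of the inode's i_addr[] array, which is
 * why addrs_per_inode() above subtracts get_inline_xattr_addrs().
 */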
2767 static inline int inline_xattr_size(struct inode *inode)
2769 if (f2fs_has_inline_xattr(inode))
		return get_inline_xattr_addrs(inode) * sizeof(__le32);
	return 0;
}
2774 static inline int f2fs_has_inline_data(struct inode *inode)
2776 return is_inode_flag_set(inode, FI_INLINE_DATA);
2779 static inline int f2fs_exist_data(struct inode *inode)
2781 return is_inode_flag_set(inode, FI_DATA_EXIST);
2784 static inline int f2fs_has_inline_dots(struct inode *inode)
2786 return is_inode_flag_set(inode, FI_INLINE_DOTS);
2789 static inline int f2fs_is_mmap_file(struct inode *inode)
2791 return is_inode_flag_set(inode, FI_MMAP_FILE);
2794 static inline bool f2fs_is_pinned_file(struct inode *inode)
2796 return is_inode_flag_set(inode, FI_PIN_FILE);
2799 static inline bool f2fs_is_atomic_file(struct inode *inode)
2801 return is_inode_flag_set(inode, FI_ATOMIC_FILE);
2804 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
2806 return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
2809 static inline bool f2fs_is_volatile_file(struct inode *inode)
2811 return is_inode_flag_set(inode, FI_VOLATILE_FILE);
2814 static inline bool f2fs_is_first_block_written(struct inode *inode)
2816 return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
2819 static inline bool f2fs_is_drop_cache(struct inode *inode)
2821 return is_inode_flag_set(inode, FI_DROP_CACHE);
2824 static inline void *inline_data_addr(struct inode *inode, struct page *page)
2826 struct f2fs_inode *ri = F2FS_INODE(page);
2827 int extra_size = get_extra_isize(inode);
2829 return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
2832 static inline int f2fs_has_inline_dentry(struct inode *inode)
2834 return is_inode_flag_set(inode, FI_INLINE_DENTRY);
2837 static inline int is_file(struct inode *inode, int type)
2839 return F2FS_I(inode)->i_advise & type;
2842 static inline void set_file(struct inode *inode, int type)
2844 F2FS_I(inode)->i_advise |= type;
2845 f2fs_mark_inode_dirty_sync(inode, true);
2848 static inline void clear_file(struct inode *inode, int type)
2850 F2FS_I(inode)->i_advise &= ~type;
2851 f2fs_mark_inode_dirty_sync(inode, true);
static inline bool f2fs_is_time_consistent(struct inode *inode)
{
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
		return false;
	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
						&F2FS_I(inode)->i_crtime))
		return false;
	return true;
}
static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
{
	bool ret;

	if (dsync) {
		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		ret = list_empty(&F2FS_I(inode)->gdirty_list);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return ret;
	}

	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
			file_keep_isize(inode) ||
			i_size_read(inode) & ~PAGE_MASK)
		return false;

	if (!f2fs_is_time_consistent(inode))
		return false;

	spin_lock(&F2FS_I(inode)->i_size_lock);
	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
	spin_unlock(&F2FS_I(inode)->i_size_lock);

	return ret;
}
2895 static inline bool f2fs_readonly(struct super_block *sb)
2897 return sb_rdonly(sb);
2900 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
2902 return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
2905 static inline bool is_dot_dotdot(const struct qstr *str)
	if (str->len == 1 && str->name[0] == '.')
		return true;

	if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
		return true;

	return false;
}
2916 static inline bool f2fs_may_extent_tree(struct inode *inode)
2918 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	if (!test_opt(sbi, EXTENT_CACHE) ||
			is_inode_flag_set(inode, FI_NO_EXTENT) ||
			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;

	/*
	 * Don't create extent trees for files recovered during mount while
	 * the shrinker is not yet registered (i.e. sbi is not on s_list).
	 */
	if (list_empty(&sbi->s_list))
		return false;

	return S_ISREG(inode->i_mode);
}
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
					size_t size, gfp_t flags)
{
	void *ret;

	if (time_to_inject(sbi, FAULT_KMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
		return NULL;
	}

	ret = kmalloc(size, flags);
	if (ret)
		return ret;

	return kvmalloc(size, flags);
}
2952 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
2953 size_t size, gfp_t flags)
2955 return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
2958 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
2959 size_t size, gfp_t flags)
	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
		return NULL;
	}

	return kvmalloc(size, flags);
}
2969 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
2970 size_t size, gfp_t flags)
2972 return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
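/*
 * Since f2fs_kmalloc() can fall back to kvmalloc(), callers should
 * release the result with kvfree(), which handles both kmalloc()ed and
 * vmalloc()ed memory.
 */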
2975 static inline int get_extra_isize(struct inode *inode)
2977 return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
2980 static inline int get_inline_xattr_addrs(struct inode *inode)
2982 return F2FS_I(inode)->i_inline_xattr_size;
2985 #define f2fs_get_inode_mode(i) \
2986 ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
2987 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
2989 #define F2FS_TOTAL_EXTRA_ATTR_SIZE \
2990 (offsetof(struct f2fs_inode, i_extra_end) - \
2991 offsetof(struct f2fs_inode, i_extra_isize)) \
2993 #define F2FS_OLD_ATTRIBUTE_SIZE (offsetof(struct f2fs_inode, i_addr))
2994 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field) \
2995 ((offsetof(typeof(*(f2fs_inode)), field) + \
2996 sizeof((f2fs_inode)->field)) \
2997 <= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
	int i;

	spin_lock(&sbi->iostat_lock);
3004 for (i = 0; i < NR_IO_TYPE; i++)
3005 sbi->write_iostat[i] = 0;
3006 spin_unlock(&sbi->iostat_lock);
3009 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3010 enum iostat_type type, unsigned long long io_bytes)
	if (!sbi->iostat_enable)
		return;

	spin_lock(&sbi->iostat_lock);
3015 sbi->write_iostat[type] += io_bytes;
3017 if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3018 sbi->write_iostat[APP_BUFFERED_IO] =
3019 sbi->write_iostat[APP_WRITE_IO] -
3020 sbi->write_iostat[APP_DIRECT_IO];
3021 spin_unlock(&sbi->iostat_lock);
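/*
 * APP_BUFFERED_IO is derived rather than sampled: it is kept equal to
 * total application writes minus direct writes.
 */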
3024 #define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
3026 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3028 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3029 block_t blkaddr, int type);
3030 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3031 block_t blkaddr, int type)
3033 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
			 blkaddr, type);
		f2fs_bug_on(sbi, 1);
	}
}
3040 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
			blkaddr == COMPRESS_ADDR)
		return false;
	return true;
}
static inline void f2fs_set_page_private(struct page *page,
						unsigned long data)
{
	if (PagePrivate(page))
		return;

	attach_page_private(page, (void *)data);
}
3057 static inline void f2fs_clear_page_private(struct page *page)
3059 detach_page_private(page);
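/*
 * file.c
 */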
3065 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3066 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3067 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3068 int f2fs_truncate(struct inode *inode);
3069 int f2fs_getattr(const struct path *path, struct kstat *stat,
3070 u32 request_mask, unsigned int flags);
3071 int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
3072 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3073 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3074 int f2fs_precache_extents(struct inode *inode);
3075 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3076 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3077 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3078 int f2fs_pin_file_control(struct inode *inode, bool inc);
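/*
 * inode.c
 */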
3083 void f2fs_set_inode_flags(struct inode *inode);
3084 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3085 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3086 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3087 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3088 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3089 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3090 void f2fs_update_inode_page(struct inode *inode);
3091 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3092 void f2fs_evict_inode(struct inode *inode);
3093 void f2fs_handle_failed_inode(struct inode *inode);
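/*
 * namei.c
 */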
3098 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3099 bool hot, bool set);
3100 struct dentry *f2fs_get_parent(struct dentry *child);
extern int f2fs_ci_compare(const struct inode *parent,
				const struct qstr *name,
				const struct qstr *entry,
				bool quick);
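/*
 * dir.c
 */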
3110 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3111 struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
3112 f2fs_hash_t namehash, int *max_slots,
3113 struct f2fs_dentry_ptr *d);
3114 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3115 unsigned int start_pos, struct fscrypt_str *fstr);
3116 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3117 struct f2fs_dentry_ptr *d);
3118 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3119 const struct qstr *new_name,
3120 const struct qstr *orig_name, struct page *dpage);
3121 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3122 unsigned int current_depth);
3123 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3124 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3125 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3126 struct fscrypt_name *fname, struct page **res_page);
3127 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3128 const struct qstr *child, struct page **res_page);
3129 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3130 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3131 struct page **page);
3132 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3133 struct page *page, struct inode *inode);
3134 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3135 struct fscrypt_name *fname);
3136 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3137 const struct qstr *name, f2fs_hash_t name_hash,
3138 unsigned int bit_pos);
3139 int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
3140 const struct qstr *orig_name,
3141 struct inode *inode, nid_t ino, umode_t mode);
3142 int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
3143 struct inode *inode, nid_t ino, umode_t mode);
3144 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3145 struct inode *inode, nid_t ino, umode_t mode);
3146 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3147 struct inode *dir, struct inode *inode);
3148 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3149 bool f2fs_empty_dir(struct inode *dir);
3151 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3153 return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3154 inode, inode->i_ino, inode->i_mode);
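/*
 * super.c
 */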
3160 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3161 void f2fs_inode_synced(struct inode *inode);
3162 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3163 int f2fs_quota_sync(struct super_block *sb, int type);
3164 void f2fs_quota_off_umount(struct super_block *sb);
3165 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3166 int f2fs_sync_fs(struct super_block *sb, int sync);
3167 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
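/*
 * hash.c
 */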
3172 f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
3173 const struct qstr *name_info, struct fscrypt_name *fname);
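/*
 * node.c
 */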
3178 struct dnode_of_data;
3181 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3182 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3183 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3184 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3185 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3186 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3187 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3188 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3189 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3190 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3191 struct node_info *ni);
3192 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3193 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3194 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3195 int f2fs_truncate_xattr_node(struct inode *inode);
3196 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3197 unsigned int seq_id);
3198 int f2fs_remove_inode_page(struct inode *inode);
3199 struct page *f2fs_new_inode_page(struct inode *inode);
3200 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3201 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3202 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3203 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3204 int f2fs_move_node_page(struct page *node_page, int gc_type);
3205 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3206 struct writeback_control *wbc, bool atomic,
3207 unsigned int *seq_id);
3208 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3209 struct writeback_control *wbc,
3210 bool do_balance, enum iostat_type io_type);
3211 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3212 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3213 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3214 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3215 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3216 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3217 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3218 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3219 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3220 unsigned int segno, struct f2fs_summary_block *sum);
3221 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3222 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3223 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3224 int __init f2fs_create_node_manager_caches(void);
3225 void f2fs_destroy_node_manager_caches(void);
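/*
 * segment.c
 */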
3230 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3231 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3232 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3233 void f2fs_drop_inmem_pages(struct inode *inode);
3234 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3235 int f2fs_commit_inmem_pages(struct inode *inode);
3236 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3237 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3238 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3239 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3240 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3241 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3242 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3243 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3244 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3245 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3246 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3247 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3248 struct cp_control *cpc);
3249 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3250 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3251 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3252 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3253 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3254 void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3255 unsigned int start, unsigned int end);
3256 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type);
3257 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3258 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3259 struct cp_control *cpc);
3260 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3261 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3263 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3264 enum iostat_type io_type);
3265 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3266 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3267 struct f2fs_io_info *fio);
3268 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3269 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3270 block_t old_blkaddr, block_t new_blkaddr,
3271 bool recover_curseg, bool recover_newaddr);
3272 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3273 block_t old_addr, block_t new_addr,
3274 unsigned char version, bool recover_curseg,
3275 bool recover_newaddr);
3276 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3277 block_t old_blkaddr, block_t *new_blkaddr,
3278 struct f2fs_summary *sum, int type,
3279 struct f2fs_io_info *fio, bool add_list);
3280 void f2fs_wait_on_page_writeback(struct page *page,
3281 enum page_type type, bool ordered, bool locked);
3282 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3283 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3285 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3286 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3287 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3288 unsigned int val, int alloc);
3289 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3290 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3291 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3292 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3293 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3294 int __init f2fs_create_segment_manager_caches(void);
3295 void f2fs_destroy_segment_manager_caches(void);
3296 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3297 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3298 enum page_type type, enum temp_type temp);
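/*
 * checkpoint.c
 */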
3303 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3304 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3305 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3306 struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
3307 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3308 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3309 block_t blkaddr, int type);
3310 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3311 int type, bool sync);
3312 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3313 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3314 long nr_to_write, enum iostat_type io_type);
3315 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3316 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3317 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3318 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3319 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3320 unsigned int devidx, int type);
3321 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3322 unsigned int devidx, int type);
3323 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3324 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3325 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3326 void f2fs_add_orphan_inode(struct inode *inode);
3327 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3328 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3329 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3330 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3331 void f2fs_remove_dirty_inode(struct inode *inode);
3332 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3333 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3334 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3335 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3336 int __init f2fs_create_checkpoint_caches(void);
3337 void f2fs_destroy_checkpoint_caches(void);
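/*
 * data.c
 */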
3342 int __init f2fs_init_bioset(void);
3343 void f2fs_destroy_bioset(void);
3344 struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
3345 int f2fs_init_bio_entry_cache(void);
3346 void f2fs_destroy_bio_entry_cache(void);
3347 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3348 struct bio *bio, enum page_type type);
3349 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3350 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3351 struct inode *inode, struct page *page,
3352 nid_t ino, enum page_type type);
3353 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3354 struct bio **bio, struct page *page);
3355 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3356 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3357 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3358 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3359 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3360 block_t blk_addr, struct bio *bio);
3361 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3362 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3363 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3364 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3365 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3366 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3367 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
3368 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3369 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3370 int op_flags, bool for_write);
3371 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3372 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3374 struct page *f2fs_get_new_data_page(struct inode *inode,
3375 struct page *ipage, pgoff_t index, bool new_i_size);
3376 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3377 void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3378 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3379 int create, int flag);
3380 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3381 u64 start, u64 len);
3382 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3383 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3384 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3385 int f2fs_write_single_data_page(struct page *page, int *submitted,
3386 struct bio **bio, sector_t *last_block,
3387 struct writeback_control *wbc,
3388 enum iostat_type io_type,
3390 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3391 unsigned int length);
3392 int f2fs_release_page(struct page *page, gfp_t wait);
3393 #ifdef CONFIG_MIGRATION
3394 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3395 struct page *page, enum migrate_mode mode);
3397 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3398 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3399 int f2fs_init_post_read_processing(void);
3400 void f2fs_destroy_post_read_processing(void);
3401 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3402 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
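/*
 * gc.c
 */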
3407 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3408 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3409 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3410 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
3411 unsigned int segno);
3412 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3413 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
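/*
 * recovery.c
 */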
3418 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3419 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
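/*
 * debug.c
 */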
3424 #ifdef CONFIG_F2FS_STAT_FS
3425 struct f2fs_stat_info {
3426 struct list_head stat_list;
3427 struct f2fs_sb_info *sbi;
3428 int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3429 int main_area_segs, main_area_sections, main_area_zones;
3430 unsigned long long hit_largest, hit_cached, hit_rbtree;
3431 unsigned long long hit_total, total_ext;
3432 int ext_tree, zombie_tree, ext_node;
3433 int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3434 int ndirty_data, ndirty_qdata;
3436 unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3437 int nats, dirty_nats, sits, dirty_sits;
3438 int free_nids, avail_nids, alloc_nids;
3439 int total_count, utilization;
3440 int bg_gc, nr_wb_cp_data, nr_wb_data;
3441 int nr_rd_data, nr_rd_node, nr_rd_meta;
3442 int nr_dio_read, nr_dio_write;
3443 unsigned int io_skip_bggc, other_skip_bggc;
3444 int nr_flushing, nr_flushed, flush_list_empty;
3445 int nr_discarding, nr_discarded;
3447 unsigned int undiscard_blks;
3448 int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3449 int compr_inode, compr_blocks;
3450 int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3451 unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3452 unsigned int bimodal, avg_vblocks;
3453 int util_free, util_valid, util_invalid;
3454 int rsvd_segs, overp_segs;
3455 int dirty_count, node_pages, meta_pages;
3456 int prefree_count, call_count, cp_count, bg_cp_count;
3457 int tot_segs, node_segs, data_segs, free_segs, free_secs;
3458 int bg_node_segs, bg_data_segs;
3459 int tot_blks, data_blks, node_blks;
3460 int bg_data_blks, bg_node_blks;
3461 unsigned long long skipped_atomic_files[2];
3462 int curseg[NR_CURSEG_TYPE];
3463 int cursec[NR_CURSEG_TYPE];
3464 int curzone[NR_CURSEG_TYPE];
3466 unsigned int meta_count[META_MAX];
3467 unsigned int segment_count[2];
3468 unsigned int block_count[2];
3469 unsigned int inplace_count;
3470 unsigned long long base_mem, cache_mem, page_mem;
3473 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3475 return (struct f2fs_stat_info *)sbi->stat_info;
3478 #define stat_inc_cp_count(si) ((si)->cp_count++)
3479 #define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
3480 #define stat_inc_call_count(si) ((si)->call_count++)
3481 #define stat_inc_bggc_count(si) ((si)->bg_gc++)
3482 #define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
3483 #define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
3484 #define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
3485 #define stat_dec_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]--)
3486 #define stat_inc_total_hit(sbi) (atomic64_inc(&(sbi)->total_hit_ext))
3487 #define stat_inc_rbtree_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_rbtree))
3488 #define stat_inc_largest_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_largest))
3489 #define stat_inc_cached_node_hit(sbi) (atomic64_inc(&(sbi)->read_hit_cached))
3490 #define stat_inc_inline_xattr(inode) \
3492 if (f2fs_has_inline_xattr(inode)) \
3493 (atomic_inc(&F2FS_I_SB(inode)->inline_xattr)); \
3495 #define stat_dec_inline_xattr(inode) \
3497 if (f2fs_has_inline_xattr(inode)) \
3498 (atomic_dec(&F2FS_I_SB(inode)->inline_xattr)); \
3500 #define stat_inc_inline_inode(inode) \
3502 if (f2fs_has_inline_data(inode)) \
3503 (atomic_inc(&F2FS_I_SB(inode)->inline_inode)); \
3505 #define stat_dec_inline_inode(inode) \
3507 if (f2fs_has_inline_data(inode)) \
3508 (atomic_dec(&F2FS_I_SB(inode)->inline_inode)); \
3510 #define stat_inc_inline_dir(inode) \
3512 if (f2fs_has_inline_dentry(inode)) \
3513 (atomic_inc(&F2FS_I_SB(inode)->inline_dir)); \
3515 #define stat_dec_inline_dir(inode) \
3517 if (f2fs_has_inline_dentry(inode)) \
3518 (atomic_dec(&F2FS_I_SB(inode)->inline_dir)); \
3520 #define stat_inc_compr_inode(inode) \
3522 if (f2fs_compressed_file(inode)) \
3523 (atomic_inc(&F2FS_I_SB(inode)->compr_inode)); \
3525 #define stat_dec_compr_inode(inode) \
3527 if (f2fs_compressed_file(inode)) \
3528 (atomic_dec(&F2FS_I_SB(inode)->compr_inode)); \
3530 #define stat_add_compr_blocks(inode, blocks) \
3531 (atomic_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3532 #define stat_sub_compr_blocks(inode, blocks) \
3533 (atomic_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
3534 #define stat_inc_meta_count(sbi, blkaddr) \
3536 if (blkaddr < SIT_I(sbi)->sit_base_addr) \
3537 atomic_inc(&(sbi)->meta_count[META_CP]); \
3538 else if (blkaddr < NM_I(sbi)->nat_blkaddr) \
3539 atomic_inc(&(sbi)->meta_count[META_SIT]); \
3540 else if (blkaddr < SM_I(sbi)->ssa_blkaddr) \
3541 atomic_inc(&(sbi)->meta_count[META_NAT]); \
3542 else if (blkaddr < SM_I(sbi)->main_blkaddr) \
3543 atomic_inc(&(sbi)->meta_count[META_SSA]); \
3545 #define stat_inc_seg_type(sbi, curseg) \
3546 ((sbi)->segment_count[(curseg)->alloc_type]++)
3547 #define stat_inc_block_count(sbi, curseg) \
3548 ((sbi)->block_count[(curseg)->alloc_type]++)
3549 #define stat_inc_inplace_blocks(sbi) \
3550 (atomic_inc(&(sbi)->inplace_count))
#define stat_update_max_atomic_write(inode) \
	do { \
		int cur = F2FS_I_SB(inode)->atomic_files; \
		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
	} while (0)
3558 #define stat_inc_volatile_write(inode) \
3559 (atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3560 #define stat_dec_volatile_write(inode) \
3561 (atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
#define stat_update_max_volatile_write(inode) \
	do { \
		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt); \
		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt); \
		if (cur > max) \
			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur); \
	} while (0)
#define stat_inc_seg_count(sbi, type, gc_type) \
	do { \
		struct f2fs_stat_info *si = F2FS_STAT(sbi); \
		si->tot_segs++; \
		if ((type) == SUM_TYPE_DATA) { \
			si->data_segs++; \
			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
		} else { \
			si->node_segs++; \
			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
		} \
	} while (0)
3582 #define stat_inc_tot_blk_count(si, blks) \
3583 ((si)->tot_blks += (blks))
3585 #define stat_inc_data_blk_count(sbi, blks, gc_type) \
3587 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
3588 stat_inc_tot_blk_count(si, blks); \
3589 si->data_blks += (blks); \
3590 si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
3593 #define stat_inc_node_blk_count(sbi, blks, gc_type) \
3595 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
3596 stat_inc_tot_blk_count(si, blks); \
3597 si->node_blks += (blks); \
3598 si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0; \
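/*
 * Illustrative usage (sketch only, not taken from this header): a GC path
 * that reclaims one data segment containing `blks` valid blocks would
 * typically bump both counters:
 *
 *	stat_inc_seg_count(sbi, SUM_TYPE_DATA, gc_type);
 *	stat_inc_data_blk_count(sbi, blks, gc_type);
 */
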
int f2fs_build_stats(struct f2fs_sb_info *sbi);
void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
#define stat_inc_cp_count(si)				do { } while (0)
#define stat_inc_bg_cp_count(si)			do { } while (0)
#define stat_inc_call_count(si)				do { } while (0)
#define stat_inc_bggc_count(si)				do { } while (0)
#define stat_io_skip_bggc_count(sbi)			do { } while (0)
#define stat_other_skip_bggc_count(sbi)			do { } while (0)
#define stat_inc_dirty_inode(sbi, type)			do { } while (0)
#define stat_dec_dirty_inode(sbi, type)			do { } while (0)
#define stat_inc_total_hit(sbi)				do { } while (0)
#define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
#define stat_inc_largest_node_hit(sbi)			do { } while (0)
#define stat_inc_cached_node_hit(sbi)			do { } while (0)
#define stat_inc_inline_xattr(inode)			do { } while (0)
#define stat_dec_inline_xattr(inode)			do { } while (0)
#define stat_inc_inline_inode(inode)			do { } while (0)
#define stat_dec_inline_inode(inode)			do { } while (0)
#define stat_inc_inline_dir(inode)			do { } while (0)
#define stat_dec_inline_dir(inode)			do { } while (0)
#define stat_inc_compr_inode(inode)			do { } while (0)
#define stat_dec_compr_inode(inode)			do { } while (0)
#define stat_add_compr_blocks(inode, blocks)		do { } while (0)
#define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
#define stat_inc_atomic_write(inode)			do { } while (0)
#define stat_dec_atomic_write(inode)			do { } while (0)
#define stat_update_max_atomic_write(inode)		do { } while (0)
#define stat_inc_volatile_write(inode)			do { } while (0)
#define stat_dec_volatile_write(inode)			do { } while (0)
#define stat_update_max_volatile_write(inode)		do { } while (0)
#define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
#define stat_inc_seg_type(sbi, curseg)			do { } while (0)
#define stat_inc_block_count(sbi, curseg)		do { } while (0)
#define stat_inc_inplace_blocks(sbi)			do { } while (0)
#define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
#define stat_inc_tot_blk_count(si, blks)		do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif

extern const struct file_operations f2fs_dir_operations;
#ifdef CONFIG_UNICODE
extern const struct dentry_operations f2fs_dentry_ops;
#endif
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
extern struct kmem_cache *f2fs_inode_entry_slab;

/*
 * inline.c
 */
bool f2fs_may_inline_data(struct inode *inode);
bool f2fs_may_inline_dentry(struct inode *inode);
void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
void f2fs_truncate_inline_inode(struct inode *inode,
				struct page *ipage, u64 from);
int f2fs_read_inline_data(struct inode *inode, struct page *page);
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
int f2fs_convert_inline_inode(struct inode *inode);
int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
			struct fscrypt_name *fname, struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
			struct page *ipage);
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
			const struct qstr *orig_name,
			struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
			struct page *page, struct inode *dir,
			struct inode *inode);
bool f2fs_empty_inline_dir(struct inode *dir);
int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
			struct fscrypt_str *fstr);
int f2fs_inline_data_fiemap(struct inode *inode,
			struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len);

/*
 * shrinker.c
 */
unsigned long f2fs_shrink_count(struct shrinker *shrink,
			struct shrink_control *sc);
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
			struct shrink_control *sc);
void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);

/*
 * extent_cache.c
 */
struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
				struct rb_entry *cached_re, unsigned int ofs);
struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root,
				struct rb_node **parent,
				unsigned int ofs, bool *leftmost);
struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
		struct rb_entry *cached_re, unsigned int ofs,
		struct rb_entry **prev_entry, struct rb_entry **next_entry,
		struct rb_node ***insert_p, struct rb_node **insert_parent,
		bool force, bool *leftmost);
bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
				struct rb_root_cached *root);
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext);
void f2fs_drop_extent_tree(struct inode *inode);
unsigned int f2fs_destroy_extent_node(struct inode *inode);
void f2fs_destroy_extent_tree(struct inode *inode);
bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
			struct extent_info *ei);
void f2fs_update_extent_cache(struct dnode_of_data *dn);
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
			pgoff_t fofs, block_t blkaddr, unsigned int len);
void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_extent_cache(void);
void f2fs_destroy_extent_cache(void);

/*
 * sysfs.c
 */
int __init f2fs_init_sysfs(void);
void f2fs_exit_sysfs(void);
int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);

/* verity.c */
extern const struct fsverity_operations f2fs_verityops;

/*
 * crypto support
 */
static inline bool f2fs_encrypted_file(struct inode *inode)
{
	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
}

static inline void f2fs_set_encrypted_inode(struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	file_set_encrypt(inode);
	f2fs_set_inode_flags(inode);
#endif
}

/*
 * Returns true if the reads of the inode's data need to undergo some
 * postprocessing step, like decryption or authenticity verification.
 */
static inline bool f2fs_post_read_required(struct inode *inode)
{
	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
		f2fs_compressed_file(inode);
}

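/*
 * Example (illustrative sketch, not from this header): callers on the read
 * path can use this predicate to decide whether read pages are usable as-is
 * or must first pass through post-read processing:
 *
 *	if (f2fs_post_read_required(inode))
 *		;	// queue work for decryption/verity/decompression
 *	else
 *		;	// pages are usable once the read bio completes
 */
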
/*
 * compress.c
 */
#ifdef CONFIG_F2FS_FS_COMPRESSION
bool f2fs_is_compressed_page(struct page *page);
struct page *f2fs_compress_control_page(struct page *page);
int f2fs_prepare_compress_overwrite(struct inode *inode,
				struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
				pgoff_t index, unsigned copied);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
						int *submitted,
						struct writeback_control *wbc,
						enum iostat_type io_type);
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
				unsigned nr_pages, sector_t *last_block_in_bio,
				bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic);
void f2fs_decompress_end_io(struct page **rpages,
			unsigned int cluster_size, bool err, bool verity);
int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
#else
static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
{
	if (!f2fs_compressed_file(inode))
		return true;
	/* compression is not supported */
	return false;
}
static inline struct page *f2fs_compress_control_page(struct page *page)
{
	WARN_ON_ONCE(1);
	return ERR_PTR(-EINVAL);
}
#endif

static inline void set_compress_context(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	F2FS_I(inode)->i_compress_algorithm =
			F2FS_OPTION(sbi).compress_algorithm;
	F2FS_I(inode)->i_log_cluster_size =
			F2FS_OPTION(sbi).compress_log_size;
	F2FS_I(inode)->i_cluster_size =
			1 << F2FS_I(inode)->i_log_cluster_size;
	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
	set_inode_flag(inode, FI_COMPRESSED_FILE);
	stat_inc_compr_inode(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
}

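/*
 * Worked example (illustrative; the actual values come from the mount
 * options): with compress_log_size == 2, the inode gets
 * i_log_cluster_size == 2 and i_cluster_size == (1 << 2) == 4 blocks,
 * i.e. a 16KB compression cluster with 4KB blocks.
 */
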
static inline u64 f2fs_disable_compressed_file(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	if (!f2fs_compressed_file(inode))
		return 0;
	if (S_ISREG(inode->i_mode)) {
		if (get_dirty_pages(inode))
			return 1;
		if (fi->i_compr_blocks)
			return fi->i_compr_blocks;
	}

	fi->i_flags &= ~F2FS_COMPR_FL;
	stat_dec_compr_inode(inode);
	clear_inode_flag(inode, FI_COMPRESSED_FILE);
	f2fs_mark_inode_dirty_sync(inode, true);
	return 0;
}

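/*
 * Return contract, as implemented above: 0 means the compression flag was
 * cleared (or the file was not compressed to begin with); 1 means dirty
 * pages are still pending writeback; any larger value is the remaining
 * i_compr_blocks count, so the caller can tell why disabling failed.
 */
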
#define F2FS_FEATURE_FUNCS(name, flagname) \
static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
{ \
	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
}

F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
F2FS_FEATURE_FUNCS(verity, VERITY);
F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
F2FS_FEATURE_FUNCS(compression, COMPRESSION);

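/*
 * Each invocation above expands to a small feature predicate; for example,
 * F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) generates:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */
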
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
				    block_t blkaddr)
{
	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;

	return test_bit(zno, FDEV(devi).blkz_seq);
}
#endif

static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
	return f2fs_sb_has_blkzoned(sbi);
}

static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
{
	return blk_queue_discard(bdev_get_queue(bdev)) ||
	       bdev_is_zoned(bdev);
}

static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return f2fs_bdev_support_discard(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (f2fs_bdev_support_discard(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
{
	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
					f2fs_hw_should_discard(sbi);
}

static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
{
	int i;

	if (!f2fs_is_multi_device(sbi))
		return bdev_read_only(sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++)
		if (bdev_read_only(FDEV(i).bdev))
			return true;
	return false;
}

static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
{
	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
}

static inline bool f2fs_may_encrypt(struct inode *dir, struct inode *inode)
{
#ifdef CONFIG_FS_ENCRYPTION
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	umode_t mode = inode->i_mode;

	/*
	 * If the directory is encrypted or dummy encryption is enabled,
	 * then we should encrypt the inode.
	 */
	if (IS_ENCRYPTED(dir) || DUMMY_ENCRYPTION_ENABLED(sbi))
		return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#endif
	return false;
}

static inline bool f2fs_may_compress(struct inode *inode)
{
	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
				f2fs_is_atomic_file(inode) ||
				f2fs_is_volatile_file(inode))
		return false;
	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static inline void f2fs_i_compr_blocks_update(struct inode *inode,
						u64 blocks, bool add)
{
	int diff = F2FS_I(inode)->i_cluster_size - blocks;

	if (add) {
		F2FS_I(inode)->i_compr_blocks += diff;
		stat_add_compr_blocks(inode, diff);
	} else {
		F2FS_I(inode)->i_compr_blocks -= diff;
		stat_sub_compr_blocks(inode, diff);
	}
	f2fs_mark_inode_dirty_sync(inode, true);
}

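/*
 * Worked example (illustrative): with i_cluster_size == 4 and blocks == 1,
 * diff == 3, so storing a 4-block cluster in a single block credits three
 * saved blocks to i_compr_blocks when add is true, and debits them again
 * when the cluster is rewritten uncompressed.
 */
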
static inline int block_unaligned_IO(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
	loff_t offset = iocb->ki_pos;
	unsigned long align = offset | iov_iter_alignment(iter);

	return align & blocksize_mask;
}

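/*
 * Worked example (assuming 4KB blocks, i.e. i_blkbits == 12): blocksize_mask
 * is 0xfff, so a direct IO at file offset 512, or backed by a buffer that is
 * only 512-byte aligned, yields a non-zero (offset | alignment) & 0xfff and
 * is treated as unaligned.
 */
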
static inline int allow_outplace_dio(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
				!block_unaligned_IO(inode, iocb, iter));
}

static inline bool f2fs_force_buffered_io(struct inode *inode,
				struct kiocb *iocb, struct iov_iter *iter)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int rw = iov_iter_rw(iter);

	if (f2fs_post_read_required(inode))
		return true;
	if (f2fs_is_multi_device(sbi))
		return true;
	if (f2fs_compressed_file(inode))
		return true;
	/*
	 * for a zoned block device, fall back from direct IO to buffered IO,
	 * so that all IOs can be serialized by the log-structured write path.
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		return true;
	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
		if (block_unaligned_IO(inode, iocb, iter))
			return true;
		if (F2FS_IO_ALIGNED(sbi))
			return true;
	}
	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
					!IS_SWAPFILE(inode))
		return true;

	return false;
}

#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type);
#else
#define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
#endif

static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
{
#ifdef CONFIG_QUOTA
	if (f2fs_sb_has_quota_ino(sbi))
		return true;
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
		return true;
#endif
	return false;
}

#define EFSBADCRC	EBADMSG		/* Bad CRC detected */
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif /* _LINUX_F2FS_H */