/*
 * fs/f2fs/f2fs.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_F2FS_H
#define _LINUX_F2FS_H

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>

/*
 * For mount options
 */
#define F2FS_MOUNT_BG_GC		0x00000001
#define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
#define F2FS_MOUNT_DISCARD		0x00000004
#define F2FS_MOUNT_NOHEAP		0x00000008
#define F2FS_MOUNT_XATTR_USER		0x00000010
#define F2FS_MOUNT_POSIX_ACL		0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040

#define clear_opt(sbi, option)	(sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option)	(sbi->mount_opt.opt |= F2FS_MOUNT_##option)
#define test_opt(sbi, option)	(sbi->mount_opt.opt & F2FS_MOUNT_##option)

#define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
		typecheck(unsigned long long, b) &&			\
		((long long)((a) - (b)) > 0))
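
/*
 * Illustrative note (not part of the original header): ver_after() compares
 * two checkpoint version numbers in a wrap-around-safe way by testing the
 * sign of their signed difference.  With the hypothetical values below the
 * comparison still holds across an unsigned overflow:
 *
 *	unsigned long long old_ver = ULLONG_MAX;	// about to wrap
 *	unsigned long long new_ver = old_ver + 2;	// wraps to 1
 *
 *	ver_after(new_ver, old_ver);	// true: (long long)(1 - ULLONG_MAX) == 2 > 0
 */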

typedef u32 block_t;	/*
			 * should not change u32, since it is the on-disk block
			 * address format, __le32.
			 */
typedef u32 nid_t;

struct f2fs_mount_info {
	unsigned int	opt;
};

static inline __u32 f2fs_crc32(void *buff, size_t len)
{
	return crc32_le(F2FS_SUPER_MAGIC, buff, len);
}

static inline bool f2fs_crc_valid(__u32 blk_crc, void *buff, size_t buff_size)
{
	return f2fs_crc32(buff, buff_size) == blk_crc;
}
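
/*
 * Illustrative sketch (assumption, not part of the original header): the two
 * CRC helpers above are meant to be used as a pair when writing and later
 * validating an on-disk structure, e.g.:
 *
 *	__u32 crc = f2fs_crc32(buf, len);		// when writing
 *	...
 *	if (!f2fs_crc_valid(crc, buf, len))		// when reading back
 *		return -EINVAL;
 *
 * "buf" and "len" are placeholders for whatever buffer and length the caller
 * checksums; the real call sites live in the checkpoint code, not here.
 */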

/*
 * For checkpoint manager
 */
enum {
	NAT_BITMAP,
	SIT_BITMAP
};

/* for the list of orphan inodes */
struct orphan_inode_entry {
	struct list_head list;	/* list head */
	nid_t ino;		/* inode number */
};

/* for the list of directory inodes */
struct dir_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
};

/* for the list of fsync inodes, used only during recovery */
struct fsync_inode_entry {
	struct list_head list;	/* list head */
	struct inode *inode;	/* vfs inode pointer */
	block_t blkaddr;	/* block address locating the last inode */
};

#define nats_in_cursum(sum)		(le16_to_cpu(sum->n_nats))
#define sits_in_cursum(sum)		(le16_to_cpu(sum->n_sits))

#define nat_in_journal(sum, i)		(sum->nat_j.entries[i].ne)
#define nid_in_journal(sum, i)		(sum->nat_j.entries[i].nid)
#define sit_in_journal(sum, i)		(sum->sit_j.entries[i].se)
#define segno_in_journal(sum, i)	(sum->sit_j.entries[i].segno)

static inline int update_nats_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = nats_in_cursum(rs);
	rs->n_nats = cpu_to_le16(before + i);
	return before;
}

static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
{
	int before = sits_in_cursum(rs);
	rs->n_sits = cpu_to_le16(before + i);
	return before;
}
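
/*
 * Illustrative sketch (assumption): the journal accessors above are typically
 * used to walk the NAT entries packed into the current summary block, e.g.:
 *
 *	for (i = 0; i < nats_in_cursum(sum); i++) {
 *		if (le32_to_cpu(nid_in_journal(sum, i)) == nid)
 *			return nat_in_journal(sum, i);
 *	}
 *
 * update_nats_in_cursum()/update_sits_in_cursum() then grow the journal and
 * return the previous count, i.e. the index of the next free slot.
 */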

/*
 * ioctl commands
 */
#define F2FS_IOC_GETFLAGS		FS_IOC_GETFLAGS
#define F2FS_IOC_SETFLAGS		FS_IOC_SETFLAGS

#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
 * ioctl commands in 32 bit emulation
 */
#define F2FS_IOC32_GETFLAGS		FS_IOC32_GETFLAGS
#define F2FS_IOC32_SETFLAGS		FS_IOC32_SETFLAGS
#endif

/*
 * For INODE and NODE manager
 */
#define XATTR_NODE_OFFSET	(-1)	/*
					 * store xattrs to one node block per
					 * file keeping -1 as its node offset to
					 * distinguish from index node blocks.
					 */
enum {
	ALLOC_NODE,			/* allocate a new node page if needed */
	LOOKUP_NODE,			/* look up a node without readahead */
	LOOKUP_NODE_RA,			/*
					 * look up a node with readahead called
					 * by get_datablock_ro.
					 */
};

#define F2FS_LINK_MAX		32000	/* maximum link count per file */

/* for in-memory extent cache entry */
struct extent_info {
	rwlock_t ext_lock;	/* rwlock for consistency */
	unsigned int fofs;	/* start offset in a file */
	u32 blk_addr;		/* start block address of the extent */
	unsigned int len;	/* length of the extent */
};

/*
 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
 */
#define FADVISE_COLD_BIT	0x01
#define FADVISE_CP_BIT		0x02

struct f2fs_inode_info {
	struct inode vfs_inode;		/* serve a vfs inode */
	unsigned long i_flags;		/* keep an inode flags for ioctl */
	unsigned char i_advise;		/* use to give file attribute hints */
	unsigned int i_current_depth;	/* use only in directory structure */
	unsigned int i_pino;		/* parent inode number */
	umode_t i_acl_mode;		/* keep file acl mode temporarily */

	/* Use below internally in f2fs */
	unsigned long flags;		/* use to pass per-file flags */
	atomic_t dirty_dents;		/* # of dirty dentry pages */
	f2fs_hash_t chash;		/* hash value of given file name */
	unsigned int clevel;		/* maximum level of given file name */
	nid_t i_xattr_nid;		/* node id that contains xattrs */
	struct extent_info ext;		/* in-memory extent cache entry */
};

static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent i_ext)
{
	write_lock(&ext->ext_lock);
	ext->fofs = le32_to_cpu(i_ext.fofs);
	ext->blk_addr = le32_to_cpu(i_ext.blk_addr);
	ext->len = le32_to_cpu(i_ext.len);
	write_unlock(&ext->ext_lock);
}

static inline void set_raw_extent(struct extent_info *ext,
					struct f2fs_extent *i_ext)
{
	read_lock(&ext->ext_lock);
	i_ext->fofs = cpu_to_le32(ext->fofs);
	i_ext->blk_addr = cpu_to_le32(ext->blk_addr);
	i_ext->len = cpu_to_le32(ext->len);
	read_unlock(&ext->ext_lock);
}
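
/*
 * Illustrative note (assumption): get_extent_info() loads an inode's on-disk
 * little-endian extent into the in-memory cache, and set_raw_extent() writes
 * the cache back out, e.g.:
 *
 *	get_extent_info(&F2FS_I(inode)->ext, raw_inode->i_ext);	// on iget
 *	...
 *	set_raw_extent(&F2FS_I(inode)->ext, &raw_inode->i_ext);	// on update
 *
 * "raw_inode" stands for a struct f2fs_inode in the inode's node page; the
 * actual call sites live in inode.c, not in this header.
 */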

struct f2fs_nm_info {
	block_t nat_blkaddr;		/* base disk address of NAT */
	nid_t max_nid;			/* maximum possible node ids */
	nid_t next_scan_nid;		/* the next nid to be scanned */

	/* NAT cache management */
	struct radix_tree_root nat_root;/* root of the nat entry cache */
	rwlock_t nat_tree_lock;		/* protect nat entry tree */
	unsigned int nat_cnt;		/* the # of cached nat entries */
	struct list_head nat_entries;	/* cached nat entry list (clean) */
	struct list_head dirty_nat_entries; /* cached nat entry list (dirty) */

	/* free node ids management */
	struct list_head free_nid_list;	/* a list for free nids */
	spinlock_t free_nid_list_lock;	/* protect free nid list */
	unsigned int fcnt;		/* the number of free node ids */
	struct mutex build_lock;	/* lock for building free nids */

	/* for checkpoint */
	char *nat_bitmap;		/* NAT bitmap pointer */
	int bitmap_size;		/* bitmap size */
};

/*
 * this structure is used as one of function parameters.
 * all the information is dedicated to a given direct node block determined
 * by the data offset in a file.
 */
struct dnode_of_data {
	struct inode *inode;		/* vfs inode pointer */
	struct page *inode_page;	/* its inode page, NULL is possible */
	struct page *node_page;		/* cached direct node page */
	nid_t nid;			/* node id of the direct node block */
	unsigned int ofs_in_node;	/* data offset in the node page */
	bool inode_page_locked;		/* inode page is locked or not */
	block_t	data_blkaddr;		/* block address of the data block */
};

static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
		struct page *ipage, struct page *npage, nid_t nid)
{
	memset(dn, 0, sizeof(*dn));
	dn->inode = inode;
	dn->inode_page = ipage;
	dn->node_page = npage;
	dn->nid = nid;
}
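
/*
 * Illustrative sketch (assumption): a typical caller builds a dnode_of_data
 * with set_new_dnode(), resolves it with get_dnode_of_data() (declared under
 * "node.c" below), and releases the pages with f2fs_put_dnode(), e.g.:
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
 *	if (err)
 *		return err;
 *	... use dn.data_blkaddr / dn.ofs_in_node ...
 *	f2fs_put_dnode(&dn);
 */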

/*
 * For SIT manager
 *
 * By default, there are 6 active log areas across the whole main area.
 * When considering hot and cold data separation to reduce cleaning overhead,
 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
 * respectively.
 * In the current design, you should not change the numbers intentionally.
 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
 * logs individually according to the underlying devices. (default: 6)
 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
 * data and 8 for node logs.
 */
#define	NR_CURSEG_DATA_TYPE	(3)
#define NR_CURSEG_NODE_TYPE	(3)
#define NR_CURSEG_TYPE		(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)

enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};

struct f2fs_sm_info {
	struct sit_info *sit_info;		/* whole segment information */
	struct free_segmap_info *free_info;	/* free segment information */
	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
	struct curseg_info *curseg_array;	/* active segment information */

	struct list_head wblist_head;	/* list of under-writeback pages */
	spinlock_t wblist_lock;		/* lock for checkpoint */

	block_t seg0_blkaddr;		/* block address of 0'th segment */
	block_t main_blkaddr;		/* start block address of main area */
	block_t ssa_blkaddr;		/* start block address of SSA area */

	unsigned int segment_count;	/* total # of segments */
	unsigned int main_segments;	/* # of segments in main area */
	unsigned int reserved_segments;	/* # of reserved segments */
	unsigned int ovp_segments;	/* # of overprovision segments */
};

/*
 * For directory operation
 */
#define	NODE_DIR1_BLOCK		(ADDRS_PER_INODE + 1)
#define	NODE_DIR2_BLOCK		(ADDRS_PER_INODE + 2)
#define	NODE_IND1_BLOCK		(ADDRS_PER_INODE + 3)
#define	NODE_IND2_BLOCK		(ADDRS_PER_INODE + 4)
#define	NODE_DIND_BLOCK		(ADDRS_PER_INODE + 5)

/*
 * For superblock
 */
/*
 * COUNT_TYPE for monitoring
 *
 * f2fs monitors the number of several block types such as on-writeback,
 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
 */
enum count_type {
	F2FS_WRITEBACK,
	F2FS_DIRTY_DENTS,
	F2FS_DIRTY_NODES,
	F2FS_DIRTY_META,
	NR_COUNT_TYPE,
};

/*
 * Used as sbi->fs_lock[NR_GLOBAL_LOCKS].
 * The checkpoint procedure blocks all the locks in this fs_lock array.
 * Other FS operations grab a free lock; if there is no free lock,
 * they wait to grab one in a round-robin manner.
 */
#define NR_GLOBAL_LOCKS	8

/*
 * The below are the page types of bios used in submit_bio().
 * The available types are:
 * DATA			User data pages. It operates as async mode.
 * NODE			Node pages. It operates as async mode.
 * META			FS metadata pages such as SIT, NAT, CP.
 * NR_PAGE_TYPE		The number of page types.
 * META_FLUSH		Make sure the previous pages are written
 *			while waiting for the bio's completion.
 *			Can only be used with META.
 */
enum page_type {
	DATA,
	NODE,
	META,
	NR_PAGE_TYPE,
	META_FLUSH,
};

struct f2fs_sb_info {
	struct super_block *sb;			/* pointer to VFS super block */
	struct buffer_head *raw_super_buf;	/* buffer head of raw sb */
	struct f2fs_super_block *raw_super;	/* raw super block pointer */
	int s_dirty;				/* dirty flag for checkpoint */

	/* for node-related operations */
	struct f2fs_nm_info *nm_info;		/* node manager */
	struct inode *node_inode;		/* cache node blocks */

	/* for segment-related operations */
	struct f2fs_sm_info *sm_info;		/* segment manager */
	struct bio *bio[NR_PAGE_TYPE];		/* bios to merge */
	sector_t last_block_in_bio[NR_PAGE_TYPE];	/* last block number */
	struct rw_semaphore bio_sem;		/* IO semaphore */

	/* for checkpoint */
	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
	struct inode *meta_inode;		/* cache meta blocks */
	struct mutex cp_mutex;			/* checkpoint procedure lock */
	struct mutex fs_lock[NR_GLOBAL_LOCKS];	/* blocking FS operations */
	struct mutex node_write;		/* locking node writes */
	struct mutex writepages;		/* mutex for writepages() */
	unsigned char next_lock_num;		/* round-robin global locks */
	int por_doing;				/* recovery is being done or not */
	int on_build_free_nids;			/* build_free_nids is in progress */

	/* for orphan inode management */
	struct list_head orphan_inode_list;	/* orphan inode list */
	struct mutex orphan_inode_mutex;	/* for orphan inode list */
	unsigned int n_orphans;			/* # of orphan inodes */

	/* for directory inode management */
	struct list_head dir_inode_list;	/* dir inode list */
	spinlock_t dir_inode_lock;		/* for dir inode list lock */

	/* basic file system units */
	unsigned int log_sectors_per_block;	/* log2 sectors per block */
	unsigned int log_blocksize;		/* log2 block size */
	unsigned int blocksize;			/* block size */
	unsigned int root_ino_num;		/* root inode number */
	unsigned int node_ino_num;		/* node inode number */
	unsigned int meta_ino_num;		/* meta inode number */
	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
	unsigned int blocks_per_seg;		/* blocks per segment */
	unsigned int segs_per_sec;		/* segments per section */
	unsigned int secs_per_zone;		/* sections per zone */
	unsigned int total_sections;		/* total section count */
	unsigned int total_node_count;		/* total node block count */
	unsigned int total_valid_node_count;	/* valid node block count */
	unsigned int total_valid_inode_count;	/* valid inode count */
	int active_logs;			/* # of active logs */

	block_t user_block_count;		/* # of user blocks */
	block_t total_valid_block_count;	/* # of valid blocks */
	block_t alloc_valid_block_count;	/* # of allocated blocks */
	block_t last_valid_block_count;		/* for recovery */
	u32 s_next_generation;			/* for NFS support */
	atomic_t nr_pages[NR_COUNT_TYPE];	/* # of pages, see count_type */

	struct f2fs_mount_info mount_opt;	/* mount options */

	/* for cleaning operations */
	struct mutex gc_mutex;			/* mutex for GC */
	struct f2fs_gc_kthread *gc_thread;	/* GC thread */
	unsigned int cur_victim_sec;		/* current victim section num */

	/*
	 * for stat information.
	 * one is for the LFS mode, and the other is for the SSR mode.
	 */
#ifdef CONFIG_F2FS_STAT_FS
	struct f2fs_stat_info *stat_info;	/* FS status information */
	unsigned int segment_count[2];		/* # of allocated segments */
	unsigned int block_count[2];		/* # of allocated blocks */
	int total_hit_ext, read_hit_ext;	/* extent cache hit ratio */
	int bg_gc;				/* background gc calls */
	unsigned int n_dirty_dirs;		/* # of dir inodes */
#endif
	unsigned int last_victim[2];		/* last victim segment # */
	spinlock_t stat_lock;			/* lock for stat operations */
};

/*
 * Inline functions
 */
static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
{
	return container_of(inode, struct f2fs_inode_info, vfs_inode);
}

static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_super_block *)(sbi->raw_super);
}

static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_checkpoint *)(sbi->ckpt);
}

static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_nm_info *)(sbi->nm_info);
}

static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
{
	return (struct f2fs_sm_info *)(sbi->sm_info);
}

static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
{
	return (struct sit_info *)(SM_I(sbi)->sit_info);
}

static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
{
	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
}

static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
{
	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
}

static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 1;
}

static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
{
	sbi->s_dirty = 0;
}

static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	return ckpt_flags & f;
}

static inline void set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags |= f;
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}

static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
	ckpt_flags &= (~f);
	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
}
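
/*
 * Illustrative note (assumption): the flag bits manipulated here are the
 * CP_*_FLAG values defined in include/linux/f2fs_fs.h, so a caller that
 * records orphan inodes in the checkpoint pack would do something like:
 *
 *	if (sbi->n_orphans)
 *		set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 *	else
 *		clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
 *
 * The exact flag names and call sites are assumptions based on the
 * checkpoint code, not guaranteed by this header.
 */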

static inline void mutex_lock_all(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_GLOBAL_LOCKS; i++) {
		/*
		 * This is the only time we take multiple fs_lock[]
		 * instances; the order is immaterial since we
		 * always hold cp_mutex, which serializes multiple
		 * such operations.
		 */
		mutex_lock_nest_lock(&sbi->fs_lock[i], &sbi->cp_mutex);
	}
}

static inline void mutex_unlock_all(struct f2fs_sb_info *sbi)
{
	int i = 0;
	for (; i < NR_GLOBAL_LOCKS; i++)
		mutex_unlock(&sbi->fs_lock[i]);
}

static inline int mutex_lock_op(struct f2fs_sb_info *sbi)
{
	unsigned char next_lock = sbi->next_lock_num % NR_GLOBAL_LOCKS;
	int i = 0;

	for (; i < NR_GLOBAL_LOCKS; i++)
		if (mutex_trylock(&sbi->fs_lock[i]))
			return i;

	mutex_lock(&sbi->fs_lock[next_lock]);
	sbi->next_lock_num++;
	return next_lock;
}

static inline void mutex_unlock_op(struct f2fs_sb_info *sbi, int ilock)
{
	if (ilock < 0)
		return;
	BUG_ON(ilock >= NR_GLOBAL_LOCKS);
	mutex_unlock(&sbi->fs_lock[ilock]);
}
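
/*
 * Illustrative sketch (assumption): a filesystem operation that must not race
 * with checkpointing grabs one of the global locks and remembers its index so
 * it can drop exactly that lock afterwards, e.g.:
 *
 *	int ilock = mutex_lock_op(sbi);
 *	... modify metadata ...
 *	mutex_unlock_op(sbi, ilock);
 *
 * while write_checkpoint() (declared below) is expected to serialize against
 * all of them via mutex_lock_all()/mutex_unlock_all().
 */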

/*
 * Check whether the given nid is within node id range.
 */
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	WARN_ON((nid >= NM_I(sbi)->max_nid));
	if (nid >= NM_I(sbi)->max_nid)
		return -EINVAL;
	return 0;
}

#define F2FS_DEFAULT_ALLOCATED_BLOCKS	1

/*
 * Check whether the inode has blocks or not
 */
static inline int F2FS_HAS_BLOCKS(struct inode *inode)
{
	if (F2FS_I(inode)->i_xattr_nid)
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS + 1);
	else
		return (inode->i_blocks > F2FS_DEFAULT_ALLOCATED_BLOCKS);
}

static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
				 struct inode *inode, blkcnt_t count)
{
	block_t	valid_block_count;

	spin_lock(&sbi->stat_lock);
	valid_block_count =
		sbi->total_valid_block_count + (block_t)count;
	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}
	inode->i_blocks += count;
	sbi->total_valid_block_count = valid_block_count;
	sbi->alloc_valid_block_count += (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return true;
}

static inline int dec_valid_block_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						blkcnt_t count)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_block_count < (block_t) count);
	BUG_ON(inode->i_blocks < count);
	inode->i_blocks -= count;
	sbi->total_valid_block_count -= (block_t)count;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_inc(&sbi->nr_pages[count_type]);
	F2FS_SET_SB_DIRT(sbi);
}

static inline void inode_inc_dirty_dents(struct inode *inode)
{
	atomic_inc(&F2FS_I(inode)->dirty_dents);
}

static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
{
	atomic_dec(&sbi->nr_pages[count_type]);
}

static inline void inode_dec_dirty_dents(struct inode *inode)
{
	atomic_dec(&F2FS_I(inode)->dirty_dents);
}

static inline int get_pages(struct f2fs_sb_info *sbi, int count_type)
{
	return atomic_read(&sbi->nr_pages[count_type]);
}

static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
{
	unsigned int pages_per_sec = sbi->segs_per_sec *
					(1 << sbi->log_blocks_per_seg);
	return ((get_pages(sbi, block_type) + pages_per_sec - 1)
			>> sbi->log_blocks_per_seg) / sbi->segs_per_sec;
}
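
/*
 * Worked example (illustrative figures only): with the common geometry of
 * 512 blocks per segment (log_blocks_per_seg == 9) and segs_per_sec == 1,
 * 600 dirty pages round up to two sections:
 *
 *	pages_per_sec = 1 * (1 << 9) = 512
 *	((600 + 511) >> 9) / 1 = (1111 >> 9) / 1 = 2 sections
 */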

static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
{
	block_t ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_block_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	/* return NAT or SIT bitmap */
	if (flag == NAT_BITMAP)
		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
	else if (flag == SIT_BITMAP)
		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);

	return 0;
}

static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	int offset = (flag == NAT_BITMAP) ?
			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
	return &ckpt->sit_nat_version_bitmap + offset;
}

static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
	block_t start_addr;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);

	start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);

	/*
	 * odd numbered checkpoint should be at cp segment 0
	 * and even numbered checkpoint must be at cp segment 1
	 */
	if (!(ckpt_version & 1))
		start_addr += sbi->blocks_per_seg;

	return start_addr;
}
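
/*
 * Illustrative note: the checkpoint area holds two packs and f2fs alternates
 * between them by checkpoint version, so with cp_blkaddr == 512 and 512
 * blocks per segment (example figures only):
 *
 *	checkpoint_ver = 5 (odd)  -> pack starts at block 512
 *	checkpoint_ver = 6 (even) -> pack starts at block 512 + 512 = 1024
 */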

static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	block_t	valid_block_count;
	unsigned int valid_node_count;

	spin_lock(&sbi->stat_lock);

	valid_block_count = sbi->total_valid_block_count + (block_t)count;
	sbi->alloc_valid_block_count += (block_t)count;
	valid_node_count = sbi->total_valid_node_count + count;

	if (valid_block_count > sbi->user_block_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (valid_node_count > sbi->total_node_count) {
		spin_unlock(&sbi->stat_lock);
		return false;
	}

	if (inode)
		inode->i_blocks += count;
	sbi->total_valid_node_count = valid_node_count;
	sbi->total_valid_block_count = valid_block_count;
	spin_unlock(&sbi->stat_lock);

	return true;
}

static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
						struct inode *inode,
						unsigned int count)
{
	spin_lock(&sbi->stat_lock);

	BUG_ON(sbi->total_valid_block_count < count);
	BUG_ON(sbi->total_valid_node_count < count);
	BUG_ON(inode->i_blocks < count);

	inode->i_blocks -= count;
	sbi->total_valid_node_count -= count;
	sbi->total_valid_block_count -= (block_t)count;

	spin_unlock(&sbi->stat_lock);
}

static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_node_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(sbi->total_valid_inode_count == sbi->total_node_count);
	sbi->total_valid_inode_count++;
	spin_unlock(&sbi->stat_lock);
}

static inline int dec_valid_inode_count(struct f2fs_sb_info *sbi)
{
	spin_lock(&sbi->stat_lock);
	BUG_ON(!sbi->total_valid_inode_count);
	sbi->total_valid_inode_count--;
	spin_unlock(&sbi->stat_lock);
	return 0;
}

static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi)
{
	unsigned int ret;
	spin_lock(&sbi->stat_lock);
	ret = sbi->total_valid_inode_count;
	spin_unlock(&sbi->stat_lock);
	return ret;
}

static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}

static inline void f2fs_put_dnode(struct dnode_of_data *dn)
{
	if (dn->node_page)
		f2fs_put_page(dn->node_page, 1);
	if (dn->inode_page && dn->node_page != dn->inode_page)
		f2fs_put_page(dn->inode_page, 0);
	dn->node_page = NULL;
	dn->inode_page = NULL;
}

static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
					size_t size, void (*ctor)(void *))
{
	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, ctor);
}

#define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)

static inline bool IS_INODE(struct page *page)
{
	struct f2fs_node *p = (struct f2fs_node *)page_address(page);
	return RAW_IS_INODE(p);
}

static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
{
	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
}

static inline block_t datablock_addr(struct page *node_page,
		unsigned int offset)
{
	struct f2fs_node *raw_node;
	__le32 *addr_array;
	raw_node = (struct f2fs_node *)page_address(node_page);
	addr_array = blkaddr_in_node(raw_node);
	return le32_to_cpu(addr_array[offset]);
}

static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}

static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}

static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
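
/*
 * Worked example (illustrative): unlike the generic test_bit() helpers, the
 * bit helpers above operate MSB-first within each byte, matching the on-disk
 * bitmap layout.  For nr = 10:
 *
 *	byte index = 10 >> 3 = 1
 *	mask       = 1 << (7 - (10 & 7)) = 1 << 5 = 0x20
 *
 * so bit 10 is the third most significant bit of the second byte.
 */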

/* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
};

static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	set_bit(flag, &fi->flags);
}

static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
{
	return test_bit(flag, &fi->flags);
}

static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	clear_bit(flag, &fi->flags);
}

static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
{
	fi->i_acl_mode = mode;
	set_inode_flag(fi, FI_ACL_MODE);
}

static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
{
	if (is_inode_flag_set(fi, FI_ACL_MODE)) {
		clear_inode_flag(fi, FI_ACL_MODE);
		return 1;
	}
	return 0;
}

static inline int f2fs_readonly(struct super_block *sb)
{
	return sb->s_flags & MS_RDONLY;
}

/*
 * file.c
 */
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
void f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
int truncate_data_blocks_range(struct dnode_of_data *, int);
long f2fs_ioctl(struct file *, unsigned int, unsigned long);
long f2fs_compat_ioctl(struct file *, unsigned int, unsigned long);

/*
 * inode.c
 */
void f2fs_set_inode_flags(struct inode *);
struct inode *f2fs_iget(struct super_block *, unsigned long);
void update_inode(struct inode *, struct page *);
int update_inode_page(struct inode *);
int f2fs_write_inode(struct inode *, struct writeback_control *);
void f2fs_evict_inode(struct inode *);

/*
 * namei.c
 */
struct dentry *f2fs_get_parent(struct dentry *child);

/*
 * dir.c
 */
struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
							struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
				struct page *, struct inode *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
bool f2fs_empty_dir(struct inode *);

static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
{
	return __f2fs_add_link(dentry->d_parent->d_inode, &dentry->d_name,
				inode);
}

/*
 * super.c
 */
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);

/*
 * hash.c
 */
f2fs_hash_t f2fs_dentry_hash(const char *, size_t);

/*
 * node.c
 */
struct dnode_of_data;
struct node_info;

int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
void sync_inode_page(struct dnode_of_data *);
int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
void recover_node_page(struct f2fs_sb_info *, struct page *,
		struct f2fs_summary *, struct node_info *, block_t);
int recover_inode_page(struct f2fs_sb_info *, struct page *);
int restore_node_summary(struct f2fs_sb_info *, unsigned int,
			struct f2fs_summary_block *);
void flush_nat_entries(struct f2fs_sb_info *);
int build_node_manager(struct f2fs_sb_info *);
void destroy_node_manager(struct f2fs_sb_info *);
int __init create_node_manager_caches(void);
void destroy_node_manager_caches(void);

/*
 * segment.c
 */
void f2fs_balance_fs(struct f2fs_sb_info *);
void invalidate_blocks(struct f2fs_sb_info *, block_t);
void locate_dirty_segment(struct f2fs_sb_info *, unsigned int);
void clear_prefree_segments(struct f2fs_sb_info *);
int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
					block_t, block_t *);
void write_data_page(struct inode *, struct page *, struct dnode_of_data *,
					block_t, block_t *);
void rewrite_data_page(struct f2fs_sb_info *, struct page *, block_t);
void recover_data_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void rewrite_node_page(struct f2fs_sb_info *, struct page *,
				struct f2fs_summary *, block_t, block_t);
void write_data_summaries(struct f2fs_sb_info *, block_t);
void write_node_summaries(struct f2fs_sb_info *, block_t);
int lookup_journal_in_cursum(struct f2fs_summary_block *,
					int, unsigned int, int);
void flush_sit_entries(struct f2fs_sb_info *);
int build_segment_manager(struct f2fs_sb_info *);
void destroy_segment_manager(struct f2fs_sb_info *);

/*
 * checkpoint.c
 */
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
int check_orphan_space(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void set_dirty_dir_page(struct inode *, struct page *);
void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
struct inode *check_dirty_dir_inode(struct f2fs_sb_info *, nid_t);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, bool);
void init_orphan_info(struct f2fs_sb_info *);
int __init create_checkpoint_caches(void);
void destroy_checkpoint_caches(void);

/*
 * data.c
 */
int reserve_new_block(struct dnode_of_data *);
void update_extent_cache(block_t, struct dnode_of_data *);
struct page *find_data_page(struct inode *, pgoff_t, bool);
struct page *get_lock_data_page(struct inode *, pgoff_t);
struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int f2fs_readpage(struct f2fs_sb_info *, struct page *, block_t, int);
int do_write_data_page(struct page *);

/*
 * gc.c
 */
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
block_t start_bidx_of_node(unsigned int);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
void destroy_gc_caches(void);

/*
 * recovery.c
 */
int recover_fsync_data(struct f2fs_sb_info *);
bool space_for_roll_forward(struct f2fs_sb_info *);

/*
 * debug.c
 */
#ifdef CONFIG_F2FS_STAT_FS
struct f2fs_stat_info {
	struct list_head stat_list;
	struct f2fs_sb_info *sbi;
	struct mutex stat_lock;
	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
	int main_area_segs, main_area_sections, main_area_zones;
	int hit_ext, total_ext;
	int ndirty_node, ndirty_dent, ndirty_dirs, ndirty_meta;
	int nats, sits, fnids;
	int total_count, utilization;
	int bg_gc;
	unsigned int valid_count, valid_node_count, valid_inode_count;
	unsigned int bimodal, avg_vblocks;
	int util_free, util_valid, util_invalid;
	int rsvd_segs, overp_segs;
	int dirty_count, node_pages, meta_pages;
	int prefree_count, call_count;
	int tot_segs, node_segs, data_segs, free_segs, free_secs;
	int tot_blks, data_blks, node_blks;
	int curseg[NR_CURSEG_TYPE];
	int cursec[NR_CURSEG_TYPE];
	int curzone[NR_CURSEG_TYPE];

	unsigned int segment_count[2];
	unsigned int block_count[2];
	unsigned base_mem, cache_mem;
};

#define stat_inc_call_count(si)	((si)->call_count++)

#define stat_inc_seg_count(sbi, type)					\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		(si)->tot_segs++;					\
		if (type == SUM_TYPE_DATA)				\
			si->data_segs++;				\
		else							\
			si->node_segs++;				\
	} while (0)

#define stat_inc_tot_blk_count(si, blks)				\
	(si->tot_blks += (blks))

#define stat_inc_data_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->data_blks += (blks);				\
	} while (0)

#define stat_inc_node_blk_count(sbi, blks)				\
	do {								\
		struct f2fs_stat_info *si = sbi->stat_info;		\
		stat_inc_tot_blk_count(si, blks);			\
		si->node_blks += (blks);				\
	} while (0)

int f2fs_build_stats(struct f2fs_sb_info *);
void f2fs_destroy_stats(struct f2fs_sb_info *);
void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
#else
#define stat_inc_call_count(si)
#define stat_inc_seg_count(si, type)
#define stat_inc_tot_blk_count(si, blks)
#define stat_inc_data_blk_count(si, blks)
#define stat_inc_node_blk_count(sbi, blks)

static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
#endif

extern const struct file_operations f2fs_dir_operations;
extern const struct file_operations f2fs_file_operations;
extern const struct inode_operations f2fs_file_inode_operations;
extern const struct address_space_operations f2fs_dblock_aops;
extern const struct address_space_operations f2fs_node_aops;
extern const struct address_space_operations f2fs_meta_aops;
extern const struct inode_operations f2fs_dir_inode_operations;
extern const struct inode_operations f2fs_symlink_inode_operations;
extern const struct inode_operations f2fs_special_inode_operations;
#endif