1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * NILFS module and super block management.
4  *
5  * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
6  *
7  * Written by Ryusuke Konishi.
8  */
9 /*
10  *  linux/fs/ext2/super.c
11  *
12  * Copyright (C) 1992, 1993, 1994, 1995
13  * Remy Card ([email protected])
14  * Laboratoire MASI - Institut Blaise Pascal
15  * Universite Pierre et Marie Curie (Paris VI)
16  *
17  *  from
18  *
19  *  linux/fs/minix/inode.c
20  *
21  *  Copyright (C) 1991, 1992  Linus Torvalds
22  *
23  *  Big-endian to little-endian byte-swapping/bitmaps by
24  *        David S. Miller ([email protected]), 1995
25  */
26
27 #include <linux/module.h>
28 #include <linux/string.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/blkdev.h>
32 #include <linux/crc32.h>
33 #include <linux/vfs.h>
34 #include <linux/writeback.h>
35 #include <linux/seq_file.h>
36 #include <linux/mount.h>
37 #include <linux/fs_context.h>
38 #include <linux/fs_parser.h>
39 #include "nilfs.h"
40 #include "export.h"
41 #include "mdt.h"
42 #include "alloc.h"
43 #include "btree.h"
44 #include "btnode.h"
45 #include "page.h"
46 #include "cpfile.h"
47 #include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */
48 #include "ifile.h"
49 #include "dat.h"
50 #include "segment.h"
51 #include "segbuf.h"
52
53 MODULE_AUTHOR("NTT Corp.");
54 MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
55                    "(NILFS)");
56 MODULE_LICENSE("GPL");
57
58 static struct kmem_cache *nilfs_inode_cachep;
59 struct kmem_cache *nilfs_transaction_cachep;
60 struct kmem_cache *nilfs_segbuf_cachep;
61 struct kmem_cache *nilfs_btree_path_cache;
62
63 static int nilfs_setup_super(struct super_block *sb, int is_mount);
64
65 void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
66 {
67         struct va_format vaf;
68         va_list args;
69         int level;
70
71         va_start(args, fmt);
72
73         level = printk_get_level(fmt);
74         vaf.fmt = printk_skip_level(fmt);
75         vaf.va = &args;
76
77         if (sb)
78                 printk("%c%cNILFS (%s): %pV\n",
79                        KERN_SOH_ASCII, level, sb->s_id, &vaf);
80         else
81                 printk("%c%cNILFS: %pV\n",
82                        KERN_SOH_ASCII, level, &vaf);
83
84         va_end(args);
85 }
86
87 static void nilfs_set_error(struct super_block *sb)
88 {
89         struct the_nilfs *nilfs = sb->s_fs_info;
90         struct nilfs_super_block **sbp;
91
92         down_write(&nilfs->ns_sem);
93         if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
94                 nilfs->ns_mount_state |= NILFS_ERROR_FS;
95                 sbp = nilfs_prepare_super(sb, 0);
96                 if (likely(sbp)) {
97                         sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
98                         if (sbp[1])
99                                 sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
100                         nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
101                 }
102         }
103         up_write(&nilfs->ns_sem);
104 }
105
106 /**
107  * __nilfs_error() - report failure condition on a filesystem
108  * @sb:       super block instance
109  * @function: name of calling function
110  * @fmt:      format string for message to be output
111  * @...:      optional arguments to @fmt
112  *
113  * __nilfs_error() sets an ERROR_FS flag on the superblock and reports
114  * an error message.  This function should be called when NILFS detects
115  * inconsistencies or corruption of metadata on disk.
116  *
117  * This implements the body of the nilfs_error() macro.  Normally,
118  * nilfs_error() should be used.  For recoverable errors such as a
119  * single-shot I/O error, nilfs_err() should be used instead.
120  *
121  * Callers should not add a trailing newline; this function appends one.
122  */
123 void __nilfs_error(struct super_block *sb, const char *function,
124                    const char *fmt, ...)
125 {
126         struct the_nilfs *nilfs = sb->s_fs_info;
127         struct va_format vaf;
128         va_list args;
129
130         va_start(args, fmt);
131
132         vaf.fmt = fmt;
133         vaf.va = &args;
134
135         printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n",
136                sb->s_id, function, &vaf);
137
138         va_end(args);
139
140         if (!sb_rdonly(sb)) {
141                 nilfs_set_error(sb);
142
143                 if (nilfs_test_opt(nilfs, ERRORS_RO)) {
144                         printk(KERN_CRIT "Remounting filesystem read-only\n");
145                         sb->s_flags |= SB_RDONLY;
146                 }
147         }
148
149         if (nilfs_test_opt(nilfs, ERRORS_PANIC))
150                 panic("NILFS (device %s): panic forced after error\n",
151                       sb->s_id);
152 }
153
154 struct inode *nilfs_alloc_inode(struct super_block *sb)
155 {
156         struct nilfs_inode_info *ii;
157
158         ii = alloc_inode_sb(sb, nilfs_inode_cachep, GFP_NOFS);
159         if (!ii)
160                 return NULL;
161         ii->i_bh = NULL;
162         ii->i_state = 0;
163         ii->i_type = 0;
164         ii->i_cno = 0;
165         ii->i_assoc_inode = NULL;
166         ii->i_bmap = &ii->i_bmap_data;
167         return &ii->vfs_inode;
168 }
169
170 static void nilfs_free_inode(struct inode *inode)
171 {
172         if (nilfs_is_metadata_file_inode(inode))
173                 nilfs_mdt_destroy(inode);
174
175         kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
176 }
177
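/**
 * nilfs_sync_super - write the super block buffer(s) back to the device
 * @sb:   super block instance
 * @flag: commit flag (NILFS_SB_COMMIT or NILFS_SB_COMMIT_ALL)
 *
 * The primary super block buffer is written first, using a preflush/FUA
 * request when the "barrier" mount option is enabled.  If the write fails
 * with an I/O error and a secondary super block exists, its contents are
 * replaced with the primary copy and the write is retried at the fallback
 * location.  On success, ns_prot_seq is updated from the on-disk log
 * cursor so that recently written segments stay protected from the GC.
 */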
178 static int nilfs_sync_super(struct super_block *sb, int flag)
179 {
180         struct the_nilfs *nilfs = sb->s_fs_info;
181         int err;
182
183  retry:
184         set_buffer_dirty(nilfs->ns_sbh[0]);
185         if (nilfs_test_opt(nilfs, BARRIER)) {
186                 err = __sync_dirty_buffer(nilfs->ns_sbh[0],
187                                           REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
188         } else {
189                 err = sync_dirty_buffer(nilfs->ns_sbh[0]);
190         }
191
192         if (unlikely(err)) {
193                 nilfs_err(sb, "unable to write superblock: err=%d", err);
194                 if (err == -EIO && nilfs->ns_sbh[1]) {
195                         /*
196                          * sbp[0] points to a newer log than sbp[1]; copy it to
197                          * sbp[1] and fall back to that location before retrying.
198                          */
199                         memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
200                                nilfs->ns_sbsize);
201                         nilfs_fall_back_super_block(nilfs);
202                         goto retry;
203                 }
204         } else {
205                 struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
206
207                 nilfs->ns_sbwcount++;
208
209                 /*
210                  * The latest segment becomes traceable from the position
211                  * recorded in the superblock.
212                  */
213                 clear_nilfs_discontinued(nilfs);
214
215                 /* update GC protection for recent segments */
216                 if (nilfs->ns_sbh[1]) {
217                         if (flag == NILFS_SB_COMMIT_ALL) {
218                                 set_buffer_dirty(nilfs->ns_sbh[1]);
219                                 if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
220                                         goto out;
221                         }
222                         if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
223                             le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
224                                 sbp = nilfs->ns_sbp[1];
225                 }
226
227                 spin_lock(&nilfs->ns_last_segment_lock);
228                 nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
229                 spin_unlock(&nilfs->ns_last_segment_lock);
230         }
231  out:
232         return err;
233 }
234
235 void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
236                           struct the_nilfs *nilfs)
237 {
238         sector_t nfreeblocks;
239
240         /* nilfs->ns_sem must be locked by the caller. */
241         nilfs_count_free_blocks(nilfs, &nfreeblocks);
242         sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);
243
244         spin_lock(&nilfs->ns_last_segment_lock);
245         sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
246         sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
247         sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
248         spin_unlock(&nilfs->ns_last_segment_lock);
249 }
250
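/**
 * nilfs_prepare_super - get super block pointers for an update
 * @sb:   super block instance
 * @flip: if non-zero, swap the primary and secondary super block copies
 *
 * If one of the two in-memory super block copies has an invalid magic
 * number, it is recovered from the other copy.  Returns the array of
 * super block pointers on success, or NULL if the super block cannot
 * be repaired.
 *
 * The caller must hold nilfs->ns_sem.
 */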
251 struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
252                                                int flip)
253 {
254         struct the_nilfs *nilfs = sb->s_fs_info;
255         struct nilfs_super_block **sbp = nilfs->ns_sbp;
256
257         /* nilfs->ns_sem must be locked by the caller. */
258         if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
259                 if (sbp[1] &&
260                     sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
261                         memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
262                 } else {
263                         nilfs_crit(sb, "superblock broke");
264                         return NULL;
265                 }
266         } else if (sbp[1] &&
267                    sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
268                 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
269         }
270
271         if (flip && sbp[1])
272                 nilfs_swap_super_block(nilfs);
273
274         return sbp;
275 }
276
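/**
 * nilfs_commit_super - update and write out the super block(s)
 * @sb:   super block instance
 * @flag: NILFS_SB_COMMIT to write only the primary copy, or
 *        NILFS_SB_COMMIT_ALL to write both copies
 *
 * Refreshes the write time and CRC checksum of the in-memory super
 * block copies, clears the super-block-dirty state, and then writes
 * the buffer(s) out through nilfs_sync_super().
 *
 * The caller must hold nilfs->ns_sem.
 */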
277 int nilfs_commit_super(struct super_block *sb, int flag)
278 {
279         struct the_nilfs *nilfs = sb->s_fs_info;
280         struct nilfs_super_block **sbp = nilfs->ns_sbp;
281         time64_t t;
282
283         /* nilfs->ns_sem must be locked by the caller. */
284         t = ktime_get_real_seconds();
285         nilfs->ns_sbwtime = t;
286         sbp[0]->s_wtime = cpu_to_le64(t);
287         sbp[0]->s_sum = 0;
288         sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
289                                              (unsigned char *)sbp[0],
290                                              nilfs->ns_sbsize));
291         if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
292                 sbp[1]->s_wtime = sbp[0]->s_wtime;
293                 sbp[1]->s_sum = 0;
294                 sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
295                                             (unsigned char *)sbp[1],
296                                             nilfs->ns_sbsize));
297         }
298         clear_nilfs_sb_dirty(nilfs);
299         nilfs->ns_flushed_device = 1;
300         /* make sure store to ns_flushed_device cannot be reordered */
301         smp_wmb();
302         return nilfs_sync_super(sb, flag);
303 }
304
305 /**
306  * nilfs_cleanup_super() - write filesystem state for cleanup
307  * @sb: super block instance to be unmounted or degraded to read-only
308  *
309  * This function restores state flags in the on-disk super block.
310  * This sets the "clean" flag (i.e. NILFS_VALID_FS) unless the
311  * filesystem was previously marked unclean.
312  */
313 int nilfs_cleanup_super(struct super_block *sb)
314 {
315         struct the_nilfs *nilfs = sb->s_fs_info;
316         struct nilfs_super_block **sbp;
317         int flag = NILFS_SB_COMMIT;
318         int ret = -EIO;
319
320         sbp = nilfs_prepare_super(sb, 0);
321         if (sbp) {
322                 sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
323                 nilfs_set_log_cursor(sbp[0], nilfs);
324                 if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
325                         /*
326                          * Propagate the "clean" flag to the other super
327                          * block as well if both super blocks point to
328                          * the same checkpoint.
329                          */
330                         sbp[1]->s_state = sbp[0]->s_state;
331                         flag = NILFS_SB_COMMIT_ALL;
332                 }
333                 ret = nilfs_commit_super(sb, flag);
334         }
335         return ret;
336 }
337
338 /**
339  * nilfs_move_2nd_super - relocate secondary super block
340  * @sb: super block instance
341  * @sb2off: new offset of the secondary super block (in bytes)
342  */
343 static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
344 {
345         struct the_nilfs *nilfs = sb->s_fs_info;
346         struct buffer_head *nsbh;
347         struct nilfs_super_block *nsbp;
348         sector_t blocknr, newblocknr;
349         unsigned long offset;
350         int sb2i;  /* array index of the secondary superblock */
351         int ret = 0;
352
353         /* nilfs->ns_sem must be locked by the caller. */
354         if (nilfs->ns_sbh[1] &&
355             nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
356                 sb2i = 1;
357                 blocknr = nilfs->ns_sbh[1]->b_blocknr;
358         } else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
359                 sb2i = 0;
360                 blocknr = nilfs->ns_sbh[0]->b_blocknr;
361         } else {
362                 sb2i = -1;
363                 blocknr = 0;
364         }
365         if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
366                 goto out;  /* super block location is unchanged */
367
368         /* Get new super block buffer */
369         newblocknr = sb2off >> nilfs->ns_blocksize_bits;
370         offset = sb2off & (nilfs->ns_blocksize - 1);
371         nsbh = sb_getblk(sb, newblocknr);
372         if (!nsbh) {
373                 nilfs_warn(sb,
374                            "unable to move secondary superblock to block %llu",
375                            (unsigned long long)newblocknr);
376                 ret = -EIO;
377                 goto out;
378         }
379         nsbp = (void *)nsbh->b_data + offset;
380
381         lock_buffer(nsbh);
382         if (sb2i >= 0) {
383                 /*
384                  * The position of the second superblock only changes by 4KiB,
385                  * which is larger than the maximum superblock data size
386                  * (= 1KiB), so there is no need to use memmove() to allow
387                  * overlap between source and destination.
388                  */
389                 memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
390
391                 /*
392                  * Zero-fill only after the copy so that a move within
393                  * the same block does not clobber the data being copied.
394                  */
395                 memset(nsbh->b_data, 0, offset);
396                 memset((void *)nsbp + nilfs->ns_sbsize, 0,
397                        nsbh->b_size - offset - nilfs->ns_sbsize);
398         } else {
399                 memset(nsbh->b_data, 0, nsbh->b_size);
400         }
401         set_buffer_uptodate(nsbh);
402         unlock_buffer(nsbh);
403
404         if (sb2i >= 0) {
405                 brelse(nilfs->ns_sbh[sb2i]);
406                 nilfs->ns_sbh[sb2i] = nsbh;
407                 nilfs->ns_sbp[sb2i] = nsbp;
408         } else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
409                 /* secondary super block will be restored to index 1 */
410                 nilfs->ns_sbh[1] = nsbh;
411                 nilfs->ns_sbp[1] = nsbp;
412         } else {
413                 brelse(nsbh);
414         }
415 out:
416         return ret;
417 }
418
419 /**
420  * nilfs_resize_fs - resize the filesystem
421  * @sb: super block instance
422  * @newsize: new size of the filesystem (in bytes)
423  */
424 int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
425 {
426         struct the_nilfs *nilfs = sb->s_fs_info;
427         struct nilfs_super_block **sbp;
428         __u64 devsize, newnsegs;
429         loff_t sb2off;
430         int ret;
431
432         ret = -ERANGE;
433         devsize = bdev_nr_bytes(sb->s_bdev);
434         if (newsize > devsize)
435                 goto out;
436
437         /*
438          * Prevent underflow in second superblock position calculation.
439          * The exact minimum size check is done in nilfs_sufile_resize().
440          */
441         if (newsize < 4096) {
442                 ret = -ENOSPC;
443                 goto out;
444         }
445
446         /*
447          * Write lock is required to protect some functions depending
448          * on the number of segments, the number of reserved segments,
449          * and so forth.
450          */
451         down_write(&nilfs->ns_segctor_sem);
452
453         sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
454         newnsegs = sb2off >> nilfs->ns_blocksize_bits;
455         newnsegs = div64_ul(newnsegs, nilfs->ns_blocks_per_segment);
456
457         ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
458         up_write(&nilfs->ns_segctor_sem);
459         if (ret < 0)
460                 goto out;
461
462         ret = nilfs_construct_segment(sb);
463         if (ret < 0)
464                 goto out;
465
466         down_write(&nilfs->ns_sem);
467         nilfs_move_2nd_super(sb, sb2off);
468         ret = -EIO;
469         sbp = nilfs_prepare_super(sb, 0);
470         if (likely(sbp)) {
471                 nilfs_set_log_cursor(sbp[0], nilfs);
472                 /*
473                  * Drop NILFS_RESIZE_FS flag for compatibility with
474                  * mount-time resize which may be implemented in a
475                  * future release.
476                  */
477                 sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
478                                               ~NILFS_RESIZE_FS);
479                 sbp[0]->s_dev_size = cpu_to_le64(newsize);
480                 sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
481                 if (sbp[1])
482                         memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
483                 ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
484         }
485         up_write(&nilfs->ns_sem);
486
487         /*
488          * Reset the range of allocatable segments last.  This order
489          * is important in the case of expansion because the secondary
490          * superblock must be protected from log write until migration
491          * completes.
492          */
493         if (!ret)
494                 nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
495 out:
496         return ret;
497 }
498
499 static void nilfs_put_super(struct super_block *sb)
500 {
501         struct the_nilfs *nilfs = sb->s_fs_info;
502
503         nilfs_detach_log_writer(sb);
504
505         if (!sb_rdonly(sb)) {
506                 down_write(&nilfs->ns_sem);
507                 nilfs_cleanup_super(sb);
508                 up_write(&nilfs->ns_sem);
509         }
510
511         nilfs_sysfs_delete_device_group(nilfs);
512         iput(nilfs->ns_sufile);
513         iput(nilfs->ns_cpfile);
514         iput(nilfs->ns_dat);
515
516         destroy_nilfs(nilfs);
517         sb->s_fs_info = NULL;
518 }
519
520 static int nilfs_sync_fs(struct super_block *sb, int wait)
521 {
522         struct the_nilfs *nilfs = sb->s_fs_info;
523         struct nilfs_super_block **sbp;
524         int err = 0;
525
526         /* This function is called when the super block should be written back */
527         if (wait)
528                 err = nilfs_construct_segment(sb);
529
530         down_write(&nilfs->ns_sem);
531         if (nilfs_sb_dirty(nilfs)) {
532                 sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
533                 if (likely(sbp)) {
534                         nilfs_set_log_cursor(sbp[0], nilfs);
535                         nilfs_commit_super(sb, NILFS_SB_COMMIT);
536                 }
537         }
538         up_write(&nilfs->ns_sem);
539
540         if (!err)
541                 err = nilfs_flush_device(nilfs);
542
543         return err;
544 }
545
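/**
 * nilfs_attach_checkpoint - attach a checkpoint to be used as a root
 * @sb:       super block instance
 * @cno:      checkpoint number
 * @curr_mnt: true when attaching the current (latest) checkpoint
 * @rootp:    output pointer for the attached nilfs_root object
 *
 * Looks up or creates the nilfs_root object for the checkpoint and,
 * if it has not been attached before, reads in its inode file (ifile).
 * Returns 0 on success or a negative error code on failure.
 */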
546 int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
547                             struct nilfs_root **rootp)
548 {
549         struct the_nilfs *nilfs = sb->s_fs_info;
550         struct nilfs_root *root;
551         int err = -ENOMEM;
552
553         root = nilfs_find_or_create_root(
554                 nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno);
555         if (!root)
556                 return err;
557
558         if (root->ifile)
559                 goto reuse; /* already attached checkpoint */
560
561         down_read(&nilfs->ns_segctor_sem);
562         err = nilfs_ifile_read(sb, root, cno, nilfs->ns_inode_size);
563         up_read(&nilfs->ns_segctor_sem);
564         if (unlikely(err))
565                 goto failed;
566
567  reuse:
568         *rootp = root;
569         return 0;
570
571  failed:
572         if (err == -EINVAL)
573                 nilfs_err(sb, "Invalid checkpoint (checkpoint number=%llu)",
574                           (unsigned long long)cno);
575         nilfs_put_root(root);
576
577         return err;
578 }
579
580 static int nilfs_freeze(struct super_block *sb)
581 {
582         struct the_nilfs *nilfs = sb->s_fs_info;
583         int err;
584
585         if (sb_rdonly(sb))
586                 return 0;
587
588         /* Mark super block clean */
589         down_write(&nilfs->ns_sem);
590         err = nilfs_cleanup_super(sb);
591         up_write(&nilfs->ns_sem);
592         return err;
593 }
594
595 static int nilfs_unfreeze(struct super_block *sb)
596 {
597         struct the_nilfs *nilfs = sb->s_fs_info;
598
599         if (sb_rdonly(sb))
600                 return 0;
601
602         down_write(&nilfs->ns_sem);
603         nilfs_setup_super(sb, false);
604         up_write(&nilfs->ns_sem);
605         return 0;
606 }
607
608 static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
609 {
610         struct super_block *sb = dentry->d_sb;
611         struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
612         struct the_nilfs *nilfs = root->nilfs;
613         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
614         unsigned long long blocks;
615         unsigned long overhead;
616         unsigned long nrsvblocks;
617         sector_t nfreeblocks;
618         u64 nmaxinodes, nfreeinodes;
619         int err;
620
621         /*
622          * Compute all of the segment blocks
623          *
624          * The blocks before the first segment and after the last
625          * segment are excluded.
626          */
627         blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments
628                 - nilfs->ns_first_data_block;
629         nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment;
630
631         /*
632          * Compute the overhead
633          *
634          * If metadata blocks were distributed outside the segment
635          * structure, they would have to be counted as overhead.
636          */
637         overhead = 0;
638
639         err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
640         if (unlikely(err))
641                 return err;
642
643         err = nilfs_ifile_count_free_inodes(root->ifile,
644                                             &nmaxinodes, &nfreeinodes);
645         if (unlikely(err)) {
646                 nilfs_warn(sb, "failed to count free inodes: err=%d", err);
647                 if (err == -ERANGE) {
648                         /*
649                          * If nilfs_palloc_count_max_entries() returns
650                          * -ERANGE, simply treat the current inode count
651                          * as the maximum possible and report zero free
652                          * inodes.
653                          */
654                         nmaxinodes = atomic64_read(&root->inodes_count);
655                         nfreeinodes = 0;
656                         err = 0;
657                 } else
658                         return err;
659         }
660
661         buf->f_type = NILFS_SUPER_MAGIC;
662         buf->f_bsize = sb->s_blocksize;
663         buf->f_blocks = blocks - overhead;
664         buf->f_bfree = nfreeblocks;
665         buf->f_bavail = (buf->f_bfree >= nrsvblocks) ?
666                 (buf->f_bfree - nrsvblocks) : 0;
667         buf->f_files = nmaxinodes;
668         buf->f_ffree = nfreeinodes;
669         buf->f_namelen = NILFS_NAME_LEN;
670         buf->f_fsid = u64_to_fsid(id);
671
672         return 0;
673 }
674
675 static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
676 {
677         struct super_block *sb = dentry->d_sb;
678         struct the_nilfs *nilfs = sb->s_fs_info;
679         struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
680
681         if (!nilfs_test_opt(nilfs, BARRIER))
682                 seq_puts(seq, ",nobarrier");
683         if (root->cno != NILFS_CPTREE_CURRENT_CNO)
684                 seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
685         if (nilfs_test_opt(nilfs, ERRORS_PANIC))
686                 seq_puts(seq, ",errors=panic");
687         if (nilfs_test_opt(nilfs, ERRORS_CONT))
688                 seq_puts(seq, ",errors=continue");
689         if (nilfs_test_opt(nilfs, STRICT_ORDER))
690                 seq_puts(seq, ",order=strict");
691         if (nilfs_test_opt(nilfs, NORECOVERY))
692                 seq_puts(seq, ",norecovery");
693         if (nilfs_test_opt(nilfs, DISCARD))
694                 seq_puts(seq, ",discard");
695
696         return 0;
697 }
698
699 static const struct super_operations nilfs_sops = {
700         .alloc_inode    = nilfs_alloc_inode,
701         .free_inode     = nilfs_free_inode,
702         .dirty_inode    = nilfs_dirty_inode,
703         .evict_inode    = nilfs_evict_inode,
704         .put_super      = nilfs_put_super,
705         .sync_fs        = nilfs_sync_fs,
706         .freeze_fs      = nilfs_freeze,
707         .unfreeze_fs    = nilfs_unfreeze,
708         .statfs         = nilfs_statfs,
709         .show_options = nilfs_show_options
710 };
711
712 enum {
713         Opt_err, Opt_barrier, Opt_snapshot, Opt_order, Opt_norecovery,
714         Opt_discard,
715 };
716
717 static const struct constant_table nilfs_param_err[] = {
718         {"continue",    NILFS_MOUNT_ERRORS_CONT},
719         {"panic",       NILFS_MOUNT_ERRORS_PANIC},
720         {"remount-ro",  NILFS_MOUNT_ERRORS_RO},
721         {}
722 };
723
724 static const struct fs_parameter_spec nilfs_param_spec[] = {
725         fsparam_enum    ("errors", Opt_err, nilfs_param_err),
726         fsparam_flag_no ("barrier", Opt_barrier),
727         fsparam_u64     ("cp", Opt_snapshot),
728         fsparam_string  ("order", Opt_order),
729         fsparam_flag    ("norecovery", Opt_norecovery),
730         fsparam_flag_no ("discard", Opt_discard),
731         {}
732 };
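/*
 * Example mount invocations (assuming a nilfs2 volume on /dev/sdb1):
 *
 *   mount -t nilfs2 -o nobarrier,order=strict /dev/sdb1 /mnt
 *   mount -t nilfs2 -o ro,cp=17 /dev/sdb1 /snap    (snapshot mount)
 *
 * "cp=" requires a read-only mount (checked in nilfs_get_tree()) and
 * the checkpoint must have been turned into a snapshot beforehand
 * (checked in nilfs_attach_snapshot()).
 */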
733
734 struct nilfs_fs_context {
735         unsigned long ns_mount_opt;
736         __u64 cno;
737 };
738
739 static int nilfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
740 {
741         struct nilfs_fs_context *nilfs = fc->fs_private;
742         int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
743         struct fs_parse_result result;
744         int opt;
745
746         opt = fs_parse(fc, nilfs_param_spec, param, &result);
747         if (opt < 0)
748                 return opt;
749
750         switch (opt) {
751         case Opt_barrier:
752                 if (result.negated)
753                         nilfs_clear_opt(nilfs, BARRIER);
754                 else
755                         nilfs_set_opt(nilfs, BARRIER);
756                 break;
757         case Opt_order:
758                 if (strcmp(param->string, "relaxed") == 0)
759                         /* Ordered data semantics */
760                         nilfs_clear_opt(nilfs, STRICT_ORDER);
761                 else if (strcmp(param->string, "strict") == 0)
762                         /* Strict in-order semantics */
763                         nilfs_set_opt(nilfs, STRICT_ORDER);
764                 else
765                         return -EINVAL;
766                 break;
767         case Opt_err:
768                 nilfs->ns_mount_opt &= ~NILFS_MOUNT_ERROR_MODE;
769                 nilfs->ns_mount_opt |= result.uint_32;
770                 break;
771         case Opt_snapshot:
772                 if (is_remount) {
773                         struct super_block *sb = fc->root->d_sb;
774
775                         nilfs_err(sb,
776                                   "\"%s\" option is invalid for remount",
777                                   param->key);
778                         return -EINVAL;
779                 }
780                 if (result.uint_64 == 0) {
781                         nilfs_err(NULL,
782                                   "invalid option \"cp=0\": invalid checkpoint number 0");
783                         return -EINVAL;
784                 }
785                 nilfs->cno = result.uint_64;
786                 break;
787         case Opt_norecovery:
788                 nilfs_set_opt(nilfs, NORECOVERY);
789                 break;
790         case Opt_discard:
791                 if (result.negated)
792                         nilfs_clear_opt(nilfs, DISCARD);
793                 else
794                         nilfs_set_opt(nilfs, DISCARD);
795                 break;
796         default:
797                 return -EINVAL;
798         }
799
800         return 0;
801 }
802
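/**
 * nilfs_setup_super - mark the file system as in use
 * @sb:       super block instance
 * @is_mount: true on mount (or read/write remount), false on unfreeze
 *
 * Increments the mount count and updates the mount time at mount time,
 * clears the "valid FS" flag, and writes the super block copies back
 * to the device.
 *
 * The caller must hold nilfs->ns_sem.
 */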
803 static int nilfs_setup_super(struct super_block *sb, int is_mount)
804 {
805         struct the_nilfs *nilfs = sb->s_fs_info;
806         struct nilfs_super_block **sbp;
807         int max_mnt_count;
808         int mnt_count;
809
810         /* nilfs->ns_sem must be locked by the caller. */
811         sbp = nilfs_prepare_super(sb, 0);
812         if (!sbp)
813                 return -EIO;
814
815         if (!is_mount)
816                 goto skip_mount_setup;
817
818         max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
819         mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
820
821         if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
822                 nilfs_warn(sb, "mounting fs with errors");
823 #if 0
824         } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
825                 nilfs_warn(sb, "maximal mount count reached");
826 #endif
827         }
828         if (!max_mnt_count)
829                 sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
830
831         sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
832         sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());
833
834 skip_mount_setup:
835         sbp[0]->s_state =
836                 cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
837         /* synchronize sbp[1] with sbp[0] */
838         if (sbp[1])
839                 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
840         return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
841 }
842
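/**
 * nilfs_read_super_block - read a super block image from the device
 * @sb:        super block instance
 * @pos:       byte offset of the super block on the device
 * @blocksize: block size to use for the read
 * @pbh:       output pointer for the buffer head holding the image
 *
 * Returns a pointer to the super block image inside the read buffer,
 * or NULL if the block could not be read.  On success the caller is
 * responsible for releasing *@pbh with brelse().
 */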
843 struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
844                                                  u64 pos, int blocksize,
845                                                  struct buffer_head **pbh)
846 {
847         unsigned long long sb_index = pos;
848         unsigned long offset;
849
850         offset = do_div(sb_index, blocksize);
851         *pbh = sb_bread(sb, sb_index);
852         if (!*pbh)
853                 return NULL;
854         return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
855 }
856
857 int nilfs_store_magic(struct super_block *sb,
858                       struct nilfs_super_block *sbp)
859 {
860         struct the_nilfs *nilfs = sb->s_fs_info;
861
862         sb->s_magic = le16_to_cpu(sbp->s_magic);
863
864         /* FS independent flags */
865 #ifdef NILFS_ATIME_DISABLE
866         sb->s_flags |= SB_NOATIME;
867 #endif
868
869         nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
870         nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
871         nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
872         nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
873
874         return 0;
875 }
876
877 int nilfs_check_feature_compatibility(struct super_block *sb,
878                                       struct nilfs_super_block *sbp)
879 {
880         __u64 features;
881
882         features = le64_to_cpu(sbp->s_feature_incompat) &
883                 ~NILFS_FEATURE_INCOMPAT_SUPP;
884         if (features) {
885                 nilfs_err(sb,
886                           "couldn't mount because of unsupported optional features (%llx)",
887                           (unsigned long long)features);
888                 return -EINVAL;
889         }
890         features = le64_to_cpu(sbp->s_feature_compat_ro) &
891                 ~NILFS_FEATURE_COMPAT_RO_SUPP;
892         if (!sb_rdonly(sb) && features) {
893                 nilfs_err(sb,
894                           "couldn't mount RDWR because of unsupported optional features (%llx)",
895                           (unsigned long long)features);
896                 return -EINVAL;
897         }
898         return 0;
899 }
900
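/*
 * nilfs_get_root_dentry - obtain the root dentry for a nilfs_root.
 *
 * For the current tree, an existing alias is reused if available,
 * otherwise a new root dentry is allocated with d_make_root().  For
 * snapshot roots, d_obtain_root() is used instead.  The root inode is
 * sanity-checked before a dentry is created.
 */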
901 static int nilfs_get_root_dentry(struct super_block *sb,
902                                  struct nilfs_root *root,
903                                  struct dentry **root_dentry)
904 {
905         struct inode *inode;
906         struct dentry *dentry;
907         int ret = 0;
908
909         inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
910         if (IS_ERR(inode)) {
911                 ret = PTR_ERR(inode);
912                 nilfs_err(sb, "error %d getting root inode", ret);
913                 goto out;
914         }
915         if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
916                 iput(inode);
917                 nilfs_err(sb, "corrupt root inode");
918                 ret = -EINVAL;
919                 goto out;
920         }
921
922         if (root->cno == NILFS_CPTREE_CURRENT_CNO) {
923                 dentry = d_find_alias(inode);
924                 if (!dentry) {
925                         dentry = d_make_root(inode);
926                         if (!dentry) {
927                                 ret = -ENOMEM;
928                                 goto failed_dentry;
929                         }
930                 } else {
931                         iput(inode);
932                 }
933         } else {
934                 dentry = d_obtain_root(inode);
935                 if (IS_ERR(dentry)) {
936                         ret = PTR_ERR(dentry);
937                         goto failed_dentry;
938                 }
939         }
940         *root_dentry = dentry;
941  out:
942         return ret;
943
944  failed_dentry:
945         nilfs_err(sb, "error %d getting root dentry", ret);
946         goto out;
947 }
948
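/*
 * nilfs_attach_snapshot - attach a snapshot checkpoint and get its root
 * dentry.  Fails with -EINVAL if the given checkpoint number does not
 * refer to a snapshot.
 */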
949 static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
950                                  struct dentry **root_dentry)
951 {
952         struct the_nilfs *nilfs = s->s_fs_info;
953         struct nilfs_root *root;
954         int ret;
955
956         mutex_lock(&nilfs->ns_snapshot_mount_mutex);
957
958         down_read(&nilfs->ns_segctor_sem);
959         ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
960         up_read(&nilfs->ns_segctor_sem);
961         if (ret < 0) {
962                 ret = (ret == -ENOENT) ? -EINVAL : ret;
963                 goto out;
964         } else if (!ret) {
965                 nilfs_err(s,
966                           "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
967                           (unsigned long long)cno);
968                 ret = -EINVAL;
969                 goto out;
970         }
971
972         ret = nilfs_attach_checkpoint(s, cno, false, &root);
973         if (ret) {
974                 nilfs_err(s,
975                           "error %d while loading snapshot (checkpoint number=%llu)",
976                           ret, (unsigned long long)cno);
977                 goto out;
978         }
979         ret = nilfs_get_root_dentry(s, root, root_dentry);
980         nilfs_put_root(root);
981  out:
982         mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
983         return ret;
984 }
985
986 /**
987  * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
988  * @root_dentry: root dentry of the tree to be shrunk
989  *
990  * This function returns true if the tree is still in use.
991  */
992 static bool nilfs_tree_is_busy(struct dentry *root_dentry)
993 {
994         shrink_dcache_parent(root_dentry);
995         return d_count(root_dentry) > 1;
996 }
997
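/**
 * nilfs_checkpoint_is_mounted - check whether a checkpoint is in use
 * @sb:  super block instance
 * @cno: checkpoint number to test
 *
 * Returns true if @cno is one of the most recent checkpoints, or if a
 * snapshot mounted at @cno still has busy dentries.  Returns false
 * otherwise, including when @cno is beyond the latest checkpoint.
 */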
998 int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
999 {
1000         struct the_nilfs *nilfs = sb->s_fs_info;
1001         struct nilfs_root *root;
1002         struct inode *inode;
1003         struct dentry *dentry;
1004         int ret;
1005
1006         if (cno > nilfs->ns_cno)
1007                 return false;
1008
1009         if (cno >= nilfs_last_cno(nilfs))
1010                 return true;    /* protect recent checkpoints */
1011
1012         ret = false;
1013         root = nilfs_lookup_root(nilfs, cno);
1014         if (root) {
1015                 inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
1016                 if (inode) {
1017                         dentry = d_find_alias(inode);
1018                         if (dentry) {
1019                                 ret = nilfs_tree_is_busy(dentry);
1020                                 dput(dentry);
1021                         }
1022                         iput(inode);
1023                 }
1024                 nilfs_put_root(root);
1025         }
1026         return ret;
1027 }
1028
1029 /**
1030  * nilfs_fill_super() - initialize a super block instance
1031  * @sb: super_block
1032  * @fc: filesystem context
1033  *
1034  * This function is serialized by nilfs->ns_mount_mutex, so the
1035  * recovery process is protected from other simultaneous mounts.
1036  */
1037 static int
1038 nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
1039 {
1040         struct the_nilfs *nilfs;
1041         struct nilfs_root *fsroot;
1042         struct nilfs_fs_context *ctx = fc->fs_private;
1043         __u64 cno;
1044         int err;
1045
1046         nilfs = alloc_nilfs(sb);
1047         if (!nilfs)
1048                 return -ENOMEM;
1049
1050         sb->s_fs_info = nilfs;
1051
1052         err = init_nilfs(nilfs, sb);
1053         if (err)
1054                 goto failed_nilfs;
1055
1056         /* Copy in parsed mount options */
1057         nilfs->ns_mount_opt = ctx->ns_mount_opt;
1058
1059         sb->s_op = &nilfs_sops;
1060         sb->s_export_op = &nilfs_export_ops;
1061         sb->s_root = NULL;
1062         sb->s_time_gran = 1;
1063         sb->s_max_links = NILFS_LINK_MAX;
1064
1065         sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi);
1066
1067         err = load_nilfs(nilfs, sb);
1068         if (err)
1069                 goto failed_nilfs;
1070
1071         super_set_uuid(sb, nilfs->ns_sbp[0]->s_uuid,
1072                        sizeof(nilfs->ns_sbp[0]->s_uuid));
1073         super_set_sysfs_name_bdev(sb);
1074
1075         cno = nilfs_last_cno(nilfs);
1076         err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
1077         if (err) {
1078                 nilfs_err(sb,
1079                           "error %d while loading last checkpoint (checkpoint number=%llu)",
1080                           err, (unsigned long long)cno);
1081                 goto failed_unload;
1082         }
1083
1084         if (!sb_rdonly(sb)) {
1085                 err = nilfs_attach_log_writer(sb, fsroot);
1086                 if (err)
1087                         goto failed_checkpoint;
1088         }
1089
1090         err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
1091         if (err)
1092                 goto failed_segctor;
1093
1094         nilfs_put_root(fsroot);
1095
1096         if (!sb_rdonly(sb)) {
1097                 down_write(&nilfs->ns_sem);
1098                 nilfs_setup_super(sb, true);
1099                 up_write(&nilfs->ns_sem);
1100         }
1101
1102         return 0;
1103
1104  failed_segctor:
1105         nilfs_detach_log_writer(sb);
1106
1107  failed_checkpoint:
1108         nilfs_put_root(fsroot);
1109
1110  failed_unload:
1111         nilfs_sysfs_delete_device_group(nilfs);
1112         iput(nilfs->ns_sufile);
1113         iput(nilfs->ns_cpfile);
1114         iput(nilfs->ns_dat);
1115
1116  failed_nilfs:
1117         destroy_nilfs(nilfs);
1118         return err;
1119 }
1120
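/*
 * nilfs_reconfigure - handle a remount request
 *
 * Remounting read-only writes a clean super block back to the device.
 * Remounting read/write re-checks the read-only compatible feature
 * flags and restarts the log writer.  If the read-only state does not
 * change, only the mount options are updated.  Remounting is refused
 * while the file system is in an incomplete recovery state.
 */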
1121 static int nilfs_reconfigure(struct fs_context *fc)
1122 {
1123         struct nilfs_fs_context *ctx = fc->fs_private;
1124         struct super_block *sb = fc->root->d_sb;
1125         struct the_nilfs *nilfs = sb->s_fs_info;
1126         int err;
1127
1128         sync_filesystem(sb);
1129
1130         err = -EINVAL;
1131
1132         if (!nilfs_valid_fs(nilfs)) {
1133                 nilfs_warn(sb,
1134                            "couldn't remount because the filesystem is in an incomplete recovery state");
1135                 goto ignore_opts;
1136         }
1137         if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
1138                 goto out;
1139         if (fc->sb_flags & SB_RDONLY) {
1140                 sb->s_flags |= SB_RDONLY;
1141
1142                 /*
1143                  * Remounting a valid RW partition RDONLY, so set
1144                  * the RDONLY flag and then mark the partition as valid again.
1145                  */
1146                 down_write(&nilfs->ns_sem);
1147                 nilfs_cleanup_super(sb);
1148                 up_write(&nilfs->ns_sem);
1149         } else {
1150                 __u64 features;
1151                 struct nilfs_root *root;
1152
1153                 /*
1154                  * Mounting a RDONLY partition read-write, so reread and
1155                  * store the current valid flag.  (It may have been changed
1156                  * by fsck since we originally mounted the partition.)
1157                  */
1158                 down_read(&nilfs->ns_sem);
1159                 features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
1160                         ~NILFS_FEATURE_COMPAT_RO_SUPP;
1161                 up_read(&nilfs->ns_sem);
1162                 if (features) {
1163                         nilfs_warn(sb,
1164                                    "couldn't remount RDWR because of unsupported optional features (%llx)",
1165                                    (unsigned long long)features);
1166                         err = -EROFS;
1167                         goto ignore_opts;
1168                 }
1169
1170                 sb->s_flags &= ~SB_RDONLY;
1171
1172                 root = NILFS_I(d_inode(sb->s_root))->i_root;
1173                 err = nilfs_attach_log_writer(sb, root);
1174                 if (err) {
1175                         sb->s_flags |= SB_RDONLY;
1176                         goto ignore_opts;
1177                 }
1178
1179                 down_write(&nilfs->ns_sem);
1180                 nilfs_setup_super(sb, true);
1181                 up_write(&nilfs->ns_sem);
1182         }
1183  out:
1184         sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
1185         /* Copy over parsed remount options */
1186         nilfs->ns_mount_opt = ctx->ns_mount_opt;
1187
1188         return 0;
1189
1190  ignore_opts:
1191         return err;
1192 }
1193
1194 static int
1195 nilfs_get_tree(struct fs_context *fc)
1196 {
1197         struct nilfs_fs_context *ctx = fc->fs_private;
1198         struct super_block *s;
1199         dev_t dev;
1200         int err;
1201
1202         if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
1203                 nilfs_err(NULL,
1204                           "invalid option \"cp=%llu\": read-only option is not specified",
1205                           ctx->cno);
1206                 return -EINVAL;
1207         }
1208
1209         err = lookup_bdev(fc->source, &dev);
1210         if (err)
1211                 return err;
1212
1213         s = sget_dev(fc, dev);
1214         if (IS_ERR(s))
1215                 return PTR_ERR(s);
1216
1217         if (!s->s_root) {
1218                 err = setup_bdev_super(s, fc->sb_flags, fc);
1219                 if (!err)
1220                         err = nilfs_fill_super(s, fc);
1221                 if (err)
1222                         goto failed_super;
1223
1224                 s->s_flags |= SB_ACTIVE;
1225         } else if (!ctx->cno) {
1226                 if (nilfs_tree_is_busy(s->s_root)) {
1227                         if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
1228                                 nilfs_err(s,
1229                                           "the device already has a %s mount.",
1230                                           sb_rdonly(s) ? "read-only" : "read/write");
1231                                 err = -EBUSY;
1232                                 goto failed_super;
1233                         }
1234                 } else {
1235                         /*
1236                          * Try to reconfigure to set up mount states if the current
1237                          * tree is not mounted and only snapshots use this sb.
1238                          *
1239                          * Since nilfs_reconfigure() requires fc->root to be
1240                          * set, set it first and release it on failure.
1241                          */
1242                         fc->root = dget(s->s_root);
1243                         err = nilfs_reconfigure(fc);
1244                         if (err) {
1245                                 dput(fc->root);
1246                                 fc->root = NULL;  /* prevent double release */
1247                                 goto failed_super;
1248                         }
1249                         return 0;
1250                 }
1251         }
1252
1253         if (ctx->cno) {
1254                 struct dentry *root_dentry;
1255
1256                 err = nilfs_attach_snapshot(s, ctx->cno, &root_dentry);
1257                 if (err)
1258                         goto failed_super;
1259                 fc->root = root_dentry;
1260                 return 0;
1261         }
1262
1263         fc->root = dget(s->s_root);
1264         return 0;
1265
1266  failed_super:
1267         deactivate_locked_super(s);
1268         return err;
1269 }
1270
1271 static void nilfs_free_fc(struct fs_context *fc)
1272 {
1273         kfree(fc->fs_private);
1274 }
1275
1276 static const struct fs_context_operations nilfs_context_ops = {
1277         .parse_param    = nilfs_parse_param,
1278         .get_tree       = nilfs_get_tree,
1279         .reconfigure    = nilfs_reconfigure,
1280         .free           = nilfs_free_fc,
1281 };
1282
1283 static int nilfs_init_fs_context(struct fs_context *fc)
1284 {
1285         struct nilfs_fs_context *ctx;
1286
1287         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1288         if (!ctx)
1289                 return -ENOMEM;
1290
1291         ctx->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
1292         fc->fs_private = ctx;
1293         fc->ops = &nilfs_context_ops;
1294
1295         return 0;
1296 }
1297
1298 struct file_system_type nilfs_fs_type = {
1299         .owner    = THIS_MODULE,
1300         .name     = "nilfs2",
1301         .kill_sb  = kill_block_super,
1302         .fs_flags = FS_REQUIRES_DEV,
1303         .init_fs_context = nilfs_init_fs_context,
1304         .parameters = nilfs_param_spec,
1305 };
1306 MODULE_ALIAS_FS("nilfs2");
1307
1308 static void nilfs_inode_init_once(void *obj)
1309 {
1310         struct nilfs_inode_info *ii = obj;
1311
1312         INIT_LIST_HEAD(&ii->i_dirty);
1313 #ifdef CONFIG_NILFS_XATTR
1314         init_rwsem(&ii->xattr_sem);
1315 #endif
1316         inode_init_once(&ii->vfs_inode);
1317 }
1318
1319 static void nilfs_segbuf_init_once(void *obj)
1320 {
1321         memset(obj, 0, sizeof(struct nilfs_segment_buffer));
1322 }
1323
1324 static void nilfs_destroy_cachep(void)
1325 {
1326         /*
1327          * Make sure all delayed rcu free inodes are flushed before we
1328          * destroy cache.
1329          */
1330         rcu_barrier();
1331
1332         kmem_cache_destroy(nilfs_inode_cachep);
1333         kmem_cache_destroy(nilfs_transaction_cachep);
1334         kmem_cache_destroy(nilfs_segbuf_cachep);
1335         kmem_cache_destroy(nilfs_btree_path_cache);
1336 }
1337
1338 static int __init nilfs_init_cachep(void)
1339 {
1340         nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
1341                         sizeof(struct nilfs_inode_info), 0,
1342                         SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
1343                         nilfs_inode_init_once);
1344         if (!nilfs_inode_cachep)
1345                 goto fail;
1346
1347         nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache",
1348                         sizeof(struct nilfs_transaction_info), 0,
1349                         SLAB_RECLAIM_ACCOUNT, NULL);
1350         if (!nilfs_transaction_cachep)
1351                 goto fail;
1352
1353         nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache",
1354                         sizeof(struct nilfs_segment_buffer), 0,
1355                         SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once);
1356         if (!nilfs_segbuf_cachep)
1357                 goto fail;
1358
1359         nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache",
1360                         sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX,
1361                         0, 0, NULL);
1362         if (!nilfs_btree_path_cache)
1363                 goto fail;
1364
1365         return 0;
1366
1367 fail:
1368         nilfs_destroy_cachep();
1369         return -ENOMEM;
1370 }
1371
1372 static int __init init_nilfs_fs(void)
1373 {
1374         int err;
1375
1376         err = nilfs_init_cachep();
1377         if (err)
1378                 goto fail;
1379
1380         err = nilfs_sysfs_init();
1381         if (err)
1382                 goto free_cachep;
1383
1384         err = register_filesystem(&nilfs_fs_type);
1385         if (err)
1386                 goto deinit_sysfs_entry;
1387
1388         printk(KERN_INFO "NILFS version 2 loaded\n");
1389         return 0;
1390
1391 deinit_sysfs_entry:
1392         nilfs_sysfs_exit();
1393 free_cachep:
1394         nilfs_destroy_cachep();
1395 fail:
1396         return err;
1397 }
1398
1399 static void __exit exit_nilfs_fs(void)
1400 {
1401         nilfs_destroy_cachep();
1402         nilfs_sysfs_exit();
1403         unregister_filesystem(&nilfs_fs_type);
1404 }
1405
1406 module_init(init_nilfs_fs)
1407 module_exit(exit_nilfs_fs)