// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - filesystem drivers list
 *                                   - umount system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include <linux/user_namespace.h>
#include <linux/fs_context.h>
#include <uapi/linux/mount.h>

static int thaw_super_locked(struct super_block *sb, enum freeze_holder who);

static LIST_HEAD(super_blocks);
static DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

static inline void __super_lock(struct super_block *sb, bool excl)
{
	if (excl)
		down_write(&sb->s_umount);
	else
		down_read(&sb->s_umount);
}

static inline void super_unlock(struct super_block *sb, bool excl)
{
	if (excl)
		up_write(&sb->s_umount);
	else
		up_read(&sb->s_umount);
}

static inline void __super_lock_excl(struct super_block *sb)
{
	__super_lock(sb, true);
}

static inline void super_unlock_excl(struct super_block *sb)
{
	super_unlock(sb, true);
}

static inline void super_unlock_shared(struct super_block *sb)
{
	super_unlock(sb, false);
}

static bool super_flags(const struct super_block *sb, unsigned int flags)
{
	/*
	 * Pairs with smp_store_release() in super_wake() and ensures
	 * that we see @flags after we're woken.
	 */
	return smp_load_acquire(&sb->s_flags) & flags;
}

/**
 * super_lock - wait for superblock to become ready and lock it
 * @sb: superblock to wait for
 * @excl: whether exclusive access is required
 *
 * If the superblock has neither passed through vfs_get_tree() nor
 * generic_shutdown_super() yet, wait for it to happen. Either superblock
 * creation will succeed and SB_BORN is set by vfs_get_tree() or we're
 * woken and we'll see SB_DYING.
 *
 * The caller must have acquired a temporary reference on @sb->s_count.
 *
 * Return: The function returns true if SB_BORN was set and with
 *         s_umount held. The function returns false if SB_DYING was
 *         set and without s_umount held.
 */
static __must_check bool super_lock(struct super_block *sb, bool excl)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* wait until the superblock is ready or dying */
	wait_var_event(&sb->s_flags, super_flags(sb, SB_BORN | SB_DYING));

	/* Don't pointlessly acquire s_umount. */
	if (super_flags(sb, SB_DYING))
		return false;

	__super_lock(sb, excl);

	/*
	 * Has gone through generic_shutdown_super() in the meantime.
	 * @sb->s_root is NULL and @sb->s_active is 0. No one needs to
	 * grab a reference to this. Tell them so.
	 */
	if (sb->s_flags & SB_DYING) {
		super_unlock(sb, excl);
		return false;
	}

	WARN_ON_ONCE(!(sb->s_flags & SB_BORN));
	return true;
}

/* wait and try to acquire read-side of @sb->s_umount */
static inline bool super_lock_shared(struct super_block *sb)
{
	return super_lock(sb, false);
}

/* wait and try to acquire write-side of @sb->s_umount */
static inline bool super_lock_excl(struct super_block *sb)
{
	return super_lock(sb, true);
}

/* wake waiters */
#define SUPER_WAKE_FLAGS (SB_BORN | SB_DYING | SB_DEAD)
static void super_wake(struct super_block *sb, unsigned int flag)
{
	WARN_ON_ONCE((flag & ~SUPER_WAKE_FLAGS));
	WARN_ON_ONCE(hweight32(flag & SUPER_WAKE_FLAGS) > 1);

	/*
	 * Pairs with smp_load_acquire() in super_lock() to make sure
	 * all initializations in the superblock are seen by the user
	 * seeing SB_BORN sent.
	 */
	smp_store_release(&sb->s_flags, sb->s_flags | flag);
	/*
	 * Pairs with the barrier in prepare_to_wait_event() to make sure
	 * ___wait_var_event() either sees SB_BORN set or
	 * waitqueue_active() check in wake_up_var() sees the waiter.
	 */
	smp_mb();
	wake_up_var(&sb->s_flags);
}

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = shrink->private_data;

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!super_trylock_shared(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc);

	inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
	dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects = dentries + inodes + fs_objects + 1;
	if (!total_objects)
		total_objects = 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
	fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 *
	 * Ensure that we always scan at least one object - memcg kmem
	 * accounting uses this to fully empty the caches.
	 */
	sc->nr_to_scan = dentries + 1;
	freed = prune_dcache_sb(sb, sc);
	sc->nr_to_scan = inodes + 1;
	freed += prune_icache_sb(sb, sc);

	if (fs_objects) {
		sc->nr_to_scan = fs_objects + 1;
		freed += sb->s_op->free_cached_objects(sb, sc);
	}

	super_unlock_shared(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = shrink->private_data;

	/*
	 * We don't call super_trylock_shared() here as it is a scalability
	 * bottleneck, so we're exposed to partial setup state. The shrinker
	 * rwsem does not protect filesystem operations backing
	 * list_lru_shrink_count() or s_op->nr_cached_objects(). Counts can
	 * change between super_cache_count and super_cache_scan, so we really
	 * don't need locks here.
	 *
	 * However, if we are currently mounting the superblock, the underlying
	 * filesystem might be in a state of partial construction and hence it
	 * is dangerous to access it.  super_trylock_shared() uses a SB_BORN check
	 * to avoid this situation, so do the same here. The memory barrier is
	 * matched with the one in mount_fs() as we don't hold locks here.
	 */
	if (!(sb->s_flags & SB_BORN))
		return 0;
	smp_rmb();

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb, sc);

	total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
	total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);

	if (!total_objects)
		return SHRINK_EMPTY;

	total_objects = vfs_pressure_ratio(total_objects);
	return total_objects;
}

static void destroy_super_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
							destroy_work);
	int i;

	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_free_rwsem(&s->s_writers.rw_sem[i]);
	kfree(s);
}

static void destroy_super_rcu(struct rcu_head *head)
{
	struct super_block *s = container_of(head, struct super_block, rcu);
	INIT_WORK(&s->destroy_work, destroy_super_work);
	schedule_work(&s->destroy_work);
}

/* Free a superblock that has never been seen by anyone */
static void destroy_unused_super(struct super_block *s)
{
	if (!s)
		return;
	super_unlock_excl(s);
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	security_sb_free(s);
	put_user_ns(s->s_user_ns);
	kfree(s->s_subtype);
	shrinker_free(s->s_shrink);
	/* no delays needed */
	destroy_super_work(&s->destroy_work);
}

/**
 *	alloc_super	-	create new superblock
 *	@type:	filesystem type superblock should belong to
 *	@flags: the mount flags
 *	@user_ns: User namespace for the super_block
 *
 *	Allocates and initializes a new &struct super_block.  alloc_super()
 *	returns a pointer to the new superblock or %NULL if allocation had
 *	failed.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags,
				       struct user_namespace *user_ns)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_KERNEL);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);
	s->s_user_ns = get_user_ns(user_ns);
	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
					sb_writers_name[i],
					&type->s_writers_key[i]))
			goto fail;
	}
	s->s_bdi = &noop_backing_dev_info;
	s->s_flags = flags;
	if (s->s_user_ns != &init_user_ns)
		s->s_iflags |= SB_I_NODEV;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_roots);
	mutex_init(&s->s_sync_lock);
	INIT_LIST_HEAD(&s->s_inodes);
	spin_lock_init(&s->s_inode_list_lock);
	INIT_LIST_HEAD(&s->s_inodes_wb);
	spin_lock_init(&s->s_inode_wblist_lock);

	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	init_rwsem(&s->s_dquot.dqio_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
	s->s_time_gran = 1000000000;
	s->s_time_min = TIME64_MIN;
	s->s_time_max = TIME64_MAX;

	s->s_shrink = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
				     "sb-%s", type->name);
	if (!s->s_shrink)
		goto fail;

	s->s_shrink->scan_objects = super_cache_scan;
	s->s_shrink->count_objects = super_cache_count;
	s->s_shrink->batch = 1024;
	s->s_shrink->private_data = s;

	if (list_lru_init_memcg(&s->s_dentry_lru, s->s_shrink))
		goto fail;
	if (list_lru_init_memcg(&s->s_inode_lru, s->s_shrink))
		goto fail;
	return s;

fail:
	destroy_unused_super(s);
	return NULL;
}

/* Superblock refcounting  */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *s)
{
	if (!--s->s_count) {
		list_del_init(&s->s_list);
		WARN_ON(s->s_dentry_lru.node);
		WARN_ON(s->s_inode_lru.node);
		WARN_ON(!list_empty(&s->s_mounts));
		security_sb_free(s);
		put_user_ns(s->s_user_ns);
		kfree(s->s_subtype);
		call_rcu(&s->rcu, destroy_super_rcu);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference, frees superblock if there's no
 * references left.
 */
void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}

static void kill_super_notify(struct super_block *sb)
{
	lockdep_assert_not_held(&sb->s_umount);

	/* already notified earlier */
	if (sb->s_flags & SB_DEAD)
		return;

	/*
	 * Remove it from @fs_supers so it isn't found by new
	 * sget{_fc}() walkers anymore. Any concurrent mounter still
	 * managing to grab a temporary reference is guaranteed to
	 * already see SB_DYING and will wait until we notify them about
	 * SB_DEAD.
	 */
	spin_lock(&sb_lock);
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);

	/*
	 * Let concurrent mounts know that this thing is really dead.
	 * We don't need @sb->s_umount here as every concurrent caller
	 * will see SB_DYING and either discard the superblock or wait
	 * until we're done.
	 */
	super_wake(sb, SB_DEAD);
}

/**
 *	deactivate_locked_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Drops an active reference to superblock, converting it into a temporary
 *	one if there are no other active references left. In that case we
 *	tell fs driver to shut it down and drop the temporary reference we
 *	had just acquired.
 *
 *	Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		shrinker_free(s->s_shrink);
		fs->kill_sb(s);

		kill_super_notify(s);

		/*
		 * Since list_lru_destroy() may sleep, we cannot call it from
		 * put_super(), where we hold the sb_lock. Therefore we destroy
		 * the lru lists right now.
		 */
		list_lru_destroy(&s->s_dentry_lru);
		list_lru_destroy(&s->s_inode_lru);

		put_filesystem(fs);
		put_super(s);
	} else {
		super_unlock_excl(s);
	}
}
EXPORT_SYMBOL(deactivate_locked_super);

/**
 *	deactivate_super	-	drop an active reference to superblock
 *	@s: superblock to deactivate
 *
 *	Variant of deactivate_locked_super(), except that superblock is *not*
 *	locked by caller.  If we are going to drop the final active reference,
 *	lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		__super_lock_excl(s);
		deactivate_locked_super(s);
	}
}
EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference to a superblock
 * @sb: superblock to acquire
 *
 * Acquire a temporary reference on a superblock and try to trade it for
 * an active reference. This is used in sget{_fc}() to wait for a
 * superblock to either become SB_BORN or for it to pass through
 * sb->kill() and be marked as SB_DEAD.
 *
 * Return: This returns true if an active reference could be acquired,
 *         false if not.
 */
static bool grab_super(struct super_block *sb)
{
	bool locked;

	sb->s_count++;
	spin_unlock(&sb_lock);
	locked = super_lock_excl(sb);
	if (locked) {
		if (atomic_inc_not_zero(&sb->s_active)) {
			put_super(sb);
			return true;
		}
		super_unlock_excl(sb);
	}
	wait_var_event(&sb->s_flags, super_flags(sb, SB_DEAD));
	put_super(sb);
	return false;
}

/*
 *	super_trylock_shared - try to grab ->s_umount shared
 *	@sb: reference we are trying to grab
 *
 *	Try to prevent fs shutdown.  This is used in places where we
 *	cannot take an active reference but we need to ensure that the
 *	filesystem is not shut down while we are working on it. It returns
 *	false if we cannot acquire s_umount or if we lose the race and
 *	filesystem already got into shutdown, and returns true with the s_umount
 *	lock held in read mode in case of success. On successful return,
 *	the caller must drop the s_umount lock when done.
 *
 *	Note that unlike get_super() et al. this one does *not* bump ->s_count.
 *	The reason why it's safe is that we are OK with doing trylock instead
 *	of down_read().  There's a couple of places that are OK with that, but
 *	it's very much not a general-purpose interface.
 */
bool super_trylock_shared(struct super_block *sb)
{
	if (down_read_trylock(&sb->s_umount)) {
		if (!(sb->s_flags & SB_DYING) && sb->s_root &&
		    (sb->s_flags & SB_BORN))
			return true;
		super_unlock_shared(sb);
	}

	return false;
}

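/*
 * Illustrative sketch (not part of this file): a caller that cannot take
 * an active reference typically uses the trylock pattern below. The
 * do_something_with() helper is hypothetical.
 *
 *	if (super_trylock_shared(sb)) {
 *		do_something_with(sb);
 *		super_unlock_shared(sb);
 *	}
 */
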
/**
 *	retire_super	-	prevents superblock from being reused
 *	@sb: superblock to retire
 *
 *	The function marks superblock to be ignored in superblock test, which
 *	prevents it from being reused for any new mounts.  If the superblock has
 *	a private bdi, it also unregisters it, but doesn't reduce the refcount
 *	of the superblock to prevent potential races.  The refcount is reduced
 *	by generic_shutdown_super().  The function can not be called
 *	concurrently with generic_shutdown_super().  It is safe to call the
 *	function multiple times, subsequent calls have no effect.
 *
 *	The marker will affect the re-use only for block-device-based
 *	superblocks.  Other superblocks will still get marked if this function
 *	is used, but that will not affect their reusability.
 */
void retire_super(struct super_block *sb)
{
	WARN_ON(!sb->s_bdev);
	__super_lock_excl(sb);
	if (sb->s_iflags & SB_I_PERSB_BDI) {
		bdi_unregister(sb->s_bdi);
		sb->s_iflags &= ~SB_I_PERSB_BDI;
	}
	sb->s_iflags |= SB_I_RETIRED;
	super_unlock_excl(sb);
}
EXPORT_SYMBOL(retire_super);

/**
 *	generic_shutdown_super	-	common helper for ->kill_sb()
 *	@sb: superblock to kill
 *
 *	generic_shutdown_super() does all fs-independent work on superblock
 *	shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 *	that need destruction out of superblock, call generic_shutdown_super()
 *	and release aforementioned objects.  Note: dentries and inodes _are_
 *	taken care of and do not need specific handling.
 *
 *	Upon calling this function, the filesystem may no longer alter or
 *	rearrange the set of dentries belonging to this super_block, nor may it
 *	change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~SB_ACTIVE;

		cgroup_writeback_umount();

		/* Evict all inodes with zero refcount. */
		evict_inodes(sb);

		/*
		 * Clean up and evict any inodes that still have references due
		 * to fsnotify or the security policy.
		 */
		fsnotify_sb_delete(sb);
		security_sb_delete(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		/*
		 * Now that all potentially-encrypted inodes have been evicted,
		 * the fscrypt keyring can be destroyed.
		 */
		fscrypt_destroy_keyring(sb);

		if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes),
				"VFS: Busy inodes after unmount of %s (%s)",
				sb->s_id, sb->s_type->name)) {
			/*
			 * Adding a proper bailout path here would be hard, but
			 * we can at least make it more likely that a later
			 * iput_final() or such crashes cleanly.
			 */
			struct inode *inode;

			spin_lock(&sb->s_inode_list_lock);
			list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
				inode->i_op = VFS_PTR_POISON;
				inode->i_sb = VFS_PTR_POISON;
				inode->i_mapping = VFS_PTR_POISON;
			}
			spin_unlock(&sb->s_inode_list_lock);
		}
	}
	/*
	 * Broadcast to everyone that grabbed a temporary reference to this
	 * superblock before we removed it from @fs_supers that the superblock
	 * is dying. Every walker of @fs_supers outside of sget{_fc}() will now
	 * discard this superblock and treat it as dead.
	 *
	 * We leave the superblock on @fs_supers so it can be found by
	 * sget{_fc}() until we passed sb->kill_sb().
	 */
	super_wake(sb, SB_DYING);
	super_unlock_excl(sb);
	if (sb->s_bdi != &noop_backing_dev_info) {
		if (sb->s_iflags & SB_I_PERSB_BDI)
			bdi_unregister(sb->s_bdi);
		bdi_put(sb->s_bdi);
		sb->s_bdi = &noop_backing_dev_info;
	}
}
EXPORT_SYMBOL(generic_shutdown_super);

bool mount_capable(struct fs_context *fc)
{
	if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
		return capable(CAP_SYS_ADMIN);
	else
		return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}

/**
 * sget_fc - Find or create a superblock
 * @fc:	Filesystem context.
 * @test: Comparison callback
 * @set: Setup callback
 *
 * Create a new superblock or find an existing one.
 *
 * The @test callback is used to find a matching existing superblock.
 * Whether or not the requested parameters in @fc are taken into account
 * is specific to the @test callback that is used. They may even be
 * completely ignored.
 *
 * If an extant superblock is matched, it will be returned unless:
 *
 * (1) the namespace the filesystem context @fc and the extant
 *     superblock's namespace differ
 *
 * (2) the filesystem context @fc has requested that reusing an extant
 *     superblock is not allowed
 *
 * In both cases EBUSY will be returned.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info and s_id will be
 * set and the @set callback will be invoked), the superblock will be
 * published and it will be returned in a partially constructed state
 * with SB_BORN and SB_ACTIVE as yet unset.
 *
 * Return: On success, an extant or newly created superblock is
 *	   returned. On failure an error pointer is returned.
 */
struct super_block *sget_fc(struct fs_context *fc,
			    int (*test)(struct super_block *, struct fs_context *),
			    int (*set)(struct super_block *, struct fs_context *))
{
	struct super_block *s = NULL;
	struct super_block *old;
	struct user_namespace *user_ns = fc->global ? &init_user_ns : fc->user_ns;
	int err;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &fc->fs_type->fs_supers, s_instances) {
			if (test(old, fc))
				goto share_extant_sb;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(fc->fs_type, fc->sb_flags, user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	s->s_fs_info = fc->s_fs_info;
	err = set(s, fc);
	if (err) {
		s->s_fs_info = NULL;
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	fc->s_fs_info = NULL;
	s->s_type = fc->fs_type;
	s->s_iflags |= fc->s_iflags;
	strscpy(s->s_id, s->s_type->name, sizeof(s->s_id));
	/*
	 * Make the superblock visible on @super_blocks and @fs_supers.
	 * It's in a nascent state and users should wait on SB_BORN or
	 * SB_DYING to be set.
	 */
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &s->s_type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(s->s_type);
	shrinker_register(s->s_shrink);
	return s;

share_extant_sb:
	if (user_ns != old->s_user_ns || fc->exclusive) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		if (fc->exclusive)
			warnfc(fc, "reusing existing filesystem not allowed");
		else
			warnfc(fc, "reusing existing filesystem in another namespace not allowed");
		return ERR_PTR(-EBUSY);
	}
	if (!grab_super(old))
		goto retry;
	destroy_unused_super(s);
	return old;
}
EXPORT_SYMBOL(sget_fc);

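/*
 * Illustrative sketch (not part of this file): a filesystem that keys
 * superblocks on an opaque pointer would pair sget_fc() with callbacks
 * like the hypothetical ones below; compare test_keyed_super() and
 * set_anon_super_fc() later in this file.
 *
 *	static int examplefs_test(struct super_block *sb, struct fs_context *fc)
 *	{
 *		return sb->s_fs_info == fc->s_fs_info;
 *	}
 *
 *	static int examplefs_set(struct super_block *sb, struct fs_context *fc)
 *	{
 *		return set_anon_super(sb, NULL);
 *	}
 *
 *	sb = sget_fc(fc, examplefs_test, examplefs_set);
 */
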
/**
 *	sget	-	find or create a superblock
 *	@type:	  filesystem type superblock should belong to
 *	@test:	  comparison callback
 *	@set:	  setup callback
 *	@flags:	  mount flags
 *	@data:	  argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct user_namespace *user_ns = current_user_ns();
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

	/* We don't yet pass the user namespace of the parent
	 * mount through to here so always use &init_user_ns
	 * until that changes.
	 */
	if (flags & SB_SUBMOUNT)
		user_ns = &init_user_ns;

retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (user_ns != old->s_user_ns) {
				spin_unlock(&sb_lock);
				destroy_unused_super(s);
				return ERR_PTR(-EBUSY);
			}
			if (!grab_super(old))
				goto retry;
			destroy_unused_super(s);
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, (flags & ~SB_SUBMOUNT), user_ns);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		destroy_unused_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strscpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	shrinker_register(s->s_shrink);
	return s;
}
EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	super_unlock_shared(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super);

void drop_super_exclusive(struct super_block *sb)
{
	super_unlock_excl(sb);
	put_super(sb);
}
EXPORT_SYMBOL(drop_super_exclusive);

static void __iterate_supers(void (*f)(struct super_block *))
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (super_flags(sb, SB_DYING))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		f(sb);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 *	iterate_supers - call function for all active superblocks
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

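/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * syncing every writable superblock would pass a small callback:
 *
 *	static void sync_one_sb(struct super_block *sb, void *arg)
 *	{
 *		if (!sb_rdonly(sb))
 *			sync_filesystem(sb);
 *	}
 *
 *	iterate_supers(sync_one_sb, NULL);
 */
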
/**
 *	iterate_supers_type - call function for superblocks of given type
 *	@type: fs type
 *	@f: function to call
 *	@arg: argument to pass to it
 *
 *	Scans the superblock list and calls given function, passing it
 *	locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		bool locked;

		sb->s_count++;
		spin_unlock(&sb_lock);

		locked = super_lock_shared(sb);
		if (locked) {
			if (sb->s_root)
				f(sb, arg);
			super_unlock_shared(sb);
		}

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
EXPORT_SYMBOL(iterate_supers_type);

struct super_block *user_get_super(dev_t dev, bool excl)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_dev == dev) {
			bool locked;

			sb->s_count++;
			spin_unlock(&sb_lock);
			/* still alive? */
			locked = super_lock(sb, excl);
			if (locked) {
				if (sb->s_root)
					return sb;
				super_unlock(sb, excl);
			}
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			break;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * reconfigure_super - asks filesystem to change superblock parameters
 * @fc: The superblock and configuration
 *
 * Alters the configuration parameters of a live superblock.
 */
int reconfigure_super(struct fs_context *fc)
{
	struct super_block *sb = fc->root->d_sb;
	int retval;
	bool remount_ro = false;
	bool remount_rw = false;
	bool force = fc->sb_flags & SB_FORCE;

	if (fc->sb_flags_mask & ~MS_RMT_MASK)
		return -EINVAL;
	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

	retval = security_sb_remount(sb, fc->security);
	if (retval)
		return retval;

	if (fc->sb_flags_mask & SB_RDONLY) {
#ifdef CONFIG_BLOCK
		if (!(fc->sb_flags & SB_RDONLY) && sb->s_bdev &&
		    bdev_read_only(sb->s_bdev))
			return -EACCES;
#endif
		remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb);
		remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb);
	}

	if (remount_ro) {
		if (!hlist_empty(&sb->s_pins)) {
			super_unlock_excl(sb);
			group_pin_kill(&sb->s_pins);
			__super_lock_excl(sb);
			if (!sb->s_root)
				return 0;
			if (sb->s_writers.frozen != SB_UNFROZEN)
				return -EBUSY;
			remount_ro = !sb_rdonly(sb);
		}
	}
	shrink_dcache_sb(sb);

	/* If we are reconfiguring to RDONLY and current sb is read/write,
	 * make sure there are no files open for writing.
	 */
	if (remount_ro) {
		if (force) {
			sb_start_ro_state_change(sb);
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	} else if (remount_rw) {
		/*
		 * Protect filesystem's reconfigure code from writes from
		 * userspace until reconfigure finishes.
		 */
		sb_start_ro_state_change(sb);
	}

	if (fc->ops->reconfigure) {
		retval = fc->ops->reconfigure(fc);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}

	WRITE_ONCE(sb->s_flags, ((sb->s_flags & ~fc->sb_flags_mask) |
				 (fc->sb_flags & fc->sb_flags_mask)));
	sb_end_ro_state_change(sb);

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb_end_ro_state_change(sb);
	return retval;
}

static void do_emergency_remount_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root && sb->s_bdev && !sb_rdonly(sb)) {
		struct fs_context *fc;

		fc = fs_context_for_reconfigure(sb->s_root,
					SB_RDONLY | SB_FORCE, SB_RDONLY);
		if (!IS_ERR(fc)) {
			if (parse_monolithic_mount_data(fc, NULL) == 0)
				(void)reconfigure_super(fc);
			put_fs_context(fc);
		}
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_emergency_remount(struct work_struct *work)
{
	__iterate_supers(do_emergency_remount_callback);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

static void do_thaw_all_callback(struct super_block *sb)
{
	bool locked = super_lock_excl(sb);

	if (locked && sb->s_root) {
		if (IS_ENABLED(CONFIG_BLOCK))
			while (sb->s_bdev && !bdev_thaw(sb->s_bdev))
				pr_warn("Emergency Thaw on %pg\n", sb->s_bdev);
		thaw_super_locked(sb, FREEZE_HOLDER_USERSPACE);
		return;
	}
	if (locked)
		super_unlock_excl(sb);
}

static void do_thaw_all(struct work_struct *work)
{
	__iterate_supers(do_thaw_all_callback);
	kfree(work);
	printk(KERN_WARNING "Emergency Thaw complete\n");
}

/**
 * emergency_thaw_all -- forcibly thaw every frozen filesystem
 *
 * Used for emergency unfreeze of all filesystems via SysRq
 */
void emergency_thaw_all(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_thaw_all);
		schedule_work(work);
	}
}

static DEFINE_IDA(unnamed_dev_ida);

/**
 * get_anon_bdev - Allocate a block device for filesystems which don't have one.
 * @p: Pointer to a dev_t.
 *
 * Filesystems which don't use real block devices can call this function
 * to allocate a virtual block device.
 *
 * Context: Any context.  Frequently called while holding sb_lock.
 * Return: 0 on success, -EMFILE if there are no anonymous bdevs left
 * or -ENOMEM if memory allocation failed.
 */
int get_anon_bdev(dev_t *p)
{
	int dev;

	/*
	 * Many userspace utilities consider an FSID of 0 invalid.
	 * Always return at least 1 from get_anon_bdev.
	 */
	dev = ida_alloc_range(&unnamed_dev_ida, 1, (1 << MINORBITS) - 1,
			GFP_ATOMIC);
	if (dev == -ENOSPC)
		dev = -EMFILE;
	if (dev < 0)
		return dev;

	*p = MKDEV(0, dev);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	ida_free(&unnamed_dev_ida, MINOR(dev));
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	return get_anon_bdev(&s->s_dev);
}
EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	kill_super_notify(sb);
	free_anon_bdev(dev);
}
EXPORT_SYMBOL(kill_anon_super);

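/*
 * Illustrative sketch (not part of this file): a simple in-memory
 * filesystem typically frees its private info in ->kill_sb() around
 * kill_anon_super(); the examplefs_* names are hypothetical.
 *
 *	static void examplefs_kill_sb(struct super_block *sb)
 *	{
 *		struct examplefs_info *info = sb->s_fs_info;
 *
 *		kill_anon_super(sb);
 *		kfree(info);
 *	}
 */
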
void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}
EXPORT_SYMBOL(kill_litter_super);

int set_anon_super_fc(struct super_block *sb, struct fs_context *fc)
{
	return set_anon_super(sb, NULL);
}
EXPORT_SYMBOL(set_anon_super_fc);

static int test_keyed_super(struct super_block *sb, struct fs_context *fc)
{
	return sb->s_fs_info == fc->s_fs_info;
}

static int test_single_super(struct super_block *s, struct fs_context *fc)
{
	return 1;
}

static int vfs_get_super(struct fs_context *fc,
		int (*test)(struct super_block *, struct fs_context *),
		int (*fill_super)(struct super_block *sb,
				  struct fs_context *fc))
{
	struct super_block *sb;
	int err;

	sb = sget_fc(fc, test, set_anon_super_fc);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	if (!sb->s_root) {
		err = fill_super(sb, fc);
		if (err)
			goto error;

		sb->s_flags |= SB_ACTIVE;
	}

	fc->root = dget(sb->s_root);
	return 0;

error:
	deactivate_locked_super(sb);
	return err;
}

int get_tree_nodev(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, NULL, fill_super);
}
EXPORT_SYMBOL(get_tree_nodev);

int get_tree_single(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc))
{
	return vfs_get_super(fc, test_single_super, fill_super);
}
EXPORT_SYMBOL(get_tree_single);

int get_tree_keyed(struct fs_context *fc,
		  int (*fill_super)(struct super_block *sb,
				    struct fs_context *fc),
		void *key)
{
	fc->s_fs_info = key;
	return vfs_get_super(fc, test_keyed_super, fill_super);
}
EXPORT_SYMBOL(get_tree_keyed);

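/*
 * Illustrative sketch (not part of this file): a nodev filesystem wires
 * one of these helpers into its fs_context_operations; the examplefs_*
 * names are hypothetical.
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_nodev(fc, examplefs_fill_super);
 *	}
 *
 *	static const struct fs_context_operations examplefs_context_ops = {
 *		.get_tree	= examplefs_get_tree,
 *	};
 */
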
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_dev = *(dev_t *)data;
	return 0;
}

static int super_s_dev_set(struct super_block *s, struct fs_context *fc)
{
	return set_bdev_super(s, fc->sget_key);
}

static int super_s_dev_test(struct super_block *s, struct fs_context *fc)
{
	return !(s->s_iflags & SB_I_RETIRED) &&
		s->s_dev == *(dev_t *)fc->sget_key;
}

/**
 * sget_dev - Find or create a superblock by device number
 * @fc: Filesystem context.
 * @dev: device number
 *
 * Find or create a superblock using the provided device number that
 * will be stored in fc->sget_key.
 *
 * If an extant superblock is matched, then that will be returned with
 * an elevated reference count that the caller must transfer or discard.
 *
 * If no match is made, a new superblock will be allocated and basic
 * initialisation will be performed (s_type, s_fs_info, s_id, s_dev will
 * be set). The superblock will be published and it will be returned in
 * a partially constructed state with SB_BORN and SB_ACTIVE as yet
 * unset.
 *
 * Return: an existing or newly created superblock on success, an error
 *         pointer on failure.
 */
struct super_block *sget_dev(struct fs_context *fc, dev_t dev)
{
	fc->sget_key = &dev;
	return sget_fc(fc, super_s_dev_test, super_s_dev_set);
}
EXPORT_SYMBOL(sget_dev);

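/*
 * Illustrative sketch (not part of this file): get_tree_bdev() below is
 * the canonical caller; a filesystem that has already resolved its own
 * device number could do the same directly:
 *
 *	s = sget_dev(fc, dev);
 *	if (IS_ERR(s))
 *		return PTR_ERR(s);
 */
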
/*
 * Lock the superblock that is holder of the bdev. Returns the superblock
 * pointer if we successfully locked the superblock and it is alive. Otherwise
 * we return NULL and just unlock bdev->bd_holder_lock.
 *
 * The function must be called with bdev->bd_holder_lock and releases it.
 */
static struct super_block *bdev_super_lock(struct block_device *bdev, bool excl)
	__releases(&bdev->bd_holder_lock)
{
	struct super_block *sb = bdev->bd_holder;
	bool locked;

	lockdep_assert_held(&bdev->bd_holder_lock);
	lockdep_assert_not_held(&sb->s_umount);
	lockdep_assert_not_held(&bdev->bd_disk->open_mutex);

	/* Make sure sb doesn't go away from under us */
	spin_lock(&sb_lock);
	sb->s_count++;
	spin_unlock(&sb_lock);

	mutex_unlock(&bdev->bd_holder_lock);

	locked = super_lock(sb, excl);

	/*
	 * If the superblock wasn't already SB_DYING then we hold
	 * s_umount and can safely drop our temporary reference.
	 */
	put_super(sb);

	if (!locked)
		return NULL;

	if (!sb->s_root || !(sb->s_flags & SB_ACTIVE)) {
		super_unlock(sb, excl);
		return NULL;
	}

	return sb;
}

static void fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	if (!surprise)
		sync_filesystem(sb);
	shrink_dcache_sb(sb);
	invalidate_inodes(sb);
	if (sb->s_op->shutdown)
		sb->s_op->shutdown(sb);

	super_unlock_shared(sb);
}

static void fs_bdev_sync(struct block_device *bdev)
{
	struct super_block *sb;

	sb = bdev_super_lock(bdev, false);
	if (!sb)
		return;

	sync_filesystem(sb);
	super_unlock_shared(sb);
}

static struct super_block *get_bdev_super(struct block_device *bdev)
{
	bool active = false;
	struct super_block *sb;

	sb = bdev_super_lock(bdev, true);
	if (sb) {
		active = atomic_inc_not_zero(&sb->s_active);
		super_unlock_excl(sb);
	}
	if (!active)
		return NULL;
	return sb;
}

/**
 * fs_bdev_freeze - freeze owning filesystem of block device
 * @bdev: block device
 *
 * Freeze the filesystem that owns this block device if it is still
 * active.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
static int fs_bdev_freeze(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (!sb)
		return -EINVAL;

	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	else
		error = freeze_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	if (!error)
		error = sync_blockdev(bdev);
	deactivate_super(sb);
	return error;
}

/**
 * fs_bdev_thaw - thaw owning filesystem of block device
 * @bdev: block device
 *
 * Thaw the filesystem that owns this block device.
 *
 * A filesystem that owns multiple block devices may be frozen from each
 * block device and won't be unfrozen until all block devices are
 * unfrozen. Each block device can only freeze the filesystem once as we
 * nest freezes for block devices in the block layer.
 *
 * Return: If the thaw was successful zero is returned. If the thaw
 *         failed a negative error code is returned. If this function
 *         returns zero it doesn't mean that the filesystem is unfrozen
 *         as it may have been frozen multiple times (kernel may hold a
 *         freeze or might be frozen from other block devices).
 */
static int fs_bdev_thaw(struct block_device *bdev)
{
	struct super_block *sb;
	int error;

	lockdep_assert_held(&bdev->bd_fsfreeze_mutex);

	sb = get_bdev_super(bdev);
	if (WARN_ON_ONCE(!sb))
		return -EINVAL;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	else
		error = thaw_super(sb,
				FREEZE_MAY_NEST | FREEZE_HOLDER_USERSPACE);
	deactivate_super(sb);
	return error;
}

const struct blk_holder_ops fs_holder_ops = {
	.mark_dead		= fs_bdev_mark_dead,
	.sync			= fs_bdev_sync,
	.freeze			= fs_bdev_freeze,
	.thaw			= fs_bdev_thaw,
};
EXPORT_SYMBOL_GPL(fs_holder_ops);

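/*
 * Illustrative sketch (not part of this file): a filesystem opening a
 * block device on its own would pass fs_holder_ops so the block layer
 * can call back into the VFS for freeze/thaw, sync and device removal:
 *
 *	handle = bdev_open_by_dev(dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				  sb, &fs_holder_ops);
 *
 * Most filesystems get this wired up via setup_bdev_super() below.
 */
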
int setup_bdev_super(struct super_block *sb, int sb_flags,
		struct fs_context *fc)
{
	blk_mode_t mode = sb_open_mode(sb_flags);
	struct bdev_handle *bdev_handle;
	struct block_device *bdev;

	bdev_handle = bdev_open_by_dev(sb->s_dev, mode, sb, &fs_holder_ops);
	if (IS_ERR(bdev_handle)) {
		if (fc)
			errorf(fc, "%s: Can't open blockdev", fc->source);
		return PTR_ERR(bdev_handle);
	}
	bdev = bdev_handle->bdev;

	/*
	 * This really should be in blkdev_get_by_dev, but right now can't due
	 * to legacy issues that require us to allow opening a block device node
	 * writable from userspace even for a read-only block device.
	 */
	if ((mode & BLK_OPEN_WRITE) && bdev_read_only(bdev)) {
		bdev_release(bdev_handle);
		return -EACCES;
	}

	/*
	 * It is enough to check bdev was not frozen before we set
	 * s_bdev as freezing will wait until SB_BORN is set.
	 */
	if (atomic_read(&bdev->bd_fsfreeze_count) > 0) {
		if (fc)
			warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
		bdev_release(bdev_handle);
		return -EBUSY;
	}
	spin_lock(&sb_lock);
	sb->s_bdev_handle = bdev_handle;
	sb->s_bdev = bdev;
	sb->s_bdi = bdi_get(bdev->bd_disk->bdi);
	if (bdev_stable_writes(bdev))
		sb->s_iflags |= SB_I_STABLE_WRITES;
	spin_unlock(&sb_lock);

	snprintf(sb->s_id, sizeof(sb->s_id), "%pg", bdev);
	shrinker_debugfs_rename(sb->s_shrink, "sb-%s:%s", sb->s_type->name,
				sb->s_id);
	sb_set_blocksize(sb, block_size(bdev));
	return 0;
}
EXPORT_SYMBOL_GPL(setup_bdev_super);

/**
 * get_tree_bdev - Get a superblock based on a single block device
 * @fc: The filesystem context holding the parameters
 * @fill_super: Helper to initialise a new superblock
 */
int get_tree_bdev(struct fs_context *fc,
		int (*fill_super)(struct super_block *,
				  struct fs_context *))
{
	struct super_block *s;
	int error = 0;
	dev_t dev;

	if (!fc->source)
		return invalf(fc, "No source specified");

	error = lookup_bdev(fc->source, &dev);
	if (error) {
		errorf(fc, "%s: Can't lookup blockdev", fc->source);
		return error;
	}

	fc->sb_flags |= SB_NOSEC;
	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (s->s_root) {
		/* Don't summarily change the RO/RW state. */
		if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
			warnf(fc, "%pg: Can't mount, would change RO state", s->s_bdev);
			deactivate_locked_super(s);
			return -EBUSY;
		}
	} else {
		error = setup_bdev_super(s, fc->sb_flags, fc);
		if (!error)
			error = fill_super(s, fc);
		if (error) {
			deactivate_locked_super(s);
			return error;
		}
		s->s_flags |= SB_ACTIVE;
	}

	BUG_ON(fc->root);
	fc->root = dget(s->s_root);
	return 0;
}
EXPORT_SYMBOL(get_tree_bdev);

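/*
 * Illustrative sketch (not part of this file): a block-device-based
 * filesystem usually just forwards to get_tree_bdev() from its
 * ->get_tree() hook; the examplefs_* names are hypothetical.
 *
 *	static int examplefs_get_tree(struct fs_context *fc)
 *	{
 *		return get_tree_bdev(fc, examplefs_fill_super);
 *	}
 */
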
static int test_bdev_super(struct super_block *s, void *data)
{
	return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;
	dev_t dev;

	error = lookup_bdev(dev_name, &dev);
	if (error)
		return ERR_PTR(error);

	flags |= SB_NOSEC;
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags, &dev);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (s->s_root) {
		if ((flags ^ s->s_flags) & SB_RDONLY) {
			deactivate_locked_super(s);
			return ERR_PTR(-EBUSY);
		}
	} else {
		error = setup_bdev_super(s, flags, NULL);
		if (!error)
			error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= SB_ACTIVE;
	}

	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;

	generic_shutdown_super(sb);
	if (bdev) {
		sync_blockdev(bdev);
		bdev_release(sb->s_bdev_handle);
	}
}
EXPORT_SYMBOL(kill_block_super);

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= SB_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

int reconfigure_single(struct super_block *s,
		       int flags, void *data)
{
	struct fs_context *fc;
	int ret;

	/* The caller really needs to be passing fc down into mount_single(),
	 * then a chunk of this can be removed.  [Bollocks -- AV]
	 * Better yet, reconfiguration shouldn't happen, but rather the second
	 * mount should be rejected if the parameters are not compatible.
	 */
	fc = fs_context_for_reconfigure(s->s_root, flags, MS_RMT_MASK);
	if (IS_ERR(fc))
		return PTR_ERR(fc);

	ret = parse_monolithic_mount_data(fc, data);
	if (ret < 0)
		goto out;

	ret = reconfigure_super(fc);
out:
	put_fs_context(fc);
	return ret;
}

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);

	if (!s->s_root) {
		error = fill_super(s, data, flags & SB_SILENT ? 1 : 0);
		if (!error)
			s->s_flags |= SB_ACTIVE;
	} else {
		error = reconfigure_single(s, flags, data);
	}

	if (unlikely(error)) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

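/*
 * Illustrative sketch (not part of this file): legacy filesystems that
 * still use the ->mount() interface forward to one of the mount_*
 * helpers above; the examplefs_* names are hypothetical.
 *
 *	static struct dentry *examplefs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_single(fs_type, flags, data, examplefs_fill_super);
 *	}
 */
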
/**
 * vfs_get_tree - Get the mountable root
 * @fc: The superblock configuration context.
 *
 * The filesystem is invoked to get or create a superblock which can then later
 * be used for mounting.  The filesystem places a pointer to the root to be
 * used for mounting in @fc->root.
 */
int vfs_get_tree(struct fs_context *fc)
{
	struct super_block *sb;
	int error;

	if (fc->root)
		return -EBUSY;

	/* Get the mountable root in fc->root, with a ref on the root and a ref
	 * on the superblock.
	 */
	error = fc->ops->get_tree(fc);
	if (error < 0)
		return error;

	if (!fc->root) {
		pr_err("Filesystem %s get_tree() didn't set fc->root\n",
		       fc->fs_type->name);
		/* We don't know what the locking state of the superblock is -
		 * if there is a superblock.
		 */
		BUG();
	}

	sb = fc->root->d_sb;
	WARN_ON(!sb->s_bdi);

	/*
	 * super_wake() contains a memory barrier which also takes care of
	 * ordering for super_cache_count(). We place it before setting
	 * SB_BORN as the data dependency between the two functions is
	 * the superblock structure contents that we just set up, not
	 * the SB_BORN flag.
	 */
	super_wake(sb, SB_BORN);

	error = security_sb_set_mnt_opts(sb, fc->security, 0, NULL);
	if (unlikely(error)) {
		fc_drop_locked(fc);
		return error;
	}

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", fc->fs_type->name, sb->s_maxbytes);

	return 0;
}
EXPORT_SYMBOL(vfs_get_tree);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi_name(struct super_block *sb, char *fmt, ...)
{
	struct backing_dev_info *bdi;
	int err;
	va_list args;

	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	va_start(args, fmt);
	err = bdi_register_va(bdi, fmt, args);
	va_end(args);
	if (err) {
		bdi_put(bdi);
		return err;
	}
	WARN_ON(sb->s_bdi != &noop_backing_dev_info);
	sb->s_bdi = bdi;
	sb->s_iflags |= SB_I_PERSB_BDI;

	return 0;
}
EXPORT_SYMBOL(super_setup_bdi_name);

/*
 * Setup private BDI for given superblock. It gets automatically cleaned up
 * in generic_shutdown_super().
 */
int super_setup_bdi(struct super_block *sb)
{
	static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

	return super_setup_bdi_name(sb, "%.28s-%ld", sb->s_type->name,
				    atomic_long_inc_return(&bdi_seq));
}
EXPORT_SYMBOL(super_setup_bdi);

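/*
 * Illustrative sketch (not part of this file): a filesystem that does
 * its own writeback typically sets up a private BDI from its
 * fill_super and may then tune it, e.g. disabling readahead:
 *
 *	err = super_setup_bdi(sb);
 *	if (err)
 *		return err;
 *	sb->s_bdi->ra_pages = 0;
 */
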
/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	percpu_down_write(sb->s_writers.rw_sem + level-1);
}

/*
 * We are going to return to userspace and forget about these locks, the
 * ownership goes to the caller of thaw_super() which does unlock().
 */
static void lockdep_sb_freeze_release(struct super_block *sb)
{
	int level;

	for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
		percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

/*
 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
 */
static void lockdep_sb_freeze_acquire(struct super_block *sb)
{
	int level;

	for (level = 0; level < SB_FREEZE_LEVELS; ++level)
		percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}

static void sb_freeze_unlock(struct super_block *sb, int level)
{
	for (level--; level >= 0; level--)
		percpu_up_write(sb->s_writers.rw_sem + level);
}

static int wait_for_partially_frozen(struct super_block *sb)
{
	int ret = 0;

	do {
		unsigned short old = sb->s_writers.frozen;

		up_write(&sb->s_umount);
		ret = wait_var_event_killable(&sb->s_writers.frozen,
					       sb->s_writers.frozen != old);
		down_write(&sb->s_umount);
	} while (ret == 0 &&
	         sb->s_writers.frozen != SB_UNFROZEN &&
	         sb->s_writers.frozen != SB_FREEZE_COMPLETE);

	return ret;
}

#define FREEZE_HOLDERS (FREEZE_HOLDER_KERNEL | FREEZE_HOLDER_USERSPACE)
#define FREEZE_FLAGS (FREEZE_HOLDERS | FREEZE_MAY_NEST)

static inline int freeze_inc(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		++sb->s_writers.freeze_kcount;
	if (who & FREEZE_HOLDER_USERSPACE)
		++sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline int freeze_dec(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if ((who & FREEZE_HOLDER_KERNEL) && sb->s_writers.freeze_kcount)
		--sb->s_writers.freeze_kcount;
	if ((who & FREEZE_HOLDER_USERSPACE) && sb->s_writers.freeze_ucount)
		--sb->s_writers.freeze_ucount;
	return sb->s_writers.freeze_kcount + sb->s_writers.freeze_ucount;
}

static inline bool may_freeze(struct super_block *sb, enum freeze_holder who)
{
	WARN_ON_ONCE((who & ~FREEZE_FLAGS));
	WARN_ON_ONCE(hweight32(who & FREEZE_HOLDERS) > 1);

	if (who & FREEZE_HOLDER_KERNEL)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_kcount == 0;
	if (who & FREEZE_HOLDER_USERSPACE)
		return (who & FREEZE_MAY_NEST) ||
		       sb->s_writers.freeze_ucount == 0;
	return false;
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 * @who: context that wants to freeze
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs may return
 * -EBUSY.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to freeze the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to freeze the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed.
 *
 * The @who argument distinguishes between the kernel and userspace trying to
 * freeze the filesystem.  Although there cannot be multiple kernel freezes or
 * multiple userspace freezes in effect at any given time, the kernel and
 * userspace can both hold a filesystem frozen.  The filesystem remains frozen
 * until there are no kernel or userspace freezes in effect.
 *
 * A filesystem may hold multiple devices and thus a filesystem may be
 * frozen through the block layer via multiple block devices. In this
 * case the request is marked as being allowed to nest by passing
 * FREEZE_MAY_NEST. The filesystem remains frozen until all block
 * devices are unfrozen. If multiple freezes are attempted without
 * FREEZE_MAY_NEST -EBUSY will be returned.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 *
 * Return: If the freeze was successful zero is returned. If the freeze
 *         failed a negative error code is returned.
 */
int freeze_super(struct super_block *sb, enum freeze_holder who)
{
	int ret;

	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while freezing!");
		return -EINVAL;
	}
	atomic_inc(&sb->s_active);

retry:
	if (sb->s_writers.frozen == SB_FREEZE_COMPLETE) {
		if (may_freeze(sb, who))
			ret = !!WARN_ON_ONCE(freeze_inc(sb, who) == 1);
		else
			ret = -EBUSY;
		/* All freezers share a single active reference. */
		deactivate_locked_super(sb);
		return ret;
	}

	if (sb->s_writers.frozen != SB_UNFROZEN) {
		ret = wait_for_partially_frozen(sb);
		if (ret) {
			deactivate_locked_super(sb);
			return ret;
		}

		goto retry;
	}

	if (sb_rdonly(sb)) {
		/* Nothing to do really... */
		WARN_ON_ONCE(freeze_inc(sb, who) > 1);
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		wake_up_var(&sb->s_writers.frozen);
		super_unlock_excl(sb);
		return 0;
	}

	sb->s_writers.frozen = SB_FREEZE_WRITE;
	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	super_unlock_excl(sb);
	sb_wait_write(sb, SB_FREEZE_WRITE);
	__super_lock_excl(sb);

	/* Now we go and block page faults... */
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	ret = sync_filesystem(sb);
	if (ret) {
		sb->s_writers.frozen = SB_UNFROZEN;
		sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
		wake_up_var(&sb->s_writers.frozen);
		deactivate_locked_super(sb);
		return ret;
	}

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS:Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			sb_freeze_unlock(sb, SB_FREEZE_FS);
			wake_up_var(&sb->s_writers.frozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * For debugging purposes so that fs can warn if it sees write activity
	 * when frozen is set to SB_FREEZE_COMPLETE, and for thaw_super().
	 */
	WARN_ON_ONCE(freeze_inc(sb, who) > 1);
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	wake_up_var(&sb->s_writers.frozen);
	lockdep_sb_freeze_release(sb);
	super_unlock_excl(sb);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

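/*
 * Illustrative sketch (not part of this file): an in-kernel user that
 * needs a quiescent filesystem brackets its work with a kernel-held
 * freeze. do_quiesced_work() is a hypothetical stand-in for whatever
 * the caller needs to do while writes are blocked.
 *
 *	error = freeze_super(sb, FREEZE_HOLDER_KERNEL);
 *	if (!error) {
 *		do_quiesced_work(sb);
 *		thaw_super(sb, FREEZE_HOLDER_KERNEL);
 *	}
 */
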
/*
 * Undoes the effect of a freeze_super_locked call.  If the filesystem is
 * frozen both by userspace and the kernel, a thaw call from either source
 * removes that state without releasing the other state or unlocking the
 * filesystem.
 */
static int thaw_super_locked(struct super_block *sb, enum freeze_holder who)
{
	int error = -EINVAL;

	if (sb->s_writers.frozen != SB_FREEZE_COMPLETE)
		goto out_unlock;

	/*
	 * All freezers share a single active reference.
	 * So just unlock in case there are any left.
	 */
	if (freeze_dec(sb, who))
		goto out_unlock;

	if (sb_rdonly(sb)) {
		sb->s_writers.frozen = SB_UNFROZEN;
		wake_up_var(&sb->s_writers.frozen);
		goto out_deactivate;
	}

	lockdep_sb_freeze_acquire(sb);

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			pr_err("VFS: Filesystem thaw failed\n");
			freeze_inc(sb, who);
			lockdep_sb_freeze_release(sb);
			goto out_unlock;
		}
	}

	sb->s_writers.frozen = SB_UNFROZEN;
	wake_up_var(&sb->s_writers.frozen);
	sb_freeze_unlock(sb, SB_FREEZE_FS);
out_deactivate:
	deactivate_locked_super(sb);
	return 0;

out_unlock:
	super_unlock_excl(sb);
	return error;
}

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 * @who: context that wants to freeze
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super()
 * if there are no remaining freezes on the filesystem.
 *
 * @who should be:
 * * %FREEZE_HOLDER_USERSPACE if userspace wants to thaw the fs;
 * * %FREEZE_HOLDER_KERNEL if the kernel wants to thaw the fs.
 * * %FREEZE_MAY_NEST whether nesting freeze and thaw requests is allowed
 *
 * A filesystem may hold multiple devices and thus a filesystem may
 * have been frozen through the block layer via multiple block devices.
 * The filesystem remains frozen until all block devices are unfrozen.
 */
int thaw_super(struct super_block *sb, enum freeze_holder who)
{
	if (!super_lock_excl(sb)) {
		WARN_ON_ONCE("Dying superblock while thawing!");
		return -EINVAL;
	}
	return thaw_super_locked(sb, who);
}
EXPORT_SYMBOL(thaw_super);

/*
 * Create workqueue for deferred direct IO completions. We allocate the
 * workqueue when it's first needed. This avoids creating workqueue for
 * filesystems that don't need it and also allows us to create the workqueue
 * late enough so that we can include s_id in the name of the workqueue.
 */
int sb_init_dio_done_wq(struct super_block *sb)
{
	struct workqueue_struct *old;
	struct workqueue_struct *wq = alloc_workqueue("dio/%s",
						      WQ_MEM_RECLAIM, 0,
						      sb->s_id);
	if (!wq)
		return -ENOMEM;
	/*
	 * This has to be atomic as more DIOs can race to create the workqueue
	 */
	old = cmpxchg(&sb->s_dio_done_wq, NULL, wq);
	/* Someone created workqueue before us? Free ours... */
	if (old)
		destroy_workqueue(wq);
	return 0;
}
EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);