/*
 *  linux/fs/super.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  super.c contains code to handle: - mount structures
 *                                   - super-block tables
 *                                   - filesystem drivers list
 *                                   - mount system call
 *                                   - umount system call
 *                                   - ustat system call
 *
 * GK 2/5/95  -  Changed to support mounting the root fs via NFS
 *
 *  Added kerneld support: Jacques Gelinas and Bjorn Ekwall
 *  Added change_root: Werner Almesberger & Hans Lermen, Feb '96
 *  Added options to /proc/mounts:
 *    Torbjörn Lindh ([email protected]), April 14, 1996.
 *  Added devfs support: Richard Gooch <[email protected]>, 13-JAN-1998
 *  Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/acct.h>
#include <linux/blkdev.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/writeback.h>		/* for the emergency remount stuff */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
#include "internal.h"


LIST_HEAD(super_blocks);
DEFINE_SPINLOCK(sb_lock);

static char *sb_writers_name[SB_FREEZE_LEVELS] = {
	"sb_writers",
	"sb_pagefaults",
	"sb_internal",
};

/*
 * One thing we have to be careful of with a per-sb shrinker is that we don't
 * drop the last active reference to the superblock from within the shrinker.
 * If that happens we could trigger unregistering the shrinker from within the
 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
 * take a passive reference to the superblock to avoid this from occurring.
 */
static unsigned long super_cache_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	struct super_block *sb;
	long	fs_objects = 0;
	long	total_objects;
	long	freed = 0;
	long	dentries;
	long	inodes;

	sb = container_of(shrink, struct super_block, s_shrink);

	/*
	 * Deadlock avoidance. We may hold various FS locks, and we don't want
	 * to recurse into the FS that called us in clear_inode() and friends..
	 */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!grab_super_passive(sb))
		return SHRINK_STOP;

	if (sb->s_op->nr_cached_objects)
		fs_objects = sb->s_op->nr_cached_objects(sb, sc->nid);

	inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
	dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
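	/* the +1 keeps total_objects non-zero, so the mult_frac() scaling
	   below can never divide by zero */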
	total_objects = dentries + inodes + fs_objects + 1;

	/* proportion the scan between the caches */
	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);

	/*
	 * prune the dcache first as the icache is pinned by it, then
	 * prune the icache, followed by the filesystem specific caches
	 */
	freed = prune_dcache_sb(sb, dentries, sc->nid);
	freed += prune_icache_sb(sb, inodes, sc->nid);

	if (fs_objects) {
		fs_objects = mult_frac(sc->nr_to_scan, fs_objects,
				       total_objects);
		freed += sb->s_op->free_cached_objects(sb, fs_objects,
						       sc->nid);
	}

	drop_super(sb);
	return freed;
}

static unsigned long super_cache_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct super_block *sb;
	long	total_objects = 0;

	sb = container_of(shrink, struct super_block, s_shrink);

	if (!grab_super_passive(sb))
		return 0;

	if (sb->s_op && sb->s_op->nr_cached_objects)
		total_objects = sb->s_op->nr_cached_objects(sb,
						 sc->nid);

	total_objects += list_lru_count_node(&sb->s_dentry_lru,
						 sc->nid);
	total_objects += list_lru_count_node(&sb->s_inode_lru,
						 sc->nid);

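	/*
	 * Scale the raw count by sysctl_vfs_cache_pressure (a percentage),
	 * so admins can bias reclaim toward or away from the dcache/icache.
	 */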
	total_objects = vfs_pressure_ratio(total_objects);
	drop_super(sb);
	return total_objects;
}

/**
 * destroy_super - frees a superblock
 * @s: superblock to free
 *
 * Frees a superblock.
 */
static void destroy_super(struct super_block *s)
{
	int i;
	list_lru_destroy(&s->s_dentry_lru);
	list_lru_destroy(&s->s_inode_lru);
	for (i = 0; i < SB_FREEZE_LEVELS; i++)
		percpu_counter_destroy(&s->s_writers.counter[i]);
	security_sb_free(s);
	WARN_ON(!list_empty(&s->s_mounts));
	kfree(s->s_subtype);
	kfree(s->s_options);
	kfree_rcu(s, rcu);
}

/**
 * alloc_super - create new superblock
 * @type: filesystem type superblock should belong to
 * @flags: the mount flags
 *
 * Allocates and initializes a new &struct super_block.  alloc_super()
 * returns a pointer to a new superblock, or %NULL if allocation fails.
 */
static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
	struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
	static const struct super_operations default_op;
	int i;

	if (!s)
		return NULL;

	INIT_LIST_HEAD(&s->s_mounts);

	if (security_sb_alloc(s))
		goto fail;

	for (i = 0; i < SB_FREEZE_LEVELS; i++) {
		if (percpu_counter_init(&s->s_writers.counter[i], 0) < 0)
			goto fail;
		lockdep_init_map(&s->s_writers.lock_map[i], sb_writers_name[i],
				 &type->s_writers_key[i], 0);
	}
	init_waitqueue_head(&s->s_writers.wait);
	init_waitqueue_head(&s->s_writers.wait_unfrozen);
	s->s_flags = flags;
	s->s_bdi = &default_backing_dev_info;
	INIT_HLIST_NODE(&s->s_instances);
	INIT_HLIST_BL_HEAD(&s->s_anon);
	INIT_LIST_HEAD(&s->s_inodes);

	if (list_lru_init(&s->s_dentry_lru))
		goto fail;
	if (list_lru_init(&s->s_inode_lru))
		goto fail;

	init_rwsem(&s->s_umount);
	lockdep_set_class(&s->s_umount, &type->s_umount_key);
	/*
	 * sget() can have s_umount recursion.
	 *
	 * When it cannot find a suitable sb, it allocates a new
	 * one (this one), and tries again to find a suitable old
	 * one.
	 *
	 * In case that succeeds, it will acquire the s_umount
	 * lock of the old one. Since these are clearly distinct
	 * locks, and this object isn't exposed yet, there's no
	 * risk of deadlocks.
	 *
	 * Annotate this by putting this lock in a different
	 * subclass.
	 */
	down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
	s->s_count = 1;
	atomic_set(&s->s_active, 1);
	mutex_init(&s->s_vfs_rename_mutex);
	lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
	mutex_init(&s->s_dquot.dqio_mutex);
	mutex_init(&s->s_dquot.dqonoff_mutex);
	init_rwsem(&s->s_dquot.dqptr_sem);
	s->s_maxbytes = MAX_NON_LFS;
	s->s_op = &default_op;
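	/* default timestamp granularity: 1000000000 ns = 1 second;
	   filesystems with finer timestamps lower this in fill_super() */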
	s->s_time_gran = 1000000000;
	s->cleancache_poolid = -1;

	s->s_shrink.seeks = DEFAULT_SEEKS;
	s->s_shrink.scan_objects = super_cache_scan;
	s->s_shrink.count_objects = super_cache_count;
	s->s_shrink.batch = 1024;
	s->s_shrink.flags = SHRINKER_NUMA_AWARE;
	return s;

fail:
	destroy_super(s);
	return NULL;
}

/* Superblock refcounting */

/*
 * Drop a superblock's refcount.  The caller must hold sb_lock.
 */
static void __put_super(struct super_block *sb)
{
	if (!--sb->s_count) {
		list_del_init(&sb->s_list);
		destroy_super(sb);
	}
}

/**
 * put_super - drop a temporary reference to superblock
 * @sb: superblock in question
 *
 * Drops a temporary reference; frees the superblock if there are no
 * references left.
 */
static void put_super(struct super_block *sb)
{
	spin_lock(&sb_lock);
	__put_super(sb);
	spin_unlock(&sb_lock);
}


/**
 * deactivate_locked_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Drops an active reference to superblock, converting it into a temporary
 * one if there are no other active references left. In that case we
 * tell fs driver to shut it down and drop the temporary reference we
 * had just acquired.
 *
 * Caller holds exclusive lock on superblock; that lock is released.
 */
void deactivate_locked_super(struct super_block *s)
{
	struct file_system_type *fs = s->s_type;
	if (atomic_dec_and_test(&s->s_active)) {
		cleancache_invalidate_fs(s);
		fs->kill_sb(s);

		/* caches are now gone, we can safely kill the shrinker now */
		unregister_shrinker(&s->s_shrink);

		put_filesystem(fs);
		put_super(s);
	} else {
		up_write(&s->s_umount);
	}
}

EXPORT_SYMBOL(deactivate_locked_super);

/**
 * deactivate_super - drop an active reference to superblock
 * @s: superblock to deactivate
 *
 * Variant of deactivate_locked_super(), except that superblock is *not*
 * locked by caller.  If we are going to drop the final active reference,
 * lock will be acquired prior to that.
 */
void deactivate_super(struct super_block *s)
{
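	/*
	 * Fast path: atomic_add_unless() drops the reference without taking
	 * s_umount unless s_active is exactly 1, i.e. unless this might be
	 * the last active reference and the fs may have to be shut down.
	 */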
	if (!atomic_add_unless(&s->s_active, -1, 1)) {
		down_write(&s->s_umount);
		deactivate_locked_super(s);
	}
}

EXPORT_SYMBOL(deactivate_super);

/**
 * grab_super - acquire an active reference
 * @s: reference we are trying to make active
 *
 * Tries to acquire an active reference.  grab_super() is used when we
 * had just found a superblock in super_blocks or fs_type->fs_supers
 * and want to turn it into a full-blown active reference.  grab_super()
 * is called with sb_lock held and drops it.  Returns 1 in case of
 * success, 0 on failure (the superblock was already dead or dying
 * when grab_super() was called).  Note that this is only
 * called for superblocks not in rundown mode (== ones still on ->fs_supers
 * of their type), so increment of ->s_count is OK here.
 */
static int grab_super(struct super_block *s) __releases(sb_lock)
{
	s->s_count++;
	spin_unlock(&sb_lock);
	down_write(&s->s_umount);
	if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
		put_super(s);
		return 1;
	}
	up_write(&s->s_umount);
	put_super(s);
	return 0;
}

/*
 * grab_super_passive - acquire a passive reference
 * @sb: reference we are trying to grab
 *
 * Tries to acquire a passive reference. This is used in places where we
 * cannot take an active reference but we need to ensure that the
 * superblock does not go away while we are working on it. It returns
 * false if a reference was not gained, and returns true with the s_umount
 * lock held in read mode if a reference is gained. On successful return,
 * the caller must drop the s_umount lock and the passive reference when
 * done.
 */
bool grab_super_passive(struct super_block *sb)
{
	spin_lock(&sb_lock);
	if (hlist_unhashed(&sb->s_instances)) {
		spin_unlock(&sb_lock);
		return false;
	}

	sb->s_count++;
	spin_unlock(&sb_lock);

	if (down_read_trylock(&sb->s_umount)) {
		if (sb->s_root && (sb->s_flags & MS_BORN))
			return true;
		up_read(&sb->s_umount);
	}

	put_super(sb);
	return false;
}

/**
 * generic_shutdown_super - common helper for ->kill_sb()
 * @sb: superblock to kill
 *
 * generic_shutdown_super() does all fs-independent work on superblock
 * shutdown.  Typical ->kill_sb() should pick all fs-specific objects
 * that need destruction out of superblock, call generic_shutdown_super()
 * and release aforementioned objects.  Note: dentries and inodes _are_
 * taken care of and do not need specific handling.
 *
 * Upon calling this function, the filesystem may no longer alter or
 * rearrange the set of dentries belonging to this super_block, nor may it
 * change the attachments of dentries to inodes.
 */
void generic_shutdown_super(struct super_block *sb)
{
	const struct super_operations *sop = sb->s_op;

	if (sb->s_root) {
		shrink_dcache_for_umount(sb);
		sync_filesystem(sb);
		sb->s_flags &= ~MS_ACTIVE;

		fsnotify_unmount_inodes(&sb->s_inodes);

		evict_inodes(sb);

		if (sb->s_dio_done_wq) {
			destroy_workqueue(sb->s_dio_done_wq);
			sb->s_dio_done_wq = NULL;
		}

		if (sop->put_super)
			sop->put_super(sb);

		if (!list_empty(&sb->s_inodes)) {
			printk("VFS: Busy inodes after unmount of %s. "
			   "Self-destruct in 5 seconds.  Have a nice day...\n",
			   sb->s_id);
		}
	}
	spin_lock(&sb_lock);
	/* should be initialized for __put_super_and_need_restart() */
	hlist_del_init(&sb->s_instances);
	spin_unlock(&sb_lock);
	up_write(&sb->s_umount);
}

EXPORT_SYMBOL(generic_shutdown_super);

/**
 * sget - find or create a superblock
 * @type: filesystem type superblock should belong to
 * @test: comparison callback
 * @set: setup callback
 * @flags: mount flags
 * @data: argument to each of them
 */
struct super_block *sget(struct file_system_type *type,
			int (*test)(struct super_block *,void *),
			int (*set)(struct super_block *,void *),
			int flags,
			void *data)
{
	struct super_block *s = NULL;
	struct super_block *old;
	int err;

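	/*
	 * We can race with a concurrent mount: a matching sb may be dying
	 * (grab_super() fails) or may appear only after we dropped sb_lock
	 * to allocate our own, so the search must be retried.
	 */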
retry:
	spin_lock(&sb_lock);
	if (test) {
		hlist_for_each_entry(old, &type->fs_supers, s_instances) {
			if (!test(old, data))
				continue;
			if (!grab_super(old))
				goto retry;
			if (s) {
				up_write(&s->s_umount);
				destroy_super(s);
				s = NULL;
			}
			return old;
		}
	}
	if (!s) {
		spin_unlock(&sb_lock);
		s = alloc_super(type, flags);
		if (!s)
			return ERR_PTR(-ENOMEM);
		goto retry;
	}

	err = set(s, data);
	if (err) {
		spin_unlock(&sb_lock);
		up_write(&s->s_umount);
		destroy_super(s);
		return ERR_PTR(err);
	}
	s->s_type = type;
	strlcpy(s->s_id, type->name, sizeof(s->s_id));
	list_add_tail(&s->s_list, &super_blocks);
	hlist_add_head(&s->s_instances, &type->fs_supers);
	spin_unlock(&sb_lock);
	get_filesystem(type);
	register_shrinker(&s->s_shrink);
	return s;
}

EXPORT_SYMBOL(sget);

void drop_super(struct super_block *sb)
{
	up_read(&sb->s_umount);
	put_super(sb);
}

EXPORT_SYMBOL(drop_super);

/**
 * iterate_supers - call function for all active superblocks
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 * iterate_supers_type - call function for superblocks of given type
 * @type: fs type
 * @f: function to call
 * @arg: argument to pass to it
 *
 * Scans the superblock list and calls given function, passing it
 * locked superblock and given argument.
 */
void iterate_supers_type(struct file_system_type *type,
	void (*f)(struct super_block *, void *), void *arg)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
		sb->s_count++;
		spin_unlock(&sb_lock);

		down_read(&sb->s_umount);
		if (sb->s_root && (sb->s_flags & MS_BORN))
			f(sb, arg);
		up_read(&sb->s_umount);

		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

EXPORT_SYMBOL(iterate_supers_type);

/**
 * get_super - get the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given. %NULL is returned if no match is found.
 */

struct super_block *get_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

EXPORT_SYMBOL(get_super);

/**
 * get_super_thawed - get thawed superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device. The superblock is returned once it is thawed
 * (or immediately if it was not frozen). %NULL is returned if no match
 * is found.
 */
struct super_block *get_super_thawed(struct block_device *bdev)
{
	while (1) {
		struct super_block *s = get_super(bdev);
		if (!s || s->s_writers.frozen == SB_UNFROZEN)
			return s;
		up_read(&s->s_umount);
		wait_event(s->s_writers.wait_unfrozen,
			   s->s_writers.frozen == SB_UNFROZEN);
		put_super(s);
	}
}
EXPORT_SYMBOL(get_super_thawed);

/**
 * get_active_super - get an active reference to the superblock of a device
 * @bdev: device to get the superblock for
 *
 * Scans the superblock list and finds the superblock of the file system
 * mounted on the device given.  Returns the superblock with an active
 * reference or %NULL if none was found.
 */
struct super_block *get_active_super(struct block_device *bdev)
{
	struct super_block *sb;

	if (!bdev)
		return NULL;

restart:
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_bdev == bdev) {
			if (!grab_super(sb))
				goto restart;
			up_write(&sb->s_umount);
			return sb;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

struct super_block *user_get_super(dev_t dev)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
rescan:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		if (sb->s_dev == dev) {
			sb->s_count++;
			spin_unlock(&sb_lock);
			down_read(&sb->s_umount);
			/* still alive? */
			if (sb->s_root && (sb->s_flags & MS_BORN))
				return sb;
			up_read(&sb->s_umount);
			/* nope, got unmounted */
			spin_lock(&sb_lock);
			__put_super(sb);
			goto rescan;
		}
	}
	spin_unlock(&sb_lock);
	return NULL;
}

/**
 * do_remount_sb - asks filesystem to change mount options.
 * @sb: superblock in question
 * @flags: numeric part of options
 * @data: the rest of options
 * @force: whether or not to force the change
 *
 * Alters the mount options of a mounted file system.
 */
int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
{
	int retval;
	int remount_ro;

	if (sb->s_writers.frozen != SB_UNFROZEN)
		return -EBUSY;

#ifdef CONFIG_BLOCK
	if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
		return -EACCES;
#endif

	if (flags & MS_RDONLY)
		acct_auto_close(sb);
	shrink_dcache_sb(sb);

	remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);

	/* If we are remounting RDONLY and current sb is read/write,
	   make sure there are no rw files opened */
	if (remount_ro) {
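		/*
		 * With "force" we go read-only even while files are open for
		 * writing; otherwise sb_prepare_remount_readonly() fails the
		 * remount if any writers remain.
		 */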
		if (force) {
			sb->s_readonly_remount = 1;
			smp_wmb();
		} else {
			retval = sb_prepare_remount_readonly(sb);
			if (retval)
				return retval;
		}
	}

	sync_filesystem(sb);

	if (sb->s_op->remount_fs) {
		retval = sb->s_op->remount_fs(sb, &flags, data);
		if (retval) {
			if (!force)
				goto cancel_readonly;
			/* If forced remount, go ahead despite any errors */
			WARN(1, "forced remount of a %s fs returned %i\n",
			     sb->s_type->name, retval);
		}
	}
	sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
	/* Needs to be ordered wrt mnt_is_readonly() */
	smp_wmb();
	sb->s_readonly_remount = 0;

	/*
	 * Some filesystems modify their metadata via some other path than the
	 * bdev buffer cache (eg. use a private mapping, or directories in
	 * pagecache, etc). Also file data modifications go via their own
	 * mappings. So if we try to mount readonly then copy the filesystem
	 * from bdev, we could get stale data, so invalidate it to give a best
	 * effort at coherency.
	 */
	if (remount_ro && sb->s_bdev)
		invalidate_bdev(sb->s_bdev);
	return 0;

cancel_readonly:
	sb->s_readonly_remount = 0;
	return retval;
}

static void do_emergency_remount(struct work_struct *work)
{
	struct super_block *sb, *p = NULL;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (hlist_unhashed(&sb->s_instances))
			continue;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_write(&sb->s_umount);
		if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
		    !(sb->s_flags & MS_RDONLY)) {
			/*
			 * What lock protects sb->s_flags??
			 */
			do_remount_sb(sb, MS_RDONLY, NULL, 1);
		}
		up_write(&sb->s_umount);
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		p = sb;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
	kfree(work);
	printk("Emergency Remount complete\n");
}

void emergency_remount(void)
{
	struct work_struct *work;

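	/*
	 * May be called from atomic context (e.g. sysrq), hence GFP_ATOMIC
	 * and deferring the actual remount to a workqueue.
	 */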
	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_emergency_remount);
		schedule_work(work);
	}
}

/*
 * Unnamed block devices are dummy devices used by virtual
 * filesystems which don't use real block-devices.  -- jrs
 */

static DEFINE_IDA(unnamed_dev_ida);
static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
static int unnamed_dev_start = 0; /* don't bother trying below it */

int get_anon_bdev(dev_t *p)
{
	int dev;
	int error;

retry:
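	/*
	 * Two-step IDA allocation: preload nodes without the lock held, then
	 * allocate under the spinlock; -EAGAIN means we raced with another
	 * CPU that consumed the preallocation, so just try again.
	 */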
	if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
		return -ENOMEM;
	spin_lock(&unnamed_dev_lock);
	error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
	if (!error)
		unnamed_dev_start = dev + 1;
	spin_unlock(&unnamed_dev_lock);
	if (error == -EAGAIN)
		/* We raced and lost with another CPU. */
		goto retry;
	else if (error)
		return -EAGAIN;

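	/*
	 * Anonymous devices live in major 0, so the id must fit in the minor
	 * space; give the id back if we ran off the end of it.
	 */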
	if (dev == (1 << MINORBITS)) {
		spin_lock(&unnamed_dev_lock);
		ida_remove(&unnamed_dev_ida, dev);
		if (unnamed_dev_start > dev)
			unnamed_dev_start = dev;
		spin_unlock(&unnamed_dev_lock);
		return -EMFILE;
	}
	*p = MKDEV(0, dev & MINORMASK);
	return 0;
}
EXPORT_SYMBOL(get_anon_bdev);

void free_anon_bdev(dev_t dev)
{
	int slot = MINOR(dev);
	spin_lock(&unnamed_dev_lock);
	ida_remove(&unnamed_dev_ida, slot);
	if (slot < unnamed_dev_start)
		unnamed_dev_start = slot;
	spin_unlock(&unnamed_dev_lock);
}
EXPORT_SYMBOL(free_anon_bdev);

int set_anon_super(struct super_block *s, void *data)
{
	int error = get_anon_bdev(&s->s_dev);
	if (!error)
		s->s_bdi = &noop_backing_dev_info;
	return error;
}

EXPORT_SYMBOL(set_anon_super);

void kill_anon_super(struct super_block *sb)
{
	dev_t dev = sb->s_dev;
	generic_shutdown_super(sb);
	free_anon_bdev(dev);
}

EXPORT_SYMBOL(kill_anon_super);

void kill_litter_super(struct super_block *sb)
{
	if (sb->s_root)
		d_genocide(sb->s_root);
	kill_anon_super(sb);
}

EXPORT_SYMBOL(kill_litter_super);

static int ns_test_super(struct super_block *sb, void *data)
{
	return sb->s_fs_info == data;
}

static int ns_set_super(struct super_block *sb, void *data)
{
	sb->s_fs_info = data;
	return set_anon_super(sb, NULL);
}

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
	void *data, int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *sb;

	sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
	if (IS_ERR(sb))
		return ERR_CAST(sb);

	if (!sb->s_root) {
		int err;
		err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (err) {
			deactivate_locked_super(sb);
			return ERR_PTR(err);
		}

		sb->s_flags |= MS_ACTIVE;
	}

	return dget(sb->s_root);
}

EXPORT_SYMBOL(mount_ns);

#ifdef CONFIG_BLOCK
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;

	/*
	 * We set the bdi here to the queue backing, file systems can
	 * overwrite this in ->fill_super()
	 */
	s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return (void *)s->s_bdev == data;
}

struct dentry *mount_bdev(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	/*
	 * once the super is inserted into the list by sget, s_umount
	 * will protect the lockfs code from trying to start a snapshot
	 * while we are mounting
	 */
	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		error = -EBUSY;
		goto error_bdev;
	}
	s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
		 bdev);
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	if (IS_ERR(s))
		goto error_s;

	if (s->s_root) {
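		/*
		 * sget() found an existing superblock: the device is already
		 * mounted.  A second mount may not change it between
		 * read-only and read-write.
		 */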
		if ((flags ^ s->s_flags) & MS_RDONLY) {
			deactivate_locked_super(s);
			error = -EBUSY;
			goto error_bdev;
		}

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device().  blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount.  Drop
		 * s_umount temporarily.  This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

		s->s_mode = mode;
		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
		sb_set_blocksize(s, block_size(bdev));
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			goto error;
		}

		s->s_flags |= MS_ACTIVE;
		bdev->bd_super = s;
	}

	return dget(s->s_root);

error_s:
	error = PTR_ERR(s);
error_bdev:
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}
EXPORT_SYMBOL(mount_bdev);

void kill_block_super(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	fmode_t mode = sb->s_mode;

	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);
#endif

struct dentry *mount_nodev(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	int error;
	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);

	if (IS_ERR(s))
		return ERR_CAST(s);

	error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		deactivate_locked_super(s);
		return ERR_PTR(error);
	}
	s->s_flags |= MS_ACTIVE;
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_nodev);

static int compare_single(struct super_block *s, void *p)
{
	return 1;
}

struct dentry *mount_single(struct file_system_type *fs_type,
	int flags, void *data,
	int (*fill_super)(struct super_block *, void *, int))
{
	struct super_block *s;
	int error;

	s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
	if (IS_ERR(s))
		return ERR_CAST(s);
	if (!s->s_root) {
		error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(s);
			return ERR_PTR(error);
		}
		s->s_flags |= MS_ACTIVE;
	} else {
		do_remount_sb(s, flags, data, 0);
	}
	return dget(s->s_root);
}
EXPORT_SYMBOL(mount_single);

struct dentry *
mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct dentry *root;
	struct super_block *sb;
	char *secdata = NULL;
	int error = -ENOMEM;

	if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
		secdata = alloc_secdata();
		if (!secdata)
			goto out;

		error = security_sb_copy_data(data, secdata);
		if (error)
			goto out_free_secdata;
	}

	root = type->mount(type, flags, name, data);
	if (IS_ERR(root)) {
		error = PTR_ERR(root);
		goto out_free_secdata;
	}
	sb = root->d_sb;
	BUG_ON(!sb);
	WARN_ON(!sb->s_bdi);
	WARN_ON(sb->s_bdi == &default_backing_dev_info);
	sb->s_flags |= MS_BORN;

	error = security_sb_kern_mount(sb, flags, secdata);
	if (error)
		goto out_sb;

	/*
	 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
	 * but s_maxbytes was an unsigned long long for many releases. Throw
	 * this warning for a little while to try and catch filesystems that
	 * violate this rule.
	 */
	WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
		"negative value (%lld)\n", type->name, sb->s_maxbytes);

	up_write(&sb->s_umount);
	free_secdata(secdata);
	return root;
out_sb:
	dput(root);
	deactivate_locked_super(sb);
out_free_secdata:
	free_secdata(secdata);
out:
	return ERR_PTR(error);
}

/*
 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
 * instead.
 */
void __sb_end_write(struct super_block *sb, int level)
{
	percpu_counter_dec(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure s_writers are updated before we wake up waiters in
	 * freeze_super().
	 */
	smp_mb();
	if (waitqueue_active(&sb->s_writers.wait))
		wake_up(&sb->s_writers.wait);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _RET_IP_);
}
EXPORT_SYMBOL(__sb_end_write);

#ifdef CONFIG_LOCKDEP
/*
 * We want lockdep to tell us about possible deadlocks with freezing but
 * it's a bit tricky to properly instrument it. Getting a freeze protection
 * works as getting a read lock but there are subtle problems. XFS for example
 * gets freeze protection on internal level twice in some cases, which is OK
 * only because we already hold a freeze protection also on higher level. Due
 * to these cases we have to tell lockdep we are doing trylock when we
 * already hold a freeze protection for a higher freeze level.
 */
static void acquire_freeze_lock(struct super_block *sb, int level, bool trylock,
				unsigned long ip)
{
	int i;

	if (!trylock) {
		for (i = 0; i < level - 1; i++)
			if (lock_is_held(&sb->s_writers.lock_map[i])) {
				trylock = true;
				break;
			}
	}
	rwsem_acquire_read(&sb->s_writers.lock_map[level-1], 0, trylock, ip);
}
#endif

/*
 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
 * instead.
 */
int __sb_start_write(struct super_block *sb, int level, bool wait)
{
retry:
	if (unlikely(sb->s_writers.frozen >= level)) {
		if (!wait)
			return 0;
		wait_event(sb->s_writers.wait_unfrozen,
			   sb->s_writers.frozen < level);
	}

#ifdef CONFIG_LOCKDEP
	acquire_freeze_lock(sb, level, !wait, _RET_IP_);
#endif
	percpu_counter_inc(&sb->s_writers.counter[level-1]);
	/*
	 * Make sure counter is updated before we check for frozen.
	 * freeze_super() first sets frozen and then checks the counter.
	 */
	smp_mb();
	if (unlikely(sb->s_writers.frozen >= level)) {
		__sb_end_write(sb, level);
		goto retry;
	}
	return 1;
}
EXPORT_SYMBOL(__sb_start_write);

/**
 * sb_wait_write - wait until all writers to given file system finish
 * @sb: the super for which we wait
 * @level: type of writers we wait for (normal vs page fault)
 *
 * This function waits until there are no writers of given type to given file
 * system. Caller of this function should make sure there can be no new writers
 * of type @level before calling this function. Otherwise this function can
 * livelock.
 */
static void sb_wait_write(struct super_block *sb, int level)
{
	s64 writers;

	/*
	 * We just cycle-through lockdep here so that it does not complain
	 * about returning with lock to userspace
	 */
	rwsem_acquire(&sb->s_writers.lock_map[level-1], 0, 0, _THIS_IP_);
	rwsem_release(&sb->s_writers.lock_map[level-1], 1, _THIS_IP_);

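	/*
	 * percpu_counter_sum() gives the exact (not approximate) count; we
	 * must observe zero writers of this level before freezing proceeds.
	 */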
	do {
		DEFINE_WAIT(wait);

		/*
		 * We use a barrier in prepare_to_wait() to separate setting
		 * of frozen and checking of the counter
		 */
		prepare_to_wait(&sb->s_writers.wait, &wait,
				TASK_UNINTERRUPTIBLE);

		writers = percpu_counter_sum(&sb->s_writers.counter[level-1]);
		if (writers)
			schedule();

		finish_wait(&sb->s_writers.wait, &wait);
	} while (writers);
}

/**
 * freeze_super - lock the filesystem and force it into a consistent state
 * @sb: the super to lock
 *
 * Syncs the super to make sure the filesystem is consistent and calls the fs's
 * freeze_fs.  Subsequent calls to this without first thawing the fs will return
 * -EBUSY.
 *
 * During this function, sb->s_writers.frozen goes through these values:
 *
 * SB_UNFROZEN: File system is normal, all writes progress as usual.
 *
 * SB_FREEZE_WRITE: The file system is in the process of being frozen.  New
 * writes should be blocked, though page faults are still allowed. We wait for
 * all writes to complete and then proceed to the next stage.
 *
 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
 * but internal fs threads can still modify the filesystem (although they
 * should not dirty new pages or inodes), writeback can run etc. After waiting
 * for all running page faults we sync the filesystem which will clean all
 * dirty pages and inodes (no new dirty pages or inodes can be created when
 * sync is running).
 *
 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
 * modification are blocked (e.g. XFS preallocation truncation on inode
 * reclaim). This is usually implemented by blocking new transactions for
 * filesystems that have them and need this additional guard. After all
 * internal writers are finished we call ->freeze_fs() to finish filesystem
 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
 *
 * sb->s_writers.frozen is protected by sb->s_umount.
 */
int freeze_super(struct super_block *sb)
{
	int ret;

	atomic_inc(&sb->s_active);
	down_write(&sb->s_umount);
	if (sb->s_writers.frozen != SB_UNFROZEN) {
		deactivate_locked_super(sb);
		return -EBUSY;
	}

	if (!(sb->s_flags & MS_BORN)) {
		up_write(&sb->s_umount);
		return 0;	/* sic - it's "nothing to do" */
	}

	if (sb->s_flags & MS_RDONLY) {
		/* Nothing to do really... */
		sb->s_writers.frozen = SB_FREEZE_COMPLETE;
		up_write(&sb->s_umount);
		return 0;
	}

	/* From now on, no new normal writers can start */
	sb->s_writers.frozen = SB_FREEZE_WRITE;
	smp_wmb();

	/* Release s_umount to preserve sb_start_write -> s_umount ordering */
	up_write(&sb->s_umount);

	sb_wait_write(sb, SB_FREEZE_WRITE);

	/* Now we go and block page faults... */
	down_write(&sb->s_umount);
	sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
	smp_wmb();

	sb_wait_write(sb, SB_FREEZE_PAGEFAULT);

	/* All writers are done so after syncing there won't be dirty data */
	sync_filesystem(sb);

	/* Now wait for internal filesystem counter */
	sb->s_writers.frozen = SB_FREEZE_FS;
	smp_wmb();
	sb_wait_write(sb, SB_FREEZE_FS);

	if (sb->s_op->freeze_fs) {
		ret = sb->s_op->freeze_fs(sb);
		if (ret) {
			printk(KERN_ERR
				"VFS: Filesystem freeze failed\n");
			sb->s_writers.frozen = SB_UNFROZEN;
			smp_wmb();
			wake_up(&sb->s_writers.wait_unfrozen);
			deactivate_locked_super(sb);
			return ret;
		}
	}
	/*
	 * This is just for debugging purposes so that fs can warn if it
	 * sees write activity when frozen is set to SB_FREEZE_COMPLETE.
	 */
	sb->s_writers.frozen = SB_FREEZE_COMPLETE;
	up_write(&sb->s_umount);
	return 0;
}
EXPORT_SYMBOL(freeze_super);

/**
 * thaw_super -- unlock filesystem
 * @sb: the super to thaw
 *
 * Unlocks the filesystem and marks it writeable again after freeze_super().
 */
int thaw_super(struct super_block *sb)
{
	int error;

	down_write(&sb->s_umount);
	if (sb->s_writers.frozen == SB_UNFROZEN) {
		up_write(&sb->s_umount);
		return -EINVAL;
	}

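	/*
	 * A read-only fs was marked frozen without calling ->freeze_fs(),
	 * so there is nothing to unfreeze either.
	 */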
	if (sb->s_flags & MS_RDONLY)
		goto out;

	if (sb->s_op->unfreeze_fs) {
		error = sb->s_op->unfreeze_fs(sb);
		if (error) {
			printk(KERN_ERR
				"VFS: Filesystem thaw failed\n");
			up_write(&sb->s_umount);
			return error;
		}
	}

out:
	sb->s_writers.frozen = SB_UNFROZEN;
	smp_wmb();
	wake_up(&sb->s_writers.wait_unfrozen);
	deactivate_locked_super(sb);

	return 0;
}
EXPORT_SYMBOL(thaw_super);