1 // SPDX-License-Identifier: GPL-2.0
3 * Implementation of the diskquota system for the LINUX operating system. QUOTA
4 * is implemented using the BSD system call interface as the means of
5 * communication with the user level. This file contains the generic routines
6 * called by the different filesystems on allocation of an inode or block.
7 * These routines take care of the administration needed to have a consistent
8 * diskquota tracking system. The ideas of both user and group quotas are based
9 * on the Melbourne quota system as used on BSD derived systems. The internal
10 * implementation is based on one of the several variants of the LINUX
11 * inode-subsystem with added complexity of the diskquota system.
17 * Revised list management to avoid races
20 * Fixed races in dquot_transfer(), dqget() and dquot_alloc_...().
21 * As a consequence, the locking was moved from dquot_decr_...() and
22 * dquot_incr_...() to the calling functions.
23 * invalidate_dquots() now writes modified dquots.
24 * Serialized quota_off() and quota_on() for mount point.
25 * Fixed a few bugs in grow_dquots().
26 * Fixed deadlock in write_dquot() - we no longer account quotas on quota files
28 * remove_dquot_ref() moved to inode.c - it now traverses through inodes
29 * add_dquot_ref() restarts after blocking
30 * Added check for bogus uid and fixed check for group in quotactl.
33 * Used struct list_head instead of own list struct
34 * Invalidation of referenced dquots is no longer possible
35 * Improved free_dquots list management
36 * Quota and i_blocks are now updated in one place to avoid races
37 * Warnings are now delayed so we won't block in critical section
38 * Write updated not to require dquot lock
41 * Added dynamic quota structure allocation
44 * Rewritten quota interface. Implemented new quota format and
45 * formats registering.
51 * Added journalled quota support, fix lock inversion problems
54 * (C) Copyright 1994 - 1997 Marco van Wieringen
57 #include <linux/errno.h>
58 #include <linux/kernel.h>
60 #include <linux/mount.h>
62 #include <linux/time.h>
63 #include <linux/types.h>
64 #include <linux/string.h>
65 #include <linux/fcntl.h>
66 #include <linux/stat.h>
67 #include <linux/tty.h>
68 #include <linux/file.h>
69 #include <linux/slab.h>
70 #include <linux/sysctl.h>
71 #include <linux/init.h>
72 #include <linux/module.h>
73 #include <linux/proc_fs.h>
74 #include <linux/security.h>
75 #include <linux/sched.h>
76 #include <linux/cred.h>
77 #include <linux/kmod.h>
78 #include <linux/namei.h>
79 #include <linux/capability.h>
80 #include <linux/quotaops.h>
81 #include "../internal.h" /* ugh */
83 #include <linux/uaccess.h>
86 * There are five quota SMP locks:
87 * * dq_list_lock protects all lists with quotas and quota formats.
88 * * dquot->dq_dqb_lock protects data from dq_dqb
89 * * inode->i_lock protects inode->i_blocks, i_bytes and also guards
90 * consistency of dquot->dq_dqb with inode->i_blocks, i_bytes so that
91 * dquot_transfer() can stabilize amount it transfers
92 * * dq_data_lock protects mem_dqinfo structures and modifications of dquot
93 * pointers in the inode
94 * * dq_state_lock protects modifications of quota state (on quotaon and
95 * quotaoff) and readers who care about latest values take it as well.
97 * The spinlock ordering is hence:
98 * dq_data_lock > dq_list_lock > i_lock > dquot->dq_dqb_lock,
99 * dq_list_lock > dq_state_lock
101 * Note that some things (e.g. sb pointer, type, id) don't change during
102 * the life of the dquot structure and so need not be protected by a lock
104 * Operations accessing dquots via inode pointers are protected by dquot_srcu.
105 * Operations reading the pointers need srcu_read_lock(&dquot_srcu), and
106 * synchronize_srcu(&dquot_srcu) is called after clearing pointers from
107 * inode and before dropping dquot references to avoid use of dquots after
108 * they are freed. dq_data_lock is used to serialize the pointer setting and
109 * clearing operations.
110 * Special care needs to be taken about S_NOQUOTA inode flag (marking that
111 * inode is a quota file). Functions adding pointers from inode to dquots have
112 * to check this flag under dq_data_lock and then (if S_NOQUOTA is not set) they
113 * have to do all pointer modifications before dropping dq_data_lock. This makes
114 * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
115 * then drops all pointers to dquots from an inode.
117 * Each dquot has its dq_lock mutex. Dquot is locked when it is being read to
118 * memory (or space for it is being allocated) on the first dqget(), when it is
119 * being written out, and when it is being released on the last dqput(). The
120 * allocation and release operations are serialized by the dq_lock and by
121 * checking the use count in dquot_release().
123 * Lock ordering (including related VFS locks) is the following:
124 * s_umount > i_mutex > journal_lock > dquot->dq_lock > dqio_sem
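 *
 * A minimal sketch of what that ordering means in practice (illustrative
 * only, not a real call site): whenever several of these spinlocks are held
 * at once they must be nested in the order above, e.g.:
 *
 *	spin_lock(&dq_data_lock);
 *	spin_lock(&dq_list_lock);
 *	spin_lock(&inode->i_lock);
 *	spin_lock(&dquot->dq_dqb_lock);
 *	...
 *	spin_unlock(&dquot->dq_dqb_lock);
 *	spin_unlock(&inode->i_lock);
 *	spin_unlock(&dq_list_lock);
 *	spin_unlock(&dq_data_lock);
 */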
127 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
128 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_state_lock);
129 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_data_lock);
130 EXPORT_SYMBOL(dq_data_lock);
131 DEFINE_STATIC_SRCU(dquot_srcu);
133 static DECLARE_WAIT_QUEUE_HEAD(dquot_ref_wq);
135 void __quota_error(struct super_block *sb, const char *func,
136 const char *fmt, ...)
138 if (printk_ratelimit()) {
140 struct va_format vaf;
147 printk(KERN_ERR "Quota error (device %s): %s: %pV\n",
148 sb->s_id, func, &vaf);
153 EXPORT_SYMBOL(__quota_error);
155 #if defined(CONFIG_QUOTA_DEBUG) || defined(CONFIG_PRINT_QUOTA_WARNING)
156 static char *quotatypes[] = INITQFNAMES;
158 static struct quota_format_type *quota_formats; /* List of registered formats */
159 static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
161 /* SLAB cache for dquot structures */
162 static struct kmem_cache *dquot_cachep;
164 int register_quota_format(struct quota_format_type *fmt)
166 spin_lock(&dq_list_lock);
167 fmt->qf_next = quota_formats;
169 spin_unlock(&dq_list_lock);
172 EXPORT_SYMBOL(register_quota_format);
174 void unregister_quota_format(struct quota_format_type *fmt)
176 struct quota_format_type **actqf;
178 spin_lock(&dq_list_lock);
179 for (actqf = "a_formats; *actqf && *actqf != fmt;
180 actqf = &(*actqf)->qf_next)
183 *actqf = (*actqf)->qf_next;
184 spin_unlock(&dq_list_lock);
186 EXPORT_SYMBOL(unregister_quota_format);
188 static struct quota_format_type *find_quota_format(int id)
190 struct quota_format_type *actqf;
192 spin_lock(&dq_list_lock);
193 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
194 actqf = actqf->qf_next)
196 if (!actqf || !try_module_get(actqf->qf_owner)) {
199 spin_unlock(&dq_list_lock);
201 for (qm = 0; module_names[qm].qm_fmt_id &&
202 module_names[qm].qm_fmt_id != id; qm++)
204 if (!module_names[qm].qm_fmt_id ||
205 request_module(module_names[qm].qm_mod_name))
208 spin_lock(&dq_list_lock);
209 for (actqf = quota_formats; actqf && actqf->qf_fmt_id != id;
210 actqf = actqf->qf_next)
212 if (actqf && !try_module_get(actqf->qf_owner))
215 spin_unlock(&dq_list_lock);
219 static void put_quota_format(struct quota_format_type *fmt)
221 module_put(fmt->qf_owner);
225 * Dquot List Management:
226 * The quota code uses four lists for dquot management: the inuse_list,
227 * free_dquots, dqi_dirty_list, and dquot_hash[] array. A single dquot
228 * structure may be on some of those lists, depending on its current state.
230 * All dquots are placed at the end of inuse_list when first created, and this
231 * list is used for the invalidate operation, which must look at every dquot.
233 * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
234 * and this list is searched whenever we need an available dquot. Dquots are
235 * removed from the list as soon as they are used again, and
236 * dqstats.free_dquots gives the number of dquots on the list. When
237 * a dquot is invalidated it is completely released from memory.
239 * Dirty dquots are added to the dqi_dirty_list of quota_info when marked
240 * dirty, and this list is searched when writing dirty dquots back to the
241 * quota file. Note that some filesystems do dirty dquot tracking on their
242 * own (e.g. in a journal) and thus don't use dqi_dirty_list.
244 * Dquots with a specific identity (device, type and id) are placed on
245 * one of the dquot_hash[] hash chains. This provides an efficient search
246 * mechanism to locate a specific dquot.
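 *
 * Rough lifecycle sketch (a summary of the rules above, not new behaviour):
 *
 *	first dqget()      -> new dquot goes on inuse_list and dquot_hash
 *	later dqget()      -> found in dquot_hash, removed from free_dquots
 *	mark dirty         -> linked on dqi_dirty_list until written back
 *	last dqput()       -> appended to the tail of free_dquots
 *	shrinker / invalidate_dquots() -> unhashed, unlinked and destroyed
 */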
249 static LIST_HEAD(inuse_list);
250 static LIST_HEAD(free_dquots);
251 static unsigned int dq_hash_bits, dq_hash_mask;
252 static struct hlist_head *dquot_hash;
254 struct dqstats dqstats;
255 EXPORT_SYMBOL(dqstats);
257 static qsize_t inode_get_rsv_space(struct inode *inode);
258 static qsize_t __inode_get_rsv_space(struct inode *inode);
259 static int __dquot_initialize(struct inode *inode, int type);
261 static inline unsigned int
262 hashfn(const struct super_block *sb, struct kqid qid)
264 unsigned int id = from_kqid(&init_user_ns, qid);
268 tmp = (((unsigned long)sb>>L1_CACHE_SHIFT) ^ id) * (MAXQUOTAS - type);
269 return (tmp + (tmp >> dq_hash_bits)) & dq_hash_mask;
273 * Following list functions expect dq_list_lock to be held
275 static inline void insert_dquot_hash(struct dquot *dquot)
277 struct hlist_head *head;
278 head = dquot_hash + hashfn(dquot->dq_sb, dquot->dq_id);
279 hlist_add_head(&dquot->dq_hash, head);
282 static inline void remove_dquot_hash(struct dquot *dquot)
284 hlist_del_init(&dquot->dq_hash);
287 static struct dquot *find_dquot(unsigned int hashent, struct super_block *sb,
290 struct hlist_node *node;
293 hlist_for_each (node, dquot_hash+hashent) {
294 dquot = hlist_entry(node, struct dquot, dq_hash);
295 if (dquot->dq_sb == sb && qid_eq(dquot->dq_id, qid))
301 /* Add a dquot to the tail of the free list */
302 static inline void put_dquot_last(struct dquot *dquot)
304 list_add_tail(&dquot->dq_free, &free_dquots);
305 dqstats_inc(DQST_FREE_DQUOTS);
308 static inline void remove_free_dquot(struct dquot *dquot)
310 if (list_empty(&dquot->dq_free))
312 list_del_init(&dquot->dq_free);
313 dqstats_dec(DQST_FREE_DQUOTS);
316 static inline void put_inuse(struct dquot *dquot)
318 /* We add to the back of the inuse list so we don't have to restart
319 * when traversing this list if we block */
320 list_add_tail(&dquot->dq_inuse, &inuse_list);
321 dqstats_inc(DQST_ALLOC_DQUOTS);
324 static inline void remove_inuse(struct dquot *dquot)
326 dqstats_dec(DQST_ALLOC_DQUOTS);
327 list_del(&dquot->dq_inuse);
330 * End of list functions needing dq_list_lock
333 static void wait_on_dquot(struct dquot *dquot)
335 mutex_lock(&dquot->dq_lock);
336 mutex_unlock(&dquot->dq_lock);
339 static inline int dquot_dirty(struct dquot *dquot)
341 return test_bit(DQ_MOD_B, &dquot->dq_flags);
344 static inline int mark_dquot_dirty(struct dquot *dquot)
346 return dquot->dq_sb->dq_op->mark_dirty(dquot);
349 /* Mark dquot dirty in an atomic manner, and return its old dirty flag state */
350 int dquot_mark_dquot_dirty(struct dquot *dquot)
354 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
357 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
358 return test_and_set_bit(DQ_MOD_B, &dquot->dq_flags);
360 /* If quota is dirty already, we don't have to acquire dq_list_lock */
361 if (test_bit(DQ_MOD_B, &dquot->dq_flags))
364 spin_lock(&dq_list_lock);
365 if (!test_and_set_bit(DQ_MOD_B, &dquot->dq_flags)) {
366 list_add(&dquot->dq_dirty, &sb_dqopt(dquot->dq_sb)->
367 info[dquot->dq_id.type].dqi_dirty_list);
370 spin_unlock(&dq_list_lock);
373 EXPORT_SYMBOL(dquot_mark_dquot_dirty);
375 /* Dirtify all the dquots - this can block when journalling */
376 static inline int mark_all_dquot_dirty(struct dquot * const *dquot)
381 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
383 /* Even in case of error we have to continue */
384 ret = mark_dquot_dirty(dquot[cnt]);
391 static inline void dqput_all(struct dquot **dquot)
395 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
399 static inline int clear_dquot_dirty(struct dquot *dquot)
401 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NOLIST_DIRTY)
402 return test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags);
404 spin_lock(&dq_list_lock);
405 if (!test_and_clear_bit(DQ_MOD_B, &dquot->dq_flags)) {
406 spin_unlock(&dq_list_lock);
409 list_del_init(&dquot->dq_dirty);
410 spin_unlock(&dq_list_lock);
414 void mark_info_dirty(struct super_block *sb, int type)
416 spin_lock(&dq_data_lock);
417 sb_dqopt(sb)->info[type].dqi_flags |= DQF_INFO_DIRTY;
418 spin_unlock(&dq_data_lock);
420 EXPORT_SYMBOL(mark_info_dirty);
423 * Read dquot from disk and alloc space for it
426 int dquot_acquire(struct dquot *dquot)
428 int ret = 0, ret2 = 0;
429 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
431 mutex_lock(&dquot->dq_lock);
432 if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
433 ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot);
437 /* Make sure flags update is visible after dquot has been filled */
438 smp_mb__before_atomic();
439 set_bit(DQ_READ_B, &dquot->dq_flags);
440 /* Instantiate dquot if needed */
441 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && !dquot->dq_off) {
442 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
443 /* Write the info if needed */
444 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
445 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
446 dquot->dq_sb, dquot->dq_id.type);
456 * Make sure flags update is visible after on-disk struct has been
457 * allocated. Paired with smp_rmb() in dqget().
459 smp_mb__before_atomic();
460 set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
462 mutex_unlock(&dquot->dq_lock);
465 EXPORT_SYMBOL(dquot_acquire);
468 * Write dquot to disk
470 int dquot_commit(struct dquot *dquot)
473 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
475 mutex_lock(&dquot->dq_lock);
476 if (!clear_dquot_dirty(dquot))
478 /* An inactive dquot can exist only if there was an error during read/init
479 * => we had better not write it */
480 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
481 ret = dqopt->ops[dquot->dq_id.type]->commit_dqblk(dquot);
485 mutex_unlock(&dquot->dq_lock);
488 EXPORT_SYMBOL(dquot_commit);
493 int dquot_release(struct dquot *dquot)
495 int ret = 0, ret2 = 0;
496 struct quota_info *dqopt = sb_dqopt(dquot->dq_sb);
498 mutex_lock(&dquot->dq_lock);
499 /* Check whether we are not racing with some other dqget() */
500 if (atomic_read(&dquot->dq_count) > 1)
502 if (dqopt->ops[dquot->dq_id.type]->release_dqblk) {
503 ret = dqopt->ops[dquot->dq_id.type]->release_dqblk(dquot);
505 if (info_dirty(&dqopt->info[dquot->dq_id.type])) {
506 ret2 = dqopt->ops[dquot->dq_id.type]->write_file_info(
507 dquot->dq_sb, dquot->dq_id.type);
512 clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
514 mutex_unlock(&dquot->dq_lock);
517 EXPORT_SYMBOL(dquot_release);
519 void dquot_destroy(struct dquot *dquot)
521 kmem_cache_free(dquot_cachep, dquot);
523 EXPORT_SYMBOL(dquot_destroy);
525 static inline void do_destroy_dquot(struct dquot *dquot)
527 dquot->dq_sb->dq_op->destroy_dquot(dquot);
530 /* Invalidate all dquots on the list. Note that this function is called after
531 * quota is disabled and pointers from inodes removed so there cannot be new
532 * quota users. There can still be some users of quotas due to inodes being
533 * just deleted or pruned by prune_icache() (those are not attached to any
534 * list) or parallel quotactl call. We have to wait for such users.
536 static void invalidate_dquots(struct super_block *sb, int type)
538 struct dquot *dquot, *tmp;
541 spin_lock(&dq_list_lock);
542 list_for_each_entry_safe(dquot, tmp, &inuse_list, dq_inuse) {
543 if (dquot->dq_sb != sb)
545 if (dquot->dq_id.type != type)
547 /* Wait for dquot users */
548 if (atomic_read(&dquot->dq_count)) {
550 spin_unlock(&dq_list_lock);
552 * Once dqput() wakes us up, we know it's time to free the dquot.
554 * IMPORTANT: we rely on the fact that there is always
555 * at most one process waiting for the dquot to free.
556 * Otherwise dq_count would be > 1 and we would never see it drop to 1.
559 wait_event(dquot_ref_wq,
560 atomic_read(&dquot->dq_count) == 1);
562 /* At this moment the dquot need not exist (it could have been
563 * reclaimed by prune_dqcache()). Hence we must restart the scan. */
568 * Quota now has no users and it has been written on last
571 remove_dquot_hash(dquot);
572 remove_free_dquot(dquot);
574 do_destroy_dquot(dquot);
576 spin_unlock(&dq_list_lock);
579 /* Call callback for every active dquot on given filesystem */
580 int dquot_scan_active(struct super_block *sb,
581 int (*fn)(struct dquot *dquot, unsigned long priv),
584 struct dquot *dquot, *old_dquot = NULL;
587 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
589 spin_lock(&dq_list_lock);
590 list_for_each_entry(dquot, &inuse_list, dq_inuse) {
591 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags))
593 if (dquot->dq_sb != sb)
595 /* Now we have active dquot so we can just increase use count */
596 atomic_inc(&dquot->dq_count);
597 spin_unlock(&dq_list_lock);
598 dqstats_inc(DQST_LOOKUPS);
602 * ->release_dquot() can be racing with us. Our reference
603 * protects us from new calls to it so just wait for any
604 * outstanding call and recheck the DQ_ACTIVE_B after that.
606 wait_on_dquot(dquot);
607 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
608 ret = fn(dquot, priv);
612 spin_lock(&dq_list_lock);
613 /* We are safe to continue now because our dquot could not
614 * be moved out of the inuse list while we hold the reference */
616 spin_unlock(&dq_list_lock);
621 EXPORT_SYMBOL(dquot_scan_active);
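/*
 * Example use (hypothetical caller, for illustration only): a filesystem
 * could flush every active dquot like this; "myfs_commit_dquot" is a
 * made-up name, not an existing helper.
 *
 *	static int myfs_commit_dquot(struct dquot *dquot, unsigned long priv)
 *	{
 *		return dquot_commit(dquot);
 *	}
 *
 *	err = dquot_scan_active(sb, myfs_commit_dquot, 0);
 */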
623 /* Write all dquot structures to quota files */
624 int dquot_writeback_dquots(struct super_block *sb, int type)
626 struct list_head *dirty;
628 struct quota_info *dqopt = sb_dqopt(sb);
632 WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount));
634 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
635 if (type != -1 && cnt != type)
637 if (!sb_has_quota_active(sb, cnt))
639 spin_lock(&dq_list_lock);
640 dirty = &dqopt->info[cnt].dqi_dirty_list;
641 while (!list_empty(dirty)) {
642 dquot = list_first_entry(dirty, struct dquot,
645 WARN_ON(!test_bit(DQ_ACTIVE_B, &dquot->dq_flags));
647 /* Now we have an active dquot from which someone is
648 * holding a reference so we can safely just increase the use count */
651 spin_unlock(&dq_list_lock);
652 dqstats_inc(DQST_LOOKUPS);
653 err = sb->dq_op->write_dquot(dquot);
656 * Clear dirty bit anyway to avoid an infinite loop here.
659 clear_dquot_dirty(dquot);
664 spin_lock(&dq_list_lock);
666 spin_unlock(&dq_list_lock);
669 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
670 if ((cnt == type || type == -1) && sb_has_quota_active(sb, cnt)
671 && info_dirty(&dqopt->info[cnt]))
672 sb->dq_op->write_info(sb, cnt);
673 dqstats_inc(DQST_SYNCS);
677 EXPORT_SYMBOL(dquot_writeback_dquots);
679 /* Write all dquot structures to disk and make them visible from userspace */
680 int dquot_quota_sync(struct super_block *sb, int type)
682 struct quota_info *dqopt = sb_dqopt(sb);
686 ret = dquot_writeback_dquots(sb, type);
689 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
692 /* This is not very clever (or fast) but currently I don't know of
693 * any other simple way of getting quota data to disk and we must get
694 * it there for it to be visible to userspace... */
695 if (sb->s_op->sync_fs)
696 sb->s_op->sync_fs(sb, 1);
697 sync_blockdev(sb->s_bdev);
700 * Now when everything is written we can discard the pagecache so
701 * that userspace sees the changes.
703 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
704 if (type != -1 && cnt != type)
706 if (!sb_has_quota_active(sb, cnt))
708 inode_lock(dqopt->files[cnt]);
709 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
710 inode_unlock(dqopt->files[cnt]);
715 EXPORT_SYMBOL(dquot_quota_sync);
718 dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
721 unsigned long freed = 0;
723 spin_lock(&dq_list_lock);
724 while (!list_empty(&free_dquots) && sc->nr_to_scan) {
725 dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
726 remove_dquot_hash(dquot);
727 remove_free_dquot(dquot);
729 do_destroy_dquot(dquot);
733 spin_unlock(&dq_list_lock);
738 dqcache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
740 return vfs_pressure_ratio(
741 percpu_counter_read_positive(&dqstats.counter[DQST_FREE_DQUOTS]));
744 static struct shrinker dqcache_shrinker = {
745 .count_objects = dqcache_shrink_count,
746 .scan_objects = dqcache_shrink_scan,
747 .seeks = DEFAULT_SEEKS,
751 * Put reference to dquot
753 void dqput(struct dquot *dquot)
759 #ifdef CONFIG_QUOTA_DEBUG
760 if (!atomic_read(&dquot->dq_count)) {
761 quota_error(dquot->dq_sb, "trying to free free dquot of %s %d",
762 quotatypes[dquot->dq_id.type],
763 from_kqid(&init_user_ns, dquot->dq_id));
767 dqstats_inc(DQST_DROPS);
769 spin_lock(&dq_list_lock);
770 if (atomic_read(&dquot->dq_count) > 1) {
771 /* We have more than one user... nothing to do */
772 atomic_dec(&dquot->dq_count);
773 /* Releasing dquot during quotaoff phase? */
774 if (!sb_has_quota_active(dquot->dq_sb, dquot->dq_id.type) &&
775 atomic_read(&dquot->dq_count) == 1)
776 wake_up(&dquot_ref_wq);
777 spin_unlock(&dq_list_lock);
780 /* Need to release dquot? */
781 if (dquot_dirty(dquot)) {
782 spin_unlock(&dq_list_lock);
783 /* Commit dquot before releasing */
784 ret = dquot->dq_sb->dq_op->write_dquot(dquot);
786 quota_error(dquot->dq_sb, "Can't write quota structure"
787 " (error %d). Quota may get out of sync!",
790 * We clear the dirty bit anyway, so that we avoid an infinite loop.
793 clear_dquot_dirty(dquot);
797 if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
798 spin_unlock(&dq_list_lock);
799 dquot->dq_sb->dq_op->release_dquot(dquot);
802 atomic_dec(&dquot->dq_count);
803 #ifdef CONFIG_QUOTA_DEBUG
805 BUG_ON(!list_empty(&dquot->dq_free));
807 put_dquot_last(dquot);
808 spin_unlock(&dq_list_lock);
810 EXPORT_SYMBOL(dqput);
812 struct dquot *dquot_alloc(struct super_block *sb, int type)
814 return kmem_cache_zalloc(dquot_cachep, GFP_NOFS);
816 EXPORT_SYMBOL(dquot_alloc);
818 static struct dquot *get_empty_dquot(struct super_block *sb, int type)
822 dquot = sb->dq_op->alloc_dquot(sb, type);
826 mutex_init(&dquot->dq_lock);
827 INIT_LIST_HEAD(&dquot->dq_free);
828 INIT_LIST_HEAD(&dquot->dq_inuse);
829 INIT_HLIST_NODE(&dquot->dq_hash);
830 INIT_LIST_HEAD(&dquot->dq_dirty);
832 dquot->dq_id = make_kqid_invalid(type);
833 atomic_set(&dquot->dq_count, 1);
834 spin_lock_init(&dquot->dq_dqb_lock);
840 * Get reference to dquot
842 * Locking is slightly tricky here. We are guarded from parallel quotaoff()
843 * destroying our dquot by:
844 * a) checking for quota flags under dq_list_lock and
845 * b) getting a reference to dquot before we release dq_list_lock
847 struct dquot *dqget(struct super_block *sb, struct kqid qid)
849 unsigned int hashent = hashfn(sb, qid);
850 struct dquot *dquot, *empty = NULL;
852 if (!qid_has_mapping(sb->s_user_ns, qid))
853 return ERR_PTR(-EINVAL);
855 if (!sb_has_quota_active(sb, qid.type))
856 return ERR_PTR(-ESRCH);
858 spin_lock(&dq_list_lock);
859 spin_lock(&dq_state_lock);
860 if (!sb_has_quota_active(sb, qid.type)) {
861 spin_unlock(&dq_state_lock);
862 spin_unlock(&dq_list_lock);
863 dquot = ERR_PTR(-ESRCH);
866 spin_unlock(&dq_state_lock);
868 dquot = find_dquot(hashent, sb, qid);
871 spin_unlock(&dq_list_lock);
872 empty = get_empty_dquot(sb, qid.type);
874 schedule(); /* Try to wait for a moment... */
880 /* all dquots go on the inuse_list */
882 /* hash it first so it can be found */
883 insert_dquot_hash(dquot);
884 spin_unlock(&dq_list_lock);
885 dqstats_inc(DQST_LOOKUPS);
887 if (!atomic_read(&dquot->dq_count))
888 remove_free_dquot(dquot);
889 atomic_inc(&dquot->dq_count);
890 spin_unlock(&dq_list_lock);
891 dqstats_inc(DQST_CACHE_HITS);
892 dqstats_inc(DQST_LOOKUPS);
894 /* Wait for dq_lock - after this we know that either dquot_release() is
895 * already finished or it will be canceled due to dq_count > 1 test */
896 wait_on_dquot(dquot);
897 /* Read the dquot / allocate space in quota file */
898 if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
901 err = sb->dq_op->acquire_dquot(dquot);
904 dquot = ERR_PTR(err);
909 * Make sure following reads see filled structure - paired with
910 * smp_mb__before_atomic() in dquot_acquire().
913 #ifdef CONFIG_QUOTA_DEBUG
914 BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
918 do_destroy_dquot(empty);
922 EXPORT_SYMBOL(dqget);
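/*
 * Typical usage sketch (illustrative only): a reference obtained from dqget()
 * must be dropped with dqput() once the caller is done with it. "uid" below
 * stands for a kuid_t the caller already holds.
 *
 *	struct dquot *dquot = dqget(sb, make_kqid_uid(uid));
 *
 *	if (!IS_ERR(dquot)) {
 *		... inspect or update dquot->dq_dqb under dq_dqb_lock ...
 *		dqput(dquot);
 *	}
 */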
924 static inline struct dquot **i_dquot(struct inode *inode)
926 return inode->i_sb->s_op->get_dquots(inode);
929 static int dqinit_needed(struct inode *inode, int type)
931 struct dquot * const *dquots;
934 if (IS_NOQUOTA(inode))
937 dquots = i_dquot(inode);
939 return !dquots[type];
940 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
946 /* This routine is guarded by s_umount semaphore */
947 static int add_dquot_ref(struct super_block *sb, int type)
949 struct inode *inode, *old_inode = NULL;
950 #ifdef CONFIG_QUOTA_DEBUG
955 spin_lock(&sb->s_inode_list_lock);
956 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
957 spin_lock(&inode->i_lock);
958 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
959 !atomic_read(&inode->i_writecount) ||
960 !dqinit_needed(inode, type)) {
961 spin_unlock(&inode->i_lock);
965 spin_unlock(&inode->i_lock);
966 spin_unlock(&sb->s_inode_list_lock);
968 #ifdef CONFIG_QUOTA_DEBUG
969 if (unlikely(inode_get_rsv_space(inode) > 0))
973 err = __dquot_initialize(inode, type);
980 * We hold a reference to 'inode' so it couldn't have been
981 * removed from s_inodes list while we dropped the
982 * s_inode_list_lock. We cannot iput the inode now as we can be
983 * holding the last reference and we cannot iput it under
984 * s_inode_list_lock. So we keep the reference and iput it later.
988 spin_lock(&sb->s_inode_list_lock);
990 spin_unlock(&sb->s_inode_list_lock);
993 #ifdef CONFIG_QUOTA_DEBUG
995 quota_error(sb, "Writes happened before quota was turned on "
996 "thus quota information is probably inconsistent. "
997 "Please run quotacheck(8)");
1004 * Remove references to dquots from inode and add dquot to list for freeing
1005 * if we have the last reference to dquot
1007 static void remove_inode_dquot_ref(struct inode *inode, int type,
1008 struct list_head *tofree_head)
1010 struct dquot **dquots = i_dquot(inode);
1011 struct dquot *dquot = dquots[type];
1016 dquots[type] = NULL;
1017 if (list_empty(&dquot->dq_free)) {
1019 * The inode still has a reference to the dquot so it can't be on the free list. */
1022 spin_lock(&dq_list_lock);
1023 list_add(&dquot->dq_free, tofree_head);
1024 spin_unlock(&dq_list_lock);
1027 * Dquot is already in a list to put so we won't drop the last reference here. */
1035 * Free list of dquots
1036 * Dquots are removed from inodes and no new references can be got so we are
1037 * the only ones holding reference
1039 static void put_dquot_list(struct list_head *tofree_head)
1041 struct list_head *act_head;
1042 struct dquot *dquot;
1044 act_head = tofree_head->next;
1045 while (act_head != tofree_head) {
1046 dquot = list_entry(act_head, struct dquot, dq_free);
1047 act_head = act_head->next;
1048 /* Remove dquot from the list so we won't have problems... */
1049 list_del_init(&dquot->dq_free);
1054 static void remove_dquot_ref(struct super_block *sb, int type,
1055 struct list_head *tofree_head)
1057 struct inode *inode;
1058 #ifdef CONFIG_QUOTA_DEBUG
1062 spin_lock(&sb->s_inode_list_lock);
1063 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1065 * We also have to scan I_NEW inodes because they can already
1066 * have the quota pointers initialized. Luckily, we need to touch
1067 * only the quota pointers and these have separate locking (dq_data_lock). */
1070 spin_lock(&dq_data_lock);
1071 if (!IS_NOQUOTA(inode)) {
1072 #ifdef CONFIG_QUOTA_DEBUG
1073 if (unlikely(inode_get_rsv_space(inode) > 0))
1076 remove_inode_dquot_ref(inode, type, tofree_head);
1078 spin_unlock(&dq_data_lock);
1080 spin_unlock(&sb->s_inode_list_lock);
1081 #ifdef CONFIG_QUOTA_DEBUG
1083 printk(KERN_WARNING "VFS (%s): Writes happened after quota"
1084 " was disabled thus quota information is probably "
1085 "inconsistent. Please run quotacheck(8).\n", sb->s_id);
1090 /* Gather all references from inodes and drop them */
1091 static void drop_dquot_ref(struct super_block *sb, int type)
1093 LIST_HEAD(tofree_head);
1096 remove_dquot_ref(sb, type, &tofree_head);
1097 synchronize_srcu(&dquot_srcu);
1098 put_dquot_list(&tofree_head);
1103 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
1105 if (dquot->dq_dqb.dqb_rsvspace >= number)
1106 dquot->dq_dqb.dqb_rsvspace -= number;
1109 dquot->dq_dqb.dqb_rsvspace = 0;
1111 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1112 dquot->dq_dqb.dqb_bsoftlimit)
1113 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1114 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1117 static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
1119 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1120 dquot->dq_dqb.dqb_curinodes >= number)
1121 dquot->dq_dqb.dqb_curinodes -= number;
1123 dquot->dq_dqb.dqb_curinodes = 0;
1124 if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
1125 dquot->dq_dqb.dqb_itime = (time64_t) 0;
1126 clear_bit(DQ_INODES_B, &dquot->dq_flags);
1129 static void dquot_decr_space(struct dquot *dquot, qsize_t number)
1131 if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
1132 dquot->dq_dqb.dqb_curspace >= number)
1133 dquot->dq_dqb.dqb_curspace -= number;
1135 dquot->dq_dqb.dqb_curspace = 0;
1136 if (dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace <=
1137 dquot->dq_dqb.dqb_bsoftlimit)
1138 dquot->dq_dqb.dqb_btime = (time64_t) 0;
1139 clear_bit(DQ_BLKS_B, &dquot->dq_flags);
1143 struct super_block *w_sb;
1144 struct kqid w_dq_id;
1148 static int warning_issued(struct dquot *dquot, const int warntype)
1150 int flag = (warntype == QUOTA_NL_BHARDWARN ||
1151 warntype == QUOTA_NL_BSOFTLONGWARN) ? DQ_BLKS_B :
1152 ((warntype == QUOTA_NL_IHARDWARN ||
1153 warntype == QUOTA_NL_ISOFTLONGWARN) ? DQ_INODES_B : 0);
1157 return test_and_set_bit(flag, &dquot->dq_flags);
1160 #ifdef CONFIG_PRINT_QUOTA_WARNING
1161 static int flag_print_warnings = 1;
1163 static int need_print_warning(struct dquot_warn *warn)
1165 if (!flag_print_warnings)
1168 switch (warn->w_dq_id.type) {
1170 return uid_eq(current_fsuid(), warn->w_dq_id.uid);
1172 return in_group_p(warn->w_dq_id.gid);
1179 /* Print warning to user which exceeded quota */
1180 static void print_warning(struct dquot_warn *warn)
1183 struct tty_struct *tty;
1184 int warntype = warn->w_type;
1186 if (warntype == QUOTA_NL_IHARDBELOW ||
1187 warntype == QUOTA_NL_ISOFTBELOW ||
1188 warntype == QUOTA_NL_BHARDBELOW ||
1189 warntype == QUOTA_NL_BSOFTBELOW || !need_print_warning(warn))
1192 tty = get_current_tty();
1195 tty_write_message(tty, warn->w_sb->s_id);
1196 if (warntype == QUOTA_NL_ISOFTWARN || warntype == QUOTA_NL_BSOFTWARN)
1197 tty_write_message(tty, ": warning, ");
1199 tty_write_message(tty, ": write failed, ");
1200 tty_write_message(tty, quotatypes[warn->w_dq_id.type]);
1202 case QUOTA_NL_IHARDWARN:
1203 msg = " file limit reached.\r\n";
1205 case QUOTA_NL_ISOFTLONGWARN:
1206 msg = " file quota exceeded too long.\r\n";
1208 case QUOTA_NL_ISOFTWARN:
1209 msg = " file quota exceeded.\r\n";
1211 case QUOTA_NL_BHARDWARN:
1212 msg = " block limit reached.\r\n";
1214 case QUOTA_NL_BSOFTLONGWARN:
1215 msg = " block quota exceeded too long.\r\n";
1217 case QUOTA_NL_BSOFTWARN:
1218 msg = " block quota exceeded.\r\n";
1221 tty_write_message(tty, msg);
1226 static void prepare_warning(struct dquot_warn *warn, struct dquot *dquot,
1229 if (warning_issued(dquot, warntype))
1231 warn->w_type = warntype;
1232 warn->w_sb = dquot->dq_sb;
1233 warn->w_dq_id = dquot->dq_id;
1237 * Write warnings to the console and send warning messages over netlink.
1239 * Note that this function can call into tty and networking code.
1241 static void flush_warnings(struct dquot_warn *warn)
1245 for (i = 0; i < MAXQUOTAS; i++) {
1246 if (warn[i].w_type == QUOTA_NL_NOWARN)
1248 #ifdef CONFIG_PRINT_QUOTA_WARNING
1249 print_warning(&warn[i]);
1251 quota_send_warning(warn[i].w_dq_id,
1252 warn[i].w_sb->s_dev, warn[i].w_type);
1256 static int ignore_hardlimit(struct dquot *dquot)
1258 struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
1260 return capable(CAP_SYS_RESOURCE) &&
1261 (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
1262 !(info->dqi_flags & DQF_ROOT_SQUASH));
1265 static int dquot_add_inodes(struct dquot *dquot, qsize_t inodes,
1266 struct dquot_warn *warn)
1271 spin_lock(&dquot->dq_dqb_lock);
1272 newinodes = dquot->dq_dqb.dqb_curinodes + inodes;
1273 if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type) ||
1274 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1277 if (dquot->dq_dqb.dqb_ihardlimit &&
1278 newinodes > dquot->dq_dqb.dqb_ihardlimit &&
1279 !ignore_hardlimit(dquot)) {
1280 prepare_warning(warn, dquot, QUOTA_NL_IHARDWARN);
1285 if (dquot->dq_dqb.dqb_isoftlimit &&
1286 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1287 dquot->dq_dqb.dqb_itime &&
1288 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
1289 !ignore_hardlimit(dquot)) {
1290 prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
1295 if (dquot->dq_dqb.dqb_isoftlimit &&
1296 newinodes > dquot->dq_dqb.dqb_isoftlimit &&
1297 dquot->dq_dqb.dqb_itime == 0) {
1298 prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
1299 dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
1300 sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
1303 dquot->dq_dqb.dqb_curinodes = newinodes;
1306 spin_unlock(&dquot->dq_dqb_lock);
1310 static int dquot_add_space(struct dquot *dquot, qsize_t space,
1311 qsize_t rsv_space, unsigned int flags,
1312 struct dquot_warn *warn)
1315 struct super_block *sb = dquot->dq_sb;
1318 spin_lock(&dquot->dq_dqb_lock);
1319 if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) ||
1320 test_bit(DQ_FAKE_B, &dquot->dq_flags))
1323 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
1324 + space + rsv_space;
1326 if (dquot->dq_dqb.dqb_bhardlimit &&
1327 tspace > dquot->dq_dqb.dqb_bhardlimit &&
1328 !ignore_hardlimit(dquot)) {
1329 if (flags & DQUOT_SPACE_WARN)
1330 prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN);
1335 if (dquot->dq_dqb.dqb_bsoftlimit &&
1336 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1337 dquot->dq_dqb.dqb_btime &&
1338 ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
1339 !ignore_hardlimit(dquot)) {
1340 if (flags & DQUOT_SPACE_WARN)
1341 prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
1346 if (dquot->dq_dqb.dqb_bsoftlimit &&
1347 tspace > dquot->dq_dqb.dqb_bsoftlimit &&
1348 dquot->dq_dqb.dqb_btime == 0) {
1349 if (flags & DQUOT_SPACE_WARN) {
1350 prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
1351 dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
1352 sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
1355 * We don't allow preallocation to exceed the softlimit, so exceeding it is always reported.
1364 * We have to be careful and go through warning generation & grace time
1365 * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it
1368 if (flags & DQUOT_SPACE_NOFAIL)
1371 dquot->dq_dqb.dqb_rsvspace += rsv_space;
1372 dquot->dq_dqb.dqb_curspace += space;
1374 spin_unlock(&dquot->dq_dqb_lock);
1378 static int info_idq_free(struct dquot *dquot, qsize_t inodes)
1382 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1383 dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit ||
1384 !sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_id.type))
1385 return QUOTA_NL_NOWARN;
1387 newinodes = dquot->dq_dqb.dqb_curinodes - inodes;
1388 if (newinodes <= dquot->dq_dqb.dqb_isoftlimit)
1389 return QUOTA_NL_ISOFTBELOW;
1390 if (dquot->dq_dqb.dqb_curinodes >= dquot->dq_dqb.dqb_ihardlimit &&
1391 newinodes < dquot->dq_dqb.dqb_ihardlimit)
1392 return QUOTA_NL_IHARDBELOW;
1393 return QUOTA_NL_NOWARN;
1396 static int info_bdq_free(struct dquot *dquot, qsize_t space)
1400 tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace;
1402 if (test_bit(DQ_FAKE_B, &dquot->dq_flags) ||
1403 tspace <= dquot->dq_dqb.dqb_bsoftlimit)
1404 return QUOTA_NL_NOWARN;
1406 if (tspace - space <= dquot->dq_dqb.dqb_bsoftlimit)
1407 return QUOTA_NL_BSOFTBELOW;
1408 if (tspace >= dquot->dq_dqb.dqb_bhardlimit &&
1409 tspace - space < dquot->dq_dqb.dqb_bhardlimit)
1410 return QUOTA_NL_BHARDBELOW;
1411 return QUOTA_NL_NOWARN;
1414 static int dquot_active(const struct inode *inode)
1416 struct super_block *sb = inode->i_sb;
1418 if (IS_NOQUOTA(inode))
1420 return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb);
1424 * Initialize quota pointers in inode
1426 * It is better to call this function outside of any transaction as it
1427 * might need a lot of space in journal for dquot structure allocation.
1429 static int __dquot_initialize(struct inode *inode, int type)
1431 int cnt, init_needed = 0;
1432 struct dquot **dquots, *got[MAXQUOTAS] = {};
1433 struct super_block *sb = inode->i_sb;
1437 if (!dquot_active(inode))
1440 dquots = i_dquot(inode);
1442 /* First get references to structures we might need. */
1443 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1447 struct dquot *dquot;
1449 if (type != -1 && cnt != type)
1452 * The i_dquot should have been initialized in most cases,
1453 * we check it without locking here to avoid unnecessary
1454 * dqget()/dqput() calls.
1459 if (!sb_has_quota_active(sb, cnt))
1466 qid = make_kqid_uid(inode->i_uid);
1469 qid = make_kqid_gid(inode->i_gid);
1472 rc = inode->i_sb->dq_op->get_projid(inode, &projid);
1475 qid = make_kqid_projid(projid);
1478 dquot = dqget(sb, qid);
1479 if (IS_ERR(dquot)) {
1480 /* We raced with somebody turning quotas off... */
1481 if (PTR_ERR(dquot) != -ESRCH) {
1482 ret = PTR_ERR(dquot);
1490 /* All required i_dquot has been initialized */
1494 spin_lock(&dq_data_lock);
1495 if (IS_NOQUOTA(inode))
1497 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1498 if (type != -1 && cnt != type)
1500 /* Avoid races with quotaoff() */
1501 if (!sb_has_quota_active(sb, cnt))
1503 /* We could race with quotaon or dqget() could have failed */
1507 dquots[cnt] = got[cnt];
1510 * Make quota reservation system happy if someone
1511 * did a write before quota was turned on
1513 rsv = inode_get_rsv_space(inode);
1514 if (unlikely(rsv)) {
1515 spin_lock(&inode->i_lock);
1516 /* Get reservation again under proper lock */
1517 rsv = __inode_get_rsv_space(inode);
1518 spin_lock(&dquots[cnt]->dq_dqb_lock);
1519 dquots[cnt]->dq_dqb.dqb_rsvspace += rsv;
1520 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1521 spin_unlock(&inode->i_lock);
1526 spin_unlock(&dq_data_lock);
1528 /* Drop unused references */
1534 int dquot_initialize(struct inode *inode)
1536 return __dquot_initialize(inode, -1);
1538 EXPORT_SYMBOL(dquot_initialize);
1540 bool dquot_initialize_needed(struct inode *inode)
1542 struct dquot **dquots;
1545 if (!dquot_active(inode))
1548 dquots = i_dquot(inode);
1549 for (i = 0; i < MAXQUOTAS; i++)
1550 if (!dquots[i] && sb_has_quota_active(inode->i_sb, i))
1554 EXPORT_SYMBOL(dquot_initialize_needed);
1557 * Release all quotas referenced by inode.
1559 * This function can only be called on inode free or when converting
1560 * a file to a quota file; there are no other users of i_dquot in
1561 * either case, so we needn't call synchronize_srcu() after clearing i_dquot.
1564 static void __dquot_drop(struct inode *inode)
1567 struct dquot **dquots = i_dquot(inode);
1568 struct dquot *put[MAXQUOTAS];
1570 spin_lock(&dq_data_lock);
1571 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1572 put[cnt] = dquots[cnt];
1575 spin_unlock(&dq_data_lock);
1579 void dquot_drop(struct inode *inode)
1581 struct dquot * const *dquots;
1584 if (IS_NOQUOTA(inode))
1588 * Test before calling to rule out calls from proc and such
1589 * where we are not allowed to block. Note that this is
1590 * actually a reliable test even without the lock - the caller
1591 * must assure that nobody can come after the DQUOT_DROP and
1592 * add quota pointers back anyway.
1594 dquots = i_dquot(inode);
1595 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1600 if (cnt < MAXQUOTAS)
1601 __dquot_drop(inode);
1603 EXPORT_SYMBOL(dquot_drop);
1606 * inode_reserved_space is managed internally by quota, and protected by
1607 * i_lock similar to i_blocks+i_bytes.
1609 static qsize_t *inode_reserved_space(struct inode * inode)
1611 /* Filesystem must explicitly define its own method in order to use
1612 * the quota reservation interface */
1613 BUG_ON(!inode->i_sb->dq_op->get_reserved_space);
1614 return inode->i_sb->dq_op->get_reserved_space(inode);
1617 static qsize_t __inode_get_rsv_space(struct inode *inode)
1619 if (!inode->i_sb->dq_op->get_reserved_space)
1621 return *inode_reserved_space(inode);
1624 static qsize_t inode_get_rsv_space(struct inode *inode)
1628 if (!inode->i_sb->dq_op->get_reserved_space)
1630 spin_lock(&inode->i_lock);
1631 ret = __inode_get_rsv_space(inode);
1632 spin_unlock(&inode->i_lock);
1637 * This function updates the i_blocks+i_bytes fields and quota information
1638 * (together with appropriate checks).
1640 * NOTE: We absolutely rely on the fact that caller dirties the inode
1641 * (usually helpers in quotaops.h care about this) and holds a handle for
1642 * the current transaction so that dquot write and inode write go into the same transaction.
1647 * This operation can block, but only after everything is updated
1649 int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
1651 int cnt, ret = 0, index;
1652 struct dquot_warn warn[MAXQUOTAS];
1653 int reserve = flags & DQUOT_SPACE_RESERVE;
1654 struct dquot **dquots;
1656 if (!dquot_active(inode)) {
1658 spin_lock(&inode->i_lock);
1659 *inode_reserved_space(inode) += number;
1660 spin_unlock(&inode->i_lock);
1662 inode_add_bytes(inode, number);
1667 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1668 warn[cnt].w_type = QUOTA_NL_NOWARN;
1670 dquots = i_dquot(inode);
1671 index = srcu_read_lock(&dquot_srcu);
1672 spin_lock(&inode->i_lock);
1673 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1677 ret = dquot_add_space(dquots[cnt], 0, number, flags,
1680 ret = dquot_add_space(dquots[cnt], number, 0, flags,
1684 /* Back out changes we already did */
1685 for (cnt--; cnt >= 0; cnt--) {
1688 spin_lock(&dquots[cnt]->dq_dqb_lock);
1690 dquot_free_reserved_space(dquots[cnt],
1693 dquot_decr_space(dquots[cnt], number);
1694 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1696 spin_unlock(&inode->i_lock);
1697 goto out_flush_warn;
1701 *inode_reserved_space(inode) += number;
1703 __inode_add_bytes(inode, number);
1704 spin_unlock(&inode->i_lock);
1707 goto out_flush_warn;
1708 mark_all_dquot_dirty(dquots);
1710 srcu_read_unlock(&dquot_srcu, index);
1711 flush_warnings(warn);
1715 EXPORT_SYMBOL(__dquot_alloc_space);
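/*
 * Filesystems normally reach this through the quotaops.h wrappers
 * (dquot_alloc_space(), dquot_alloc_block(), dquot_reserve_block(), ...)
 * rather than calling it directly. A typical block allocation path looks
 * roughly like this (sketch only; nr_blocks is a placeholder):
 *
 *	err = dquot_alloc_block(inode, nr_blocks);
 *	if (err)
 *		return err;		(typically -EDQUOT when over quota)
 *	... allocate nr_blocks on disk ...
 */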
1718 * This operation can block, but only after everything is updated
1720 int dquot_alloc_inode(struct inode *inode)
1722 int cnt, ret = 0, index;
1723 struct dquot_warn warn[MAXQUOTAS];
1724 struct dquot * const *dquots;
1726 if (!dquot_active(inode))
1728 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
1729 warn[cnt].w_type = QUOTA_NL_NOWARN;
1731 dquots = i_dquot(inode);
1732 index = srcu_read_lock(&dquot_srcu);
1733 spin_lock(&inode->i_lock);
1734 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1737 ret = dquot_add_inodes(dquots[cnt], 1, &warn[cnt]);
1739 for (cnt--; cnt >= 0; cnt--) {
1742 /* Back out changes we already did */
1743 spin_lock(&dquots[cnt]->dq_dqb_lock);
1744 dquot_decr_inodes(dquots[cnt], 1);
1745 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1752 spin_unlock(&inode->i_lock);
1754 mark_all_dquot_dirty(dquots);
1755 srcu_read_unlock(&dquot_srcu, index);
1756 flush_warnings(warn);
1759 EXPORT_SYMBOL(dquot_alloc_inode);
1762 * Convert in-memory reserved quotas to real consumed quotas
1764 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
1766 struct dquot **dquots;
1769 if (!dquot_active(inode)) {
1770 spin_lock(&inode->i_lock);
1771 *inode_reserved_space(inode) -= number;
1772 __inode_add_bytes(inode, number);
1773 spin_unlock(&inode->i_lock);
1777 dquots = i_dquot(inode);
1778 index = srcu_read_lock(&dquot_srcu);
1779 spin_lock(&inode->i_lock);
1780 /* Claim reserved quotas to allocated quotas */
1781 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1783 struct dquot *dquot = dquots[cnt];
1785 spin_lock(&dquot->dq_dqb_lock);
1786 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_rsvspace < number))
1787 number = dquot->dq_dqb.dqb_rsvspace;
1788 dquot->dq_dqb.dqb_curspace += number;
1789 dquot->dq_dqb.dqb_rsvspace -= number;
1790 spin_unlock(&dquot->dq_dqb_lock);
1793 /* Update inode bytes */
1794 *inode_reserved_space(inode) -= number;
1795 __inode_add_bytes(inode, number);
1796 spin_unlock(&inode->i_lock);
1797 mark_all_dquot_dirty(dquots);
1798 srcu_read_unlock(&dquot_srcu, index);
1801 EXPORT_SYMBOL(dquot_claim_space_nodirty);
1804 * Convert allocated space back to in-memory reserved quotas
1806 void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
1808 struct dquot **dquots;
1811 if (!dquot_active(inode)) {
1812 spin_lock(&inode->i_lock);
1813 *inode_reserved_space(inode) += number;
1814 __inode_sub_bytes(inode, number);
1815 spin_unlock(&inode->i_lock);
1819 dquots = i_dquot(inode);
1820 index = srcu_read_lock(&dquot_srcu);
1821 spin_lock(&inode->i_lock);
1822 /* Return allocated quotas back to in-memory reserved quotas */
1823 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1825 struct dquot *dquot = dquots[cnt];
1827 spin_lock(&dquot->dq_dqb_lock);
1828 if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
1829 number = dquot->dq_dqb.dqb_curspace;
1830 dquot->dq_dqb.dqb_rsvspace += number;
1831 dquot->dq_dqb.dqb_curspace -= number;
1832 spin_unlock(&dquot->dq_dqb_lock);
1835 /* Update inode bytes */
1836 *inode_reserved_space(inode) += number;
1837 __inode_sub_bytes(inode, number);
1838 spin_unlock(&inode->i_lock);
1839 mark_all_dquot_dirty(dquots);
1840 srcu_read_unlock(&dquot_srcu, index);
1843 EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
1846 * This operation can block, but only after everything is updated
1848 void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
1851 struct dquot_warn warn[MAXQUOTAS];
1852 struct dquot **dquots;
1853 int reserve = flags & DQUOT_SPACE_RESERVE, index;
1855 if (!dquot_active(inode)) {
1857 spin_lock(&inode->i_lock);
1858 *inode_reserved_space(inode) -= number;
1859 spin_unlock(&inode->i_lock);
1861 inode_sub_bytes(inode, number);
1866 dquots = i_dquot(inode);
1867 index = srcu_read_lock(&dquot_srcu);
1868 spin_lock(&inode->i_lock);
1869 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1872 warn[cnt].w_type = QUOTA_NL_NOWARN;
1875 spin_lock(&dquots[cnt]->dq_dqb_lock);
1876 wtype = info_bdq_free(dquots[cnt], number);
1877 if (wtype != QUOTA_NL_NOWARN)
1878 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1880 dquot_free_reserved_space(dquots[cnt], number);
1882 dquot_decr_space(dquots[cnt], number);
1883 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1886 *inode_reserved_space(inode) -= number;
1888 __inode_sub_bytes(inode, number);
1889 spin_unlock(&inode->i_lock);
1893 mark_all_dquot_dirty(dquots);
1895 srcu_read_unlock(&dquot_srcu, index);
1896 flush_warnings(warn);
1898 EXPORT_SYMBOL(__dquot_free_space);
1901 * This operation can block, but only after everything is updated
1903 void dquot_free_inode(struct inode *inode)
1906 struct dquot_warn warn[MAXQUOTAS];
1907 struct dquot * const *dquots;
1910 if (!dquot_active(inode))
1913 dquots = i_dquot(inode);
1914 index = srcu_read_lock(&dquot_srcu);
1915 spin_lock(&inode->i_lock);
1916 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1919 warn[cnt].w_type = QUOTA_NL_NOWARN;
1922 spin_lock(&dquots[cnt]->dq_dqb_lock);
1923 wtype = info_idq_free(dquots[cnt], 1);
1924 if (wtype != QUOTA_NL_NOWARN)
1925 prepare_warning(&warn[cnt], dquots[cnt], wtype);
1926 dquot_decr_inodes(dquots[cnt], 1);
1927 spin_unlock(&dquots[cnt]->dq_dqb_lock);
1929 spin_unlock(&inode->i_lock);
1930 mark_all_dquot_dirty(dquots);
1931 srcu_read_unlock(&dquot_srcu, index);
1932 flush_warnings(warn);
1934 EXPORT_SYMBOL(dquot_free_inode);
1937 * Transfer the number of inodes and blocks from one diskquota to another.
1938 * On success, dquot references in transfer_to are consumed and references
1939 * to original dquots that need to be released are placed there. On failure,
1940 * references are kept untouched.
1942 * This operation can block, but only after everything is updated
1943 * A transaction must be started when entering this function.
1945 * We are holding references on transfer_from & transfer_to, so there is no
1946 * need to protect them with srcu_read_lock().
1948 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to)
1951 qsize_t rsv_space = 0;
1952 qsize_t inode_usage = 1;
1953 struct dquot *transfer_from[MAXQUOTAS] = {};
1955 char is_valid[MAXQUOTAS] = {};
1956 struct dquot_warn warn_to[MAXQUOTAS];
1957 struct dquot_warn warn_from_inodes[MAXQUOTAS];
1958 struct dquot_warn warn_from_space[MAXQUOTAS];
1960 if (IS_NOQUOTA(inode))
1963 if (inode->i_sb->dq_op->get_inode_usage) {
1964 ret = inode->i_sb->dq_op->get_inode_usage(inode, &inode_usage);
1969 /* Initialize the arrays */
1970 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1971 warn_to[cnt].w_type = QUOTA_NL_NOWARN;
1972 warn_from_inodes[cnt].w_type = QUOTA_NL_NOWARN;
1973 warn_from_space[cnt].w_type = QUOTA_NL_NOWARN;
1976 spin_lock(&dq_data_lock);
1977 spin_lock(&inode->i_lock);
1978 if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
1979 spin_unlock(&inode->i_lock);
1980 spin_unlock(&dq_data_lock);
1983 cur_space = __inode_get_bytes(inode);
1984 rsv_space = __inode_get_rsv_space(inode);
1986 * Build the transfer_from list, check limits, and update usage in
1987 * the target structures.
1989 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
1991 * Skip changes for same uid or gid or for turned off quota-type.
1993 if (!transfer_to[cnt])
1995 /* Avoid races with quotaoff() */
1996 if (!sb_has_quota_active(inode->i_sb, cnt))
1999 transfer_from[cnt] = i_dquot(inode)[cnt];
2000 ret = dquot_add_inodes(transfer_to[cnt], inode_usage,
2004 ret = dquot_add_space(transfer_to[cnt], cur_space, rsv_space,
2005 DQUOT_SPACE_WARN, &warn_to[cnt]);
2007 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2008 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2009 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2014 /* Decrease usage for source structures and update quota pointers */
2015 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2018 /* Due to IO error we might not have transfer_from[] structure */
2019 if (transfer_from[cnt]) {
2022 spin_lock(&transfer_from[cnt]->dq_dqb_lock);
2023 wtype = info_idq_free(transfer_from[cnt], inode_usage);
2024 if (wtype != QUOTA_NL_NOWARN)
2025 prepare_warning(&warn_from_inodes[cnt],
2026 transfer_from[cnt], wtype);
2027 wtype = info_bdq_free(transfer_from[cnt],
2028 cur_space + rsv_space);
2029 if (wtype != QUOTA_NL_NOWARN)
2030 prepare_warning(&warn_from_space[cnt],
2031 transfer_from[cnt], wtype);
2032 dquot_decr_inodes(transfer_from[cnt], inode_usage);
2033 dquot_decr_space(transfer_from[cnt], cur_space);
2034 dquot_free_reserved_space(transfer_from[cnt],
2036 spin_unlock(&transfer_from[cnt]->dq_dqb_lock);
2038 i_dquot(inode)[cnt] = transfer_to[cnt];
2040 spin_unlock(&inode->i_lock);
2041 spin_unlock(&dq_data_lock);
2043 mark_all_dquot_dirty(transfer_from);
2044 mark_all_dquot_dirty(transfer_to);
2045 flush_warnings(warn_to);
2046 flush_warnings(warn_from_inodes);
2047 flush_warnings(warn_from_space);
2048 /* Pass back references to put */
2049 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2051 transfer_to[cnt] = transfer_from[cnt];
2054 /* Back out changes we already did */
2055 for (cnt--; cnt >= 0; cnt--) {
2058 spin_lock(&transfer_to[cnt]->dq_dqb_lock);
2059 dquot_decr_inodes(transfer_to[cnt], inode_usage);
2060 dquot_decr_space(transfer_to[cnt], cur_space);
2061 dquot_free_reserved_space(transfer_to[cnt], rsv_space);
2062 spin_unlock(&transfer_to[cnt]->dq_dqb_lock);
2064 spin_unlock(&inode->i_lock);
2065 spin_unlock(&dq_data_lock);
2066 flush_warnings(warn_to);
2069 EXPORT_SYMBOL(__dquot_transfer);
2071 /* Wrapper for transferring ownership of an inode for uid/gid only
2072 * Called from FSXXX_setattr()
2074 int dquot_transfer(struct inode *inode, struct iattr *iattr)
2076 struct dquot *transfer_to[MAXQUOTAS] = {};
2077 struct dquot *dquot;
2078 struct super_block *sb = inode->i_sb;
2081 if (!dquot_active(inode))
2084 if (iattr->ia_valid & ATTR_UID && !uid_eq(iattr->ia_uid, inode->i_uid)){
2085 dquot = dqget(sb, make_kqid_uid(iattr->ia_uid));
2086 if (IS_ERR(dquot)) {
2087 if (PTR_ERR(dquot) != -ESRCH) {
2088 ret = PTR_ERR(dquot);
2093 transfer_to[USRQUOTA] = dquot;
2095 if (iattr->ia_valid & ATTR_GID && !gid_eq(iattr->ia_gid, inode->i_gid)){
2096 dquot = dqget(sb, make_kqid_gid(iattr->ia_gid));
2097 if (IS_ERR(dquot)) {
2098 if (PTR_ERR(dquot) != -ESRCH) {
2099 ret = PTR_ERR(dquot);
2104 transfer_to[GRPQUOTA] = dquot;
2106 ret = __dquot_transfer(inode, transfer_to);
2108 dqput_all(transfer_to);
2111 EXPORT_SYMBOL(dquot_transfer);
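/*
 * Sketch of the usual call site (names illustrative, not a real filesystem):
 * a filesystem's ->setattr() transfers quota before it changes i_uid/i_gid.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		if (is_quota_modification(inode, attr)) {
 *			error = dquot_initialize(inode);
 *			if (error)
 *				return error;
 *			error = dquot_transfer(inode, attr);
 *			if (error)
 *				return error;
 *		}
 *		...
 *	}
 */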
2114 * Write info of quota file to disk
2116 int dquot_commit_info(struct super_block *sb, int type)
2118 struct quota_info *dqopt = sb_dqopt(sb);
2120 return dqopt->ops[type]->write_file_info(sb, type);
2122 EXPORT_SYMBOL(dquot_commit_info);
2124 int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
2126 struct quota_info *dqopt = sb_dqopt(sb);
2128 if (!sb_has_quota_active(sb, qid->type))
2130 if (!dqopt->ops[qid->type]->get_next_id)
2132 return dqopt->ops[qid->type]->get_next_id(sb, qid);
2134 EXPORT_SYMBOL(dquot_get_next_id);
2137 * Definitions of diskquota operations.
2139 const struct dquot_operations dquot_operations = {
2140 .write_dquot = dquot_commit,
2141 .acquire_dquot = dquot_acquire,
2142 .release_dquot = dquot_release,
2143 .mark_dirty = dquot_mark_dquot_dirty,
2144 .write_info = dquot_commit_info,
2145 .alloc_dquot = dquot_alloc,
2146 .destroy_dquot = dquot_destroy,
2147 .get_next_id = dquot_get_next_id,
2149 EXPORT_SYMBOL(dquot_operations);
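/*
 * A filesystem that wants the generic behaviour can simply point its
 * superblock at this table during mount (sketch, assuming the standard VFS
 * fields; "sb" is the super_block being set up):
 *
 *	sb->dq_op = &dquot_operations;
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;   (or a filesystem-specific quotactl_ops)
 *	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
 */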
2152 * Generic helper for ->open on filesystems supporting disk quotas.
2154 int dquot_file_open(struct inode *inode, struct file *file)
2158 error = generic_file_open(inode, file);
2159 if (!error && (file->f_mode & FMODE_WRITE))
2160 error = dquot_initialize(inode);
2163 EXPORT_SYMBOL(dquot_file_open);
2166 * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
2168 int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2171 struct quota_info *dqopt = sb_dqopt(sb);
2172 struct inode *toputinode[MAXQUOTAS];
2174 /* s_umount should be held in exclusive mode */
2175 if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
2176 up_read(&sb->s_umount);
2178 /* Cannot turn off usage accounting without turning off limits, or
2179 * suspend quotas and simultaneously turn quotas off. */
2180 if ((flags & DQUOT_USAGE_ENABLED && !(flags & DQUOT_LIMITS_ENABLED))
2181 || (flags & DQUOT_SUSPENDED && flags & (DQUOT_LIMITS_ENABLED |
2182 DQUOT_USAGE_ENABLED)))
2186 * Skip everything if there's nothing to do. We have to do this because
2187 * sometimes we are called when fill_super() failed and calling
2188 * sync_fs() in such cases does no good.
2190 if (!sb_any_quota_loaded(sb))
2193 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2194 toputinode[cnt] = NULL;
2195 if (type != -1 && cnt != type)
2197 if (!sb_has_quota_loaded(sb, cnt))
2200 if (flags & DQUOT_SUSPENDED) {
2201 spin_lock(&dq_state_lock);
2203 dquot_state_flag(DQUOT_SUSPENDED, cnt);
2204 spin_unlock(&dq_state_lock);
2206 spin_lock(&dq_state_lock);
2207 dqopt->flags &= ~dquot_state_flag(flags, cnt);
2208 /* Turning off suspended quotas? */
2209 if (!sb_has_quota_loaded(sb, cnt) &&
2210 sb_has_quota_suspended(sb, cnt)) {
2211 dqopt->flags &= ~dquot_state_flag(
2212 DQUOT_SUSPENDED, cnt);
2213 spin_unlock(&dq_state_lock);
2214 iput(dqopt->files[cnt]);
2215 dqopt->files[cnt] = NULL;
2218 spin_unlock(&dq_state_lock);
2221 /* We still have to keep quota loaded? */
2222 if (sb_has_quota_loaded(sb, cnt) && !(flags & DQUOT_SUSPENDED))
2225 /* Note: these are blocking operations */
2226 drop_dquot_ref(sb, cnt);
2227 invalidate_dquots(sb, cnt);
2229 * Now all dquots should be invalidated, all writes done so we
2230 * should be the only users of the info. No locks needed.
2232 if (info_dirty(&dqopt->info[cnt]))
2233 sb->dq_op->write_info(sb, cnt);
2234 if (dqopt->ops[cnt]->free_file_info)
2235 dqopt->ops[cnt]->free_file_info(sb, cnt);
2236 put_quota_format(dqopt->info[cnt].dqi_format);
2238 toputinode[cnt] = dqopt->files[cnt];
2239 if (!sb_has_quota_loaded(sb, cnt))
2240 dqopt->files[cnt] = NULL;
2241 dqopt->info[cnt].dqi_flags = 0;
2242 dqopt->info[cnt].dqi_igrace = 0;
2243 dqopt->info[cnt].dqi_bgrace = 0;
2244 dqopt->ops[cnt] = NULL;
2247 /* Skip syncing and setting flags if quota files are hidden */
2248 if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
2251 /* Sync the superblock so that buffers with quota data are written to
2252 * disk (and so userspace sees correct data afterwards). */
2253 if (sb->s_op->sync_fs)
2254 sb->s_op->sync_fs(sb, 1);
2255 sync_blockdev(sb->s_bdev);
2256 /* Now the quota files are just ordinary files and we can set the
2257 * inode flags back. Moreover we discard the pagecache so that
2258 * userspace sees the writes we did bypassing the pagecache. We
2259 * must also discard the blockdev buffers so that we see the
2260 * changes done by userspace on the next quotaon() */
2261 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2262 /* This can happen when suspending quotas on remount-ro... */
2263 if (toputinode[cnt] && !sb_has_quota_loaded(sb, cnt)) {
2264 inode_lock(toputinode[cnt]);
2265 toputinode[cnt]->i_flags &= ~S_NOQUOTA;
2266 truncate_inode_pages(&toputinode[cnt]->i_data, 0);
2267 inode_unlock(toputinode[cnt]);
2268 mark_inode_dirty_sync(toputinode[cnt]);
2271 invalidate_bdev(sb->s_bdev);
2273 for (cnt = 0; cnt < MAXQUOTAS; cnt++)
2274 if (toputinode[cnt]) {
2275 /* On remount RO, we keep the inode pointer so that we
2276 * can reenable quota on the subsequent remount RW. We
2277 * have to check 'flags' variable and not use sb_has_
2278 * function because another quotaon / quotaoff could
2279 * change global state before we got here. We refuse
2280 * to suspend quotas when there is pending delete on
2281 * the quota file... */
2282 if (!(flags & DQUOT_SUSPENDED))
2283 iput(toputinode[cnt]);
2284 else if (!toputinode[cnt]->i_nlink)
2289 EXPORT_SYMBOL(dquot_disable);
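
/*
 * Illustrative sketch: typical callers. On remount read-only a filesystem
 * suspends all types via the dquot_suspend() wrapper from quotaops.h, and
 * on umount it turns quotas off for good:
 *
 *	dquot_suspend(sb, -1);		// == dquot_disable(sb, -1, DQUOT_SUSPENDED)
 *	...
 *	dquot_quota_off(sb, type);	// full quotaoff: usage + limits
 */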

int dquot_quota_off(struct super_block *sb, int type)
{
	return dquot_disable(sb, type,
			     DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
}
EXPORT_SYMBOL(dquot_quota_off);

/*
 * Turn quotas on for a device
 */

/*
 * Helper function to turn quotas on when we already have the inode of the
 * quota file and no quota information is loaded.
 */
static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
	unsigned int flags)
{
	struct quota_format_type *fmt = find_quota_format(format_id);
	struct super_block *sb = inode->i_sb;
	struct quota_info *dqopt = sb_dqopt(sb);
	int error;

	if (!fmt)
		return -ESRCH;
	if (!S_ISREG(inode->i_mode)) {
		error = -EACCES;
		goto out_fmt;
	}
	if (IS_RDONLY(inode)) {
		error = -EROFS;
		goto out_fmt;
	}
	if (!sb->s_op->quota_write || !sb->s_op->quota_read ||
	    (type == PRJQUOTA && sb->dq_op->get_projid == NULL)) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Filesystems outside of init_user_ns not yet supported */
	if (sb->s_user_ns != &init_user_ns) {
		error = -EINVAL;
		goto out_fmt;
	}
	/* Usage always has to be set... */
	if (!(flags & DQUOT_USAGE_ENABLED)) {
		error = -EINVAL;
		goto out_fmt;
	}
	if (sb_has_quota_loaded(sb, type)) {
		error = -EBUSY;
		goto out_fmt;
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* As we bypass the pagecache we must now flush all the
		 * dirty data and invalidate caches so that kernel sees
		 * changes from userspace. It is not enough to just flush
		 * the quota file since if blocksize < pagesize, invalidation
		 * of the cache could fail because of other unrelated dirty
		 * data */
		sync_filesystem(sb);
		invalidate_bdev(sb->s_bdev);
	}

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE)) {
		/* We don't want quota and atime on quota files (deadlocks
		 * possible). Also nobody should write to the file - we use
		 * special IO operations which ignore the immutable bit. */
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
		/*
		 * When S_NOQUOTA is set, remove dquot references as no more
		 * references can be added
		 */
		__dquot_drop(inode);
	}

	error = -EIO;
	dqopt->files[type] = igrab(inode);
	if (!dqopt->files[type])
		goto out_file_flags;
	error = -EINVAL;
	if (!fmt->qf_ops->check_quota_file(sb, type))
		goto out_file_init;

	dqopt->ops[type] = fmt->qf_ops;
	dqopt->info[type].dqi_format = fmt;
	dqopt->info[type].dqi_fmt_id = format_id;
	INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
	error = dqopt->ops[type]->read_file_info(sb, type);
	if (error < 0)
		goto out_file_init;
	if (dqopt->flags & DQUOT_QUOTA_SYS_FILE) {
		spin_lock(&dq_data_lock);
		dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
		spin_unlock(&dq_data_lock);
	}
	spin_lock(&dq_state_lock);
	dqopt->flags |= dquot_state_flag(flags, type);
	spin_unlock(&dq_state_lock);

	error = add_dquot_ref(sb, type);
	if (error)
		dquot_disable(sb, type, flags);

	return error;
out_file_init:
	dqopt->files[type] = NULL;
	iput(inode);
out_file_flags:
	inode_lock(inode);
	inode->i_flags &= ~S_NOQUOTA;
	inode_unlock(inode);
out_fmt:
	put_quota_format(fmt);

	return error;
}

/* Reenable quotas on remount RW */
int dquot_resume(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);
	struct inode *inode;
	int ret = 0, cnt;
	unsigned int flags;

	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_suspended(sb, cnt))
			continue;

		inode = dqopt->files[cnt];
		dqopt->files[cnt] = NULL;
		spin_lock(&dq_state_lock);
		flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
							DQUOT_LIMITS_ENABLED,
							cnt);
		dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, cnt);
		spin_unlock(&dq_state_lock);

		flags = dquot_generic_flag(flags, cnt);
		ret = vfs_load_quota_inode(inode, cnt,
					   dqopt->info[cnt].dqi_fmt_id, flags);
		iput(inode);
	}

	return ret;
}
EXPORT_SYMBOL(dquot_resume);
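
/*
 * Illustrative sketch: the counterpart to suspension. A remount read-write
 * path simply reloads every previously suspended type:
 *
 *	if (!(*flags & SB_RDONLY))
 *		ret = dquot_resume(sb, -1);
 */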

int dquot_quota_on(struct super_block *sb, int type, int format_id,
		   const struct path *path)
{
	int error = security_quota_on(path->dentry);

	if (error)
		return error;
	/* Quota file not on the same filesystem? */
	if (path->dentry->d_sb != sb)
		error = -EXDEV;
	else
		error = vfs_load_quota_inode(d_inode(path->dentry), type,
					     format_id, DQUOT_USAGE_ENABLED |
					     DQUOT_LIMITS_ENABLED);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on);
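
/*
 * Illustrative sketch: for filesystems with visible quota files this is the
 * kernel side of a Q_QUOTAON quotactl. The equivalent userspace request
 * (error handling omitted) would be:
 *
 *	#include <sys/quota.h>
 *
 *	quotactl(QCMD(Q_QUOTAON, USRQUOTA), "/dev/sda1", QFMT_VFS_V1,
 *		 (caddr_t)"/mnt/aquota.user");
 */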

/*
 * More powerful function for turning on quotas allowing setting
 * of individual quota flags
 */
int dquot_enable(struct inode *inode, int type, int format_id,
		 unsigned int flags)
{
	struct super_block *sb = inode->i_sb;

	/* Just unsuspend quotas? */
	BUG_ON(flags & DQUOT_SUSPENDED);
	/* s_umount should be held in exclusive mode */
	if (WARN_ON_ONCE(down_read_trylock(&sb->s_umount)))
		up_read(&sb->s_umount);

	if (!flags)
		return 0;
	/* Just updating flags needed? */
	if (sb_has_quota_loaded(sb, type)) {
		if (flags & DQUOT_USAGE_ENABLED &&
		    sb_has_quota_usage_enabled(sb, type))
			return -EBUSY;
		if (flags & DQUOT_LIMITS_ENABLED &&
		    sb_has_quota_limits_enabled(sb, type))
			return -EBUSY;
		spin_lock(&dq_state_lock);
		sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
		spin_unlock(&dq_state_lock);
		return 0;
	}

	return vfs_load_quota_inode(inode, type, format_id, flags);
}
EXPORT_SYMBOL(dquot_enable);

/*
 * This function is used when a filesystem needs to initialize quotas at
 * mount time.
 */
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
		int format_id, int type)
{
	struct dentry *dentry;
	int error;

	dentry = lookup_one_len_unlocked(qf_name, sb->s_root, strlen(qf_name));
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (d_really_is_negative(dentry)) {
		error = -ENOENT;
		goto out;
	}

	error = security_quota_on(dentry);
	if (!error)
		error = vfs_load_quota_inode(d_inode(dentry), type, format_id,
				DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

out:
	dput(dentry);
	return error;
}
EXPORT_SYMBOL(dquot_quota_on_mount);
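
/*
 * Illustrative sketch: journaled-quota setups pass the quota file name as a
 * mount option and enable quotas from the mount path before the namespace
 * is visible. The sbi->s_qf_names field below is hypothetical:
 *
 *	err = dquot_quota_on_mount(sb, sbi->s_qf_names[USRQUOTA],
 *				   QFMT_VFS_V1, USRQUOTA);
 */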

static int dquot_quota_enable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/* Accounting cannot be turned on while fs is mounted */
	flags &= ~(FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT);
	if (!flags)
		return -EINVAL;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!(flags & qtype_enforce_flag(type)))
			continue;
		/* Can't enforce without accounting */
		if (!sb_has_quota_usage_enabled(sb, type))
			return -EINVAL;
		ret = dquot_enable(dqopt->files[type], type,
				   dqopt->info[type].dqi_fmt_id,
				   DQUOT_LIMITS_ENABLED);
		if (ret < 0)
			goto out_err;
	}
	return 0;
out_err:
	/* Backout enforcement enablement we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
	}
	/* Error code translation for better compatibility with XFS */
	if (ret == -EBUSY)
		ret = -EEXIST;
	return ret;
}

static int dquot_quota_disable(struct super_block *sb, unsigned int flags)
{
	int ret;
	int type;
	struct quota_info *dqopt = sb_dqopt(sb);

	if (!(dqopt->flags & DQUOT_QUOTA_SYS_FILE))
		return -ENOSYS;
	/*
	 * We don't support turning off accounting via quotactl. In principle
	 * the quota infrastructure could do this, but filesystems don't
	 * expect userspace to be able to do it.
	 */
	if (flags &
		  (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT | FS_QUOTA_PDQ_ACCT))
		return -EOPNOTSUPP;

	/* Filter out limits not enabled */
	for (type = 0; type < MAXQUOTAS; type++)
		if (!sb_has_quota_limits_enabled(sb, type))
			flags &= ~qtype_enforce_flag(type);
	/* Nothing left? */
	if (!flags)
		return -EEXIST;
	for (type = 0; type < MAXQUOTAS; type++) {
		if (flags & qtype_enforce_flag(type)) {
			ret = dquot_disable(sb, type, DQUOT_LIMITS_ENABLED);
			if (ret < 0)
				goto out_err;
		}
	}
	return 0;
out_err:
	/* Backout enforcement disabling we already did */
	for (type--; type >= 0; type--) {
		if (flags & qtype_enforce_flag(type))
			dquot_enable(dqopt->files[type], type,
				     dqopt->info[type].dqi_fmt_id,
				     DQUOT_LIMITS_ENABLED);
	}
	return ret;
}

/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;

	memset(di, 0, sizeof(*di));
	spin_lock(&dquot->dq_dqb_lock);
	di->d_spc_hardlimit = dm->dqb_bhardlimit;
	di->d_spc_softlimit = dm->dqb_bsoftlimit;
	di->d_ino_hardlimit = dm->dqb_ihardlimit;
	di->d_ino_softlimit = dm->dqb_isoftlimit;
	di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
	di->d_ino_count = dm->dqb_curinodes;
	di->d_spc_timer = dm->dqb_btime;
	di->d_ino_timer = dm->dqb_itime;
	spin_unlock(&dquot->dq_dqb_lock);
}

int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_dqblk);
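
/*
 * Illustrative sketch: reading the usage and limits for uid 1000 from inside
 * the kernel (error handling abbreviated):
 *
 *	struct qc_dqblk di;
 *	int err = dquot_get_dqblk(sb, make_kqid(&init_user_ns, USRQUOTA, 1000),
 *				  &di);
 *	// on success, di.d_space is bytes used (including reservations) and
 *	// di.d_spc_hardlimit is the hard byte limit
 */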

int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
			 struct qc_dqblk *di)
{
	struct dquot *dquot;
	int err;

	if (!sb->dq_op->get_next_id)
		return -ENOSYS;
	err = sb->dq_op->get_next_id(sb, qid);
	if (err < 0)
		return err;
	dquot = dqget(sb, *qid);
	if (IS_ERR(dquot))
		return PTR_ERR(dquot);
	do_get_dqblk(dquot, di);
	dqput(dquot);

	return 0;
}
EXPORT_SYMBOL(dquot_get_next_dqblk);

#define VFS_QC_MASK \
	(QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
	 QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
	 QC_SPC_TIMER | QC_INO_TIMER)

/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
	struct mem_dqblk *dm = &dquot->dq_dqb;
	int check_blim = 0, check_ilim = 0;
	struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];

	if (di->d_fieldmask & ~VFS_QC_MASK)
		return -EINVAL;

	if (((di->d_fieldmask & QC_SPC_SOFT) &&
	     di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_SPC_HARD) &&
	     di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
	    ((di->d_fieldmask & QC_INO_SOFT) &&
	     (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
	    ((di->d_fieldmask & QC_INO_HARD) &&
	     (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
		return -ERANGE;

	spin_lock(&dquot->dq_dqb_lock);
	if (di->d_fieldmask & QC_SPACE) {
		dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_SOFT)
		dm->dqb_bsoftlimit = di->d_spc_softlimit;
	if (di->d_fieldmask & QC_SPC_HARD)
		dm->dqb_bhardlimit = di->d_spc_hardlimit;
	if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_COUNT) {
		dm->dqb_curinodes = di->d_ino_count;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_SOFT)
		dm->dqb_isoftlimit = di->d_ino_softlimit;
	if (di->d_fieldmask & QC_INO_HARD)
		dm->dqb_ihardlimit = di->d_ino_hardlimit;
	if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_SPC_TIMER) {
		dm->dqb_btime = di->d_spc_timer;
		check_blim = 1;
		set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
	}

	if (di->d_fieldmask & QC_INO_TIMER) {
		dm->dqb_itime = di->d_ino_timer;
		check_ilim = 1;
		set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
	}

	if (check_blim) {
		if (!dm->dqb_bsoftlimit ||
		    dm->dqb_curspace + dm->dqb_rsvspace < dm->dqb_bsoftlimit) {
			dm->dqb_btime = 0;
			clear_bit(DQ_BLKS_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_SPC_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
	}
	if (check_ilim) {
		if (!dm->dqb_isoftlimit ||
		    dm->dqb_curinodes < dm->dqb_isoftlimit) {
			dm->dqb_itime = 0;
			clear_bit(DQ_INODES_B, &dquot->dq_flags);
		} else if (!(di->d_fieldmask & QC_INO_TIMER))
			/* Set grace only if user hasn't provided his own... */
			dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
	}
	if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
	    dm->dqb_isoftlimit)
		clear_bit(DQ_FAKE_B, &dquot->dq_flags);
	else
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	mark_dquot_dirty(dquot);

	return 0;
}

int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
		    struct qc_dqblk *di)
{
	struct dquot *dquot;
	int rc;

	dquot = dqget(sb, qid);
	if (IS_ERR(dquot)) {
		rc = PTR_ERR(dquot);
		goto out;
	}
	rc = do_set_dqblk(dquot, di);
	dqput(dquot);
out:
	return rc;
}
EXPORT_SYMBOL(dquot_set_dqblk);
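
/*
 * Illustrative sketch: d_fieldmask selects which qc_dqblk fields are applied;
 * everything else is left untouched. Setting only the byte limits for gid
 * 100 (the limit values are examples):
 *
 *	struct qc_dqblk di = {
 *		.d_fieldmask	 = QC_SPC_SOFT | QC_SPC_HARD,
 *		.d_spc_softlimit = 1ULL << 30,		// 1 GiB soft
 *		.d_spc_hardlimit = 2ULL << 30,		// 2 GiB hard
 *	};
 *	err = dquot_set_dqblk(sb, make_kqid(&init_user_ns, GRPQUOTA, 100), &di);
 */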

/* Generic routine for getting common part of quota file information */
int dquot_get_state(struct super_block *sb, struct qc_state *state)
{
	struct mem_dqinfo *mi;
	struct qc_type_state *tstate;
	struct quota_info *dqopt = sb_dqopt(sb);
	int type;

	memset(state, 0, sizeof(*state));
	for (type = 0; type < MAXQUOTAS; type++) {
		if (!sb_has_quota_active(sb, type))
			continue;
		tstate = state->s_state + type;
		mi = sb_dqopt(sb)->info + type;
		tstate->flags = QCI_ACCT_ENABLED;
		spin_lock(&dq_data_lock);
		if (mi->dqi_flags & DQF_SYS_FILE)
			tstate->flags |= QCI_SYSFILE;
		if (mi->dqi_flags & DQF_ROOT_SQUASH)
			tstate->flags |= QCI_ROOT_SQUASH;
		if (sb_has_quota_limits_enabled(sb, type))
			tstate->flags |= QCI_LIMITS_ENFORCED;
		tstate->spc_timelimit = mi->dqi_bgrace;
		tstate->ino_timelimit = mi->dqi_igrace;
		tstate->ino = dqopt->files[type]->i_ino;
		tstate->blocks = dqopt->files[type]->i_blocks;
		tstate->nextents = 1;	/* We don't know... */
		spin_unlock(&dq_data_lock);
	}
	return 0;
}
EXPORT_SYMBOL(dquot_get_state);

/* Generic routine for setting common part of quota file information */
int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii)
{
	struct mem_dqinfo *mi;
	int err = 0;

	if ((ii->i_fieldmask & QC_WARNS_MASK) ||
	    (ii->i_fieldmask & QC_RT_SPC_TIMER))
		return -EINVAL;
	if (!sb_has_quota_active(sb, type))
		return -ESRCH;
	mi = sb_dqopt(sb)->info + type;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH &&
		    mi->dqi_format->qf_fmt_id != QFMT_VFS_OLD)
			return -EINVAL;
	}
	spin_lock(&dq_data_lock);
	if (ii->i_fieldmask & QC_SPC_TIMER)
		mi->dqi_bgrace = ii->i_spc_timelimit;
	if (ii->i_fieldmask & QC_INO_TIMER)
		mi->dqi_igrace = ii->i_ino_timelimit;
	if (ii->i_fieldmask & QC_FLAGS) {
		if (ii->i_flags & QCI_ROOT_SQUASH)
			mi->dqi_flags |= DQF_ROOT_SQUASH;
		else
			mi->dqi_flags &= ~DQF_ROOT_SQUASH;
	}
	spin_unlock(&dq_data_lock);
	mark_info_dirty(sb, type);
	/* Force write to disk */
	sb->dq_op->write_info(sb, type);
	return err;
}
EXPORT_SYMBOL(dquot_set_dqinfo);
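
/*
 * Illustrative sketch: the only info fields the generic code accepts are the
 * grace periods and QC_FLAGS, so updating the per-type grace times looks
 * like (values are examples, in seconds):
 *
 *	struct qc_info ii = {
 *		.i_fieldmask	 = QC_SPC_TIMER | QC_INO_TIMER,
 *		.i_spc_timelimit = 7 * 24 * 3600,	// one week for blocks
 *		.i_ino_timelimit = 3 * 24 * 3600,	// three days for inodes
 *	};
 *	err = dquot_set_dqinfo(sb, USRQUOTA, &ii);
 */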

const struct quotactl_ops dquot_quotactl_sysfile_ops = {
	.quota_enable	= dquot_quota_enable,
	.quota_disable	= dquot_quota_disable,
	.quota_sync	= dquot_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
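
/*
 * Illustrative sketch (exact flag handling varies by filesystem): a
 * filesystem keeping its quota data in hidden system inodes exports the
 * table above and marks the quota files as system files, roughly:
 *
 *	sb->s_qcop = &dquot_quotactl_sysfile_ops;
 *	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
 */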

static int do_proc_dqstats(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int type = (int *)table->data - dqstats.stat;

	/* Update global table */
	dqstats.stat[type] =
			percpu_counter_sum_positive(&dqstats.counter[type]);
	return proc_dointvec(table, write, buffer, lenp, ppos);
}

static struct ctl_table fs_dqstats_table[] = {
	{
		.procname	= "lookups",
		.data		= &dqstats.stat[DQST_LOOKUPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "drops",
		.data		= &dqstats.stat[DQST_DROPS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "reads",
		.data		= &dqstats.stat[DQST_READS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "writes",
		.data		= &dqstats.stat[DQST_WRITES],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "cache_hits",
		.data		= &dqstats.stat[DQST_CACHE_HITS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "allocated_dquots",
		.data		= &dqstats.stat[DQST_ALLOC_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "free_dquots",
		.data		= &dqstats.stat[DQST_FREE_DQUOTS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
	{
		.procname	= "syncs",
		.data		= &dqstats.stat[DQST_SYNCS],
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= do_proc_dqstats,
	},
#ifdef CONFIG_PRINT_QUOTA_WARNING
	{
		.procname	= "warnings",
		.data		= &flag_print_warnings,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
#endif
	{ },
};

static struct ctl_table fs_table[] = {
	{
		.procname	= "quota",
		.mode		= 0555,
		.child		= fs_dqstats_table,
	},
	{ },
};

static struct ctl_table sys_table[] = {
	{
		.procname	= "fs",
		.mode		= 0555,
		.child		= fs_table,
	},
	{ },
};

static int __init dquot_init(void)
{
	int i, ret;
	unsigned long nr_hash, order;

	printk(KERN_NOTICE "VFS: Disk quotas %s\n", __DQUOT_VERSION__);

	register_sysctl_table(sys_table);

	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			(SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_PANIC),
			NULL);

	order = 0;
	dquot_hash = (struct hlist_head *)__get_free_pages(GFP_KERNEL, order);
	if (!dquot_hash)
		panic("Cannot create dquot hash table");

	for (i = 0; i < _DQST_DQSTAT_LAST; i++) {
		ret = percpu_counter_init(&dqstats.counter[i], 0, GFP_KERNEL);
		if (ret)
			panic("Cannot create dquot stat counters");
	}

	/* Find the power-of-two number of hlist_heads which fits into the
	 * allocation */
	nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct hlist_head);
	dq_hash_bits = 0;
	do {
		dq_hash_bits++;
	} while (nr_hash >> dq_hash_bits);
	dq_hash_bits--;
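
	/*
	 * Worked example (added note): with 4 KiB pages, order 0 and 8-byte
	 * hlist_heads, nr_hash = 4096 / 8 = 512. The loop exits once
	 * 512 >> dq_hash_bits == 0, i.e. with dq_hash_bits == 10, and the
	 * decrement brings it back to 9, so 1 << 9 == 512 buckets exactly
	 * fill the allocation.
	 */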

	nr_hash = 1UL << dq_hash_bits;
	dq_hash_mask = nr_hash - 1;
	for (i = 0; i < nr_hash; i++)
		INIT_HLIST_HEAD(dquot_hash + i);

	pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
		" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));

	if (register_shrinker(&dqcache_shrinker))
		panic("Cannot register dquot shrinker");

	return 0;
}
fs_initcall(dquot_init);