// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_icache.h"

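/*
 * Write the quotaoff "start" intent to the log together with the updated
 * superblock quota flags, in a synchronous transaction. The start log item
 * is handed back to the caller so the matching "end" record can be logged
 * once the quotaoff work has finished.
 */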
STATIC int
xfs_qm_log_quotaoff(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	**qoffstartp,
	uint			flags)
{
	struct xfs_trans	*tp;
	int			error;
	struct xfs_qoff_logitem	*qoffi;

	*qoffstartp = NULL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
	if (error)
		goto out;

	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);

	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	xfs_log_sb(tp);

	/*
	 * We have to make sure that the transaction is secure on disk before
	 * we return and actually stop quota accounting. So, make it
	 * synchronous. We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	*qoffstartp = qoffi;
out:
	return error;
}

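/*
 * Log the quotaoff "end" record that pairs with the start record written
 * above. Once this record is on disk, log recovery knows the quotaoff
 * completed and the start item no longer pins the log tail.
 */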
STATIC int
xfs_qm_log_quotaoff_end(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	**startqoff,
	uint			flags)
{
	struct xfs_trans	*tp;
	int			error;
	struct xfs_qoff_logitem	*qoffi;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
	if (error)
		return error;

	qoffi = xfs_trans_get_qoff_item(tp, *startqoff,
					flags & XFS_ALL_QUOTA_ACCT);
	xfs_trans_log_quotaoff_item(tp, qoffi);
	*startqoff = NULL;

	/*
	 * We have to make sure that the transaction is secure on disk before
	 * we return and actually stop quota accounting. So, make it
	 * synchronous. We don't care about quotaoff's performance.
	 */
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	uint			dqtype;
	int			error;
	uint			inactivate_flags;
	struct xfs_qoff_logitem	*qoffstart = NULL;

	/*
	 * No file system can have quotas enabled on disk but not in core.
	 * Note that quota utilities (like quotaoff) _expect_
	 * errno == -EEXIST here.
	 */
	if ((mp->m_qflags & flags) == 0)
		return -EEXIST;
	error = 0;

	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

	/*
	 * We don't want to deal with two quotaoffs messing up each other,
	 * so we're going to serialize it. quotaoff isn't exactly a performance
	 * critical thing.
	 * If quotaoff, then we must be dealing with the root filesystem.
	 */
	ASSERT(q);
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * If we're just turning off quota enforcement, change mp and go.
	 */
	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
		mp->m_qflags &= ~(flags);

		spin_lock(&mp->m_sb_lock);
		mp->m_sb.sb_qflags = mp->m_qflags;
		spin_unlock(&mp->m_sb_lock);
		mutex_unlock(&q->qi_quotaofflock);

		/* XXX what to do if error? Revert back to old vals incore? */
		return xfs_sync_sb(mp, false);
	}

	dqtype = 0;
	inactivate_flags = 0;
	/*
	 * If accounting is off, we must turn enforcement off, clear the
	 * quota 'CHKD' certificate to make it known that we have to
	 * do a quotacheck the next time this quota is turned on.
	 */
	if (flags & XFS_UQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_UQUOTA;
		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
		inactivate_flags |= XFS_UQUOTA_ACTIVE;
	}
	if (flags & XFS_GQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_GQUOTA;
		flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
		inactivate_flags |= XFS_GQUOTA_ACTIVE;
	}
	if (flags & XFS_PQUOTA_ACCT) {
		dqtype |= XFS_QMOPT_PQUOTA;
		flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
		inactivate_flags |= XFS_PQUOTA_ACTIVE;
	}

	/*
	 * Nothing to do?  Don't complain. This happens when we're just
	 * turning off quota enforcement.
	 */
	if ((mp->m_qflags & flags) == 0)
		goto out_unlock;

	/*
	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
	 * and synchronously. If we fail to write, we should abort the
	 * operation as it cannot be recovered safely if we crash.
	 */
	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
	if (error)
		goto out_unlock;

	/*
	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
	 * to take care of the race between dqget and quotaoff. We don't take
	 * any special locks to reset these bits. All processes need to check
	 * these bits *after* taking inode lock(s) to see if the particular
	 * quota type is in the process of being turned off. If *ACTIVE, it is
	 * guaranteed that all dquot structures and all quotainode ptrs will
	 * all stay valid as long as that inode is kept locked.
	 *
	 * There is no turning back after this.
	 */
	mp->m_qflags &= ~inactivate_flags;

	/*
	 * Give back all the dquot reference(s) held by inodes.
	 * Here we go thru every single incore inode in this file system, and
	 * do a dqrele on the i_udquot/i_gdquot that it may have.
	 * Essentially, as long as somebody has an inode locked, this guarantees
	 * that quotas will not be turned off. This is handy because in a
	 * transaction once we lock the inode(s) and check for quotaon, we can
	 * depend on the quota inodes (and other things) being valid as long as
	 * we keep the lock(s).
	 */
	xfs_qm_dqrele_all_inodes(mp, flags);

	/*
	 * Next we make the changes in the quota flag in the mount struct.
	 * This isn't protected by a particular lock directly, because we
	 * don't want to take a mrlock every time we depend on quotas being on.
	 */
	mp->m_qflags &= ~flags;

	/*
	 * Go through all the dquots of this file system and purge them,
	 * according to what was turned off.
	 */
	xfs_qm_dqpurge_all(mp, dqtype);

	/*
	 * Transactions that had started before ACTIVE state bit was cleared
	 * could have logged many dquots, so they'd have higher LSNs than
	 * the first QUOTAOFF log record does. If we happen to crash when
	 * the tail of the log has gone past the QUOTAOFF record, but
	 * before the last dquot modification, those dquots __will__
	 * recover, and that's not good.
	 *
	 * So, we have QUOTAOFF start and end logitems; the start
	 * logitem won't get overwritten until the end logitem appears...
	 */
	error = xfs_qm_log_quotaoff_end(mp, &qoffstart, flags);
	if (error) {
		/* We're screwed now. Shutdown is the only option. */
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		goto out_unlock;
	}

	/*
	 * If all quotas are completely turned off, close shop.
	 */
	if (mp->m_qflags == 0) {
		mutex_unlock(&q->qi_quotaofflock);
		xfs_qm_destroy_quotainfo(mp);
		return 0;
	}

	/*
	 * Release our quotainode references if we don't need them anymore.
	 */
	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
		xfs_irele(q->qi_uquotaip);
		q->qi_uquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
		xfs_irele(q->qi_gquotaip);
		q->qi_gquotaip = NULL;
	}
	if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
		xfs_irele(q->qi_pquotaip);
		q->qi_pquotaip = NULL;
	}

out_unlock:
	if (error && qoffstart)
		xfs_qm_qoff_logitem_relse(qoffstart);
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

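/*
 * Illustrative note (not in the original source): xfs_qm_scall_quotaoff()
 * above is normally reached from userspace through the Q_XQUOTAOFF
 * quotactl command; a hypothetical sketch, with flag names taken from
 * <linux/dqblk_xfs.h>:
 *
 *	unsigned int xflags = FS_QUOTA_UDQ_ENFD;   // stop enforcing user quota
 *	quotactl(QCMD(Q_XQUOTAOFF, USRQUOTA), "/dev/sdb1", 0, (caddr_t)&xflags);
 *
 * The VFS quota layer translates those FS_QUOTA_* bits into the
 * XFS_*QUOTA_* flags consumed here.
 */
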
STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp);
		goto out_unlock;
	}

	ASSERT(ip->i_df.if_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	xfs_irele(ip);
	return error;
}

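/*
 * Truncate the quota inodes selected by @flags to zero length. This backs
 * the Q_XQUOTARM quotactl command, which discards the on-disk quota
 * metadata while quotas are switched off.
 */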
int
xfs_qm_scall_trunc_qfiles(
	struct xfs_mount	*mp,
	uint			flags)
{
	int			error = -EINVAL;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 ||
	    (flags & ~XFS_QMOPT_QUOTALL)) {
		xfs_debug(mp, "%s: flags=%x m_qflags=%x",
			__func__, flags, mp->m_qflags);
		return -EINVAL;
	}

	if (flags & XFS_QMOPT_UQUOTA) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
		if (error)
			return error;
	}
	if (flags & XFS_QMOPT_GQUOTA) {
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
		if (error)
			return error;
	}
	if (flags & XFS_QMOPT_PQUOTA)
		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino);

	return error;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
	struct xfs_mount	*mp,
	uint			flags)
{
	int			error;
	uint			qf;

	/*
	 * Switching on quota accounting must be done at mount time,
	 * only consider quota enforcement stuff here.
	 */
	flags &= XFS_ALL_QUOTA_ENFD;

	if (flags == 0) {
		xfs_debug(mp, "%s: zero flags, m_qflags=%x",
			__func__, mp->m_qflags);
		return -EINVAL;
	}

	/*
	 * Can't enforce without accounting. We check the superblock
	 * qflags here instead of m_qflags because rootfs can have
	 * quota acct on ondisk without m_qflags' knowing.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
	     (flags & XFS_UQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
	     (flags & XFS_GQUOTA_ENFD)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
	     (flags & XFS_PQUOTA_ENFD))) {
		xfs_debug(mp,
			"%s: Can't enforce without acct, flags=%x sbflags=%x",
			__func__, flags, mp->m_sb.sb_qflags);
		return -EINVAL;
	}
	/*
	 * If everything's up to date incore, then don't waste time.
	 */
	if ((mp->m_qflags & flags) == flags)
		return -EEXIST;

	/*
	 * Change sb_qflags on disk but not incore mp->qflags
	 * if this is the root filesystem.
	 */
	spin_lock(&mp->m_sb_lock);
	qf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = qf | flags;
	spin_unlock(&mp->m_sb_lock);

	/*
	 * There's nothing to change if it's the same.
	 */
	if ((qf & flags) == flags)
		return -EEXIST;

	error = xfs_sync_sb(mp, false);
	if (error)
		return error;

	/*
	 * If we aren't trying to switch on quota enforcement, we are done.
	 */
	if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
	    ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
	     (mp->m_qflags & XFS_GQUOTA_ACCT)))
		return 0;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return -ESRCH;

	/*
	 * Switch on quota enforcement in core.
	 */
	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

	return 0;
}

#define XFS_QC_MASK \
	(QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
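/*
 * Only limit, timer and warning count fields of a qc_dqblk may be changed
 * through setqlim; a d_fieldmask with any other bit set is rejected with
 * -EINVAL in xfs_qm_scall_setqlim() below.
 */
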
/*
 * Adjust limits of this quota, and the defaults if passed in. Returns true
 * if the new limits made sense and were applied, false otherwise.
 */
static inline bool
xfs_setqlim_limits(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	xfs_qcnt_t		hard,
	xfs_qcnt_t		soft,
	const char		*tag)
{
	/* The hard limit can't be less than the soft limit. */
	if (hard != 0 && hard < soft) {
		xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag,
				soft);
		return false;
	}

	res->hardlimit = hard;
	res->softlimit = soft;
	if (qlim) {
		qlim->hard = hard;
		qlim->soft = soft;
	}

	return true;
}

static inline void
xfs_setqlim_warns(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int			warns)
{
	res->warnings = warns;
	if (qlim)
		qlim->warn = warns;
}

static inline void
xfs_setqlim_timer(
	struct xfs_mount	*mp,
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	s64			timer)
{
	if (qlim) {
		/* Set the length of the default grace period. */
		res->timer = xfs_dquot_set_grace_period(timer);
		qlim->time = res->timer;
	} else {
		/* Set the grace period expiration on a quota. */
		res->timer = xfs_dquot_set_timeout(mp, timer);
	}
}

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct qc_dqblk		*newlim)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_dquot	*dqp;
	struct xfs_trans	*tp;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*res;
	struct xfs_quota_limits	*qlim;
	int			error;
	xfs_qcnt_t		hard, soft;

	if (newlim->d_fieldmask & ~XFS_QC_MASK)
		return -EINVAL;
	if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
		return 0;

	/*
	 * We don't want to race with a quotaoff so take the quotaoff lock.
	 * We don't hold an inode lock, so there's nothing else to stop
	 * a quotaoff from happening.
	 */
	mutex_lock(&q->qi_quotaofflock);

	/*
	 * Get the dquot (locked) before we start, as we need to do a
	 * transaction to allocate it if it doesn't exist. Once we have the
	 * dquot, unlock it so we can start the next transaction safely. We
	 * hold a reference to the dquot, so it's safe to do this unlock/lock
	 * without it being reclaimed in the mean time.
	 */
	error = xfs_qm_dqget(mp, id, type, true, &dqp);
	if (error) {
		ASSERT(error != -ENOENT);
		goto out_unlock;
	}

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
	xfs_dqunlock(dqp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
	if (error)
		goto out_rele;

	xfs_dqlock(dqp);
	xfs_trans_dqjoin(tp, dqp);

	/*
	 * Update quota limits, warnings, and timers, and the defaults
	 * if we're touching id == 0.
	 *
	 * Make sure that hardlimits are >= soft limits before changing.
	 *
	 * Update warnings counter(s) if requested.
	 *
	 * Timelimits for the super user set the relative time the other users
	 * can be over quota for this file system. If it is zero a default is
	 * used. Ditto for the default soft and hard limit values (already
	 * done, above), and for warnings.
	 *
	 * For other IDs, userspace can bump out the grace period if over
	 * the soft limit.
	 */

	/* Blocks on the data device. */
	hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
			dqp->q_blk.hardlimit;
	soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
			dqp->q_blk.softlimit;
	res = &dqp->q_blk;
	qlim = id == 0 ? &defq->blk : NULL;

	if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk"))
		xfs_dquot_set_prealloc_limits(dqp);
	if (newlim->d_fieldmask & QC_SPC_WARNS)
		xfs_setqlim_warns(res, qlim, newlim->d_spc_warns);
	if (newlim->d_fieldmask & QC_SPC_TIMER)
		xfs_setqlim_timer(mp, res, qlim, newlim->d_spc_timer);

	/* Blocks on the realtime device. */
	hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
			dqp->q_rtb.hardlimit;
	soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
		(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
			dqp->q_rtb.softlimit;
	res = &dqp->q_rtb;
	qlim = id == 0 ? &defq->rtb : NULL;

	xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb");
	if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
		xfs_setqlim_warns(res, qlim, newlim->d_rt_spc_warns);
	if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
		xfs_setqlim_timer(mp, res, qlim, newlim->d_rt_spc_timer);

	/* Inodes */
	hard = (newlim->d_fieldmask & QC_INO_HARD) ?
		(xfs_qcnt_t) newlim->d_ino_hardlimit :
			dqp->q_ino.hardlimit;
	soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
		(xfs_qcnt_t) newlim->d_ino_softlimit :
			dqp->q_ino.softlimit;
	res = &dqp->q_ino;
	qlim = id == 0 ? &defq->ino : NULL;

	xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino");
	if (newlim->d_fieldmask & QC_INO_WARNS)
		xfs_setqlim_warns(res, qlim, newlim->d_ino_warns);
	if (newlim->d_fieldmask & QC_INO_TIMER)
		xfs_setqlim_timer(mp, res, qlim, newlim->d_ino_timer);

	if (id != 0) {
		/*
		 * If the user is now over quota, start the timelimit.
		 * The user will not be 'warned'.
		 * Note that we keep the timers ticking, whether enforcement
		 * is on or off. We don't really want to bother with iterating
		 * over all ondisk dquots and turning the timers on/off.
		 */
		xfs_qm_adjust_dqtimers(dqp);
	}
	dqp->q_flags |= XFS_DQFLAG_DIRTY;
	xfs_trans_log_dquot(tp, dqp);

	error = xfs_trans_commit(tp);

out_rele:
	xfs_qm_dqrele(dqp);
out_unlock:
	mutex_unlock(&q->qi_quotaofflock);
	return error;
}

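/*
 * Illustrative note (not in the original source): xfs_qm_scall_setqlim()
 * above is typically driven by the Q_XSETQLIM quotactl command. A
 * hypothetical userspace sketch, using the fs_disk_quota layout from
 * <linux/dqblk_xfs.h> (block limits are given in 512-byte basic blocks and
 * converted by the quota core into the qc_dqblk seen here):
 *
 *	struct fs_disk_quota d = {
 *		.d_version	 = FS_DQUOT_VERSION,
 *		.d_flags	 = FS_USER_QUOTA,
 *		.d_id		 = 1000,
 *		.d_fieldmask	 = FS_DQ_BSOFT | FS_DQ_BHARD,
 *		.d_blk_softlimit = 2097152,	// 1 GiB in basic blocks
 *		.d_blk_hardlimit = 4194304,	// 2 GiB in basic blocks
 *	};
 *	quotactl(QCMD(Q_XSETQLIM, USRQUOTA), "/dev/sdb1", 1000, (caddr_t)&d);
 */
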
/* Fill out the quota context. */
static void
xfs_qm_scall_getquota_fill_qc(
	struct xfs_mount	*mp,
	xfs_dqtype_t		type,
	const struct xfs_dquot	*dqp,
	struct qc_dqblk		*dst)
{
	memset(dst, 0, sizeof(*dst));
	dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit);
	dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit);
	dst->d_ino_hardlimit = dqp->q_ino.hardlimit;
	dst->d_ino_softlimit = dqp->q_ino.softlimit;
	dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved);
	dst->d_ino_count = dqp->q_ino.reserved;
	dst->d_spc_timer = dqp->q_blk.timer;
	dst->d_ino_timer = dqp->q_ino.timer;
	dst->d_ino_warns = dqp->q_ino.warnings;
	dst->d_spc_warns = dqp->q_blk.warnings;
	dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit);
	dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit);
	dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved);
	dst->d_rt_spc_timer = dqp->q_rtb.timer;
	dst->d_rt_spc_warns = dqp->q_rtb.warnings;

	/*
	 * Internally, we don't reset all the timers when quota enforcement
	 * gets turned off. No need to confuse the user level code,
	 * so return zeroes in that case.
	 */
	if (!xfs_dquot_is_enforced(dqp)) {
		dst->d_spc_timer = 0;
		dst->d_ino_timer = 0;
		dst->d_rt_spc_timer = 0;
	}

#ifdef DEBUG
	if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) {
		if ((dst->d_space > dst->d_spc_softlimit) &&
		    (dst->d_spc_softlimit > 0)) {
			ASSERT(dst->d_spc_timer != 0);
		}
		if ((dst->d_ino_count > dqp->q_ino.softlimit) &&
		    (dqp->q_ino.softlimit > 0)) {
			ASSERT(dst->d_ino_timer != 0);
		}
	}
#endif
}

/* Return the quota information for the dquot matching id. */
int
xfs_qm_scall_getquota(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	xfs_dqtype_t		type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	/*
	 * Try to get the dquot. We don't want it allocated on disk, so don't
	 * set doalloc. If it doesn't exist, we'll get ENOENT back.
	 */
	error = xfs_qm_dqget(mp, id, type, false, &dqp);
	if (error)
		return error;

	/*
	 * If everything's NULL, this dquot doesn't quite exist as far as
	 * our utility programs are concerned.
	 */
	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
		error = -ENOENT;
		goto out_put;
	}

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

out_put:
	xfs_qm_dqput(dqp);
	return error;
}

/*
 * Return the quota information for the first initialized dquot whose id
 * is at least as high as id.
 */
int
xfs_qm_scall_getquota_next(
	struct xfs_mount	*mp,
	xfs_dqid_t		*id,
	xfs_dqtype_t		type,
	struct qc_dqblk		*dst)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget_next(mp, *id, type, &dqp);
	if (error)
		return error;

	/* Fill in the ID we actually read from disk */
	*id = dqp->q_id;

	xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst);

	xfs_qm_dqput(dqp);
	return error;
}

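/*
 * Per-inode callback for xfs_qm_dqrele_all_inodes(): drop the dquot
 * references attached to a single inode for the quota types that are
 * being turned off.
 */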
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	void			*args)
{
	uint			*flags = args;

	/* skip quota inodes */
	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
	    ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_pdquot == NULL);
		return 0;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return 0;
}

/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
	struct xfs_mount	*mp,
	uint			flags)
{
	ASSERT(mp->m_quotainfo);
	xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
			&flags, XFS_ICI_NO_TAG);
}