// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_error.h"
#include "xfs_health.h"
STATIC void	xfs_trans_alloc_dqinfo(xfs_trans_t *);
/*
 * Add the locked dquot to the transaction.
 * The dquot must be locked, and it cannot be associated with any
 * transaction.
 */
void
xfs_trans_dqjoin(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
}
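
/*
 * Usage sketch (not from the original file): callers take the dquot lock
 * themselves before joining, e.g.:
 *
 *	xfs_dqlock(dqp);
 *	xfs_trans_dqjoin(tp, dqp);
 *
 * which is the pattern xfs_trans_dqlockedjoin() below applies to every
 * modified dquot in a transaction.
 */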

/*
 * This is called to mark the dquot as needing
 * to be logged when the transaction is committed.  The dquot must
 * already be associated with the given transaction.
 * Note that it marks the entire transaction as dirty. In the ordinary
 * case, this gets called via xfs_trans_commit, after the transaction
 * is already dirty. However, there's nothing to stop this from getting
 * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY
 * flag.
 */
void
xfs_trans_log_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/* Upgrade the dquot to bigtime format if possible. */
	if (dqp->q_id != 0 &&
	    xfs_has_bigtime(tp->t_mountp) &&
	    !(dqp->q_type & XFS_DQTYPE_BIGTIME))
		dqp->q_type |= XFS_DQTYPE_BIGTIME;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &dqp->q_logitem.qli_item.li_flags);
}
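
/*
 * Direct-call sketch (modeled on xfs_qm_scall_setqlim; the limit value is
 * hypothetical): a quota-limit update logs the dquot itself rather than
 * relying on xfs_trans_commit's usual path:
 *
 *	xfs_trans_dqjoin(tp, dqp);
 *	dqp->q_blk.hardlimit = new_hard_limit;
 *	xfs_trans_log_dquot(tp, dqp);
 */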

/*
 * Carry forward whatever is left of the quota blk reservation to
 * the spanky new transaction
 */
void
xfs_trans_dup_dqinfo(
	struct xfs_trans	*otp,
	struct xfs_trans	*ntp)
{
	struct xfs_dqtrx	*oq, *nq;
	int			i, j;
	struct xfs_dqtrx	*oqa, *nqa;
	uint64_t		blk_res_used;

	if (!otp->t_dqinfo)
		return;

	xfs_trans_alloc_dqinfo(ntp);

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		oqa = otp->t_dqinfo->dqs[j];
		nqa = ntp->t_dqinfo->dqs[j];
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			blk_res_used = 0;

			if (oqa[i].qt_dquot == NULL)
				break;
			oq = &oqa[i];
			nq = &nqa[i];

			if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
				blk_res_used = oq->qt_bcount_delta;

			nq->qt_dquot = oq->qt_dquot;
			nq->qt_bcount_delta = nq->qt_icount_delta = 0;
			nq->qt_rtbcount_delta = 0;

			/*
			 * Transfer whatever is left of the reservations.
			 */
			nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
			oq->qt_blk_res = blk_res_used;

			nq->qt_rtblk_res = oq->qt_rtblk_res -
				oq->qt_rtblk_res_used;
			oq->qt_rtblk_res = oq->qt_rtblk_res_used;

			nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used;
			oq->qt_ino_res = oq->qt_ino_res_used;
		}
	}
}
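
/*
 * Worked example (hypothetical numbers, not from the original file): if the
 * old transaction reserved qt_blk_res = 10 blocks and its qt_bcount_delta
 * says 4 of them were used, the old transaction keeps a reservation of
 * exactly those 4 blocks and the new transaction inherits the remaining
 * 10 - 4 = 6 blocks of reservation.
 */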

#ifdef CONFIG_XFS_LIVE_HOOKS
/*
 * Use a static key here to reduce the overhead of quota live updates.  If the
 * compiler supports jump labels, the static branch will be replaced by a nop
 * sled when there are no hook users.  Online fsck is currently the only
 * caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_dqtrx_hooks_switch);

void
xfs_dqtrx_hook_disable(void)
{
	xfs_hooks_switch_off(&xfs_dqtrx_hooks_switch);
}

void
xfs_dqtrx_hook_enable(void)
{
	xfs_hooks_switch_on(&xfs_dqtrx_hooks_switch);
}
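
/*
 * Pairing sketch (assumed caller, not from this file): a live-update
 * consumer such as online fsck brackets the lifetime of its hooks with:
 *
 *	xfs_dqtrx_hook_enable();
 *	...scan runs with hooks installed...
 *	xfs_dqtrx_hook_disable();
 *
 * Per the note above, the caller must not hold locks used by memory reclaim
 * or writeback here, because flipping the static branch takes the cpu
 * hotplug lock.
 */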

/* Schedule a transactional dquot update on behalf of an inode. */
void
xfs_trans_mod_ino_dquot(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	struct xfs_dquot		*dqp,
	unsigned int			field,
	int64_t				delta)
{
	xfs_trans_mod_dquot(tp, dqp, field, delta);

	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_mod_ino_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.ino		= ip->i_ino,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
			.delta		= delta
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_mod_ino_dqtrx_hooks, field, &p);
	}
}

/* Call the specified functions during a dquot counter update. */
int
xfs_dqtrx_hook_add(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	int			error;

	/*
	 * Transactional dquot updates first call the mod hook when changes
	 * are attached to the transaction and then call the apply hook when
	 * those changes are committed (or canceled).
	 *
	 * The apply hook must be installed before the mod hook so that we
	 * never fail to catch the end of a quota update sequence.
	 */
	error = xfs_hooks_add(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
	if (error)
		return error;

	error = xfs_hooks_add(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	if (error)
		goto err;

	return 0;

err:
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
	return error;
}
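
/*
 * Registration sketch (hypothetical consumer callbacks, not from this file):
 * the two notifier functions are bound first, then installed in the
 * required order by xfs_dqtrx_hook_add():
 *
 *	struct xfs_dqtrx_hook	hook;
 *
 *	xfs_dqtrx_hook_setup(&hook, my_mod_fn, my_apply_fn);
 *	error = xfs_dqtrx_hook_add(qi, &hook);
 */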

/* Stop calling the specified function during a dquot counter update. */
void
xfs_dqtrx_hook_del(
	struct xfs_quotainfo	*qi,
	struct xfs_dqtrx_hook	*hook)
{
	/*
	 * The mod hook must be removed before the apply hook to avoid giving
	 * the hook consumer an incomplete update.  No hooks should be
	 * running after these functions return.
	 */
	xfs_hooks_del(&qi->qi_mod_ino_dqtrx_hooks, &hook->mod_hook);
	xfs_hooks_del(&qi->qi_apply_dqtrx_hooks, &hook->apply_hook);
}

/* Configure dquot update hook functions. */
void
xfs_dqtrx_hook_setup(
	struct xfs_dqtrx_hook	*hook,
	notifier_fn_t		mod_fn,
	notifier_fn_t		apply_fn)
{
	xfs_hook_setup(&hook->mod_hook, mod_fn);
	xfs_hook_setup(&hook->apply_hook, apply_fn);
}
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Wrap around mod_dquot to account for user, group and project quotas.
 */
void
xfs_trans_mod_dquot_byino(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		field,
	int64_t		delta)
{
	xfs_mount_t	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp) ||
	    xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return;

	if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_udquot, field, delta);
	if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_gdquot, field, delta);
	if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
		xfs_trans_mod_ino_dquot(tp, ip, ip->i_pdquot, field, delta);
}
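
/*
 * Example call (a sketch, not from this file): an allocation path that just
 * mapped nblks new data device blocks into an inode charges them to every
 * attached dquot with:
 *
 *	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, nblks);
 */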

STATIC struct xfs_dqtrx *
xfs_trans_get_dqtrx(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	int			i;
	struct xfs_dqtrx	*qa;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
		break;
	case XFS_DQTYPE_GROUP:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
		break;
	case XFS_DQTYPE_PROJ:
		qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
		break;
	default:
		return NULL;
	}

	for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
		if (qa[i].qt_dquot == NULL ||
		    qa[i].qt_dquot == dqp)
			return &qa[i];
	}

	return NULL;
}

/*
 * Make the changes in the transaction structure.
 * The moral equivalent to xfs_trans_mod_sb().
 * We don't touch any fields in the dquot, so we don't care
 * if it's locked or not (most of the time it won't be).
 */
void
xfs_trans_mod_dquot(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	uint			field,
	int64_t			delta)
{
	struct xfs_dqtrx	*qtrx;

	ASSERT(tp);
	ASSERT(XFS_IS_QUOTA_ON(tp->t_mountp));
	qtrx = NULL;

	if (!delta)
		return;

	if (tp->t_dqinfo == NULL)
		xfs_trans_alloc_dqinfo(tp);
	/*
	 * Find either the first free slot or the slot that belongs
	 * to this dquot.
	 */
	qtrx = xfs_trans_get_dqtrx(tp, dqp);
	ASSERT(qtrx);
	if (qtrx->qt_dquot == NULL)
		qtrx->qt_dquot = dqp;

	trace_xfs_trans_mod_dquot_before(qtrx);
	trace_xfs_trans_mod_dquot(tp, dqp, field, delta);

	switch (field) {
	/* regular disk blk reservation */
	case XFS_TRANS_DQ_RES_BLKS:
		qtrx->qt_blk_res += delta;
		break;

	/* inode reservation */
	case XFS_TRANS_DQ_RES_INOS:
		qtrx->qt_ino_res += delta;
		break;

	/* disk blocks used. */
	case XFS_TRANS_DQ_BCOUNT:
		qtrx->qt_bcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELBCOUNT:
		qtrx->qt_delbcnt_delta += delta;
		break;

	/* Inode Count */
	case XFS_TRANS_DQ_ICOUNT:
		if (qtrx->qt_ino_res && delta > 0) {
			qtrx->qt_ino_res_used += delta;
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
		}
		qtrx->qt_icount_delta += delta;
		break;

	/* rtblk reservation */
	case XFS_TRANS_DQ_RES_RTBLKS:
		qtrx->qt_rtblk_res += delta;
		break;

	/* rtblk count */
	case XFS_TRANS_DQ_RTBCOUNT:
		if (qtrx->qt_rtblk_res && delta > 0) {
			qtrx->qt_rtblk_res_used += delta;
			ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used);
		}
		qtrx->qt_rtbcount_delta += delta;
		break;

	case XFS_TRANS_DQ_DELRTBCOUNT:
		qtrx->qt_delrtb_delta += delta;
		break;

	default:
		ASSERT(0);
	}

	trace_xfs_trans_mod_dquot_after(qtrx);
}
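
/*
 * Example pairing (hypothetical numbers, not from this file): a transaction
 * that reserves 8 blocks up front and later allocates 5 of them issues:
 *
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_BLKS, 8);
 *	...
 *	xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_BCOUNT, 5);
 *
 * leaving qt_blk_res = 8 and qt_bcount_delta = 5 for
 * xfs_trans_apply_dquot_deltas() to reconcile at commit.
 */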

/*
 * Given an array of dqtrx structures, lock all the dquots associated and join
 * them to the transaction, provided they have been modified.
 */
STATIC void
xfs_trans_dqlockedjoin(
	struct xfs_trans	*tp,
	struct xfs_dqtrx	*q)
{
	unsigned int		i;
	ASSERT(q[0].qt_dquot != NULL);
	if (q[1].qt_dquot == NULL) {
		xfs_dqlock(q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
	} else if (q[2].qt_dquot == NULL) {
		xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
		xfs_trans_dqjoin(tp, q[0].qt_dquot);
		xfs_trans_dqjoin(tp, q[1].qt_dquot);
	} else {
		xfs_dqlockn(q);
		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			if (q[i].qt_dquot == NULL)
				break;
			xfs_trans_dqjoin(tp, q[i].qt_dquot);
		}
	}
}

/* Apply dqtrx changes to the quota reservation counters. */
static inline void
xfs_apply_quota_reservation_deltas(
	struct xfs_dquot_res	*res,
	uint64_t		reserved,
	int64_t			res_used,
	int64_t			count_delta)
{
	if (reserved != 0) {
		/*
		 * Subtle math here: If reserved > res_used (the normal case),
		 * we're simply subtracting the unused transaction quota
		 * reservation from the dquot reservation.
		 *
		 * If, however, res_used > reserved, then we have allocated
		 * more quota blocks than were reserved for the transaction.
		 * We must add that excess to the dquot reservation since it
		 * tracks (usage + resv) and by definition we didn't reserve
		 * that excess.
		 */
		res->reserved -= abs(reserved - res_used);
	} else if (count_delta != 0) {
		/*
		 * These blks were never reserved, either inside a transaction
		 * or outside one (in a delayed allocation). Also, this isn't
		 * always a negative number since we sometimes deliberately
		 * skip quota reservations.
		 */
		res->reserved += count_delta;
	}
}
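
/*
 * Worked example (hypothetical numbers): for a transaction reservation of
 * reserved = 10 blocks of which res_used = 4 were actually allocated, the
 * unused 10 - 4 = 6 blocks are returned: res->reserved -= 6.  The 4 used
 * blocks remain accounted, since res->reserved tracks usage plus
 * reservation and the usage delta was already folded into res->count.
 */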

#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to apply dquot deltas. */
static inline void
xfs_trans_apply_dquot_deltas_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_COMMIT, &p);
	}
}
#else
# define xfs_trans_apply_dquot_deltas_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Called by xfs_trans_commit() and similar in spirit to
 * xfs_trans_apply_sb_deltas().
 * Go thru all the dquots belonging to this transaction and modify the
 * INCORE dquot to reflect the actual usages.
 * Unreserve just the reservations done by this transaction.
 * dquot is still left locked at exit.
 */
void
xfs_trans_apply_dquot_deltas(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	int64_t			totalbdelta;
	int64_t			totalrtbdelta;

	if (!tp->t_dqinfo)
		return;

	ASSERT(tp->t_dqinfo);
	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];
		if (qa[0].qt_dquot == NULL)
			continue;

		/*
		 * Lock all of the dquots and join them to the transaction.
		 */
		xfs_trans_dqlockedjoin(tp, qa);

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			uint64_t	blk_res_used;

			qtrx = &qa[i];
			/*
			 * The array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			ASSERT(XFS_DQ_IS_LOCKED(dqp));

			xfs_trans_apply_dquot_deltas_hook(tp, dqp);

			/*
			 * adjust the actual number of blocks used
			 */

			/*
			 * The issue here is - sometimes we don't make a blkquota
			 * reservation intentionally to be fair to users
			 * (when the amount is small). On the other hand,
			 * delayed allocs do make reservations, but that's
			 * outside of a transaction, so we have no
			 * idea how much was really reserved.
			 * So, here we've accumulated delayed allocation blks and
			 * non-delay blks. The assumption is that the
			 * delayed ones are always reserved (outside of a
			 * transaction), and the others may or may not have
			 * quota reservations.
			 */
			totalbdelta = qtrx->qt_bcount_delta +
				qtrx->qt_delbcnt_delta;
			totalrtbdelta = qtrx->qt_rtbcount_delta +
				qtrx->qt_delrtb_delta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0) {
				trace_xfs_trans_apply_dquot_deltas_before(dqp);
				trace_xfs_trans_apply_dquot_deltas(qtrx);
			}

#ifdef DEBUG
			if (totalbdelta < 0)
				ASSERT(dqp->q_blk.count >= -totalbdelta);

			if (totalrtbdelta < 0)
				ASSERT(dqp->q_rtb.count >= -totalrtbdelta);

			if (qtrx->qt_icount_delta < 0)
				ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta);
#endif
			if (totalbdelta)
				dqp->q_blk.count += totalbdelta;

			if (qtrx->qt_icount_delta)
				dqp->q_ino.count += qtrx->qt_icount_delta;

			if (totalrtbdelta)
				dqp->q_rtb.count += totalrtbdelta;

			if (totalbdelta != 0 || totalrtbdelta != 0 ||
			    qtrx->qt_icount_delta != 0)
				trace_xfs_trans_apply_dquot_deltas_after(dqp);

			/*
			 * Get any default limits in use.
			 * Start/reset the timer(s) if needed.
			 */
			if (dqp->q_id) {
				xfs_qm_adjust_dqlimits(dqp);
				xfs_qm_adjust_dqtimers(dqp);
			}

			dqp->q_flags |= XFS_DQFLAG_DIRTY;
			/*
			 * add this to the list of items to get logged
			 */
			xfs_trans_log_dquot(tp, dqp);
			/*
			 * Take off what's left of the original reservation.
			 * In case of delayed allocations, there's no
			 * reservation that a transaction structure knows of.
			 */
			blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta);
			xfs_apply_quota_reservation_deltas(&dqp->q_blk,
					qtrx->qt_blk_res, blk_res_used,
					qtrx->qt_bcount_delta);

			/*
			 * Adjust the RT reservation.
			 */
			xfs_apply_quota_reservation_deltas(&dqp->q_rtb,
					qtrx->qt_rtblk_res,
					qtrx->qt_rtblk_res_used,
					qtrx->qt_rtbcount_delta);

			/*
			 * Adjust the inode reservation.
			 */
			ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used);
			xfs_apply_quota_reservation_deltas(&dqp->q_ino,
					qtrx->qt_ino_res,
					qtrx->qt_ino_res_used,
					qtrx->qt_icount_delta);

			ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count);
			ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count);
			ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count);
		}
	}
}
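
/*
 * Commit-time illustration (hypothetical numbers, continuing the sketch
 * after xfs_trans_mod_dquot above): with qt_blk_res = 8 and
 * qt_bcount_delta = 5, this function adds 5 to q_blk.count and then hands
 * (reserved = 8, res_used = 5) to xfs_apply_quota_reservation_deltas(),
 * which gives the 3 unused reserved blocks back to the dquot.
 */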

#ifdef CONFIG_XFS_LIVE_HOOKS
/* Call downstream hooks now that it's time to cancel dquot deltas. */
static inline void
xfs_trans_unreserve_and_mod_dquots_hook(
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp)
{
	if (xfs_hooks_switched_on(&xfs_dqtrx_hooks_switch)) {
		struct xfs_apply_dqtrx_params	p = {
			.tx_id		= (uintptr_t)tp,
			.q_type		= xfs_dquot_type(dqp),
			.q_id		= dqp->q_id,
		};
		struct xfs_quotainfo	*qi = tp->t_mountp->m_quotainfo;

		xfs_hooks_call(&qi->qi_apply_dqtrx_hooks,
				XFS_APPLY_DQTRX_UNRESERVE, &p);
	}
}
#else
# define xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp)	((void)0)
#endif /* CONFIG_XFS_LIVE_HOOKS */

/*
 * Release the reservations, and adjust the dquots accordingly.
 * This is called only when the transaction is being aborted. If by
 * any chance we have done dquot modifications incore (i.e. deltas) already,
 * we simply throw those away, since that's the expected behavior
 * when a transaction is curtailed without a commit.
 */
void
xfs_trans_unreserve_and_mod_dquots(
	struct xfs_trans	*tp)
{
	int			i, j;
	struct xfs_dquot	*dqp;
	struct xfs_dqtrx	*qtrx, *qa;
	bool			locked;

	if (!tp->t_dqinfo)
		return;

	for (j = 0; j < XFS_QM_TRANS_DQTYPES; j++) {
		qa = tp->t_dqinfo->dqs[j];

		for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
			qtrx = &qa[i];
			/*
			 * We assume that the array of dquots is filled
			 * sequentially, not sparsely.
			 */
			if ((dqp = qtrx->qt_dquot) == NULL)
				break;

			xfs_trans_unreserve_and_mod_dquots_hook(tp, dqp);

			/*
			 * Unreserve the original reservation. We don't care
			 * about the number of blocks used field, or deltas.
			 * Also we don't bother to zero the fields.
			 */
			locked = false;
			if (qtrx->qt_blk_res) {
				xfs_dqlock(dqp);
				locked = true;
				dqp->q_blk.reserved -=
					(xfs_qcnt_t)qtrx->qt_blk_res;
			}
			if (qtrx->qt_ino_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_ino.reserved -=
					(xfs_qcnt_t)qtrx->qt_ino_res;
			}

			if (qtrx->qt_rtblk_res) {
				if (!locked) {
					xfs_dqlock(dqp);
					locked = true;
				}
				dqp->q_rtb.reserved -=
					(xfs_qcnt_t)qtrx->qt_rtblk_res;
			}
			if (locked)
				xfs_dqunlock(dqp);
		}
	}
}

STATIC void
xfs_quota_warn(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int			type)
{
	enum quota_type		qtype;

	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_PROJ:
		qtype = PRJQUOTA;
		break;
	case XFS_DQTYPE_USER:
		qtype = USRQUOTA;
		break;
	case XFS_DQTYPE_GROUP:
		qtype = GRPQUOTA;
		break;
	default:
		return;
	}

	quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),
			   mp->m_super->s_dev, type);
}

/*
 * Decide if we can make an additional reservation against a quota resource.
 * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal.
 *
 * Note that we assume that the numeric difference between the inode and block
 * warning codes will always be 3 since it's userspace ABI now, and will never
 * decrease the quota reservation, so the *BELOW messages are irrelevant.
 */
static inline int
xfs_dqresv_check(
	struct xfs_dquot_res	*res,
	struct xfs_quota_limits	*qlim,
	int64_t			delta,
	bool			*fatal)
{
	xfs_qcnt_t		hardlimit = res->hardlimit;
	xfs_qcnt_t		softlimit = res->softlimit;
	xfs_qcnt_t		total_count = res->reserved + delta;

	BUILD_BUG_ON(QUOTA_NL_BHARDWARN     != QUOTA_NL_IHARDWARN     + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3);
	BUILD_BUG_ON(QUOTA_NL_BSOFTWARN     != QUOTA_NL_ISOFTWARN     + 3);

	*fatal = false;
	if (delta <= 0)
		return QUOTA_NL_NOWARN;

	if (!hardlimit)
		hardlimit = qlim->hard;
	if (!softlimit)
		softlimit = qlim->soft;

	if (hardlimit && total_count > hardlimit) {
		*fatal = true;
		return QUOTA_NL_IHARDWARN;
	}

	if (softlimit && total_count > softlimit) {
		time64_t	now = ktime_get_real_seconds();

		if (res->timer != 0 && now > res->timer) {
			*fatal = true;
			return QUOTA_NL_ISOFTLONGWARN;
		}

		return QUOTA_NL_ISOFTWARN;
	}

	return QUOTA_NL_NOWARN;
}
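
/*
 * ABI offset example: because of the BUILD_BUG_ONs above, a caller checking
 * a block resource may simply add 3 to the inode warning code returned
 * here, e.g. QUOTA_NL_IHARDWARN + 3 == QUOTA_NL_BHARDWARN; this is exactly
 * what xfs_trans_dqresv() does below.
 */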

/*
 * This reserves disk blocks and inodes against a dquot.
 * Flags indicate if the dquot is to be locked here and also
 * if the blk reservation is for RT or regular blocks.
 * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check.
 */
STATIC int
xfs_trans_dqresv(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*dqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_def_quota	*defq;
	struct xfs_dquot_res	*blkres;
	struct xfs_quota_limits	*qlim;

	xfs_dqlock(dqp);

	defq = xfs_get_defquota(q, xfs_dquot_type(dqp));

	if (flags & XFS_TRANS_DQ_RES_BLKS) {
		blkres = &dqp->q_blk;
		qlim = &defq->blk;
	} else {
		blkres = &dqp->q_rtb;
		qlim = &defq->rtb;
	}

	if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&
	    xfs_dquot_is_enforced(dqp)) {
		int		quota_nl;
		bool		fatal;

		/*
		 * dquot is locked already. See if we'd go over the hardlimit
		 * or exceed the timelimit if we'd reserve resources.
		 */
		quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			/*
			 * Quota block warning codes are 3 more than the inode
			 * codes, which we check above.
			 */
			xfs_quota_warn(mp, dqp, quota_nl + 3);
			if (fatal)
				goto error_return;
		}

		quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos,
				&fatal);
		if (quota_nl != QUOTA_NL_NOWARN) {
			xfs_quota_warn(mp, dqp, quota_nl);
			if (fatal)
				goto error_return;
		}
	}

	/*
	 * Change the reservation, but not the actual usage.
	 * Note that q_blk.reserved = q_blk.count + resv
	 */
	blkres->reserved += (xfs_qcnt_t)nblks;
	dqp->q_ino.reserved += (xfs_qcnt_t)ninos;

	/*
	 * note the reservation amt in the trans struct too,
	 * so that the transaction knows how much was reserved by
	 * it against this particular dquot.
	 * We don't do this when we are reserving for a delayed allocation,
	 * because we don't have the luxury of a transaction envelope then.
	 */
	if (tp) {
		ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
		xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK,
				    nblks);
		xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos);
	}

	if (XFS_IS_CORRUPT(mp, dqp->q_blk.reserved < dqp->q_blk.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_rtb.reserved < dqp->q_rtb.count) ||
	    XFS_IS_CORRUPT(mp, dqp->q_ino.reserved < dqp->q_ino.count))
		goto error_corrupt;

	xfs_dqunlock(dqp);
	return 0;

error_return:
	xfs_dqunlock(dqp);
	if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ)
		return -ENOSPC;
	return -EDQUOT;
error_corrupt:
	xfs_dqunlock(dqp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	xfs_fs_mark_sick(mp, XFS_SICK_FS_QUOTACHECK);
	return -EFSCORRUPTED;
}

/*
 * Given dquot(s), make disk block and/or inode reservations against them.
 * The fact that this does the reservation against user, group and
 * project quotas is important, because this follows an all-or-nothing
 * approach.
 *
 * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
 *	   XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT.  Used by pquota.
 *	   XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks
 *	   XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks
 * dquots are unlocked on return, if they were not locked by caller.
 */
int
xfs_trans_reserve_quota_bydquots(
	struct xfs_trans	*tp,
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			nblks,
	long			ninos,
	uint			flags)
{
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(flags & XFS_QMOPT_RESBLK_MASK);

	if (udqp) {
		error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
		if (error)
			return error;
	}

	if (gdqp) {
		error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags);
		if (error)
			goto unwind_usr;
	}

	if (pdqp) {
		error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
		if (error)
			goto unwind_grp;
	}

	/*
	 * Didn't change anything critical, so, no need to log
	 */
	return 0;

unwind_grp:
	flags |= XFS_QMOPT_FORCE_RES;
	if (gdqp)
		xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
	flags |= XFS_QMOPT_FORCE_RES;
	if (udqp)
		xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags);
	return error;
}
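
/*
 * Rollback illustration (hypothetical numbers, not from the original file):
 * if nblks = 16 and the user and group reservations succeed but the project
 * reservation fails with -EDQUOT, the unwind path force-reserves -16 blocks
 * against gdqp and udqp, leaving all three dquots exactly as they were
 * before the call.
 */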

/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int64_t			dblocks,
	int64_t			rblocks,
	bool			force)
{
	struct xfs_mount	*mp = ip->i_mount;
	unsigned int		qflags = 0;
	int			error;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);

	if (force)
		qflags |= XFS_QMOPT_FORCE_RES;

	/* Reserve data device quota against the inode's dquots. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, dblocks, 0,
			XFS_QMOPT_RES_REGBLKS | qflags);
	if (error)
		return error;

	/* Do the same but for realtime blocks. */
	error = xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
			ip->i_gdquot, ip->i_pdquot, rblocks, 0,
			XFS_QMOPT_RES_RTBLKS | qflags);
	if (error) {
		xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot, -dblocks, 0,
				XFS_QMOPT_RES_REGBLKS);
		return error;
	}

	return 0;
}
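
/*
 * Typical write-path usage (a sketch under assumed names, not from this
 * file): after computing a worst-case block count "resblks" for an
 * allocation, a caller holding the ILOCK might do:
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0, false);
 *	if (error == -EDQUOT || error == -ENOSPC)
 *		...back off, flush space, or retry with a smaller request...
 */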

/* Change the quota reservations for an inode creation activity. */
int
xfs_trans_reserve_quota_icreate(
	struct xfs_trans	*tp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	int64_t			dblocks)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	return xfs_trans_reserve_quota_bydquots(tp, mp, udqp, gdqp, pdqp,
			dblocks, 1, XFS_QMOPT_RES_REGBLKS);
}

STATIC void
xfs_trans_alloc_dqinfo(
	xfs_trans_t	*tp)
{
	tp->t_dqinfo = kmem_cache_zalloc(xfs_dqtrx_cache,
					 GFP_KERNEL | __GFP_NOFAIL);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_cache_free(xfs_dqtrx_cache, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}