/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_extent_busy.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_trace.h"
#include "xfs_error.h"
kmem_zone_t *xfs_trans_zone;
kmem_zone_t *xfs_log_item_desc_zone;
/*
 * Initialize the precomputed transaction reservation values
 * in the mount structure.
 */
void
xfs_trans_init(
	struct xfs_mount *mp)
{
	xfs_trans_resv_calc(mp, M_RES(mp));
}
/*
 * This routine is called to allocate a transaction structure.
 * The type parameter indicates the type of the transaction.  These
 * are enumerated in xfs_trans.h.
 *
 * Dynamically allocate the transaction structure from the transaction
 * zone, initialize it, and return it to the caller.
 */
xfs_trans_t *
xfs_trans_alloc(
	xfs_mount_t *mp,
	uint type)
{
	xfs_trans_t *tp;

	sb_start_intwrite(mp->m_super);
	tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
	tp->t_flags |= XFS_TRANS_FREEZE_PROT;
	return tp;
}
xfs_trans_t *
_xfs_trans_alloc(
	xfs_mount_t *mp,
	uint type,
	xfs_km_flags_t memflags)
{
	xfs_trans_t *tp;

	WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
	atomic_inc(&mp->m_active_trans);

	tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
	tp->t_type = type;
	tp->t_mountp = mp;
	INIT_LIST_HEAD(&tp->t_items);
	INIT_LIST_HEAD(&tp->t_busy);

	return tp;
}
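/*
 * Minimal usage sketch (illustrative only, not part of this file): every
 * allocation must be balanced by xfs_trans_commit() or xfs_trans_cancel(),
 * both of which reach xfs_trans_free() and so drop the m_active_trans
 * count and the freeze protection taken in xfs_trans_alloc().  The helper
 * name is hypothetical; XFS_TRANS_DUMMY1 is a real transaction type but is
 * chosen here only for illustration.
 */
STATIC int
example_trans_balance(
	struct xfs_mount *mp)
{
	struct xfs_trans *tp;

	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);	/* takes intwrite */
	/* ... nothing reserved, nothing dirtied ... */
	xfs_trans_cancel(tp);				/* frees tp, drops intwrite */
	return 0;
}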
/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	struct xfs_trans *tp)
{
	xfs_extent_busy_sort(&tp->t_busy);
	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);

	atomic_dec(&tp->t_mountp->m_active_trans);
	if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
		sb_end_intwrite(tp->t_mountp->m_super);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}
/*
 * This is called to create a new transaction which will share the
 * permanent log reservation of the given transaction.  The remaining
 * unused block and rt extent reservations are also inherited.  This
 * implies that the original transaction is no longer allowed to allocate
 * blocks.  Locks and log items, however, are not inherited.  They must
 * be added to the new transaction explicitly.
 */
xfs_trans_t *
xfs_trans_dup(
	xfs_trans_t *tp)
{
	xfs_trans_t *ntp;

	ntp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);

	/*
	 * Initialize the new transaction structure.
	 */
	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
	ntp->t_type = tp->t_type;
	ntp->t_mountp = tp->t_mountp;
	INIT_LIST_HEAD(&ntp->t_items);
	INIT_LIST_HEAD(&ntp->t_busy);

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(tp->t_ticket != NULL);

	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
		       (tp->t_flags & XFS_TRANS_RESERVE) |
		       (tp->t_flags & XFS_TRANS_FREEZE_PROT);
	/* We gave our writer reference to the new transaction */
	tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
	tp->t_blk_res = tp->t_blk_res_used;
	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
	tp->t_rtx_res = tp->t_rtx_res_used;
	ntp->t_pflags = tp->t_pflags;

	xfs_trans_dup_dqinfo(tp, ntp);

	atomic_inc(&tp->t_mountp->m_active_trans);
	return ntp;
}
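/*
 * Worked example of the reservation split above (illustrative numbers):
 * if the old transaction reserved t_blk_res = 20 blocks and has used
 * t_blk_res_used = 8 of them, the duplicate inherits 20 - 8 = 12 unused
 * blocks and the old transaction's reservation is trimmed to exactly the
 * 8 it consumed.  The same arithmetic is applied to the realtime extent
 * reservation (t_rtx_res / t_rtx_res_used).
 */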
/*
 * This is called to reserve free disk blocks and log space for the
 * given transaction.  This must be done before allocating any resources
 * within the transaction.
 *
 * This will return ENOSPC if there are not enough blocks available.
 * It will sleep waiting for available log space.
 * The only valid flag in resp->tr_logflags is XFS_TRANS_PERM_LOG_RES,
 * which is used by long running transactions.  If any one of the
 * reservations fails then they will all be backed out.
 *
 * This does not do quota reservations.  That typically is done by the
 * caller afterwards.
 */
int
xfs_trans_reserve(
	struct xfs_trans *tp,
	struct xfs_trans_res *resp,
	uint blocks,
	uint rtextents)
{
	int error = 0;
	bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;

	/* Mark this thread as being in a transaction */
	current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);

	/*
	 * Attempt to reserve the needed disk blocks by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (blocks > 0) {
		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
		if (error != 0) {
			current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
			return -ENOSPC;
		}
		tp->t_blk_res += blocks;
	}

	/*
	 * Reserve the log space needed for this transaction.
	 */
	if (resp->tr_logres > 0) {
		bool permanent = false;

		ASSERT(tp->t_log_res == 0 ||
		       tp->t_log_res == resp->tr_logres);
		ASSERT(tp->t_log_count == 0 ||
		       tp->t_log_count == resp->tr_logcount);

		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
			permanent = true;
		} else {
			ASSERT(tp->t_ticket == NULL);
			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
		}

		if (tp->t_ticket != NULL) {
			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
		} else {
			error = xfs_log_reserve(tp->t_mountp,
						resp->tr_logres,
						resp->tr_logcount,
						&tp->t_ticket, XFS_TRANSACTION,
						permanent, tp->t_type);
		}

		if (error)
			goto undo_blocks;

		tp->t_log_res = resp->tr_logres;
		tp->t_log_count = resp->tr_logcount;
	}

	/*
	 * Attempt to reserve the needed realtime extents by decrementing
	 * the number needed from the number available.  This will
	 * fail if the count would go below zero.
	 */
	if (rtextents > 0) {
		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
		if (error) {
			error = -ENOSPC;
			goto undo_log;
		}
		tp->t_rtx_res += rtextents;
	}

	return 0;

	/*
	 * Error cases jump to one of these labels to undo any
	 * reservations which have already been performed.
	 */
undo_log:
	if (resp->tr_logres > 0) {
		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
		tp->t_ticket = NULL;
		tp->t_log_res = 0;
		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
	}

undo_blocks:
	if (blocks > 0) {
		/* give the blocks back; subtracting again would leak them */
		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
		tp->t_blk_res = 0;
	}

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	return error;
}
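/*
 * Typical caller pattern (sketch, not part of this file): allocate a
 * transaction, make the reservation, and cancel on failure before any
 * resource has been joined.  The helper name is hypothetical; the
 * tr_ichange reservation and XFS_TRANS_SETATTR_NOT_SIZE type are real
 * but are chosen here only as an example.
 */
STATIC int
example_reserve_pattern(
	struct xfs_mount *mp,
	struct xfs_inode *ip)
{
	struct xfs_trans *tp;
	int error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
	if (error) {
		/* no items joined yet, so a plain cancel backs out cleanly */
		xfs_trans_cancel(tp);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* commit drops the lock */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return xfs_trans_commit(tp);
}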
/*
 * Record the indicated change to the given field for application
 * to the file system's superblock when the transaction commits.
 * For now, just store the change in the transaction structure.
 *
 * Mark the transaction structure to indicate that the superblock
 * needs to be updated before committing.
 *
 * Because we may not be keeping track of allocated/free inodes and
 * used filesystem blocks in the superblock, we do not mark the
 * superblock dirty in this transaction if we modify these fields.
 * We still need to update the transaction deltas so that they get
 * applied to the incore superblock, but we don't want them to
 * cause the superblock to get locked and logged if these are the
 * only fields in the superblock that the transaction modifies.
 */
void
xfs_trans_mod_sb(
	xfs_trans_t *tp,
	uint field,
	int64_t delta)
{
	uint32_t flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
	xfs_mount_t *mp = tp->t_mountp;

	switch (field) {
	case XFS_TRANS_SB_ICOUNT:
		tp->t_icount_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_IFREE:
		tp->t_ifree_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FDBLOCKS:
		/*
		 * Track the number of blocks allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_blk_res_used += (uint)-delta;
			ASSERT(tp->t_blk_res_used <= tp->t_blk_res);
		}
		tp->t_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_RES_FDBLOCKS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_fdblocks_delta += delta;
		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
			flags &= ~XFS_TRANS_SB_DIRTY;
		break;
	case XFS_TRANS_SB_FREXTENTS:
		/*
		 * Track the number of realtime extents allocated in the
		 * transaction.  Make sure it does not exceed the
		 * number reserved.
		 */
		if (delta < 0) {
			tp->t_rtx_res_used += (uint)-delta;
			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
		}
		tp->t_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_RES_FREXTENTS:
		/*
		 * The allocation has already been applied to the
		 * in-core superblock's counter.  This should only
		 * be applied to the on-disk superblock.
		 */
		ASSERT(delta < 0);
		tp->t_res_frextents_delta += delta;
		break;
	case XFS_TRANS_SB_DBLOCKS:
		ASSERT(delta > 0);
		tp->t_dblocks_delta += delta;
		break;
	case XFS_TRANS_SB_AGCOUNT:
		ASSERT(delta > 0);
		tp->t_agcount_delta += delta;
		break;
	case XFS_TRANS_SB_IMAXPCT:
		tp->t_imaxpct_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSIZE:
		tp->t_rextsize_delta += delta;
		break;
	case XFS_TRANS_SB_RBMBLOCKS:
		tp->t_rbmblocks_delta += delta;
		break;
	case XFS_TRANS_SB_RBLOCKS:
		tp->t_rblocks_delta += delta;
		break;
	case XFS_TRANS_SB_REXTENTS:
		tp->t_rextents_delta += delta;
		break;
	case XFS_TRANS_SB_REXTSLOG:
		tp->t_rextslog_delta += delta;
		break;
	default:
		ASSERT(0);
		return;
	}

	tp->t_flags |= flags;
}
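/*
 * Example call (sketch): an allocation of 'len' filesystem blocks made
 * against this transaction's block reservation is accounted with a
 * negative delta, which the XFS_TRANS_SB_FDBLOCKS case above charges to
 * t_blk_res_used:
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * With lazy superblock counters enabled this dirties only the incore
 * deltas, not the on-disk superblock.
 */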
/*
 * xfs_trans_apply_sb_deltas() is called from the commit code
 * to bring the superblock buffer into the current transaction
 * and modify it as requested by earlier calls to xfs_trans_mod_sb().
 *
 * For now we just look at each field allowed to change and change
 * it if necessary.
 */
STATIC void
xfs_trans_apply_sb_deltas(
	xfs_trans_t *tp)
{
	xfs_dsb_t *sbp;
	xfs_buf_t *bp;
	int whole = 0;

	bp = xfs_trans_getsb(tp, tp->t_mountp, 0);
	sbp = XFS_BUF_TO_SBP(bp);

	/*
	 * Check that superblock mods match the mods made to AGF counters.
	 */
	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
		tp->t_ag_btree_delta));

	/*
	 * Only update the superblock counters if we are logging them
	 */
	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
		if (tp->t_icount_delta)
			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
		if (tp->t_ifree_delta)
			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
		if (tp->t_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
		if (tp->t_res_fdblocks_delta)
			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
	}

	if (tp->t_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
	if (tp->t_res_frextents_delta)
		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);

	if (tp->t_dblocks_delta) {
		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
		whole = 1;
	}
	if (tp->t_agcount_delta) {
		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
		whole = 1;
	}
	if (tp->t_imaxpct_delta) {
		sbp->sb_imax_pct += tp->t_imaxpct_delta;
		whole = 1;
	}
	if (tp->t_rextsize_delta) {
		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
		whole = 1;
	}
	if (tp->t_rbmblocks_delta) {
		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
		whole = 1;
	}
	if (tp->t_rblocks_delta) {
		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
		whole = 1;
	}
	if (tp->t_rextents_delta) {
		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
		whole = 1;
	}
	if (tp->t_rextslog_delta) {
		sbp->sb_rextslog += tp->t_rextslog_delta;
		whole = 1;
	}

	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
	if (whole)
		/*
		 * Log the whole thing, the fields are noncontiguous.
		 */
		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
	else
		/*
		 * Since all the modifiable fields are contiguous, we
		 * can get away with this.
		 */
		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
				  offsetof(xfs_dsb_t, sb_frextents) +
				  sizeof(sbp->sb_frextents) - 1);
}
	/* xfs_sb_mod8() */
	int8_t counter = *field;

	/* xfs_sb_mod32() */
	int32_t counter = *field;

	/* xfs_sb_mod64() */
	int64_t counter = *field;
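/*
 * The three lines above are the local-variable declarations of the
 * width-specific superblock counter helpers called from
 * xfs_trans_unreserve_and_mod_sb() below; their signatures did not
 * survive.  A sketch of the 64-bit variant, reconstructed from its call
 * sites (the 8-bit and 32-bit variants differ only in the field and
 * delta widths):
 */
STATIC int
xfs_sb_mod64(
	uint64_t *field,
	int64_t delta)
{
	int64_t counter = *field;

	counter += delta;
	if (counter < 0) {
		/* counters must never go negative */
		ASSERT(0);
		return -EINVAL;
	}
	*field = counter;
	return 0;
}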
/*
 * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
 * and apply superblock counter changes to the in-core superblock.  The
 * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
 * applied to the in-core superblock.  The idea is that that has already been
 * done.
 *
 * If we are not logging superblock counters, then the inode allocated/free and
 * used block counts are not updated in the on disk superblock. In this case,
 * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
 * still need to update the incore superblock with the changes.
 */
void
xfs_trans_unreserve_and_mod_sb(
	struct xfs_trans *tp)
{
	struct xfs_mount *mp = tp->t_mountp;
	bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
	int64_t blkdelta = 0;
	int64_t rtxdelta = 0;
	int64_t idelta = 0;
	int64_t ifreedelta = 0;
	int error;

	/* calculate deltas */
	if (tp->t_blk_res > 0)
		blkdelta = tp->t_blk_res;
	if ((tp->t_fdblocks_delta != 0) &&
	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
		blkdelta += tp->t_fdblocks_delta;

	if (tp->t_rtx_res > 0)
		rtxdelta = tp->t_rtx_res;
	if ((tp->t_frextents_delta != 0) &&
	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
		rtxdelta += tp->t_frextents_delta;

	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	    (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
		idelta = tp->t_icount_delta;
		ifreedelta = tp->t_ifree_delta;
	}

	/* apply the per-cpu counters */
	if (blkdelta) {
		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
		if (error)
			goto out;
	}

	if (idelta) {
		error = xfs_mod_icount(mp, idelta);
		if (error)
			goto out_undo_fdblocks;
	}

	if (ifreedelta) {
		error = xfs_mod_ifree(mp, ifreedelta);
		if (error)
			goto out_undo_icount;
	}

	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
		return;

	/* apply remaining deltas */
	spin_lock(&mp->m_sb_lock);
	if (rtxdelta) {
		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
		if (error)
			goto out_undo_ifree;
	}

	if (tp->t_dblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
		if (error)
			goto out_undo_frextents;
	}
	if (tp->t_agcount_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
		if (error)
			goto out_undo_dblocks;
	}
	if (tp->t_imaxpct_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
		if (error)
			goto out_undo_agcount;
	}
	if (tp->t_rextsize_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
				     tp->t_rextsize_delta);
		if (error)
			goto out_undo_imaxpct;
	}
	if (tp->t_rbmblocks_delta != 0) {
		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
				     tp->t_rbmblocks_delta);
		if (error)
			goto out_undo_rextsize;
	}
	if (tp->t_rblocks_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
		if (error)
			goto out_undo_rbmblocks;
	}
	if (tp->t_rextents_delta != 0) {
		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
				     tp->t_rextents_delta);
		if (error)
			goto out_undo_rblocks;
	}
	if (tp->t_rextslog_delta != 0) {
		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
				    tp->t_rextslog_delta);
		if (error)
			goto out_undo_rextents;
	}
	spin_unlock(&mp->m_sb_lock);
	return;

out_undo_rextents:
	if (tp->t_rextents_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
out_undo_rblocks:
	if (tp->t_rblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
out_undo_rbmblocks:
	if (tp->t_rbmblocks_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
out_undo_rextsize:
	if (tp->t_rextsize_delta)
		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
out_undo_imaxpct:
	if (tp->t_imaxpct_delta)
		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
out_undo_agcount:
	if (tp->t_agcount_delta)
		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
out_undo_dblocks:
	if (tp->t_dblocks_delta)
		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
out_undo_frextents:
	if (rtxdelta)
		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
out_undo_ifree:
	spin_unlock(&mp->m_sb_lock);
	if (ifreedelta)
		xfs_mod_ifree(mp, -ifreedelta);
out_undo_icount:
	if (idelta)
		xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
	if (blkdelta)
		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
	ASSERT(error == 0);
	return;
}
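/*
 * Note on the unwind pattern above (sketch of the control flow): each
 * successfully applied delta gains an undo label, and a failure at step
 * N jumps to the label that reverses step N-1, falling through to the
 * first.  For example, if the sb_rextsize update fails, control enters
 * at out_undo_imaxpct and reapplies -t_imaxpct_delta, -t_agcount_delta,
 * -t_dblocks_delta and -rtxdelta under m_sb_lock, then backs out the
 * ifree/icount/fdblocks per-cpu counters, leaving every counter as it
 * was on entry.
 */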
/*
 * Add the given log item to the transaction's list of log items.
 *
 * The log item will now point to its new descriptor with its li_desc field.
 */
void
xfs_trans_add_item(
	struct xfs_trans *tp,
	struct xfs_log_item *lip)
{
	struct xfs_log_item_desc *lidp;

	ASSERT(lip->li_mountp == tp->t_mountp);
	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);

	lidp = kmem_zone_zalloc(xfs_log_item_desc_zone, KM_SLEEP | KM_NOFS);

	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	list_add_tail(&lidp->lid_trans, &tp->t_items);

	lip->li_desc = lidp;
}
STATIC void
xfs_trans_free_item_desc(
	struct xfs_log_item_desc *lidp)
{
	list_del_init(&lidp->lid_trans);
	kmem_zone_free(xfs_log_item_desc_zone, lidp);
}
/*
 * Unlink and free the given descriptor.
 */
void
xfs_trans_del_item(
	struct xfs_log_item *lip)
{
	xfs_trans_free_item_desc(lip->li_desc);
	lip->li_desc = NULL;
}
/*
 * Unlock all of the items of a transaction and free all the descriptors
 * of that transaction.
 */
void
xfs_trans_free_items(
	struct xfs_trans *tp,
	xfs_lsn_t commit_lsn,
	bool abort)
{
	struct xfs_log_item_desc *lidp, *next;

	list_for_each_entry_safe(lidp, next, &tp->t_items, lid_trans) {
		struct xfs_log_item *lip = lidp->lid_item;

		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			lip->li_ops->iop_committing(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;
		lip->li_ops->iop_unlock(lip);

		xfs_trans_free_item_desc(lidp);
	}
}
STATIC void
xfs_log_item_batch_insert(
	struct xfs_ail *ailp,
	struct xfs_ail_cursor *cur,
	struct xfs_log_item **log_items,
	int nr_items,
	xfs_lsn_t commit_lsn)
{
	int i;

	spin_lock(&ailp->xa_lock);
	/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];

		lip->li_ops->iop_unpin(lip, 0);
	}
}
/*
 * Bulk operation version of xfs_trans_committed that takes a log vector of
 * items to insert into the AIL. This uses bulk AIL insertion techniques to
 * minimise lock traffic.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_unlock, which
 * means that checkpoint commit abort handling is treated exactly the same
 * as an iclog write error even though we haven't started any IO yet. Hence in
 * this case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk
 * the AIL to find the insertion point on every xfs_log_item_batch_insert()
 * call. This saves a lot of needless list walking and is a net win, even
 * though it slightly increases the amount of AIL lock traffic to set it up
 * and tear it down.
 */
void
xfs_trans_committed_bulk(
	struct xfs_ail *ailp,
	struct xfs_log_vec *log_vector,
	xfs_lsn_t commit_lsn,
	int aborted)
{
#define LOG_ITEM_BATCH_SIZE	32
	struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
	struct xfs_log_vec *lv;
	struct xfs_ail_cursor cur;
	int i = 0;

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
	spin_unlock(&ailp->xa_lock);

	/* unpin all the log items */
	for (lv = log_vector; lv; lv = lv->lv_next) {
		struct xfs_log_item *lip = lv->lv_item;
		xfs_lsn_t item_lsn;

		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;
		item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);

		/* item_lsn of -1 means the item needs no further processing */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
			continue;

		/*
		 * if we are aborting the operation, no point in inserting the
		 * object into the AIL as we are in a shutdown situation.
		 */
		if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(ailp->xa_mount));
			lip->li_ops->iop_unpin(lip, 1);
			continue;
		}

		if (item_lsn != commit_lsn) {

			/*
			 * Not a bulk update option due to unusual item_lsn.
			 * Push into AIL immediately, rechecking the lsn once
			 * we have the ail lock. Then unpin the item. This does
			 * not affect the AIL cursor the bulk insert path is
			 * using.
			 */
			spin_lock(&ailp->xa_lock);
			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
				xfs_trans_ail_update(ailp, lip, item_lsn);
			else
				spin_unlock(&ailp->xa_lock);
			lip->li_ops->iop_unpin(lip, 0);
			continue;
		}

		/* Item is a candidate for bulk AIL insert.  */
		log_items[i++] = lv->lv_item;
		if (i >= LOG_ITEM_BATCH_SIZE) {
			xfs_log_item_batch_insert(ailp, &cur, log_items,
					LOG_ITEM_BATCH_SIZE, commit_lsn);
			i = 0;
		}
	}

	/* make sure we insert the remainder! */
	if (i)
		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);

	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->xa_lock);
}
/*
 * Commit the given transaction to the log.
 *
 * The XFS disk error handling mechanism is not based on a typical
 * transaction abort mechanism. Logically after the filesystem
 * gets marked 'SHUTDOWN', we can't let any new transactions
 * be durable - ie. committed to disk - because some metadata might
 * be inconsistent. In such cases, this returns an error, and the
 * caller may assume that all locked objects joined to the transaction
 * have already been unlocked as if the commit had succeeded.
 * Do not reference the transaction structure after this call.
 */
static int
__xfs_trans_commit(
	struct xfs_trans *tp,
	bool regrant)
{
	struct xfs_mount *mp = tp->t_mountp;
	xfs_lsn_t commit_lsn = -1;
	int error = 0;
	int sync = tp->t_flags & XFS_TRANS_SYNC;

	/*
	 * If there is nothing to be logged by the transaction,
	 * then unlock all of the items associated with the
	 * transaction and free the transaction structure.
	 * Also make sure to return any reserved blocks to
	 * the free pool.
	 */
	if (!(tp->t_flags & XFS_TRANS_DIRTY))
		goto out_unreserve;

	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = -EIO;
		goto out_unreserve;
	}

	ASSERT(tp->t_ticket != NULL);

	/*
	 * If we need to update the superblock, then do it now.
	 */
	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
		xfs_trans_apply_sb_deltas(tp);
	xfs_trans_apply_dquot_deltas(tp);

	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);

	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free(tp);

	/*
	 * If the transaction needs to be synchronous, then force the
	 * log out now and wait for it.
	 */
	if (sync) {
		error = _xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
		XFS_STATS_INC(xs_trans_sync);
	} else {
		XFS_STATS_INC(xs_trans_async);
	}

	return error;

out_unreserve:
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * It is indeed possible for the transaction to be not dirty but
	 * the dqinfo portion to be.  All that means is that we have some
	 * (non-persistent) quota reservations that need to be unreserved.
	 */
	xfs_trans_unreserve_and_mod_dquots(tp);
	if (tp->t_ticket) {
		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
		if (commit_lsn == -1 && !error)
			error = -EIO;
	}
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
	xfs_trans_free_items(tp, NULLCOMMITLSN, !!error);
	xfs_trans_free(tp);

	XFS_STATS_INC(xs_trans_empty);
	return error;
}
int
xfs_trans_commit(
	struct xfs_trans *tp)
{
	return __xfs_trans_commit(tp, false);
}
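/*
 * Sketch: callers that need the commit to be durable before returning
 * (fsync-like paths) mark the transaction synchronous first, which makes
 * __xfs_trans_commit() force the log up to the commit LSN and wait:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 *
 * Otherwise the commit is asynchronous and only enters the CIL.
 */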
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	struct xfs_trans *tp)
{
	struct xfs_mount *mp = tp->t_mountp;
	bool dirty = (tp->t_flags & XFS_TRANS_DIRTY);

	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}
#ifdef DEBUG
	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item_desc *lidp;

		list_for_each_entry(lidp, &tp->t_items, lid_trans)
			ASSERT(!(lidp->lid_item->li_type == XFS_LI_EFD));
	}
#endif
	xfs_trans_unreserve_and_mod_sb(tp);
	xfs_trans_unreserve_and_mod_dquots(tp);

	if (tp->t_ticket)
		xfs_log_done(mp, tp->t_ticket, NULL, false);

	/* mark this thread as no longer being in a transaction */
	current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);

	xfs_trans_free_items(tp, NULLCOMMITLSN, dirty);
	xfs_trans_free(tp);
}
/*
 * Roll from one trans in the sequence of PERMANENT transactions to
 * the next: permanent transactions are only flushed out when
 * committed with xfs_trans_commit(), but we still want to let chunks
 * of it go to the log as soon as possible. So we commit the
 * chunk we've been working on and get a new transaction to continue.
 */
int
__xfs_trans_roll(
	struct xfs_trans **tpp,
	struct xfs_inode *dp,
	int *committed)
{
	struct xfs_trans *trans;
	struct xfs_trans_res tres;
	int error;

	*committed = 0;

	/*
	 * Ensure that the inode is always logged.
	 */
	trans = *tpp;
	xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);

	/*
	 * Copy the critical parameters from one trans to the next.
	 */
	tres.tr_logres = trans->t_log_res;
	tres.tr_logcount = trans->t_log_count;

	*tpp = xfs_trans_dup(trans);

	/*
	 * Commit the current transaction.
	 * If this commit failed, then it'd just unlock those items that
	 * are not marked ihold. That also means that a filesystem shutdown
	 * is in progress. The caller takes the responsibility to cancel
	 * the duplicate transaction that gets returned.
	 */
	error = __xfs_trans_commit(trans, true);
	if (error)
		return error;

	*committed = 1;
	trans = *tpp;

	/*
	 * Reserve space in the log for the next transaction.
	 * This also pushes items in the "AIL", the list of logged items,
	 * out to disk if they are taking up space at the tail of the log
	 * that we want to use.  This requires that either nothing be locked
	 * across this call, or that anything that is locked be logged in
	 * the prior and the next transactions.
	 */
	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
	error = xfs_trans_reserve(trans, &tres, 0, 0);
	if (error)
		return error;

	/*
	 * Ensure that the inode is in the new transaction and locked.
	 */
	xfs_trans_ijoin(trans, dp, 0);
	return 0;
}
int
xfs_trans_roll(
	struct xfs_trans **tpp,
	struct xfs_inode *dp)
{
	int committed = 0;

	return __xfs_trans_roll(tpp, dp, &committed);
}
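/*
 * Usage sketch (hypothetical helper, not in this file): a long-running
 * operation keeps its inode locked across a chain of transactions by
 * rolling.  Each iteration does a bounded amount of work, logs the
 * inode, and lets xfs_trans_roll() commit the chunk and hand back a
 * fresh transaction carrying the same permanent log reservation.
 */
STATIC int
example_roll_loop(
	struct xfs_trans **tpp,
	struct xfs_inode *ip,
	int nsteps)
{
	int error;

	while (nsteps-- > 0) {
		/* ... modify a bounded amount of metadata here ... */
		xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

		error = xfs_trans_roll(tpp, ip);
		if (error)
			return error;	/* caller cancels *tpp */
	}
	return 0;
}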