// SPDX-License-Identifier: GPL-2.0+
/*
 * linux/fs/jbd2/checkpoint.c
 *
 * Copyright 1999 Red Hat Software --- All Rights Reserved
 *
 * Checkpoint routines for the generic filesystem journaling code.
 * Part of the ext2fs journaling system.
 *
 * Checkpointing is the process of ensuring that a section of the log is
 * committed fully to disk, so that that portion of the log can be
 * reused by the journal.
 */
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <trace/events/jbd2.h>

/*
 * Unlink a buffer from a transaction checkpoint list.
 *
 * Called with j_list_lock held.
 */
static inline void __buffer_unlink(struct journal_head *jh)
{
	transaction_t *transaction = jh->b_cp_transaction;

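	/*
	 * b_cpnext/b_cpprev link the buffer into a circular list. After
	 * unlinking, advance the list head past this jh; if the head still
	 * points at jh it was the only element, so the list becomes empty.
	 */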
	jh->b_cpnext->b_cpprev = jh->b_cpprev;
	jh->b_cpprev->b_cpnext = jh->b_cpnext;
	if (transaction->t_checkpoint_list == jh) {
		transaction->t_checkpoint_list = jh->b_cpnext;
		if (transaction->t_checkpoint_list == jh)
			transaction->t_checkpoint_list = NULL;
	}
}

/*
 * __jbd2_log_wait_for_space: wait until there is space in the journal.
 *
 * Called under j_state_lock *only*.  It will be unlocked if we have to wait
 * for a checkpoint to free up some space in the log.
 */
void __jbd2_log_wait_for_space(journal_t *journal)
__acquires(&journal->j_state_lock)
__releases(&journal->j_state_lock)
{
	int nblocks, space_left;
	/* assert_spin_locked(&journal->j_state_lock); */
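
	/*
	 * Keep waiting until the log has room for at least one full
	 * transaction's worth of buffers (j_max_transaction_buffers is
	 * the upper bound on the buffers a single transaction may hold).
	 */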
	nblocks = journal->j_max_transaction_buffers;
	while (jbd2_log_space_left(journal) < nblocks) {
		write_unlock(&journal->j_state_lock);
		mutex_lock_io(&journal->j_checkpoint_mutex);

		/*
		 * Test again, another process may have checkpointed while we
		 * were waiting for the checkpoint lock. If there are no
		 * transactions ready to be checkpointed, try to recover
		 * journal space by calling cleanup_journal_tail(), and if
		 * that doesn't work, by waiting for the currently committing
		 * transaction to complete.  If there is absolutely no way
		 * to make progress, this is either a BUG or corrupted
		 * filesystem, so abort the journal and leave a stack
		 * trace for forensic evidence.
		 */
		write_lock(&journal->j_state_lock);
		if (journal->j_flags & JBD2_ABORT) {
			mutex_unlock(&journal->j_checkpoint_mutex);
			return;
		}
		spin_lock(&journal->j_list_lock);
		space_left = jbd2_log_space_left(journal);
		if (space_left < nblocks) {
			int chkpt = journal->j_checkpoint_transactions != NULL;
			tid_t tid = 0;

			if (journal->j_committing_transaction)
				tid = journal->j_committing_transaction->t_tid;
			spin_unlock(&journal->j_list_lock);
			write_unlock(&journal->j_state_lock);
			if (chkpt) {
				jbd2_log_do_checkpoint(journal);
			} else if (jbd2_cleanup_journal_tail(journal) == 0) {
				/* We were able to recover space; yay! */
				;
			} else if (tid) {
				/*
				 * jbd2_journal_commit_transaction() may want
				 * to take the checkpoint_mutex if JBD2_FLUSHED
				 * is set.  So we need to temporarily drop it.
				 */
				mutex_unlock(&journal->j_checkpoint_mutex);
				jbd2_log_wait_commit(journal, tid);
				write_lock(&journal->j_state_lock);
				continue;
			} else {
				printk(KERN_ERR "%s: needed %d blocks and "
				       "only had %d space available\n",
				       __func__, nblocks, space_left);
				printk(KERN_ERR "%s: no way to get more "
				       "journal space in %s\n", __func__,
				       journal->j_devname);
				WARN_ON(1);
				jbd2_journal_abort(journal, -EIO);
			}
			write_lock(&journal->j_state_lock);
		} else {
			spin_unlock(&journal->j_list_lock);
		}
		mutex_unlock(&journal->j_checkpoint_mutex);
	}
}
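
/*
 * Submit the batch of checkpoint buffers collected in j_chkpt_bhs for
 * write-out under a block plug so the requests can be merged, then drop
 * the buffer references taken when they were queued and reset the count.
 */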
static void
__flush_batch(journal_t *journal, int *batch_count)
{
	int i;
	struct blk_plug plug;

	blk_start_plug(&plug);
	for (i = 0; i < *batch_count; i++)
		write_dirty_buffer(journal->j_chkpt_bhs[i], REQ_SYNC);
	blk_finish_plug(&plug);

	for (i = 0; i < *batch_count; i++) {
		struct buffer_head *bh = journal->j_chkpt_bhs[i];

		BUFFER_TRACE(bh, "brelse");
		__brelse(bh);
		journal->j_chkpt_bhs[i] = NULL;
	}
	*batch_count = 0;
}

/*
 * Perform an actual checkpoint. We take the first transaction on the
 * list of transactions to be checkpointed and send all its buffers
 * to disk. We submit larger chunks of data at once.
 *
 * The journal should be locked before calling this function.
 * Called with j_checkpoint_mutex held.
 */
int jbd2_log_do_checkpoint(journal_t *journal)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	transaction_t *transaction;
	tid_t this_tid;
	int result, batch_count = 0;

	jbd2_debug(1, "Start checkpoint\n");

	/*
	 * First thing: if there are any transactions in the log which
	 * don't need checkpointing, just eliminate them from the
	 * journal straight away.
	 */
	result = jbd2_cleanup_journal_tail(journal);
	trace_jbd2_checkpoint(journal, result);
	jbd2_debug(1, "cleanup_journal_tail returned %d\n", result);
	if (result <= 0)
		return result;

	/*
	 * OK, we need to start writing disk blocks.  Take one transaction
	 * and write it.
	 */
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions)
		goto out;
	transaction = journal->j_checkpoint_transactions;
	if (transaction->t_chp_stats.cs_chp_time == 0)
		transaction->t_chp_stats.cs_chp_time = jiffies;
	this_tid = transaction->t_tid;
restart:
	/*
	 * If someone cleaned up this transaction while we slept, we're
	 * done (maybe it's a new transaction, but it fell at the same
	 * address).
	 */
	if (journal->j_checkpoint_transactions != transaction ||
	    transaction->t_tid != this_tid)
		goto out;

	/* checkpoint all of the transaction's buffers */
	while (transaction->t_checkpoint_list) {
		jh = transaction->t_checkpoint_list;
		bh = jh2bh(jh);

		if (jh->b_transaction != NULL) {
			transaction_t *t = jh->b_transaction;
			tid_t tid = t->t_tid;

			transaction->t_chp_stats.cs_forced_to_close++;
			spin_unlock(&journal->j_list_lock);
			if (unlikely(journal->j_flags & JBD2_UNMOUNT))
				/*
				 * The journal thread is dead; so
				 * starting and waiting for a commit
				 * to finish will cause us to wait for
				 * a _very_ long time.
				 */
				printk(KERN_ERR
				       "JBD2: %s: Waiting for Godot: block %llu\n",
				       journal->j_devname, (unsigned long long) bh->b_blocknr);

			if (batch_count)
				__flush_batch(journal, &batch_count);
			jbd2_log_start_commit(journal, tid);
			/*
			 * jbd2_journal_commit_transaction() may want
			 * to take the checkpoint_mutex if JBD2_FLUSHED
			 * is set, jbd2_update_log_tail() called by
			 * jbd2_journal_commit_transaction() may also take
			 * checkpoint_mutex.  So we need to temporarily
			 * drop it.
			 */
			mutex_unlock(&journal->j_checkpoint_mutex);
			jbd2_log_wait_commit(journal, tid);
			mutex_lock_io(&journal->j_checkpoint_mutex);
			spin_lock(&journal->j_list_lock);
			goto restart;
		}
		if (!trylock_buffer(bh)) {
			/*
			 * The buffer is locked, it may be writing back, or
			 * flushing out in the last couple of cycles, or
			 * re-adding into a new transaction, need to check
			 * it again until it's unlocked.
			 */
			get_bh(bh);
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			/* the journal_head may have gone by now */
			BUFFER_TRACE(bh, "brelse");
			__brelse(bh);
			goto retry;
		} else if (!buffer_dirty(bh)) {
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "remove from checkpoint");
			/*
			 * If the transaction was released or the checkpoint
			 * list was empty, we're done.
			 */
			if (__jbd2_journal_remove_checkpoint(jh) ||
			    !transaction->t_checkpoint_list)
				goto out;
		} else {
			unlock_buffer(bh);
			/*
			 * We are about to write the buffer, it could be
			 * raced by some other transaction shrink or buffer
			 * re-log logic once we release the j_list_lock,
			 * leave it on the checkpoint list and check status
			 * again to make sure it's clean.
			 */
			BUFFER_TRACE(bh, "queue");
			get_bh(bh);
			J_ASSERT_BH(bh, !buffer_jwrite(bh));
			journal->j_chkpt_bhs[batch_count++] = bh;
			transaction->t_chp_stats.cs_written++;
			transaction->t_checkpoint_list = jh->b_cpnext;
		}
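
		/*
		 * Flush the batch early if it is full, if we should give up
		 * the CPU or the j_list_lock, or if the list head has come
		 * back around to the first buffer queued in this batch.
		 */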
		if ((batch_count == JBD2_NR_BATCH) ||
		    need_resched() || spin_needbreak(&journal->j_list_lock) ||
		    jh2bh(transaction->t_checkpoint_list) == journal->j_chkpt_bhs[0])
			goto unlock_and_flush;
	}

	if (batch_count) {
		unlock_and_flush:
			spin_unlock(&journal->j_list_lock);
		retry:
			if (batch_count)
				__flush_batch(journal, &batch_count);
			spin_lock(&journal->j_list_lock);
			goto restart;
	}
out:
	spin_unlock(&journal->j_list_lock);
	result = jbd2_cleanup_journal_tail(journal);

	return (result < 0) ? result : 0;
}

/*
 * Check the list of checkpoint transactions for the journal to see if
 * we have already got rid of any since the last update of the log tail
 * in the journal superblock.  If so, we can instantly roll the
 * superblock forward to remove those transactions from the log.
 *
 * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
 *
 * Called with the journal lock held.
 *
 * This is the only part of the journaling code which really needs to be
 * aware of transaction aborts.  Checkpointing involves writing to the
 * main filesystem area rather than to the journal, so it can proceed
 * even in abort state, but we must not update the super block if
 * checkpointing may have failed.  Otherwise, we would lose some metadata
 * buffers which should be written back to the filesystem.
 */
int jbd2_cleanup_journal_tail(journal_t *journal)
{
	tid_t first_tid;
	unsigned long blocknr;

	if (is_journal_aborted(journal))
		return -EIO;

	if (!jbd2_journal_get_log_tail(journal, &first_tid, &blocknr))
		return 1;
	J_ASSERT(blocknr != 0);

	/*
	 * We need to make sure that any blocks that were recently written out
	 * --- perhaps by jbd2_log_do_checkpoint() --- are flushed out before
	 * we drop the transactions from the journal. It's unlikely this will
	 * be necessary, especially with an appropriately sized journal, but we
	 * need this to guarantee correctness.  Fortunately
	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
	 */
	if (journal->j_flags & JBD2_BARRIER)
		blkdev_issue_flush(journal->j_fs_dev);

	return __jbd2_update_log_tail(journal, first_tid, blocknr);
}

/* Checkpoint list management */
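
/*
 * How journal_shrink_one_cp_list() treats buffers that are still busy:
 * SHRINK_DESTROY removes every buffer unconditionally (used when tearing
 * the journal down), SHRINK_BUSY_STOP stops scanning at the first busy
 * buffer, and SHRINK_BUSY_SKIP skips busy buffers but keeps scanning.
 */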
enum shrink_type {SHRINK_DESTROY, SHRINK_BUSY_STOP, SHRINK_BUSY_SKIP};

/*
 * journal_shrink_one_cp_list
 *
 * Find all the written-back checkpoint buffers in the given list
 * and try to release them. If the whole transaction is released, set
 * the 'released' parameter. Return the number of released checkpointed
 * buffers.
 *
 * Called with j_list_lock held.
 */
static unsigned long journal_shrink_one_cp_list(struct journal_head *jh,
						enum shrink_type type,
						bool *released)
{
	struct journal_head *last_jh;
	struct journal_head *next_jh = jh;
	unsigned long nr_freed = 0;
	int ret;

	*released = false;
	if (!jh)
		return 0;
	last_jh = jh->b_cpprev;
	do {
		jh = next_jh;
		next_jh = jh->b_cpnext;
		if (type == SHRINK_DESTROY) {
			ret = __jbd2_journal_remove_checkpoint(jh);
		} else {
			ret = jbd2_journal_try_remove_checkpoint(jh);
			if (ret < 0) {
				if (type == SHRINK_BUSY_SKIP)
					continue;
				break;
			}
		}
		nr_freed++;
		if (ret) {
			*released = true;
			break;
		}
		if (need_resched())
			break;
	} while (jh != last_jh);

	return nr_freed;
}

/*
 * jbd2_journal_shrink_checkpoint_list
 *
 * Find 'nr_to_scan' written-back checkpoint buffers in the journal
 * and try to release them. Return the number of released checkpointed
 * buffers.
 */
unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal,
						  unsigned long *nr_to_scan)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	bool __maybe_unused released;
	tid_t first_tid = 0, last_tid = 0, next_tid = 0;
	tid_t tid = 0;
	unsigned long nr_freed = 0;
	unsigned long freed;

again:
	spin_lock(&journal->j_list_lock);
	if (!journal->j_checkpoint_transactions) {
		spin_unlock(&journal->j_list_lock);
		goto out;
	}

	/*
	 * Get next shrink transaction, resume previous scan or start
	 * over again. If some others do checkpoint and drop transaction
	 * from the checkpoint list, we ignore saved j_shrink_transaction
	 * and start over unconditionally.
	 */
	if (journal->j_shrink_transaction)
		transaction = journal->j_shrink_transaction;
	else
		transaction = journal->j_checkpoint_transactions;

	if (!first_tid)
		first_tid = transaction->t_tid;
	last_transaction = journal->j_checkpoint_transactions->t_cpprev;
	next_transaction = transaction;
	last_tid = last_transaction->t_tid;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		tid = transaction->t_tid;

		freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list,
						   SHRINK_BUSY_SKIP, &released);
		nr_freed += freed;
		(*nr_to_scan) -= min(*nr_to_scan, freed);
		if (*nr_to_scan == 0)
			break;
		if (need_resched() || spin_needbreak(&journal->j_list_lock))
			break;
	} while (transaction != last_transaction);

	if (transaction != last_transaction) {
		journal->j_shrink_transaction = next_transaction;
		next_tid = next_transaction->t_tid;
	} else {
		journal->j_shrink_transaction = NULL;
		next_tid = 0;
	}

	spin_unlock(&journal->j_list_lock);
	cond_resched();
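
	/*
	 * If there is still scan budget left and more transactions remain
	 * on the checkpoint list, resume scanning from the saved position.
	 */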
	if (*nr_to_scan && next_tid)
		goto again;
out:
	trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid,
					  nr_freed, next_tid);

	return nr_freed;
}

/*
 * journal_clean_checkpoint_list
 *
 * Find all the written-back checkpoint buffers in the journal and release them.
 * If 'destroy' is set, release all buffers unconditionally.
 *
 * Called with j_list_lock held.
 */
void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
	transaction_t *transaction, *last_transaction, *next_transaction;
	enum shrink_type type;
	bool released;

	transaction = journal->j_checkpoint_transactions;
	if (!transaction)
		return;

	type = destroy ? SHRINK_DESTROY : SHRINK_BUSY_STOP;
	last_transaction = transaction->t_cpprev;
	next_transaction = transaction;
	do {
		transaction = next_transaction;
		next_transaction = transaction->t_cpnext;
		journal_shrink_one_cp_list(transaction->t_checkpoint_list,
					   type, &released);
		/*
		 * This function only frees up some memory if possible so we
		 * don't have an obligation to finish processing. Bail out if
		 * preemption requested:
		 */
		if (need_resched())
			return;
		/*
		 * Stop scanning if we couldn't free the transaction. This
		 * avoids pointless scanning of transactions which still
		 * weren't checkpointed.
		 */
		if (!released)
			return;
	} while (transaction != last_transaction);
}

/*
 * Remove buffers from all checkpoint lists as journal is aborted and we just
 * need to free memory
 */
void jbd2_journal_destroy_checkpoint(journal_t *journal)
{
	/*
	 * We loop because __jbd2_journal_clean_checkpoint_list() may abort
	 * early due to a need to reschedule.
	 */
	while (1) {
		spin_lock(&journal->j_list_lock);
		if (!journal->j_checkpoint_transactions) {
			spin_unlock(&journal->j_list_lock);
			break;
		}
		__jbd2_journal_clean_checkpoint_list(journal, true);
		spin_unlock(&journal->j_list_lock);
		cond_resched();
	}
}

/*
 * journal_remove_checkpoint: called after a buffer has been committed
 * to disk (either by being write-back flushed to disk, or being
 * committed to the log).
 *
 * We cannot safely clean a transaction out of the log until all of the
 * buffer updates committed in that transaction have safely been stored
 * elsewhere on disk.  To achieve this, all of the buffers in a
 * transaction need to be maintained on the transaction's checkpoint
 * lists until they have been rewritten, at which point this function is
 * called to remove the buffer from the existing transaction's
 * checkpoint lists.
 *
 * The function returns 1 if it frees the transaction, 0 otherwise.
 * The function can free jh and bh.
 *
 * This function is called with j_list_lock held.
 */
int __jbd2_journal_remove_checkpoint(struct journal_head *jh)
{
	struct transaction_chp_stats_s *stats;
	transaction_t *transaction;
	journal_t *journal;
	struct buffer_head *bh = jh2bh(jh);

	JBUFFER_TRACE(jh, "entry");

	transaction = jh->b_cp_transaction;
	if (!transaction) {
		JBUFFER_TRACE(jh, "not on transaction");
		return 0;
	}
	journal = transaction->t_journal;

	JBUFFER_TRACE(jh, "removing from transaction");

	/*
	 * If we have failed to write the buffer out to disk, the filesystem
	 * may become inconsistent. We cannot abort the journal here since
	 * we hold j_list_lock and we have to be careful about races with
	 * jbd2_journal_destroy(). So mark the writeback IO error in the
	 * journal here and we abort the journal later from a better context.
	 */
	if (buffer_write_io_error(bh))
		set_bit(JBD2_CHECKPOINT_IO_ERROR, &journal->j_atomic_flags);

	__buffer_unlink(jh);
	jh->b_cp_transaction = NULL;
	percpu_counter_dec(&journal->j_checkpoint_jh_count);
	jbd2_journal_put_journal_head(jh);

	/* Is this transaction empty? */
	if (transaction->t_checkpoint_list)
		return 0;

	/*
	 * There is one special case to worry about: if we have just pulled the
	 * buffer off a running or committing transaction's checkpoint list,
	 * then even if the checkpoint list is empty, the transaction obviously
	 * cannot be dropped!
	 *
	 * The locking here around t_state is a bit sleazy.
	 * See the comment at the end of jbd2_journal_commit_transaction().
	 */
	if (transaction->t_state != T_FINISHED)
		return 0;

	/*
	 * OK, that was the last buffer for the transaction, we can now
	 * safely remove this transaction from the log.
	 */
	stats = &transaction->t_chp_stats;
	if (stats->cs_chp_time)
		stats->cs_chp_time = jbd2_time_diff(stats->cs_chp_time,
						    jiffies);
	trace_jbd2_checkpoint_stats(journal->j_fs_dev->bd_dev,
				    transaction->t_tid, stats);

	__jbd2_journal_drop_transaction(journal, transaction);
	jbd2_journal_free_transaction(transaction);
	return 1;
}

/*
 * Check the checkpoint buffer and try to remove it from the checkpoint
 * list if it's clean.  Returns -EBUSY if it is not clean, returns 1 if
 * it frees the transaction, 0 otherwise.
 *
 * This function is called with j_list_lock held.
 */
int jbd2_journal_try_remove_checkpoint(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	if (jh->b_transaction)
		return -EBUSY;
	if (!trylock_buffer(bh))
		return -EBUSY;
	if (buffer_dirty(bh)) {
		unlock_buffer(bh);
		return -EBUSY;
	}
	unlock_buffer(bh);

	/*
	 * Buffer is clean and the IO has finished (we held the buffer
	 * lock) so the checkpoint is done. We can safely remove the
	 * buffer from this transaction.
	 */
	JBUFFER_TRACE(jh, "remove from checkpoint list");
	return __jbd2_journal_remove_checkpoint(jh);
}

/*
 * journal_insert_checkpoint: put a committed buffer onto a checkpoint
 * list so that we know when it is safe to clean the transaction out of
 * the log.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_insert_checkpoint(struct journal_head *jh,
				      transaction_t *transaction)
{
	JBUFFER_TRACE(jh, "entry");
	J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
	J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);

	/* Get reference for checkpointing transaction */
	jbd2_journal_grab_journal_head(jh2bh(jh));
	jh->b_cp_transaction = transaction;

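	/*
	 * Insert the buffer into the transaction's circular checkpoint
	 * list just before the current head, then make it the new head.
	 */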
	if (!transaction->t_checkpoint_list) {
		jh->b_cpnext = jh->b_cpprev = jh;
	} else {
		jh->b_cpnext = transaction->t_checkpoint_list;
		jh->b_cpprev = transaction->t_checkpoint_list->b_cpprev;
		jh->b_cpprev->b_cpnext = jh;
		jh->b_cpnext->b_cpprev = jh;
	}
	transaction->t_checkpoint_list = jh;
	percpu_counter_inc(&transaction->t_journal->j_checkpoint_jh_count);
}

/*
 * We've finished with this transaction structure: adios...
 *
 * The transaction must have no links except for the checkpoint by this
 * point.
 *
 * Called with the journal locked.
 * Called with j_list_lock held.
 */
void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transaction)
{
	assert_spin_locked(&journal->j_list_lock);

	journal->j_shrink_transaction = NULL;
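	/*
	 * Unlink the transaction from the journal's circular list of
	 * checkpoint transactions; if it was the only entry, the list
	 * head becomes NULL.
	 */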
	if (transaction->t_cpnext) {
		transaction->t_cpnext->t_cpprev = transaction->t_cpprev;
		transaction->t_cpprev->t_cpnext = transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions =
				transaction->t_cpnext;
		if (journal->j_checkpoint_transactions == transaction)
			journal->j_checkpoint_transactions = NULL;
	}

	J_ASSERT(transaction->t_state == T_FINISHED);
	J_ASSERT(transaction->t_buffers == NULL);
	J_ASSERT(transaction->t_forget == NULL);
	J_ASSERT(transaction->t_shadow_list == NULL);
	J_ASSERT(transaction->t_checkpoint_list == NULL);
	J_ASSERT(atomic_read(&transaction->t_updates) == 0);
	J_ASSERT(journal->j_committing_transaction != transaction);
	J_ASSERT(journal->j_running_transaction != transaction);

	trace_jbd2_drop_transaction(journal, transaction);

	jbd2_debug(1, "Dropping transaction %d, all done\n", transaction->t_tid);
}