// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_node_scan.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "rebalance.h"
#include "recovery.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "sb-clean.h"
#include "sb-downgrade.h"
#include "snapshot.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>
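
/*
 * Local helper for building a struct qstr from a NUL-terminated string:
 * only the length and name are filled in, the hash is left zero.
 */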
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
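
/*
 * Note in the superblock that a btree lost data, so that the next recovery
 * knows to schedule the repair passes that can reconstruct it:
 */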
void bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
{
	if (btree >= BTREE_ID_NR_MAX)
		return;

	u64 b = BIT_ULL(btree);

	if (!(c->sb.btrees_lost_data & b)) {
		bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));

		mutex_lock(&c->sb_lock);
		bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}
}
/*
 * For -o reconstruct_alloc: drop all alloc info so it can be rebuilt from
 * scratch - schedule the repair passes that will reconstruct it, silence the
 * fsck errors those passes are expected to hit, and throw away any alloc
 * keys still waiting in the journal:
 */
static void bch2_reconstruct_alloc(struct bch_fs *c)
{
	bch2_journal_log_msg(c, "dropping alloc info");
	bch_info(c, "dropping and reconstructing all alloc info");

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);

	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_info, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_lrus, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_extents_to_backpointers, ext->recovery_passes_required);
	__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_alloc_to_lru_refs, ext->recovery_passes_required);

	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);

	__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_freespace_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_bucket_gens_key_wrong, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_freespace_hole_missing, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_backpointer, ext->errors_silent);
	__set_bit_le64(BCH_FSCK_ERR_lru_entry_bad, ext->errors_silent);
	c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));

	bch2_shoot_down_journal_keys(c, BTREE_ID_alloc,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_backpointers,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_need_discard,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_freespace,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
	bch2_shoot_down_journal_keys(c, BTREE_ID_bucket_gens,
				     0, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
}
/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	darray_for_each(*keys, i)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}
/* journal replay: */
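
/*
 * Advance the journal's replay cursor to @seq, dropping our pins on the
 * journal entries that are now fully replayed so they can be reclaimed:
 */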
static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}
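
/*
 * Replay one key into the btree: the transaction commits at
 * trans->journal_res.seq, the sequence number of the journal entry the key
 * was originally read from:
 */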
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_intent|
		BTREE_ITER_not_extents;
	unsigned update_flags = BTREE_TRIGGER_norun;
	int ret;

	if (k->overwritten)
		return 0;

	trans->journal_res.seq = k->journal_seq;

	/*
	 * BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_filter_snapshots, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_cached;
	else
		update_flags |= BTREE_UPDATE_key_cache_reclaim;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	struct btree_path *path = btree_iter_path(trans, &iter);
	if (unlikely(!btree_path_node(path, k->level))) {
		bch2_trans_iter_exit(trans, &iter);
		bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
					  BTREE_MAX_DEPTH, 0, iter_flags);
		ret =   bch2_btree_iter_traverse(&iter) ?:
			bch2_btree_increase_depth(trans, iter.path, 0) ?:
			-BCH_ERR_transaction_restart_nested;
		goto out;
	}

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
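
/* Sort keys by the journal sequence number they were read from: */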
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}
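
/*
 * Replay keys from the journal into the btree: first in sorted btree order,
 * for better locality of btree access; keys that can't be replayed that way
 * without deadlocking the journal are then replayed in journal order,
 * unpinning journal entries as we go:
 */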
int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	DARRAY(struct journal_key *) keys_sorted = { 0 };
	struct journal *j = &c->journal;
	u64 start_seq	= c->journal_replay_seq_start;
	u64 end_seq	= c->journal_replay_seq_end;
	struct btree_trans *trans = NULL;
	bool immediate_flush = false;
	int ret = 0;

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	BUG_ON(!atomic_read(&keys->ref));

	move_gap(keys, keys->nr);
	trans = bch2_trans_get(c);

	/*
	 * First, attempt to replay keys in sorted order. This is more
	 * efficient - better locality of btree access - but some might fail if
	 * that would cause a journal deadlock.
	 */
	darray_for_each(*keys, k) {
		cond_resched();

		/*
		 * k->allocated means the key wasn't read in from the journal,
		 * rather it was from early repair code
		 */
		if (k->allocated)
			immediate_flush = true;

		/* Skip fastpath if we're low on space in the journal */
		ret = c->journal.watermark ? -1 :
			commit_do(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc|
				  BCH_TRANS_COMMIT_journal_reclaim|
				  (!k->allocated ? BCH_TRANS_COMMIT_no_journal_res : 0),
			     bch2_journal_replay_key(trans, k));
		BUG_ON(!ret && !k->overwritten);
		if (ret) {
			ret = darray_push(&keys_sorted, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now, replay any remaining keys in the order in which they appear in
	 * the journal, unpinning those journal entries as we go:
	 */
	sort(keys_sorted.data, keys_sorted.nr,
	     sizeof(keys_sorted.data[0]),
	     journal_sort_seq_cmp, NULL);

	darray_for_each(keys_sorted, kp) {
		cond_resched();

		struct journal_key *k = *kp;

		if (k->journal_seq)
			replay_now_at(j, k->journal_seq);
		else
			replay_now_at(j, j->replay_journal_seq_end);

		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc|
				(!k->allocated
				 ? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
				 : 0),
			     bch2_journal_replay_key(trans, k));
		bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
			    bch2_btree_id_str(k->btree_id), k->level);
		if (ret)
			goto err;

		BUG_ON(!k->overwritten);
	}

	/*
	 * We need to put our btree_trans before calling flush_all_pins(), since
	 * that will use a btree_trans internally
	 */
	bch2_trans_put(trans);
	trans = NULL;

	if (!c->opts.retain_recovery_info &&
	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay)
		bch2_journal_keys_put_initial(c);

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);

	/* if we did any repair, flush it immediately */
	if (immediate_flush) {
		bch2_journal_flush_all_pins(&c->journal);
		ret = bch2_journal_meta(&c->journal);
	}

	if (keys->nr)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	if (trans)
		bch2_trans_put(trans);
	darray_exit(&keys_sorted);
	bch_err_fn(c, ret);
	return ret;
}
/* journal replay early: */
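
/*
 * Apply a single non-key journal entry - btree roots, usage info, blacklists
 * and IO clocks - to in-memory state, before journal replay proper:
 */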
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, (struct bkey_i *) entry->start);
			r->error = 0;
		} else {
			r->error = -BCH_ERR_btree_node_read_error;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}
		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		unsigned nr_types = jset_entry_dev_usage_nr_types(u);

		rcu_read_lock();
		struct bch_dev *ca = bch2_dev_rcu(c, le32_to_cpu(u->dev));
		if (ca)
			for (unsigned i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
				ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
				ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
				ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
			}
		rcu_read_unlock();
		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}
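
/*
 * Walk either the superblock clean section (after a clean shutdown) or every
 * journal entry we read (after an unclean one), applying each entry with
 * journal_replay_entry_early(), then initialize in-memory usage from the
 * result:
 */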
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	if (clean) {
		for (struct jset_entry *entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			int ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (journal_replay_ignore(i))
				continue;

			vstruct_for_each(&i->j, entry) {
				int ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}
/* sb clean section: */
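
/*
 * Read in the btree roots noted by journal_replay_entry_early(): a missing
 * or unreadable root isn't fatal here - it flags the btree as having lost
 * data and schedules repair passes (or btree node scan) to reconstruct it:
 */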
static int read_btree_roots(struct bch_fs *c)
{
	int ret = 0;

	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
			continue;

		if (mustfix_fsck_err_on((ret = r->error),
					c, btree_root_bkey_invalid,
					"invalid btree root %s",
					bch2_btree_id_str(i)) ||
		    mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
					c, btree_root_read_error,
					"error reading btree root %s l=%u: %s",
					bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
			if (btree_id_is_alloc(i)) {
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_allocations);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_lrus);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_extents_to_backpointers);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_alloc_to_lru_refs);
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
				r->error = 0;
			} else if (!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes))) {
				bch_info(c, "will run btree node scan");
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_scan_for_btree_nodes);
				c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
			}

			ret = 0;
			bch2_btree_lost_data(c, i);
		}
	}

	for (unsigned i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b && !r->error) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc_fake(c, i, 0);
		}
	}
fsck_err:
	return ret;
}
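
/*
 * Decide whether a version upgrade should happen, and if so bump the
 * superblock version and note the recovery passes the upgrade requires;
 * returns true if the superblock needs to be written:
 */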
static bool check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_version	= bcachefs_metadata_version_current;
	unsigned latest_compatible = min(latest_version,
					 bch2_latest_compatible_version(c->sb.version));
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = min(old_version, latest_version);
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_upgrade(c, old_version, new_version);
		passes = ext->recovery_passes_required[0] & ~passes;

		if (passes) {
			prt_str(&buf, "  running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);

		bch2_sb_upgrade(c, new_version);

		printbuf_exit(&buf);
		return true;
	}

	return false;
}
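
/*
 * Main recovery path: read the superblock clean section and/or the journal,
 * replay early entries and then journal keys, run the recovery and repair
 * passes, and record the results in the superblock:
 */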
int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	int ret = 0;

	if (c->sb.clean) {
		clean = bch2_read_superblock_clean(c);
		ret = PTR_ERR_OR_ZERO(clean);
		if (ret)
			goto err;

		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.norecovery)
		c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;

	mutex_lock(&c->sb_lock);
	struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
	bool write_sb = false;

	if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)) {
		ext->recovery_passes_required[0] |=
			cpu_to_le64(bch2_recovery_passes_to_stable(BIT_ULL(BCH_RECOVERY_PASS_check_topology)));
		write_sb = true;
	}

	u64 sb_passes = bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
	if (sb_passes) {
		struct printbuf buf = PRINTBUF;
		prt_str(&buf, "superblock requires following recovery passes to be run:\n  ");
		prt_bitflags(&buf, bch2_recovery_passes, sb_passes);
		bch_info(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}

	if (bch2_check_version_downgrade(c)) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "Version downgrade required:");

		__le64 passes = ext->recovery_passes_required[0];
		bch2_sb_set_downgrade(c,
				      BCH_VERSION_MINOR(bcachefs_metadata_version_current),
				      BCH_VERSION_MINOR(c->sb.version));
		passes = ext->recovery_passes_required[0] & ~passes;
		if (passes) {
			prt_str(&buf, "\n  running recovery passes: ");
			prt_bitflags(&buf, bch2_recovery_passes,
				     bch2_recovery_passes_from_stable(le64_to_cpu(passes)));
		}

		bch_info(c, "%s", buf.buf);
		printbuf_exit(&buf);
		write_sb = true;
	}

	if (check_version_upgrade(c))
		write_sb = true;

	if (write_sb)
		bch2_write_super(c);

	c->recovery_passes_explicit |= bch2_recovery_passes_from_stable(le64_to_cpu(ext->recovery_passes_required[0]));
	mutex_unlock(&c->sb_lock);
	if (c->opts.fsck && IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	if (c->opts.fsck)
		set_bit(BCH_FS_fsck_running, &c->flags);

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	bch2_journal_pos_from_member_info_resume(c);

	if (!c->sb.clean || c->opts.retain_recovery_info) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (!journal_replay_ignore(*i)) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
					clean_but_journal_not_empty,
					"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c,
				    dirty_but_no_journal_entries,
				    "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore_blacklisted = false;
					(*i)->ignore_not_dirty = false;
					/*
					 * This was probably a NO_FLUSH entry,
					 * so last_seq was garbage - but we know
					 * we're only using a single journal
					 * entry, set it here:
					 */
					(*i)->j.last_seq = (*i)->j.seq;
					break;
				}
		}

		ret = bch2_journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = bch2_verify_superblock_clean(c, &clean,
							   last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	c->journal_replay_seq_start	= last_seq;
	c->journal_replay_seq_end	= blacklist_seq - 1;

	if (c->opts.reconstruct_alloc)
		bch2_reconstruct_alloc(c);
	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					     blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
						       blacklist_seq, journal_seq);
		if (ret) {
			bch_err_msg(c, ret, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				     journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_fsck_running, &c->flags);

	/* fsync if we fixed errors */
	if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_meta(&c->journal);
		bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
	}

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_errors_fixed, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags) &&
	    !test_bit(BCH_FS_error, &c->flags)) {
		bch2_flush_fsck_errs(c);

		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_errors_fixed, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_errors_fixed, &c->flags) ||
		    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_errors_not_fixed, &c->flags);
		}

		set_bit(BCH_FS_errors_fixed, &c->flags);
	}
	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	ext = bch2_sb_field_get(c->disk_sb.sb, ext);
	write_sb = false;

	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != le16_to_cpu(c->disk_sb.sb->version)) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, le16_to_cpu(c->disk_sb.sb->version));
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !(c->disk_sb.sb->compat[0] & cpu_to_le64(1ULL << BCH_COMPAT_alloc_info))) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_error, &c->flags) &&
	    !bch2_is_zero(ext->errors_silent, sizeof(ext->errors_silent))) {
		memset(ext->errors_silent, 0, sizeof(ext->errors_silent));
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    c->recovery_pass_done == BCH_RECOVERY_PASS_NR - 1 &&
	    ext->btrees_lost_data) {
		ext->btrees_lost_data = 0;
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_errors_not_fixed, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (bch2_blacklist_entries_gc(c))
		write_sb = true;

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		struct printbuf buf = PRINTBUF;
		bch2_version_to_text(&buf, c->sb.version_min);
		bch_info(c, "scanning for old btree nodes: min_version %s", buf.buf);
		printbuf_exit(&buf);

		ret =   bch2_fs_read_write_early(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	ret = 0;
out:
	bch2_flush_fsck_errs(c);

	if (!c->opts.retain_recovery_info) {
		bch2_journal_keys_put_initial(c);
		bch2_find_btree_nodes_exit(&c->found_btree_nodes);
	}
	if (!IS_ERR(clean))
		kfree(clean);

	if (!ret &&
	    test_bit(BCH_FS_need_delete_dead_snapshots, &c->flags) &&
	    !c->opts.nochanges) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}
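
/*
 * Initialize a freshly formatted filesystem: fake btree roots, a new
 * journal, marked superblocks, the initial subvolume and snapshot, and the
 * root directory and lost+found:
 */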
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	int ret;

	bch_notice(c, "initializing new filesystem");
	set_bit(BCH_FS_new_fs, &c->flags);

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_check_version_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_NR;
	set_bit(BCH_FS_may_go_rw, &c->flags);

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc_fake(c, i, 0);

	for_each_member_device(c, ca)
		bch2_dev_usage_init(ca);

	ret = bch2_fs_journal_alloc(c);
	if (ret)
		goto err;

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	ret = bch2_trans_mark_dev_sbs(c);
	bch_err_msg(c, ret, "marking superblocks");
	if (ret)
		goto err;

	for_each_online_member(c, ca)
		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
	bch_err_msg(c, ret, "creating root directory");
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	bch_err_msg(c, ret, "creating lost+found");
	if (ret)
		goto err;

	c->recovery_pass_done = BCH_RECOVERY_PASS_NR - 1;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(c, ret, "writing first journal entry");
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}