1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2014 Datera Inc.
8 #include "alloc_background.h"
9 #include "alloc_foreground.h"
10 #include "backpointers.h"
11 #include "bkey_methods.h"
13 #include "btree_journal_iter.h"
14 #include "btree_key_cache.h"
15 #include "btree_locking.h"
16 #include "btree_node_scan.h"
17 #include "btree_update_interior.h"
29 #include "recovery_passes.h"
35 #include <linux/slab.h>
36 #include <linux/bitops.h>
37 #include <linux/freezer.h>
38 #include <linux/kthread.h>
39 #include <linux/preempt.h>
40 #include <linux/rcupdate.h>
41 #include <linux/sched/task.h>
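/*
 * Positive sentinel return codes used by the topology repair code below, kept
 * distinct from (negative) bcachefs error codes: they tell the caller to drop
 * the node currently being looked at, to drop the previous node, or that
 * missing keys were filled in from the btree node scan and the node needs to
 * be re-checked.
 */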
43 #define DROP_THIS_NODE 10
44 #define DROP_PREV_NODE 11
45 #define DID_FILL_FROM_SCAN 12
47 static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
49 return (struct bkey_s) {{{
51 (struct bch_val *) k.v
55 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
58 write_seqcount_begin(&c->gc_pos_lock);
60 write_seqcount_end(&c->gc_pos_lock);
64 static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
66 BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) < 0);
67 __gc_pos_set(c, new_pos);
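/*
 * Topology repair may need to rewrite a node's min/max keys; that requires the
 * btree_ptr_v2 key format (which carries min_key), so older KEY_TYPE_btree_ptr
 * keys are upgraded to v2 here before being modified.
 */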
70 static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
72 switch (b->key.k.type) {
73 case KEY_TYPE_btree_ptr: {
74 struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);
78 dst->v.seq = b->data->keys.seq;
79 dst->v.sectors_written = 0;
81 dst->v.min_key = b->data->min_key;
82 set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
83 memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
86 case KEY_TYPE_btree_ptr_v2:
87 bkey_copy(&dst->k_i, &b->key);
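/*
 * set_node_min()/set_node_max() repair a node's key range: the node's key is
 * rewritten as a btree_ptr_v2 with BTREE_PTR_RANGE_UPDATED set, the new key is
 * inserted into the journal keys (this pass runs during recovery, so updates
 * go via the journal key overlay rather than normal btree updates), and any
 * keys in the node that now fall outside its range are dropped.
 */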
94 static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
96 struct bkey_i_btree_ptr_v2 *new;
99 if (c->opts.verbose) {
100 struct printbuf buf = PRINTBUF;
102 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
103 prt_str(&buf, " -> ");
104 bch2_bpos_to_text(&buf, new_min);
106 bch_info(c, "%s(): %s", __func__, buf.buf);
110 new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
112 return -BCH_ERR_ENOMEM_gc_repair_key;
114 btree_ptr_to_v2(b, new);
115 b->data->min_key = new_min;
116 new->v.min_key = new_min;
117 SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
119 ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
125 bch2_btree_node_drop_keys_outside_node(b);
126 bkey_copy(&b->key, &new->k_i);
130 static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
132 struct bkey_i_btree_ptr_v2 *new;
135 if (c->opts.verbose) {
136 struct printbuf buf = PRINTBUF;
138 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
139 prt_str(&buf, " -> ");
140 bch2_bpos_to_text(&buf, new_max);
142 bch_info(c, "%s(): %s", __func__, buf.buf);
146 ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
150 new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
152 return -BCH_ERR_ENOMEM_gc_repair_key;
154 btree_ptr_to_v2(b, new);
155 b->data->max_key = new_max;
157 SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
159 ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
165 bch2_btree_node_drop_keys_outside_node(b);
167 mutex_lock(&c->btree_cache.lock);
168 bch2_btree_node_hash_remove(&c->btree_cache, b);
170 bkey_copy(&b->key, &new->k_i);
171 ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
173 mutex_unlock(&c->btree_cache.lock);
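/*
 * Check that a child node's range lines up with the end of the previous child
 * (or with the parent's min_key, for the first child): gaps at level 1 may be
 * filled in from nodes found by the btree node scan; otherwise, depending on
 * which of the two nodes is newer, repair either adjusts min/max keys or asks
 * the caller to drop one of the overlapping nodes.
 */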
177 static int btree_check_node_boundaries(struct bch_fs *c, struct btree *b,
178 struct btree *prev, struct btree *cur,
179 struct bpos *pulled_from_scan)
181 struct bpos expected_start = !prev
183 : bpos_successor(prev->key.k.p);
184 struct printbuf buf = PRINTBUF;
187 BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
188 !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
191 if (bpos_eq(expected_start, cur->data->min_key))
194 prt_printf(&buf, " at btree %s level %u:\n parent: ",
195 bch2_btree_id_str(b->c.btree_id), b->c.level);
196 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
199 prt_printf(&buf, "\n prev: ");
200 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
203 prt_str(&buf, "\n next: ");
204 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
206 if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
207 if (b->c.level == 1 &&
208 bpos_lt(*pulled_from_scan, cur->data->min_key)) {
209 ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
211 bpos_predecessor(cur->data->min_key));
215 *pulled_from_scan = cur->data->min_key;
216 ret = DID_FILL_FROM_SCAN;
218 if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
219 "btree node with incorrect min_key%s", buf.buf))
220 ret = set_node_min(c, cur, expected_start);
222 } else { /* overlap */
223 if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
224 if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
225 if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_next_node,
226 "btree node overwritten by next node%s", buf.buf))
227 ret = DROP_PREV_NODE;
229 if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
230 "btree node with incorrect max_key%s", buf.buf))
231 ret = set_node_max(c, prev,
232 bpos_predecessor(cur->data->min_key));
235 if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
236 if (mustfix_fsck_err(c, btree_node_topology_overwritten_by_prev_node,
237 "btree node overwritten by prev node%s", buf.buf))
238 ret = DROP_THIS_NODE;
240 if (mustfix_fsck_err(c, btree_node_topology_bad_min_key,
241 "btree node with incorrect min_key%s", buf.buf))
242 ret = set_node_min(c, cur, expected_start);
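/*
 * Check that the last child's max_key matches the parent's: if it doesn't,
 * either fill the missing range from scanned nodes (at level 1) or extend the
 * child to cover the parent's end position.
 */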
252 static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
253 struct btree *child, struct bpos *pulled_from_scan)
255 struct printbuf buf = PRINTBUF;
258 if (bpos_eq(child->key.k.p, b->key.k.p))
261 prt_printf(&buf, "at btree %s level %u:\n parent: ",
262 bch2_btree_id_str(b->c.btree_id), b->c.level);
263 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
265 prt_str(&buf, "\n child: ");
266 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
268 if (mustfix_fsck_err(c, btree_node_topology_bad_max_key,
269 "btree node with incorrect max_key%s", buf.buf)) {
270 if (b->c.level == 1 &&
271 bpos_lt(*pulled_from_scan, b->key.k.p)) {
272 ret = bch2_get_scanned_nodes(c, b->c.btree_id, 0,
273 bpos_successor(child->key.k.p), b->key.k.p);
277 *pulled_from_scan = b->key.k.p;
278 ret = DID_FILL_FROM_SCAN;
280 ret = set_node_max(c, child, b->key.k.p);
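/*
 * Walk an interior node's children in two passes: the first checks that each
 * child is readable and that child boundaries line up, dropping or repairing
 * nodes as needed; the second recurses into each surviving child. An interior
 * node left with no good children is reported to the caller via
 * DROP_THIS_NODE.
 */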
289 static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
290 struct bpos *pulled_from_scan)
292 struct bch_fs *c = trans->c;
293 struct btree_and_journal_iter iter;
295 struct bkey_buf prev_k, cur_k;
296 struct btree *prev = NULL, *cur = NULL;
297 bool have_child, new_pass = false;
298 struct printbuf buf = PRINTBUF;
304 bch2_bkey_buf_init(&prev_k);
305 bch2_bkey_buf_init(&cur_k);
308 have_child = new_pass = false;
309 bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
310 iter.prefetch = true;
312 while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
313 BUG_ON(bpos_lt(k.k->p, b->data->min_key));
314 BUG_ON(bpos_gt(k.k->p, b->data->max_key));
316 bch2_btree_and_journal_iter_advance(&iter);
317 bch2_bkey_buf_reassemble(&cur_k, c, k);
319 cur = bch2_btree_node_get_noiter(trans, cur_k.k,
320 b->c.btree_id, b->c.level - 1,
322 ret = PTR_ERR_OR_ZERO(cur);
324 printbuf_reset(&buf);
325 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
327 if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), c,
328 btree_node_unreadable,
329 "Topology repair: unreadable btree node at btree %s level %u:\n"
331 bch2_btree_id_str(b->c.btree_id),
334 bch2_btree_node_evict(trans, cur_k.k);
336 ret = bch2_journal_key_delete(c, b->c.btree_id,
337 b->c.level, cur_k.k->k.p);
341 if (!btree_id_is_alloc(b->c.btree_id)) {
342 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
349 bch_err_msg(c, ret, "getting btree node");
353 if (bch2_btree_node_is_stale(c, cur)) {
354 bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
355 six_unlock_read(&cur->c.lock);
356 bch2_btree_node_evict(trans, cur_k.k);
357 ret = bch2_journal_key_delete(c, b->c.btree_id,
358 b->c.level, cur_k.k->k.p);
365 ret = btree_check_node_boundaries(c, b, prev, cur, pulled_from_scan);
366 if (ret == DID_FILL_FROM_SCAN) {
371 if (ret == DROP_THIS_NODE) {
372 six_unlock_read(&cur->c.lock);
373 bch2_btree_node_evict(trans, cur_k.k);
374 ret = bch2_journal_key_delete(c, b->c.btree_id,
375 b->c.level, cur_k.k->k.p);
383 six_unlock_read(&prev->c.lock);
386 if (ret == DROP_PREV_NODE) {
387 bch_info(c, "dropped prev node");
388 bch2_btree_node_evict(trans, prev_k.k);
389 ret = bch2_journal_key_delete(c, b->c.btree_id,
390 b->c.level, prev_k.k->k.p);
394 bch2_btree_and_journal_iter_exit(&iter);
401 bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
404 if (!ret && !IS_ERR_OR_NULL(prev)) {
406 ret = btree_repair_node_end(c, b, prev, pulled_from_scan);
407 if (ret == DID_FILL_FROM_SCAN) {
413 if (!IS_ERR_OR_NULL(prev))
414 six_unlock_read(&prev->c.lock);
416 if (!IS_ERR_OR_NULL(cur))
417 six_unlock_read(&cur->c.lock);
423 bch2_btree_and_journal_iter_exit(&iter);
428 bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
429 iter.prefetch = true;
431 while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
432 bch2_bkey_buf_reassemble(&cur_k, c, k);
433 bch2_btree_and_journal_iter_advance(&iter);
435 cur = bch2_btree_node_get_noiter(trans, cur_k.k,
436 b->c.btree_id, b->c.level - 1,
438 ret = PTR_ERR_OR_ZERO(cur);
440 bch_err_msg(c, ret, "getting btree node");
444 ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
445 six_unlock_read(&cur->c.lock);
448 if (ret == DROP_THIS_NODE) {
449 bch2_btree_node_evict(trans, cur_k.k);
450 ret = bch2_journal_key_delete(c, b->c.btree_id,
451 b->c.level, cur_k.k->k.p);
461 printbuf_reset(&buf);
462 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
464 if (mustfix_fsck_err_on(!have_child, c,
465 btree_node_topology_interior_node_empty,
466 "empty interior btree node at btree %s level %u\n"
468 bch2_btree_id_str(b->c.btree_id),
469 b->c.level, buf.buf))
470 ret = DROP_THIS_NODE;
473 if (!IS_ERR_OR_NULL(prev))
474 six_unlock_read(&prev->c.lock);
475 if (!IS_ERR_OR_NULL(cur))
476 six_unlock_read(&cur->c.lock);
478 bch2_btree_and_journal_iter_exit(&iter);
480 if (!ret && new_pass)
483 BUG_ON(!ret && bch2_btree_node_check_topology(trans, b));
485 bch2_bkey_buf_exit(&prev_k, c);
486 bch2_bkey_buf_exit(&cur_k, c);
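/*
 * Entry point for the topology repair pass: repair the tree under each btree
 * root; unreadable roots are either reconstructed from nodes found by the
 * btree node scan or replaced with a fake empty root.
 */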
491 int bch2_check_topology(struct bch_fs *c)
493 struct btree_trans *trans = bch2_trans_get(c);
494 struct bpos pulled_from_scan = POS_MIN;
497 for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
498 struct btree_root *r = bch2_btree_id_root(c, i);
499 bool reconstructed_root = false;
502 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
506 bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));
511 if (!bch2_btree_has_scanned_nodes(c, i)) {
512 mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
513 "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
514 bch2_btree_root_alloc_fake_trans(trans, i, 0);
516 bch2_btree_root_alloc_fake_trans(trans, i, 1);
517 bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
518 ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
523 reconstructed_root = true;
526 struct btree *b = r->b;
528 btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
529 ret = bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
530 six_unlock_read(&b->c.lock);
532 if (ret == DROP_THIS_NODE) {
533 bch2_btree_node_hash_remove(&c->btree_cache, b);
534 mutex_lock(&c->btree_cache.lock);
535 list_move(&b->list, &c->btree_cache.freeable);
536 mutex_unlock(&c->btree_cache.lock);
540 if (!reconstructed_root)
541 goto reconstruct_root;
543 bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
544 bch2_btree_root_alloc_fake_trans(trans, i, 0);
550 bch2_trans_put(trans);
554 /* marking of btree keys/nodes: */
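/*
 * "Marking" recomputes allocation information (bucket usage, replicas, reflink
 * refcounts, stripe sector counts) by re-running triggers on every key with
 * BTREE_TRIGGER_gc; the recomputed copies are compared against the live
 * accounting by the *_done() functions further down.
 */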
556 static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
557 unsigned level, struct btree **prev,
558 struct btree_iter *iter, struct bkey_s_c k,
561 struct bch_fs *c = trans->c;
564 struct btree_path *path = btree_iter_path(trans, iter);
565 struct btree *b = path_l(path)->b;
568 int ret = bch2_btree_node_check_topology(trans, b);
575 struct bkey deleted = KEY(0, 0, 0);
576 struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
577 struct printbuf buf = PRINTBUF;
583 BUG_ON(bch2_journal_seq_verify &&
584 k.k->version.lo > atomic64_read(&c->journal.seq));
586 if (fsck_err_on(k.k->version.lo > atomic64_read(&c->key_version), c,
587 bkey_version_in_future,
588 "key version number higher than recorded %llu\n %s",
589 atomic64_read(&c->key_version),
590 (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
591 atomic64_set(&c->key_version, k.k->version.lo);
594 if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
595 c, btree_bitmap_not_marked,
596 "btree ptr not marked in member info btree allocated bitmap\n %s",
597 (printbuf_reset(&buf),
598 bch2_bkey_val_to_text(&buf, c, k),
600 mutex_lock(&c->sb_lock);
601 bch2_dev_btree_bitmap_mark(c, k);
603 mutex_unlock(&c->sb_lock);
607 * We require a commit before key_trigger() because
608 * key_trigger(BTREE_TRIGGER_GC) is not idempotent; we'll calculate the
609 * wrong result if we run it multiple times.
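* Hence, if check/repair queued up any updates, we commit them and return a
* transaction restart below, so that this key is re-processed from the top
* before the gc trigger itself runs.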
611 unsigned flags = !iter ? BTREE_TRIGGER_is_root : 0;
613 ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
614 BTREE_TRIGGER_check_repair|flags);
618 if (trans->nr_updates) {
619 ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
620 -BCH_ERR_transaction_restart_nested;
624 ret = bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(k),
625 BTREE_TRIGGER_gc|flags);
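/*
 * Mark one btree: the root key is marked first, retrying on transaction
 * restart, then each level is walked top-down from the root. target_depth is 1
 * for btrees whose leaf keys don't need gc, so for those only interior nodes
 * are walked.
 */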
633 static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool initial)
635 struct bch_fs *c = trans->c;
636 int level = 0, target_depth = btree_node_type_needs_gc(__btree_node_type(0, btree)) ? 0 : 1;
639 /* We need to make sure every leaf node is readable before going RW */
646 bch2_trans_begin(trans);
648 struct btree_iter iter;
649 bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
650 0, bch2_btree_id_root(c, btree)->b->c.level, 0);
651 struct btree *b = bch2_btree_iter_peek_node(&iter);
652 ret = PTR_ERR_OR_ZERO(b);
656 if (b != btree_node_root(c, b)) {
657 bch2_trans_iter_exit(trans, &iter);
661 gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
662 struct bkey_s_c k = bkey_i_to_s_c(&b->key);
663 ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
666 bch2_trans_iter_exit(trans, &iter);
667 } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
672 for (; level >= target_depth; --level) {
673 struct btree *prev = NULL;
674 struct btree_iter iter;
675 bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, level,
676 BTREE_ITER_prefetch);
678 ret = for_each_btree_key_continue(trans, iter, 0, k, ({
679 gc_pos_set(c, gc_pos_btree(btree, level, k.k->p));
680 bch2_gc_mark_key(trans, btree, level, &prev, &iter, k, initial);
689 static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
691 return cmp_int(gc_btree_order(l), gc_btree_order(r));
694 static int bch2_gc_btrees(struct bch_fs *c)
696 struct btree_trans *trans = bch2_trans_get(c);
697 enum btree_id ids[BTREE_ID_NR];
701 for (i = 0; i < BTREE_ID_NR; i++)
703 bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
705 for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
706 unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
708 if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
711 ret = bch2_gc_btree(trans, btree, true);
713 if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
714 c, btree_node_read_error,
715 "btree node read error for %s",
716 bch2_btree_id_str(btree)))
717 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
720 bch2_trans_put(trans);
725 static int bch2_mark_superblocks(struct bch_fs *c)
727 mutex_lock(&c->sb_lock);
728 gc_pos_set(c, gc_phase(GC_PHASE_sb));
730 int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
731 mutex_unlock(&c->sb_lock);
735 static void bch2_gc_free(struct bch_fs *c)
737 genradix_free(&c->reflink_gc_table);
738 genradix_free(&c->gc_stripes);
740 for_each_member_device(c, ca) {
741 kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
742 ca->buckets_gc = NULL;
744 free_percpu(ca->usage_gc);
748 free_percpu(c->usage_gc);
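/*
 * Compare the usage totals recomputed by gc (the *_gc copies) against the live
 * accounting and, with fsck's approval via the copy_*_field() macros below,
 * overwrite any counters that turn out to be wrong.
 */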
752 static int bch2_gc_done(struct bch_fs *c)
754 struct bch_dev *ca = NULL;
755 struct printbuf buf = PRINTBUF;
759 percpu_down_write(&c->mark_lock);
761 #define copy_field(_err, _f, _msg, ...) \
762 if (fsck_err_on(dst->_f != src->_f, c, _err, \
763 _msg ": got %llu, should be %llu", ##__VA_ARGS__, \
766 #define copy_dev_field(_err, _f, _msg, ...) \
767 copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
768 #define copy_fs_field(_err, _f, _msg, ...) \
769 copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
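/*
 * Illustrative expansion (not from the original source): a call like
 * copy_dev_field(dev_usage_buckets_wrong, d[i].buckets, "%s buckets", ...)
 * becomes an fsck_err_on() that reports "dev %u has wrong %s buckets: got X,
 * should be Y" and, if the fix is accepted, copies the gc-computed value from
 * src into dst.
 */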
771 for (i = 0; i < ARRAY_SIZE(c->usage); i++)
772 bch2_fs_usage_acc_to_base(c, i);
774 __for_each_member_device(c, ca) {
775 struct bch_dev_usage *dst = ca->usage_base;
776 struct bch_dev_usage *src = (void *)
777 bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
780 for (i = 0; i < BCH_DATA_NR; i++) {
781 copy_dev_field(dev_usage_buckets_wrong,
782 d[i].buckets, "%s buckets", bch2_data_type_str(i));
783 copy_dev_field(dev_usage_sectors_wrong,
784 d[i].sectors, "%s sectors", bch2_data_type_str(i));
785 copy_dev_field(dev_usage_fragmented_wrong,
786 d[i].fragmented, "%s fragmented", bch2_data_type_str(i));
791 unsigned nr = fs_usage_u64s(c);
792 struct bch_fs_usage *dst = c->usage_base;
793 struct bch_fs_usage *src = (void *)
794 bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
796 copy_fs_field(fs_usage_hidden_wrong,
798 copy_fs_field(fs_usage_btree_wrong,
801 copy_fs_field(fs_usage_data_wrong,
803 copy_fs_field(fs_usage_cached_wrong,
805 copy_fs_field(fs_usage_reserved_wrong,
806 b.reserved, "reserved");
807 copy_fs_field(fs_usage_nr_inodes_wrong,
808 b.nr_inodes, "nr_inodes");
810 for (i = 0; i < BCH_REPLICAS_MAX; i++)
811 copy_fs_field(fs_usage_persistent_reserved_wrong,
812 persistent_reserved[i],
813 "persistent_reserved[%i]", i);
815 for (i = 0; i < c->replicas.nr; i++) {
816 struct bch_replicas_entry_v1 *e =
817 cpu_replicas_entry(&c->replicas, i);
819 printbuf_reset(&buf);
820 bch2_replicas_entry_to_text(&buf, e);
822 copy_fs_field(fs_usage_replicas_wrong,
823 replicas[i], "%s", buf.buf);
828 #undef copy_dev_field
829 #undef copy_stripe_field
834 percpu_up_write(&c->mark_lock);
839 static int bch2_gc_start(struct bch_fs *c)
843 c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
844 sizeof(u64), GFP_KERNEL);
846 bch_err(c, "error allocating c->usage_gc");
847 return -BCH_ERR_ENOMEM_gc_start;
850 for_each_member_device(c, ca) {
851 BUG_ON(ca->usage_gc);
853 ca->usage_gc = alloc_percpu(struct bch_dev_usage);
855 bch_err(c, "error allocating ca->usage_gc");
857 return -BCH_ERR_ENOMEM_gc_start;
860 this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
861 ca->mi.nbuckets - ca->mi.first_bucket);
867 /* returns true if not equal */
868 static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
869 struct bch_alloc_v4 r)
871 return l.gen != r.gen ||
872 l.oldest_gen != r.oldest_gen ||
873 l.data_type != r.data_type ||
874 l.dirty_sectors != r.dirty_sectors ||
875 l.cached_sectors != r.cached_sectors ||
876 l.stripe_redundancy != r.stripe_redundancy ||
877 l.stripe != r.stripe;
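/*
 * Compare one alloc key against the bucket state recomputed by gc, report
 * mismatched fields via fsck, and rewrite the key with corrected values if
 * anything changed.
 */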
880 static int bch2_alloc_write_key(struct btree_trans *trans,
881 struct btree_iter *iter,
885 struct bch_fs *c = trans->c;
886 struct bkey_i_alloc_v4 *a;
887 struct bch_alloc_v4 old_gc, gc, old_convert, new;
888 const struct bch_alloc_v4 *old;
891 if (!bucket_valid(ca, k.k->p.offset))
894 old = bch2_alloc_to_v4(k, &old_convert);
897 percpu_down_read(&c->mark_lock);
898 __bucket_m_to_alloc(&gc, *gc_bucket(ca, iter->pos.offset));
902 if ((old->data_type == BCH_DATA_sb ||
903 old->data_type == BCH_DATA_journal) &&
904 !bch2_dev_is_online(ca)) {
905 gc.data_type = old->data_type;
906 gc.dirty_sectors = old->dirty_sectors;
910 * gc.data_type doesn't yet include need_discard & need_gc_gen states -
913 alloc_data_type_set(&gc, gc.data_type);
915 if (gc.data_type != old_gc.data_type ||
916 gc.dirty_sectors != old_gc.dirty_sectors)
917 bch2_dev_usage_update(c, ca, &old_gc, &gc, 0, true);
918 percpu_up_read(&c->mark_lock);
920 gc.fragmentation_lru = alloc_lru_idx_fragmentation(gc, ca);
922 if (fsck_err_on(new.data_type != gc.data_type, c,
923 alloc_key_data_type_wrong,
924 "bucket %llu:%llu gen %u has wrong data_type"
925 ": got %s, should be %s",
926 iter->pos.inode, iter->pos.offset,
928 bch2_data_type_str(new.data_type),
929 bch2_data_type_str(gc.data_type)))
930 new.data_type = gc.data_type;
932 #define copy_bucket_field(_errtype, _f) \
933 if (fsck_err_on(new._f != gc._f, c, _errtype, \
934 "bucket %llu:%llu gen %u data type %s has wrong " #_f \
935 ": got %llu, should be %llu", \
936 iter->pos.inode, iter->pos.offset, \
938 bch2_data_type_str(gc.data_type), \
939 (u64) new._f, (u64) gc._f)) \
942 copy_bucket_field(alloc_key_gen_wrong, gen);
943 copy_bucket_field(alloc_key_dirty_sectors_wrong, dirty_sectors);
944 copy_bucket_field(alloc_key_cached_sectors_wrong, cached_sectors);
945 copy_bucket_field(alloc_key_stripe_wrong, stripe);
946 copy_bucket_field(alloc_key_stripe_redundancy_wrong, stripe_redundancy);
947 copy_bucket_field(alloc_key_fragmentation_lru_wrong, fragmentation_lru);
948 #undef copy_bucket_field
950 if (!bch2_alloc_v4_cmp(*old, new))
953 a = bch2_alloc_to_v4_mut(trans, k);
954 ret = PTR_ERR_OR_ZERO(a);
961 * The trigger normally makes sure these are set, but we're not running
964 if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
965 a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
967 ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
972 static int bch2_gc_alloc_done(struct bch_fs *c)
976 for_each_member_device(c, ca) {
977 ret = bch2_trans_run(c,
978 for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
979 POS(ca->dev_idx, ca->mi.first_bucket),
980 POS(ca->dev_idx, ca->mi.nbuckets - 1),
981 BTREE_ITER_slots|BTREE_ITER_prefetch, k,
982 NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
983 bch2_alloc_write_key(trans, &iter, ca, k)));
994 static int bch2_gc_alloc_start(struct bch_fs *c)
996 for_each_member_device(c, ca) {
997 struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
998 ca->mi.nbuckets * sizeof(struct bucket),
999 GFP_KERNEL|__GFP_ZERO);
1002 bch_err(c, "error allocating ca->buckets[gc]");
1003 return -BCH_ERR_ENOMEM_gc_alloc_start;
1006 buckets->first_bucket = ca->mi.first_bucket;
1007 buckets->nbuckets = ca->mi.nbuckets;
1008 buckets->nbuckets_minus_first =
1009 buckets->nbuckets - buckets->first_bucket;
1010 rcu_assign_pointer(ca->buckets_gc, buckets);
1013 struct bch_dev *ca = NULL;
1014 int ret = bch2_trans_run(c,
1015 for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
1016 BTREE_ITER_prefetch, k, ({
1017 ca = bch2_dev_iterate(c, ca, k.k->p.inode);
1019 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
1023 if (bucket_valid(ca, k.k->p.offset)) {
1024 struct bch_alloc_v4 a_convert;
1025 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
1027 struct bucket *g = gc_bucket(ca, k.k->p.offset);
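/*
 * c->reflink_gc_table holds the refcounts gc observed while marking; here each
 * reflink key's refcount is checked against that table and rewritten (or the
 * key deleted, if nothing references it) when it's wrong.
 */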
1038 static int bch2_gc_write_reflink_key(struct btree_trans *trans,
1039 struct btree_iter *iter,
1043 struct bch_fs *c = trans->c;
1044 const __le64 *refcount = bkey_refcount_c(k);
1045 struct printbuf buf = PRINTBUF;
1046 struct reflink_gc *r;
1052 while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
1053 r->offset < k.k->p.offset)
1057 r->offset != k.k->p.offset ||
1058 r->size != k.k->size) {
1059 bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
1063 if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
1064 reflink_v_refcount_wrong,
1065 "reflink key has wrong refcount:\n"
1068 (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
1070 struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
1071 ret = PTR_ERR_OR_ZERO(new);
1076 new->k.type = KEY_TYPE_deleted;
1078 *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
1079 ret = bch2_trans_update(trans, iter, new, 0);
1083 printbuf_exit(&buf);
1087 static int bch2_gc_reflink_done(struct bch_fs *c)
1091 int ret = bch2_trans_run(c,
1092 for_each_btree_key_commit(trans, iter,
1093 BTREE_ID_reflink, POS_MIN,
1094 BTREE_ITER_prefetch, k,
1095 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1096 bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
1097 c->reflink_gc_nr = 0;
1101 static int bch2_gc_reflink_start(struct bch_fs *c)
1103 c->reflink_gc_nr = 0;
1105 int ret = bch2_trans_run(c,
1106 for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
1107 BTREE_ITER_prefetch, k, ({
1108 const __le64 *refcount = bkey_refcount_c(k);
1113 struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
1114 c->reflink_gc_nr++, GFP_KERNEL);
1116 ret = -BCH_ERR_ENOMEM_gc_reflink_start;
1120 r->offset = k.k->p.offset;
1121 r->size = k.k->size;
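/*
 * Same idea for stripes: compare each stripe's per-block sector counts against
 * what gc counted in c->gc_stripes, and rewrite the stripe key if they differ.
 */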
1130 static int bch2_gc_write_stripes_key(struct btree_trans *trans,
1131 struct btree_iter *iter,
1134 struct bch_fs *c = trans->c;
1135 struct printbuf buf = PRINTBUF;
1136 const struct bch_stripe *s;
1137 struct gc_stripe *m;
1142 if (k.k->type != KEY_TYPE_stripe)
1145 s = bkey_s_c_to_stripe(k).v;
1146 m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
1148 for (i = 0; i < s->nr_blocks; i++) {
1149 u32 old = stripe_blockcount_get(s, i);
1150 u32 new = (m ? m->block_sectors[i] : 0);
1153 prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
1160 bch2_bkey_val_to_text(&buf, c, k);
1162 if (fsck_err_on(bad, c, stripe_sector_count_wrong,
1164 struct bkey_i_stripe *new;
1166 new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
1167 ret = PTR_ERR_OR_ZERO(new);
1171 bkey_reassemble(&new->k_i, k);
1173 for (i = 0; i < new->v.nr_blocks; i++)
1174 stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);
1176 ret = bch2_trans_update(trans, iter, &new->k_i, 0);
1179 printbuf_exit(&buf);
1183 static int bch2_gc_stripes_done(struct bch_fs *c)
1185 return bch2_trans_run(c,
1186 for_each_btree_key_commit(trans, iter,
1187 BTREE_ID_stripes, POS_MIN,
1188 BTREE_ITER_prefetch, k,
1189 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1190 bch2_gc_write_stripes_key(trans, &iter, k)));
1194 * bch2_check_allocations - walk all references to buckets, and recompute them:
1196 * @c: filesystem object
1198 * Returns: 0 on success, or standard errcode on failure
1200 * Order matters here:
1201 * - Concurrent GC relies on the fact that we have a total ordering for
1202 * everything that GC walks - see gc_will_visit_node(),
1203 * gc_will_visit_root()
1205 * - also, references move around in the course of index updates and
1206 * various other crap: everything needs to agree on the ordering
1207 * references are allowed to move around in - e.g., we're allowed to
1208 * start with a reference owned by an open_bucket (the allocator) and
1209 * move it to the btree, but not the reverse.
1211 * This is necessary to ensure that gc doesn't miss references that
1212 * move around - if references move backwards in the ordering GC
1213 * uses, GC could skip past them
1215 int bch2_check_allocations(struct bch_fs *c)
1219 lockdep_assert_held(&c->state_lock);
1221 down_write(&c->gc_lock);
1223 bch2_btree_interior_updates_flush(c);
1225 ret = bch2_gc_start(c) ?:
1226 bch2_gc_alloc_start(c) ?:
1227 bch2_gc_reflink_start(c);
1231 gc_pos_set(c, gc_phase(GC_PHASE_start));
1233 ret = bch2_mark_superblocks(c);
1236 ret = bch2_gc_btrees(c);
1242 bch2_journal_block(&c->journal);
1244 ret = bch2_gc_alloc_done(c) ?:
1246 bch2_gc_stripes_done(c) ?:
1247 bch2_gc_reflink_done(c);
1249 bch2_journal_unblock(&c->journal);
1251 percpu_down_write(&c->mark_lock);
1252 /* Indicates that gc is no longer in progress: */
1253 __gc_pos_set(c, gc_phase(GC_PHASE_not_running));
1256 percpu_up_write(&c->mark_lock);
1258 up_write(&c->gc_lock);
1261 * At startup, allocations can happen directly instead of via the
1262 * allocator thread - issue wakeup in case they blocked on gc_lock:
1264 closure_wake_up(&c->freelist_wait);
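/*
 * Per-key helper for bch2_gc_gens(): if any pointer has gone sufficiently
 * stale, the key is rewritten via bch2_extent_normalize() to drop stale cached
 * pointers; each referenced bucket's oldest_gen is also lowered to the oldest
 * gen still pointed to.
 */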
1269 static int gc_btree_gens_key(struct btree_trans *trans,
1270 struct btree_iter *iter,
1273 struct bch_fs *c = trans->c;
1274 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1278 if (unlikely(test_bit(BCH_FS_going_ro, &c->flags)))
1281 percpu_down_read(&c->mark_lock);
1283 bkey_for_each_ptr(ptrs, ptr) {
1284 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1288 if (dev_ptr_stale(ca, ptr) > 16) {
1290 percpu_up_read(&c->mark_lock);
1295 bkey_for_each_ptr(ptrs, ptr) {
1296 struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
1300 u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];
1301 if (gen_after(*gen, ptr->gen))
1305 percpu_up_read(&c->mark_lock);
1308 u = bch2_bkey_make_mut(trans, iter, &k, 0);
1309 ret = PTR_ERR_OR_ZERO(u);
1313 bch2_extent_normalize(c, bkey_i_to_s(u));
1317 static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct bch_dev *ca,
1318 struct btree_iter *iter, struct bkey_s_c k)
1320 struct bch_alloc_v4 a_convert;
1321 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
1322 struct bkey_i_alloc_v4 *a_mut;
1325 if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
1328 a_mut = bch2_alloc_to_v4_mut(trans, k);
1329 ret = PTR_ERR_OR_ZERO(a_mut);
1333 a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
1334 alloc_data_type_set(&a_mut->v, a_mut->v.data_type);
1336 return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
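/*
 * Bucket generation numbers are 8 bits and will eventually wrap: this pass
 * walks every btree that contains pointers, records the oldest gen still
 * referenced for each bucket, drops pointers that have gone excessively stale,
 * and writes the new oldest_gen back to the alloc btree so gens can keep
 * advancing safely.
 */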
1339 int bch2_gc_gens(struct bch_fs *c)
1341 u64 b, start_time = local_clock();
1345 * Ideally we would be using state_lock and not gc_lock here, but that
1346 * introduces a deadlock in the RO path - we currently take the state
1347 * lock at the start of going RO, thus the gc thread may get stuck:
1349 if (!mutex_trylock(&c->gc_gens_lock))
1352 trace_and_count(c, gc_gens_start, c);
1353 down_read(&c->gc_lock);
1355 for_each_member_device(c, ca) {
1356 struct bucket_gens *gens = bucket_gens(ca);
1358 BUG_ON(ca->oldest_gen);
1360 ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
1361 if (!ca->oldest_gen) {
1363 ret = -BCH_ERR_ENOMEM_gc_gens;
1367 for (b = gens->first_bucket;
1368 b < gens->nbuckets; b++)
1369 ca->oldest_gen[b] = gens->b[b];
1372 for (unsigned i = 0; i < BTREE_ID_NR; i++)
1373 if (btree_type_has_ptrs(i)) {
1374 c->gc_gens_btree = i;
1375 c->gc_gens_pos = POS_MIN;
1377 ret = bch2_trans_run(c,
1378 for_each_btree_key_commit(trans, iter, i,
1380 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
1383 BCH_TRANS_COMMIT_no_enospc,
1384 gc_btree_gens_key(trans, &iter, k)));
1389 struct bch_dev *ca = NULL;
1390 ret = bch2_trans_run(c,
1391 for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
1393 BTREE_ITER_prefetch,
1396 BCH_TRANS_COMMIT_no_enospc, ({
1397 ca = bch2_dev_iterate(c, ca, k.k->p.inode);
1399 bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
1402 bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
1409 c->gc_gens_btree = 0;
1410 c->gc_gens_pos = POS_MIN;
1414 bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
1415 trace_and_count(c, gc_gens_end, c);
1417 for_each_member_device(c, ca) {
1418 kvfree(ca->oldest_gen);
1419 ca->oldest_gen = NULL;
1422 up_read(&c->gc_lock);
1423 mutex_unlock(&c->gc_gens_lock);
1424 if (!bch2_err_matches(ret, EROFS))
1429 static void bch2_gc_gens_work(struct work_struct *work)
1431 struct bch_fs *c = container_of(work, struct bch_fs, gc_gens_work);
1433 bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
1436 void bch2_gc_gens_async(struct bch_fs *c)
1438 if (bch2_write_ref_tryget(c, BCH_WRITE_REF_gc_gens) &&
1439 !queue_work(c->write_ref_wq, &c->gc_gens_work))
1440 bch2_write_ref_put(c, BCH_WRITE_REF_gc_gens);
1443 void bch2_fs_gc_init(struct bch_fs *c)
1445 seqcount_init(&c->gc_pos_lock);
1447 INIT_WORK(&c->gc_gens_work, bch2_gc_gens_work);