// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "recovery.h"
#include "super-io.h"
#include "trace.h"

#include <linux/sched/mm.h>
static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
	prt_printf(out, "btree=%s l=%u seq %llux\n",
		   bch2_btree_id_str(BTREE_NODE_ID(bn)),
		   (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
	prt_str(out, "min: ");
	bch2_bpos_to_text(out, bn->min_key);
	prt_newline(out);
	prt_str(out, "max: ");
	bch2_bpos_to_text(out, bn->max_key);
}
void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
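
/*
 * The read/write in-flight flags double as bit-waitqueue based IO locks; a
 * rough usage sketch (not a verbatim caller from this file):
 *
 *	bch2_btree_node_io_lock(b);	// excludes a concurrent write
 *	...start or wait on node IO...
 *	bch2_btree_node_io_unlock(b);	// clears the bit and wakes waiters
 *
 * The __-prefixed wait helpers below skip bch2_assert_btree_nodes_not_locked()
 * and are for callers that know the btree node locks they hold can't deadlock
 * against the IO they're waiting on.
 */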
void bch2_btree_node_io_lock(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}
void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
void bch2_btree_node_wait_on_read(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
void bch2_btree_node_wait_on_write(struct btree *b)
{
	bch2_assert_btree_nodes_not_locked();

	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_p_next(start);
	     k != end;
	     p = k, k = bkey_p_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
	}
#endif
}
static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
		k->needs_whiteout = v;
}
static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		kvfree(p);
}
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > c->opts.btree_node_size);

	*used_mempool = false;
	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
	}
	memalloc_nofs_restore(flags);
	return p;
}
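
/*
 * Allocation strategy above: try a cheap, non-blocking kvmalloc() first and
 * only fall back to the mempool (which is guaranteed to make forward
 * progress) when that fails; memalloc_nofs_save() keeps reclaim from
 * recursing into the filesystem, since bounce buffers are allocated from IO
 * paths.
 */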
static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}
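
/*
 * A hand-rolled heapsort (cf. lib/sort.c, as the comment above notes) is used
 * here rather than the library version: it sorts the pointer array in place
 * with no allocation and bounded stack, which suits a path that already holds
 * btree node locks. Only the pointers move; the variable-size packed keys
 * they reference stay put until bch2_sort_whiteouts() copies them out.
 */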
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(b);
	     k != unwritten_whiteouts_end(b);
	     k = bkey_p_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_p_copy(k, *ptrs);
		k = bkey_p_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}
static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_p_next(k);

			if (!bkey_deleted(k)) {
				bkey_p_copy(out, k);
				out = bkey_p_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}
bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx)
{
	struct btree_node *out;
	struct sort_iter_stack sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_stack_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_buf_bytes(b)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter.iter);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_buf_bytes(b));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}
void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			src, &src_iter,
			&dst->format,
			true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}
/*
 * We're about to add another bset to the btree node, so if there's currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx, b->nsets);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx);
		ret = true;
	}

	return ret;
}
void bch2_btree_build_aux_trees(struct btree *b)
{
	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}
/*
 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
 *
 * The first bset is going to be of similar order to the size of the node, the
 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
 * memmove on insert from being too expensive: the middle bset should, ideally,
 * be the geometric mean of the first and the last.
 *
 * Returns true if the middle bset is greater than that geometric mean:
 */
static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
{
	unsigned mid_u64s_bits =
		(ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;

	return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
}
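
/*
 * Worked example, with illustrative numbers only: if ilog2(btree_max_u64s(c))
 * were 15 (first bset ~2^15 u64s) and BTREE_WRITE_SET_U64s_BITS were 9 (last
 * bset bounded at 2^9 u64s), then mid_u64s_bits would be 12, and we'd compact
 * once the middle bset exceeds 2^12 u64s - the geometric mean of 2^15 and 2^9,
 * since averaging exponents in log space is taking a geometric mean.
 */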
/*
 * @bch_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * Returns true if we sorted (i.e. invalidated iterators)
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));
	BUG_ON(btree_node_just_written(b));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b) &&
	    should_compact_all(c, b)) {
		bch2_btree_node_write(c, b, SIX_LOCK_write,
				      BTREE_WRITE_init_next_bset);
		reinit_iter = true;
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i, struct bkey_packed *k,
			  unsigned offset, int write)
{
	prt_printf(out, bch2_log_msg(c, "%s"),
		   write == READ
		   ? "error validating btree node "
		   : "corrupt btree node before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	bch2_btree_pos_to_text(out, c, b);

	printbuf_indent_add(out, 2);

	prt_printf(out, "\nnode offset %u/%u",
		   b->written, btree_ptr_sectors_written(&b->key));
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
	if (k)
		prt_printf(out, " bset byte offset %lu",
			   (unsigned long)(void *)k -
			   ((unsigned long)(void *)i & ~511UL));
	prt_str(out, ": ");
}
static int __btree_err(int ret,
		       struct bch_fs *c,
		       struct bch_dev *ca,
		       struct btree *b,
		       struct bset *i,
		       struct bkey_packed *k,
		       int write,
		       bool have_retry,
		       enum bch_sb_error_id err_type,
		       const char *fmt, ...)
{
	struct printbuf out = PRINTBUF;
	bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
	va_list args;

	btree_err_msg(&out, c, ca, b, i, k, b->written, write);

	va_start(args, fmt);
	prt_vprintf(&out, fmt, args);
	va_end(args);

	if (write == WRITE) {
		bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = c->opts.errors == BCH_ON_ERROR_continue
			? 0
			: -BCH_ERR_fsck_errors_not_fixed;
		goto out;
	}

	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
		ret = -BCH_ERR_btree_node_read_err_fixable;
	if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
		ret = -BCH_ERR_btree_node_read_err_bad_node;

	if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
		bch2_sb_error_count(c, err_type);

	switch (ret) {
	case -BCH_ERR_btree_node_read_err_fixable:
		ret = !silent
			? bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf)
			: -BCH_ERR_fsck_fix;
		if (ret != -BCH_ERR_fsck_fix &&
		    ret != -BCH_ERR_fsck_ignore)
			goto fsck_err;
		ret = -BCH_ERR_fsck_fix;
		break;
	case -BCH_ERR_btree_node_read_err_want_retry:
	case -BCH_ERR_btree_node_read_err_must_retry:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		break;
	case -BCH_ERR_btree_node_read_err_bad_node:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = bch2_topology_error(c);
		break;
	case -BCH_ERR_btree_node_read_err_incompatible:
		if (!silent)
			bch2_print_string_as_lines(KERN_ERR, out.buf);
		ret = -BCH_ERR_fsck_errors_not_fixed;
		break;
	default:
		BUG();
	}
out:
fsck_err:
	printbuf_exit(&out);
	return ret;
}
#define btree_err(type, c, ca, b, i, k, _err_type, msg, ...)		\
({									\
	int _ret = __btree_err(type, c, ca, b, i, k, write, have_retry,\
			       BCH_FSCK_ERR_##_err_type,		\
			       msg, ##__VA_ARGS__);			\
									\
	if (_ret != -BCH_ERR_fsck_fix) {				\
		ret = _ret;						\
		goto fsck_err;						\
	}								\
									\
	*saw_error = true;						\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
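
/*
 * Note the macro contract: btree_err()/btree_err_on() expand in functions
 * that have `ret`, `write`, `have_retry` and `saw_error` in scope plus a
 * fsck_err label. A fixable error sets *saw_error and the expression
 * evaluates to true (so callers can do their in-place repair in an
 * `if (btree_err_on(...))` body); anything else stores the error in ret and
 * jumps to fsck_err.
 */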
/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */

void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
		}
	}

	/*
	 * Always rebuild search trees: eytzinger search tree nodes directly
	 * depend on the values of min/max key:
	 */
	bch2_bset_set_no_aux_tree(b, b->set);
	bch2_btree_build_aux_trees(b);
	b->nr = bch2_btree_node_count_keys(b);

	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;
	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
	}
}
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on(!bch2_version_compatible(version),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "unsupported bset version %u.%u",
		     BCH_VERSION_MAJOR(version),
		     BCH_VERSION_MINOR(version));

	if (btree_err_on(version < c->sb.version_min,
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_older_than_sb_min,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(BCH_VERSION_MAJOR(version) >
			 BCH_VERSION_MAJOR(c->sb.version),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, NULL, b, i, NULL,
			 btree_node_bset_newer_than_sb,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     -BCH_ERR_btree_node_read_err_incompatible,
		     c, ca, b, i, NULL,
		     btree_node_unsupported_version,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (btree_err_on(offset + sectors > btree_sectors(c),
			 -BCH_ERR_btree_node_read_err_fixable,
			 c, ca, b, i, NULL,
			 bset_past_end_of_btree_node,
			 "bset past end of btree node")) {
		i->u64s = 0;
		ret = 0;
		goto out;
	}

	btree_err_on(offset && !i->u64s,
		     -BCH_ERR_btree_node_read_err_fixable,
		     c, ca, b, i, NULL,
		     bset_empty,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
		     -BCH_ERR_btree_node_read_err_want_retry,
		     c, ca, b, i, NULL,
		     bset_wrong_sector_offset,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     bset_bad_seq,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_btree,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_level,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
				     -BCH_ERR_btree_node_read_err_must_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bad_min_key,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, i, NULL,
			     btree_node_bad_max_key,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
			     -BCH_ERR_btree_node_read_err_bad_node,
			     c, ca, b, i, NULL,
			     btree_node_bad_format,
			     "invalid bkey format: %s\n  %s", buf1.buf,
			     (printbuf_reset(&buf2),
			      bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
		printbuf_reset(&buf1);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}
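
/*
 * validate_bset() above only checks bset- and node-header level invariants
 * (version, sector bounds, sequence numbers, min/max keys, bkey format); the
 * per-key checks live in validate_bset_keys() below, and the read and write
 * paths always call the two as a pair.
 */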
static int bset_key_invalid(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k,
			    bool updated_range, int rw,
			    struct printbuf *err)
{
	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
		(!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
			      struct bset *i, struct bkey_packed *k)
{
	if (bkey_p_next(k) > vstruct_last(i))
		return false;

	if (k->format > KEY_FORMAT_CURRENT)
		return false;

	if (!bkeyp_u64s_valid(&b->format, k))
		return false;

	struct printbuf buf = PRINTBUF;
	struct bkey tmp;
	struct bkey_s u = __bkey_disassemble(b, k, &tmp);
	/* __bch2_bkey_invalid() returns nonzero on error, so invert for "valid": */
	bool ret = !__bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
	printbuf_exit(&buf);
	return ret;
}
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, int write,
			      bool have_retry, bool *saw_error)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		unsigned next_good_key;

		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_past_bset_end,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_format,
				 "invalid bkey format %u", k->format))
			goto drop_this_key;

		if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, i, k,
				 btree_node_bkey_bad_u64s,
				 "bad k->u64s %u (min %u max %zu)", k->u64s,
				 bkeyp_key_u64s(&b->format, k),
				 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
			goto drop_this_key;

		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);
		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
			printbuf_reset(&buf);
			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i, k,
				  btree_node_bad_bkey,
				  "invalid bkey: %s", buf.buf);
			goto drop_this_key;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
				      c, NULL, b, i, k,
				      btree_node_bkey_out_of_order,
				      "%s", buf.buf))
				goto drop_this_key;
		}

		prev = k;
		k = bkey_p_next(k);
		continue;
drop_this_key:
		next_good_key = k->u64s;

		if (!next_good_key ||
		    (BSET_BIG_ENDIAN(i) == CPU_BIG_ENDIAN &&
		     version >= bcachefs_metadata_version_snapshot)) {
			/*
			 * only do scanning if bch2_bkey_compat() has nothing to
			 * do
			 */

			if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
				for (next_good_key = 1;
				     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
				     next_good_key++)
					if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
						goto got_good_key;
			}

			/*
			 * didn't find a good key, have to truncate the rest of
			 * the bset
			 */
			next_good_key = (u64 *) vstruct_last(i) - (u64 *) k;
		}
got_good_key:
		le16_add_cpu(&i->u64s, -next_good_key);
		/* next_good_key may differ from k->u64s if we scanned: */
		memmove_u64s_down(k, (u64 *) k + next_good_key,
				  (u64 *) vstruct_end(i) - (u64 *) k);
		set_btree_bset_end(b, b->set);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
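
/*
 * Recovery strategy for drop_this_key above, in order of preference: skip
 * over the bad key using its own u64s field; when that's zero, or when
 * bch2_bkey_compat() has nothing to do (native endianness, recent enough
 * version) so neighbouring keys should be intact, scan forward one u64 at a
 * time for the next thing that validates as a packed bkey; failing that,
 * truncate the remainder of the bset.
 */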
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry, bool *saw_error)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
	struct printbuf buf = PRINTBUF;
	int ret = 0, retry_read = 0, write = READ;
	u64 start_time = local_clock();

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
	sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);

	if (bch2_meta_read_fault("btree"))
		btree_err(-BCH_ERR_btree_node_read_err_must_retry,
			  c, ca, b, NULL, NULL,
			  btree_node_fault_injected,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     -BCH_ERR_btree_node_read_err_must_retry,
		     c, ca, b, NULL, NULL,
		     btree_node_bad_magic,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		bch2_bpos_to_text(&buf, b->data->min_key);
		prt_str(&buf, "-");
		bch2_bpos_to_text(&buf, b->data->max_key);

		btree_err_on(b->data->keys.seq != bp->seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "got wrong btree node: got\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	} else {
		btree_err_on(!b->data->keys.seq,
			     -BCH_ERR_btree_node_read_err_must_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_bad_seq,
			     "bad btree header: seq 0\n%s",
			     (printbuf_reset(&buf),
			      bch2_btree_node_header_to_text(&buf, b->data),
			      buf.buf));
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors;
		struct nonce nonce;
		bool first = !b->written;
		bool csum_bad;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);

			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
			csum_bad = bch2_crc_cmp(b->data->csum, csum);
			if (ca && csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), b->data->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     -BCH_ERR_btree_node_read_err_incompatible,
				     c, NULL, b, NULL, NULL,
				     btree_node_unsupported_version,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_unknown_csum,
				     "unknown checksum type %llu", BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			struct bch_csum csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
			csum_bad = bch2_crc_cmp(bne->csum, csum);
			if (ca && csum_bad)
				bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);

			btree_err_on(csum_bad,
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, i, NULL,
				     bset_bad_csum,
				     "%s",
				     (printbuf_reset(&buf),
				      bch2_csum_err_msg(&buf, BSET_CSUM_TYPE(i), bne->csum, csum),
				      buf.buf));

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"decrypting btree node: %s", bch2_err_str(ret)))
				goto fsck_err;

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     bset_blacklisted_journal_seq,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     -BCH_ERR_btree_node_read_err_fixable,
			     c, ca, b, i, NULL,
			     first_bset_blacklisted_journal_seq,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written += sectors;

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter,
			      vstruct_idx(i, 0),
			      vstruct_last(i));
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     -BCH_ERR_btree_node_read_err_want_retry,
			     c, ca, b, NULL, NULL,
			     btree_node_data_missing,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_buf_bytes(b);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
								      le64_to_cpu(bne->keys.journal_seq),
								      true),
				     -BCH_ERR_btree_node_read_err_want_retry,
				     c, ca, b, NULL, NULL,
				     btree_node_bset_after_end,
				     "found bset signature after last bset");
	}

	sorted = btree_bounce_alloc(c, btree_buf_bytes(b), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);

		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			printbuf_reset(&buf);

			prt_printf(&buf, "invalid bkey: ");
			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(-BCH_ERR_btree_node_read_err_fixable,
				  c, NULL, b, i, k,
				  btree_node_bad_bkey,
				  "%s", buf.buf);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_p_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_p_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	rcu_read_lock();
	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca2 = bch2_dev_rcu(c, ptr->dev);

		if (!ca2 || ca2->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}
	rcu_read_unlock();

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read_done], start_time);
	return retry_read;
fsck_err:
	if (ret == -BCH_ERR_btree_node_read_err_want_retry ||
	    ret == -BCH_ERR_btree_node_read_err_must_retry) {
		retry_read = 1;
	} else {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	}
	goto out;
}
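
/*
 * The return value is the retry decision: nonzero means the caller should try
 * another replica (want_retry/must_retry); fixable errors are repaired in
 * place and return 0; unrecoverable errors also return 0 but leave the node
 * flagged with btree_node_read_error and accounted via bch2_btree_lost_data().
 */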
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct bch_dev *ca	= rb->have_ioref ? bch2_dev_have_ref(c, rb->pick.ptr.dev) : NULL;
	struct btree *b		= rb->b;
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
		rb->have_ioref		= ca != NULL;
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_buf_bytes(b);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		bch2_btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(ca && bio->bi_status, ca, BCH_MEMBER_ERROR_read,
				   "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
				bkey_i_to_s_c(&b->key),
				&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			bch2_btree_lost_data(c, b->c.btree_id);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if (saw_error &&
	    !btree_node_read_error(b) &&
	    c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
		printbuf_reset(&buf);
		bch2_bpos_to_text(&buf, b->key.k.p);
		bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
				    __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);

		bch2_btree_node_rewrite_async(c, b);
	}

	printbuf_exit(&buf);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->btree_read_complete_wq, &rb->work);
}
struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	blk_status_t		err[BCH_REPLICAS_MAX];
};
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}
static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}
static CLOSURE_CALLBACK(btree_node_read_all_replicas_done)
{
	closure_type(ra, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
	bool _saw_error = false, *saw_error = &_saw_error;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_sectors_written_mismatch,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_bset_after_end,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 -BCH_ERR_btree_node_read_err_fixable,
				 c, NULL, b, NULL, NULL,
				 btree_node_replicas_data_mismatch,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_buf_bytes(b));
		ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
	} else {
		ret = -1;
	}

	if (ret) {
		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
	} else if (*saw_error)
		bch2_btree_node_rewrite_async(c, b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}
/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_buf_bytes(b)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);

		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= ca != NULL;
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_buf_bytes(b));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl.work);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->btree_read_complete_wq);
	}

	return 0;
}
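
/*
 * Note the closure discipline above: each submitted bio holds a ref on
 * ra->cl, and the done callback runs either synchronously here (after
 * closure_sync()) or via continue_at() on the read-complete workqueue, so ra
 * and its bounce buffers are freed exactly once, in
 * btree_node_read_all_replicas_done().
 */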
void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
			  bool sync)
{
	struct bch_fs *c = trans->c;
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_and_count(c, btree_node_read, trans, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_ratelimited(c, "%s", buf.buf);

		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		bch2_btree_lost_data(c, b->c.btree_id);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_buf_bytes(b)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOFS,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= ca != NULL;
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_buf_bytes(b));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);
			bch2_latency_acct(ca, rb->start_time, READ);
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->btree_read_complete_wq, &rb->work);
	}
}
static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
				  const struct bkey_i *k, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, level != 0);
	bch2_btree_cache_cannibalize_unlock(trans);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -BCH_ERR_btree_node_read_error;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	return bch2_trans_run(c, __bch2_btree_root_read(trans, id, k, level));
}
static void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
				      struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}
static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new, v;
	unsigned type = 0;

	bch2_btree_complete_write(c, b, w);

	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_write_in_flight_inner);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);

			type = new & BTREE_WRITE_TYPE_MASK;
			new &= ~BTREE_WRITE_TYPE_MASK;
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
	else
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}
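
/*
 * The loop above is the lockless flag-update pattern used throughout this
 * file: re-read the flags word, compute the desired next state, publish it
 * with cmpxchg(), and retry on contention. If dirty and need_write were set
 * again while the write was in flight (and nothing blocks a write), the same
 * transition that ends this write also claims the next one, and
 * __bch2_btree_node_write() is re-entered with BTREE_WRITE_ALREADY_STARTED.
 */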
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_trans *trans = bch2_trans_get(c);

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
	__btree_node_write_done(c, b);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
}
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;
	int ret = 0;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
		ret = -BCH_ERR_btree_node_write_all_failed;
		goto err;
	}

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {

		}
	} else {
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_btree_node_update_key_get_iter(trans, b, &wbio->key,
					BCH_WATERMARK_interior_updates|
					BCH_TRANS_COMMIT_journal_reclaim|
					BCH_TRANS_COMMIT_no_enospc|
					BCH_TRANS_COMMIT_no_check_rw,
					!wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_err_on(!bch2_err_matches(ret, EROFS), c,
			     "writing btree node: %s", bch2_err_str(ret));
	goto out;
}
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c		= wbio->c;
	struct btree *b			= wbio->bio.bi_private;
	struct bch_dev *ca		= wbio->have_ioref ? bch2_dev_have_ref(c, wbio->dev) : NULL;
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (!ca ||
	    bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_io_complete_wq, &wb->work);
}
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	struct printbuf buf = PRINTBUF;
	bool saw_error;
	int ret;

	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
				BKEY_TYPE_btree, WRITE, &buf);
	if (ret)
		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
	printbuf_exit(&buf);
	if (ret)
		return ret;

	ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}
static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
				  &tmp.k, false);
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter_stack sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
	void *data;
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		if (flags & BTREE_WRITE_ONLY_IF_NEED)
			type = new & BTREE_WRITE_TYPE_MASK;
		new &= ~BTREE_WRITE_TYPE_MASK;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_write_in_flight_inner);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

	atomic_dec(&c->btree_cache.dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_stack_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter.iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	sort_iter_add(&sort_iter.iter,
		      unwritten_whiteouts_start(b),
		      unwritten_whiteouts_end(b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	u64s = bch2_sort_keys_keep_unwritten_whiteouts(i->start, &sort_iter.iter);
	le16_add_cpu(&i->u64s, u64s);

	b->whiteout_u64s = 0;

	BUG_ON(!b->written && i->u64s != b->data->keys.u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"encrypting btree node: %s", bch2_err_str(ret)))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOFS,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->data_bytes		= bytes;
	wbio->sector_offset		= b->written;
	wbio->wbio.c			= c;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.first_btree_write	= !b->written;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_write_stats[type].nr);
	atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->btree_write_submit_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b);
}
/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}
/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}
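
/*
 * Lock handling above: a read lock is opportunistically upgraded so the
 * post-write cleanup (which needs the write lock) can run immediately, then
 * downgraded again. If the upgrade or write-trylock fails we still issue the
 * write; cleanup is simply deferred to whoever next takes the write lock and
 * sees btree_node_just_written().
 */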
static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
	bool ret = false;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}
bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}
bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
static const char * const bch2_btree_write_types[] = {
#define x(t, n) [n] = #t,
	BCH_BTREE_WRITE_TYPES()
	NULL
};
void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	printbuf_tabstop_push(out, 20);
	printbuf_tabstop_push(out, 10);

	prt_printf(out, "\tnr\tsize\n");

	for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
		u64 nr		= atomic64_read(&c->btree_write_stats[i].nr);
		u64 bytes	= atomic64_read(&c->btree_write_stats[i].bytes);

		prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
		prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
		prt_newline(out);
	}
}