// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bset.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "journal_io.h"

#include <linux/sort.h>
/*
 * For managing keys we read from the journal: until journal replay has
 * finished, normal btree lookups need to be able to find and return keys from
 * the journal where they overwrite what's in the btree, so we have a special
 * iterator and operations for the regular btree iter code to use:
 */
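/*
 * The keys live in a gap buffer: keys->data[] is a sorted array of
 * keys->size slots holding keys->nr keys, with the unused slots forming a
 * movable hole at keys->gap. "idx" below means a logical index into the
 * sorted keys, "pos" a physical index into the array; the helpers below
 * convert between the two by skipping over the gap.
 */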
static inline size_t pos_to_idx(struct journal_keys *keys, size_t pos)
{
	size_t gap_size = keys->size - keys->nr;

	BUG_ON(pos >= keys->gap && pos < keys->gap + gap_size);

	if (pos >= keys->gap)
		pos -= gap_size;
	return pos;
}

static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	if (idx >= keys->gap)
		idx += gap_size;
	return idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->data + idx_to_pos(keys, idx);
}
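/*
 * Worked example: with size == 8, nr == 6 and gap == 3, the two unused
 * slots sit at physical positions 3 and 4; idx_to_pos() maps logical
 * indices 0..5 to physical positions 0, 1, 2, 5, 6, 7, and pos_to_idx()
 * inverts that (the BUG_ON catches a physical position inside the hole,
 * which corresponds to no key).
 */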
static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}
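/*
 * I.e. a lower bound search: the result is the first logical idx whose key
 * is >= the (id, level, pos) search key, or keys->nr if there is none -
 * exactly the invariants the two BUG_ON()s assert.
 */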
static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}
/* Returns first non-overwritten key >= search key: */
struct bkey_i *bch2_journal_keys_peek_max(struct bch_fs *c, enum btree_id btree_id,
					  unsigned level, struct bpos pos,
					  struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	struct bkey_i *ret = NULL;
	rcu_read_lock(); /* for overwritten_ranges */

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			break;

		if (k->overwritten) {
			if (k->overwritten_range)
				*idx = rcu_dereference(k->overwritten_range)->end;
			else
				*idx += 1;
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0) {
			ret = k->k;
			break;
		}

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			rcu_read_unlock();
			goto search;
		}
	}

	rcu_read_unlock();
	return ret;
}
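/*
 * *idx is a cursor the caller caches across calls (0 meaning "not yet
 * positioned"). Keys may be inserted while the caller holds it, so the
 * fixup loops above tolerate a stale cursor and fall back to a full
 * binary search, via the search: label, after too many repair steps.
 */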
struct bkey_i *bch2_journal_keys_peek_prev_min(struct bch_fs *c, enum btree_id btree_id,
					       unsigned level, struct bpos pos,
					       struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;

	BUG_ON(*idx > keys->nr);
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx < keys->nr &&
	       __journal_key_cmp(btree_id, level, end_pos, idx_to_key(keys, *idx - 1)) <= 0) {
		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	struct bkey_i *ret = NULL;
	rcu_read_lock(); /* for overwritten_ranges */

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) > 0)
			break;

		if (k->overwritten) {
			if (k->overwritten_range)
				*idx = rcu_dereference(k->overwritten_range)->start - 1;
			else
				*idx -= 1;
			continue;
		}

		if (__journal_key_cmp(btree_id, level, pos, k) >= 0) {
			ret = k->k;
			break;
		}

		--(*idx);
		iters++;
		if (iters == 10) {
			*idx = 0;
			rcu_read_unlock();
			goto search;
		}
	}

	rcu_read_unlock();
	return ret;
}
struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_max(c, btree_id, level, pos, pos, &idx);
}
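/*
 * A slot lookup is just a degenerate range lookup: with end_pos == pos and
 * a throwaway cursor, the peek returns the journal key at exactly @pos, or
 * NULL.
 */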
static void journal_iter_verify(struct journal_iter *iter)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct journal_keys *keys = iter->keys;
	size_t gap_size = keys->size - keys->nr;

	BUG_ON(iter->idx >= keys->gap &&
	       iter->idx <  keys->gap + gap_size);

	if (iter->idx < keys->size) {
		struct journal_key *k = keys->data + iter->idx;

		int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k);
		BUG_ON(cmp > 0);
	}
#endif
}
static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct journal_key *new_key = &keys->data[keys->gap - 1];
	struct journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle its position being off by one:
	 */
	list_for_each_entry(iter, &c->journal_iters, list) {
		journal_iter_verify(iter);
		if (iter->idx == gap_end &&
		    new_key->btree_id == iter->btree_id &&
		    new_key->level == iter->level)
			iter->idx = keys->gap - 1;
		journal_iter_verify(iter);
	}
}
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}
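/*
 * E.g. with gap_size == 2, moving the gap from old_gap 8 down to new_gap 2
 * shifts the keys at physical positions 2..7 up by two slots; an iterator
 * at physical idx 5 is bumped to 7, where its key now lives, while one at
 * idx 10 (above both gap positions) nets out unchanged.
 */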
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U64_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_rw, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->data[idx]) == 0) {
		if (keys->data[idx].allocated)
			kfree(keys->data[idx].k);
		keys->data[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	size_t old_gap = keys->gap;

	if (keys->nr == keys->size) {
		journal_iters_move_gap(c, old_gap, keys->size);
		old_gap = keys->size;

		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL);
		if (!new_keys.data) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -BCH_ERR_ENOMEM_journal_key_insert;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr);
		kvfree(keys->data);
		keys->data	= new_keys.data;
		keys->nr	= new_keys.nr;
		keys->size	= new_keys.size;

		/* And now the gap is at the end: */
		keys->gap	= keys->nr;
	}

	journal_iters_move_gap(c, old_gap, idx);

	move_gap(keys, idx);

	keys->nr++;
	keys->data[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}
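/*
 * Note the ordering above: the gap is moved to the insertion point before
 * the new key is written into it, and open iterators are repaired at each
 * step - journal_iters_move_gap() for the gap move, then
 * journal_iters_fix() for the newly inserted key itself.
 */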
/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_key_insert;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}
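/*
 * Deletion is thus just insertion of a whiteout: a deleted key at @pos
 * shadows whatever the btree holds there until journal replay finishes,
 * and bch2_key_deleted_in_journal() below tests for exactly this.
 */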
bool bch2_key_deleted_in_journal(struct btree_trans *trans, enum btree_id btree,
				 unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &trans->c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (!trans->journal_replay_not_finished)
		return false;

	return (idx < keys->size &&
		keys->data[idx].btree_id == btree &&
		keys->data[idx].level == level &&
		bpos_eq(keys->data[idx].k->k.p, pos) &&
		bkey_deleted(&keys->data[idx].k->k));
}
static void __bch2_journal_key_overwritten(struct journal_keys *keys, size_t pos)
{
	struct journal_key *k = keys->data + pos;
	size_t idx = pos_to_idx(keys, pos);

	k->overwritten = true;

	struct journal_key *prev = idx > 0 ? keys->data + idx_to_pos(keys, idx - 1) : NULL;
	struct journal_key *next = idx + 1 < keys->nr ? keys->data + idx_to_pos(keys, idx + 1) : NULL;

	bool prev_overwritten = prev && prev->overwritten;
	bool next_overwritten = next && next->overwritten;

	struct journal_key_range_overwritten *prev_range =
		prev_overwritten ? prev->overwritten_range : NULL;
	struct journal_key_range_overwritten *next_range =
		next_overwritten ? next->overwritten_range : NULL;

	BUG_ON(prev_range && prev_range->end != idx);
	BUG_ON(next_range && next_range->start != idx + 1);

	if (prev_range && next_range) {
		prev_range->end = next_range->end;

		keys->data[pos].overwritten_range = prev_range;
		for (size_t i = next_range->start; i < next_range->end; i++) {
			struct journal_key *ip = keys->data + idx_to_pos(keys, i);
			BUG_ON(ip->overwritten_range != next_range);
			ip->overwritten_range = prev_range;
		}

		kfree_rcu_mightsleep(next_range);
	} else if (prev_range) {
		prev_range->end++;
		k->overwritten_range = prev_range;
		if (next_overwritten) {
			prev_range->end++;
			next->overwritten_range = prev_range;
		}
	} else if (next_range) {
		next_range->start--;
		k->overwritten_range = next_range;
		if (prev_overwritten) {
			next_range->start--;
			prev->overwritten_range = next_range;
		}
	} else if (prev_overwritten || next_overwritten) {
		struct journal_key_range_overwritten *r = kmalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return;

		r->start = idx - (size_t) prev_overwritten;
		r->end = idx + 1 + (size_t) next_overwritten;

		rcu_assign_pointer(k->overwritten_range, r);
		if (prev_overwritten)
			prev->overwritten_range = r;
		if (next_overwritten)
			next->overwritten_range = r;
	}
}
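/*
 * Coalescing example: if logical keys 2-3 already form the overwritten
 * range [2,4) and keys 5-6 form [5,7), overwriting key 4 takes the
 * prev_range && next_range branch above: [2,4) is extended to [2,7), the
 * keys of the old [5,7) range are repointed at it, and the old range is
 * freed via RCU.
 */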
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->data[idx].btree_id == btree &&
	    keys->data[idx].level == level &&
	    bpos_eq(keys->data[idx].k->k.p, pos) &&
	    !keys->data[idx].overwritten) {
		mutex_lock(&keys->overwrite_lock);
		__bch2_journal_key_overwritten(keys, idx);
		mutex_unlock(&keys->overwrite_lock);
	}
}
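/*
 * overwrite_lock serializes writers of the overwritten-range structures;
 * readers (the peek paths above) take no lock and instead rely on RCU,
 * paired with rcu_assign_pointer()/kfree_rcu_mightsleep() in
 * __bch2_journal_key_overwritten().
 */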
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct bkey_s_c ret = bkey_s_c_null;

	journal_iter_verify(iter);

	rcu_read_lock();
	while (iter->idx < iter->keys->size) {
		struct journal_key *k = iter->keys->data + iter->idx;

		int cmp = __journal_key_btree_cmp(iter->btree_id, iter->level, k);
		if (cmp < 0)
			break;
		BUG_ON(cmp);

		if (!k->overwritten) {
			ret = bkey_i_to_s_c(k->k);
			break;
		}

		if (k->overwritten_range)
			iter->idx = idx_to_pos(iter->keys, rcu_dereference(k->overwritten_range)->end);
		else
			bch2_journal_iter_advance(iter);
	}
	rcu_read_unlock();

	return ret;
}
static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);

	journal_iter_verify(iter);
}
static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}
static void btree_and_journal_iter_prefetch(struct btree_and_journal_iter *_iter)
{
	struct btree_and_journal_iter iter = *_iter;
	struct bch_fs *c = iter.trans->c;
	unsigned level = iter.journal.level;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (level > 1 ? 0 :  2)
		: (level > 1 ? 1 : 16);

	iter.prefetch = false;
	iter.fail_if_too_many_whiteouts = true;
	bch2_bkey_buf_init(&tmp);

	while (nr--) {
		bch2_btree_and_journal_iter_advance(&iter);
		struct bkey_s_c k = bch2_btree_and_journal_iter_peek(&iter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		bch2_btree_node_prefetch(iter.trans, NULL, tmp.k, iter.journal.btree_id, level - 1);
	}

	bch2_bkey_buf_exit(&tmp, c);
}
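/*
 * The nr heuristic above encodes: prefetch nothing above level 1 and only
 * a couple of nodes once the filesystem is up (BCH_FS_started), but read
 * further ahead - 16 nodes at level 1 - during recovery, when btree walks
 * are long, sequential and cache-cold.
 */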
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k = bkey_s_c_null, ret;
	size_t iters = 0;

	if (iter->prefetch && iter->journal.level)
		btree_and_journal_iter_prefetch(iter);
again:
	if (iter->at_end)
		return bkey_s_c_null;

	iters++;

	if (iters > 20 && iter->fail_if_too_many_whiteouts)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	if (iter->trans->journal_replay_not_finished)
		while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
		       bpos_lt(journal_k.k->p, iter->pos))
			bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}
void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}
void __bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						  struct btree_and_journal_iter *iter,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->trans	= trans;
	iter->b		= b;
	iter->node_iter	= node_iter;
	iter->pos	= b->data->min_key;
	iter->at_end	= false;
	INIT_LIST_HEAD(&iter->journal.list);

	if (trans->journal_replay_not_finished) {
		bch2_journal_iter_init(trans->c, &iter->journal, b->c.btree_id, b->c.level, pos);
		if (!test_bit(BCH_FS_may_go_rw, &trans->c->flags))
			list_add(&iter->journal.list, &trans->c->journal_iters);
	}
}
/*
 * This version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_trans *trans,
						struct btree_and_journal_iter *iter,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(trans, iter, b, node_iter, b->data->min_key);
}
/* sort and dedup all keys in the journal: */

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}
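/*
 * I.e. duplicates sort newest-last: updates to the same key end up
 * adjacent, ordered by journal sequence number and then by offset within
 * the journal entry, so the dedup pass in __journal_keys_sort() below can
 * simply keep the last key of each run of equal keys.
 */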
void bch2_journal_keys_put(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	BUG_ON(atomic_read(&keys->ref) <= 0);

	if (!atomic_dec_and_test(&keys->ref))
		return;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i) {
		if (i->overwritten_range &&
		    (i == &darray_last(*keys) ||
		     i->overwritten_range != i[1].overwritten_range))
			kfree(i->overwritten_range);

		if (i->allocated)
			kfree(i->k);
	}

	kvfree(keys->data);
	keys->data = NULL;
	keys->nr = keys->gap = keys->size = 0;

	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		kvfree(*i);
	genradix_free(&c->journal_entries);
}
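/*
 * Dropping the last reference frees everything: each distinct overwritten
 * range exactly once (adjacent keys share range objects, hence the
 * comparison against i[1]), any keys allocated by
 * bch2_journal_key_insert_take(), the key array itself, and the raw
 * journal entries.
 */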
static void __journal_keys_sort(struct journal_keys *keys)
{
	sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);

	cond_resched();

	struct journal_key *dst = keys->data;

	darray_for_each(*keys, src) {
		/*
		 * We don't accumulate accounting keys here because we have to
		 * compare each individual accounting key against the version in
		 * the btree during replay:
		 */
		if (src->k->k.type != KEY_TYPE_accounting &&
		    src + 1 < &darray_top(*keys) &&
		    !journal_key_cmp(src, src + 1))
			continue;

		*dst++ = *src;
	}

	keys->nr = dst - keys->data;
}
int bch2_journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_read = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (journal_replay_ignore(i))
			continue;

		cond_resched();

		for_each_jset_key(k, entry, &i->j) {
			struct journal_key n = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};

			if (darray_push(keys, n)) {
				__journal_keys_sort(keys);

				if (keys->nr * 8 > keys->size * 7) {
					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
						keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
					return -BCH_ERR_ENOMEM_journal_keys_sort;
				}

				BUG_ON(darray_push(keys, n));
			}

			nr_read++;
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
	return 0;
}
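/*
 * The keys->nr * 8 > keys->size * 7 check above guards the overflow path:
 * when darray_push() can't grow the array we sort and dedup in place, and
 * if that compaction freed less than 1/8th of the buffer we give up rather
 * than re-sort the whole array on every subsequent push.
 */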
void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
				  unsigned level_min, unsigned level_max,
				  struct bpos start, struct bpos end)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t dst = 0;

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i)
		if (!(i->btree_id == btree &&
		      i->level >= level_min &&
		      i->level <= level_max &&
		      bpos_ge(i->k->k.p, start) &&
		      bpos_le(i->k->k.p, end)))
			keys->data[dst++] = *i;
	keys->nr = keys->gap = dst;
}
void bch2_journal_keys_dump(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct printbuf buf = PRINTBUF;

	pr_info("%zu keys:", keys->nr);

	move_gap(keys, keys->nr);

	darray_for_each(*keys, i) {
		printbuf_reset(&buf);
		prt_printf(&buf, "btree=");
		bch2_btree_id_to_text(&buf, i->btree_id);
		prt_printf(&buf, " l=%u ", i->level);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
		pr_err("%s", buf.buf);
	}
	printbuf_exit(&buf);
}
void bch2_fs_journal_keys_init(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;

	atomic_set(&keys->ref, 1);
	keys->initial_ref_held = true;
	mutex_init(&keys->overwrite_lock);
}