// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "journal_reclaim.h"
#include "recovery_passes.h"
#include "sb-members.h"

#include <linux/random.h>

static const char * const bch2_btree_update_modes[] = {
#define x(t) #t,
	BCH_BTREE_UPDATE_MODES()
#undef x
	NULL
};

static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
				  btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);

/*
 * Verify that child nodes correctly span parent node's range:
 */
int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct bpos node_min = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
		: b->data->min_key;
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	struct bkey_buf prev;
	int ret = 0;

	BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
	       !bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
			b->data->min_key));

	if (b == btree_node_root(c, b)) {
		if (!bpos_eq(b->data->min_key, POS_MIN)) {
			printbuf_reset(&buf);
			bch2_bpos_to_text(&buf, b->data->min_key);
			need_fsck_err(c, btree_root_bad_min_key,
				      "btree root with incorrect min_key: %s", buf.buf);
			goto topology_repair;
		}

		if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
			printbuf_reset(&buf);
			bch2_bpos_to_text(&buf, b->data->max_key);
			need_fsck_err(c, btree_root_bad_max_key,
				      "btree root with incorrect max_key: %s", buf.buf);
			goto topology_repair;
		}
	}

	if (!b->c.level)
		return 0;

	bch2_bkey_buf_init(&prev);
	bkey_init(&prev.k->k);
	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		if (k.k->type != KEY_TYPE_btree_ptr_v2)
			goto out;

		struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

		struct bpos expected_min = bkey_deleted(&prev.k->k)
			? node_min
			: bpos_successor(prev.k->k.p);

		if (!bpos_eq(expected_min, bp.v->min_key)) {
			bch2_topology_error(c);

			printbuf_reset(&buf);
			prt_str(&buf, "end of prev node doesn't match start of next node\n");
			prt_printf(&buf, "  in btree %s level %u node ",
				   bch2_btree_id_str(b->c.btree_id), b->c.level);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
			prt_str(&buf, "\n  prev ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
			prt_str(&buf, "\n  next ");
			bch2_bkey_val_to_text(&buf, c, k);

			need_fsck_err(c, btree_node_topology_bad_min_key, "%s", buf.buf);
			goto topology_repair;
		}

		bch2_bkey_buf_reassemble(&prev, c, k);
		bch2_btree_and_journal_iter_advance(&iter);
	}

	if (bkey_deleted(&prev.k->k)) {
		bch2_topology_error(c);

		printbuf_reset(&buf);
		prt_str(&buf, "empty interior node\n");
		prt_printf(&buf, "  in btree %s level %u node ",
			   bch2_btree_id_str(b->c.btree_id), b->c.level);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

		need_fsck_err(c, btree_node_topology_empty_interior_node, "%s", buf.buf);
		goto topology_repair;
	} else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
		bch2_topology_error(c);

		printbuf_reset(&buf);
		prt_str(&buf, "last child node doesn't end at end of parent node\n");
		prt_printf(&buf, "  in btree %s level %u node ",
			   bch2_btree_id_str(b->c.btree_id), b->c.level);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		prt_str(&buf, "\n  last key ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));

		need_fsck_err(c, btree_node_topology_bad_max_key, "%s", buf.buf);
		goto topology_repair;
	}
out:
fsck_err:
	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&prev, c);
	printbuf_exit(&buf);
	return ret;
topology_repair:
	if ((c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology)) &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology) {
		bch2_inconsistent_error(c);
		ret = -BCH_ERR_btree_need_topology_repair;
	} else {
		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
	}
	goto out;
}
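/*
 * Illustrative sketch (not part of the original file): the invariant the
 * function above checks. For a parent covering [min_key, max_key], the
 * children must tile that range exactly, with no gaps or overlaps:
 *
 *	parent:  [0,   100]
 *	child 0: [0,    30]	min_key == parent min_key
 *	child 1: [31,   70]	min_key == bpos_successor(prev max_key)
 *	child 2: [71,  100]	last max_key == parent max_key
 *
 * A gap, an overlap, or a short last child is reported via need_fsck_err()
 * and handed off to the check_topology recovery pass.
 */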
/* Calculate ideal packed bkey format for new btree nodes: */

static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
{
	struct bkey_packed *k;
	struct bkey uk;

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k)) {
				uk = bkey_unpack_key(b, k);
				bch2_bkey_format_add_key(s, &uk);
			}
}

static struct bkey_format bch2_btree_calc_format(struct btree *b)
{
	struct bkey_format_state s;

	bch2_bkey_format_init(&s);
	bch2_bkey_format_add_pos(&s, b->data->min_key);
	bch2_bkey_format_add_pos(&s, b->data->max_key);
	__bch2_btree_calc_format(&s, b);

	return bch2_bkey_format_done(&s);
}

static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
					  struct bkey_format *old_f,
					  struct bkey_format *new_f)
{
	/* stupid integer promotion rules */
	ssize_t delta =
		(((int) new_f->key_u64s - old_f->key_u64s) *
		 (int) nr.packed_keys) +
		(((int) new_f->key_u64s - BKEY_U64s) *
		 (int) nr.unpacked_keys);

	BUG_ON(delta + nr.live_u64s < 0);

	return nr.live_u64s + delta;
}
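/*
 * Worked example (illustrative numbers, not from the original file): with
 * nr.packed_keys = 100, nr.unpacked_keys = 10, nr.live_u64s = 700,
 * old_f->key_u64s = 3, new_f->key_u64s = 2 and BKEY_U64s = 5:
 *
 *	delta  = (2 - 3) * 100 + (2 - 5) * 10 = -130
 *	result = 700 + (-130) = 570 u64s
 *
 * i.e. a denser format shrinks the node. The casts to int exist because
 * key_u64s is a small unsigned field and the subtraction must be signed.
 */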
/**
 * bch2_btree_node_format_fits - check if we could rewrite node with a new format
 *
 * @c:		filesystem handle
 * @b:		btree node to rewrite
 * @nr:		number of keys for new node (i.e. b->nr)
 * @new_f:	bkey format to translate keys to
 *
 * Returns: true if all re-packed keys will be able to fit in a new node.
 *
 * Assumes all keys will successfully pack with the new format.
 */
static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
					struct btree_nr_keys nr,
					struct bkey_format *new_f)
{
	size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);

	return __vstruct_bytes(struct btree_node, u64s) < btree_buf_bytes(b);
}
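/*
 * Usage sketch (illustrative only): before rewriting a node with a
 * recalculated format, fall back to the old format when the repacked keys
 * would no longer fit:
 *
 *	struct bkey_format new_f = bch2_btree_calc_format(b);
 *
 *	if (!bch2_btree_node_format_fits(c, b, b->nr, &new_f))
 *		new_f = b->format;
 *
 * This mirrors what bch2_btree_node_alloc_replacement() does below.
 */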
/* Btree node freeing/allocation: */

static void __btree_node_free(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trace_and_count(c, btree_node_free, trans, b);

	BUG_ON(btree_node_write_blocked(b));
	BUG_ON(btree_node_dirty(b));
	BUG_ON(btree_node_need_write(b));
	BUG_ON(b == btree_node_root(c, b));
	BUG_ON(b->ob.nr);
	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON(b->will_make_reachable);

	clear_btree_node_noevict(b);

	mutex_lock(&c->btree_cache.lock);
	list_move(&b->list, &c->btree_cache.freeable);
	mutex_unlock(&c->btree_cache.lock);
}

static void bch2_btree_node_free_inmem(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree *b)
{
	struct bch_fs *c = trans->c;
	unsigned i, level = b->c.level;

	bch2_btree_node_lock_write_nofail(trans, path, &b->c);
	bch2_btree_node_hash_remove(&c->btree_cache, b);
	__btree_node_free(trans, b);
	six_unlock_write(&b->c.lock);
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path(trans, path, i)
		if (path->l[level].b == b) {
			btree_node_unlock(trans, path, level);
			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
		}
}

static void bch2_btree_node_free_never_used(struct btree_update *as,
					    struct btree_trans *trans,
					    struct btree *b)
{
	struct bch_fs *c = as->c;
	struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
	struct btree_path *path;
	unsigned i, level = b->c.level;

	BUG_ON(!list_empty(&b->write_blocked));
	BUG_ON(b->will_make_reachable != (1UL|(unsigned long) as));

	b->will_make_reachable = 0;
	closure_put(&as->cl);

	clear_btree_node_will_make_reachable(b);
	clear_btree_node_accessed(b);
	clear_btree_node_dirty_acct(c, b);
	clear_btree_node_need_write(b);

	mutex_lock(&c->btree_cache.lock);
	list_del_init(&b->list);
	bch2_btree_node_hash_remove(&c->btree_cache, b);
	mutex_unlock(&c->btree_cache.lock);

	BUG_ON(p->nr >= ARRAY_SIZE(p->b));
	p->b[p->nr++] = b;

	six_unlock_intent(&b->c.lock);

	trans_for_each_path(trans, path, i)
		if (path->l[level].b == b) {
			btree_node_unlock(trans, path, level);
			path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
		}
}

static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
					     struct disk_reservation *res,
					     struct closure *cl,
					     bool interior_node,
					     unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct btree *b;
	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
	struct open_buckets obs = { .nr = 0 };
	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
	unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim
		? BTREE_NODE_RESERVE
		: 0;
	int ret;

	mutex_lock(&c->btree_reserve_cache_lock);
	if (c->btree_reserve_cache_nr > nr_reserve) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		obs = a->ob;
		bkey_copy(&tmp.k, &a->k);
		mutex_unlock(&c->btree_reserve_cache_lock);
		goto mem_alloc;
	}
	mutex_unlock(&c->btree_reserve_cache_lock);
retry:
	ret = bch2_alloc_sectors_start_trans(trans,
				      c->opts.metadata_target ?:
				      c->opts.foreground_target,
				      0,
				      writepoint_ptr(&c->btree_write_point),
				      &devs_have,
				      res->nr_replicas,
				      min(res->nr_replicas,
					  c->opts.metadata_replicas_required),
				      watermark, 0, cl, &wp);
	if (unlikely(ret))
		return ERR_PTR(ret);

	if (wp->sectors_free < btree_sectors(c)) {
		struct open_bucket *ob;
		unsigned i;

		open_bucket_for_each(c, &wp->ptrs, ob, i)
			if (ob->sectors_free < btree_sectors(c))
				ob->sectors_free = 0;

		bch2_alloc_sectors_done(c, wp);
		goto retry;
	}

	bkey_btree_ptr_v2_init(&tmp.k);
	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);

	bch2_open_bucket_get(c, wp, &obs);
	bch2_alloc_sectors_done(c, wp);
mem_alloc:
	b = bch2_btree_node_mem_alloc(trans, interior_node);
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	/* we hold cannibalize_lock: */
	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, &tmp.k);
	b->ob = obs;

	return b;
}

static struct btree *bch2_btree_node_alloc(struct btree_update *as,
					   struct btree_trans *trans,
					   unsigned level)
{
	struct bch_fs *c = as->c;
	struct btree *b;
	struct prealloc_nodes *p = &as->prealloc_nodes[!!level];
	int ret;

	BUG_ON(level >= BTREE_MAX_DEPTH);
	BUG_ON(!p->nr);

	b = p->b[--p->nr];

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);

	set_btree_node_accessed(b);
	set_btree_node_dirty_acct(c, b);
	set_btree_node_need_write(b);

	bch2_bset_init_first(b, &b->data->keys);
	b->c.level	= level;
	b->c.btree_id	= as->btree_id;
	b->version_ondisk = c->sb.version;

	memset(&b->nr, 0, sizeof(b->nr));
	b->data->magic = cpu_to_le64(bset_magic(c));
	memset(&b->data->_ptr, 0, sizeof(b->data->_ptr));
	b->data->flags = 0;
	SET_BTREE_NODE_ID(b->data, as->btree_id);
	SET_BTREE_NODE_LEVEL(b->data, level);

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(&b->key);

		bp->v.mem_ptr = 0;
		bp->v.seq = b->data->keys.seq;
		bp->v.sectors_written = 0;
	}

	SET_BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data, true);

	bch2_btree_build_aux_trees(b);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
	BUG_ON(ret);

	trace_and_count(c, btree_node_alloc, trans, b);
	bch2_increment_clock(c, btree_sectors(c), WRITE);
	return b;
}

static void btree_set_min(struct btree *b, struct bpos pos)
{
	if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.min_key = pos;
	b->data->min_key = pos;
}

static void btree_set_max(struct btree *b, struct bpos pos)
{
	if (b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.max_key = pos;
	b->data->max_key = pos;
}

static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
						       struct btree_trans *trans,
						       struct btree *b)
{
	struct btree *n = bch2_btree_node_alloc(as, trans, b->c.level);
	struct bkey_format format = bch2_btree_calc_format(b);

	/*
	 * The keys might expand with the new format - if they wouldn't fit in
	 * the btree node anymore, use the old format for now:
	 */
	if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format))
		format = b->format;

	SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);

	btree_set_min(n, b->data->min_key);
	btree_set_max(n, b->data->max_key);

	n->data->format		= format;
	btree_node_set_format(n, format);

	bch2_btree_sort_into(as->c, n, b);

	btree_node_reset_sib_u64s(n);
	return n;
}

static struct btree *__btree_root_alloc(struct btree_update *as,
					struct btree_trans *trans, unsigned level)
{
	struct btree *b = bch2_btree_node_alloc(as, trans, level);

	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);

	btree_node_set_format(b, b->data->format);
	bch2_btree_build_aux_trees(b);

	return b;
}

static void bch2_btree_reserve_put(struct btree_update *as, struct btree_trans *trans)
{
	struct bch_fs *c = as->c;
	struct prealloc_nodes *p;

	for (p = as->prealloc_nodes;
	     p < as->prealloc_nodes + ARRAY_SIZE(as->prealloc_nodes);
	     p++) {
		while (p->nr) {
			struct btree *b = p->b[--p->nr];

			mutex_lock(&c->btree_reserve_cache_lock);

			if (c->btree_reserve_cache_nr <
			    ARRAY_SIZE(c->btree_reserve_cache)) {
				struct btree_alloc *a =
					&c->btree_reserve_cache[c->btree_reserve_cache_nr++];

				a->ob = b->ob;
				b->ob.nr = 0;
				bkey_copy(&a->k, &b->key);
			} else {
				bch2_open_buckets_put(c, &b->ob);
			}

			mutex_unlock(&c->btree_reserve_cache_lock);

			btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
			btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
			__btree_node_free(trans, b);
			six_unlock_write(&b->c.lock);
			six_unlock_intent(&b->c.lock);
		}
	}
}

static int bch2_btree_reserve_get(struct btree_trans *trans,
				  struct btree_update *as,
				  unsigned nr_nodes[2],
				  unsigned flags,
				  struct closure *cl)
{
	struct btree *b;
	unsigned interior;
	int ret = 0;

	BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX);

	/*
	 * Protects reaping from the btree node cache and using the btree node
	 * open bucket reserve:
	 */
	ret = bch2_btree_cache_cannibalize_lock(trans, cl);
	if (ret)
		return ret;

	for (interior = 0; interior < 2; interior++) {
		struct prealloc_nodes *p = as->prealloc_nodes + interior;

		while (p->nr < nr_nodes[interior]) {
			b = __bch2_btree_node_alloc(trans, &as->disk_res, cl,
						    interior, flags);
			if (IS_ERR(b)) {
				ret = PTR_ERR(b);
				goto err;
			}

			p->b[p->nr++] = b;
		}
	}
err:
	bch2_btree_cache_cannibalize_unlock(trans);
	return ret;
}

/* Asynchronous interior node update machinery */

static void bch2_btree_update_free(struct btree_update *as, struct btree_trans *trans)
{
	struct bch_fs *c = as->c;

	if (as->took_gc_lock)
		up_read(&c->gc_lock);
	as->took_gc_lock = false;

	bch2_journal_pin_drop(&c->journal, &as->journal);
	bch2_journal_pin_flush(&c->journal, &as->journal);
	bch2_disk_reservation_put(c, &as->disk_res);
	bch2_btree_reserve_put(as, trans);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_total],
			       as->start_time);

	mutex_lock(&c->btree_interior_update_lock);
	list_del(&as->unwritten_list);
	list_del(&as->list);

	closure_debug_destroy(&as->cl);
	mempool_free(as, &c->btree_interior_update_pool);

	/*
	 * Have to do the wakeup with btree_interior_update_lock still held,
	 * since being on btree_interior_update_list is our ref on @c:
	 */
	closure_wake_up(&c->btree_interior_update_wait);

	mutex_unlock(&c->btree_interior_update_lock);
}
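/*
 * Illustrative sketch (not from the original file; the waiter shown is
 * hypothetical): the wakeup above pairs with waiters that check the
 * update lists under the same lock, roughly:
 *
 *	closure_wait_event(&c->btree_interior_update_wait,
 *			   list_empty(&c->btree_interior_update_list));
 *
 * Waking up only after dropping btree_interior_update_lock would open a
 * window where the last update is already off the list, the waiter
 * proceeds with teardown, and this function is still touching @c.
 */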
static void btree_update_add_key(struct btree_update *as,
				 struct keylist *keys, struct btree *b)
{
	struct bkey_i *k = &b->key;

	BUG_ON(bch2_keylist_u64s(keys) + k->k.u64s >
	       ARRAY_SIZE(as->_old_keys));

	bkey_copy(keys->top, k);
	bkey_i_to_btree_ptr_v2(keys->top)->v.mem_ptr = b->c.level + 1;

	bch2_keylist_push(keys);
}

static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
{
	for_each_keylist_key(&as->new_keys, k)
		if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k)))
			return false;
	return true;
}

static void btree_update_new_nodes_mark_sb(struct btree_update *as)
{
	struct bch_fs *c = as->c;

	mutex_lock(&c->sb_lock);
	for_each_keylist_key(&as->new_keys, k)
		bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k));

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);
}

/*
 * The transactional part of an interior btree node update, where we journal the
 * update we did to the interior node and update alloc info:
 */
static int btree_update_nodes_written_trans(struct btree_trans *trans,
					    struct btree_update *as)
{
	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, as->journal_u64s);
	int ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	memcpy(e, as->journal_entries, as->journal_u64s * sizeof(u64));

	trans->journal_pin = &as->journal;

	for_each_keylist_key(&as->old_keys, k) {
		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

		ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k),
					   BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}

	for_each_keylist_key(&as->new_keys, k) {
		unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

		ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k),
					   BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}

	return 0;
}

static void btree_update_nodes_written(struct btree_update *as)
{
	struct bch_fs *c = as->c;
	struct btree *b;
	struct btree_trans *trans = bch2_trans_get(c);
	u64 journal_seq = 0;
	unsigned i;
	int ret;

	/*
	 * If we're already in an error state, it might be because a btree node
	 * was never written, and we might be trying to free that same btree
	 * node here, but it won't have been marked as allocated and we'll see
	 * spurious disk usage inconsistencies in the transactional part below
	 * if we don't skip it:
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	if (!btree_update_new_nodes_marked_sb(as))
		btree_update_new_nodes_mark_sb(as);

	/*
	 * Wait for any in flight writes to finish before we free the old nodes
	 * on disk:
	 */
	for (i = 0; i < as->nr_old_nodes; i++) {
		__le64 seq;

		b = as->old_nodes[i];

		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
		seq = b->data ? b->data->keys.seq : 0;
		six_unlock_read(&b->c.lock);

		if (seq == as->old_nodes_seq[i])
			wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
				       TASK_UNINTERRUPTIBLE);
	}

	/*
	 * We did an update to a parent node where the pointers we added pointed
	 * to child nodes that weren't written yet: now, the child nodes have
	 * been written so we can write out the update to the interior node.
	 */

	/*
	 * We can't call into journal reclaim here: we'd block on the journal
	 * reclaim lock, but we may need to release the open buckets we have
	 * pinned in order for other btree updates to make forward progress, and
	 * journal reclaim does btree updates when flushing bkey_cached entries,
	 * which may require allocations as well.
	 */
	ret = commit_do(trans, &as->disk_res, &journal_seq,
			BCH_WATERMARK_interior_updates|
			BCH_TRANS_COMMIT_no_enospc|
			BCH_TRANS_COMMIT_no_check_rw|
			BCH_TRANS_COMMIT_journal_reclaim,
			btree_update_nodes_written_trans(trans, as));
	bch2_trans_unlock(trans);

	bch2_fs_fatal_err_on(ret && !bch2_journal_error(&c->journal), c,
			     "%s", bch2_err_str(ret));
err:
	/*
	 * We have to be careful because another thread might be getting ready
	 * to free as->b and calling btree_update_reparent() on us - we'll
	 * recheck under btree_update_lock below:
	 */
	b = READ_ONCE(as->b);
	if (b) {
		/*
		 * @b is the node we did the final insert into:
		 *
		 * On failure to get a journal reservation, we still have to
		 * unblock the write and allow most of the write path to happen
		 * so that shutdown works, but the i->journal_seq mechanism
		 * won't work to prevent the btree write from being visible (we
		 * didn't get a journal sequence number) - instead
		 * __bch2_btree_node_write() doesn't do the actual write if
		 * we're in journal error state:
		 */

		/*
		 * Ensure transaction is unlocked before using
		 * btree_node_lock_nopath() (the use of which is always suspect,
		 * we need to work on removing this in the future)
		 *
		 * It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
		 * calls bch2_path_upgrade(), before we call path_make_mut(), so
		 * we may rarely end up with a locked path besides the one we
		 * have here:
		 */
		bch2_trans_unlock(trans);
		bch2_trans_begin(trans);
		btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
						as->btree_id, b->c.level, b->key.k.p);
		struct btree_path *path = trans->paths + path_idx;
		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
		mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
		path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
		path->l[b->c.level].b = b;

		bch2_btree_node_lock_write_nofail(trans, path, &b->c);

		mutex_lock(&c->btree_interior_update_lock);

		list_del(&as->write_blocked_list);
		if (list_empty(&b->write_blocked))
			clear_btree_node_write_blocked(b);

		/*
		 * Node might have been freed, recheck under
		 * btree_interior_update_lock:
		 */
		if (as->b == b) {
			BUG_ON(!b->c.level);
			BUG_ON(!btree_node_dirty(b));

			if (!ret) {
				struct bset *last = btree_bset_last(b);

				last->journal_seq = cpu_to_le64(
							max(journal_seq,
							    le64_to_cpu(last->journal_seq)));

				bch2_btree_add_journal_pin(c, b, journal_seq);
			} else {
				/*
				 * If we didn't get a journal sequence number we
				 * can't write this btree node, because recovery
				 * won't know to ignore this write:
				 */
				set_btree_node_never_write(b);
			}
		}

		mutex_unlock(&c->btree_interior_update_lock);

		mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
		six_unlock_write(&b->c.lock);

		btree_node_write_if_need(c, b, SIX_LOCK_intent);
		btree_node_unlock(trans, path, b->c.level);
		bch2_path_put(trans, path_idx, true);
	}

	bch2_journal_pin_drop(&c->journal, &as->journal);

	mutex_lock(&c->btree_interior_update_lock);
	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		BUG_ON(b->will_make_reachable != (unsigned long) as);
		b->will_make_reachable = 0;
		clear_btree_node_will_make_reachable(b);
	}
	mutex_unlock(&c->btree_interior_update_lock);

	for (i = 0; i < as->nr_new_nodes; i++) {
		b = as->new_nodes[i];

		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
		btree_node_write_if_need(c, b, SIX_LOCK_read);
		six_unlock_read(&b->c.lock);
	}

	for (i = 0; i < as->nr_open_buckets; i++)
		bch2_open_bucket_put(c, c->open_buckets + as->open_buckets[i]);

	bch2_btree_update_free(as, trans);
	bch2_trans_put(trans);
}

static void btree_interior_update_work(struct work_struct *work)
{
	struct bch_fs *c =
		container_of(work, struct bch_fs, btree_interior_update_work);
	struct btree_update *as;

	while (1) {
		mutex_lock(&c->btree_interior_update_lock);
		as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
					      struct btree_update, unwritten_list);
		if (as && !as->nodes_written)
			as = NULL;
		mutex_unlock(&c->btree_interior_update_lock);

		if (!as)
			break;

		btree_update_nodes_written(as);
	}
}

static CLOSURE_CALLBACK(btree_update_set_nodes_written)
{
	closure_type(as, struct btree_update, cl);
	struct bch_fs *c = as->c;

	mutex_lock(&c->btree_interior_update_lock);
	as->nodes_written = true;
	mutex_unlock(&c->btree_interior_update_lock);

	queue_work(c->btree_interior_update_worker, &c->btree_interior_update_work);
}

/*
 * We're updating @b with pointers to nodes that haven't finished writing yet:
 * block @b from being written until @as completes
 */
static void btree_update_updated_node(struct btree_update *as, struct btree *b)
{
	struct bch_fs *c = as->c;

	BUG_ON(as->mode != BTREE_UPDATE_none);
	BUG_ON(as->update_level_end < b->c.level);
	BUG_ON(!btree_node_dirty(b));
	BUG_ON(!b->c.level);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	as->mode	= BTREE_UPDATE_node;
	as->b		= b;
	as->update_level_end = b->c.level;

	set_btree_node_write_blocked(b);
	list_add(&as->write_blocked_list, &b->write_blocked);

	mutex_unlock(&c->btree_interior_update_lock);
}
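/*
 * Illustrative sketch (not from the original file): after
 * btree_update_updated_node(), the structures are linked like this:
 *
 *	as->b -------------------> b (interior node with the new pointers)
 *	as->write_blocked_list is on b->write_blocked
 *
 * so any attempt to write @b sees btree_node_write_blocked() and backs
 * off until btree_update_nodes_written() removes @as from
 * b->write_blocked, once the child nodes are reachable on disk.
 */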
static int bch2_update_reparent_journal_pin_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	return 0;
}

static void btree_update_reparent(struct btree_update *as,
				  struct btree_update *child)
{
	struct bch_fs *c = as->c;

	lockdep_assert_held(&c->btree_interior_update_lock);

	child->b = NULL;
	child->mode = BTREE_UPDATE_update;

	bch2_journal_pin_copy(&c->journal, &as->journal, &child->journal,
			      bch2_update_reparent_journal_pin_flush);
}

static void btree_update_updated_root(struct btree_update *as, struct btree *b)
{
	struct bkey_i *insert = &b->key;
	struct bch_fs *c = as->c;

	BUG_ON(as->mode != BTREE_UPDATE_none);

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

	as->journal_u64s +=
		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_root,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);

	as->mode	= BTREE_UPDATE_root;
	mutex_unlock(&c->btree_interior_update_lock);
}

/**
 * bch2_btree_update_add_new_node:
 *
 * This causes @as to wait on @b to be written, before it gets to
 * bch2_btree_update_nodes_written
 *
 * Additionally, it sets b->will_make_reachable to prevent any additional writes
 * to @b from happening besides the first until @b is reachable on disk
 *
 * And it adds @b to the list of @as's new nodes, so that we can update sector
 * counts in bch2_btree_update_nodes_written:
 */
static void bch2_btree_update_add_new_node(struct btree_update *as, struct btree *b)
{
	struct bch_fs *c = as->c;

	closure_get(&as->cl);

	mutex_lock(&c->btree_interior_update_lock);
	BUG_ON(as->nr_new_nodes >= ARRAY_SIZE(as->new_nodes));
	BUG_ON(b->will_make_reachable);

	as->new_nodes[as->nr_new_nodes++] = b;
	b->will_make_reachable = 1UL|(unsigned long) as;
	set_btree_node_will_make_reachable(b);

	mutex_unlock(&c->btree_interior_update_lock);

	btree_update_add_key(as, &as->new_keys, b);

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		unsigned bytes = vstruct_end(&b->data->keys) - (void *) b->data;
		unsigned sectors = round_up(bytes, block_bytes(c)) >> 9;

		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
			cpu_to_le16(sectors);
	}
}
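/*
 * Worked example (illustrative numbers, not from the original file): with
 * 4096-byte blocks, a node whose written bsets end 5000 bytes into the
 * buffer gets:
 *
 *	bytes   = 5000
 *	sectors = round_up(5000, 4096) >> 9 = 8192 >> 9 = 16
 *
 * i.e. sectors_written records, in 512-byte sectors rounded up to the
 * block size, how much of the node has been written so far.
 */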
/*
 * Drop @b from its btree_update's list of new nodes, if it was one:
 */
static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
{
	struct btree_update *as;
	unsigned long v;
	unsigned i;

	mutex_lock(&c->btree_interior_update_lock);
	/*
	 * When b->will_make_reachable != 0, it owns a ref on as->cl that's
	 * dropped when it gets written by bch2_btree_complete_write - the
	 * xchg() is for synchronization with bch2_btree_complete_write:
	 */
	v = xchg(&b->will_make_reachable, 0);
	clear_btree_node_will_make_reachable(b);
	as = (struct btree_update *) (v & ~1UL);

	if (!as) {
		mutex_unlock(&c->btree_interior_update_lock);
		return;
	}

	for (i = 0; i < as->nr_new_nodes; i++)
		if (as->new_nodes[i] == b)
			goto found;

	BUG();
found:
	array_remove_item(as->new_nodes, as->nr_new_nodes, i);
	mutex_unlock(&c->btree_interior_update_lock);

	if (v & 1)
		closure_put(&as->cl);
}
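/*
 * Illustrative note (not from the original file): will_make_reachable is
 * a tagged pointer. struct btree_update is at least word aligned, so bit
 * 0 of the pointer is free to carry state:
 *
 *	b->will_make_reachable = 1UL | (unsigned long) as;
 *	...
 *	as  = (struct btree_update *) (v & ~1UL);	// pointer part
 *	ref = v & 1;					// "we hold a ref on as->cl"
 *
 * The xchg() claims both atomically, so exactly one of the write
 * completion path and this path drops the closure ref.
 */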
static void bch2_btree_update_get_open_buckets(struct btree_update *as, struct btree *b)
{
	while (b->ob.nr)
		as->open_buckets[as->nr_open_buckets++] =
			b->ob.v[--b->ob.nr];
}

static int bch2_btree_update_will_free_node_journal_pin_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	return 0;
}

/*
 * @b is being split/rewritten: it may have pointers to not-yet-written btree
 * nodes and thus outstanding btree_updates - redirect @b's
 * btree_updates to point to this btree_update:
 */
static void bch2_btree_interior_update_will_free_node(struct btree_update *as,
						      struct btree *b)
{
	struct bch_fs *c = as->c;
	struct btree_update *p, *n;
	struct btree_write *w;

	set_btree_node_dying(b);

	if (btree_node_fake(b))
		return;

	mutex_lock(&c->btree_interior_update_lock);

	/*
	 * Does this node have any btree_update operations preventing
	 * it from being written?
	 *
	 * If so, redirect them to point to this btree_update: we can
	 * write out our new nodes, but we won't make them visible until those
	 * operations complete
	 */
	list_for_each_entry_safe(p, n, &b->write_blocked, write_blocked_list) {
		list_del_init(&p->write_blocked_list);
		btree_update_reparent(as, p);

		/*
		 * for flush_held_btree_writes() waiting on updates to flush or
		 * nodes to be writeable:
		 */
		closure_wake_up(&c->btree_interior_update_wait);
	}

	clear_btree_node_dirty_acct(c, b);
	clear_btree_node_need_write(b);
	clear_btree_node_write_blocked(b);

	/*
	 * Does this node have unwritten data that has a pin on the journal?
	 *
	 * If so, transfer that pin to the btree_update operation -
	 * note that if we're freeing multiple nodes, we only need to keep the
	 * oldest pin of any of the nodes we're freeing. We'll release the pin
	 * when the new nodes are persistent and reachable on disk:
	 */
	w = btree_current_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal,
			      bch2_btree_update_will_free_node_journal_pin_flush);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	w = btree_prev_write(b);
	bch2_journal_pin_copy(&c->journal, &as->journal, &w->journal,
			      bch2_btree_update_will_free_node_journal_pin_flush);
	bch2_journal_pin_drop(&c->journal, &w->journal);

	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * Is this a node that isn't reachable on disk yet?
	 *
	 * Nodes that aren't reachable yet have writes blocked until they're
	 * reachable - now that we've cancelled any pending writes and moved
	 * things waiting on that write to wait on this update, we can drop this
	 * node from the list of nodes that the other update is making
	 * reachable, prior to freeing it:
	 */
	btree_update_drop_new_node(c, b);

	btree_update_add_key(as, &as->old_keys, b);

	as->old_nodes[as->nr_old_nodes] = b;
	as->old_nodes_seq[as->nr_old_nodes] = b->data->keys.seq;
	as->nr_old_nodes++;
}

static void bch2_btree_update_done(struct btree_update *as, struct btree_trans *trans)
{
	struct bch_fs *c = as->c;
	u64 start_time = as->start_time;

	BUG_ON(as->mode == BTREE_UPDATE_none);

	if (as->took_gc_lock)
		up_read(&as->c->gc_lock);
	as->took_gc_lock = false;

	bch2_btree_reserve_put(as, trans);

	continue_at(&as->cl, btree_update_set_nodes_written,
		    as->c->btree_interior_update_worker);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_interior_update_foreground],
			       start_time);
}

static struct btree_update *
bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
			unsigned level_start, bool split, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree_update *as;
	u64 start_time = local_clock();
	int disk_res_flags = (flags & BCH_TRANS_COMMIT_no_enospc)
		? BCH_DISK_RESERVATION_NOFAIL : 0;
	unsigned nr_nodes[2] = { 0, 0 };
	unsigned level_end = level_start;
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
	int ret = 0;
	u32 restart_count = trans->restart_count;

	BUG_ON(!path->should_be_locked);

	if (watermark == BCH_WATERMARK_copygc)
		watermark = BCH_WATERMARK_btree_copygc;
	if (watermark < BCH_WATERMARK_btree)
		watermark = BCH_WATERMARK_btree;

	flags &= ~BCH_WATERMARK_MASK;
	flags |= watermark;

	if (watermark < BCH_WATERMARK_reclaim &&
	    test_bit(JOURNAL_space_low, &c->journal.flags)) {
		if (flags & BCH_TRANS_COMMIT_journal_reclaim)
			return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);

		ret = drop_locks_do(trans,
			({ wait_event(c->journal.wait, !test_bit(JOURNAL_space_low, &c->journal.flags)); 0; }));
		if (ret)
			return ERR_PTR(ret);
	}

	while (1) {
		nr_nodes[!!level_end] += 1 + split;
		level_end++;

		ret = bch2_btree_path_upgrade(trans, path, level_end + 1);
		if (ret)
			return ERR_PTR(ret);

		if (!btree_path_node(path, level_end)) {
			/* Allocating new root? */
			nr_nodes[1] += split;
			level_end = BTREE_MAX_DEPTH;
			break;
		}

		/*
		 * Always check for space for two keys, even if we won't have to
		 * split at prior level - it might have been a merge instead:
		 */
		if (bch2_btree_node_insert_fits(path->l[level_end].b,
						BKEY_BTREE_PTR_U64s_MAX * 2))
			break;

		split = path->l[level_end].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
	}
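	/*
	 * Illustrative note (not from the original file): each loop iteration
	 * walks one level up. A split at level n inserts up to two new keys
	 * into level n + 1, which is why the loop can only stop once the
	 * parent has room for BKEY_BTREE_PTR_U64s_MAX * 2, and why
	 * nr_nodes[] grows by 1 + split per level: one replacement node,
	 * plus a sibling if that level may split as well.
	 */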
	if (!down_read_trylock(&c->gc_lock)) {
		ret = drop_locks_do(trans, (down_read(&c->gc_lock), 0));
		if (ret) {
			up_read(&c->gc_lock);
			return ERR_PTR(ret);
		}
	}

	as = mempool_alloc(&c->btree_interior_update_pool, GFP_NOFS);
	memset(as, 0, sizeof(*as));
	closure_init(&as->cl, NULL);
	as->c		= c;
	as->start_time	= start_time;
	as->ip_started	= _RET_IP_;
	as->mode	= BTREE_UPDATE_none;
	as->watermark	= watermark;
	as->took_gc_lock = true;
	as->btree_id	= path->btree_id;
	as->update_level_start = level_start;
	as->update_level_end = level_end;
	INIT_LIST_HEAD(&as->list);
	INIT_LIST_HEAD(&as->unwritten_list);
	INIT_LIST_HEAD(&as->write_blocked_list);
	bch2_keylist_init(&as->old_keys, as->_old_keys);
	bch2_keylist_init(&as->new_keys, as->_new_keys);
	bch2_keylist_init(&as->parent_keys, as->inline_keys);

	mutex_lock(&c->btree_interior_update_lock);
	list_add_tail(&as->list, &c->btree_interior_update_list);
	mutex_unlock(&c->btree_interior_update_lock);

	/*
	 * We don't want to allocate if we're in an error state, that can cause
	 * deadlock on emergency shutdown due to open buckets getting stuck in
	 * the btree_reserve_cache after allocator shutdown has cleared it out.
	 * This check needs to come after adding us to the btree_interior_update
	 * list but before calling bch2_btree_reserve_get, to synchronize with
	 * __bch2_fs_read_only().
	 */
	ret = bch2_journal_error(&c->journal);
	if (ret)
		goto err;

	ret = bch2_disk_reservation_get(c, &as->disk_res,
			(nr_nodes[0] + nr_nodes[1]) * btree_sectors(c),
			c->opts.metadata_replicas,
			disk_res_flags);
	if (ret)
		goto err;

	ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, NULL);
	if (bch2_err_matches(ret, ENOSPC) ||
	    bch2_err_matches(ret, ENOMEM)) {
		struct closure cl;

		/*
		 * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
		 * flag
		 */
		if (bch2_err_matches(ret, ENOSPC) &&
		    (flags & BCH_TRANS_COMMIT_journal_reclaim) &&
		    watermark < BCH_WATERMARK_reclaim) {
			ret = -BCH_ERR_journal_reclaim_would_deadlock;
			goto err;
		}

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);

			bch2_trans_unlock(trans);
			closure_sync(&cl);
		} while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
	}

	if (ret) {
		trace_and_count(c, btree_reserve_get_fail, trans->fn,
				_RET_IP_, nr_nodes[0] + nr_nodes[1], ret);
		goto err;
	}

	ret = bch2_trans_relock(trans);
	if (ret)
		goto err;

	bch2_trans_verify_not_restarted(trans, restart_count);
	return as;
err:
	bch2_btree_update_free(as, trans);
	if (!bch2_err_matches(ret, ENOSPC) &&
	    !bch2_err_matches(ret, EROFS) &&
	    ret != -BCH_ERR_journal_reclaim_would_deadlock)
		bch_err_fn_ratelimited(c, ret);
	return ERR_PTR(ret);
}

/* Btree root updates: */

static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
{
	/* Root nodes cannot be reaped */
	mutex_lock(&c->btree_cache.lock);
	list_del_init(&b->list);
	mutex_unlock(&c->btree_cache.lock);

	mutex_lock(&c->btree_root_lock);
	bch2_btree_id_root(c, b->c.btree_id)->b = b;
	mutex_unlock(&c->btree_root_lock);

	bch2_recalc_btree_reserve(c);
}

static int bch2_btree_set_root(struct btree_update *as,
			       struct btree_trans *trans,
			       struct btree_path *path,
			       struct btree *b,
			       bool nofail)
{
	struct bch_fs *c = as->c;

	trace_and_count(c, btree_node_set_root, trans, b);

	struct btree *old = btree_node_root(c, b);

	/*
	 * Ensure no one is using the old root while we switch to the
	 * new root:
	 */
	if (nofail) {
		bch2_btree_node_lock_write_nofail(trans, path, &old->c);
	} else {
		int ret = bch2_btree_node_lock_write(trans, path, &old->c);
		if (ret)
			return ret;
	}

	bch2_btree_set_root_inmem(c, b);

	btree_update_updated_root(as, b);

	/*
	 * Unlock old root after new root is visible:
	 *
	 * The new root isn't persistent, but that's ok: we still have
	 * an intent lock on the new root, and any updates that would
	 * depend on the new root would have to update the new root.
	 */
	bch2_btree_node_unlock_write(trans, path, old);
	return 0;
}

/* Interior node updates: */

static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
					struct btree_trans *trans,
					struct btree_path *path,
					struct btree *b,
					struct btree_node_iter *node_iter,
					struct bkey_i *insert)
{
	struct bch_fs *c = as->c;
	struct bkey_packed *k;
	struct printbuf buf = PRINTBUF;
	unsigned long old, new, v;

	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
	       !btree_ptr_sectors_written(insert));

	if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
		bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
			      btree_node_type(b), WRITE, &buf) ?:
	    bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) {
		printbuf_reset(&buf);
		prt_printf(&buf, "inserting invalid bkey\n  ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
		prt_printf(&buf, "\n  ");
		bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
				  btree_node_type(b), WRITE, &buf);
		bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf);

		bch2_fs_inconsistent(c, "%s", buf.buf);
		dump_stack();
	}

	BUG_ON(as->journal_u64s + jset_u64s(insert->k.u64s) >
	       ARRAY_SIZE(as->journal_entries));

	as->journal_u64s +=
		journal_entry_set((void *) &as->journal_entries[as->journal_u64s],
				  BCH_JSET_ENTRY_btree_keys,
				  b->c.btree_id, b->c.level,
				  insert, insert->k.u64s);

	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
	       bkey_iter_pos_cmp(b, k, &insert->k.p) < 0)
		bch2_btree_node_iter_advance(node_iter, b);

	bch2_btree_bset_insert_key(trans, path, b, node_iter, insert);
	set_btree_node_dirty_acct(c, b);

	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		new &= ~BTREE_WRITE_TYPE_MASK;
		new |= BTREE_WRITE_interior;
		new |= 1 << BTREE_NODE_need_write;
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	printbuf_exit(&buf);
}
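/*
 * Illustrative note (not from the original file): the cmpxchg() loop
 * above is the standard lockless read-modify-write pattern for b->flags,
 * which is shared with the IO path:
 *
 *	v = READ_ONCE(b->flags);
 *	do {
 *		old = new = v;
 *		new = <updated copy of old>;
 *	} while ((v = cmpxchg(&b->flags, old, new)) != old);
 *
 * i.e. retry until no other CPU changed b->flags between our read and
 * our attempt to install the updated copy.
 */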
static void
bch2_btree_insert_keys_interior(struct btree_update *as,
				struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter node_iter,
				struct keylist *keys)
{
	struct bkey_i *insert = bch2_keylist_front(keys);
	struct bkey_packed *k;

	BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);

	while ((k = bch2_btree_node_iter_prev_all(&node_iter, b)) &&
	       (bkey_cmp_left_packed(b, k, &insert->k.p) >= 0))
		;

	while (!bch2_keylist_empty(keys)) {
		insert = bch2_keylist_front(keys);

		if (bpos_gt(insert->k.p, b->key.k.p))
			break;

		bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, insert);
		bch2_keylist_pop_front(keys);
	}
}

/*
 * Move keys from n1 (original replacement node, now lower node) to n2 (higher
 * node):
 */
static void __btree_split_node(struct btree_update *as,
			       struct btree_trans *trans,
			       struct btree *b,
			       struct btree *n[2])
{
	struct bkey_packed *k;
	struct bpos n1_pos = POS_MIN;
	struct btree_node_iter iter;
	struct bset *bsets[2];
	struct bkey_format_state format[2];
	struct bkey_packed *out[2];
	struct bkey uk;
	unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
	struct { unsigned nr_keys, val_u64s; } nr_keys[2];
	int i;

	memset(&nr_keys, 0, sizeof(nr_keys));

	for (i = 0; i < 2; i++) {
		BUG_ON(n[i]->nsets != 1);

		bsets[i] = btree_bset_first(n[i]);
		out[i] = bsets[i]->start;

		SET_BTREE_NODE_SEQ(n[i]->data, BTREE_NODE_SEQ(b->data) + 1);
		bch2_bkey_format_init(&format[i]);
	}

	u64s = 0;
	for_each_btree_node_key(b, k, &iter) {
		if (bkey_deleted(k))
			continue;

		uk = bkey_unpack_key(b, k);

		if (b->c.level &&
		    u64s < n1_u64s &&
		    u64s + k->u64s >= n1_u64s &&
		    bch2_key_deleted_in_journal(trans, b->c.btree_id, b->c.level, uk.p))
			n1_u64s += k->u64s;

		i = u64s >= n1_u64s;
		u64s += k->u64s;
		if (!i)
			n1_pos = uk.p;
		bch2_bkey_format_add_key(&format[i], &uk);

		nr_keys[i].nr_keys++;
		nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k);
	}

	btree_set_min(n[0], b->data->min_key);
	btree_set_max(n[0], n1_pos);
	btree_set_min(n[1], bpos_successor(n1_pos));
	btree_set_max(n[1], b->data->max_key);

	for (i = 0; i < 2; i++) {
		bch2_bkey_format_add_pos(&format[i], n[i]->data->min_key);
		bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);

		n[i]->data->format = bch2_bkey_format_done(&format[i]);

		unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
			nr_keys[i].val_u64s;
		if (__vstruct_bytes(struct btree_node, u64s) > btree_buf_bytes(b))
			n[i]->data->format = b->format;

		btree_node_set_format(n[i], n[i]->data->format);
	}

	u64s = 0;
	for_each_btree_node_key(b, k, &iter) {
		if (bkey_deleted(k))
			continue;

		i = u64s >= n1_u64s;
		u64s += k->u64s;

		if (bch2_bkey_transform(&n[i]->format, out[i], bkey_packed(k)
					? &b->format: &bch2_bkey_format_current, k))
			out[i]->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bch2_bkey_unpack(b, (void *) out[i], k);

		out[i]->needs_whiteout = false;

		btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
		out[i] = bkey_p_next(out[i]);
	}

	for (i = 0; i < 2; i++) {
		bsets[i]->u64s = cpu_to_le16((u64 *) out[i] - bsets[i]->_data);

		BUG_ON(!bsets[i]->u64s);

		set_btree_bset_end(n[i], n[i]->set);

		btree_node_reset_sib_u64s(n[i]);

		bch2_verify_btree_nr_keys(n[i]);

		BUG_ON(bch2_btree_node_check_topology(trans, n[i]));
	}
}
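/*
 * Worked example (illustrative numbers, not from the original file): for
 * b->nr.live_u64s = 1000, n1_u64s = 1000 * 3 / 5 = 600, so keys are
 * streamed into n1 until the running total reaches 600 u64s and the
 * remainder goes to n2. One plausible reason for the 60/40 rather than
 * 50/50 split is that it leaves more free space in the right (higher)
 * node, which favors append-mostly workloads.
 */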
/*
 * For updates to interior nodes, we've got to do the insert before we split
 * because the stuff we're inserting has to be inserted atomically. Post split,
 * the keys might have to go in different nodes and the split would no longer be
 * atomic.
 *
 * Worse, if the insert is from btree node coalescing, if we do the insert after
 * we do the split (and pick the pivot) - the pivot we pick might be between
 * nodes that were coalesced, and thus in the middle of a child node post
 * coalescing:
 */
static void btree_split_insert_keys(struct btree_update *as,
				    struct btree_trans *trans,
				    btree_path_idx_t path_idx,
				    struct btree *b,
				    struct keylist *keys)
{
	struct btree_path *path = trans->paths + path_idx;

	if (!bch2_keylist_empty(keys) &&
	    bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
		struct btree_node_iter node_iter;

		bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);

		bch2_btree_insert_keys_interior(as, trans, path, b, node_iter, keys);

		BUG_ON(bch2_btree_node_check_topology(trans, b));
	}
}
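/*
 * Illustrative sketch (not from the original file) of the ordering
 * btree_split() below uses, per the comment above:
 *
 *	__btree_split_node(as, trans, b, n);			// pick the pivot
 *	btree_split_insert_keys(as, trans, path, n1, keys);
 *	btree_split_insert_keys(as, trans, path, n2, keys);
 *
 * The pending keys are inserted into the replacement nodes before those
 * nodes become visible, so the whole batch lands atomically even though
 * the keys straddle the pivot.
 */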
static int btree_split(struct btree_update *as, struct btree_trans *trans,
		       btree_path_idx_t path, struct btree *b,
		       struct keylist *keys)
{
	struct bch_fs *c = as->c;
	struct btree *parent = btree_node_parent(trans->paths + path, b);
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	btree_path_idx_t path1 = 0, path2 = 0;
	u64 start_time = local_clock();
	int ret = 0;

	bch2_verify_btree_nr_keys(b);
	BUG_ON(!parent && (b != btree_node_root(c, b)));
	BUG_ON(parent && !btree_node_intent_locked(trans->paths + path, b->c.level + 1));

	ret = bch2_btree_node_check_topology(trans, b);
	if (ret)
		return ret;

	bch2_btree_interior_update_will_free_node(as, b);

	if (b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c)) {
		struct btree *n[2];

		trace_and_count(c, btree_node_split, trans, b);

		n[0] = n1 = bch2_btree_node_alloc(as, trans, b->c.level);
		n[1] = n2 = bch2_btree_node_alloc(as, trans, b->c.level);

		__btree_split_node(as, trans, b, n);

		if (keys) {
			btree_split_insert_keys(as, trans, path, n1, keys);
			btree_split_insert_keys(as, trans, path, n2, keys);
			BUG_ON(!bch2_keylist_empty(keys));
		}

		bch2_btree_build_aux_trees(n2);
		bch2_btree_build_aux_trees(n1);

		bch2_btree_update_add_new_node(as, n1);
		bch2_btree_update_add_new_node(as, n2);
		six_unlock_write(&n2->c.lock);
		six_unlock_write(&n1->c.lock);

		path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
		six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
		mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
		bch2_btree_path_level_init(trans, trans->paths + path1, n1);

		path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p);
		six_lock_increment(&n2->c.lock, SIX_LOCK_intent);
		mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED);
		bch2_btree_path_level_init(trans, trans->paths + path2, n2);

		/*
		 * Note that on recursive parent_keys == keys, so we
		 * can't start adding new keys to parent_keys before emptying it
		 * out (which we did with btree_split_insert_keys() above)
		 */
		bch2_keylist_add(&as->parent_keys, &n1->key);
		bch2_keylist_add(&as->parent_keys, &n2->key);

		if (!parent) {
			/* Depth increases, make a new root */
			n3 = __btree_root_alloc(as, trans, b->c.level + 1);

			bch2_btree_update_add_new_node(as, n3);
			six_unlock_write(&n3->c.lock);

			trans->paths[path2].locks_want++;
			BUG_ON(btree_node_locked(trans->paths + path2, n3->c.level));
			six_lock_increment(&n3->c.lock, SIX_LOCK_intent);
			mark_btree_node_locked(trans, trans->paths + path2, n3->c.level, BTREE_NODE_INTENT_LOCKED);
			bch2_btree_path_level_init(trans, trans->paths + path2, n3);

			n3->sib_u64s[0] = U16_MAX;
			n3->sib_u64s[1] = U16_MAX;

			btree_split_insert_keys(as, trans, path, n3, &as->parent_keys);
		}
	} else {
		trace_and_count(c, btree_node_compact, trans, b);

		n1 = bch2_btree_node_alloc_replacement(as, trans, b);

		if (keys) {
			btree_split_insert_keys(as, trans, path, n1, keys);
			BUG_ON(!bch2_keylist_empty(keys));
		}

		bch2_btree_build_aux_trees(n1);
		bch2_btree_update_add_new_node(as, n1);
		six_unlock_write(&n1->c.lock);

		path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
		six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
		mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
		bch2_btree_path_level_init(trans, trans->paths + path1, n1);

		if (parent)
			bch2_keylist_add(&as->parent_keys, &n1->key);
	}

	/* New nodes all written, now make them visible: */

	if (parent) {
		/* Split a non root node */
		ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
	} else if (n3) {
		ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false);
	} else {
		/* Root filled up but didn't need to be split */
		ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false);
	}

	if (ret)
		goto err;

	if (n3) {
		bch2_btree_update_get_open_buckets(as, n3);
		bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
	}
	if (n2) {
		bch2_btree_update_get_open_buckets(as, n2);
		bch2_btree_node_write(c, n2, SIX_LOCK_intent, 0);
	}
	bch2_btree_update_get_open_buckets(as, n1);
	bch2_btree_node_write(c, n1, SIX_LOCK_intent, 0);

	/*
	 * The old node must be freed (in memory) _before_ unlocking the new
	 * nodes - else another thread could re-acquire a read lock on the old
	 * node after another thread has locked and updated the new node, thus
	 * seeing stale data:
	 */
	bch2_btree_node_free_inmem(trans, trans->paths + path, b);

	if (n3)
		bch2_trans_node_add(trans, trans->paths + path, n3);
	if (n2)
		bch2_trans_node_add(trans, trans->paths + path2, n2);
	bch2_trans_node_add(trans, trans->paths + path1, n1);

	if (n3)
		six_unlock_intent(&n3->c.lock);
	if (n2)
		six_unlock_intent(&n2->c.lock);
	six_unlock_intent(&n1->c.lock);
out:
	if (path2) {
		__bch2_btree_path_unlock(trans, trans->paths + path2);
		bch2_path_put(trans, path2, true);
	}
	if (path1) {
		__bch2_btree_path_unlock(trans, trans->paths + path1);
		bch2_path_put(trans, path1, true);
	}

	bch2_trans_verify_locks(trans);

	bch2_time_stats_update(&c->times[n2
			       ? BCH_TIME_btree_node_split
			       : BCH_TIME_btree_node_compact],
			       start_time);
	return ret;
err:
	if (n3)
		bch2_btree_node_free_never_used(as, trans, n3);
	if (n2)
		bch2_btree_node_free_never_used(as, trans, n2);
	bch2_btree_node_free_never_used(as, trans, n1);
	goto out;
}

/**
 * bch2_btree_insert_node - insert bkeys into a given btree node
 *
 * @as:			btree_update object
 * @trans:		btree_trans object
 * @path_idx:		path that points to current node
 * @b:			node to insert keys into
 * @keys:		list of keys to insert
 *
 * Returns: 0 on success, typically transaction restart error on failure
 *
 * Inserts as many keys as it can into a given btree node, splitting it if full.
 * If a split occurred, this function will return early. This can only happen
 * for leaf nodes -- inserts into interior nodes have to be atomic.
 */
static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *trans,
				  btree_path_idx_t path_idx, struct btree *b,
				  struct keylist *keys)
{
	struct bch_fs *c = as->c;
	struct btree_path *path = trans->paths + path_idx, *linked;
	unsigned i;
	int old_u64s = le16_to_cpu(btree_bset_last(b)->u64s);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;
	int ret;

	lockdep_assert_held(&c->gc_lock);
	BUG_ON(!btree_node_intent_locked(path, b->c.level));
	BUG_ON(!b->c.level);
	BUG_ON(!as || as->b);
	bch2_verify_keylist_sorted(keys);

	ret = bch2_btree_node_lock_write(trans, path, &b->c);
	if (ret)
		return ret;

	bch2_btree_node_prep_for_write(trans, path, b);

	if (!bch2_btree_node_insert_fits(b, bch2_keylist_u64s(keys))) {
		bch2_btree_node_unlock_write(trans, path, b);
		goto split;
	}

	ret = bch2_btree_node_check_topology(trans, b);
	if (ret) {
		bch2_btree_node_unlock_write(trans, path, b);
		return ret;
	}

	bch2_btree_insert_keys_interior(as, trans, path, b,
					path->l[b->c.level].iter, keys);

	trans_for_each_path_with_node(trans, b, linked, i)
		bch2_btree_node_iter_peek(&linked->l[b->c.level].iter, b);

	bch2_trans_verify_paths(trans);

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) le16_to_cpu(btree_bset_last(b)->u64s) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	btree_update_updated_node(as, b);
	bch2_btree_node_unlock_write(trans, path, b);

	BUG_ON(bch2_btree_node_check_topology(trans, b));
	return 0;
split:
	/*
	 * We could attempt to avoid the transaction restart, by calling
	 * bch2_btree_path_upgrade() and allocating more nodes:
	 */
	if (b->c.level >= as->update_level_end) {
		trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_, b);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
	}

	return btree_split(as, trans, path_idx, b, keys);
}
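/*
 * Usage sketch (illustrative only): this is the recursion that propagates
 * splits up the tree. btree_split() collects the pointers to its new
 * nodes in as->parent_keys, then calls
 *
 *	ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
 *
 * which either fits the keys into @parent or calls btree_split() on it
 * again, one level higher, until it reaches a level covered by the
 * reservation made in bch2_btree_update_start().
 */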
int bch2_btree_split_leaf(struct btree_trans *trans,
			  btree_path_idx_t path,
			  unsigned flags)
{
	/* btree_split & merge may both cause paths array to be reallocated */
	struct btree *b = path_l(trans->paths + path)->b;
	struct btree_update *as;
	unsigned l;
	int ret = 0;

	as = bch2_btree_update_start(trans, trans->paths + path,
				     trans->paths[path].level,
				     true, flags);
	if (IS_ERR(as))
		return PTR_ERR(as);

	ret = btree_split(as, trans, path, b, NULL);
	if (ret) {
		bch2_btree_update_free(as, trans);
		return ret;
	}

	bch2_btree_update_done(as, trans);

	for (l = trans->paths[path].level + 1;
	     btree_node_intent_locked(&trans->paths[path], l) && !ret;
	     l++)
		ret = bch2_foreground_maybe_merge(trans, path, l, flags);

	return ret;
}

static void __btree_increase_depth(struct btree_update *as, struct btree_trans *trans,
				   btree_path_idx_t path_idx)
{
	struct bch_fs *c = as->c;
	struct btree_path *path = trans->paths + path_idx;
	struct btree *n, *b = bch2_btree_id_root(c, path->btree_id)->b;

	BUG_ON(!btree_node_locked(path, b->c.level));

	n = __btree_root_alloc(as, trans, b->c.level + 1);

	bch2_btree_update_add_new_node(as, n);
	six_unlock_write(&n->c.lock);

	path->locks_want++;
	BUG_ON(btree_node_locked(path, n->c.level));
	six_lock_increment(&n->c.lock, SIX_LOCK_intent);
	mark_btree_node_locked(trans, path, n->c.level, BTREE_NODE_INTENT_LOCKED);
	bch2_btree_path_level_init(trans, path, n);

	n->sib_u64s[0] = U16_MAX;
	n->sib_u64s[1] = U16_MAX;

	bch2_keylist_add(&as->parent_keys, &b->key);
	btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys);

	int ret = bch2_btree_set_root(as, trans, path, n, true);
	BUG_ON(ret);

	bch2_btree_update_get_open_buckets(as, n);
	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
	bch2_trans_node_add(trans, path, n);
	six_unlock_intent(&n->c.lock);

	mutex_lock(&c->btree_cache.lock);
	list_add_tail(&b->list, &c->btree_cache.live);
	mutex_unlock(&c->btree_cache.lock);

	bch2_trans_verify_locks(trans);
}

int bch2_btree_increase_depth(struct btree_trans *trans, btree_path_idx_t path, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree *b = bch2_btree_id_root(c, trans->paths[path].btree_id)->b;

	if (btree_node_fake(b))
		return bch2_btree_split_leaf(trans, path, flags);

	struct btree_update *as =
		bch2_btree_update_start(trans, trans->paths + path, b->c.level, true, flags);
	if (IS_ERR(as))
		return PTR_ERR(as);

	__btree_increase_depth(as, trans, path);
	bch2_btree_update_done(as, trans);
	return 0;
}

int __bch2_foreground_maybe_merge(struct btree_trans *trans,
				  btree_path_idx_t path,
				  unsigned level,
				  unsigned flags,
				  enum btree_node_sibling sib)
{
	struct bch_fs *c = trans->c;
	struct btree_update *as;
	struct bkey_format_state new_s;
	struct bkey_format new_f;
	struct bkey_i delete;
	struct btree *b, *m, *n, *prev, *next, *parent;
	struct bpos sib_pos;
	size_t sib_u64s;
	enum btree_id btree = trans->paths[path].btree_id;
	btree_path_idx_t sib_path = 0, new_path = 0;
	u64 start_time = local_clock();
	int ret = 0;

	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_not_unlocked(trans);
	BUG_ON(!trans->paths[path].should_be_locked);
	BUG_ON(!btree_node_locked(&trans->paths[path], level));

	/*
	 * Work around a deadlock caused by the btree write buffer not doing
	 * merges and leaving tons of merges for us to do - we really don't need
	 * to be doing merges at all from the interior update path, and if the
	 * interior update path is generating too many new interior updates we
	 * deadlock:
	 */
	if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
		return 0;

	if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
		flags &= ~BCH_WATERMARK_MASK;
		flags |= BCH_WATERMARK_btree;
		flags |= BCH_TRANS_COMMIT_journal_reclaim;
	}

	b = trans->paths[path].l[level].b;

	if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
	    (sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
		b->sib_u64s[sib] = U16_MAX;
		return 0;
	}

	sib_pos = sib == btree_prev_sib
		? bpos_predecessor(b->data->min_key)
		: bpos_successor(b->data->max_key);

	sib_path = bch2_path_get(trans, btree, sib_pos,
				 U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, sib_path, false);
	if (ret)
		goto err;

	btree_path_set_should_be_locked(trans->paths + sib_path);

	m = trans->paths[sib_path].l[level].b;

	if (btree_node_parent(trans->paths + path, b) !=
	    btree_node_parent(trans->paths + sib_path, m)) {
		b->sib_u64s[sib] = U16_MAX;
		goto out;
	}

	if (sib == btree_prev_sib) {
		prev = m;
		next = b;
	} else {
		prev = b;
		next = m;
	}

	if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bpos_to_text(&buf1, prev->data->max_key);
		bch2_bpos_to_text(&buf2, next->data->min_key);
		bch_err(c,
			"%s(): btree topology error:\n"
			"  prev ends at   %s\n"
			"  next starts at %s",
			__func__, buf1.buf, buf2.buf);
		printbuf_exit(&buf1);
		printbuf_exit(&buf2);
		ret = bch2_topology_error(c);
		goto err;
	}

	bch2_bkey_format_init(&new_s);
	bch2_bkey_format_add_pos(&new_s, prev->data->min_key);
	__bch2_btree_calc_format(&new_s, prev);
	__bch2_btree_calc_format(&new_s, next);
	bch2_bkey_format_add_pos(&new_s, next->data->max_key);
	new_f = bch2_bkey_format_done(&new_s);

	sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
		btree_node_u64s_with_format(m->nr, &m->format, &new_f);

	if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
		sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
		sib_u64s /= 2;
		sib_u64s += BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
	}

	sib_u64s = min(sib_u64s, btree_max_u64s(c));
	sib_u64s = min(sib_u64s, (size_t) U16_MAX - 1);
	b->sib_u64s[sib] = sib_u64s;
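	/*
	 * Worked example (illustrative numbers, not from the original file):
	 * if the two siblings would repack to sib_u64s = 1000 and the
	 * hysteresis constant is 600, the stored estimate becomes
	 * 600 + (1000 - 600) / 2 = 800. Damping the estimate toward the
	 * constant helps avoid merge/split ping-ponging right at the
	 * boundary; the merge itself only proceeds when the estimate is
	 * at or below c->btree_foreground_merge_threshold.
	 */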
2044 if (b->sib_u64s[sib] > c->btree_foreground_merge_threshold)
2047 parent = btree_node_parent(trans->paths + path, b);
2048 as = bch2_btree_update_start(trans, trans->paths + path, level, false,
2049 BCH_TRANS_COMMIT_no_enospc|flags);
2050 ret = PTR_ERR_OR_ZERO(as);
2054 trace_and_count(c, btree_node_merge, trans, b);
2056 bch2_btree_interior_update_will_free_node(as, b);
2057 bch2_btree_interior_update_will_free_node(as, m);
2059 n = bch2_btree_node_alloc(as, trans, b->c.level);
2061 SET_BTREE_NODE_SEQ(n->data,
2062 max(BTREE_NODE_SEQ(b->data),
2063 BTREE_NODE_SEQ(m->data)) + 1);
2065 btree_set_min(n, prev->data->min_key);
2066 btree_set_max(n, next->data->max_key);
2068 n->data->format = new_f;
2069 btree_node_set_format(n, new_f);
2071 bch2_btree_sort_into(c, n, prev);
2072 bch2_btree_sort_into(c, n, next);
2074 bch2_btree_build_aux_trees(n);
2075 bch2_btree_update_add_new_node(as, n);
2076 six_unlock_write(&n->c.lock);
2078 new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p);
2079 six_lock_increment(&n->c.lock, SIX_LOCK_intent);
2080 mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
2081 bch2_btree_path_level_init(trans, trans->paths + new_path, n);
	bkey_init(&delete.k);
	delete.k.p = prev->key.k.p;
	bch2_keylist_add(&as->parent_keys, &delete);
	bch2_keylist_add(&as->parent_keys, &n->key);

	bch2_trans_verify_paths(trans);

	ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
	if (ret)
		goto err_free_update;

	bch2_trans_verify_paths(trans);

	bch2_btree_update_get_open_buckets(as, n);
	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);

	bch2_btree_node_free_inmem(trans, trans->paths + path, b);
	bch2_btree_node_free_inmem(trans, trans->paths + sib_path, m);

	bch2_trans_node_add(trans, trans->paths + path, n);

	bch2_trans_verify_paths(trans);

	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as, trans);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
out:
err:
	if (new_path)
		bch2_path_put(trans, new_path, true);
	bch2_path_put(trans, sib_path, true);
	bch2_trans_verify_locks(trans);
	if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
		ret = 0;
	if (!ret)
		ret = bch2_trans_relock(trans);
	return ret;
err_free_update:
	bch2_btree_node_free_never_used(as, trans, n);
	bch2_btree_update_free(as, trans);
	goto out;
}
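
/*
 * bch2_btree_node_rewrite - rewrite the contents of @b into a newly allocated
 * node, then swap the new node in by updating the parent (or setting the new
 * node as root).
 */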
int bch2_btree_node_rewrite(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct btree *b,
			    unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct btree *n, *parent;
	struct btree_update *as;
	btree_path_idx_t new_path = 0;
	int ret;

	flags |= BCH_TRANS_COMMIT_no_enospc;

	struct btree_path *path = btree_iter_path(trans, iter);
	parent = btree_node_parent(path, b);
	as = bch2_btree_update_start(trans, path, b->c.level, false, flags);
	ret = PTR_ERR_OR_ZERO(as);
	if (ret)
		goto out;

	bch2_btree_interior_update_will_free_node(as, b);

	n = bch2_btree_node_alloc_replacement(as, trans, b);

	bch2_btree_build_aux_trees(n);
	bch2_btree_update_add_new_node(as, n);
	six_unlock_write(&n->c.lock);

	new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p);
	six_lock_increment(&n->c.lock, SIX_LOCK_intent);
	mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
	bch2_btree_path_level_init(trans, trans->paths + new_path, n);

	trace_and_count(c, btree_node_rewrite, trans, b);

	if (parent) {
		bch2_keylist_add(&as->parent_keys, &n->key);
		ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys);
	} else {
		ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false);
	}

	if (ret)
		goto err;

	bch2_btree_update_get_open_buckets(as, n);
	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);

	bch2_btree_node_free_inmem(trans, btree_iter_path(trans, iter), b);

	bch2_trans_node_add(trans, trans->paths + iter->path, n);
	six_unlock_intent(&n->c.lock);

	bch2_btree_update_done(as, trans);
out:
	if (new_path)
		bch2_path_put(trans, new_path, true);
	bch2_trans_downgrade(trans);
	return ret;
err:
	bch2_btree_node_free_never_used(as, trans, n);
	bch2_btree_update_free(as, trans);
	goto out;
}
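
/*
 * Async rewrite path: rewrites are queued to a workqueue when a node is found
 * to need rewriting in a context where it can't be rewritten synchronously.
 * The work item re-looks the node up by position and sequence number, since
 * the node may have been freed or rewritten by the time the work item runs.
 */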
struct async_btree_rewrite {
	struct bch_fs		*c;
	struct work_struct	work;
	struct list_head	list;
	enum btree_id		btree_id;
	unsigned		level;
	struct bpos		pos;
	__le64			seq;
};
static int async_btree_node_rewrite_trans(struct btree_trans *trans,
					  struct async_btree_rewrite *a)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	int ret;

	bch2_trans_node_iter_init(trans, &iter, a->btree_id, a->pos,
				  BTREE_MAX_DEPTH, a->level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto out;
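
	/*
	 * Is this still the node we were asked to rewrite? The sequence number
	 * identifies this instance of the node; it may have been freed or
	 * rewritten since the rewrite was queued.
	 */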
	if (!b || b->data->keys.seq != a->seq) {
		struct printbuf buf = PRINTBUF;

		if (b)
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
		else
			prt_str(&buf, "(null)");
		bch_info(c, "%s: node to rewrite not found, searching for seq %llu, got\n%s",
			 __func__, a->seq, buf.buf);
		printbuf_exit(&buf);
		goto out;
	}

	ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static void async_btree_node_rewrite_work(struct work_struct *work)
{
	struct async_btree_rewrite *a =
		container_of(work, struct async_btree_rewrite, work);
	struct bch_fs *c = a->c;
	int ret;

	ret = bch2_trans_do(c, NULL, NULL, 0,
			    async_btree_node_rewrite_trans(trans, a));
	bch_err_fn_ratelimited(c, ret);
	bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
	kfree(a);
}
void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
{
	struct async_btree_rewrite *a;
	int ret;

	a = kmalloc(sizeof(*a), GFP_NOFS);
	if (!a) {
		bch_err(c, "%s: error allocating memory", __func__);
		return;
	}

	a->c		= c;
	a->btree_id	= b->c.btree_id;
	a->level	= b->c.level;
	a->pos		= b->key.k.p;
	a->seq		= b->data->keys.seq;
	INIT_WORK(&a->work, async_btree_node_rewrite_work);
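
	/* Until the filesystem can go read-write, queue rewrites for later: */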
	if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
		mutex_lock(&c->pending_node_rewrites_lock);
		list_add(&a->list, &c->pending_node_rewrites);
		mutex_unlock(&c->pending_node_rewrites_lock);
		return;
	}

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
		if (test_bit(BCH_FS_started, &c->flags)) {
			bch_err(c, "%s: error getting c->writes ref", __func__);
			kfree(a);
			return;
		}

		ret = bch2_fs_read_write_early(c);
		bch_err_msg(c, ret, "going read-write");
		if (ret) {
			kfree(a);
			return;
		}

		bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
	}

	queue_work(c->btree_node_rewrite_worker, &a->work);
}
void bch2_do_pending_node_rewrites(struct bch_fs *c)
{
	struct async_btree_rewrite *a, *n;

	mutex_lock(&c->pending_node_rewrites_lock);
	list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
		list_del(&a->list);

		bch2_write_ref_get(c, BCH_WRITE_REF_node_rewrite);
		queue_work(c->btree_node_rewrite_worker, &a->work);
	}
	mutex_unlock(&c->pending_node_rewrites_lock);
}
void bch2_free_pending_node_rewrites(struct bch_fs *c)
{
	struct async_btree_rewrite *a, *n;

	mutex_lock(&c->pending_node_rewrites_lock);
	list_for_each_entry_safe(a, n, &c->pending_node_rewrites, list) {
		list_del(&a->list);

		kfree(a);
	}
	mutex_unlock(&c->pending_node_rewrites_lock);
}
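
/*
 * Update a btree node's key in place: the new key goes to the parent node
 * (or, for a root, to the journal as a btree root entry); after the update
 * commits, the in-memory key and hash table entry are updated under the
 * node's write lock.
 */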
static int __bch2_btree_node_update_key(struct btree_trans *trans,
					struct btree_iter *iter,
					struct btree *b, struct btree *new_hash,
					struct bkey_i *new_key,
					unsigned commit_flags,
					bool skip_triggers)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter2 = { NULL };
	struct btree *parent;
	int ret;

	if (!skip_triggers) {
		ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
					   bkey_i_to_s_c(&b->key),
					   BTREE_TRIGGER_transactional) ?:
			bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
					     bkey_i_to_s(new_key),
					     BTREE_TRIGGER_transactional);
		if (ret)
			return ret;
	}
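
	/*
	 * The btree cache hashes nodes by the pointers in their keys; if the
	 * new key hashes differently, @new_hash is a caller-allocated spare
	 * node that holds the new hash table entry until the commit succeeds:
	 */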
	if (new_hash) {
		bkey_copy(&new_hash->key, new_key);
		ret = bch2_btree_node_hash_insert(&c->btree_cache,
				new_hash, b->c.level, b->c.btree_id);
		BUG_ON(ret);
	}
	parent = btree_node_parent(btree_iter_path(trans, iter), b);
	if (parent) {
		bch2_trans_copy_iter(&iter2, iter);

		iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
				iter2.flags & BTREE_ITER_intent,
				_THIS_IP_);

		struct btree_path *path2 = btree_iter_path(trans, &iter2);
		BUG_ON(path2->level != b->c.level);
		BUG_ON(!bpos_eq(path2->pos, new_key->k.p));

		btree_path_set_level_up(trans, path2);

		trans->paths_sorted = false;

		ret   = bch2_btree_iter_traverse(&iter2) ?:
			bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
		if (ret)
			goto err;
	} else {
		BUG_ON(btree_node_root(c, b) != b);

		struct jset_entry *e = bch2_trans_jset_entry_alloc(trans,
				jset_u64s(new_key->k.u64s));
		ret = PTR_ERR_OR_ZERO(e);
		if (ret)
			return ret;

		journal_entry_set(e,
				  BCH_JSET_ENTRY_btree_root,
				  b->c.btree_id, b->c.level,
				  new_key, new_key->k.u64s);
	}
	ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
	if (ret)
		goto err;

	bch2_btree_node_lock_write_nofail(trans, btree_iter_path(trans, iter), &b->c);

	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		bkey_copy(&b->key, new_key);
		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
		BUG_ON(ret);
		mutex_unlock(&c->btree_cache.lock);
	} else {
		bkey_copy(&b->key, new_key);
	}

	bch2_btree_node_unlock_write(trans, btree_iter_path(trans, iter), b);
out:
	bch2_trans_iter_exit(trans, &iter2);
	return ret;
err:
	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);
	}
	goto out;
}
int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
			       struct btree *b, struct bkey_i *new_key,
			       unsigned commit_flags, bool skip_triggers)
{
	struct bch_fs *c = trans->c;
	struct btree *new_hash = NULL;
	struct btree_path *path = btree_iter_path(trans, iter);
	struct closure cl;
	int ret;

	ret = bch2_btree_path_upgrade(trans, path, b->c.level + 1);
	if (ret)
		return ret;

	closure_init_stack(&cl);
	/*
	 * check btree_ptr_hash_val() after @b is locked by
	 * btree_iter_traverse():
	 */
	if (btree_ptr_hash_val(new_key) != b->hash_val) {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		if (ret) {
			ret = drop_locks_do(trans, (closure_sync(&cl), 0));
			if (ret)
				return ret;
		}

		new_hash = bch2_btree_node_mem_alloc(trans, false);
	}

	path->intent_ref++;
	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
					   commit_flags, skip_triggers);
	--path->intent_ref;

	if (new_hash) {
		mutex_lock(&c->btree_cache.lock);
		list_move(&new_hash->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		six_unlock_write(&new_hash->c.lock);
		six_unlock_intent(&new_hash->c.lock);
	}
	closure_sync(&cl);
	bch2_btree_cache_cannibalize_unlock(trans);
	return ret;
}
int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
					struct btree *b, struct bkey_i *new_key,
					unsigned commit_flags, bool skip_triggers)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
				  BTREE_MAX_DEPTH, b->c.level,
				  BTREE_ITER_intent);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Has the node been freed? */
	if (btree_iter_path(trans, &iter)->l[b->c.level].b != b) {
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));
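
	/*
	 * Sanitize @new_key: drop any pointers to devices not present in @b's
	 * current key:
	 */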
	bch2_bkey_drop_ptrs(bkey_i_to_s(new_key), ptr,
			    !bch2_bkey_has_device(bkey_i_to_s(&b->key), ptr->dev));

	ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
					 commit_flags, skip_triggers);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/*
 * Only for filesystem bringup, when first reading the btree roots or allocating
 * btree roots when initializing a new filesystem:
 */
void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
{
	BUG_ON(btree_node_root(c, b));

	bch2_btree_set_root_inmem(c, b);
}
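
/*
 * Allocate an empty in-memory root for @id: marked fake, since it has no
 * on-disk counterpart, and flagged as needing a rewrite.
 */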
int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level)
{
	struct bch_fs *c = trans->c;
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(trans, false);
	bch2_btree_cache_cannibalize_unlock(trans);

	set_btree_node_fake(b);
	set_btree_node_need_rewrite(b);
	b->c.level	= level;
	b->c.btree_id	= id;

	bkey_btree_ptr_init(&b->key);
	b->key.k.p = SPOS_MAX;
	*((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;

	bch2_bset_init_first(b, &b->data->keys);
	bch2_btree_build_aux_trees(b);

	b->data->flags = 0;
	btree_set_min(b, POS_MIN);
	btree_set_max(b, SPOS_MAX);
	b->data->format = bch2_btree_calc_format(b);
	btree_node_set_format(b, b->data->format);

	ret = bch2_btree_node_hash_insert(&c->btree_cache, b,
					  b->c.level, b->c.btree_id);
	BUG_ON(ret);

	bch2_btree_set_root_inmem(c, b);

	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);
	return 0;
}
void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
{
	bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level));
}
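
/* Debugging: format the list of in-flight interior btree updates as text: */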
static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
{
	prt_printf(out, "%ps: ", (void *) as->ip_started);
	bch2_trans_commit_flags_to_text(out, as->flags);

	prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
		   bch2_btree_id_str(as->btree_id),
		   as->update_level_start,
		   as->update_level_end,
		   bch2_btree_update_modes[as->mode],
		   as->nodes_written,
		   closure_nr_remaining(&as->cl),
		   as->journal.seq);
}
void bch2_btree_updates_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_update *as;

	mutex_lock(&c->btree_interior_update_lock);
	list_for_each_entry(as, &c->btree_interior_update_list, list)
		bch2_btree_update_to_text(out, as);
	mutex_unlock(&c->btree_interior_update_lock);
}
static bool bch2_btree_interior_updates_pending(struct bch_fs *c)
{
	bool ret;

	mutex_lock(&c->btree_interior_update_lock);
	ret = !list_empty(&c->btree_interior_update_list);
	mutex_unlock(&c->btree_interior_update_lock);

	return ret;
}
bool bch2_btree_interior_updates_flush(struct bch_fs *c)
{
	bool ret = bch2_btree_interior_updates_pending(c);

	if (ret)
		closure_wait_event(&c->btree_interior_update_wait,
				   !bch2_btree_interior_updates_pending(c));
	return ret;
}
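
/*
 * Btree roots are journalled as BCH_JSET_ENTRY_btree_root entries; these
 * helpers convert between a journal entry and the in-memory btree root:
 */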
void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
{
	struct btree_root *r = bch2_btree_id_root(c, entry->btree_id);

	mutex_lock(&c->btree_root_lock);

	r->level = entry->level;
	r->alive = true;
	bkey_copy(&r->key, (struct bkey_i *) entry->start);

	mutex_unlock(&c->btree_root_lock);
}
struct jset_entry *
bch2_btree_roots_to_journal_entries(struct bch_fs *c,
				    struct jset_entry *end,
				    unsigned long skip)
{
	unsigned i;

	mutex_lock(&c->btree_root_lock);

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (r->alive && !test_bit(i, &skip)) {
			journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
					  i, r->level, &r->key, r->key.k.u64s);
			end = vstruct_next(end);
		}
	}

	mutex_unlock(&c->btree_root_lock);

	return end;
}
void bch2_fs_btree_interior_update_exit(struct bch_fs *c)
{
	if (c->btree_node_rewrite_worker)
		destroy_workqueue(c->btree_node_rewrite_worker);
	if (c->btree_interior_update_worker)
		destroy_workqueue(c->btree_interior_update_worker);
	mempool_exit(&c->btree_interior_update_pool);
}
void bch2_fs_btree_interior_update_init_early(struct bch_fs *c)
{
	mutex_init(&c->btree_reserve_cache_lock);
	INIT_LIST_HEAD(&c->btree_interior_update_list);
	INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
	mutex_init(&c->btree_interior_update_lock);
	INIT_WORK(&c->btree_interior_update_work, btree_interior_update_work);

	INIT_LIST_HEAD(&c->pending_node_rewrites);
	mutex_init(&c->pending_node_rewrites_lock);
}
int bch2_fs_btree_interior_update_init(struct bch_fs *c)
{
	c->btree_interior_update_worker =
		alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 8);
	if (!c->btree_interior_update_worker)
		return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;

	c->btree_node_rewrite_worker =
		alloc_ordered_workqueue("btree_node_rewrite", WQ_UNBOUND);
	if (!c->btree_node_rewrite_worker)
		return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;

	if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
				      sizeof(struct btree_update)))
		return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;

	return 0;
}