// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "extents.h"
#include "snapshot.h"
#include "trace.h"
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
					 const struct btree_insert_entry *r)
{
	return   cmp_int(l->btree_id,	r->btree_id) ?:
		 cmp_int(l->cached,	r->cached) ?:
		 -cmp_int(l->level,	r->level) ?:
		 bpos_cmp(l->k->k.p,	r->k->k.p);
}
static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
			  struct bkey_i *, enum btree_iter_update_trigger_flags,
			  unsigned long ip);
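/*
 * Try to merge the key being inserted with the existing key immediately
 * preceding it: on success, *insert is replaced with the merged key and the
 * old key is deleted. Merging is skipped during journal replay and when
 * either key has been overwritten in other snapshots.
 */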
static noinline int extent_front_merge(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bkey_s_c k,
				       struct bkey_i **insert,
				       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *update;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	update = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(update);
	if (ret)
		return ret;

	if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	ret = bch2_btree_delete_at(trans, iter, flags);
	if (ret)
		return ret;

	*insert = update;
	return 0;
}
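/*
 * Try to merge the key being inserted with the existing key immediately
 * following it; the merge is performed in place on @insert, and is skipped
 * during journal replay and when either key has been overwritten in other
 * snapshots.
 */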
static noinline int extent_back_merge(struct btree_trans *trans,
				      struct btree_iter *iter,
				      struct bkey_i *insert,
				      struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (unlikely(trans->journal_replay_not_finished))
		return 0;

	ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
		bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
	if (ret < 0)
		return ret;
	if (ret)
		return 0;

	bch2_bkey_merge(c, bkey_i_to_s(insert), k);
	return 0;
}
/*
 * When deleting, check if we need to emit a whiteout (because we're
 * overwriting something in an ancestor snapshot):
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
				      enum btree_id btree_id, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 snapshot = pos.snapshot;
	int ret;

	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
		return 0;

	pos.snapshot++;

	for_each_btree_key_norestart(trans, iter, btree_id, pos,
			   BTREE_ITER_all_snapshots|
			   BTREE_ITER_nopreserve, k, ret) {
		if (!bkey_eq(k.k->p, pos))
			break;

		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
					      k.k->p.snapshot)) {
			ret = !bkey_whiteout(k.k);
			break;
		}
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
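/*
 * A key at @old_pos is being moved to @new_pos: for each version of @old_pos
 * in a descendent snapshot, insert a whiteout at @new_pos in that snapshot so
 * the update doesn't become visible there:
 */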
int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
				     enum btree_id id,
				     struct bpos old_pos,
				     struct bpos new_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter old_iter, new_iter = { NULL };
	struct bkey_s_c old_k, new_k;
	snapshot_id_list s;
	struct bkey_i *update;
	int ret = 0;

	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
		return 0;

	darray_init(&s);

	bch2_trans_iter_init(trans, &old_iter, id, old_pos,
			     BTREE_ITER_not_extents|
			     BTREE_ITER_all_snapshots);
	while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
	       !(ret = bkey_err(old_k)) &&
	       bkey_eq(old_pos, old_k.k->p)) {
		struct bpos whiteout_pos =
			SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

		if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
		    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
			continue;

		new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
					   BTREE_ITER_not_extents|
					   BTREE_ITER_intent);
		ret = bkey_err(new_k);
		if (ret)
			break;

		if (new_k.k->type == KEY_TYPE_deleted) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
			ret = PTR_ERR_OR_ZERO(update);
			if (ret)
				break;

			bkey_init(&update->k);
			update->k.p = whiteout_pos;
			update->k.type = KEY_TYPE_whiteout;

			ret = bch2_trans_update(trans, &new_iter, update,
						BTREE_UPDATE_internal_snapshot_node);
		}
		bch2_trans_iter_exit(trans, &new_iter);
		if (ret)
			break;

		ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &new_iter);
	bch2_trans_iter_exit(trans, &old_iter);
	darray_exit(&s);

	return ret;
}
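/*
 * Overwriting an existing extent @old with @new: emit updates for the
 * surviving front, middle and back portions of @old, plus any snapshot
 * whiteouts required, splitting @old into up to three pieces. Splitting a
 * compressed extent requires extra disk reservation, noted in
 * trans->extra_disk_res for __bch2_trans_commit():
 */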
int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
				       struct btree_iter *iter,
				       enum btree_iter_update_trigger_flags flags,
				       struct bkey_s_c old,
				       struct bkey_s_c new)
{
	enum btree_id btree_id = iter->btree_id;
	struct bkey_i *update;
	struct bpos new_start = bkey_start_pos(new.k);
	unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
	unsigned back_split  = bkey_gt(old.k->p, new.k->p);
	unsigned middle_split = (front_split || back_split) &&
		old.k->p.snapshot != new.k->p.snapshot;
	unsigned nr_splits = front_split + back_split + middle_split;
	int ret = 0, compressed_sectors;

	/*
	 * If we're going to be splitting a compressed extent, note it
	 * so that __bch2_trans_commit() can increase our disk
	 * reservation:
	 */
	if (nr_splits > 1 &&
	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
		trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

	if (front_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_back(new_start, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	/* If we're overwriting in a different snapshot - middle split: */
	if (middle_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new_start, update);
		bch2_cut_back(new.k->p, update);

		ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
					old.k->p, update->k.p) ?:
			bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	if (bkey_le(old.k->p, new.k->p)) {
		update = bch2_trans_kmalloc(trans, sizeof(*update));
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bkey_init(&update->k);
		update->k.p = old.k->p;
		update->k.p.snapshot = new.k->p.snapshot;

		if (new.k->p.snapshot != old.k->p.snapshot) {
			update->k.type = KEY_TYPE_whiteout;
		} else if (btree_type_has_snapshots(btree_id)) {
			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
			if (ret < 0)
				return ret;
			if (ret)
				update->k.type = KEY_TYPE_whiteout;
		}

		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
					BTREE_UPDATE_internal_snapshot_node|flags);
		if (ret)
			return ret;
	}

	if (back_split) {
		update = bch2_bkey_make_mut_noupdate(trans, old);
		if ((ret = PTR_ERR_OR_ZERO(update)))
			return ret;

		bch2_cut_front(new.k->p, update);

		ret = bch2_trans_update_by_path(trans, iter->path, update,
					  BTREE_UPDATE_internal_snapshot_node|
					  flags, _RET_IP_);
		if (ret)
			return ret;
	}

	return 0;
}
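/*
 * Extent updates are implemented in terms of non-extent updates: try to merge
 * @insert with the extents immediately in front of and behind it, and rewrite
 * every extent it overlaps via bch2_trans_update_extent_overwrite():
 */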
static int bch2_trans_update_extent(struct btree_trans *trans,
				    struct btree_iter *orig_iter,
				    struct bkey_i *insert,
				    enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id btree_id = orig_iter->btree_id;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
			     BTREE_ITER_intent|
			     BTREE_ITER_with_updates|
			     BTREE_ITER_not_extents);
	k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
	if ((ret = bkey_err(k)))
		goto err;
	if (!k.k)
		goto out;

	if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
		if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
			ret = extent_front_merge(trans, &iter, k, &insert, flags);
			if (ret)
				goto err;
		}

		goto next;
	}

	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
		bool done = bkey_lt(insert->k.p, k.k->p);

		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
		if (ret)
			goto err;

		if (done)
			goto out;
next:
		bch2_btree_iter_advance(&iter);
		k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
		if ((ret = bkey_err(k)))
			goto err;
		if (!k.k)
			goto out;
	}

	if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
		ret = extent_back_merge(trans, &iter, insert, k);
		if (ret)
			goto err;
	}
out:
	if (!bkey_deleted(&insert->k))
		ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
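/*
 * A key cache update for a key that doesn't yet exist in the btree needs to
 * be flushed to the btree immediately, since key cache coherency requires
 * that cached keys also exist in the btree:
 */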
static noinline int flush_new_cached_update(struct btree_trans *trans,
					    struct btree_insert_entry *i,
					    enum btree_iter_update_trigger_flags flags,
					    unsigned long ip)
{
	struct bkey k;
	int ret;

	btree_path_idx_t path_idx =
		bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
			      BTREE_ITER_intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path_idx, 0);
	if (ret)
		goto out;

	struct btree_path *btree_path = trans->paths + path_idx;

	/*
	 * The old key in the insert entry might actually refer to an existing
	 * key in the btree that has been deleted from cache and not yet
	 * flushed. Check for this and skip the flush so we don't run triggers
	 * against a stale key.
	 */
	bch2_btree_path_peek_slot_exact(btree_path, &k);
	if (!bkey_deleted(&k))
		goto out;

	i->key_cache_already_flushed = true;
	i->flags |= BTREE_TRIGGER_norun;

	btree_path_set_should_be_locked(trans, btree_path);
	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
	bch2_path_put(trans, path_idx, true);
	return ret;
}
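/*
 * Add an update to the transaction's sorted list of pending updates, replacing
 * any existing update for the same key:
 */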
static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
			  struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
			  unsigned long ip)
{
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i, n;
	int cmp = 1;

	struct btree_path *path = trans->paths + path_idx;
	EBUG_ON(!path->should_be_locked);
	EBUG_ON(trans->nr_updates >= trans->nr_paths);
	EBUG_ON(!bpos_eq(k->k.p, path->pos));

	n = (struct btree_insert_entry) {
		.flags		= flags,
		.bkey_type	= __btree_node_type(path->level, path->btree_id),
		.btree_id	= path->btree_id,
		.level		= path->level,
		.cached		= path->cached,
		.path		= path_idx,
		.k		= k,
		.ip_allocated	= ip,
	};

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(i != trans->updates &&
		       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

	/*
	 * Pending updates are kept sorted: first, find position of new update,
	 * then delete/trim any updates the new update overwrites:
	 */
	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
		cmp = btree_insert_entry_cmp(&n, i);
		if (cmp <= 0)
			break;
	}

	bool overwrite = !cmp && i < trans->updates + trans->nr_updates;

	if (overwrite) {
		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

		bch2_path_put(trans, i->path, true);
		i->flags	= n.flags;
		i->cached	= n.cached;
		i->k		= n.k;
		i->path		= n.path;
		i->ip_allocated	= n.ip_allocated;
	} else {
		array_insert_item(trans->updates, trans->nr_updates,
				  i - trans->updates, n);

		i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
		i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

		if (unlikely(trans->journal_replay_not_finished)) {
			struct bkey_i *j_k =
				bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

			if (j_k) {
				i->old_k = j_k->k;
				i->old_v = &j_k->v;
			}
		}
	}

	__btree_path_get(trans, trans->paths + i->path, true);

	trace_update_by_path(trans, path, i, overwrite);

	/*
	 * If a key is present in the key cache, it must also exist in the
	 * btree - this is necessary for cache coherency. When iterating over
	 * a btree that's cached in the key cache, the btree iter code checks
	 * the key cache - but the key has to exist in the btree for that to
	 * work:
	 */
	if (path->cached && !i->old_btree_u64s)
		return flush_new_cached_update(trans, i, flags, ip);

	return 0;
}
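/*
 * Get and traverse a key cache path at the iterator's position so that the
 * update can be made through the key cache rather than the underlying btree:
 */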
static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
						    struct btree_iter *iter,
						    struct btree_path *path)
{
	struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

	if (!key_cache_path ||
	    !key_cache_path->should_be_locked ||
	    !bpos_eq(key_cache_path->pos, iter->pos)) {
		struct bkey_cached *ck;
		int ret;

		if (!iter->key_cache_path)
			iter->key_cache_path =
				bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
					      BTREE_ITER_intent|
					      BTREE_ITER_cached, _THIS_IP_);

		iter->key_cache_path =
			bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
						iter->flags & BTREE_ITER_intent,
						_THIS_IP_);

		ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
		if (unlikely(ret))
			return ret;

		ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
		}

		btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
	}

	return 0;
}
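/*
 * Main entry point for queueing an update from a btree iterator: extent
 * updates are handled separately, deletions may be turned into whiteouts when
 * snapshots are involved, and updates to cached btrees are redirected to the
 * key cache:
 */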
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	btree_path_idx_t path_idx = iter->update_path ?: iter->path;
	int ret;

	if (iter->flags & BTREE_ITER_is_extents)
		return bch2_trans_update_extent(trans, iter, k, flags);

	if (bkey_deleted(&k->k) &&
	    !(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    (iter->flags & BTREE_ITER_filter_snapshots)) {
		ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
		if (unlikely(ret < 0))
			return ret;

		if (ret)
			k->k.type = KEY_TYPE_whiteout;
	}

	/*
	 * Ensure that updates to cached btrees go to the key cache:
	 */
	struct btree_path *path = trans->paths + path_idx;
	if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
	    !path->cached &&
	    !path->level &&
	    btree_id_cached(trans->c, path->btree_id)) {
		ret = bch2_trans_update_get_key_cache(trans, iter, path);
		if (ret)
			return ret;

		path_idx = iter->key_cache_path;
	}

	return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}
int bch2_btree_insert_clone_trans(struct btree_trans *trans,
				  enum btree_id btree,
				  struct bkey_i *k)
{
	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_copy(n, k);
	return bch2_btree_insert_trans(trans, btree, n, 0);
}
struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
	unsigned new_top = trans->journal_entries_u64s + u64s;
	unsigned old_size = trans->journal_entries_size;

	if (new_top > trans->journal_entries_size) {
		trans->journal_entries_size = roundup_pow_of_two(new_top);

		btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
	}

	struct jset_entry *n =
		bch2_trans_kmalloc_nomemzero(trans,
				trans->journal_entries_size * sizeof(u64));
	if (IS_ERR(n))
		return ERR_CAST(n);

	if (trans->journal_entries)
		memcpy(n, trans->journal_entries, old_size * sizeof(u64));
	trans->journal_entries = n;

	struct jset_entry *e = btree_trans_journal_entries_top(trans);
	trans->journal_entries_u64s = new_top;
	return e;
}
int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
			     enum btree_id btree, struct bpos end)
{
	bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
	struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_btree_iter_advance(iter);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	BUG_ON(k.k->type != KEY_TYPE_deleted);

	if (bkey_gt(k.k->p, end)) {
		ret = -BCH_ERR_ENOSPC_btree_slot;
		goto err;
	}

	return 0;
err:
	bch2_trans_iter_exit(trans, iter);
	return ret;
}
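/* Register a hook to be called when this transaction commits: */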
void bch2_trans_commit_hook(struct btree_trans *trans,
			    struct btree_trans_commit_hook *h)
{
	h->next = trans->hooks;
	trans->hooks = h;
}
int bch2_btree_insert_nonextent(struct btree_trans *trans,
				enum btree_id btree, struct bkey_i *k,
				enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, k->k.p,
			     BTREE_ITER_cached|
			     BTREE_ITER_not_extents|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
			    struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
			     BTREE_ITER_intent|flags);
	int ret = bch2_btree_iter_traverse(&iter) ?:
		  bch2_trans_update(trans, &iter, k, flags);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/**
 * bch2_btree_insert - insert a key into a given btree
 * @c:			pointer to struct bch_fs
 * @id:			btree to insert into
 * @k:			key to insert
 * @disk_res:		must be non-NULL whenever inserting or potentially
 *			splitting data extents
 * @flags:		transaction commit flags
 * @iter_flags:		btree iter update trigger flags
 *
 * Returns:		0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
		      struct disk_reservation *disk_res, int flags,
		      enum btree_iter_update_trigger_flags iter_flags)
{
	return bch2_trans_commit_do(c, disk_res, NULL, flags,
				    bch2_btree_insert_trans(trans, id, k, iter_flags));
}
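/*
 * Illustrative usage only (the position @pos and error handling are
 * hypothetical; bch2_btree_insert() commits in its own transaction):
 *
 *	struct bkey_i_inode_generation gen;
 *
 *	bkey_inode_generation_init(&gen.k_i);
 *	gen.k.p = pos;
 *	ret = bch2_btree_insert(c, BTREE_ID_inodes, &gen.k_i, NULL, 0, 0);
 */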
int bch2_btree_delete_at(struct btree_trans *trans,
			 struct btree_iter *iter, unsigned update_flags)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	bkey_init(&k->k);
	k->k.p = iter->pos;
	return bch2_trans_update(trans, iter, k, update_flags);
}
int bch2_btree_delete(struct btree_trans *trans,
		      enum btree_id btree, struct bpos pos,
		      unsigned update_flags)
{
	struct btree_iter iter;
	int ret;

	bch2_trans_iter_init(trans, &iter, btree, pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(trans, &iter, update_flags);
	bch2_trans_iter_exit(trans, &iter);

	return ret;
}
int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
				  struct bpos start, struct bpos end,
				  unsigned update_flags,
				  u64 *journal_seq)
{
	u32 restart_count = trans->restart_count;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
	while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
		struct disk_reservation disk_res =
			bch2_disk_reservation_init(trans->c, 0);
		struct bkey_i delete;

		ret = bkey_err(k);
		if (ret)
			goto err;

		bkey_init(&delete.k);

		/*
		 * This could probably be more efficient for extents:
		 */

		/*
		 * For extents, iter.pos won't necessarily be the same as
		 * bkey_start_pos(k.k) (for non extents they always will be the
		 * same). It's important that we delete starting from iter.pos
		 * because the range we want to delete could start in the middle
		 * of k.
		 *
		 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
		 * bkey_start_pos(k.k)).
		 */
		delete.k.p = iter.pos;

		if (iter.flags & BTREE_ITER_is_extents)
			bch2_key_resize(&delete.k,
					bpos_min(end, k.k->p).offset -
					iter.pos.offset);

		ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
			bch2_trans_commit(trans, &disk_res, journal_seq,
					  BCH_TRANS_COMMIT_no_enospc);
		bch2_disk_reservation_put(trans->c, &disk_res);
err:
		/*
		 * the bch2_trans_begin() call is in a weird place because we
		 * need to call it after every transaction commit, to avoid path
		 * overflow, but don't want to call it if the delete operation
		 * is a no-op and we have no work to do:
		 */
		bch2_trans_begin(trans);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ret ?: trans_was_restarted(trans, restart_count);
}
/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
			    struct bpos start, struct bpos end,
			    unsigned update_flags,
			    u64 *journal_seq)
{
	int ret = bch2_trans_run(c,
			bch2_btree_delete_range_trans(trans, id, start, end,
						      update_flags, journal_seq));
	if (ret == -BCH_ERR_transaction_restart_nested)
		ret = 0;
	return ret;
}
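/*
 * Set or clear a single "bit" in a btree used as a bitset: the key at the
 * iterator's position becomes KEY_TYPE_set or is deleted:
 */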
int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, bool set)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
	int ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		return ret;

	bkey_init(&k->k);
	k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k->k.p = iter->pos;
	if (iter->flags & BTREE_ITER_is_extents)
		bch2_key_resize(&k->k, 1);

	return bch2_trans_update(trans, iter, k, 0);
}
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
		       struct bpos pos, bool set)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

	int ret = bch2_btree_iter_traverse(&iter) ?:
		  bch2_btree_bit_mod_iter(trans, &iter, set);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
				struct bpos pos, bool set)
{
	struct bkey_i k;

	bkey_init(&k.k);
	k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	k.k.p = pos;

	return bch2_trans_update_buffered(trans, btree, &k);
}
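/*
 * Append a log message to the journal, as a BCH_JSET_ENTRY_log entry in this
 * transaction:
 */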
int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf)
{
	unsigned u64s = DIV_ROUND_UP(buf->pos, sizeof(u64));
	prt_chars(buf, '\0', u64s * sizeof(u64) - buf->pos);

	int ret = buf->allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		return ret;

	struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
	ret = PTR_ERR_OR_ZERO(e);
	if (ret)
		return ret;

	struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
	journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
	memcpy(l->d, buf->buf, buf->pos);
	return 0;
}
__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
		  va_list args)
{
	struct printbuf buf = PRINTBUF;
	prt_vprintf(&buf, fmt, args);

	unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
	prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

	int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
	if (ret)
		goto err;

	if (!test_bit(JOURNAL_running, &c->journal.flags)) {
		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
		if (ret)
			goto err;

		struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
		journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
		memcpy(l->d, buf.buf, buf.pos);
		c->journal.early_journal_entries.nr += jset_u64s(u64s);
	} else {
		ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags,
					   bch2_trans_log_msg(trans, &buf));
	}
err:
	printbuf_exit(&buf);
	return ret;
}
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, 0, fmt, args);
	va_end(args);
	return ret;
}
/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking:
 */
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
	va_end(args);
	return ret;
}