// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"

#include <linux/sched/mm.h>

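/*
 * Cached keys in the subvolumes btree are read-locked very frequently, so
 * (presumably) it's worth paying for percpu reader locks there to avoid
 * cacheline contention:
 */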
static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
	return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset		= offsetof(struct bkey_cached, hash),
	.key_offset		= offsetof(struct bkey_cached, key),
	.key_len		= sizeof(struct bkey_cached_key),
	.obj_cmpfn		= bch2_btree_key_cache_cmp_fn,
	.automatic_shrinking	= true,
};

static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
					 struct bkey_cached *ck,
					 enum btree_node_locked_type lock_held)
{
	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
	mark_btree_node_locked(trans, path, 0, lock_held);
}

inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

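/*
 * Try to take the locks needed to evict @ck without blocking: fails if the
 * entry is dirty or if either trylock fails.
 */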
static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

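/*
 * Remove @ck from the hash table; on success the key is poisoned so stale
 * lookups can't match it, and nr_keys is decremented.
 */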
static bool bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
					   bch2_btree_key_cache_params);
	if (ret) {
		memset(&ck->key, ~0, sizeof(ck->key));
		atomic_long_dec(&c->nr_keys);
	}

	return ret;
}

static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
	struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
	struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);

	this_cpu_dec(*c->btree_key_cache.nr_pending);
	kmem_cache_free(bch2_key_cache, ck);
}

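/*
 * Freeing is deferred: the entry goes on an rcu_pending list and is only
 * returned to the slab by __bkey_cached_free(), once readers under the
 * btree_trans_barrier SRCU have drained.
 */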
static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);

	bool pcpu_readers = ck->c.lock.readers != NULL;
	rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
	this_cpu_inc(*bc->nr_pending);
}

static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
	gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;

	struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
	if (unlikely(!ck))
		return NULL;
	ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
	if (unlikely(!ck->k)) {
		kmem_cache_free(bch2_key_cache, ck);
		return NULL;
	}
	ck->u64s = key_u64s;
	return ck;
}

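/*
 * Allocate a bkey_cached: prefer a recently freed entry from our rcu_pending
 * list, then a fresh slab allocation (dropping btree locks if the allocation
 * would block), and finally an entry stolen from another CPU's pending list.
 * Returned with intent and write locks held.
 */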
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned key_u64s)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
	int ret;

	struct bkey_cached *ck = container_of_or_null(
				rcu_pending_dequeue(&bc->pending[pcpu_readers]),
				struct bkey_cached, rcu);
	if (ck)
		goto lock;

	ck = allocate_dropping_locks(trans, ret,
				     __bkey_cached_alloc(key_u64s, _gfp));
	if (ret) {
		if (ck) {
			kfree(ck->k);
			kmem_cache_free(bch2_key_cache, ck);
		}
		return ERR_PTR(ret);
	}

	if (ck) {
		bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
		ck->c.cached = true;
		goto lock;
	}

	ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
				  struct bkey_cached, rcu);
	if (!ck)
		return NULL;
lock:
	six_lock_intent(&ck->c.lock, NULL, NULL);
	six_lock_write(&ck->c.lock, NULL, NULL);
	return ck;
}

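/*
 * Last-ditch allocation path: walk the hash table looking for a clean,
 * unlocked entry we can evict and reuse.
 */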
static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct bkey_cached *ck;
	unsigned i;

	rcu_read_lock();
	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				if (bkey_cached_evict(c, ck))
					goto out;
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}
		}
	ck = NULL;
out:
	rcu_read_unlock();
	return ck;
}

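/*
 * Create a new key cache entry for path->pos, sized for and initialized from
 * @k (the key read from the btree), and insert it into the hash table.
 */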
static int btree_key_cache_create(struct btree_trans *trans, struct btree_path *path,
				  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at
	 * most 7 bytes (it won't be used):
	 */
	unsigned key_u64s = k.k->u64s + 1;

	/*
	 * Allocate some extra space so that the transaction commit path is less
	 * likely to have to reallocate, since that requires a transaction
	 * restart:
	 */
	key_u64s = min(256U, (key_u64s * 3) / 2);
	key_u64s = roundup_pow_of_two(key_u64s);

	struct bkey_cached *ck = bkey_cached_alloc(trans, path, key_u64s);
	int ret = PTR_ERR_OR_ZERO(ck);
	if (ret)
		return ret;

	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(bc);
		if (unlikely(!ck)) {
			bch_err(c, "error allocating memory for key cache item, btree %s",
				bch2_btree_id_str(path->btree_id));
			return -BCH_ERR_ENOMEM_btree_key_cache_create;
		}
	}

	ck->c.level		= 0;
	ck->c.btree_id		= path->btree_id;
	ck->key.btree_id	= path->btree_id;
	ck->key.pos		= path->pos;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(key_u64s > ck->u64s)) {
		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);

		struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
				kmalloc(key_u64s * sizeof(u64), _gfp));
		if (unlikely(!new_k)) {
			bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
				bch2_btree_id_str(ck->key.btree_id), key_u64s);
			ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
			goto err;
		}
		if (unlikely(ret)) {
			kfree(new_k);
			goto err;
		}

		kfree(ck->k);
		ck->k		= new_k;
		ck->u64s	= key_u64s;
	}

	bkey_reassemble(ck->k, k);

	ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);
	if (unlikely(ret)) /* raced with another fill? */
		goto err;

	atomic_long_inc(&bc->nr_keys);
	six_unlock_write(&ck->c.lock);

	enum six_lock_type lock_want = __btree_lock_want(path, 0);
	if (lock_want == SIX_LOCK_read)
		six_lock_downgrade(&ck->c.lock);
	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
	path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
err:
	bkey_cached_free(bc, ck);
	mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
	return ret;
}

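/*
 * Slow path: read the key from the btree proper and create a key cache entry
 * for it.
 */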
static noinline int btree_key_cache_fill(struct btree_trans *trans,
					 struct btree_path *ck_path,
					 unsigned flags)
{
	if (flags & BTREE_ITER_cached_nofill) {
		ck_path->uptodate = BTREE_ITER_UPTODATE;
		return 0;
	}

	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
			     BTREE_ITER_key_cache_fill|
			     BTREE_ITER_cached_nofill);
	iter.flags &= ~BTREE_ITER_with_journal;
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	/* Recheck after btree lookup, before allocating: */
	ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0;
	if (unlikely(ret))
		goto out;

	ret = btree_key_cache_create(trans, ck_path, k);
	if (ret)
		goto err;
out:
	/* We're not likely to need this iterator again: */
	bch2_set_btree_iter_dontneed(&iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

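/*
 * Fast path: the key cache entry already exists - look it up, lock it, and
 * point the path at it.
 */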
static inline int btree_path_traverse_cached_fast(struct btree_trans *trans,
						  struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck)
		return -ENOENT;

	enum six_lock_type lock_want = __btree_lock_want(path, 0);

	int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
	if (ret)
		return ret;

	if (ck->key.btree_id != path->btree_id ||
	    !bpos_eq(ck->key.pos, path->pos)) {
		six_unlock_type(&ck->c.lock, lock_want);
		goto retry;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
	path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
}

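/*
 * Traverse a cached path: use the existing key cache entry if present,
 * otherwise fill the cache from the btree; -EEXIST from the fill path (we lost
 * a race with another fill) just means retry.
 */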
int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
				    unsigned flags)
{
	EBUG_ON(path->level);

	path->l[1].b = NULL;

	int ret;
	do {
		ret = btree_path_traverse_cached_fast(trans, path);
		if (unlikely(ret == -ENOENT))
			ret = btree_key_cache_fill(trans, path, flags);
	} while (ret == -EEXIST);

	if (unlikely(ret)) {
		path->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			btree_node_unlock(trans, path, 0);
			path->l[0].b = ERR_PTR(ret);
		}
	}
	return ret;
}

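/*
 * Write a dirty key cache entry back to the btree and drop its journal pin,
 * optionally evicting the entry afterwards.
 */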
static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter c_iter, b_iter;
	struct bkey_cached *ck = NULL;
	int ret;

	bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
			     BTREE_ITER_slots|
			     BTREE_ITER_intent|
			     BTREE_ITER_all_snapshots);
	bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	b_iter.flags &= ~BTREE_ITER_with_key_cache;

	ret = bch2_btree_iter_traverse(&c_iter);
	if (ret)
		goto out;

	ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
	if (!ck)
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (evict)
			goto evict;
		goto out;
	}

	if (journal_seq && ck->journal.seq != journal_seq)
		goto out;

	trans->journal_res.seq = ck->journal.seq;

	/*
	 * If we're at the end of the journal, we really want to free up space
	 * in the journal right away - we don't want to pin that old journal
	 * sequence number with a new btree node write, we want to re-journal
	 * the update
	 */
	if (ck->journal.seq == journal_last_seq(j))
		commit_flags |= BCH_WATERMARK_reclaim;

	if (ck->journal.seq != journal_last_seq(j) ||
	    !test_bit(JOURNAL_space_low, &c->journal.flags))
		commit_flags |= BCH_TRANS_COMMIT_no_journal_res;

	ret   = bch2_btree_iter_traverse(&b_iter) ?:
		bch2_trans_update(trans, &b_iter, ck->k,
				  BTREE_UPDATE_key_cache_reclaim|
				  BTREE_UPDATE_internal_snapshot_node|
				  BTREE_TRIGGER_norun) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  commit_flags);

	bch2_fs_fatal_err_on(ret &&
			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
			     !bch2_journal_error(j), c,
			     "flushing key cache: %s", bch2_err_str(ret));
	if (ret)
		goto out;

	bch2_journal_pin_drop(j, &ck->journal);

	struct btree_path *path = btree_iter_path(trans, &c_iter);
	BUG_ON(!btree_node_locked(path, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
		struct btree_path *path2;
		unsigned i;
evict:
		trans_for_each_path(trans, path2, i)
			if (path2 != path)
				__bch2_btree_path_unlock(trans, path2);

		bch2_btree_node_lock_write_nofail(trans, path, &ck->c);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
		if (bkey_cached_evict(&c->btree_key_cache, ck)) {
			bkey_cached_free(&c->btree_key_cache, ck);
		} else {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
		}
	}
out:
	bch2_trans_iter_exit(trans, &b_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	return ret;
}

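/*
 * Journal pin flush callback: journal reclaim calls this to force the key
 * this pin belongs to back to the btree, so the pinned journal sequence
 * number can be released.
 */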
int bch2_btree_key_cache_journal_flush(struct journal *j,
				struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans *trans = bch2_trans_get(c);
	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	int ret = 0;

	btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}

	if (ck->seq != seq) {
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	ret = lockrestart_do(trans,
		btree_key_cache_flush_pos(trans, key, seq,
				BCH_TRANS_COMMIT_journal_reclaim, false));
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	bch2_trans_put(trans);
	return ret;
}

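/*
 * Called from the transaction commit path: overwrite the cached key with the
 * new update, mark it dirty, and take/refresh the journal pin.
 */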
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  unsigned flags,
				  struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
	struct bkey_i *insert = insert_entry->k;
	bool kick_reclaim = false;

	BUG_ON(insert->k.u64s > ck->u64s);

	bkey_copy(ck->k, insert);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	/*
	 * To minimize lock contention, we only add the journal pin here and
	 * defer pin updates to the flush callback via ->seq. Be careful not to
	 * update ->seq on nojournal commits because we don't want to update the
	 * pin to a seq that doesn't include journal updates on disk. Otherwise
	 * we risk losing the update after a crash.
	 *
	 * The only exception is if the pin is not active in the first place. We
	 * have to add the pin because journal reclaim drives key cache
	 * flushing. The flush callback will not proceed unless ->seq matches
	 * the latest pin, so make sure it starts with a consistent value.
	 */
	if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
	    !journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
	}
	bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
			     &ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
			       struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = (void *) path->l[0].b;

	/*
	 * We just did an update to the btree, bypassing the key cache: the key
	 * cache key is now stale and must be dropped, even if dirty:
	 */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_dec(&c->btree_key_cache.nr_dirty);
		bch2_journal_pin_drop(&c->journal, &ck->journal);
	}

	bkey_cached_evict(bc, ck);
	bkey_cached_free(bc, ck);

	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	path->should_be_locked = false;
}

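/*
 * Shrinker scan callback: walk the hash table, resuming from where the last
 * scan stopped, giving recently accessed entries a second chance and evicting
 * clean, unaccessed, unlocked entries.
 */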
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned iter, start;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	rcu_read_lock();

	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);

	/*
	 * Scanning is expensive while a rehash is in progress - most elements
	 * will be on the new hashtable, if it's in progress
	 *
	 * A rehash could still start while we're scanning - that's ok, we'll
	 * still see most elements.
	 */
	if (unlikely(tbl->nest)) {
		rcu_read_unlock();
		srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
		return SHRINK_STOP;
	}

	iter = bc->shrink_iter;
	if (iter >= tbl->size)
		iter = 0;
	start = iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(&tbl->buckets[iter]);

		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
			ck = container_of(pos, struct bkey_cached, hash);

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
				bc->skipped_dirty++;
			} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
				clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
				bc->skipped_accessed++;
			} else if (!bkey_cached_lock_for_evict(ck)) {
				bc->skipped_lock_fail++;
			} else if (bkey_cached_evict(bc, ck)) {
				bkey_cached_free(bc, ck);
				bc->freed++;
				freed++;
			} else {
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}

			scanned++;
			if (scanned >= nr)
				goto out;

			pos = next;
		}

		iter++;
		if (iter >= tbl->size)
			iter = 0;
	} while (scanned < nr && iter != start);
out:
	bc->shrink_iter = iter;

	rcu_read_unlock();
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	long nr = atomic_long_read(&bc->nr_keys) -
		  atomic_long_read(&bc->nr_dirty);

	/*
	 * Avoid hammering our shrinker too much if it's nearly empty - the
	 * shrinker code doesn't take into account how big our cache is, if it's
	 * mostly empty but the system is under memory pressure it causes nasty
	 * lock contention:
	 */
	return nr > 128 ? nr : 0;
}

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	struct rhash_head *pos;
	unsigned i;

	shrinker_free(bc->shrink);

	/*
	 * The loop is needed to guard against racing with rehash:
	 */
	while (atomic_long_read(&bc->nr_keys)) {
		rcu_read_lock();
		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
		if (tbl) {
			if (tbl->nest) {
				/* wait for in progress rehash */
				rcu_read_unlock();
				mutex_lock(&bc->table.mutex);
				mutex_unlock(&bc->table.mutex);
				continue;
			}
			for (i = 0; i < tbl->size; i++)
				while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
					ck = container_of(pos, struct bkey_cached, hash);
					BUG_ON(!bkey_cached_evict(bc, ck));
					kfree(ck->k);
					kmem_cache_free(bch2_key_cache, ck);
				}
		}
		rcu_read_unlock();
	}

	if (atomic_long_read(&bc->nr_dirty) &&
	    !bch2_journal_error(&c->journal) &&
	    test_bit(BCH_FS_was_rw, &c->flags))
		panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
		      atomic_long_read(&bc->nr_dirty));

	if (atomic_long_read(&bc->nr_keys))
		panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
		      atomic_long_read(&bc->nr_keys));

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);

	rcu_pending_exit(&bc->pending[0]);
	rcu_pending_exit(&bc->pending[1]);

	free_percpu(bc->nr_pending);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct shrinker *shrink;

	bc->nr_pending = alloc_percpu(size_t);
	if (!bc->nr_pending)
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
	    rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	bc->table_init_done = true;

	shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
	if (!shrink)
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
	bc->shrink = shrink;
	shrink->count_objects	= bch2_btree_key_cache_count;
	shrink->scan_objects	= bch2_btree_key_cache_scan;
	shrink->batch		= 1 << 14;
	shrink->private_data	= c;
	shrinker_register(shrink);
	return 0;
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
	printbuf_tabstop_push(out, 24);
	printbuf_tabstop_push(out, 12);

	prt_printf(out, "keys:\t%lu\r\n",		atomic_long_read(&bc->nr_keys));
	prt_printf(out, "dirty:\t%lu\r\n",		atomic_long_read(&bc->nr_dirty));
	prt_printf(out, "table size:\t%u\r\n",		bc->table.tbl->size);

	prt_printf(out, "shrinker:\n");
	prt_printf(out, "requested_to_free:\t%lu\r\n",	bc->requested_to_free);
	prt_printf(out, "freed:\t%lu\r\n",		bc->freed);
	prt_printf(out, "skipped_dirty:\t%lu\r\n",	bc->skipped_dirty);
	prt_printf(out, "skipped_accessed:\t%lu\r\n",	bc->skipped_accessed);
	prt_printf(out, "skipped_lock_fail:\t%lu\r\n",	bc->skipped_lock_fail);

	prt_printf(out, "pending:\t%zu\r\n",		per_cpu_sum(bc->nr_pending));
}

void bch2_btree_key_cache_exit(void)
{
	kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
	bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
	if (!bch2_key_cache)
		return -ENOMEM;

	return 0;
}