1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2012 Google, Inc.
5 * Foreground allocator code: allocate buckets from the freelist, and allocate at
6 * sector granularity from writepoints.
8 * bch2_bucket_alloc() allocates a single bucket from a specific device.
10 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11 * in a given filesystem.
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "backpointers.h"
18 #include "btree_iter.h"
19 #include "btree_update.h"
22 #include "buckets_waiting_for_journal.h"
25 #include "disk_groups.h"
31 #include "nocow_locking.h"
34 #include <linux/math64.h>
35 #include <linux/rculist.h>
36 #include <linux/rcupdate.h>
38 static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
41 if (!mutex_trylock(lock)) {
42 bch2_trans_unlock(trans);
47 const char * const bch2_watermarks[] = {
55 * Open buckets represent buckets that are currently being allocated from. They
58 * - They track buckets that have been partially allocated, allowing for
59 * sub-bucket sized allocations - they're used by the sector allocator below
61 * - They provide a reference to the buckets they own that mark and sweep GC
62 * can find, until the new allocation has a pointer to it inserted into the
65 * When allocating some space with the sector allocator, the allocation comes
66 * with a reference to an open bucket - the caller is required to put that
67 * reference _after_ doing the index update that makes its allocation reachable.
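 *
 * A rough sketch of that calling convention (simplified and assumed from the
 * description above, not copied verbatim from any one caller; @k and
 * @saved_refs are placeholders):
 *
 *	bch2_alloc_sectors_start_trans(trans, ..., &wp);
 *	bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors, cached);
 *	bch2_open_bucket_get(c, wp, &saved_refs);
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	... do the write, then the btree update that makes it reachable ...
 *
 *	bch2_open_buckets_put(c, &saved_refs);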
70 void bch2_reset_alloc_cursors(struct bch_fs *c)
73 for_each_member_device_rcu(c, ca, NULL)
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor));
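/*
 * Open buckets live in a small hash table keyed on (dev, bucket), so the
 * allocator can cheaply ask "is this bucket currently being written to?"
 * (bch2_bucket_is_open()) before handing it out again:
 */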
78 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
80 open_bucket_idx_t idx = ob - c->open_buckets;
81 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
87 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
89 open_bucket_idx_t idx = ob - c->open_buckets;
90 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
92 while (*slot != idx) {
94 slot = &c->open_buckets[*slot].hash;
101 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
103 struct bch_dev *ca = ob_dev(c, ob);
106 ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
110 percpu_down_read(&c->mark_lock);
111 spin_lock(&ob->lock);
116 spin_unlock(&ob->lock);
117 percpu_up_read(&c->mark_lock);
119 spin_lock(&c->freelist_lock);
120 bch2_open_bucket_hash_remove(c, ob);
122 ob->freelist = c->open_buckets_freelist;
123 c->open_buckets_freelist = ob - c->open_buckets;
125 c->open_buckets_nr_free++;
126 ca->nr_open_buckets--;
127 spin_unlock(&c->freelist_lock);
129 closure_wake_up(&c->open_buckets_wait);
132 void bch2_open_bucket_write_error(struct bch_fs *c,
133 struct open_buckets *obs,
136 struct open_bucket *ob;
139 open_bucket_for_each(c, obs, ob, i)
140 if (ob->dev == dev && ob->ec)
141 bch2_ec_bucket_cancel(c, ob);
144 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
146 struct open_bucket *ob;
148 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
150 ob = c->open_buckets + c->open_buckets_freelist;
151 c->open_buckets_freelist = ob->freelist;
152 atomic_set(&ob->pin, 1);
155 c->open_buckets_nr_free--;
159 static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
161 BUG_ON(c->open_buckets_partial_nr >=
162 ARRAY_SIZE(c->open_buckets_partial));
164 spin_lock(&c->freelist_lock);
165 ob->on_partial_list = true;
166 c->open_buckets_partial[c->open_buckets_partial_nr++] =
167 ob - c->open_buckets;
168 spin_unlock(&c->freelist_lock);
170 closure_wake_up(&c->open_buckets_wait);
171 closure_wake_up(&c->freelist_wait);
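/*
 * Buckets that still have space left when a write point drops them are stashed
 * on c->open_buckets_partial above, so that bucket_alloc_set_partial() can
 * reuse them before allocating fresh buckets.
 */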
174 /* _only_ for allocating the journal on a new device: */
175 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
177 while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
178 u64 b = ca->new_fs_bucket_idx++;
180 if (!is_superblock_bucket(ca, b) &&
181 (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
188 static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
191 case BCH_WATERMARK_interior_updates:
193 case BCH_WATERMARK_reclaim:
194 return OPEN_BUCKETS_COUNT / 6;
195 case BCH_WATERMARK_btree:
196 case BCH_WATERMARK_btree_copygc:
197 return OPEN_BUCKETS_COUNT / 4;
198 case BCH_WATERMARK_copygc:
199 return OPEN_BUCKETS_COUNT / 3;
201 return OPEN_BUCKETS_COUNT / 2;
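/*
 * The more critical the allocation, the deeper into the open_bucket reserve it
 * may dig. For example, assuming OPEN_BUCKETS_COUNT is 1024 (see alloc_types.h
 * for the real value): normal writes get -BCH_ERR_open_buckets_empty once no
 * more than half the open buckets remain free, copygc keeps going down to ~341
 * free, btree allocations down to 256, and interior updates are only refused
 * when none are left.
 */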
205 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
207 enum bch_watermark watermark,
208 const struct bch_alloc_v4 *a,
209 struct bucket_alloc_state *s,
212 struct open_bucket *ob;
214 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
219 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
224 if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
225 c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
226 s->skipped_need_journal_commit++;
230 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
235 spin_lock(&c->freelist_lock);
237 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
239 closure_wait(&c->open_buckets_wait, cl);
241 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
242 spin_unlock(&c->freelist_lock);
243 return ERR_PTR(-BCH_ERR_open_buckets_empty);
246 /* Recheck under lock: */
247 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
248 spin_unlock(&c->freelist_lock);
253 ob = bch2_open_bucket_alloc(c);
255 spin_lock(&ob->lock);
258 ob->sectors_free = ca->mi.bucket_size;
259 ob->dev = ca->dev_idx;
262 spin_unlock(&ob->lock);
264 ca->nr_open_buckets++;
265 bch2_open_bucket_hash_add(c, ob);
267 track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], false);
268 track_event_change(&c->times[BCH_TIME_blocked_allocate], false);
270 spin_unlock(&c->freelist_lock);
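/*
 * try_alloc_bucket() works from a freespace btree key: the low 56 bits of the
 * key's offset are the bucket number and the top 8 bits are generation bits,
 * which are cross-checked against the alloc btree before the bucket is passed
 * to __try_alloc_bucket() above.
 */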
274 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
275 enum bch_watermark watermark, u64 free_entry,
276 struct bucket_alloc_state *s,
277 struct bkey_s_c freespace_k,
280 struct bch_fs *c = trans->c;
281 struct btree_iter iter = { NULL };
283 struct open_bucket *ob;
284 struct bch_alloc_v4 a_convert;
285 const struct bch_alloc_v4 *a;
286 u64 b = free_entry & ~(~0ULL << 56);
287 unsigned genbits = free_entry >> 56;
288 struct printbuf buf = PRINTBUF;
291 if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
292 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
294 ca->mi.first_bucket, ca->mi.nbuckets);
295 bch2_bkey_val_to_text(&buf, c, freespace_k);
296 bch2_trans_inconsistent(trans, "%s", buf.buf);
301 k = bch2_bkey_get_iter(trans, &iter,
302 BTREE_ID_alloc, POS(ca->dev_idx, b),
310 a = bch2_alloc_to_v4(k, &a_convert);
312 if (a->data_type != BCH_DATA_free) {
313 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
318 prt_printf(&buf, "non free bucket in freespace btree\n"
320 bch2_bkey_val_to_text(&buf, c, freespace_k);
321 prt_printf(&buf, "\n ");
322 bch2_bkey_val_to_text(&buf, c, k);
323 bch2_trans_inconsistent(trans, "%s", buf.buf);
328 if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
329 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
330 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
332 genbits, alloc_freespace_genbits(*a) >> 56);
333 bch2_bkey_val_to_text(&buf, c, freespace_k);
334 prt_printf(&buf, "\n ");
335 bch2_bkey_val_to_text(&buf, c, k);
336 bch2_trans_inconsistent(trans, "%s", buf.buf);
341 if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
342 struct bch_backpointer bp;
343 struct bpos bp_pos = POS_MIN;
345 ret = bch2_get_next_backpointer(trans, ca, POS(ca->dev_idx, b), -1,
347 BTREE_ITER_nopreserve);
353 if (!bkey_eq(bp_pos, POS_MAX)) {
355 * Bucket may have data in it - we don't call
356 * bch2_trans_inconsistent() because fsck hasn't
364 ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
366 bch2_set_btree_iter_dontneed(&iter);
369 bch2_set_btree_iter_dontneed(&iter);
370 bch2_trans_iter_exit(trans, &iter);
376 * This path is for before the freespace btree is initialized:
378 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
379 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
381 static noinline struct open_bucket *
382 bch2_bucket_alloc_early(struct btree_trans *trans,
384 enum bch_watermark watermark,
385 struct bucket_alloc_state *s,
388 struct btree_iter iter, citer;
389 struct bkey_s_c k, ck;
390 struct open_bucket *ob = NULL;
391 u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
392 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
393 u64 alloc_start = max(first_bucket, *dev_alloc_cursor);
394 u64 alloc_cursor = alloc_start;
398 * Scan with an uncached iterator to avoid polluting the key cache. An
399 * uncached iter will return a cached key if one exists, but if not
400 * there is no other underlying protection for the associated key cache
401 * slot. To avoid racing bucket allocations, look up the cached key slot
402 * of any likely allocation candidate before attempting to proceed with
403 * the allocation. This provides proper exclusion on the associated
407 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
408 BTREE_ITER_slots, k, ret) {
409 u64 bucket = k.k->p.offset;
411 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
414 if (ca->new_fs_bucket_idx &&
415 is_superblock_bucket(ca, k.k->p.offset))
418 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
419 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
420 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
421 if (s->btree_bitmap == BTREE_BITMAP_YES &&
422 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
425 bucket = sector_to_bucket(ca,
426 round_up(bucket_to_sector(ca, bucket) + 1,
427 1ULL << ca->mi.btree_bitmap_shift));
428 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
430 s->skipped_mi_btree_bitmap++;
434 struct bch_alloc_v4 a_convert;
435 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
436 if (a->data_type != BCH_DATA_free)
439 /* now check the cached key to serialize concurrent allocs of the bucket */
440 ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
445 a = bch2_alloc_to_v4(ck, &a_convert);
446 if (a->data_type != BCH_DATA_free)
451 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
453 bch2_set_btree_iter_dontneed(&citer);
454 bch2_trans_iter_exit(trans, &citer);
458 bch2_trans_iter_exit(trans, &iter);
460 alloc_cursor = iter.pos.offset;
465 if (!ob && alloc_start > first_bucket) {
466 alloc_cursor = alloc_start = first_bucket;
470 *dev_alloc_cursor = alloc_cursor;
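/*
 * The normal path once a device's freespace btree has been initialized:
 * instead of walking every alloc key like bch2_bucket_alloc_early(), scan
 * BTREE_ID_freespace, which only has entries for free buckets.
 */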
475 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
477 enum bch_watermark watermark,
478 struct bucket_alloc_state *s,
481 struct btree_iter iter;
483 struct open_bucket *ob = NULL;
484 u64 *dev_alloc_cursor = &ca->alloc_cursor[s->btree_bitmap];
485 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor));
486 u64 alloc_cursor = alloc_start;
489 BUG_ON(ca->new_fs_bucket_idx);
491 for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
492 POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
493 if (k.k->p.inode != ca->dev_idx)
496 for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
497 alloc_cursor < k.k->p.offset;
499 ret = btree_trans_too_many_iters(trans);
507 u64 bucket = alloc_cursor & ~(~0ULL << 56);
508 if (s->btree_bitmap != BTREE_BITMAP_ANY &&
509 s->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca,
510 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) {
511 if (s->btree_bitmap == BTREE_BITMAP_YES &&
512 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift)
515 bucket = sector_to_bucket(ca,
516 round_up(bucket_to_sector(ca, bucket) + 1,
517 1ULL << ca->mi.btree_bitmap_shift));
518 u64 genbits = alloc_cursor >> 56;
519 alloc_cursor = bucket | (genbits << 56);
521 if (alloc_cursor > k.k->p.offset)
522 bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
523 s->skipped_mi_btree_bitmap++;
527 ob = try_alloc_bucket(trans, ca, watermark,
528 alloc_cursor, s, k, cl);
530 bch2_set_btree_iter_dontneed(&iter);
539 bch2_trans_iter_exit(trans, &iter);
544 if (!ob && alloc_start > ca->mi.first_bucket) {
545 alloc_cursor = alloc_start = ca->mi.first_bucket;
549 *dev_alloc_cursor = alloc_cursor;
554 static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
555 enum bch_watermark watermark,
556 enum bch_data_type data_type,
558 struct bch_dev_usage *usage,
559 struct bucket_alloc_state *s,
560 struct open_bucket *ob)
562 struct printbuf buf = PRINTBUF;
564 printbuf_tabstop_push(&buf, 24);
566 prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
567 prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
568 prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
569 prt_printf(&buf, "blocking\t%u\n", cl != NULL);
570 prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
571 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
572 prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
573 bch2_copygc_wait_amount(c),
574 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
575 prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
576 prt_printf(&buf, "open\t%llu\n", s->skipped_open);
577 prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
578 prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
579 prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);
580 prt_printf(&buf, "mi_btree_bitmap\t%llu\n", s->skipped_mi_btree_bitmap);
583 prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
584 trace_bucket_alloc(c, buf.buf);
586 prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
587 trace_bucket_alloc_fail(c, buf.buf);
594 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
595 * @trans: transaction object
596 * @ca: device to allocate from
597 * @watermark: how important is this allocation?
598 * @data_type: BCH_DATA_journal, btree, user...
599 * @cl: if not NULL, closure to be used to wait if buckets not available
600 * @usage: the current device usage is also returned here, as a secondary output
602 * Returns: an open_bucket on success, or an ERR_PTR() on failure.
604 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
606 enum bch_watermark watermark,
607 enum bch_data_type data_type,
609 struct bch_dev_usage *usage)
611 struct bch_fs *c = trans->c;
612 struct open_bucket *ob = NULL;
613 bool freespace = READ_ONCE(ca->mi.freespace_initialized);
615 struct bucket_alloc_state s = {
616 .btree_bitmap = data_type == BCH_DATA_btree,
618 bool waiting = false;
620 bch2_dev_usage_read_fast(ca, usage);
621 avail = dev_buckets_free(ca, *usage, watermark);
623 if (usage->d[BCH_DATA_need_discard].buckets > avail)
626 if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
627 bch2_gc_gens_async(c);
629 if (should_invalidate_buckets(ca, *usage))
630 bch2_do_invalidates(c);
633 if (cl && !waiting) {
634 closure_wait(&c->freelist_wait, cl);
639 track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
641 ob = ERR_PTR(-BCH_ERR_freelist_empty);
646 closure_wake_up(&c->freelist_wait);
648 ob = likely(freespace)
649 ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
650 : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
652 if (s.skipped_need_journal_commit * 2 > avail)
653 bch2_journal_flush_async(&c->journal, NULL);
655 if (!ob && s.btree_bitmap != BTREE_BITMAP_ANY) {
656 s.btree_bitmap = BTREE_BITMAP_ANY;
660 if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
666 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
669 ob->data_type = data_type;
672 count_event(c, bucket_alloc);
673 else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
674 count_event(c, bucket_alloc_fail);
677 ? trace_bucket_alloc_enabled()
678 : trace_bucket_alloc_fail_enabled())
679 trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);
684 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
685 enum bch_watermark watermark,
686 enum bch_data_type data_type,
689 struct bch_dev_usage usage;
690 struct open_bucket *ob;
692 bch2_trans_do(c, NULL, NULL, 0,
693 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
694 data_type, cl, &usage)));
698 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
699 unsigned l, unsigned r)
701 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
702 (stripe->next_alloc[l] < stripe->next_alloc[r]));
705 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
707 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
708 struct dev_stripe_state *stripe,
709 struct bch_devs_mask *devs)
711 struct dev_alloc_list ret = { .nr = 0 };
714 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
715 ret.devs[ret.nr++] = i;
717 bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
721 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
722 struct dev_stripe_state *stripe,
723 struct bch_dev_usage *usage)
725 u64 *v = stripe->next_alloc + ca->dev_idx;
726 u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
727 u64 free_space_inv = free_space
728 ? div64_u64(1ULL << 48, free_space)
732 if (*v + free_space_inv >= *v)
733 *v += free_space_inv;
737 for (v = stripe->next_alloc;
738 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
739 *v = *v < scale ? 0 : *v - scale;
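/*
 * Rough intuition (numbers purely illustrative): next_alloc[dev] grows by
 * 2^48 / free_space on every allocation, so a device with twice the free space
 * accrues half the penalty per bucket and, since bch2_dev_alloc_list() sorts
 * ascending by next_alloc, ends up receiving roughly twice the allocations.
 * The rescaling loop above just keeps the counters from growing without bound.
 */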
742 void bch2_dev_stripe_increment(struct bch_dev *ca,
743 struct dev_stripe_state *stripe)
745 struct bch_dev_usage usage;
747 bch2_dev_usage_read_fast(ca, &usage);
748 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
751 static int add_new_bucket(struct bch_fs *c,
752 struct open_buckets *ptrs,
753 struct bch_devs_mask *devs_may_alloc,
754 unsigned nr_replicas,
755 unsigned *nr_effective,
758 struct open_bucket *ob)
760 unsigned durability = ob_dev(c, ob)->mi.durability;
762 BUG_ON(*nr_effective >= nr_replicas);
764 __clear_bit(ob->dev, devs_may_alloc->d);
765 *nr_effective += durability;
766 *have_cache |= !durability;
768 ob_push(c, ptrs, ob);
770 if (*nr_effective >= nr_replicas)
777 int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
778 struct open_buckets *ptrs,
779 struct dev_stripe_state *stripe,
780 struct bch_devs_mask *devs_may_alloc,
781 unsigned nr_replicas,
782 unsigned *nr_effective,
785 enum bch_data_type data_type,
786 enum bch_watermark watermark,
789 struct bch_fs *c = trans->c;
790 struct dev_alloc_list devs_sorted =
791 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
792 int ret = -BCH_ERR_insufficient_devices;
794 BUG_ON(*nr_effective >= nr_replicas);
796 for (unsigned i = 0; i < devs_sorted.nr; i++) {
797 struct bch_dev_usage usage;
798 struct open_bucket *ob;
800 unsigned dev = devs_sorted.devs[i];
801 struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
805 if (!ca->mi.durability && *have_cache) {
810 ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
812 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
817 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
822 if (add_new_bucket(c, ptrs, devs_may_alloc,
823 nr_replicas, nr_effective,
824 have_cache, flags, ob)) {
833 /* Allocate from stripes: */
836 * if we can't allocate a new stripe because there are already too many
837 * partially filled stripes, force allocating from an existing stripe even when
838 * it's to a device we don't want:
841 static int bucket_alloc_from_stripe(struct btree_trans *trans,
842 struct open_buckets *ptrs,
843 struct write_point *wp,
844 struct bch_devs_mask *devs_may_alloc,
846 unsigned nr_replicas,
847 unsigned *nr_effective,
849 enum bch_watermark watermark,
853 struct bch_fs *c = trans->c;
854 struct dev_alloc_list devs_sorted;
855 struct ec_stripe_head *h;
856 struct open_bucket *ob;
863 if (ec_open_bucket(c, ptrs))
866 h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
872 devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
874 for (i = 0; i < devs_sorted.nr; i++)
875 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
876 if (!h->s->blocks[ec_idx])
879 ob = c->open_buckets + h->s->blocks[ec_idx];
880 if (ob->dev == devs_sorted.devs[i] &&
881 !test_and_set_bit(ec_idx, h->s->blocks_allocated))
888 ec_stripe_new_get(h->s, STRIPE_REF_io);
890 ret = add_new_bucket(c, ptrs, devs_may_alloc,
891 nr_replicas, nr_effective,
892 have_cache, flags, ob);
894 bch2_ec_stripe_head_put(c, h);
898 /* Sector allocator */
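/*
 * want_bucket() decides whether an open_bucket we already hold can be reused
 * for this write: it must be on an allowed device, match the write point's
 * data type, and match the caller's erasure coding and cache (durability 0)
 * requirements.
 */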
900 static bool want_bucket(struct bch_fs *c,
901 struct write_point *wp,
902 struct bch_devs_mask *devs_may_alloc,
903 bool *have_cache, bool ec,
904 struct open_bucket *ob)
906 struct bch_dev *ca = ob_dev(c, ob);
908 if (!test_bit(ob->dev, devs_may_alloc->d))
911 if (ob->data_type != wp->data_type)
914 if (!ca->mi.durability &&
915 (wp->data_type == BCH_DATA_btree || ec || *have_cache))
918 if (ec != (ob->ec != NULL))
924 static int bucket_alloc_set_writepoint(struct bch_fs *c,
925 struct open_buckets *ptrs,
926 struct write_point *wp,
927 struct bch_devs_mask *devs_may_alloc,
928 unsigned nr_replicas,
929 unsigned *nr_effective,
931 bool ec, unsigned flags)
933 struct open_buckets ptrs_skip = { .nr = 0 };
934 struct open_bucket *ob;
938 open_bucket_for_each(c, &wp->ptrs, ob, i) {
939 if (!ret && want_bucket(c, wp, devs_may_alloc,
941 ret = add_new_bucket(c, ptrs, devs_may_alloc,
942 nr_replicas, nr_effective,
943 have_cache, flags, ob);
945 ob_push(c, &ptrs_skip, ob);
947 wp->ptrs = ptrs_skip;
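/*
 * Next preference after the write point's own buckets: the global partial
 * list, i.e. buckets some other write point left partly full. We scan it
 * newest first, skipping devices that don't appear to have free buckets left
 * at this watermark.
 */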
952 static int bucket_alloc_set_partial(struct bch_fs *c,
953 struct open_buckets *ptrs,
954 struct write_point *wp,
955 struct bch_devs_mask *devs_may_alloc,
956 unsigned nr_replicas,
957 unsigned *nr_effective,
958 bool *have_cache, bool ec,
959 enum bch_watermark watermark,
964 if (!c->open_buckets_partial_nr)
967 spin_lock(&c->freelist_lock);
969 if (!c->open_buckets_partial_nr)
972 for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
973 struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
975 if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
976 struct bch_dev *ca = ob_dev(c, ob);
977 struct bch_dev_usage usage;
980 bch2_dev_usage_read_fast(ca, &usage);
981 avail = dev_buckets_free(ca, usage, watermark);
985 array_remove_item(c->open_buckets_partial,
986 c->open_buckets_partial_nr,
988 ob->on_partial_list = false;
990 ret = add_new_bucket(c, ptrs, devs_may_alloc,
991 nr_replicas, nr_effective,
992 have_cache, flags, ob);
998 spin_unlock(&c->freelist_lock);
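/*
 * __open_bucket_add_buckets() tries, in order: buckets already on the write
 * point, then the partial list, then (for erasure coded writes) a stripe, and
 * only then fresh buckets from each device via bch2_bucket_alloc_set_trans().
 */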
1002 static int __open_bucket_add_buckets(struct btree_trans *trans,
1003 struct open_buckets *ptrs,
1004 struct write_point *wp,
1005 struct bch_devs_list *devs_have,
1008 unsigned nr_replicas,
1009 unsigned *nr_effective,
1011 enum bch_watermark watermark,
1013 struct closure *_cl)
1015 struct bch_fs *c = trans->c;
1016 struct bch_devs_mask devs;
1017 struct open_bucket *ob;
1018 struct closure *cl = NULL;
1022 devs = target_rw_devs(c, wp->data_type, target);
1024 /* Don't allocate from devices we already have pointers to: */
1025 darray_for_each(*devs_have, i)
1026 __clear_bit(*i, devs.d);
1028 open_bucket_for_each(c, ptrs, ob, i)
1029 __clear_bit(ob->dev, devs.d);
1031 if (erasure_code && ec_open_bucket(c, ptrs))
1034 ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
1035 nr_replicas, nr_effective,
1036 have_cache, erasure_code, flags);
1040 ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
1041 nr_replicas, nr_effective,
1042 have_cache, erasure_code, watermark, flags);
1047 ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
1049 nr_replicas, nr_effective,
1051 watermark, flags, _cl);
1055 * Try nonblocking first, so that if one device is full we'll try from
1058 ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
1059 nr_replicas, nr_effective, have_cache,
1060 flags, wp->data_type, watermark, cl);
1062 !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
1063 !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
1066 goto retry_blocking;
1073 static int open_bucket_add_buckets(struct btree_trans *trans,
1074 struct open_buckets *ptrs,
1075 struct write_point *wp,
1076 struct bch_devs_list *devs_have,
1078 unsigned erasure_code,
1079 unsigned nr_replicas,
1080 unsigned *nr_effective,
1082 enum bch_watermark watermark,
1089 ret = __open_bucket_add_buckets(trans, ptrs, wp,
1090 devs_have, target, erasure_code,
1091 nr_replicas, nr_effective, have_cache,
1092 watermark, flags, cl);
1093 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
1094 bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
1095 bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
1096 bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
1098 if (*nr_effective >= nr_replicas)
1102 ret = __open_bucket_add_buckets(trans, ptrs, wp,
1103 devs_have, target, false,
1104 nr_replicas, nr_effective, have_cache,
1105 watermark, flags, cl);
1106 return ret < 0 ? ret : 0;
1110 * should_drop_bucket - check if this open_bucket should go away
1111 * @ob: open_bucket to predicate on
1112 * @c: filesystem handle
1113 * @ca: if set, we're killing buckets for a particular device
1114 * @ec: if true, we're shutting down erasure coding and killing all ec
1116 * open_buckets; if neither @ca nor @ec is given, every open_bucket is dropped
1117 * Returns: true if we should kill this open_bucket
1119 * We're killing open_buckets because we're shutting down a device, erasure
1120 * coding, or the entire filesystem - check if this open_bucket matches:
1122 static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
1123 struct bch_dev *ca, bool ec)
1126 return ob->ec != NULL;
1128 bool drop = ob->dev == ca->dev_idx;
1129 struct open_bucket *ob2;
1132 if (!drop && ob->ec) {
1135 mutex_lock(&ob->ec->lock);
1136 nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
1138 for (i = 0; i < nr_blocks; i++) {
1139 if (!ob->ec->blocks[i])
1142 ob2 = c->open_buckets + ob->ec->blocks[i];
1143 drop |= ob2->dev == ca->dev_idx;
1145 mutex_unlock(&ob->ec->lock);
1154 static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
1155 bool ec, struct write_point *wp)
1157 struct open_buckets ptrs = { .nr = 0 };
1158 struct open_bucket *ob;
1161 mutex_lock(&wp->lock);
1162 open_bucket_for_each(c, &wp->ptrs, ob, i)
1163 if (should_drop_bucket(ob, c, ca, ec))
1164 bch2_open_bucket_put(c, ob);
1166 ob_push(c, &ptrs, ob);
1168 mutex_unlock(&wp->lock);
1171 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
1176 /* Next, close write points that point to this device... */
1177 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1178 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
1180 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
1181 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
1182 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
1184 mutex_lock(&c->btree_reserve_cache_lock);
1185 while (c->btree_reserve_cache_nr) {
1186 struct btree_alloc *a =
1187 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1189 bch2_open_buckets_put(c, &a->ob);
1191 mutex_unlock(&c->btree_reserve_cache_lock);
1193 spin_lock(&c->freelist_lock);
1195 while (i < c->open_buckets_partial_nr) {
1196 struct open_bucket *ob =
1197 c->open_buckets + c->open_buckets_partial[i];
1199 if (should_drop_bucket(ob, c, ca, ec)) {
1200 --c->open_buckets_partial_nr;
1201 swap(c->open_buckets_partial[i],
1202 c->open_buckets_partial[c->open_buckets_partial_nr]);
1203 ob->on_partial_list = false;
1204 spin_unlock(&c->freelist_lock);
1205 bch2_open_bucket_put(c, ob);
1206 spin_lock(&c->freelist_lock);
1211 spin_unlock(&c->freelist_lock);
1213 bch2_ec_stop_dev(c, ca);
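/*
 * Write points are located by the caller-supplied write_point specifier: a
 * pointer-valued specifier is used directly, otherwise writepoint_find() looks
 * it up in a small hash table, falling back to stealing and relabelling the
 * least recently used write point.
 */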
1216 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
1217 unsigned long write_point)
1220 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1222 return &c->write_points_hash[hash];
1225 static struct write_point *__writepoint_find(struct hlist_head *head,
1226 unsigned long write_point)
1228 struct write_point *wp;
1231 hlist_for_each_entry_rcu(wp, head, node)
1232 if (wp->write_point == write_point)
1240 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
1242 u64 stranded = c->write_points_nr * c->bucket_size_max;
1243 u64 free = bch2_fs_usage_read_short(c).free;
1245 return stranded * factor > free;
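/*
 * i.e. refuse when the space that could be stranded in open write points
 * exceeds 1/factor of free space: with factor 32 (try_increase_writepoints())
 * we stop adding write points once they could strand more than ~3% of free
 * space; with factor 8 (try_decrease_writepoints()) we start dropping them
 * once they could strand more than 12.5%.
 */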
1248 static bool try_increase_writepoints(struct bch_fs *c)
1250 struct write_point *wp;
1252 if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
1253 too_many_writepoints(c, 32))
1256 wp = c->write_points + c->write_points_nr++;
1257 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1261 static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
1263 struct bch_fs *c = trans->c;
1264 struct write_point *wp;
1265 struct open_bucket *ob;
1268 mutex_lock(&c->write_points_hash_lock);
1269 if (c->write_points_nr < old_nr) {
1270 mutex_unlock(&c->write_points_hash_lock);
1274 if (c->write_points_nr == 1 ||
1275 !too_many_writepoints(c, 8)) {
1276 mutex_unlock(&c->write_points_hash_lock);
1280 wp = c->write_points + --c->write_points_nr;
1282 hlist_del_rcu(&wp->node);
1283 mutex_unlock(&c->write_points_hash_lock);
1285 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1286 open_bucket_for_each(c, &wp->ptrs, ob, i)
1287 open_bucket_free_unused(c, ob);
1289 mutex_unlock(&wp->lock);
1293 static struct write_point *writepoint_find(struct btree_trans *trans,
1294 unsigned long write_point)
1296 struct bch_fs *c = trans->c;
1297 struct write_point *wp, *oldest;
1298 struct hlist_head *head;
1300 if (!(write_point & 1UL)) {
1301 wp = (struct write_point *) write_point;
1302 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1306 head = writepoint_hash(c, write_point);
1308 wp = __writepoint_find(head, write_point);
1311 bch2_trans_mutex_lock_norelock(trans, &wp->lock);
1312 if (wp->write_point == write_point)
1314 mutex_unlock(&wp->lock);
1317 restart_find_oldest:
1319 for (wp = c->write_points;
1320 wp < c->write_points + c->write_points_nr; wp++)
1321 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1324 bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
1325 bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
1326 if (oldest >= c->write_points + c->write_points_nr ||
1327 try_increase_writepoints(c)) {
1328 mutex_unlock(&c->write_points_hash_lock);
1329 mutex_unlock(&oldest->lock);
1330 goto restart_find_oldest;
1333 wp = __writepoint_find(head, write_point);
1334 if (wp && wp != oldest) {
1335 mutex_unlock(&c->write_points_hash_lock);
1336 mutex_unlock(&oldest->lock);
1341 hlist_del_rcu(&wp->node);
1342 wp->write_point = write_point;
1343 hlist_add_head_rcu(&wp->node, head);
1344 mutex_unlock(&c->write_points_hash_lock);
1346 wp->last_used = local_clock();
1350 static noinline void
1351 deallocate_extra_replicas(struct bch_fs *c,
1352 struct open_buckets *ptrs,
1353 struct open_buckets *ptrs_no_use,
1354 unsigned extra_replicas)
1356 struct open_buckets ptrs2 = { 0 };
1357 struct open_bucket *ob;
1360 open_bucket_for_each(c, ptrs, ob, i) {
1361 unsigned d = ob_dev(c, ob)->mi.durability;
1363 if (d && d <= extra_replicas) {
1364 extra_replicas -= d;
1365 ob_push(c, ptrs_no_use, ob);
1367 ob_push(c, &ptrs2, ob);
1375 * Get us an open_bucket we can allocate from, return with it locked:
1377 int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
1379 unsigned erasure_code,
1380 struct write_point_specifier write_point,
1381 struct bch_devs_list *devs_have,
1382 unsigned nr_replicas,
1383 unsigned nr_replicas_required,
1384 enum bch_watermark watermark,
1387 struct write_point **wp_ret)
1389 struct bch_fs *c = trans->c;
1390 struct write_point *wp;
1391 struct open_bucket *ob;
1392 struct open_buckets ptrs;
1393 unsigned nr_effective, write_points_nr;
1398 if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
1399 erasure_code = false;
1401 BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
1403 BUG_ON(!nr_replicas || !nr_replicas_required);
1407 write_points_nr = c->write_points_nr;
1410 *wp_ret = wp = writepoint_find(trans, write_point.v);
1412 ret = bch2_trans_relock(trans);
1416 /* metadata may not allocate on cache devices: */
1417 if (wp->data_type != BCH_DATA_user)
1420 if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1421 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1422 target, erasure_code,
1423 nr_replicas, &nr_effective,
1424 &have_cache, watermark,
1427 bch2_err_matches(ret, BCH_ERR_transaction_restart))
1430 /* Don't retry from all devices if we're out of open buckets: */
1431 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
1432 int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1433 target, erasure_code,
1434 nr_replicas, &nr_effective,
1435 &have_cache, watermark,
1438 bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
1439 bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
1446 * Only try to allocate cache (durability = 0 devices) from the
1451 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1453 nr_replicas, &nr_effective,
1454 &have_cache, watermark,
1457 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1458 target, erasure_code,
1459 nr_replicas, &nr_effective,
1460 &have_cache, watermark,
1464 BUG_ON(!ret && nr_effective < nr_replicas);
1466 if (erasure_code && !ec_open_bucket(c, &ptrs))
1467 pr_debug("failed to get ec bucket: ret %i", ret);
1469 if (ret == -BCH_ERR_insufficient_devices &&
1470 nr_effective >= nr_replicas_required)
1476 if (nr_effective > nr_replicas)
1477 deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
1479 /* Free buckets we didn't use: */
1480 open_bucket_for_each(c, &wp->ptrs, ob, i)
1481 open_bucket_free_unused(c, ob);
1485 wp->sectors_free = UINT_MAX;
1487 open_bucket_for_each(c, &wp->ptrs, ob, i)
1488 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1490 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1494 open_bucket_for_each(c, &wp->ptrs, ob, i)
1495 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1496 ob_push(c, &ptrs, ob);
1498 open_bucket_free_unused(c, ob);
1501 mutex_unlock(&wp->lock);
1503 if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1504 try_decrease_writepoints(trans, write_points_nr))
1507 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
1508 bch2_err_matches(ret, BCH_ERR_freelist_empty))
1510 ? -BCH_ERR_bucket_alloc_blocked
1511 : -BCH_ERR_ENOSPC_bucket_alloc;
1516 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1518 struct bch_dev *ca = ob_dev(c, ob);
1520 return (struct bch_extent_ptr) {
1521 .type = 1 << BCH_EXTENT_ENTRY_ptr,
1524 .offset = bucket_to_sector(ca, ob->bucket) +
1525 ca->mi.bucket_size -
1530 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1531 struct bkey_i *k, unsigned sectors,
1534 bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
1538 * bch2_alloc_sectors_done(): finished allocating from @wp - unlock it and drop
1539 * our refs on buckets that are now full, keeping partially filled ones for later
1541 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1543 bch2_alloc_sectors_done_inlined(c, wp);
1546 static inline void writepoint_init(struct write_point *wp,
1547 enum bch_data_type type)
1549 mutex_init(&wp->lock);
1550 wp->data_type = type;
1552 INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1553 INIT_LIST_HEAD(&wp->writes);
1554 spin_lock_init(&wp->writes_lock);
1557 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1559 struct open_bucket *ob;
1560 struct write_point *wp;
1562 mutex_init(&c->write_points_hash_lock);
1563 c->write_points_nr = ARRAY_SIZE(c->write_points);
1565 /* open bucket 0 is a sentinel NULL: */
1566 spin_lock_init(&c->open_buckets[0].lock);
1568 for (ob = c->open_buckets + 1;
1569 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1570 spin_lock_init(&ob->lock);
1571 c->open_buckets_nr_free++;
1573 ob->freelist = c->open_buckets_freelist;
1574 c->open_buckets_freelist = ob - c->open_buckets;
1577 writepoint_init(&c->btree_write_point, BCH_DATA_btree);
1578 writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
1579 writepoint_init(&c->copygc_write_point, BCH_DATA_user);
1581 for (wp = c->write_points;
1582 wp < c->write_points + c->write_points_nr; wp++) {
1583 writepoint_init(wp, BCH_DATA_user);
1585 wp->last_used = local_clock();
1586 wp->write_point = (unsigned long) wp;
1587 hlist_add_head_rcu(&wp->node,
1588 writepoint_hash(c, wp->write_point));
1592 static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
1594 struct bch_dev *ca = ob_dev(c, ob);
1595 unsigned data_type = ob->data_type;
1596 barrier(); /* READ_ONCE() doesn't work on bitfields */
1598 prt_printf(out, "%zu ref %u ",
1599 ob - c->open_buckets,
1600 atomic_read(&ob->pin));
1601 bch2_prt_data_type(out, data_type);
1602 prt_printf(out, " %u:%llu gen %u allocated %u/%u",
1603 ob->dev, ob->bucket, ob->gen,
1604 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
1606 prt_printf(out, " ec idx %llu", ob->ec->idx);
1607 if (ob->on_partial_list)
1608 prt_str(out, " partial");
1612 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1614 struct open_bucket *ob;
1618 for (ob = c->open_buckets;
1619 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1621 spin_lock(&ob->lock);
1622 if (ob->valid && !ob->on_partial_list)
1623 bch2_open_bucket_to_text(out, c, ob);
1624 spin_unlock(&ob->lock);
1630 void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
1635 spin_lock(&c->freelist_lock);
1637 for (i = 0; i < c->open_buckets_partial_nr; i++)
1638 bch2_open_bucket_to_text(out, c,
1639 c->open_buckets + c->open_buckets_partial[i]);
1641 spin_unlock(&c->freelist_lock);
1645 static const char * const bch2_write_point_states[] = {
1647 WRITE_POINT_STATES()
1652 static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
1653 struct write_point *wp)
1655 struct open_bucket *ob;
1658 prt_printf(out, "%lu: ", wp->write_point);
1659 prt_human_readable_u64(out, wp->sectors_allocated);
1661 prt_printf(out, " last wrote: ");
1662 bch2_pr_time_units(out, sched_clock() - wp->last_used);
1664 for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
1665 prt_printf(out, " %s: ", bch2_write_point_states[i]);
1666 bch2_pr_time_units(out, wp->time[i]);
1671 printbuf_indent_add(out, 2);
1672 open_bucket_for_each(c, &wp->ptrs, ob, i)
1673 bch2_open_bucket_to_text(out, c, ob);
1674 printbuf_indent_sub(out, 2);
1677 void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
1679 struct write_point *wp;
1681 prt_str(out, "Foreground write points\n");
1682 for (wp = c->write_points;
1683 wp < c->write_points + ARRAY_SIZE(c->write_points);
1685 bch2_write_point_to_text(out, c, wp);
1687 prt_str(out, "Copygc write point\n");
1688 bch2_write_point_to_text(out, c, &c->copygc_write_point);
1690 prt_str(out, "Rebalance write point\n");
1691 bch2_write_point_to_text(out, c, &c->rebalance_write_point);
1693 prt_str(out, "Btree write point\n");
1694 bch2_write_point_to_text(out, c, &c->btree_write_point);
1697 void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
1699 unsigned nr[BCH_DATA_NR];
1701 memset(nr, 0, sizeof(nr));
1703 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1704 nr[c->open_buckets[i].data_type]++;
1706 printbuf_tabstop_push(out, 24);
1708 percpu_down_read(&c->mark_lock);
1709 prt_printf(out, "hidden\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.hidden));
1710 prt_printf(out, "btree\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.btree));
1711 prt_printf(out, "data\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.data));
1712 prt_printf(out, "cached\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.cached));
1713 prt_printf(out, "reserved\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.reserved));
1714 prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
1715 prt_printf(out, "nr_inodes\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes));
1716 percpu_up_read(&c->mark_lock);
1719 prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
1720 prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
1721 prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
1722 prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
1723 prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
1724 prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
1725 prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
1728 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
1730 struct bch_fs *c = ca->fs;
1731 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
1732 unsigned nr[BCH_DATA_NR];
1734 memset(nr, 0, sizeof(nr));
1736 for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
1737 nr[c->open_buckets[i].data_type]++;
1739 printbuf_tabstop_push(out, 12);
1740 printbuf_tabstop_push(out, 16);
1741 printbuf_tabstop_push(out, 16);
1742 printbuf_tabstop_push(out, 16);
1743 printbuf_tabstop_push(out, 16);
1745 bch2_dev_usage_to_text(out, &stats);
1749 prt_printf(out, "reserves:\n");
1750 for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
1751 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));
1755 printbuf_tabstops_reset(out);
1756 printbuf_tabstop_push(out, 12);
1757 printbuf_tabstop_push(out, 16);
1759 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
1760 prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
1763 void bch2_print_allocator_stuck(struct bch_fs *c)
1765 struct printbuf buf = PRINTBUF;
1767 prt_printf(&buf, "Allocator stuck? Waited for 10 seconds\n");
1769 prt_printf(&buf, "Allocator debug:\n");
1770 printbuf_indent_add(&buf, 2);
1771 bch2_fs_alloc_debug_to_text(&buf, c);
1772 printbuf_indent_sub(&buf, 2);
1775 for_each_online_member(c, ca) {
1776 prt_printf(&buf, "Dev %u:\n", ca->dev_idx);
1777 printbuf_indent_add(&buf, 2);
1778 bch2_dev_alloc_debug_to_text(&buf, ca);
1779 printbuf_indent_sub(&buf, 2);
1783 prt_printf(&buf, "Copygc debug:\n");
1784 printbuf_indent_add(&buf, 2);
1785 bch2_copygc_wait_to_text(&buf, c);
1786 printbuf_indent_sub(&buf, 2);
1789 prt_printf(&buf, "Journal debug:\n");
1790 printbuf_indent_add(&buf, 2);
1791 bch2_journal_debug_to_text(&buf, &c->journal);
1792 printbuf_indent_sub(&buf, 2);
1794 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1795 printbuf_exit(&buf);