// SPDX-License-Identifier: GPL-2.0
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_update.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "disk_accounting.h"
#include "rebalance.h"
#include "recovery_passes.h"
#include "subvolume.h"

#include <linux/preempt.h>

void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
	memset(usage, 0, sizeof(*usage));
	acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}

static u64 reserve_factor(u64 r)
{
	return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}

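/*
 * A worked example, assuming RESERVE_FACTOR is 6: for r = 1000 sectors,
 * round_up(1000, 64) == 1024 and 1024 >> 6 == 16, so the padded
 * reservation is 1016 sectors, i.e. roughly r + r/64, rounded so that
 * small reservations still get nonzero headroom.
 */
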
static struct bch_fs_usage_short
__bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;
	u64 data, reserved;

	ret.capacity = c->capacity -
		percpu_u64_get(&c->usage->hidden);

	data		= percpu_u64_get(&c->usage->data) +
		percpu_u64_get(&c->usage->btree);
	reserved	= percpu_u64_get(&c->usage->reserved) +
		percpu_u64_get(c->online_reserved);

	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
	ret.free	= ret.capacity - ret.used;

	ret.nr_inodes	= percpu_u64_get(&c->usage->nr_inodes);

	return ret;
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *c)
{
	struct bch_fs_usage_short ret;

	percpu_down_read(&c->mark_lock);
	ret = __bch2_fs_usage_read_short(c);
	percpu_up_read(&c->mark_lock);

	return ret;
}

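/*
 * Sketch of a typical caller (e.g. for statfs-style reporting), using only
 * the fields filled in above; the helper takes mark_lock itself, so no
 * extra locking is needed:
 *
 *	struct bch_fs_usage_short u = bch2_fs_usage_read_short(c);
 *	u64 avail  = u.free;		// capacity - used, clamped >= 0
 *	u64 inodes = u.nr_inodes;
 */
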
void bch2_dev_usage_to_text(struct printbuf *out,
			    struct bch_dev *ca,
			    struct bch_dev_usage *usage)
{
	if (out->nr_tabstops < 5) {
		printbuf_tabstops_reset(out);
		printbuf_tabstop_push(out, 12);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 16);
	}

	prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

	for (unsigned i = 0; i < BCH_DATA_NR; i++) {
		bch2_prt_data_type(out, i);
		prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
			   usage->d[i].buckets,
			   usage->d[i].sectors,
			   usage->d[i].fragmented);
	}

	prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}

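/*
 * In bcachefs printbufs, '\t' advances to the next tabstop and '\r'
 * right-justifies the preceding field against it, so the output above
 * lines up in columns, roughly (illustrative numbers only):
 *
 *	            buckets         sectors      fragmented
 *	sb                8            4096               0
 *	journal        1024          524288               0
 *	user         819200       419430400          123456
 *	capacity    1048576
 */
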
static int bch2_check_fix_ptr(struct btree_trans *trans,
			      struct bkey_s_c k,
			      struct extent_ptr_decoded p,
			      const union bch_extent_entry *entry,
			      bool *do_update)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (!ca) {
		if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
				trans, ptr_to_invalid_device,
				"pointer to missing device %u\n"
				"while marking %s",
				p.ptr.dev,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		return 0;
	}

	struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
	if (!g) {
		if (fsck_err(trans, ptr_to_invalid_device,
			     "pointer to invalid bucket on device %u\n"
			     "while marking %s",
			     p.ptr.dev,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
		goto out;
	}

	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

	if (fsck_err_on(!g->gen_valid,
			trans, ptr_to_missing_alloc_key,
			"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (!p.ptr.cached &&
		    (g->data_type != BCH_DATA_btree ||
		     data_type == BCH_DATA_btree)) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= 0;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
			trans, ptr_gen_newer_than_bucket_gen,
			"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
			trans, stale_dirty_ptr,
			"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
			bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
			p.ptr.gen, g->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
		*do_update = true;

	if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
		goto out;

	if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
			trans, ptr_bucket_data_type_mismatch,
			"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			"while marking %s",
			p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
			bch2_data_type_str(g->data_type),
			bch2_data_type_str(data_type),
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (data_type == BCH_DATA_btree) {
			g->gen_valid		= true;
			g->gen			= p.ptr.gen;
			g->data_type		= data_type;
			g->stripe_sectors	= 0;
			g->dirty_sectors	= 0;
			g->cached_sectors	= 0;
		} else {
			*do_update = true;
		}
	}

	if (p.has_ec) {
		struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

		if (fsck_err_on(!m || !m->alive,
				trans, ptr_to_missing_stripe,
				"pointer to nonexistent stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;

		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
				trans, ptr_to_incorrect_stripe,
				"pointer does not match stripe %llu\n"
				"while marking %s",
				(u64) p.ec.idx,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			*do_update = true;
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

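/*
 * bch2_check_fix_ptr() only diagnoses and sets *do_update; the actual
 * repair happens below in bch2_check_fix_ptrs(), which drops or corrects
 * bad pointers in a mutable copy of the key and writes it back with
 * BTREE_TRIGGER_norun so the repair update doesn't re-run accounting
 * triggers for state GC has already reconstructed.
 */
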
int bch2_check_fix_ptrs(struct btree_trans *trans,
			enum btree_id btree, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs_c, p, entry_c) {
		ret = bch2_check_fix_ptr(trans, k, p, entry_c, &do_update);
		if (ret)
			goto err;
	}

	if (do_update) {
		if (flags & BTREE_TRIGGER_is_root) {
			bch_err(c, "cannot update btree roots yet");
			ret = -EINVAL;
			goto err;
		}

		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			goto err;

		rcu_read_lock();
		bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, !bch2_dev_exists(c, ptr->dev));
		rcu_read_unlock();

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			rcu_read_lock();
			bkey_for_each_ptr(ptrs, ptr) {
				struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

				ptr->gen = g->gen;
			}
			rcu_read_unlock();
		} else {
			struct bkey_ptrs ptrs;
			union bch_extent_entry *entry;

			rcu_read_lock();
restart_drop_ptrs:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
				struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
				struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);

				if ((p.ptr.cached &&
				     (!g->gen_valid || gen_cmp(p.ptr.gen, g->gen) > 0)) ||
				    (!p.ptr.cached &&
				     gen_cmp(p.ptr.gen, g->gen) < 0) ||
				    gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX ||
				    (g->data_type &&
				     g->data_type != data_type)) {
					bch2_bkey_drop_ptr(bkey_i_to_s(new), &entry->ptr);
					goto restart_drop_ptrs;
				}
			}
			rcu_read_unlock();
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									   entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		struct btree_iter iter;
		bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
					  BTREE_ITER_intent|BTREE_ITER_all_snapshots);
		ret = bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, new,
					  BTREE_UPDATE_internal_snapshot_node|
					  BTREE_TRIGGER_norun);
		bch2_trans_iter_exit(trans, &iter);
		if (ret)
			goto err;

		if (level)
			bch2_btree_node_update_key_early(trans, btree, level - 1, k, new);
	}
err:
	printbuf_exit(&buf);
	return ret;
}

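/*
 * bch2_bucket_ref_update() validates one pointer against the bucket it
 * points into before applying a sector delta. The generation checks form
 * a ladder: ptr gen ahead of bucket gen (corruption), bucket gen more
 * than BUCKET_GC_GEN_MAX ahead of ptr gen (too stale to trust), stale
 * cached pointer (harmless, returns 1 so callers can skip it), stale
 * dirty pointer (corruption). Only when all checks pass is
 * *bucket_sectors adjusted.
 */
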
int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
			   struct bkey_s_c k,
			   const struct bch_extent_ptr *ptr,
			   s64 sectors, enum bch_data_type ptr_data_type,
			   u8 b_gen, u8 bucket_data_type,
			   u32 *bucket_sectors)
{
	struct bch_fs *c = trans->c;
	size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
	struct printbuf buf = PRINTBUF;
	bool inserting = sectors > 0;
	int ret = 0;

	BUG_ON(!sectors);

	if (gen_after(ptr->gen, b_gen)) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_gen_newer_than_bucket_gen,
			     "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_too_stale,
			     "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (b_gen != ptr->gen && ptr->cached) {
		ret = 1;
		goto out;
	}

	if (b_gen != ptr->gen) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, stale_dirty_ptr,
			     "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bucket_gen_get(ca, bucket_nr),
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     ptr->gen,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, ptr_bucket_data_type_mismatch,
			     "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type),
			     bch2_data_type_str(ptr_data_type),
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		goto out;
	}

	if ((u64) *bucket_sectors + sectors > U32_MAX) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_sector_count_overflow,
			     "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
			     "while marking %s",
			     ptr->dev, bucket_nr, b_gen,
			     bch2_data_type_str(bucket_data_type ?: ptr_data_type),
			     *bucket_sectors, sectors,
			     (printbuf_reset(&buf),
			      bch2_bkey_val_to_text(&buf, c, k), buf.buf));
		if (inserting)
			goto err;
		sectors = -*bucket_sectors;
	}

	*bucket_sectors += sectors;
out:
	printbuf_exit(&buf);
	return ret;
err:
fsck_err:
	bch2_dump_trans_updates(trans);
	bch2_inconsistent_error(c);
	ret = -BCH_ERR_bucket_ref_update;
	goto out;
}

void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
	static int warned_disk_usage = 0;
	bool warn = false;

	percpu_down_read(&c->mark_lock);
	struct bch_fs_usage_base *src = &trans->fs_usage_delta;

	s64 added = src->btree + src->data + src->reserved;

	/*
	 * Not allowed to reduce sectors_available except by getting a
	 * reservation:
	 */
	s64 should_not_have_added = added - (s64) disk_res_sectors;
	if (unlikely(should_not_have_added > 0)) {
		u64 old, new;

		old = atomic64_read(&c->sectors_available);
		do {
			new = max_t(s64, 0, old - should_not_have_added);
		} while (!atomic64_try_cmpxchg(&c->sectors_available,
					       &old, new));

		added -= should_not_have_added;
		warn = true;
	}

	if (added > 0) {
		trans->disk_res->sectors -= added;
		this_cpu_sub(*c->online_reserved, added);
	}

	preempt_disable();
	struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
	acc_u64s((u64 *) dst, (u64 *) src, sizeof(*src) / sizeof(u64));
	preempt_enable();

	percpu_up_read(&c->mark_lock);

	if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
		bch2_trans_inconsistent(trans,
					"disk usage increased %lli more than %llu sectors reserved",
					should_not_have_added, disk_res_sectors);
}

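/*
 * Example of the clamp above: with a 100 sector reservation
 * (disk_res_sectors == 100) and a transaction whose deltas add up to 120
 * sectors, should_not_have_added == 20; those 20 sectors are clawed back
 * from c->sectors_available (saturating at zero), and the mismatch is
 * reported once via warned_disk_usage.
 */
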
/* KEY_TYPE_extent: */

static int __mark_pointer(struct btree_trans *trans, struct bch_dev *ca,
			  struct bkey_s_c k,
			  const struct extent_ptr_decoded *p,
			  s64 sectors, enum bch_data_type ptr_data_type,
			  struct bch_alloc_v4 *a,
			  bool insert)
{
	u32 *dst_sectors = p->has_ec	? &a->stripe_sectors :
		!p->ptr.cached		? &a->dirty_sectors :
					  &a->cached_sectors;
	int ret = bch2_bucket_ref_update(trans, ca, k, &p->ptr, sectors, ptr_data_type,
					 a->gen, a->data_type, dst_sectors);

	if (ret)
		return ret;

	alloc_data_type_set(a, ptr_data_type);
	return 0;
}

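/*
 * Note the priority order above: a pointer into an erasure coded stripe
 * counts against stripe_sectors even if it is also cached, a plain dirty
 * pointer against dirty_sectors, and only a cached, non-stripe pointer
 * against cached_sectors.
 */
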
static int bch2_trigger_pointer(struct btree_trans *trans,
				enum btree_id btree_id, unsigned level,
				struct bkey_s_c k, struct extent_ptr_decoded p,
				const union bch_extent_entry *entry,
				s64 *sectors,
				enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	bool insert = !(flags & BTREE_TRIGGER_overwrite);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bkey_i_backpointer bp;
	bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp);

	*sectors = insert ? bp.v.bucket_len : -(s64) bp.v.bucket_len;

	struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
	if (unlikely(!ca)) {
		if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
			ret = -BCH_ERR_trigger_pointer;
		goto err;
	}

	struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, bucket, 0);
		ret = PTR_ERR_OR_ZERO(a) ?:
			__mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &a->v, insert);
		if (ret)
			goto err;

		if (!p.ptr.cached) {
			ret = bch2_bucket_backpointer_mod(trans, k, &bp, insert);
			if (ret)
				goto err;
		}
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bucket *g = gc_bucket(ca, bucket.offset);
		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
					    p.ptr.dev,
					    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
			ret = -BCH_ERR_trigger_pointer;
			goto err;
		}

		bucket_lock(g);
		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
		ret = __mark_pointer(trans, ca, k, &p, *sectors, bp.v.data_type, &new, insert);
		alloc_to_bucket(g, new);
		bucket_unlock(g);

		if (!ret)
			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	}
err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

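/*
 * For pointers into an erasure coded stripe, sector counts live in two
 * places: the stripe's per-block counts and a replicas accounting entry
 * derived from the stripe key. The transactional path below updates the
 * stripes btree directly; the GC path updates the in-memory gc_stripes
 * radix tree instead.
 */
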
static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
				   struct bkey_s_c k,
				   struct extent_ptr_decoded p,
				   enum bch_data_type data_type,
				   s64 sectors,
				   enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_transactional) {
		struct btree_iter iter;
		struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
				BTREE_ID_stripes, POS(0, p.ec.idx),
				BTREE_ITER_with_updates, stripe);
		int ret = PTR_ERR_OR_ZERO(s);
		if (unlikely(ret)) {
			bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
						   "pointer to nonexistent stripe %llu",
						   (u64) p.ec.idx);
			goto err;
		}

		if (!bch2_ptr_matches_stripe(&s->v, p)) {
			bch2_trans_inconsistent(trans,
						"stripe pointer doesn't match stripe %llu",
						(u64) p.ec.idx);
			ret = -BCH_ERR_trigger_stripe_pointer;
			goto err;
		}

		stripe_blockcount_set(&s->v, p.ec.block,
				      stripe_blockcount_get(&s->v, p.ec.block) +
				      sectors);

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
		acc.replicas.data_type = data_type;
		ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
err:
		bch2_trans_iter_exit(trans, &iter);
		return ret;
	}

	if (flags & BTREE_TRIGGER_gc) {
		struct bch_fs *c = trans->c;

		struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
		if (!m) {
			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
				(u64) p.ec.idx);
			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
		}

		mutex_lock(&c->ec_stripes_heap_lock);

		if (!m || !m->alive) {
			mutex_unlock(&c->ec_stripes_heap_lock);
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);
			bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
					    (u64) p.ec.idx, buf.buf);
			printbuf_exit(&buf);
			bch2_inconsistent_error(c);
			return -BCH_ERR_trigger_stripe_pointer;
		}

		m->block_sectors[p.ec.block] += sectors;

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_replicas,
		};
		memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
		mutex_unlock(&c->ec_stripes_heap_lock);

		acc.replicas.data_type = data_type;
		int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, true);
		if (ret)
			return ret;
	}

	return 0;
}

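/*
 * __trigger_extent() walks every pointer in an extent once and folds the
 * result into several accounting keys: replicas (how redundantly the data
 * is stored), per-snapshot and per-inode usage, per-btree usage for
 * interior nodes, and per-compression-type totals accumulated in the
 * { count, uncompressed, compressed } triple below.
 */
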
static int __trigger_extent(struct btree_trans *trans,
			    enum btree_id btree_id, unsigned level,
			    struct bkey_s_c k,
			    enum btree_iter_update_trigger_flags flags,
			    s64 *replicas_sectors)
{
	bool gc = flags & BTREE_TRIGGER_gc;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	enum bch_data_type data_type = bkey_is_btree_ptr(k.k)
		? BCH_DATA_btree
		: BCH_DATA_user;
	int ret = 0;

	struct disk_accounting_pos acc_replicas_key = {
		.type			= BCH_DISK_ACCOUNTING_replicas,
		.replicas.data_type	= data_type,
		.replicas.nr_devs	= 0,
		.replicas.nr_required	= 1,
	};

	struct disk_accounting_pos acct_compression_key = {
		.type			= BCH_DISK_ACCOUNTING_compression,
	};
	u64 compression_acct[3] = { 1, 0, 0 };

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		s64 disk_sectors = 0;
		ret = bch2_trigger_pointer(trans, btree_id, level, k, p, entry, &disk_sectors, flags);
		if (ret < 0)
			return ret;

		bool stale = ret > 0;

		if (p.ptr.cached && stale)
			continue;

		if (p.ptr.cached) {
			ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
			if (ret)
				return ret;
		} else if (!p.has_ec) {
			*replicas_sectors += disk_sectors;
			replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
		} else {
			ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
			if (ret)
				return ret;

			/*
			 * There may be other dirty pointers in this extent, but
			 * if so they're not required for mounting if we have an
			 * erasure coded pointer in this extent:
			 */
			acc_replicas_key.replicas.nr_required = 0;
		}

		if (acct_compression_key.compression.type &&
		    acct_compression_key.compression.type != p.crc.compression_type) {
			if (flags & BTREE_TRIGGER_overwrite)
				bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

			ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
						       ARRAY_SIZE(compression_acct), gc);
			if (ret)
				return ret;

			compression_acct[0] = 1;
			compression_acct[1] = 0;
			compression_acct[2] = 0;
		}

		acct_compression_key.compression.type = p.crc.compression_type;
		if (p.crc.compression_type) {
			compression_acct[1] += p.crc.uncompressed_size;
			compression_acct[2] += p.crc.compressed_size;
		}
	}

	if (acc_replicas_key.replicas.nr_devs) {
		ret = bch2_disk_accounting_mod(trans, &acc_replicas_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acc_replicas_key.replicas.nr_devs && !level && k.k->p.snapshot) {
		struct disk_accounting_pos acc_snapshot_key = {
			.type		= BCH_DISK_ACCOUNTING_snapshot,
			.snapshot.id	= k.k->p.snapshot,
		};
		ret = bch2_disk_accounting_mod(trans, &acc_snapshot_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	}

	if (acct_compression_key.compression.type) {
		if (flags & BTREE_TRIGGER_overwrite)
			bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

		ret = bch2_disk_accounting_mod(trans, &acct_compression_key, compression_acct,
					       ARRAY_SIZE(compression_acct), gc);
		if (ret)
			return ret;
	}

	if (level) {
		struct disk_accounting_pos acc_btree_key = {
			.type		= BCH_DISK_ACCOUNTING_btree,
			.btree.id	= btree_id,
		};
		ret = bch2_disk_accounting_mod(trans, &acc_btree_key, replicas_sectors, 1, gc);
		if (ret)
			return ret;
	} else {
		bool insert = !(flags & BTREE_TRIGGER_overwrite);
		struct disk_accounting_pos acc_inum_key = {
			.type		= BCH_DISK_ACCOUNTING_inum,
			.inum.inum	= k.k->p.inode,
		};
		s64 v[3] = {
			insert ? 1 : -1,
			insert ? k.k->size : -((s64) k.k->size),
			*replicas_sectors,
		};
		ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc);
		if (ret)
			return ret;
	}

	return 0;
}

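/*
 * Worked example for the compression accumulator: an extent with two lz4
 * pointers (each 128 sectors uncompressed, 64 compressed) followed by a
 * zstd pointer flushes { 1, 256, 128 } to the lz4 accounting key when the
 * type changes, then restarts the triple for zstd. On overwrite the
 * triple is negated first, so removing an extent cancels its insert.
 */
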
int bch2_trigger_extent(struct btree_trans *trans,
			enum btree_id btree, unsigned level,
			struct bkey_s_c old, struct bkey_s new,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
	struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
	unsigned new_ptrs_bytes = (void *) new_ptrs.end - (void *) new_ptrs.start;
	unsigned old_ptrs_bytes = (void *) old_ptrs.end - (void *) old_ptrs.start;

	if (unlikely(flags & BTREE_TRIGGER_check_repair))
		return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

	/* if pointers aren't changing - nothing to do: */
	if (new_ptrs_bytes == old_ptrs_bytes &&
	    !memcmp(new_ptrs.start,
		    old_ptrs.start,
		    new_ptrs_bytes))
		return 0;

	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 old_replicas_sectors = 0, new_replicas_sectors = 0;

		if (old.k->type) {
			int ret = __trigger_extent(trans, btree, level, old,
						   flags & ~BTREE_TRIGGER_insert,
						   &old_replicas_sectors);
			if (ret)
				return ret;
		}

		if (new.k->type) {
			int ret = __trigger_extent(trans, btree, level, new.s_c,
						   flags & ~BTREE_TRIGGER_overwrite,
						   &new_replicas_sectors);
			if (ret)
				return ret;
		}

		int need_rebalance_delta = 0;
		s64 need_rebalance_sectors_delta = 0;

		s64 s = bch2_bkey_sectors_need_rebalance(c, old);
		need_rebalance_delta -= s != 0;
		need_rebalance_sectors_delta -= s;

		s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
		need_rebalance_delta += s != 0;
		need_rebalance_sectors_delta += s;

		if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
			int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
							      new.k->p, need_rebalance_delta > 0);
			if (ret)
				return ret;
		}

		if (need_rebalance_sectors_delta) {
			struct disk_accounting_pos acc = {
				.type = BCH_DISK_ACCOUNTING_rebalance_work,
			};
			int ret = bch2_disk_accounting_mod(trans, &acc, &need_rebalance_sectors_delta, 1,
							   flags & BTREE_TRIGGER_gc);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
				 enum btree_id btree_id, unsigned level, struct bkey_s_c k,
				 enum btree_iter_update_trigger_flags flags)
{
	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
		s64 sectors = k.k->size;

		if (flags & BTREE_TRIGGER_overwrite)
			sectors = -sectors;

		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_persistent_reserved,
			.persistent_reserved.nr_replicas = bkey_s_c_to_reservation(k).v->nr_replicas,
		};

		return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, flags & BTREE_TRIGGER_gc);
	}

	return 0;
}

int bch2_trigger_reservation(struct btree_trans *trans,
			     enum btree_id btree_id, unsigned level,
			     struct bkey_s_c old, struct bkey_s new,
			     enum btree_iter_update_trigger_flags flags)
{
	return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}

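/*
 * trigger_run_overwrite_then_insert() runs the helper twice: on the old
 * key with BTREE_TRIGGER_insert masked off (so the overwrite path above
 * negates its sectors), then on the new key with BTREE_TRIGGER_overwrite
 * masked off, so e.g. resizing a reservation nets out to just the delta.
 */
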
/* Mark superblocks: */

static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
					     struct bch_dev *ca, u64 b,
					     enum bch_data_type type,
					     unsigned sectors)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;

	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(ca->dev_idx, b));
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	if (a->v.data_type && type && a->v.data_type != type) {
		bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
		log_fsck_err(trans, bucket_metadata_type_mismatch,
			     "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
			     "while marking %s",
			     iter.pos.inode, iter.pos.offset, a->v.gen,
			     bch2_data_type_str(a->v.data_type),
			     bch2_data_type_str(type),
			     bch2_data_type_str(type));
		ret = -BCH_ERR_metadata_bucket_inconsistency;
		goto err;
	}

	if (a->v.data_type	!= type ||
	    a->v.dirty_sectors	!= sectors) {
		a->v.data_type		= type;
		a->v.dirty_sectors	= sectors;
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *ca,
				     u64 b, enum bch_data_type data_type, unsigned sectors,
				     enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	struct bucket *g = gc_bucket(ca, b);
	if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u when marking metadata type %s",
				    ca->dev_idx, bch2_data_type_str(data_type)))
		goto err;

	bucket_lock(g);
	struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

	if (bch2_fs_inconsistent_on(g->data_type &&
				    g->data_type != data_type, c,
				    "different types of data in same bucket: %s, %s",
				    bch2_data_type_str(g->data_type),
				    bch2_data_type_str(data_type)))
		goto err_unlock;

	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
				    "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
				    ca->dev_idx, b, g->gen,
				    bch2_data_type_str(g->data_type ?: data_type),
				    g->dirty_sectors, sectors))
		goto err_unlock;

	g->data_type = data_type;
	g->dirty_sectors += sectors;
	struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
	bucket_unlock(g);
	ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
	return ret;
err_unlock:
	bucket_unlock(g);
err:
	return -BCH_ERR_metadata_bucket_inconsistency;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type, unsigned sectors,
				    enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}

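/*
 * Superblock and journal buckets are not covered by keys in any btree,
 * so they are marked directly: at runtime via a transactional update of
 * the bucket's alloc key, and during GC by updating the in-memory bucket
 * and device counters in bch2_mark_metadata_bucket() above.
 */
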
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
					    struct bch_dev *ca, u64 start, u64 end,
					    enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
					    enum btree_iter_update_trigger_flags flags)
{
	do {
		u64 b = sector_to_bucket(ca, start);
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		if (b != *bucket && *bucket_sectors) {
			int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
								  type, *bucket_sectors, flags);
			if (ret)
				return ret;

			*bucket_sectors = 0;
		}

		*bucket = b;
		*bucket_sectors += sectors;
		start += sectors;
	} while (start < end);

	return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
				    enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;

	mutex_lock(&c->sb_lock);
	struct bch_sb_layout layout = ca->disk_sb.sb->layout;
	mutex_unlock(&c->sb_lock);

	u64 bucket = 0;
	unsigned i, bucket_sectors = 0;
	int ret;

	for (i = 0; i < layout.nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout.sb_offset[i]);

		if (offset == BCH_SB_SECTOR) {
			ret = bch2_trans_mark_metadata_sectors(trans, ca,
						0, BCH_SB_SECTOR,
						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
			if (ret)
				return ret;
		}

		ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
						offset + (1 << layout.sb_max_size_bits),
						BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	if (bucket_sectors) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
						bucket, BCH_DATA_sb, bucket_sectors, flags);
		if (ret)
			return ret;
	}

	for (i = 0; i < ca->journal.nr; i++) {
		ret = bch2_trans_mark_metadata_bucket(trans, ca,
						ca->journal.buckets[i],
						BCH_DATA_journal, ca->mi.bucket_size, flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
			__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}

int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
				  enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

bool bch2_is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	for (i = 0; i < ca->journal.nr; i++)
		if (b == ca->journal.buckets[i])
			return true;

	return false;
}

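/*
 * The test above is the usual half-open interval overlap check:
 * [offset, end) intersects [b_offset, b_end) unless one range ends
 * before the other begins; e.g. a superblock span of sectors [8, 520)
 * misses a bucket spanning [1024, 2048) because 520 <= 1024.
 */
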
/* Disk reservations: */

#define SECTORS_CACHE	1024

int __bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
				u64 sectors, enum bch_reservation_flags flags)
{
	struct bch_fs_pcpu *pcpu;
	u64 old, get;
	u64 sectors_available;
	int ret;

	percpu_down_read(&c->mark_lock);
	preempt_disable();
	pcpu = this_cpu_ptr(c->pcpu);

	if (sectors <= pcpu->sectors_available)
		goto out;

	old = atomic64_read(&c->sectors_available);
	do {
		get = min((u64) sectors + SECTORS_CACHE, old);

		if (get < sectors) {
			preempt_enable();
			goto recalculate;
		}
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, old - get));

	pcpu->sectors_available		+= get;

out:
	pcpu->sectors_available		-= sectors;
	this_cpu_add(*c->online_reserved, sectors);
	res->sectors			+= sectors;

	preempt_enable();
	percpu_up_read(&c->mark_lock);
	return 0;

recalculate:
	mutex_lock(&c->sectors_available_lock);

	percpu_u64_set(&c->pcpu->sectors_available, 0);
	sectors_available = avail_factor(__bch2_fs_usage_read_short(c).free);

	if (sectors_available && (flags & BCH_DISK_RESERVATION_PARTIAL))
		sectors = min(sectors, sectors_available);

	if (sectors <= sectors_available ||
	    (flags & BCH_DISK_RESERVATION_NOFAIL)) {
		atomic64_set(&c->sectors_available,
			     max_t(s64, 0, sectors_available - sectors));
		this_cpu_add(*c->online_reserved, sectors);
		res->sectors		+= sectors;
		ret = 0;
	} else {
		atomic64_set(&c->sectors_available, sectors_available);
		ret = -BCH_ERR_ENOSPC_disk_reservation;
	}

	mutex_unlock(&c->sectors_available_lock);
	percpu_up_read(&c->mark_lock);

	return ret;
}

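/*
 * Fast path sketch: each CPU keeps a small local pool
 * (pcpu->sectors_available), refilled from the shared atomic counter in
 * batches of the request size plus SECTORS_CACHE (1024). E.g. a 16
 * sector reservation grabs 1040 sectors once and then satisfies the
 * next ~64 similar requests without touching the shared counter; only
 * when the atomic counter runs dry do we recalculate under
 * sectors_available_lock.
 */
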
/* Startup/shutdown: */

void bch2_buckets_nouse_free(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		kvfree_rcu_mightsleep(ca->buckets_nouse);
		ca->buckets_nouse = NULL;
	}
}

int bch2_buckets_nouse_alloc(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		BUG_ON(ca->buckets_nouse);

		ca->buckets_nouse = bch2_kvmalloc(BITS_TO_LONGS(ca->mi.nbuckets) *
						  sizeof(unsigned long),
						  GFP_KERNEL|__GFP_ZERO);
		if (!ca->buckets_nouse) {
			bch2_dev_put(ca);
			return -BCH_ERR_ENOMEM_buckets_nouse;
		}
	}

	return 0;
}

static void bucket_gens_free_rcu(struct rcu_head *rcu)
{
	struct bucket_gens *buckets =
		container_of(rcu, struct bucket_gens, rcu);

	kvfree(buckets);
}

int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
	bool resize = ca->bucket_gens != NULL;
	int ret;

	lockdep_assert_held(&c->state_lock);

	if (resize && ca->buckets_nouse)
		return -BCH_ERR_no_resize_with_buckets_nouse;

	bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
				    GFP_KERNEL|__GFP_ZERO);
	if (!bucket_gens) {
		ret = -BCH_ERR_ENOMEM_bucket_gens;
		goto err;
	}

	bucket_gens->first_bucket = ca->mi.first_bucket;
	bucket_gens->nbuckets	= nbuckets;
	bucket_gens->nbuckets_minus_first =
		bucket_gens->nbuckets - bucket_gens->first_bucket;

	old_bucket_gens = rcu_dereference_protected(ca->bucket_gens, 1);

	if (resize) {
		bucket_gens->nbuckets = min(bucket_gens->nbuckets,
					    old_bucket_gens->nbuckets);
		bucket_gens->nbuckets_minus_first =
			bucket_gens->nbuckets - bucket_gens->first_bucket;
		memcpy(bucket_gens->b,
		       old_bucket_gens->b,
		       bucket_gens->nbuckets);
	}

	rcu_assign_pointer(ca->bucket_gens, bucket_gens);
	bucket_gens = old_bucket_gens;

	nbuckets = ca->mi.nbuckets;

	ret = 0;
err:
	if (bucket_gens)
		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);

	return ret;
}

void bch2_dev_buckets_free(struct bch_dev *ca)
{
	kvfree(ca->buckets_nouse);
	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
	free_percpu(ca->usage);
}

int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	ca->usage = alloc_percpu(struct bch_dev_usage);
	if (!ca->usage)
		return -BCH_ERR_ENOMEM_usage_init;

	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}