1 // SPDX-License-Identifier: GPL-2.0
6 #include "alloc_background.h"
7 #include "alloc_foreground.h"
8 #include "backpointers.h"
12 #include "btree_update.h"
13 #include "btree_write_buffer.h"
16 #include "disk_groups.h"
26 #include <linux/sort.h>
30 #include <linux/raid/pq.h>
31 #include <linux/raid/xor.h>
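/*
 * Small software-RAID helpers used by the erasure coding code below.
 *
 * raid5_recov() rebuilds one missing block as the XOR of all the others:
 * with P = D0 ^ D1 ^ ... ^ Dn-1, any single block is P xored with the
 * remaining blocks.  It temporarily swaps the failed block into slot 0,
 * seeds it from data[1], then folds in the rest with xor_blocks() in
 * batches of at most MAX_XOR_BLOCKS.
 */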
static void raid5_recov(unsigned disks, unsigned failed_idx,
			size_t size, void **data)
{
	unsigned i = 2, nr;

	BUG_ON(failed_idx >= disks);

	swap(data[0], data[failed_idx]);
	memcpy(data[0], data[1], size);

	while (i < disks) {
		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
		xor_blocks(nr, size, data[0], data + i);
		i += nr;
	}
	swap(data[0], data[failed_idx]);
}
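/*
 * Generate parity for a stripe of nd data blocks and np parity blocks:
 * the P block is the XOR of the data blocks, computed by reusing
 * raid5_recov() with the P slot treated as the "failed" block; with two
 * parity blocks the raid6 syndrome generator fills in both P and Q
 * (Q being the Galois-field syndrome).
 */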
52 static void raid_gen(int nd, int np, size_t size, void **v)
55 raid5_recov(nd + np, nd, size, v);
57 raid6_call.gen_syndrome(nd + np, size, v);
61 static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
68 raid5_recov(nd + 1, ir[0], size, v);
70 raid6_call.gen_syndrome(nd + np, size, v);
74 /* data+data failure. */
75 raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
76 } else if (ir[0] < nd) {
77 /* data + p/q failure */
79 if (ir[1] == nd) /* data + p failure */
80 raid6_datap_recov(nd + np, size, ir[0], v);
81 else { /* data + q failure */
82 raid5_recov(nd + 1, ir[0], size, v);
83 raid6_call.gen_syndrome(nd + np, size, v);
86 raid_gen(nd, np, size, v);
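/*
 * Assumption: when this file is built outside the kernel (e.g. for the
 * userspace tools), the helpers above are replaced by the external raid
 * library included below.
 */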
96 #include <raid/raid.h>
102 struct ec_stripe_buf *buf;
107 /* Stripes btree keys: */
109 int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
110 enum bch_validate_flags flags,
111 struct printbuf *err)
113 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
116 bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
117 bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
119 "stripe at bad pos");
121 bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
123 "incorrect value size (%zu < %u)",
124 bkey_val_u64s(k.k), stripe_val_u64s(s));
126 ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
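/*
 * Stripe keys live in their own btree, indexed by stripe number in the
 * offset field of the key's position.  Validation above rejects keys
 * outside POS(0, 1)..POS(0, U32_MAX), values too small to hold the
 * declared number of blocks, and invalid block pointers.
 */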
131 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
134 const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
135 struct bch_stripe s = {};
137 memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
139 unsigned nr_data = s.nr_blocks - s.nr_redundant;
141 prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
143 le16_to_cpu(s.sectors),
146 bch2_prt_csum_type(out, s.csum_type);
147 prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
149 for (unsigned i = 0; i < s.nr_blocks; i++) {
150 const struct bch_extent_ptr *ptr = sp->ptrs + i;
152 if ((void *) ptr >= bkey_val_end(k))
155 bch2_extent_ptr_to_text(out, c, ptr);
157 if (s.csum_type < BCH_CSUM_NR &&
159 stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
160 prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
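/*
 * Trigger helpers: when a stripe key is created, updated or deleted, the
 * alloc info for each bucket it points to is updated to record (or clear)
 * stripe membership.  __mark_stripe_bucket() below applies that change to
 * an already looked-up bch_alloc_v4, with inconsistency checks for the
 * common ways the bucket and the stripe can disagree.
 */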
166 static int __mark_stripe_bucket(struct btree_trans *trans,
168 struct bkey_s_c_stripe s,
169 unsigned ptr_idx, bool deleting,
171 struct bch_alloc_v4 *a,
172 enum btree_iter_update_trigger_flags flags)
174 const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
175 unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
176 bool parity = ptr_idx >= nr_data;
177 enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
178 s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
179 struct printbuf buf = PRINTBUF;
182 struct bch_fs *c = trans->c;
187 if (bch2_trans_inconsistent_on(a->stripe ||
188 a->stripe_redundancy, trans,
189 "bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
190 bucket.inode, bucket.offset, a->gen,
191 bch2_data_type_str(a->data_type),
193 a->stripe, s.k->p.offset,
194 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
199 if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
200 "bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
201 bucket.inode, bucket.offset, a->gen,
202 bch2_data_type_str(a->data_type),
205 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
210 if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
211 a->stripe_redundancy != s.v->nr_redundant, trans,
212 "bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
213 bucket.inode, bucket.offset, a->gen,
215 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
220 if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
221 "bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
222 bucket.inode, bucket.offset, a->gen,
223 bch2_data_type_str(a->data_type),
224 bch2_data_type_str(data_type),
225 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
230 if (bch2_trans_inconsistent_on(parity &&
231 (a->dirty_sectors != -sectors ||
232 a->cached_sectors), trans,
233 "bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
234 bucket.inode, bucket.offset, a->gen,
237 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
244 ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
245 a->gen, a->data_type, &a->dirty_sectors);
251 a->stripe = s.k->p.offset;
252 a->stripe_redundancy = s.v->nr_redundant;
255 a->stripe_redundancy = 0;
258 alloc_data_type_set(a, data_type);
264 static int mark_stripe_bucket(struct btree_trans *trans,
265 struct bkey_s_c_stripe s,
266 unsigned ptr_idx, bool deleting,
267 enum btree_iter_update_trigger_flags flags)
269 struct bch_fs *c = trans->c;
270 const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
273 struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
275 if (!(flags & BTREE_TRIGGER_overwrite))
280 struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
282 if (flags & BTREE_TRIGGER_transactional) {
283 struct bkey_i_alloc_v4 *a =
284 bch2_trans_start_alloc_update(trans, bucket);
285 ret = PTR_ERR_OR_ZERO(a) ?:
286 __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags);
289 if (flags & BTREE_TRIGGER_gc) {
290 percpu_down_read(&c->mark_lock);
291 struct bucket *g = gc_bucket(ca, bucket.offset);
293 struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
294 ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
296 alloc_to_bucket(g, new);
297 bch2_dev_usage_update(c, ca, &old, &new, 0, true);
300 percpu_up_read(&c->mark_lock);
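/*
 * Walk every block of the old and/or new stripe key and mark the buckets
 * whose pointers actually changed; blocks whose extent pointer is
 * byte-identical in both keys are skipped.
 */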
307 static int mark_stripe_buckets(struct btree_trans *trans,
308 struct bkey_s_c old, struct bkey_s_c new,
309 enum btree_iter_update_trigger_flags flags)
311 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
312 ? bkey_s_c_to_stripe(old).v : NULL;
313 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
314 ? bkey_s_c_to_stripe(new).v : NULL;
316 BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);
318 unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
320 for (unsigned i = 0; i < nr_blocks; i++) {
321 if (new_s && old_s &&
322 !memcmp(&new_s->ptrs[i],
324 sizeof(new_s->ptrs[i])))
328 int ret = mark_stripe_bucket(trans,
329 bkey_s_c_to_stripe(new), i, false, flags);
335 int ret = mark_stripe_bucket(trans,
336 bkey_s_c_to_stripe(old), i, true, flags);
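/*
 * Main trigger for KEY_TYPE_stripe.  Three paths are visible below:
 *  - transactional: update the replicas accounting for the old/new keys
 *    and mark the stripe's buckets as part of the same transaction;
 *  - atomic: keep the in-memory stripes radix tree and the
 *    ec_stripes_heap in sync with the key being committed;
 *  - gc: rebuild the gc_stripes shadow copy used while gc/fsck
 *    recomputes accounting.
 */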
345 int bch2_trigger_stripe(struct btree_trans *trans,
346 enum btree_id btree, unsigned level,
347 struct bkey_s_c old, struct bkey_s _new,
348 enum btree_iter_update_trigger_flags flags)
350 struct bkey_s_c new = _new.s_c;
351 struct bch_fs *c = trans->c;
352 u64 idx = new.k->p.offset;
353 const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
354 ? bkey_s_c_to_stripe(old).v : NULL;
355 const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
356 ? bkey_s_c_to_stripe(new).v : NULL;
358 if (unlikely(flags & BTREE_TRIGGER_check_repair))
359 return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);
361 if (flags & BTREE_TRIGGER_transactional) {
363 * If the pointers aren't changing, we don't need to do anything:
365 if (new_s && old_s &&
366 new_s->nr_blocks == old_s->nr_blocks &&
367 new_s->nr_redundant == old_s->nr_redundant &&
368 !memcmp(old_s->ptrs, new_s->ptrs,
369 new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
372 BUG_ON(new_s && old_s &&
373 (new_s->nr_blocks != old_s->nr_blocks ||
374 new_s->nr_redundant != old_s->nr_redundant));
377 s64 sectors = le16_to_cpu(new_s->sectors);
379 struct bch_replicas_padded r;
380 bch2_bkey_to_replicas(&r.e, new);
381 int ret = bch2_update_replicas_list(trans, &r.e, sectors * new_s->nr_redundant);
387 s64 sectors = -((s64) le16_to_cpu(old_s->sectors));
389 struct bch_replicas_padded r;
390 bch2_bkey_to_replicas(&r.e, old);
391 int ret = bch2_update_replicas_list(trans, &r.e, sectors * old_s->nr_redundant);
396 int ret = mark_stripe_buckets(trans, old, new, flags);
401 if (flags & BTREE_TRIGGER_atomic) {
402 struct stripe *m = genradix_ptr(&c->stripes, idx);
405 struct printbuf buf1 = PRINTBUF;
406 struct printbuf buf2 = PRINTBUF;
408 bch2_bkey_val_to_text(&buf1, c, old);
409 bch2_bkey_val_to_text(&buf2, c, new);
410 bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
412 "new %s", idx, buf1.buf, buf2.buf);
413 printbuf_exit(&buf2);
414 printbuf_exit(&buf1);
415 bch2_inconsistent_error(c);
420 bch2_stripes_heap_del(c, m, idx);
422 memset(m, 0, sizeof(*m));
424 m->sectors = le16_to_cpu(new_s->sectors);
425 m->algorithm = new_s->algorithm;
426 m->nr_blocks = new_s->nr_blocks;
427 m->nr_redundant = new_s->nr_redundant;
428 m->blocks_nonempty = 0;
430 for (unsigned i = 0; i < new_s->nr_blocks; i++)
431 m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
434 bch2_stripes_heap_insert(c, m, idx);
436 bch2_stripes_heap_update(c, m, idx);
440 if (flags & BTREE_TRIGGER_gc) {
441 struct gc_stripe *m =
442 genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
445 bch_err(c, "error allocating memory for gc_stripes, idx %llu",
447 return -BCH_ERR_ENOMEM_mark_stripe;
450 * This will be wrong when we bring back runtime gc: we should
451 * be unmarking the old key and then marking the new key
454 m->sectors = le16_to_cpu(new_s->sectors);
455 m->nr_blocks = new_s->nr_blocks;
456 m->nr_redundant = new_s->nr_redundant;
458 for (unsigned i = 0; i < new_s->nr_blocks; i++)
459 m->ptrs[i] = new_s->ptrs[i];
461 bch2_bkey_to_replicas(&m->r.e, new);
464 * gc recalculates this field from stripe ptr
467 memset(m->block_sectors, 0, sizeof(m->block_sectors));
469 int ret = mark_stripe_buckets(trans, old, new, flags);
473 ret = bch2_update_replicas(c, new, &m->r.e,
474 ((s64) m->sectors * m->nr_redundant),
477 struct printbuf buf = PRINTBUF;
479 bch2_bkey_val_to_text(&buf, c, new);
480 bch2_fs_fatal_error(c, ": no replicas entry for %s", buf.buf);
489 /* returns blocknr in stripe that we matched: */
490 static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
491 struct bkey_s_c k, unsigned *block)
493 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
494 unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
496 bkey_for_each_ptr(ptrs, ptr)
497 for (i = 0; i < nr_data; i++)
498 if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
499 le16_to_cpu(s->sectors))) {
507 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
510 case KEY_TYPE_extent: {
511 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
512 const union bch_extent_entry *entry;
514 extent_for_each_entry(e, entry)
515 if (extent_entry_type(entry) ==
516 BCH_EXTENT_ENTRY_stripe_ptr &&
517 entry->stripe_ptr.idx == idx)
529 static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
531 if (buf->key.k.type == KEY_TYPE_stripe) {
532 struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
535 for (i = 0; i < s->v.nr_blocks; i++) {
536 kvfree(buf->data[i]);
542 /* XXX: this is a non-mempoolified memory allocation: */
543 static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
544 unsigned offset, unsigned size)
546 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
547 unsigned csum_granularity = 1U << v->csum_granularity_bits;
548 unsigned end = offset + size;
551 BUG_ON(end > le16_to_cpu(v->sectors));
553 offset = round_down(offset, csum_granularity);
554 end = min_t(unsigned, le16_to_cpu(v->sectors),
555 round_up(end, csum_granularity));
557 buf->offset = offset;
558 buf->size = end - offset;
560 memset(buf->valid, 0xFF, sizeof(buf->valid));
562 for (i = 0; i < v->nr_blocks; i++) {
563 buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
570 ec_stripe_buf_exit(buf);
571 return -BCH_ERR_ENOMEM_stripe_buf;
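/*
 * Each stripe block is checksummed in csum_granularity-sized chunks rather
 * than as a single unit, so a partial read or rebuild only has to checksum
 * the chunks it touches.  E.g. with 256-sector blocks and a granularity of
 * 64 sectors, stripe_csums_per_device() comes to 4 checksums per block.
 */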
576 static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
577 unsigned block, unsigned offset)
579 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
580 unsigned csum_granularity = 1 << v->csum_granularity_bits;
581 unsigned end = buf->offset + buf->size;
582 unsigned len = min(csum_granularity, end - offset);
584 BUG_ON(offset >= end);
585 BUG_ON(offset < buf->offset);
586 BUG_ON(offset & (csum_granularity - 1));
587 BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
588 (len & (csum_granularity - 1)));
590 return bch2_checksum(NULL, v->csum_type,
592 buf->data[block] + ((offset - buf->offset) << 9),
596 static void ec_generate_checksums(struct ec_stripe_buf *buf)
598 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
599 unsigned i, j, csums_per_device = stripe_csums_per_device(v);
605 BUG_ON(buf->size != le16_to_cpu(v->sectors));
607 for (i = 0; i < v->nr_blocks; i++)
608 for (j = 0; j < csums_per_device; j++)
609 stripe_csum_set(v, i, j,
610 ec_block_checksum(buf, i, j << v->csum_granularity_bits));
613 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
615 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
616 unsigned csum_granularity = 1 << v->csum_granularity_bits;
622 for (i = 0; i < v->nr_blocks; i++) {
623 unsigned offset = buf->offset;
624 unsigned end = buf->offset + buf->size;
626 if (!test_bit(i, buf->valid))
629 while (offset < end) {
630 unsigned j = offset >> v->csum_granularity_bits;
631 unsigned len = min(csum_granularity, end - offset);
632 struct bch_csum want = stripe_csum_get(v, i, j);
633 struct bch_csum got = ec_block_checksum(buf, i, offset);
635 if (bch2_crc_cmp(want, got)) {
636 struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
638 struct printbuf err = PRINTBUF;
640 prt_str(&err, "stripe ");
641 bch2_csum_err_msg(&err, v->csum_type, want, got);
642 prt_printf(&err, " for %ps at %u of\n ", (void *) _RET_IP_, i);
643 bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
644 bch_err_ratelimited(ca, "%s", err.buf);
647 bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
650 clear_bit(i, buf->valid);
659 /* Erasure coding: */
static void ec_generate_ec(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
	unsigned nr_data = v->nr_blocks - v->nr_redundant;
	unsigned bytes = le16_to_cpu(v->sectors) << 9;

	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
}

static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
{
	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;

	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
}
677 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
679 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
680 unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
681 unsigned nr_data = v->nr_blocks - v->nr_redundant;
682 unsigned bytes = buf->size << 9;
684 if (ec_nr_failed(buf) > v->nr_redundant) {
685 bch_err_ratelimited(c,
686 "error doing reconstruct read: unable to read enough blocks");
690 for (i = 0; i < nr_data; i++)
691 if (!test_bit(i, buf->valid))
692 failed[nr_failed++] = i;
694 raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
700 static void ec_block_endio(struct bio *bio)
702 struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
703 struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
704 struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
705 struct bch_dev *ca = ec_bio->ca;
706 struct closure *cl = bio->bi_private;
708 if (bch2_dev_io_err_on(bio->bi_status, ca,
710 ? BCH_MEMBER_ERROR_write
711 : BCH_MEMBER_ERROR_read,
712 "erasure coding %s error: %s",
713 bio_data_dir(bio) ? "write" : "read",
714 bch2_blk_status_to_str(bio->bi_status)))
715 clear_bit(ec_bio->idx, ec_bio->buf->valid);
717 if (dev_ptr_stale(ca, ptr)) {
718 bch_err_ratelimited(ca->fs,
719 "error %s stripe: stale pointer after io",
720 bio_data_dir(bio) == READ ? "reading from" : "writing to");
721 clear_bit(ec_bio->idx, ec_bio->buf->valid);
724 bio_put(&ec_bio->bio);
725 percpu_ref_put(&ca->io_ref);
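/*
 * Read or write one block of a stripe buffer.  The buffer may be larger
 * than a single bio can cover, so the loop below splits it into multiple
 * bios; each completion runs ec_block_endio(), which clears the block's
 * bit in buf->valid on an I/O error or a stale pointer so that
 * reconstruction knows which blocks can be trusted.
 */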
729 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
730 blk_opf_t opf, unsigned idx, struct closure *cl)
732 struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
733 unsigned offset = 0, bytes = buf->size << 9;
734 struct bch_extent_ptr *ptr = &v->ptrs[idx];
735 enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
738 int rw = op_is_write(opf);
740 struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
742 clear_bit(idx, buf->valid);
746 if (dev_ptr_stale(ca, ptr)) {
747 bch_err_ratelimited(c,
748 "error %s stripe: stale pointer",
749 rw == READ ? "reading from" : "writing to");
750 clear_bit(idx, buf->valid);
755 this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
757 while (offset < bytes) {
758 unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
759 DIV_ROUND_UP(bytes, PAGE_SIZE));
760 unsigned b = min_t(size_t, bytes - offset,
761 nr_iovecs << PAGE_SHIFT);
762 struct ec_bio *ec_bio;
764 ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
775 ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
776 ec_bio->bio.bi_end_io = ec_block_endio;
777 ec_bio->bio.bi_private = cl;
779 bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
782 percpu_ref_get(&ca->io_ref);
784 submit_bio(&ec_bio->bio);
789 percpu_ref_put(&ca->io_ref);
792 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
793 struct ec_stripe_buf *stripe)
795 struct btree_iter iter;
799 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
800 POS(0, idx), BTREE_ITER_slots);
804 if (k.k->type != KEY_TYPE_stripe) {
808 bkey_reassemble(&stripe->key, k);
810 bch2_trans_iter_exit(trans, &iter);
814 /* recovery read path: */
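/*
 * Called when a normal read of an extent with a stripe pointer fails:
 * look up the stripe key, read every block of the stripe, validate
 * checksums, reconstruct the missing data with raid_rec() via
 * ec_do_recov(), and copy the requested range back into the original
 * rbio.
 */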
815 int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
817 struct bch_fs *c = trans->c;
818 struct ec_stripe_buf *buf;
820 struct bch_stripe *v;
824 closure_init_stack(&cl);
826 BUG_ON(!rbio->pick.has_ec);
828 buf = kzalloc(sizeof(*buf), GFP_NOFS);
830 return -BCH_ERR_ENOMEM_ec_read_extent;
832 ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
834 bch_err_ratelimited(c,
835 "error doing reconstruct read: error %i looking up stripe", ret);
840 v = &bkey_i_to_stripe(&buf->key)->v;
842 if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
843 bch_err_ratelimited(c,
844 "error doing reconstruct read: pointer doesn't match stripe");
849 offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
850 if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
851 bch_err_ratelimited(c,
852 "error doing reconstruct read: read is bigger than stripe");
857 ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
861 for (i = 0; i < v->nr_blocks; i++)
862 ec_block_io(c, buf, REQ_OP_READ, i, &cl);
866 if (ec_nr_failed(buf) > v->nr_redundant) {
867 bch_err_ratelimited(c,
868 "error doing reconstruct read: unable to read enough blocks");
873 ec_validate_checksums(c, buf);
875 ret = ec_do_recov(c, buf);
879 memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
880 buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
882 ec_stripe_buf_exit(buf);
887 /* stripe bucket accounting: */
889 static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
891 ec_stripes_heap n, *h = &c->ec_stripes_heap;
893 if (idx >= h->size) {
894 if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
895 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
897 mutex_lock(&c->ec_stripes_heap_lock);
898 if (n.size > h->size) {
899 memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
903 mutex_unlock(&c->ec_stripes_heap_lock);
908 if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
909 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
911 if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
912 !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
913 return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
918 static int ec_stripe_mem_alloc(struct btree_trans *trans,
919 struct btree_iter *iter)
921 return allocate_dropping_locks_errcode(trans,
922 __ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
926 * Hash table of open stripes:
927 * Stripes that are being created or modified are kept in a hash table, so that
928 * stripe deletion can skip them.
931 static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
933 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
934 struct ec_stripe_new *s;
936 hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
942 static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
946 spin_lock(&c->ec_stripes_new_lock);
947 ret = __bch2_stripe_is_open(c, idx);
948 spin_unlock(&c->ec_stripes_new_lock);
953 static bool bch2_try_open_stripe(struct bch_fs *c,
954 struct ec_stripe_new *s,
959 spin_lock(&c->ec_stripes_new_lock);
960 ret = !__bch2_stripe_is_open(c, idx);
962 unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
965 hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
967 spin_unlock(&c->ec_stripes_new_lock);
972 static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
976 spin_lock(&c->ec_stripes_new_lock);
977 hlist_del_init(&s->hash);
978 spin_unlock(&c->ec_stripes_new_lock);
983 /* Heap of all existing stripes, ordered by blocks_nonempty */
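/*
 * The heap is ordered by blocks_nonempty, so a stripe with no live blocks
 * sorts to the top; stripe_idx_to_delete() below returns the top entry
 * when its block count has dropped to zero (and the stripe isn't
 * currently open), making empty stripes O(1) to find for deletion.
 */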
985 static u64 stripe_idx_to_delete(struct bch_fs *c)
987 ec_stripes_heap *h = &c->ec_stripes_heap;
989 lockdep_assert_held(&c->ec_stripes_heap_lock);
992 h->data[0].blocks_nonempty == 0 &&
993 !bch2_stripe_is_open(c, h->data[0].idx))
994 return h->data[0].idx;
static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
				      struct ec_stripe_heap_entry l,
				      struct ec_stripe_heap_entry r)
{
	return ((l.blocks_nonempty > r.blocks_nonempty) -
		(l.blocks_nonempty < r.blocks_nonempty));
}

static inline void ec_stripes_heap_set_backpointer(ec_stripes_heap *h,
						   size_t i)
{
	struct bch_fs *c = container_of(h, struct bch_fs, ec_stripes_heap);

	genradix_ptr(&c->stripes, h->data[i].idx)->heap_idx = i;
}

static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
{
	ec_stripes_heap *h = &c->ec_stripes_heap;
	struct stripe *m = genradix_ptr(&c->stripes, idx);

	BUG_ON(m->heap_idx >= h->used);
	BUG_ON(h->data[m->heap_idx].idx != idx);
}
1024 void bch2_stripes_heap_del(struct bch_fs *c,
1025 struct stripe *m, size_t idx)
1027 mutex_lock(&c->ec_stripes_heap_lock);
1028 heap_verify_backpointer(c, idx);
1030 heap_del(&c->ec_stripes_heap, m->heap_idx,
1031 ec_stripes_heap_cmp,
1032 ec_stripes_heap_set_backpointer);
1033 mutex_unlock(&c->ec_stripes_heap_lock);
1036 void bch2_stripes_heap_insert(struct bch_fs *c,
1037 struct stripe *m, size_t idx)
1039 mutex_lock(&c->ec_stripes_heap_lock);
1040 BUG_ON(heap_full(&c->ec_stripes_heap));
1042 heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
1044 .blocks_nonempty = m->blocks_nonempty,
1046 ec_stripes_heap_cmp,
1047 ec_stripes_heap_set_backpointer);
1049 heap_verify_backpointer(c, idx);
1050 mutex_unlock(&c->ec_stripes_heap_lock);
1053 void bch2_stripes_heap_update(struct bch_fs *c,
1054 struct stripe *m, size_t idx)
1056 ec_stripes_heap *h = &c->ec_stripes_heap;
1060 mutex_lock(&c->ec_stripes_heap_lock);
1061 heap_verify_backpointer(c, idx);
1063 h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
1066 heap_sift_up(h, i, ec_stripes_heap_cmp,
1067 ec_stripes_heap_set_backpointer);
1068 heap_sift_down(h, i, ec_stripes_heap_cmp,
1069 ec_stripes_heap_set_backpointer);
1071 heap_verify_backpointer(c, idx);
1073 do_deletes = stripe_idx_to_delete(c) != 0;
1074 mutex_unlock(&c->ec_stripes_heap_lock);
1077 bch2_do_stripe_deletes(c);
1080 /* stripe deletion */
1082 static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
1084 struct bch_fs *c = trans->c;
1085 struct btree_iter iter;
1087 struct bkey_s_c_stripe s;
1090 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
1096 if (k.k->type != KEY_TYPE_stripe) {
1097 bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
1102 s = bkey_s_c_to_stripe(k);
1103 for (unsigned i = 0; i < s.v->nr_blocks; i++)
1104 if (stripe_blockcount_get(s.v, i)) {
1105 struct printbuf buf = PRINTBUF;
1107 bch2_bkey_val_to_text(&buf, c, k);
1108 bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
1109 printbuf_exit(&buf);
1114 ret = bch2_btree_delete_at(trans, &iter, 0);
1116 bch2_trans_iter_exit(trans, &iter);
1120 static void ec_stripe_delete_work(struct work_struct *work)
1123 container_of(work, struct bch_fs, ec_stripe_delete_work);
1126 mutex_lock(&c->ec_stripes_heap_lock);
1127 u64 idx = stripe_idx_to_delete(c);
1128 mutex_unlock(&c->ec_stripes_heap_lock);
1133 int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1134 ec_stripe_delete(trans, idx));
1140 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
void bch2_do_stripe_deletes(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
1150 /* stripe creation: */
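/*
 * Lifecycle of a new stripe, as implemented below: buckets are allocated
 * and filled with user data through the normal write path; once the data
 * buckets are full, ec_stripe_create() generates parity and checksums,
 * writes the parity blocks, inserts (or updates) the stripe key, and
 * finally rewrites the extents it covers to add stripe pointers.
 */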
1152 static int ec_stripe_key_update(struct btree_trans *trans,
1153 struct bkey_i_stripe *new,
1156 struct bch_fs *c = trans->c;
1157 struct btree_iter iter;
1161 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
1162 new->k.p, BTREE_ITER_intent);
1167 if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
1168 bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
1169 create ? "creating" : "updating",
1170 bch2_bkey_types[k.k->type]);
1175 if (k.k->type == KEY_TYPE_stripe) {
1176 const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
1179 if (old->nr_blocks != new->v.nr_blocks) {
1180 bch_err(c, "error updating stripe: nr_blocks does not match");
1185 for (i = 0; i < new->v.nr_blocks; i++) {
1186 unsigned v = stripe_blockcount_get(old, i);
1189 (old->ptrs[i].dev != new->v.ptrs[i].dev ||
1190 old->ptrs[i].gen != new->v.ptrs[i].gen ||
1191 old->ptrs[i].offset != new->v.ptrs[i].offset));
1193 stripe_blockcount_set(&new->v, i, v);
1197 ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
1199 bch2_trans_iter_exit(trans, &iter);
1203 static int ec_stripe_update_extent(struct btree_trans *trans,
1205 struct bpos bucket, u8 gen,
1206 struct ec_stripe_buf *s,
1207 struct bpos *bp_pos)
1209 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1210 struct bch_fs *c = trans->c;
1211 struct bch_backpointer bp;
1212 struct btree_iter iter;
1214 const struct bch_extent_ptr *ptr_c;
1215 struct bch_extent_ptr *ec_ptr = NULL;
1216 struct bch_extent_stripe_ptr stripe_ptr;
1218 int ret, dev, block;
1220 ret = bch2_get_next_backpointer(trans, ca, bucket, gen,
1221 bp_pos, &bp, BTREE_ITER_cached);
1224 if (bpos_eq(*bp_pos, SPOS_MAX))
1228 struct printbuf buf = PRINTBUF;
1229 struct btree_iter node_iter;
1232 b = bch2_backpointer_get_node(trans, &node_iter, *bp_pos, bp);
1233 bch2_trans_iter_exit(trans, &node_iter);
1238 prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
1239 bch2_backpointer_to_text(&buf, &bp);
1241 bch2_fs_inconsistent(c, "%s", buf.buf);
1242 printbuf_exit(&buf);
1246 k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
1252 * extent no longer exists - we could flush the btree
1253 * write buffer and retry to verify, but no need:
1258 if (extent_has_stripe_ptr(k, s->key.k.p.offset))
1261 ptr_c = bkey_matches_stripe(v, k, &block);
1263 * It doesn't generally make sense to erasure code cached ptrs:
1264 * XXX: should we be incrementing a counter?
1266 if (!ptr_c || ptr_c->cached)
1269 dev = v->ptrs[block].dev;
1271 n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
1272 ret = PTR_ERR_OR_ZERO(n);
1276 bkey_reassemble(n, k);
1278 bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
1279 ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
1282 stripe_ptr = (struct bch_extent_stripe_ptr) {
1283 .type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
1285 .redundancy = v->nr_redundant,
1286 .idx = s->key.k.p.offset,
1289 __extent_entry_insert(n,
1290 (union bch_extent_entry *) ec_ptr,
1291 (union bch_extent_entry *) &stripe_ptr);
1293 ret = bch2_trans_update(trans, &iter, n, 0);
1295 bch2_trans_iter_exit(trans, &iter);
1299 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
1302 struct bch_fs *c = trans->c;
1303 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1304 struct bch_extent_ptr ptr = v->ptrs[block];
1305 struct bpos bp_pos = POS_MIN;
1308 struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
1312 struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
1315 ret = commit_do(trans, NULL, NULL,
1316 BCH_TRANS_COMMIT_no_check_rw|
1317 BCH_TRANS_COMMIT_no_enospc,
1318 ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s, &bp_pos));
1321 if (bkey_eq(bp_pos, POS_MAX))
1324 bp_pos = bpos_nosnap_successor(bp_pos);
1331 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
1333 struct btree_trans *trans = bch2_trans_get(c);
1334 struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1335 unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
1338 ret = bch2_btree_write_buffer_flush_sync(trans);
1342 for (i = 0; i < nr_data; i++) {
1343 ret = ec_stripe_update_bucket(trans, s, i);
1348 bch2_trans_put(trans);
1353 static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
1354 struct ec_stripe_new *s,
1356 struct open_bucket *ob)
1358 struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
1360 s->err = -BCH_ERR_erofs_no_writes;
1364 unsigned offset = ca->mi.bucket_size - ob->sectors_free;
1365 memset(s->new_stripe.data[block] + (offset << 9),
1367 ob->sectors_free << 9);
1369 int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
1370 ob->bucket * ca->mi.bucket_size + offset,
1374 percpu_ref_put(&ca->io_ref);
1380 void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
1383 bch2_stripe_close(c, s);
1388 * data buckets of new stripe all written: create the stripe
1390 static void ec_stripe_create(struct ec_stripe_new *s)
1392 struct bch_fs *c = s->c;
1393 struct open_bucket *ob;
1394 struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
1395 unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
1398 BUG_ON(s->h->s == s);
1400 closure_sync(&s->iodone);
1403 for (i = 0; i < nr_data; i++)
1405 ob = c->open_buckets + s->blocks[i];
1407 if (ob->sectors_free)
1408 zero_out_rest_of_ec_bucket(c, s, i, ob);
1413 if (!bch2_err_matches(s->err, EROFS))
1414 bch_err(c, "error creating stripe: error writing data buckets");
1418 if (s->have_existing_stripe) {
1419 ec_validate_checksums(c, &s->existing_stripe);
1421 if (ec_do_recov(c, &s->existing_stripe)) {
1422 bch_err(c, "error creating stripe: error reading existing stripe");
1426 for (i = 0; i < nr_data; i++)
1427 if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
1428 swap(s->new_stripe.data[i],
1429 s->existing_stripe.data[i]);
1431 ec_stripe_buf_exit(&s->existing_stripe);
1434 BUG_ON(!s->allocated);
1437 ec_generate_ec(&s->new_stripe);
1439 ec_generate_checksums(&s->new_stripe);
1442 for (i = nr_data; i < v->nr_blocks; i++)
1443 ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
1444 closure_sync(&s->iodone);
1446 if (ec_nr_failed(&s->new_stripe)) {
1447 bch_err(c, "error creating stripe: error writing redundancy buckets");
1451 ret = bch2_trans_do(c, &s->res, NULL,
1452 BCH_TRANS_COMMIT_no_check_rw|
1453 BCH_TRANS_COMMIT_no_enospc,
1454 ec_stripe_key_update(trans,
1455 bkey_i_to_stripe(&s->new_stripe.key),
1456 !s->have_existing_stripe));
1457 bch_err_msg(c, ret, "creating stripe key");
1462 ret = ec_stripe_update_extents(c, &s->new_stripe);
1463 bch_err_msg(c, ret, "error updating extents");
1467 bch2_disk_reservation_put(c, &s->res);
1469 for (i = 0; i < v->nr_blocks; i++)
1471 ob = c->open_buckets + s->blocks[i];
1475 __bch2_open_bucket_put(c, ob);
1477 bch2_open_bucket_put(c, ob);
1481 mutex_lock(&c->ec_stripe_new_lock);
1483 mutex_unlock(&c->ec_stripe_new_lock);
1484 wake_up(&c->ec_stripe_new_wait);
1486 ec_stripe_buf_exit(&s->existing_stripe);
1487 ec_stripe_buf_exit(&s->new_stripe);
1488 closure_debug_destroy(&s->iodone);
1490 ec_stripe_new_put(c, s, STRIPE_REF_stripe);
1493 static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
1495 struct ec_stripe_new *s;
1497 mutex_lock(&c->ec_stripe_new_lock);
1498 list_for_each_entry(s, &c->ec_stripe_new_list, list)
1499 if (!atomic_read(&s->ref[STRIPE_REF_io]))
1503 mutex_unlock(&c->ec_stripe_new_lock);
1508 static void ec_stripe_create_work(struct work_struct *work)
1510 struct bch_fs *c = container_of(work,
1511 struct bch_fs, ec_stripe_create_work);
1512 struct ec_stripe_new *s;
1514 while ((s = get_pending_stripe(c)))
1515 ec_stripe_create(s);
1517 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1520 void bch2_ec_do_stripe_creates(struct bch_fs *c)
1522 bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
1524 if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
1525 bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1528 static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
1530 struct ec_stripe_new *s = h->s;
1532 BUG_ON(!s->allocated && !s->err);
1537 mutex_lock(&c->ec_stripe_new_lock);
1538 list_add(&s->list, &c->ec_stripe_new_list);
1539 mutex_unlock(&c->ec_stripe_new_lock);
1541 ec_stripe_new_put(c, s, STRIPE_REF_io);
1544 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob)
1546 struct ec_stripe_new *s = ob->ec;
1551 void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
1553 struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
1557 BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
1559 struct bch_dev *ca = ob_dev(c, ob);
1560 unsigned offset = ca->mi.bucket_size - ob->sectors_free;
1562 return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((const unsigned *) _l);
	unsigned r = *((const unsigned *) _r);

	return cmp_int(l, r);
}
1573 /* pick most common bucket size: */
1574 static unsigned pick_blocksize(struct bch_fs *c,
1575 struct bch_devs_mask *devs)
1577 unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
1580 } cur = { 0, 0 }, best = { 0, 0 };
1582 for_each_member_device_rcu(c, ca, devs)
1583 sizes[nr++] = ca->mi.bucket_size;
1585 sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
1587 for (unsigned i = 0; i < nr; i++) {
1588 if (sizes[i] != cur.size) {
1589 if (cur.nr > best.nr)
1593 cur.size = sizes[i];
1599 if (cur.nr > best.nr)
1605 static bool may_create_new_stripe(struct bch_fs *c)
1610 static void ec_stripe_key_init(struct bch_fs *c,
1614 unsigned stripe_size)
1616 struct bkey_i_stripe *s = bkey_stripe_init(k);
1619 s->v.sectors = cpu_to_le16(stripe_size);
1621 s->v.nr_blocks = nr_data + nr_parity;
1622 s->v.nr_redundant = nr_parity;
1623 s->v.csum_granularity_bits = ilog2(c->opts.encoded_extent_max >> 9);
1624 s->v.csum_type = BCH_CSUM_crc32c;
1627 while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
1628 BUG_ON(1 << s->v.csum_granularity_bits >=
1629 le16_to_cpu(s->v.sectors) ||
1630 s->v.csum_granularity_bits == U8_MAX);
1631 s->v.csum_granularity_bits++;
1634 set_bkey_val_u64s(&s->k, u64s);
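/*
 * The loop above keeps the stripe key small enough to be a legal bkey:
 * each doubling of the checksum granularity roughly halves the number of
 * stored checksums.  Rough sketch (exact layout is defined by
 * stripe_val_u64s()): a 16-device stripe of 1024-sector buckets with
 * crc32c at 64-sector granularity stores 16 * 16 = 256 four-byte
 * checksums, i.e. 128 u64s of checksums on top of the header and block
 * pointers.
 */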
1637 static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
1639 struct ec_stripe_new *s;
1641 lockdep_assert_held(&h->lock);
1643 s = kzalloc(sizeof(*s), GFP_KERNEL);
1645 return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
1647 mutex_init(&s->lock);
1648 closure_init(&s->iodone, NULL);
1649 atomic_set(&s->ref[STRIPE_REF_stripe], 1);
1650 atomic_set(&s->ref[STRIPE_REF_io], 1);
1653 s->nr_data = min_t(unsigned, h->nr_active_devs,
1654 BCH_BKEY_PTRS_MAX) - h->redundancy;
1655 s->nr_parity = h->redundancy;
1657 ec_stripe_key_init(c, &s->new_stripe.key,
1658 s->nr_data, s->nr_parity, h->blocksize);
1664 static struct ec_stripe_head *
1665 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
1666 unsigned algo, unsigned redundancy,
1667 enum bch_watermark watermark)
1669 struct ec_stripe_head *h;
1671 h = kzalloc(sizeof(*h), GFP_KERNEL);
1675 mutex_init(&h->lock);
1676 BUG_ON(!mutex_trylock(&h->lock));
1680 h->redundancy = redundancy;
1681 h->watermark = watermark;
1684 h->devs = target_rw_devs(c, BCH_DATA_user, target);
1686 for_each_member_device_rcu(c, ca, &h->devs)
1687 if (!ca->mi.durability)
1688 __clear_bit(ca->dev_idx, h->devs.d);
1690 h->blocksize = pick_blocksize(c, &h->devs);
1692 for_each_member_device_rcu(c, ca, &h->devs)
1693 if (ca->mi.bucket_size == h->blocksize)
1694 h->nr_active_devs++;
	/* If we only have redundancy + 1 devices, we're better off with just replication: */
1702 if (h->nr_active_devs < h->redundancy + 2)
1703 bch_err(c, "insufficient devices available to create stripe (have %u, need %u) - mismatched bucket sizes?",
1704 h->nr_active_devs, h->redundancy + 2);
1706 list_add(&h->list, &c->ec_stripe_head_list);
1710 void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
1714 bitmap_weight(h->s->blocks_allocated,
1715 h->s->nr_data) == h->s->nr_data)
1716 ec_stripe_set_pending(c, h);
1718 mutex_unlock(&h->lock);
1721 static struct ec_stripe_head *
1722 __bch2_ec_stripe_head_get(struct btree_trans *trans,
1725 unsigned redundancy,
1726 enum bch_watermark watermark)
1728 struct bch_fs *c = trans->c;
1729 struct ec_stripe_head *h;
1735 ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
1737 return ERR_PTR(ret);
1739 if (test_bit(BCH_FS_going_ro, &c->flags)) {
1740 h = ERR_PTR(-BCH_ERR_erofs_no_writes);
1744 list_for_each_entry(h, &c->ec_stripe_head_list, list)
1745 if (h->target == target &&
1747 h->redundancy == redundancy &&
1748 h->watermark == watermark) {
1749 ret = bch2_trans_mutex_lock(trans, &h->lock);
1755 h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
1757 if (!IS_ERR_OR_NULL(h) &&
1758 h->nr_active_devs < h->redundancy + 2) {
1759 mutex_unlock(&h->lock);
1762 mutex_unlock(&c->ec_stripe_head_lock);
1766 static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
1767 enum bch_watermark watermark, struct closure *cl)
1769 struct bch_fs *c = trans->c;
1770 struct bch_devs_mask devs = h->devs;
1771 struct open_bucket *ob;
1772 struct open_buckets buckets;
1773 struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
1774 unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
1775 bool have_cache = true;
1778 BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
1779 BUG_ON(v->nr_redundant != h->s->nr_parity);
1781 for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
1782 __clear_bit(v->ptrs[i].dev, devs.d);
1783 if (i < h->s->nr_data)
1789 BUG_ON(nr_have_data > h->s->nr_data);
1790 BUG_ON(nr_have_parity > h->s->nr_parity);
1793 if (nr_have_parity < h->s->nr_parity) {
1794 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1804 open_bucket_for_each(c, &buckets, ob, i) {
1805 j = find_next_zero_bit(h->s->blocks_gotten,
1806 h->s->nr_data + h->s->nr_parity,
1808 BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
1810 h->s->blocks[j] = buckets.v[i];
1811 v->ptrs[j] = bch2_ob_ptr(c, ob);
1812 __set_bit(j, h->s->blocks_gotten);
1820 if (nr_have_data < h->s->nr_data) {
1821 ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1831 open_bucket_for_each(c, &buckets, ob, i) {
1832 j = find_next_zero_bit(h->s->blocks_gotten,
1834 BUG_ON(j >= h->s->nr_data);
1836 h->s->blocks[j] = buckets.v[i];
1837 v->ptrs[j] = bch2_ob_ptr(c, ob);
1838 __set_bit(j, h->s->blocks_gotten);
1848 /* XXX: doesn't obey target: */
1849 static s64 get_existing_stripe(struct bch_fs *c,
1850 struct ec_stripe_head *head)
1852 ec_stripes_heap *h = &c->ec_stripes_heap;
1858 if (may_create_new_stripe(c))
1861 mutex_lock(&c->ec_stripes_heap_lock);
1862 for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
1863 /* No blocks worth reusing, stripe will just be deleted: */
1864 if (!h->data[heap_idx].blocks_nonempty)
1867 stripe_idx = h->data[heap_idx].idx;
1869 m = genradix_ptr(&c->stripes, stripe_idx);
1871 if (m->algorithm == head->algo &&
1872 m->nr_redundant == head->redundancy &&
1873 m->sectors == head->blocksize &&
1874 m->blocks_nonempty < m->nr_blocks - m->nr_redundant &&
1875 bch2_try_open_stripe(c, head->s, stripe_idx)) {
1880 mutex_unlock(&c->ec_stripes_heap_lock);
1884 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
1886 struct bch_fs *c = trans->c;
1887 struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
1888 struct bch_stripe *existing_v;
1894 * If we can't allocate a new stripe, and there's no stripes with empty
1895 * blocks for us to reuse, that means we have to wait on copygc:
1897 idx = get_existing_stripe(c, h);
1899 return -BCH_ERR_stripe_alloc_blocked;
1901 ret = get_stripe_key_trans(trans, idx, &h->s->existing_stripe);
1902 bch2_fs_fatal_err_on(ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart), c,
1903 "reading stripe key: %s", bch2_err_str(ret));
1905 bch2_stripe_close(c, h->s);
1909 existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
1911 BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
1912 h->s->nr_data = existing_v->nr_blocks -
1913 existing_v->nr_redundant;
1915 ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
1917 bch2_stripe_close(c, h->s);
1921 BUG_ON(h->s->existing_stripe.size != h->blocksize);
1922 BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
1925 * Free buckets we initially allocated - they might conflict with
1926 * blocks from the stripe we're reusing:
1928 for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
1929 bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
1930 h->s->blocks[i] = 0;
1932 memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
1933 memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
1935 for (i = 0; i < existing_v->nr_blocks; i++) {
1936 if (stripe_blockcount_get(existing_v, i)) {
1937 __set_bit(i, h->s->blocks_gotten);
1938 __set_bit(i, h->s->blocks_allocated);
1941 ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
1944 bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
1945 h->s->have_existing_stripe = true;
1950 static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
1952 struct bch_fs *c = trans->c;
1953 struct btree_iter iter;
1955 struct bpos min_pos = POS(0, 1);
1956 struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
1959 if (!h->s->res.sectors) {
1960 ret = bch2_disk_reservation_get(c, &h->s->res,
1963 BCH_DISK_RESERVATION_NOFAIL);
1968 for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
1969 BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
1970 if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
1971 if (start_pos.offset) {
1972 start_pos = min_pos;
1973 bch2_btree_iter_set_pos(&iter, start_pos);
1977 ret = -BCH_ERR_ENOSPC_stripe_create;
1981 if (bkey_deleted(k.k) &&
1982 bch2_try_open_stripe(c, h->s, k.k->p.offset))
1986 c->ec_stripe_hint = iter.pos.offset;
1991 ret = ec_stripe_mem_alloc(trans, &iter);
1993 bch2_stripe_close(c, h->s);
1997 h->s->new_stripe.key.k.p = iter.pos;
1999 bch2_trans_iter_exit(trans, &iter);
2002 bch2_disk_reservation_put(c, &h->s->res);
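/*
 * Top-level entry point for the allocator: find or create a stripe head
 * matching target/algo/redundancy/watermark, then make sure it has an
 * open stripe to allocate from.  Preference order in the code below:
 * allocate a brand new full stripe; failing that, reuse an existing
 * stripe that still has empty blocks; failing that, wait for copygc to
 * free up blocks (the copygc watermark itself retries immediately
 * instead of waiting).
 */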
2006 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
2009 unsigned redundancy,
2010 enum bch_watermark watermark,
2013 struct bch_fs *c = trans->c;
2014 struct ec_stripe_head *h;
2015 bool waiting = false;
2018 h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
2019 if (IS_ERR_OR_NULL(h))
2023 ret = ec_new_stripe_alloc(c, h);
2025 bch_err(c, "failed to allocate new stripe");
2030 if (h->s->allocated)
2033 if (h->s->have_existing_stripe)
2034 goto alloc_existing;
2036 /* First, try to allocate a full stripe: */
2037 ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
2038 __bch2_ec_stripe_head_reserve(trans, h);
2041 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
2042 bch2_err_matches(ret, ENOMEM))
	/* Not enough buckets available for a full stripe: we must reuse an existing stripe: */
2050 ret = __bch2_ec_stripe_head_reuse(trans, h);
2053 if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
2056 if (watermark == BCH_WATERMARK_copygc) {
2057 ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
2058 __bch2_ec_stripe_head_reserve(trans, h);
2064 /* XXX freelist_wait? */
2065 closure_wait(&c->freelist_wait, cl);
2070 closure_wake_up(&c->freelist_wait);
2073 * Retry allocating buckets, with the watermark for this
2076 ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
2081 ret = ec_stripe_buf_init(&h->s->new_stripe, 0, h->blocksize);
2085 h->s->allocated = true;
2088 BUG_ON(!h->s->new_stripe.data[0]);
2089 BUG_ON(trans->restarted);
2092 bch2_ec_stripe_head_put(c, h);
2093 return ERR_PTR(ret);
2096 static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
2098 struct ec_stripe_head *h;
2099 struct open_bucket *ob;
2102 mutex_lock(&c->ec_stripe_head_lock);
2103 list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2104 mutex_lock(&h->lock);
2111 for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
2112 if (!h->s->blocks[i])
2115 ob = c->open_buckets + h->s->blocks[i];
2116 if (ob->dev == ca->dev_idx)
2121 h->s->err = -BCH_ERR_erofs_no_writes;
2122 ec_stripe_set_pending(c, h);
2124 mutex_unlock(&h->lock);
2126 mutex_unlock(&c->ec_stripe_head_lock);
2129 void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
2131 __bch2_ec_stop(c, ca);
2134 void bch2_fs_ec_stop(struct bch_fs *c)
2136 __bch2_ec_stop(c, NULL);
static bool bch2_fs_ec_flush_done(struct bch_fs *c)
{
	bool ret;
	mutex_lock(&c->ec_stripe_new_lock);
	ret = list_empty(&c->ec_stripe_new_list);
	mutex_unlock(&c->ec_stripe_new_lock);
	return ret;
}

void bch2_fs_ec_flush(struct bch_fs *c)
{
	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
}
2155 int bch2_stripes_read(struct bch_fs *c)
2157 int ret = bch2_trans_run(c,
2158 for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
2159 BTREE_ITER_prefetch, k, ({
2160 if (k.k->type != KEY_TYPE_stripe)
2163 ret = __ec_stripe_mem_alloc(c, k.k->p.offset, GFP_KERNEL);
2167 const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
2169 struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
2170 m->sectors = le16_to_cpu(s->sectors);
2171 m->algorithm = s->algorithm;
2172 m->nr_blocks = s->nr_blocks;
2173 m->nr_redundant = s->nr_redundant;
2174 m->blocks_nonempty = 0;
2176 for (unsigned i = 0; i < s->nr_blocks; i++)
2177 m->blocks_nonempty += !!stripe_blockcount_get(s, i);
2179 bch2_stripes_heap_insert(c, m, k.k->p.offset);
2186 void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
2188 ec_stripes_heap *h = &c->ec_stripes_heap;
2192 mutex_lock(&c->ec_stripes_heap_lock);
2193 for (i = 0; i < min_t(size_t, h->used, 50); i++) {
2194 m = genradix_ptr(&c->stripes, h->data[i].idx);
2196 prt_printf(out, "%zu %u/%u+%u", h->data[i].idx,
2197 h->data[i].blocks_nonempty,
2198 m->nr_blocks - m->nr_redundant,
2200 if (bch2_stripe_is_open(c, h->data[i].idx))
2201 prt_str(out, " open");
2204 mutex_unlock(&c->ec_stripes_heap_lock);
2207 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
2209 struct ec_stripe_head *h;
2210 struct ec_stripe_new *s;
2212 mutex_lock(&c->ec_stripe_head_lock);
2213 list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2214 prt_printf(out, "target %u algo %u redundancy %u %s:\n",
2215 h->target, h->algo, h->redundancy,
2216 bch2_watermarks[h->watermark]);
2219 prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
2220 h->s->idx, h->s->nr_data, h->s->nr_parity,
2221 bitmap_weight(h->s->blocks_allocated,
2224 mutex_unlock(&c->ec_stripe_head_lock);
2226 prt_printf(out, "in flight:\n");
2228 mutex_lock(&c->ec_stripe_new_lock);
2229 list_for_each_entry(s, &c->ec_stripe_new_list, list) {
2230 prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
2231 s->idx, s->nr_data, s->nr_parity,
2232 atomic_read(&s->ref[STRIPE_REF_io]),
2233 atomic_read(&s->ref[STRIPE_REF_stripe]),
2234 bch2_watermarks[s->h->watermark]);
2236 mutex_unlock(&c->ec_stripe_new_lock);
2239 void bch2_fs_ec_exit(struct bch_fs *c)
2241 struct ec_stripe_head *h;
2245 mutex_lock(&c->ec_stripe_head_lock);
2246 h = list_first_entry_or_null(&c->ec_stripe_head_list,
2247 struct ec_stripe_head, list);
2250 mutex_unlock(&c->ec_stripe_head_lock);
2255 for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
2256 BUG_ON(h->s->blocks[i]);
2263 BUG_ON(!list_empty(&c->ec_stripe_new_list));
2265 free_heap(&c->ec_stripes_heap);
2266 genradix_free(&c->stripes);
2267 bioset_exit(&c->ec_bioset);
2270 void bch2_fs_ec_init_early(struct bch_fs *c)
2272 spin_lock_init(&c->ec_stripes_new_lock);
2273 mutex_init(&c->ec_stripes_heap_lock);
2275 INIT_LIST_HEAD(&c->ec_stripe_head_list);
2276 mutex_init(&c->ec_stripe_head_lock);
2278 INIT_LIST_HEAD(&c->ec_stripe_new_list);
2279 mutex_init(&c->ec_stripe_new_lock);
2280 init_waitqueue_head(&c->ec_stripe_new_wait);
2282 INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
2283 INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
int bch2_fs_ec_init(struct bch_fs *c)
{
	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
			   BIOSET_NEED_BVECS);
}