// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "data_update.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"
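/*
 * Data updates: write out the new copy of an extent being moved (by
 * rebalance, copygc, promotes, etc.), then update the index to point at the
 * new location - handling races with foreground writes along the way.
 */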
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
	if (trace_move_extent_finish_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		trace_move_extent_finish(c, buf.buf);
		printbuf_exit(&buf);
	}
}
static void trace_move_extent_fail2(struct data_update *m,
				    struct bkey_s_c new,
				    struct bkey_s_c wrote,
				    struct bkey_i *insert,
				    const char *msg)
{
	struct bch_fs *c = m->op.c;
	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
	const union bch_extent_entry *entry;
	struct bch_extent_ptr *ptr;
	struct extent_ptr_decoded p;
	struct printbuf buf = PRINTBUF;
	unsigned i, rewrites_found = 0;

	if (!trace_move_extent_fail_enabled())
		return;

	prt_str(&buf, msg);

	if (insert) {
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached)
				rewrites_found |= 1U << i;
			i++;
		}
	}
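	/*
	 * Render each mask one digit per pointer slot, low bit first: e.g.
	 * rewrite_ptrs == 0x5 (pointers 0 and 2) prints as "1010".
	 */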
	prt_printf(&buf, "\nrewrite ptrs:   %u%u%u%u",
		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);

	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
		   (rewrites_found & (1 << 0)) != 0,
		   (rewrites_found & (1 << 1)) != 0,
		   (rewrites_found & (1 << 2)) != 0,
		   (rewrites_found & (1 << 3)) != 0);

	prt_str(&buf, "\nold:    ");
	bch2_bkey_val_to_text(&buf, c, old);

	prt_str(&buf, "\nnew:    ");
	bch2_bkey_val_to_text(&buf, c, new);

	prt_str(&buf, "\nwrote:  ");
	bch2_bkey_val_to_text(&buf, c, wrote);

	if (insert) {
		prt_str(&buf, "\ninsert: ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
	}

	trace_move_extent_fail(c, buf.buf);
	printbuf_exit(&buf);
}
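/*
 * The index update half of a data update. Roughly, for each key the write
 * produced:
 *
 *  1) reread the current key and check it still matches the extent we read
 *     from; if a racing update changed it, count the key as raced and advance
 *  2) mark the pointers we were asked to rewrite as cached
 *  3) drop replicas that conflict with, or are made redundant by, what we
 *     just wrote
 *  4) append the new pointers, then update and commit in one transaction,
 *     retrying from the top on transaction restart
 */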
static int __bch2_data_update_index_update(struct btree_trans *trans,
					   struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_iter iter;
	struct data_update *m =
		container_of(op, struct data_update, op);
	struct keylist *keys = &op->insert_keys;
	struct bkey_buf _new, _insert;
	int ret = 0;

	bch2_bkey_buf_init(&_new);
	bch2_bkey_buf_init(&_insert);
	bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

	bch2_trans_iter_init(trans, &iter, m->btree_id,
			     bkey_start_pos(&bch2_keylist_front(keys)->k),
			     BTREE_ITER_slots|BTREE_ITER_intent);
	while (1) {
		struct bkey_s_c k;
		struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
		struct bkey_i *insert = NULL;
		struct bkey_i_extent *new;
		const union bch_extent_entry *entry_c;
		union bch_extent_entry *entry;
		struct extent_ptr_decoded p;
		struct bch_extent_ptr *ptr;
		const struct bch_extent_ptr *ptr_c;
		struct bpos next_pos;
		bool should_check_enospc;
		s64 i_sectors_delta = 0, disk_sectors_delta = 0;
		unsigned rewrites_found = 0, durability, i;

		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		new = bkey_i_to_extent(bch2_keylist_front(keys));
		if (!bch2_extents_match(k, old)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
						NULL, "no match:");
			goto nowork;
		}

		bkey_reassemble(_insert.k, k);
		insert = _insert.k;

		bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
		new = bkey_i_to_extent(_new.k);
		bch2_cut_front(iter.pos, &new->k_i);

		bch2_cut_front(iter.pos,	insert);
		bch2_cut_back(new->k.p,		insert);
		bch2_cut_back(insert->k.p,	&new->k_i);
		/*
		 * @old: extent that we read from
		 * @insert: key that we're going to update, initialized from
		 * extent currently in btree - same as @old unless we raced with
		 * other updates
		 * @new: extent with new pointers that we'll be adding to @insert
		 *
		 * First, drop rewrite_ptrs from @new:
		 */
		i = 0;
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
			    !ptr->cached) {
				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
				rewrites_found |= 1U << i;
			}
			i++;
		}
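		/*
		 * (The rewritten pointers aren't dropped outright - marking
		 * them cached keeps the old copies readable until they're
		 * invalidated: e.g. rewrite_ptrs == BIT(1) demotes only the
		 * extent's second pointer to cached here.)
		 */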
		if (m->data_opts.rewrite_ptrs &&
		    !rewrites_found &&
		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
			goto nowork;
		}
		/*
		 * A replica that we just wrote might conflict with a replica
		 * that we want to keep, due to racing with another move:
		 */
restart_drop_conflicting_replicas:
		extent_for_each_ptr(extent_i_to_s(new), ptr)
			if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
			    !ptr_c->cached) {
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
				goto restart_drop_conflicting_replicas;
			}

		if (!bkey_val_u64s(&new->k)) {
			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
			goto nowork;
		}
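		/*
		 * (This and the drop-excess loop below use the goto-restart
		 * pattern: dropping an entry invalidates the iteration, so we
		 * rescan from the start after every drop.)
		 */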
		/* Now, drop pointers that conflict with what we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
				bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

		durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
			bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));
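		/*
		 * durability counts both surviving and about-to-be-added
		 * replicas: e.g. with data_replicas == 2, one durability-1
		 * pointer left on @insert plus two durability-1 pointers on
		 * @new gives durability == 3, so one replica can be dropped
		 * below.
		 */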
		/* Now, drop excess replicas: */
restart_drop_extra_replicas:
		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
			unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

			if (!p.ptr.cached &&
			    durability - ptr_durability >= m->op.opts.data_replicas) {
				durability -= ptr_durability;

				bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
				goto restart_drop_extra_replicas;
			}
		}
		/* Finally, add the pointers we just wrote: */
		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
			bch2_extent_ptr_decoded_append(insert, &p);

		bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
		bch2_extent_normalize(c, bkey_i_to_s(insert));

		ret = bch2_sum_sector_overwrites(trans, &iter, insert,
						 &should_check_enospc,
						 &i_sectors_delta,
						 &disk_sectors_delta);
		if (ret)
			goto err;
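		/*
		 * The new key may need more space on disk than the one it
		 * overwrites (e.g. when adding replicas): if so, top up the
		 * write's disk reservation - NOFAIL unless
		 * bch2_sum_sector_overwrites() determined that a real -ENOSPC
		 * check is needed.
		 */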
		if (disk_sectors_delta > (s64) op->res.sectors) {
			ret = bch2_disk_reservation_add(c, &op->res,
						disk_sectors_delta - op->res.sectors,
						!should_check_enospc
						? BCH_DISK_RESERVATION_NOFAIL : 0);
			if (ret)
				goto out;
		}

		next_pos = insert->k.p;
		/*
		 * Check for nonce offset inconsistency:
		 * This is debug code - we've been seeing this bug rarely, and
		 * it's been hard to reproduce, so this should give us some more
		 * information when it does occur:
		 */
		struct printbuf err = PRINTBUF;
		int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
		printbuf_exit(&err);

		if (invalid) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "about to insert invalid key in data update path");
			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk:   ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			bch2_print_string_as_lines(KERN_ERR, buf.buf);
			printbuf_exit(&buf);

			bch2_fatal_error(c);
			ret = -EIO;
			goto out;
		}
		if (trace_data_update_enabled()) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "\nold: ");
			bch2_bkey_val_to_text(&buf, c, old);
			prt_str(&buf, "\nk:   ");
			bch2_bkey_val_to_text(&buf, c, k);
			prt_str(&buf, "\nnew: ");
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

			trace_data_update(c, buf.buf);
			printbuf_exit(&buf);
		}
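		/*
		 * The index update proper: snapshot whiteouts, the rebalance
		 * hint, the key update itself and the commit are chained with
		 * ?:, stopping at the first error:
		 */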
		ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, bkey_start_pos(&insert->k)) ?:
			bch2_insert_snapshot_whiteouts(trans, m->btree_id,
						k.k->p, insert->k.p) ?:
			bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
			bch2_trans_update(trans, &iter, insert,
				BTREE_UPDATE_internal_snapshot_node) ?:
			bch2_trans_commit(trans, &op->res,
				NULL,
				BCH_TRANS_COMMIT_no_check_rw|
				BCH_TRANS_COMMIT_no_enospc|
				m->data_opts.btree_insert_flags);
		if (!ret) {
			bch2_btree_iter_set_pos(&iter, next_pos);

			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
			trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
		}
err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			ret = 0;
		if (ret)
			break;
next:
		while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
			bch2_keylist_pop_front(keys);
			if (bch2_keylist_empty(keys))
				goto out;
		}
		continue;
nowork:
		if (m->stats) {
			BUG_ON(k.k->p.offset <= iter.pos.offset);
			atomic64_inc(&m->stats->keys_raced);
			atomic64_add(k.k->p.offset - iter.pos.offset,
				     &m->stats->sectors_raced);
		}
		count_event(c, move_extent_fail);

		bch2_btree_iter_advance(&iter);
		goto next;
	}
out:
	bch2_trans_iter_exit(trans, &iter);
	bch2_bkey_buf_exit(&_insert, c);
	bch2_bkey_buf_exit(&_new, c);
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
	return ret;
}
int bch2_data_update_index_update(struct bch_write_op *op)
{
	return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}
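/*
 * Called when the read half of a data update completes: the write op takes
 * over the read bio's pages and checksum state, then the write is kicked off:
 */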
void bch2_data_update_read_done(struct data_update *m,
				struct bch_extent_crc_unpacked crc)
{
	/* write bio must own pages: */
	BUG_ON(!m->op.wbio.bio.bi_vcnt);

	m->op.crc = crc;
	m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

	closure_call(&m->op.cl, bch2_write, NULL, NULL);
}
void bch2_data_update_exit(struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bkey_ptrs_c ptrs =
		bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
		if (c->opts.nocow_enabled)
			bch2_bucket_nocow_unlock(&c->nocow_locks,
						 PTR_BUCKET_POS(ca, ptr), 0);
		bch2_dev_put(ca);
	}

	bch2_bkey_buf_exit(&update->k, c);
	bch2_disk_reservation_put(c, &update->op.res);
	bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}
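/*
 * Unwritten extents have no data to copy: moving one is just allocating new
 * space, building replacement keys with the unwritten bit set, and running
 * them through the normal index update path:
 */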
static void bch2_update_unwritten_extent(struct btree_trans *trans,
					 struct data_update *update)
{
	struct bch_fs *c = update->op.c;
	struct bio *bio = &update->op.wbio.bio;
	struct bkey_i_extent *e;
	struct write_point *wp;
	struct closure cl;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	closure_init_stack(&cl);
	bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);
	while (bio_sectors(bio)) {
		unsigned sectors = bio_sectors(bio);

		bch2_trans_begin(trans);

		bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
				     BTREE_ITER_slots);
		ret = lockrestart_do(trans, ({
			k = bch2_btree_iter_peek_slot(&iter);
			bkey_err(k);
		}));
		bch2_trans_iter_exit(trans, &iter);

		if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
			break;
		e = bkey_extent_init(update->op.insert_keys.top);
		e->k.p = update->op.pos;

		ret = bch2_alloc_sectors_start_trans(trans,
				update->op.target,
				false,
				update->op.write_point,
				&update->op.devs_have,
				update->op.nr_replicas,
				update->op.nr_replicas,
				update->op.watermark,
				0, &cl, &wp);
		if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);
			continue;
		}

		bch_err_fn_ratelimited(c, ret);

		if (ret)
			break;
		sectors = min(sectors, wp->sectors_free);

		bch2_key_resize(&e->k, sectors);

		bch2_open_bucket_get(c, wp, &update->op.open_buckets);
		bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
		bch2_alloc_sectors_done(c, wp);

		bio_advance(bio, sectors << 9);
		update->op.pos.offset += sectors;

		extent_for_each_ptr(extent_i_to_s(e), ptr)
			ptr->unwritten = true;
		bch2_keylist_push(&update->op.insert_keys);

		ret = __bch2_data_update_index_update(trans, &update->op);

		bch2_open_buckets_put(c, &update->op.open_buckets);

		if (ret)
			break;
	}
	if (closure_nr_remaining(&cl) != 1) {
		bch2_trans_unlock(trans);
		closure_sync(&cl);
	}
}
int bch2_extent_drop_ptrs(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct bkey_s_c k,
			  struct data_update_opts data_opts)
{
	struct bch_fs *c = trans->c;
	struct bkey_i *n;
	int ret;

	n = bch2_bkey_make_mut_noupdate(trans, k);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	while (data_opts.kill_ptrs) {
		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);

		bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, i++ == drop);
		data_opts.kill_ptrs ^= 1U << drop;
	}
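	/*
	 * (__fls() picks the highest set bit, so pointers are dropped from the
	 * back: e.g. kill_ptrs == 0x5 drops pointer 2 first, then pointer 0,
	 * keeping the indices of not-yet-dropped pointers valid.)
	 */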
	/*
	 * If the new extent no longer has any pointers, bch2_extent_normalize()
	 * will do the appropriate thing with it (turning it into a
	 * KEY_TYPE_error key, or just a discard if it was a cached extent)
	 */
	bch2_extent_normalize(c, bkey_i_to_s(n));

	/*
	 * Since we're not inserting through an extent iterator
	 * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
	 * we aren't using the extent overwrite path to delete, we're
	 * just using the normal key deletion path:
	 */
	if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
		n->k.size = 0;

	return bch2_trans_relock(trans) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
int bch2_data_update_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct moving_context *ctxt,
			  struct data_update *m,
			  struct write_point_specifier wp,
			  struct bch_io_opts io_opts,
			  struct data_update_opts data_opts,
			  enum btree_id btree_id,
			  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
	unsigned ptrs_locked = 0;
	int ret = 0;

	/*
	 * If the fs is corrupt, we may have a key for a snapshot node that
	 * doesn't exist; we have to check for this because we go rw before
	 * repairing the snapshots table - just skip it, we can move it later.
	 */
	if (unlikely(k.k->p.snapshot && !bch2_snapshot_equiv(c, k.k->p.snapshot)))
		return -BCH_ERR_data_update_done;
	bch2_bkey_buf_init(&m->k);
	bch2_bkey_buf_reassemble(&m->k, c, k);
	m->btree_id	= btree_id;
	m->data_opts	= data_opts;
	m->ctxt		= ctxt;
	m->stats	= ctxt ? ctxt->stats : NULL;

	bch2_write_op_init(&m->op, c, io_opts);
	m->op.pos	= bkey_start_pos(k.k);
	m->op.version	= k.k->version;
	m->op.target	= data_opts.target;
	m->op.write_point = wp;
	m->op.nr_replicas = 0;
	m->op.flags	|= BCH_WRITE_PAGES_STABLE|
		BCH_WRITE_PAGES_OWNED|
		BCH_WRITE_DATA_ENCODED|
		BCH_WRITE_MOVE|
		m->data_opts.write_flags;
	m->op.compression_opt	= background_compression(io_opts);
	m->op.watermark		= m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
	bkey_for_each_ptr(ptrs, ptr) {
		if (!bch2_dev_tryget(c, ptr->dev)) {
			bkey_for_each_ptr(ptrs, ptr2) {
				if (ptr2 == ptr)
					break;
				bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
			}
			return -BCH_ERR_data_update_done;
		}
	}
	unsigned durability_have = 0, durability_removing = 0;

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
		bool locked;
		if (((1U << i) & m->data_opts.rewrite_ptrs)) {
			BUG_ON(p.ptr.cached);

			if (crc_is_compressed(p.crc))
				reserve_sectors += k.k->size;

			m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
			durability_removing += bch2_extent_ptr_desired_durability(c, &p);
		} else if (!p.ptr.cached &&
			   !((1U << i) & m->data_opts.kill_ptrs)) {
			bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
			durability_have += bch2_extent_ptr_durability(c, &p);
		}
		/*
		 * op->csum_type is normally initialized from the fs/file's
		 * current options - but if an extent is encrypted, we require
		 * that it stays encrypted:
		 */
		if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
			m->op.nonce	= p.crc.nonce + p.crc.offset;
			m->op.csum_type = p.crc.csum_type;
		}

		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			m->op.incompressible = true;
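		/*
		 * Nocow-lock the buckets this extent lives in so in-place
		 * (nocow) writes can't race with the move: with a
		 * moving_context we may wait for the lock once our in-flight
		 * ios have drained; without one we can only trylock and back
		 * off.
		 */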
		if (c->opts.nocow_enabled) {
			if (ctxt) {
				move_ctxt_wait_event(ctxt,
					(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
									    bucket, 0)) ||
					list_empty(&ctxt->ios));

				if (!locked)
					bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
			} else {
				if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
					ret = -BCH_ERR_nocow_lock_blocked;
					goto err;
				}
			}
			ptrs_locked |= (1U << i);
		}

		i++;
	}
	unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));

	/*
	 * If current extent durability is less than io_opts.data_replicas,
	 * we're not trying to rereplicate the extent up to data_replicas here -
	 * unless extra_replicas was specified
	 *
	 * Increasing replication is an explicit operation triggered by
	 * rereplicate, currently, so that users don't get an unexpected -ENOSPC
	 */
	if (!(m->data_opts.write_flags & BCH_WRITE_CACHED) &&
	    !durability_required) {
		m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
		m->data_opts.rewrite_ptrs = 0;
		/* if iter == NULL, it's just a promote */
		if (iter)
			ret = bch2_extent_drop_ptrs(trans, iter, k, m->data_opts);
		goto done;
	}

	m->op.nr_replicas = min(durability_removing, durability_required) +
		m->data_opts.extra_replicas;
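	/*
	 * e.g. rewriting one of the two durability-1 pointers of an extent
	 * with data_replicas == 2: durability_have == 1, durability_removing
	 * == 1, durability_required == 1, so we write min(1, 1) +
	 * extra_replicas new replica(s) rather than rereplicating the whole
	 * extent.
	 */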
	/*
	 * If device(s) were set to durability=0 after data was written to them
	 * we can end up with a durability=0 extent, and the normal algorithm
	 * that tries not to increase durability doesn't work:
	 */
	if (!(durability_have + durability_removing))
		m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

	m->op.nr_replicas_required = m->op.nr_replicas;

	if (reserve_sectors) {
		ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
				m->data_opts.extra_replicas
				? 0
				: BCH_DISK_RESERVATION_NOFAIL);
		if (ret)
			goto err;
	}
	if (bkey_extent_is_unwritten(k)) {
		bch2_update_unwritten_extent(trans, m);
		goto done;
	}

	return 0;
err:
	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bch_dev *ca = bch2_dev_have_ref(c, p.ptr.dev);
		struct bpos bucket = PTR_BUCKET_POS(ca, &p.ptr);
		if ((1U << i) & ptrs_locked)
			bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
		bch2_dev_put(ca);
		i++;
	}

	bch2_bkey_buf_exit(&m->k, c);
	bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
	return ret;
done:
	bch2_data_update_exit(m);
	return ret ?: -BCH_ERR_data_update_done;
}
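/*
 * Cached pointers may be invalidated at any time, so asking to rewrite one
 * makes no sense - a rewrite request for a cached pointer is turned into a
 * kill:
 */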
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	unsigned i = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if ((opts->rewrite_ptrs & (1U << i)) && ptr->cached) {
			opts->kill_ptrs |= 1U << i;
			opts->rewrite_ptrs ^= 1U << i;
		}

		i++;
	}
}