// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "clock.h"
#include "compress.h"
#include "disk_groups.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "io_write.h"
#include "move.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"
#include "trace.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched/cputime.h>

/* bch_extent_rebalance: */
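
/*
 * Look up the bch_extent_rebalance entry stashed in an extent key, if any:
 * this is the copy of the io path options stamped onto the extent when it
 * was written (see bch2_bkey_set_needs_rebalance() below).
 */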
static const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;

	bkey_extent_entry_for_each(ptrs, entry)
		if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
			return &entry->rebalance;

	return NULL;
}
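
/*
 * Return a bitmask of the pointers that background compression would need to
 * rewrite: bit n corresponds to the nth pointer in the key. Extents with an
 * incompressible or unwritten pointer are skipped entirely.
 */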
static inline unsigned bch2_bkey_ptrs_need_compress(struct bch_fs *c,
						    struct bch_io_opts *opts,
						    struct bkey_s_c k,
						    struct bkey_ptrs_c ptrs)
{
	if (!opts->background_compression)
		return 0;

	unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
		    p.ptr.unwritten)
			return 0;

		if (!p.ptr.cached && p.crc.compression_type != compression_type)
			rewrite_ptrs |= ptr_bit;
		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}
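
/*
 * As above, but for the background_target option: returns a bitmask of the
 * non-cached pointers that live on devices outside the target.
 */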
static inline unsigned bch2_bkey_ptrs_need_move(struct bch_fs *c,
						struct bch_io_opts *opts,
						struct bkey_ptrs_c ptrs)
{
	if (!opts->background_target ||
	    !bch2_target_accepts_data(c, BCH_DATA_user, opts->background_target))
		return 0;

	unsigned ptr_bit = 1;
	unsigned rewrite_ptrs = 0;

	bkey_for_each_ptr(ptrs, ptr) {
		if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, opts->background_target))
			rewrite_ptrs |= ptr_bit;

		ptr_bit <<= 1;
	}

	return rewrite_ptrs;
}

static unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c,
					      struct bch_io_opts *opts,
					      struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

	return bch2_bkey_ptrs_need_compress(c, opts, k, ptrs) |
		bch2_bkey_ptrs_need_move(c, opts, ptrs);
}
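
/*
 * Estimate how many sectors rebalance would rewrite for this extent, going by
 * the options stamped into the key: compression work first, then pointers
 * outside background_target. Returns 0 if no rebalance entry is present.
 */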
u64 bch2_bkey_sectors_need_rebalance(struct bch_fs *c, struct bkey_s_c k)
{
	const struct bch_extent_rebalance *opts = bch2_bkey_rebalance_opts(k);
	if (!opts)
		return 0;

	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	u64 sectors = 0;

	if (opts->background_compression) {
		unsigned compression_type = bch2_compression_opt_to_type(opts->background_compression);

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible ||
			    p.ptr.unwritten) {
				sectors = 0;
				goto incompressible;
			}

			if (!p.ptr.cached && p.crc.compression_type != compression_type)
				sectors += p.crc.compressed_size;
		}
	}
incompressible:
	if (opts->background_target)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			if (!p.ptr.cached && !bch2_dev_in_target(c, p.ptr.dev, opts->background_target))
				sectors += p.crc.compressed_size;

	return sectors;
}
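
/*
 * Does the extent's stamped bch_extent_rebalance entry disagree with the
 * current io options? If the extent needs rebalance work the entry must
 * exist and match the options; if not, a stale entry must be dropped.
 */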
static bool bch2_bkey_rebalance_needs_update(struct bch_fs *c, struct bch_io_opts *opts,
					     struct bkey_s_c k)
{
	if (!bkey_extent_is_direct_data(k.k))
		return 0;

	const struct bch_extent_rebalance *old = bch2_bkey_rebalance_opts(k);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k)) {
		struct bch_extent_rebalance new = io_opts_to_rebalance_opts(c, opts);
		return old == NULL || memcmp(old, &new, sizeof(new));
	} else {
		return old != NULL;
	}
}
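
/*
 * Add, update or drop the bch_extent_rebalance entry on a key about to be
 * written. Note that a new entry is appended in place at the end of the
 * value: the caller must have allocated the key with room for it (see the
 * "+ 8" in bch2_get_update_rebalance_opts()).
 */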
int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bch_io_opts *opts,
				  struct bkey_i *_k)
{
	if (!bkey_extent_is_direct_data(&_k->k))
		return 0;

	struct bkey_s k = bkey_i_to_s(_k);
	struct bch_extent_rebalance *old =
		(struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);

	if (k.k->type == KEY_TYPE_reflink_v || bch2_bkey_ptrs_need_rebalance(c, opts, k.s_c)) {
		if (!old) {
			old = bkey_val_end(k);
			k.k->u64s += sizeof(*old) / sizeof(u64);
		}

		*old = io_opts_to_rebalance_opts(c, opts);
	} else {
		if (old)
			extent_entry_drop(k, (union bch_extent_entry *) old);
	}

	return 0;
}
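
/*
 * Reconcile @io_opts with the options stamped into an indirect extent: fields
 * flagged as _from_inode in the stamped entry override what the caller looked
 * up, presumably because the owning inode can't be found from a reflink key.
 * If the stamped entry is then stale, rewrite the key, commit, and return
 * -BCH_ERR_transaction_restart_nested so the caller restarts and sees the
 * updated key.
 */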
int bch2_get_update_rebalance_opts(struct btree_trans *trans,
				   struct bch_io_opts *io_opts,
				   struct btree_iter *iter,
				   struct bkey_s_c k)
{
	BUG_ON(iter->flags & BTREE_ITER_is_extents);
	BUG_ON(iter->flags & BTREE_ITER_filter_snapshots);

	const struct bch_extent_rebalance *r = k.k->type == KEY_TYPE_reflink_v
		? bch2_bkey_rebalance_opts(k) : NULL;
	if (r) {
#define x(_name)						\
		if (r->_name##_from_inode) {			\
			io_opts->_name = r->_name;		\
			io_opts->_name##_from_inode = true;	\
		}
		BCH_REBALANCE_OPTS()
#undef x
	}

	if (!bch2_bkey_rebalance_needs_update(trans->c, io_opts, k))
		return 0;

	struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + 8);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	bkey_reassemble(n, k);

	/* On successful transaction commit, @k was invalidated: */

	return bch2_bkey_set_needs_rebalance(trans->c, io_opts, n) ?:
		bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_trans_commit(trans, NULL, NULL, 0) ?:
		-BCH_ERR_transaction_restart_nested;
}
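
/*
 * Pending work is tracked in the rebalance_work btree. A cookie entry at
 * SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX) requests a scan of a single
 * inode - or of the entire filesystem, when inum is 0; other entries point at
 * extents (or, for inode 0, reflink keys) that need rewriting.
 */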
#define REBALANCE_WORK_SCAN_OFFSET	(U64_MAX - 1)

static const char * const bch2_rebalance_state_strs[] = {
#define x(t) #t,
	BCH_REBALANCE_STATES()
	NULL
#undef x
};
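
/*
 * Request a scan by bumping the cookie for @inum: every request increments
 * the cookie value, so concurrent requests are never lost.
 */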
int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie *cookie;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
	ret = PTR_ERR_OR_ZERO(cookie);
	if (ret)
		goto err;

	bkey_cookie_init(&cookie->k_i);
	cookie->k.p = iter.pos;
	cookie->v.cookie = cpu_to_le64(v + 1);

	ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
	int ret = bch2_trans_commit_do(c, NULL, NULL,
				       BCH_TRANS_COMMIT_no_enospc,
				       bch2_set_rebalance_needs_scan_trans(trans, inum));
	rebalance_wakeup(c);
	return ret;
}

int bch2_set_fs_needs_rebalance(struct bch_fs *c)
{
	return bch2_set_rebalance_needs_scan(c, 0);
}
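
/*
 * Clear a scan request, but only if the cookie still matches the value the
 * scan started from; if it was bumped again in the meantime, leave the entry
 * in place so another scan runs.
 */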
static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 v;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
			     SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	v = k.k->type == KEY_TYPE_cookie
		? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
		: 0;

	if (v == cookie)
		ret = bch2_btree_delete_at(trans, &iter, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
					    struct btree_iter *work_iter)
{
	return !kthread_should_stop()
		? bch2_btree_iter_peek(work_iter)
		: bkey_s_c_null;
}
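
/*
 * Strip the bch_extent_rebalance entry from an extent that no longer needs
 * any work.
 */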
static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
					   struct btree_iter *iter,
					   struct bkey_s_c k)
{
	if (!bch2_bkey_rebalance_opts(k))
		return 0;

	struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
	int ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

	extent_entry_drop(bkey_i_to_s(n),
			  (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
	return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}
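
/*
 * Turn one rebalance_work entry into the extent to be moved plus the
 * data_update_opts describing how to move it. If nothing needs rewriting
 * after all (e.g. options or targets changed since the entry was created),
 * clear the extent's rebalance entry and return a null key so the caller
 * skips it.
 */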
static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
					     struct bpos work_pos,
					     struct btree_iter *extent_iter,
					     struct bch_io_opts *io_opts,
					     struct data_update_opts *data_opts)
{
	struct bch_fs *c = trans->c;

	bch2_trans_iter_exit(trans, extent_iter);
	bch2_trans_iter_init(trans, extent_iter,
			     work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
			     work_pos,
			     BTREE_ITER_all_snapshots);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
	if (bkey_err(k))
		return k;

	int ret = bch2_move_get_io_opts_one(trans, io_opts, extent_iter, k);
	if (ret)
		return bkey_s_c_err(ret);

	memset(data_opts, 0, sizeof(*data_opts));
	data_opts->rewrite_ptrs	= bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target	= io_opts->background_target;
	data_opts->write_flags	|= BCH_WRITE_ONLY_SPECIFIED_DEVS;

	if (!data_opts->rewrite_ptrs) {
		/*
		 * Is the device we'd want to write to offline? Did devices
		 * in the target change?
		 *
		 * We'll now need a full scan before this extent is picked up
		 * again:
		 */
		int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
		if (ret)
			return bkey_s_c_err(ret);
		return bkey_s_c_null;
	}

	if (trace_rebalance_extent_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

		unsigned p = bch2_bkey_ptrs_need_compress(c, io_opts, k, ptrs);
		if (p) {
			prt_str(&buf, "compression=");
			bch2_compression_opt_to_text(&buf, io_opts->background_compression);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		p = bch2_bkey_ptrs_need_move(c, io_opts, ptrs);
		if (p) {
			prt_str(&buf, "move=");
			bch2_target_to_text(&buf, c, io_opts->background_target);
			prt_str(&buf, " ");
			bch2_prt_u64_base2(&buf, p);
			prt_newline(&buf);
		}

		trace_rebalance_extent(c, buf.buf);
		printbuf_exit(&buf);
	}

	return k;
}
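
/*
 * Process one extent entry from the work btree: look the extent up, decide
 * what to rewrite, and kick off the move. ENOMEM from bch2_move_extent()
 * means too much IO in flight, so wait for some of it to complete before
 * restarting; most other errors are deliberately swallowed so that one bad
 * extent can't wedge the rebalance thread.
 */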
static int do_rebalance_extent(struct moving_context *ctxt,
			       struct bpos work_pos,
			       struct btree_iter *extent_iter)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	struct data_update_opts data_opts;
	struct bch_io_opts io_opts;
	struct bkey_s_c k;
	struct bkey_buf sk;
	int ret;

	ctxt->stats = &r->work_stats;
	r->state = BCH_REBALANCE_working;

	bch2_bkey_buf_init(&sk);

	ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
						 extent_iter, &io_opts, &data_opts));
	if (ret || !k.k)
		goto out;

	atomic64_add(k.k->size, &ctxt->stats->sectors_seen);

	/*
	 * The iterator gets unlocked by __bch2_read_extent - need to
	 * save a copy of @k elsewhere:
	 */
	bch2_bkey_buf_reassemble(&sk, c, k);
	k = bkey_i_to_s_c(sk.k);

	ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
	if (ret) {
		if (bch2_err_matches(ret, ENOMEM)) {
			/* memory allocation failure, wait for some IO to finish */
			bch2_move_ctxt_wait_for_io(ctxt);
			ret = -BCH_ERR_transaction_restart_nested;
		}

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto out;

		/* skip it and continue, XXX signal failure */
		ret = 0;
	}
out:
	bch2_bkey_buf_exit(&sk, c);
	return ret;
}

static bool rebalance_pred(struct bch_fs *c, void *arg,
			   struct bkey_s_c k,
			   struct bch_io_opts *io_opts,
			   struct data_update_opts *data_opts)
{
	data_opts->rewrite_ptrs	= bch2_bkey_ptrs_need_rebalance(c, io_opts, k);
	data_opts->target	= io_opts->background_target;
	data_opts->write_flags	|= BCH_WRITE_ONLY_SPECIFIED_DEVS;
	return data_opts->rewrite_ptrs != 0;
}
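
/*
 * Service a scan request: walk every extent of the inode (or the whole
 * filesystem, for inum 0) through rebalance_pred, then clear the request
 * cookie on success.
 */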
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs_rebalance *r = &trans->c->rebalance;
	int ret;

	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
	ctxt->stats = &r->scan_stats;

	if (!inum) {
		r->scan_start	= BBPOS_MIN;
		r->scan_end	= BBPOS_MAX;
	} else {
		r->scan_start	= BBPOS(BTREE_ID_extents, POS(inum, 0));
		r->scan_end	= BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
	}

	r->state = BCH_REBALANCE_scanning;

	ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
		commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			  bch2_clear_rebalance_needs_scan(trans, inum, cookie));

	bch2_move_stats_exit(&r->scan_stats, trans->c);
	return ret;
}
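
/*
 * Nothing left to do: wait on the write IO clock. The wait scales with
 * device size - capacity >> 6 wakes us after roughly 1.5% of the smallest rw
 * member device's capacity has been written - assuming 128MB of capacity
 * when no rw device reports one.
 */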
static void rebalance_wait(struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;
	struct io_clock *clock = &c->io_clock[WRITE];
	u64 now = atomic64_read(&clock->now);
	u64 min_member_capacity = bch2_min_rw_member_capacity(c);

	if (min_member_capacity == U64_MAX)
		min_member_capacity = 128 * 2048;

	r->wait_iotime_end	= now + (min_member_capacity >> 6);

	if (r->state != BCH_REBALANCE_waiting) {
		r->wait_iotime_start	= now;
		r->wait_wallclock_start	= ktime_get_real_ns();
		r->state		= BCH_REBALANCE_waiting;
	}

	bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
}
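
/*
 * Main work loop: walk the rebalance_work btree, dispatching each entry to
 * do_rebalance_scan() (cookies) or do_rebalance_extent() (everything else),
 * subject to the rate limiter and the rebalance_enabled switch. Once the
 * btree is exhausted with no work seen, go idle in rebalance_wait().
 */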
static int do_rebalance(struct moving_context *ctxt)
{
	struct btree_trans *trans = ctxt->trans;
	struct bch_fs *c = trans->c;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct btree_iter rebalance_work_iter, extent_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_begin(trans);

	bch2_move_stats_init(&r->work_stats, "rebalance_work");
	bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

	bch2_trans_iter_init(trans, &rebalance_work_iter,
			     BTREE_ID_rebalance_work, POS_MIN,
			     BTREE_ITER_all_snapshots);

	while (!bch2_move_ratelimit(ctxt)) {
		if (!c->opts.rebalance_enabled) {
			bch2_moving_ctxt_flush_all(ctxt);
			kthread_wait_freezable(c->opts.rebalance_enabled ||
					       kthread_should_stop());
		}

		if (kthread_should_stop())
			break;

		bch2_trans_begin(trans);

		ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret || !k.k)
			break;

		ret = k.k->type == KEY_TYPE_cookie
			? do_rebalance_scan(ctxt, k.k->p.inode,
					    le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
			: do_rebalance_extent(ctxt, k.k->p, &extent_iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&rebalance_work_iter);
	}

	bch2_trans_iter_exit(trans, &extent_iter);
	bch2_trans_iter_exit(trans, &rebalance_work_iter);
	bch2_move_stats_exit(&r->scan_stats, c);

	if (!ret &&
	    !kthread_should_stop() &&
	    !atomic64_read(&r->work_stats.sectors_seen) &&
	    !atomic64_read(&r->scan_stats.sectors_seen)) {
		bch2_moving_ctxt_flush_all(ctxt);
		bch2_trans_unlock_long(trans);
		rebalance_wait(c);
	}

	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}
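
/*
 * Kthread entry point: run do_rebalance() until it errors or the thread is
 * stopped.
 */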
static int bch2_rebalance_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct bch_fs_rebalance *r = &c->rebalance;
	struct moving_context ctxt;

	set_freezable();

	bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
			      writepoint_ptr(&c->rebalance_write_point),
			      true);

	while (!kthread_should_stop() && !do_rebalance(&ctxt))
		;

	bch2_moving_ctxt_exit(&ctxt);

	return 0;
}

void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_rebalance *r = &c->rebalance;

	prt_str(out, bch2_rebalance_state_strs[r->state]);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	switch (r->state) {
	case BCH_REBALANCE_waiting: {
		u64 now = atomic64_read(&c->io_clock[WRITE].now);

		prt_str(out, "io wait duration:  ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - r->wait_iotime_start) << 9);
		prt_newline(out);

		prt_str(out, "io wait remaining: ");
		bch2_prt_human_readable_s64(out, (r->wait_iotime_end - now) << 9);
		prt_newline(out);

		prt_str(out, "duration waited:   ");
		bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
		prt_newline(out);
		break;
	}
	case BCH_REBALANCE_working:
		bch2_move_stats_to_text(out, &r->work_stats);
		break;
	case BCH_REBALANCE_scanning:
		bch2_move_stats_to_text(out, &r->scan_stats);
		break;
	}
	prt_newline(out);
	printbuf_indent_sub(out, 2);
}
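
/*
 * Tear down the rebalance thread. The rate is bumped to UINT_MAX first so
 * that a thread throttled in the ratelimiter wakes promptly; the
 * synchronize_rcu() pairs with the RCU-protected read of the thread pointer
 * in rebalance_wakeup().
 */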
void bch2_rebalance_stop(struct bch_fs *c)
{
	struct task_struct *p;

	c->rebalance.pd.rate.rate	= UINT_MAX;
	bch2_ratelimit_reset(&c->rebalance.pd.rate);

	p = rcu_dereference_protected(c->rebalance.thread, 1);
	c->rebalance.thread = NULL;

	if (p) {
		/* for synchronizing with rebalance_wakeup() */
		synchronize_rcu();

		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_rebalance_start(struct bch_fs *c)
{
	struct task_struct *p;
	int ret;

	if (c->rebalance.thread)
		return 0;

	if (c->opts.nochanges)
		return 0;

	p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	bch_err_msg(c, ret, "creating rebalance thread");
	if (ret)
		return ret;

	get_task_struct(p);
	rcu_assign_pointer(c->rebalance.thread, p);
	wake_up_process(p);
	return 0;
}

void bch2_fs_rebalance_init(struct bch_fs *c)
{
	bch2_pd_controller_init(&c->rebalance.pd);
}