// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "io_misc.h"
#include "io_write.h"
#include "rebalance.h"
#include "reflink.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sched/signal.h>
static inline unsigned bkey_type_to_indirect(const struct bkey *k)
{
	switch (k->type) {
	case KEY_TYPE_extent:
		return KEY_TYPE_reflink_v;
	case KEY_TYPE_inline_data:
		return KEY_TYPE_indirect_inline_data;
	default:
		return 0;
	}
}
/* reflink pointers */
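/*
 * A reflink pointer (KEY_TYPE_reflink_p) lives in the extents btree and
 * references a refcounted indirect extent in the reflink btree: @idx is the
 * offset of that indirect extent. front_pad/back_pad record how far the
 * indirect extent extends before/past the range this pointer actually
 * references, so the full indirect extent can be found (and its refcount
 * maintained) from any pointer to it.
 */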
int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags,
			   struct printbuf *err)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	int ret = 0;

	bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad),
			 c, err, reflink_p_front_pad_bad,
			 "idx < front_pad (%llu < %u)",
			 le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
fsck_err:
	return ret;
}
void bch2_reflink_p_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

	prt_printf(out, "idx %llu front_pad %u back_pad %u",
		   le64_to_cpu(p.v->idx),
		   le32_to_cpu(p.v->front_pad),
		   le32_to_cpu(p.v->back_pad));
}
bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_p l = bkey_s_to_reflink_p(_l);
	struct bkey_s_c_reflink_p r = bkey_s_c_to_reflink_p(_r);

	/*
	 * Disabled for now, the triggers code needs to be reworked for merging
	 * of reflink pointers to work:
	 */
	return false;

	if (le64_to_cpu(l.v->idx) + l.k->size != le64_to_cpu(r.v->idx))
		return false;

	bch2_key_resize(l.k, l.k->size + r.k->size);
	return true;
}
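/*
 * Transactional trigger: walk the indirect extents covered by a reflink
 * pointer one segment at a time, adjusting each segment's refcount by +1 on
 * insert and -1 on overwrite (delete), via btree updates in the current
 * transaction.
 */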
static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
			struct bkey_s_c_reflink_p p, u64 *idx,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i *k;
	__le64 *refcount;
	int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
	struct printbuf buf = PRINTBUF;
	int ret;

	k = bch2_bkey_get_mut_noupdate(trans, &iter,
			BTREE_ID_reflink, POS(0, *idx),
			BTREE_ITER_with_updates);
	ret = PTR_ERR_OR_ZERO(k);
	if (ret)
		goto err;

	refcount = bkey_refcount(bkey_i_to_s(k));
	if (!refcount) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"nonexistent indirect extent at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
		bch2_bkey_val_to_text(&buf, c, p.s_c);
		bch2_trans_inconsistent(trans,
			"indirect extent refcount underflow at %llu while marking\n  %s",
			*idx, buf.buf);
		ret = -EIO;
		goto err;
	}

	if (flags & BTREE_TRIGGER_insert) {
		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
		u64 pad;

		/* Grow the pads so the pointer covers the whole indirect extent: */
		pad = max_t(s64, le32_to_cpu(v->front_pad),
			    le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
		BUG_ON(pad > U32_MAX);
		v->front_pad = cpu_to_le32(pad);

		pad = max_t(s64, le32_to_cpu(v->back_pad),
			    k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
		BUG_ON(pad > U32_MAX);
		v->back_pad = cpu_to_le32(pad);
	}

	le64_add_cpu(refcount, add);

	bch2_btree_iter_set_pos_to_extent_start(&iter);
	ret = bch2_trans_update(trans, &iter, k, 0);
	if (ret)
		goto err;

	*idx = k->k.p.offset;
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
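/*
 * GC/check trigger: instead of updating the btree, verify (and optionally
 * repair) against the in-memory reflink_gc_table entry covering this segment
 * of the pointer.
 */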
static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
				struct bkey_s_c_reflink_p p, u64 *idx,
				enum btree_iter_update_trigger_flags flags,
				size_t r_idx)
{
	struct bch_fs *c = trans->c;
	struct reflink_gc *r;
	int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
	u64 start = le64_to_cpu(p.v->idx);
	u64 end = le64_to_cpu(p.v->idx) + p.k->size;
	u64 next_idx = end + le32_to_cpu(p.v->back_pad);
	s64 ret = 0;
	struct printbuf buf = PRINTBUF;

	if (r_idx >= c->reflink_gc_nr)
		goto not_found;

	r = genradix_ptr(&c->reflink_gc_table, r_idx);
	next_idx = min(next_idx, r->offset - r->size);
	if (*idx < r->offset)
		goto not_found;

	BUG_ON((s64) r->refcount + add < 0);

	if (flags & BTREE_TRIGGER_gc)
		r->refcount += add;
	*idx = r->offset;
	return 0;
not_found:
	BUG_ON(!(flags & BTREE_TRIGGER_check_repair));

	if (fsck_err(c, reflink_p_to_missing_reflink_v,
		     "pointer to missing indirect extent\n"
		     "  %s\n"
		     "  missing range %llu-%llu",
		     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
		     *idx, next_idx)) {
		struct bkey_i *update = bch2_bkey_make_mut_noupdate(trans, p.s_c);
		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		if (next_idx <= start) {
			/* Missing range lies entirely within the front pad: */
			bkey_i_to_reflink_p(update)->v.front_pad = cpu_to_le32(start - next_idx);
		} else if (*idx >= end) {
			/* Missing range lies entirely within the back pad: */
			bkey_i_to_reflink_p(update)->v.back_pad = cpu_to_le32(*idx - end);
		} else {
			/* The pointer itself is dangling; replace it with an error key: */
			bkey_error_init(update);
			update->k.p	= p.k->p;
			update->k.size	= p.k->size;
			set_bkey_val_u64s(&update->k, 0);
		}

		ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun);
	}

	*idx = next_idx;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
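/*
 * Shared trigger body: the transactional path walks the reflink btree
 * directly; the gc/check path binary searches c->reflink_gc_table for the
 * first entry that could cover @idx, then walks forward from there.
 */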
static int __trigger_reflink_p(struct btree_trans *trans,
			enum btree_id btree_id, unsigned level, struct bkey_s_c k,
			enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
	int ret = 0;

	u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
	u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);

	if (flags & BTREE_TRIGGER_transactional) {
		while (idx < end && !ret)
			ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
	}

	if (flags & (BTREE_TRIGGER_check_repair|BTREE_TRIGGER_gc)) {
		size_t l = 0, r = c->reflink_gc_nr;

		while (l < r) {
			size_t m = l + (r - l) / 2;
			struct reflink_gc *ref = genradix_ptr(&c->reflink_gc_table, m);

			if (ref->offset <= idx)
				l = m + 1;
			else
				r = m;
		}

		while (idx < end && !ret)
			ret = gc_trigger_reflink_p_segment(trans, p, &idx, flags, l++);
	}

	return ret;
}
int bch2_trigger_reflink_p(struct btree_trans *trans,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c old,
			   struct bkey_s new,
			   enum btree_iter_update_trigger_flags flags)
{
	if ((flags & BTREE_TRIGGER_transactional) &&
	    (flags & BTREE_TRIGGER_insert)) {
		struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;

		v->front_pad = v->back_pad = 0;
	}

	return trigger_run_overwrite_then_insert(__trigger_reflink_p, trans, btree_id, level, old, new, flags);
}
/* indirect extents */
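/*
 * An indirect extent (KEY_TYPE_reflink_v) is an ordinary extent value
 * prefixed with a 64-bit refcount; it lives in the reflink btree and goes
 * away when the last reflink pointer referencing it is deleted.
 */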
int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags,
			   struct printbuf *err)
{
	return bch2_bkey_ptrs_invalid(c, k, flags, err);
}
void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);

	prt_printf(out, "refcount: %llu ", le64_to_cpu(r.v->refcount));

	bch2_bkey_ptrs_to_text(out, c, k);
}
#if 0
Currently disabled, needs to be debugged:

bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r)
{
	struct bkey_s_reflink_v   l = bkey_s_to_reflink_v(_l);
	struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(_r);

	return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
#endif
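/*
 * When an update drops an indirect extent's refcount to zero, rewrite the
 * update in place as a deletion so the key is removed instead of being left
 * behind with refcount == 0.
 */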
static inline void
check_indirect_extent_deleting(struct bkey_s new,
			       enum btree_iter_update_trigger_flags *flags)
{
	if ((*flags & BTREE_TRIGGER_insert) && !*bkey_refcount(new)) {
		new.k->type = KEY_TYPE_deleted;
		new.k->size = 0;
		set_bkey_val_u64s(new.k, 0);
		*flags &= ~BTREE_TRIGGER_insert;
	}
}
int bch2_trigger_reflink_v(struct btree_trans *trans,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c old, struct bkey_s new,
			   enum btree_iter_update_trigger_flags flags)
{
	if ((flags & BTREE_TRIGGER_transactional) &&
	    (flags & BTREE_TRIGGER_insert))
		check_indirect_extent_deleting(new, &flags);

	return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
}
/* indirect inline data */
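/*
 * Indirect inline data is the reflinked counterpart of an inline-data
 * extent: the data is stored in the key's value after the refcount, instead
 * of behind extent pointers.
 */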
int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
				      enum bch_validate_flags flags,
				      struct printbuf *err)
{
	return 0;
}
void bch2_indirect_inline_data_to_text(struct printbuf *out,
				       struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);

	prt_printf(out, "refcount %llu datalen %u: %*phN",
		   le64_to_cpu(d.v->refcount), datalen,
		   min(datalen, 32U), d.v->data);
}
int bch2_trigger_indirect_inline_data(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_s new,
			      enum btree_iter_update_trigger_flags flags)
{
	check_indirect_extent_deleting(new, &flags);

	return 0;
}
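/*
 * Make an extent shareable: append a new indirect extent holding @orig's
 * value at the end of the reflink btree, then rewrite @orig in place as a
 * reflink pointer to it.
 */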
static int bch2_make_extent_indirect(struct btree_trans *trans,
				     struct btree_iter *extent_iter,
				     struct bkey_i *orig)
{
	struct bch_fs *c = trans->c;
	struct btree_iter reflink_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i *r_v;
	struct bkey_i_reflink_p *r_p;
	__le64 *refcount;
	int ret;

	if (orig->k.type == KEY_TYPE_inline_data)
		bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);

	bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_prev(&reflink_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	r_v = bch2_trans_kmalloc(trans, sizeof(__le64) + bkey_bytes(&orig->k));
	ret = PTR_ERR_OR_ZERO(r_v);
	if (ret)
		goto err;

	bkey_init(&r_v->k);
	r_v->k.type	= bkey_type_to_indirect(&orig->k);
	r_v->k.p	= reflink_iter.pos;
	bch2_key_resize(&r_v->k, orig->k.size);
	r_v->k.version	= orig->k.version;

	set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));

	refcount	= bkey_refcount(bkey_i_to_s(r_v));
	*refcount	= 0;
	memcpy(refcount + 1, &orig->v, bkey_val_bytes(&orig->k));

	ret = bch2_trans_update(trans, &reflink_iter, r_v, 0);
	if (ret)
		goto err;

	/*
	 * orig is in a bkey_buf which statically allocates 5 64s for the val,
	 * so we know it will be big enough:
	 */
	orig->k.type = KEY_TYPE_reflink_p;
	r_p = bkey_i_to_reflink_p(orig);
	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));

	/* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
	__underlying_memset(&r_p->v, 0, sizeof(r_p->v));
#else
	memset(&r_p->v, 0, sizeof(r_p->v));
#endif

	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));

	ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
				BTREE_UPDATE_internal_snapshot_node);
err:
	bch2_trans_iter_exit(trans, &reflink_iter);

	return ret;
}
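/*
 * Find the next extent in [iter->pos, end) that can be reflinked, skipping
 * unwritten extents (there is nothing on disk to share yet).
 */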
static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
{
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
		if (bkey_extent_is_unwritten(k))
			continue;

		if (bkey_extent_is_data(k.k))
			return k;
	}

	if (bkey_ge(iter->pos, end))
		bch2_btree_iter_set_pos(iter, end);
	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
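/*
 * FICLONERANGE-style remap: for each source extent in the range, make it
 * indirect if necessary, then install a reflink pointer to it at the
 * corresponding destination offset; holes in the source punch matching holes
 * in the destination. Returns sectors remapped if any, otherwise an error.
 */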
s64 bch2_remap_range(struct bch_fs *c,
		     subvol_inum dst_inum, u64 dst_offset,
		     subvol_inum src_inum, u64 src_offset,
		     u64 remap_sectors,
		     u64 new_i_size, s64 *i_sectors_delta)
{
	struct btree_trans *trans;
	struct btree_iter dst_iter, src_iter;
	struct bkey_s_c src_k;
	struct bkey_buf new_dst, new_src;
	struct bpos dst_start = POS(dst_inum.inum, dst_offset);
	struct bpos src_start = POS(src_inum.inum, src_offset);
	struct bpos dst_end = dst_start, src_end = src_start;
	struct bch_io_opts opts;
	struct bpos src_want;
	u64 dst_done = 0;
	u32 dst_snapshot, src_snapshot;
	int ret = 0, ret2 = 0;
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_reflink))
		return -BCH_ERR_erofs_no_writes;

	bch2_check_set_feature(c, BCH_FEATURE_reflink);

	dst_end.offset += remap_sectors;
	src_end.offset += remap_sectors;

	bch2_bkey_buf_init(&new_dst);
	bch2_bkey_buf_init(&new_src);
	trans = bch2_trans_get(c);
	ret = bch2_inum_opts_get(trans, src_inum, &opts);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
			     BTREE_ITER_intent);
	bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
			     BTREE_ITER_intent);

	while ((ret == 0 ||
		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
	       bkey_lt(dst_iter.pos, dst_end)) {
		struct disk_reservation disk_res = { 0 };

		bch2_trans_begin(trans);
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bch2_subvolume_get_snapshot(trans, src_inum.subvol,
						  &src_snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);

		ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
						  &dst_snapshot);
		if (ret)
			continue;

		bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
		if (dst_inum.inum < src_inum.inum) {
			/* Avoid some lock cycle transaction restarts */
			ret = bch2_btree_iter_traverse(&dst_iter);
			if (ret)
				continue;
		}

		dst_done = dst_iter.pos.offset - dst_start.offset;
		src_want = POS(src_start.inode, src_start.offset + dst_done);
		bch2_btree_iter_set_pos(&src_iter, src_want);
		src_k = get_next_src(&src_iter, src_end);
		ret = bkey_err(src_k);
		if (ret)
			continue;

		if (bkey_lt(src_want, src_iter.pos)) {
			/* Hole in the source: punch a matching hole in the destination */
			ret = bch2_fpunch_at(trans, &dst_iter, dst_inum,
					min(dst_end.offset,
					    dst_iter.pos.offset +
					    src_iter.pos.offset - src_want.offset),
					i_sectors_delta);
			continue;
		}
		if (src_k.k->type != KEY_TYPE_reflink_p) {
			bch2_btree_iter_set_pos_to_extent_start(&src_iter);

			bch2_bkey_buf_reassemble(&new_src, c, src_k);
			src_k = bkey_i_to_s_c(new_src.k);

			ret = bch2_make_extent_indirect(trans, &src_iter,
						new_src.k);
			if (ret)
				continue;

			BUG_ON(src_k.k->type != KEY_TYPE_reflink_p);
		}
		if (src_k.k->type == KEY_TYPE_reflink_p) {
			struct bkey_s_c_reflink_p src_p =
				bkey_s_c_to_reflink_p(src_k);
			struct bkey_i_reflink_p *dst_p =
				bkey_reflink_p_init(new_dst.k);

			u64 offset = le64_to_cpu(src_p.v->idx) +
				(src_want.offset -
				 bkey_start_offset(src_k.k));

			dst_p->v.idx = cpu_to_le64(offset);
		} else {
			BUG();
		}
		new_dst.k->k.p = dst_iter.pos;
		bch2_key_resize(&new_dst.k->k,
				min(src_k.k->p.offset - src_want.offset,
				    dst_end.offset - dst_iter.pos.offset));

		ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k, &opts) ?:
			bch2_extent_update(trans, dst_inum, &dst_iter,
					new_dst.k, &disk_res,
					new_i_size, i_sectors_delta,
					true);
		bch2_disk_reservation_put(c, &disk_res);
	}
	bch2_trans_iter_exit(trans, &dst_iter);
	bch2_trans_iter_exit(trans, &src_iter);

	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
	BUG_ON(bkey_gt(dst_iter.pos, dst_end));

	dst_done = dst_iter.pos.offset - dst_start.offset;
	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);
	do {
		struct bch_inode_unpacked inode_u;
		struct btree_iter inode_iter = { NULL };

		bch2_trans_begin(trans);

		ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
				       dst_inum, BTREE_ITER_intent);

		if (!ret2 &&
		    inode_u.bi_size < new_i_size) {
			inode_u.bi_size = new_i_size;
			ret2  = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
		}

		bch2_trans_iter_exit(trans, &inode_iter);
	} while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
err:
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&new_src, c);
	bch2_bkey_buf_exit(&new_dst, c);

	bch2_write_ref_put(c, BCH_WRITE_REF_reflink);

	return dst_done ?: ret ?: ret2;
}