// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "clock.h"
#include "compress.h"
#include "ec.h"
#include "error.h"
#include "extent_update.h"
#include "inode.h"
#include "io_write.h"
#include "journal.h"
#include "keylist.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT

static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
				       u64 now, int rw)
{
	u64 latency_capable =
		ca->io_latency[rw].quantiles.entries[QUANTILE_IDX(1)].m;
	/* ideally we'd be taking into account the device's variance here: */
	u64 latency_threshold = latency_capable << (rw == READ ? 2 : 3);
	s64 latency_over = io_latency - latency_threshold;

	if (latency_threshold && latency_over > 0) {
		/*
		 * bump up congested by approximately latency_over * 4 /
		 * latency_threshold - we don't need much accuracy here so don't
		 * bother with the divide:
		 */
		if (atomic_read(&ca->congested) < CONGESTED_MAX)
			atomic_add(latency_over >>
				   max_t(int, ilog2(latency_threshold) - 2, 0),
				   &ca->congested);

		ca->congested_last = now;
	} else if (atomic_read(&ca->congested) > 0) {
		atomic_dec(&ca->congested);
	}
}

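/*
 * Worked example for bch2_congested_acct(), in abstract time units: with
 * latency_capable = 1024, a write has latency_threshold = 1024 << 3 = 8192.
 * An IO taking 10240 gives latency_over = 2048, and we add
 * 2048 >> (ilog2(8192) - 2) = 2048 >> 11 = 1 to ca->congested - matching the
 * exact latency_over * 4 / latency_threshold = 1.
 */
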
void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
{
	atomic64_t *latency = &ca->cur_latency[rw];
	u64 now = local_clock();
	u64 io_latency = time_after64(now, submit_time)
		? now - submit_time
		: 0;
	u64 old, new;

	old = atomic64_read(latency);
	do {
		/*
		 * If the io latency was reasonably close to the current
		 * latency, skip doing the update and atomic operation - most of
		 * the time:
		 */
		if (abs((int) (old - io_latency)) < (old >> 1) &&
		    now & ~(~0U << 5))
			break;

		new = ewma_add(old, io_latency, 5);
	} while (!atomic64_try_cmpxchg(latency, &old, new));

	bch2_congested_acct(ca, io_latency, now, rw);

	__bch2_time_stats_update(&ca->io_latency[rw].stats, submit_time, now);
}

#endif

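/*
 * With the usual ewma_add() definition, ewma_add(old, io_latency, 5) is
 * roughly (31 * old + io_latency) / 32 - an exponentially weighted moving
 * average with weight 1/32. The early break in bch2_latency_acct() means the
 * cmpxchg loop only runs when a sample differs from the average by more than
 * 50%, or on roughly one call in 32 (whenever the low 5 bits of the clock
 * are zero).
 */
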
/* Allocate, free from mempool: */

void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
{
	struct page *page;

	if (likely(!*using_mempool)) {
		page = alloc_page(GFP_NOFS);
		if (unlikely(!page)) {
			mutex_lock(&c->bio_bounce_pages_lock);
			*using_mempool = true;
			goto pool_alloc;
		}
	} else {
pool_alloc:
		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
	}

	return page;
}

void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
			       size_t size)
{
	bool using_mempool = false;

	while (size) {
		struct page *page = __bio_alloc_page_pool(c, &using_mempool);
		unsigned len = min_t(size_t, PAGE_SIZE, size);

		BUG_ON(!bio_add_page(bio, page, len, 0));
		size -= len;
	}

	if (using_mempool)
		mutex_unlock(&c->bio_bounce_pages_lock);
}

/* Extent update path: */

int bch2_sum_sector_overwrites(struct btree_trans *trans,
			       struct btree_iter *extent_iter,
			       struct bkey_i *new,
			       bool *usage_increasing,
			       s64 *i_sectors_delta,
			       s64 *disk_sectors_delta)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	unsigned new_replicas = bch2_bkey_replicas(c, bkey_i_to_s_c(new));
	bool new_compressed = bch2_bkey_sectors_compressed(bkey_i_to_s_c(new));
	int ret = 0;

	*usage_increasing	= false;
	*i_sectors_delta	= 0;
	*disk_sectors_delta	= 0;

	bch2_trans_copy_iter(&iter, extent_iter);

	for_each_btree_key_max_continue_norestart(iter,
				new->k.p, BTREE_ITER_slots, old, ret) {
		s64 sectors = min(new->k.p.offset, old.k->p.offset) -
			max(bkey_start_offset(&new->k),
			    bkey_start_offset(old.k));

		*i_sectors_delta += sectors *
			(bkey_extent_is_allocation(&new->k) -
			 bkey_extent_is_allocation(old.k));

		*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
		*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
			? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
			: 0;

		if (!*usage_increasing &&
		    (new->k.p.snapshot != old.k->p.snapshot ||
		     new_replicas > bch2_bkey_replicas(c, old) ||
		     (!new_compressed && bch2_bkey_sectors_compressed(old))))
			*usage_increasing = true;

		if (bkey_ge(old.k->p, new->k.p))
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

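/*
 * Example: overwriting sectors [0,8) of an existing fully allocated extent
 * with a new allocation in the same snapshot gives sectors = 8, so
 * i_sectors_delta += 8 * (1 - 1) = 0, while disk_sectors_delta changes by
 * 8 * (nr_ptrs_allocated(new) - nr_ptrs_fully_allocated(old)) - zero when
 * the replication level is unchanged, positive when it increases.
 */
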
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
						    struct btree_iter *extent_iter,
						    u64 new_i_size,
						    s64 i_sectors_delta)
{
	/*
	 * Crazy performance optimization:
	 * Every extent update needs to also update the inode: the inode trigger
	 * will set bi->journal_seq to the journal sequence number of this
	 * transaction - for fsync.
	 *
	 * But if that's the only reason we're updating the inode (we're not
	 * updating bi_size or bi_sectors), then we don't need the inode update
	 * to be journalled - if we crash, the bi_journal_seq update will be
	 * lost, but that's fine.
	 */
	unsigned inode_update_flags = BTREE_UPDATE_nojournal;

	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			      SPOS(0,
				   extent_iter->pos.inode,
				   extent_iter->snapshot),
			      BTREE_ITER_intent|
			      BTREE_ITER_cached);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ret;

	/*
	 * varint_decode_fast(), in the inode .invalid method, reads up to 7
	 * bytes past the end of the buffer:
	 */
	struct bkey_i *k_mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + 8);
	ret = PTR_ERR_OR_ZERO(k_mut);
	if (unlikely(ret))
		goto err;

	bkey_reassemble(k_mut, k);

	if (unlikely(k_mut->k.type != KEY_TYPE_inode_v3)) {
		k_mut = bch2_inode_to_v3(trans, k_mut);
		ret = PTR_ERR_OR_ZERO(k_mut);
		if (unlikely(ret))
			goto err;
	}

	struct bkey_i_inode_v3 *inode = bkey_i_to_inode_v3(k_mut);

	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
	    new_i_size > le64_to_cpu(inode->v.bi_size)) {
		inode->v.bi_size = cpu_to_le64(new_i_size);
		inode_update_flags = 0;
	}

	if (i_sectors_delta) {
		le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
		inode_update_flags = 0;
	}

	if (inode->k.p.snapshot != iter.snapshot) {
		inode->k.p.snapshot = iter.snapshot;
		inode_update_flags = 0;
	}

	ret = bch2_trans_update(trans, &iter, &inode->k_i,
				BTREE_UPDATE_internal_snapshot_node|
				inode_update_flags);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_extent_update(struct btree_trans *trans,
		       subvol_inum inum,
		       struct btree_iter *iter,
		       struct bkey_i *k,
		       struct disk_reservation *disk_res,
		       u64 new_i_size,
		       s64 *i_sectors_delta_total,
		       bool check_enospc)
{
	struct bpos next_pos;
	bool usage_increasing;
	s64 i_sectors_delta = 0, disk_sectors_delta = 0;
	int ret;

	/*
	 * This traverses the iterator without changing iter->path->pos to
	 * search_key() (which is pos + 1 for extents): we want there to be a
	 * path already traversed at iter->pos because
	 * bch2_trans_extent_update() will use it to attempt extent merging
	 */
	ret = __bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	ret = bch2_extent_trim_atomic(trans, iter, k);
	if (ret)
		return ret;

	next_pos = k->k.p;

	ret = bch2_sum_sector_overwrites(trans, iter, k,
			&usage_increasing,
			&i_sectors_delta,
			&disk_sectors_delta);
	if (ret)
		return ret;

	if (disk_res &&
	    disk_sectors_delta > (s64) disk_res->sectors) {
		ret = bch2_disk_reservation_add(trans->c, disk_res,
					disk_sectors_delta - disk_res->sectors,
					!check_enospc || !usage_increasing
					? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			return ret;
	}

	/*
	 * Note:
	 * We always have to do an inode update - even when i_size/i_sectors
	 * aren't changing - for fsync to work properly; fsync relies on
	 * inode->bi_journal_seq which is updated by the trigger code:
	 */
	ret =   bch2_extent_update_i_size_sectors(trans, iter,
						  min(k->k.p.offset << 9, new_i_size),
						  i_sectors_delta) ?:
		bch2_trans_update(trans, iter, k, 0) ?:
		bch2_trans_commit(trans, disk_res, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc);
	if (unlikely(ret))
		return ret;

	if (i_sectors_delta_total)
		*i_sectors_delta_total += i_sectors_delta;
	bch2_btree_iter_set_pos(iter, next_pos);
	return 0;
}

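/*
 * The ?: chain above is the usual bcachefs error-chaining idiom: every call
 * returns 0 on success, so evaluation short-circuits at the first nonzero
 * error code and that is what ends up in ret.
 */
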
static int bch2_write_index_default(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct bkey_buf sk;
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *k = bch2_keylist_front(keys);
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	subvol_inum inum = {
		.subvol = op->subvol,
		.inum	= k->k.p.inode,
	};
	int ret;

	BUG_ON(!inum.subvol);

	bch2_bkey_buf_init(&sk);

	do {
		bch2_trans_begin(trans);

		k = bch2_keylist_front(keys);
		bch2_bkey_buf_copy(&sk, c, k);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol,
						  &sk.k->k.p.snapshot);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
				     bkey_start_pos(&sk.k->k),
				     BTREE_ITER_slots|BTREE_ITER_intent);

		ret =   bch2_bkey_set_needs_rebalance(c, &op->opts, sk.k) ?:
			bch2_extent_update(trans, inum, &iter, sk.k,
					&op->res,
					op->new_i_size, &op->i_sectors_delta,
					op->flags & BCH_WRITE_CHECK_ENOSPC);
		bch2_trans_iter_exit(trans, &iter);

		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		if (bkey_ge(iter.pos, k->k.p))
			bch2_keylist_pop_front(&op->insert_keys);
		else
			bch2_cut_front(iter.pos, k);
	} while (!bch2_keylist_empty(keys));

	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

static void __bch2_write_op_error(struct printbuf *out, struct bch_write_op *op,
				  u64 offset)
{
	bch2_inum_offset_err_msg(op->c, out,
				 (subvol_inum) { op->subvol, op->pos.inode, },
				 offset << 9);
	prt_printf(out, "write error%s: ",
		   op->flags & BCH_WRITE_MOVE ? "(internal move)" : "");
}

void bch2_write_op_error(struct printbuf *out, struct bch_write_op *op)
{
	__bch2_write_op_error(out, op, op->pos.offset);
}

static void bch2_write_op_error_trans(struct btree_trans *trans, struct printbuf *out,
				      struct bch_write_op *op, u64 offset)
{
	bch2_inum_offset_err_msg_trans(trans, out,
				       (subvol_inum) { op->subvol, op->pos.inode, },
				       offset << 9);
	prt_printf(out, "write error%s: ",
		   op->flags & BCH_WRITE_MOVE ? "(internal move)" : "");
}

void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
			       enum bch_data_type type,
			       const struct bkey_i *k,
			       bool nocow)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
	struct bch_write_bio *n;

	BUG_ON(c->opts.nochanges);

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = nocow
			? bch2_dev_have_ref(c, ptr->dev)
			: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);

		if (to_entry(ptr + 1) < ptrs.end) {
			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));

			n->bio.bi_end_io	= wbio->bio.bi_end_io;
			n->bio.bi_private	= wbio->bio.bi_private;
			n->parent		= wbio;
			n->split		= true;
			n->bounce		= false;
			n->put_bio		= true;
			n->bio.bi_opf		= wbio->bio.bi_opf;
			bio_inc_remaining(&wbio->bio);
		} else {
			n = wbio;
			n->split		= false;
		}

		n->c			= c;
		n->dev			= ptr->dev;
		n->have_ioref		= ca != NULL;
		n->nocow		= nocow;
		n->submit_time		= local_clock();
		n->inode_offset		= bkey_start_offset(&k->k);
		if (nocow)
			n->nocow_bucket	= PTR_BUCKET_NR(ca, ptr);
		n->bio.bi_iter.bi_sector = ptr->offset;

		if (likely(n->have_ioref)) {
			this_cpu_add(ca->io_done->sectors[WRITE][type],
				     bio_sectors(&n->bio));

			bio_set_dev(&n->bio, ca->disk_sb.bdev);

			if (type != BCH_DATA_btree && unlikely(c->opts.no_data_io)) {
				bio_endio(&n->bio);
				continue;
			}

			submit_bio(&n->bio);
		} else {
			n->bio.bi_status	= BLK_STS_REMOVED;
			bio_endio(&n->bio);
		}
	}
}

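/*
 * Replication fan-out: every pointer but the last gets a clone of the
 * original bio, with bio_inc_remaining() keeping the parent's completion
 * pending until all clones finish; the last pointer reuses the original bio
 * itself, so a single-replica write involves no cloning at all.
 */
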
static void __bch2_write(struct bch_write_op *);

static void bch2_write_done(struct closure *cl)
{
	struct bch_write_op *op = container_of(cl, struct bch_write_op, cl);
	struct bch_fs *c = op->c;

	EBUG_ON(op->open_buckets.nr);

	bch2_time_stats_update(&c->times[BCH_TIME_data_write], op->start_time);
	bch2_disk_reservation_put(c, &op->res);

	if (!(op->flags & BCH_WRITE_MOVE))
		bch2_write_ref_put(c, BCH_WRITE_REF_write);
	bch2_keylist_free(&op->insert_keys, op->inline_keys);

	EBUG_ON(cl->parent);
	closure_debug_destroy(cl);
	if (op->end_io)
		op->end_io(op);
}

static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
	struct keylist *keys = &op->insert_keys;
	struct bkey_i *src, *dst = keys->keys, *n;

	for (src = keys->keys; src != keys->top; src = n) {
		n = bkey_next(src);

		if (bkey_extent_is_direct_data(&src->k)) {
			bch2_bkey_drop_ptrs(bkey_i_to_s(src), ptr,
					    test_bit(ptr->dev, op->failed.d));

			if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
				return -EIO;
		}

		if (dst != src)
			memmove_u64s_down(dst, src, src->k.u64s);
		dst = bkey_next(dst);
	}

	keys->top = dst;
	return 0;
}

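/*
 * Keys that still have live pointers after dropping the failed devices are
 * compacted towards the front of the keylist with memmove_u64s_down(); a key
 * left with no pointers at all means the write failed on every replica,
 * which is a hard error.
 */
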
/**
 * __bch2_write_index - after a write, update index to point to new data
 * @op:		bch_write_op to process
 */
static void __bch2_write_index(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct keylist *keys = &op->insert_keys;
	unsigned dev;
	int ret = 0;

	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		ret = bch2_write_drop_io_error_ptrs(op);
		if (ret)
			goto err;
	}

	if (!bch2_keylist_empty(keys)) {
		u64 sectors_start = keylist_sectors(keys);

		ret = !(op->flags & BCH_WRITE_MOVE)
			? bch2_write_index_default(op)
			: bch2_data_update_index_update(op);

		BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

		if (unlikely(ret && !bch2_err_matches(ret, EROFS))) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			struct printbuf buf = PRINTBUF;
			__bch2_write_op_error(&buf, op, bkey_start_offset(&insert->k));
			prt_printf(&buf, "btree update error: %s", bch2_err_str(ret));
			bch_err_ratelimited(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		if (ret)
			goto err;
	}
out:
	/* If a bucket wasn't written, we can't erasure code it: */
	for_each_set_bit(dev, op->failed.d, BCH_SB_MEMBERS_MAX)
		bch2_open_bucket_write_error(c, &op->open_buckets, dev);

	bch2_open_buckets_put(c, &op->open_buckets);
	return;
err:
	keys->top = keys->keys;
	op->error = ret;
	op->flags |= BCH_WRITE_SUBMITTED;
	goto out;
}

static inline void __wp_update_state(struct write_point *wp, enum write_point_state state)
{
	if (state != wp->state) {
		u64 now = ktime_get_ns();

		if (wp->last_state_change &&
		    time_after64(now, wp->last_state_change))
			wp->time[wp->state] += now - wp->last_state_change;
		wp->state = state;
		wp->last_state_change = now;
	}
}

static inline void wp_update_state(struct write_point *wp, bool running)
{
	enum write_point_state state;

	state = running ? WRITE_POINT_running :
		!list_empty(&wp->writes) ? WRITE_POINT_waiting_io
					 : WRITE_POINT_stopped;

	__wp_update_state(wp, state);
}

static CLOSURE_CALLBACK(bch2_write_index)
{
	closure_type(op, struct bch_write_op, cl);
	struct write_point *wp = op->wp;
	struct workqueue_struct *wq = index_update_wq(op);
	unsigned long flags;

	if ((op->flags & BCH_WRITE_SUBMITTED) &&
	    (op->flags & BCH_WRITE_MOVE))
		bch2_bio_free_pages_pool(op->c, &op->wbio.bio);

	spin_lock_irqsave(&wp->writes_lock, flags);
	if (wp->state == WRITE_POINT_waiting_io)
		__wp_update_state(wp, WRITE_POINT_waiting_work);
	list_add_tail(&op->wp_list, &wp->writes);
	spin_unlock_irqrestore(&wp->writes_lock, flags);

	queue_work(wq, &wp->index_update_work);
}

static inline void bch2_write_queue(struct bch_write_op *op, struct write_point *wp)
{
	op->wp = wp;

	if (wp->state == WRITE_POINT_stopped) {
		spin_lock_irq(&wp->writes_lock);
		__wp_update_state(wp, WRITE_POINT_waiting_io);
		spin_unlock_irq(&wp->writes_lock);
	}
}

void bch2_write_point_do_index_updates(struct work_struct *work)
{
	struct write_point *wp =
		container_of(work, struct write_point, index_update_work);
	struct bch_write_op *op;

	while (1) {
		spin_lock_irq(&wp->writes_lock);
		op = list_pop_entry(&wp->writes, struct bch_write_op, wp_list);
		wp_update_state(wp, op != NULL);
		spin_unlock_irq(&wp->writes_lock);

		if (!op)
			break;

		op->flags |= BCH_WRITE_IN_WORKER;

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_SUBMITTED))
			__bch2_write(op);
		else
			bch2_write_done(&op->cl);
	}
}

static void bch2_write_endio(struct bio *bio)
{
	struct closure *cl		= bio->bi_private;
	struct bch_write_op *op		= container_of(cl, struct bch_write_op, cl);
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= wbio->have_ioref
		? bch2_dev_have_ref(c, wbio->dev)
		: NULL;

	if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
				    op->pos.inode,
				    wbio->inode_offset << 9,
				    "data write error: %s",
				    bch2_blk_status_to_str(bio->bi_status))) {
		set_bit(wbio->dev, op->failed.d);
		op->flags |= BCH_WRITE_IO_ERROR;
	}

	if (wbio->nocow) {
		bch2_bucket_nocow_unlock(&c->nocow_locks,
					 POS(ca->dev_idx, wbio->nocow_bucket),
					 BUCKET_NOCOW_LOCK_UPDATE);
		set_bit(wbio->dev, op->devs_need_flush->d);
	}

	if (wbio->have_ioref) {
		bch2_latency_acct(ca, wbio->submit_time, WRITE);
		percpu_ref_put(&ca->io_ref);
	}

	if (wbio->bounce)
		bch2_bio_free_pages_pool(c, bio);

	if (wbio->put_bio)
		bio_put(bio);

	if (parent)
		bio_endio(&parent->bio);
	else
		closure_put(cl);
}

static void init_append_extent(struct bch_write_op *op,
			       struct write_point *wp,
			       struct bversion version,
			       struct bch_extent_crc_unpacked crc)
{
	struct bkey_i_extent *e;

	op->pos.offset += crc.uncompressed_size;

	e = bkey_extent_init(op->insert_keys.top);
	e->k.p		= op->pos;
	e->k.size	= crc.uncompressed_size;
	e->k.bversion	= version;

	if (crc.csum_type ||
	    crc.compression_type ||
	    crc.nonce)
		bch2_extent_crc_append(&e->k_i, crc);

	bch2_alloc_sectors_append_ptrs_inlined(op->c, wp, &e->k_i, crc.compressed_size,
				       op->flags & BCH_WRITE_CACHED);

	bch2_keylist_push(&op->insert_keys);
}

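/*
 * Note: op->pos.offset advances by crc.uncompressed_size - extent positions
 * are always in uncompressed sectors - while the pointers appended above
 * only consume crc.compressed_size sectors from the write point.
 */
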
static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
					struct write_point *wp,
					struct bio *src,
					bool *page_alloc_failed,
					void *buf)
{
	struct bch_write_bio *wbio;
	struct bio *bio;
	unsigned output_available =
		min(wp->sectors_free << 9, src->bi_iter.bi_size);
	unsigned pages = DIV_ROUND_UP(output_available +
				      (buf
				       ? ((unsigned long) buf & (PAGE_SIZE - 1))
				       : 0), PAGE_SIZE);

	pages = min(pages, BIO_MAX_VECS);

	bio = bio_alloc_bioset(NULL, pages, 0,
			       GFP_NOFS, &c->bio_write);
	wbio			= wbio_init(bio);
	wbio->put_bio		= true;
	/* copy WRITE_SYNC flag */
	wbio->bio.bi_opf	= src->bi_opf;

	if (buf) {
		bch2_bio_map(bio, buf, output_available);
		return bio;
	}

	wbio->bounce		= true;

	/*
	 * We can't use mempool for more than c->sb.encoded_extent_max
	 * worth of pages, but we'd like to allocate more if we can:
	 */
	bch2_bio_alloc_pages_pool(c, bio,
				  min_t(unsigned, output_available,
					c->opts.encoded_extent_max));

	if (bio->bi_iter.bi_size < output_available)
		*page_alloc_failed =
			bch2_bio_alloc_pages(bio,
					     output_available -
					     bio->bi_iter.bi_size,
					     GFP_NOFS) != 0;

	return bio;
}

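/*
 * Example: with 4 KiB pages, wp->sectors_free = 24 (12 KiB) and a 16 KiB
 * source bio, output_available = min(12 KiB, 16 KiB) = 12 KiB, so pages = 3
 * when bouncing (buf == NULL); an ec_buf starting mid-page may need one
 * extra page to cover the same span.
 */
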
static int bch2_write_rechecksum(struct bch_fs *c,
				 struct bch_write_op *op,
				 unsigned new_csum_type)
{
	struct bio *bio = &op->wbio.bio;
	struct bch_extent_crc_unpacked new_crc;
	int ret;

	/* bch2_rechecksum_bio() can't encrypt or decrypt data: */

	if (bch2_csum_type_is_encryption(op->crc.csum_type) !=
	    bch2_csum_type_is_encryption(new_csum_type))
		new_csum_type = op->crc.csum_type;

	ret = bch2_rechecksum_bio(c, bio, op->version, op->crc,
				  NULL, &new_crc,
				  op->crc.offset, op->crc.live_size,
				  new_csum_type);
	if (ret)
		return ret;

	bio_advance(bio, op->crc.offset << 9);
	bio->bi_iter.bi_size = op->crc.live_size << 9;
	op->crc = new_crc;
	return 0;
}

static int bch2_write_decrypt(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct nonce nonce = extent_nonce(op->version, op->crc);
	struct bch_csum csum;
	int ret;

	if (!bch2_csum_type_is_encryption(op->crc.csum_type))
		return 0;

	/*
	 * If we need to decrypt data in the write path, we'll no longer be able
	 * to verify the existing checksum (poly1305 mac, in this case) after
	 * it's decrypted - this is the last point we'll be able to reverify the
	 * checksum:
	 */
	csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
		return -EIO;

	ret = bch2_encrypt_bio(c, op->crc.csum_type, nonce, &op->wbio.bio);
	op->crc.csum_type = 0;
	op->crc.csum = (struct bch_csum) { 0, 0 };
	return ret;
}

static enum prep_encoded_ret {
	PREP_ENCODED_OK,
	PREP_ENCODED_ERR,
	PREP_ENCODED_CHECKSUM_ERR,
	PREP_ENCODED_DO_WRITE,
} bch2_write_prep_encoded_data(struct bch_write_op *op, struct write_point *wp)
{
	struct bch_fs *c = op->c;
	struct bio *bio = &op->wbio.bio;

	if (!(op->flags & BCH_WRITE_DATA_ENCODED))
		return PREP_ENCODED_OK;

	BUG_ON(bio_sectors(bio) != op->crc.compressed_size);

	/* Can we just write the entire extent as is? */
	if (op->crc.uncompressed_size == op->crc.live_size &&
	    op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
	    op->crc.compressed_size <= wp->sectors_free &&
	    (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
	     op->incompressible)) {
		if (!crc_is_compressed(op->crc) &&
		    op->csum_type != op->crc.csum_type &&
		    bch2_write_rechecksum(c, op, op->csum_type) &&
		    !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		return PREP_ENCODED_DO_WRITE;
	}

	/*
	 * If the data is compressed and we couldn't write the entire extent as
	 * is, we have to decompress it:
	 */
	if (crc_is_compressed(op->crc)) {
		struct bch_csum csum;

		if (bch2_write_decrypt(op))
			return PREP_ENCODED_CHECKSUM_ERR;

		/* Last point we can still verify checksum: */
		csum = bch2_checksum_bio(c, op->crc.csum_type,
					 extent_nonce(op->version, op->crc),
					 bio);
		if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
			return PREP_ENCODED_CHECKSUM_ERR;

		if (bch2_bio_uncompress_inplace(op, bio))
			return PREP_ENCODED_ERR;
	}

	/*
	 * No longer have compressed data after this point - data might be
	 * encrypted:
	 */

	/*
	 * If the data is checksummed and we're only writing a subset,
	 * rechecksum and adjust bio to point to currently live data:
	 */
	if ((op->crc.live_size != op->crc.uncompressed_size ||
	     op->crc.csum_type != op->csum_type) &&
	    bch2_write_rechecksum(c, op, op->csum_type) &&
	    !c->opts.no_data_io)
		return PREP_ENCODED_CHECKSUM_ERR;

	/*
	 * If we want to compress the data, it has to be decrypted:
	 */
	if ((op->compression_opt ||
	     bch2_csum_type_is_encryption(op->crc.csum_type) !=
	     bch2_csum_type_is_encryption(op->csum_type)) &&
	    bch2_write_decrypt(op))
		return PREP_ENCODED_CHECKSUM_ERR;

	return PREP_ENCODED_OK;
}

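/*
 * Summary of the cases above: already-encoded data (e.g. from the move path)
 * is written out as is when it fits in the write point and already matches
 * the target compression type; otherwise it is decompressed in place, then
 * possibly rechecksummed (if only a subset is live or the checksum type is
 * changing) and/or decrypted (if it must be recompressed or the encryption
 * setting is changing).
 */
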
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
			     struct bio **_dst)
{
	struct bch_fs *c = op->c;
	struct bio *src = &op->wbio.bio, *dst = src;
	struct bvec_iter saved_iter;
	void *ec_buf;
	unsigned total_output = 0, total_input = 0;
	bool bounce = false;
	bool page_alloc_failed = false;
	int ret, more = 0;

	BUG_ON(!bio_sectors(src));

	ec_buf = bch2_writepoint_ec_buf(c, wp);

	switch (bch2_write_prep_encoded_data(op, wp)) {
	case PREP_ENCODED_OK:
		break;
	case PREP_ENCODED_ERR:
		ret = -EIO;
		goto err;
	case PREP_ENCODED_CHECKSUM_ERR:
		goto csum_err;
	case PREP_ENCODED_DO_WRITE:
		/* XXX look for bug here */
		if (ec_buf) {
			dst = bch2_write_bio_alloc(c, wp, src,
						   &page_alloc_failed,
						   ec_buf);
			bio_copy_data(dst, src);
			bounce = true;
		}
		init_append_extent(op, wp, op->version, op->crc);
		goto do_write;
	}

	if (ec_buf ||
	    op->compression_opt ||
	    (op->csum_type &&
	     !(op->flags & BCH_WRITE_PAGES_STABLE)) ||
	    (bch2_csum_type_is_encryption(op->csum_type) &&
	     !(op->flags & BCH_WRITE_PAGES_OWNED))) {
		dst = bch2_write_bio_alloc(c, wp, src,
					   &page_alloc_failed,
					   ec_buf);
		bounce = true;
	}

	saved_iter = dst->bi_iter;

	do {
		struct bch_extent_crc_unpacked crc = { 0 };
		struct bversion version = op->version;
		size_t dst_len = 0, src_len = 0;

		if (page_alloc_failed &&
		    dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
		    dst->bi_iter.bi_size < c->opts.encoded_extent_max)
			break;

		BUG_ON(op->compression_opt &&
		       (op->flags & BCH_WRITE_DATA_ENCODED) &&
		       bch2_csum_type_is_encryption(op->crc.csum_type));
		BUG_ON(op->compression_opt && !bounce);

		crc.compression_type = op->incompressible
			? BCH_COMPRESSION_TYPE_incompressible
			: op->compression_opt
			? bch2_bio_compress(c, dst, &dst_len, src, &src_len,
					    op->compression_opt)
			: 0;
		if (!crc_is_compressed(crc)) {
			dst_len = min(dst->bi_iter.bi_size, src->bi_iter.bi_size);
			dst_len = min_t(unsigned, dst_len, wp->sectors_free << 9);

			if (op->csum_type)
				dst_len = min_t(unsigned, dst_len,
						c->opts.encoded_extent_max);

			if (bounce) {
				swap(dst->bi_iter.bi_size, dst_len);
				bio_copy_data(dst, src);
				swap(dst->bi_iter.bi_size, dst_len);
			}

			src_len = dst_len;
		}

		BUG_ON(!src_len || !dst_len);

		if (bch2_csum_type_is_encryption(op->csum_type)) {
			if (bversion_zero(version)) {
				version.lo = atomic64_inc_return(&c->key_version);
			} else {
				crc.nonce = op->nonce;
				op->nonce += src_len >> 9;
			}
		}

		if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
		    !crc_is_compressed(crc) &&
		    bch2_csum_type_is_encryption(op->crc.csum_type) ==
		    bch2_csum_type_is_encryption(op->csum_type)) {
			u8 compression_type = crc.compression_type;
			u16 nonce = crc.nonce;
			/*
			 * Note: when we're using rechecksum(), we need to be
			 * checksumming @src because it has all the data our
			 * existing checksum covers - if we bounced (because we
			 * were trying to compress), @dst will only have the
			 * part of the data the new checksum will cover.
			 *
			 * But normally we want to be checksumming post bounce,
			 * because part of the reason for bouncing is so the
			 * data can't be modified (by userspace) while it's in
			 * flight:
			 */
			if (bch2_rechecksum_bio(c, src, version, op->crc,
					&crc, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->csum_type))
				goto csum_err;
			/*
			 * rchecksum_bio sets compression_type on crc from op->crc,
			 * this isn't always correct as sometimes we're changing
			 * an extent from uncompressed to incompressible.
			 */
			crc.compression_type = compression_type;
			crc.nonce = nonce;
		} else {
			if ((op->flags & BCH_WRITE_DATA_ENCODED) &&
			    bch2_rechecksum_bio(c, src, version, op->crc,
					NULL, &op->crc,
					src_len >> 9,
					bio_sectors(src) - (src_len >> 9),
					op->crc.csum_type))
				goto csum_err;

			crc.compressed_size	= dst_len >> 9;
			crc.uncompressed_size	= src_len >> 9;
			crc.live_size		= src_len >> 9;

			swap(dst->bi_iter.bi_size, dst_len);
			ret = bch2_encrypt_bio(c, op->csum_type,
					       extent_nonce(version, crc), dst);
			if (ret)
				goto err;

			crc.csum = bch2_checksum_bio(c, op->csum_type,
						     extent_nonce(version, crc), dst);
			crc.csum_type = op->csum_type;
			swap(dst->bi_iter.bi_size, dst_len);
		}

		init_append_extent(op, wp, version, crc);

		if (dst != src)
			bio_advance(dst, dst_len);
		bio_advance(src, src_len);
		total_output	+= dst_len;
		total_input	+= src_len;
	} while (dst->bi_iter.bi_size &&
		 src->bi_iter.bi_size &&
		 wp->sectors_free &&
		 !bch2_keylist_realloc(&op->insert_keys,
				      op->inline_keys,
				      ARRAY_SIZE(op->inline_keys),
				      BKEY_EXTENT_U64s_MAX));

	more = src->bi_iter.bi_size != 0;

	dst->bi_iter = saved_iter;

	if (dst == src && more) {
		BUG_ON(total_output != total_input);

		dst = bio_split(src, total_input >> 9,
				GFP_NOFS, &c->bio_write);
		wbio_init(dst)->put_bio	= true;
		/* copy WRITE_SYNC flag */
		dst->bi_opf		= src->bi_opf;
	}

	dst->bi_iter.bi_size = total_output;
do_write:
	*_dst = dst;
	return more;
csum_err:
	{
		struct printbuf buf = PRINTBUF;
		bch2_write_op_error(&buf, op);
		prt_printf(&buf, "error verifying existing checksum while rewriting existing data (memory corruption?)");
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
	}

	ret = -EIO;
err:
	if (to_wbio(dst)->bounce)
		bch2_bio_free_pages_pool(c, dst);
	if (to_wbio(dst)->put_bio)
		bio_put(dst);

	return ret;
}

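/*
 * Each pass through the loop above emits one extent, so a large write is
 * chunked into extents bounded by the space left in the write point, by
 * encoded_extent_max when checksumming or compressing, and by the room for
 * new keys in the keylist; the "more" return value tells the caller that
 * input remains and must be written from a fresh write point.
 */
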
static bool bch2_extent_is_writeable(struct bch_write_op *op,
				     struct bkey_s_c k)
{
	struct bch_fs *c = op->c;
	struct bkey_s_c_extent e;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	unsigned replicas = 0;

	if (k.k->type != KEY_TYPE_extent)
		return false;

	e = bkey_s_c_to_extent(k);

	rcu_read_lock();
	extent_for_each_ptr_decode(e, p, entry) {
		if (crc_is_encoded(p.crc) || p.has_ec) {
			rcu_read_unlock();
			return false;
		}

		replicas += bch2_extent_ptr_durability(c, &p);
	}
	rcu_read_unlock();

	return replicas >= op->opts.data_replicas;
}

static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
						  struct btree_iter *iter,
						  struct bkey_i *orig,
						  struct bkey_s_c k,
						  u64 new_i_size)
{
	if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) {
		/* trace this */
		return 0;
	}

	struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
	int ret = PTR_ERR_OR_ZERO(new);
	if (ret)
		return ret;

	bch2_cut_front(bkey_start_pos(&orig->k), new);
	bch2_cut_back(orig->k.p, new);

	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
	bkey_for_each_ptr(ptrs, ptr)
		ptr->unwritten = 0;

	/*
	 * Note that we're not calling bch2_subvol_get_snapshot() in this path -
	 * that was done when we kicked off the write, and here it's important
	 * that we update the extent that we wrote to - even if a snapshot has
	 * since been created. The write is still outstanding, so we're ok
	 * w.r.t. snapshot atomicity:
	 */
	return  bch2_extent_update_i_size_sectors(trans, iter,
					min(new->k.p.offset << 9, new_i_size), 0) ?:
		bch2_trans_update(trans, iter, new,
				  BTREE_UPDATE_internal_snapshot_node);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans = bch2_trans_get(c);

	for_each_keylist_key(&op->insert_keys, orig) {
		int ret = for_each_btree_key_max_commit(trans, iter, BTREE_ID_extents,
				     bkey_start_pos(&orig->k), orig->k.p,
				     BTREE_ITER_intent, k,
				     NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
			bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
		}));

		if (ret && !bch2_err_matches(ret, EROFS)) {
			struct bkey_i *insert = bch2_keylist_front(&op->insert_keys);

			struct printbuf buf = PRINTBUF;
			bch2_write_op_error_trans(trans, &buf, op, bkey_start_offset(&insert->k));
			prt_printf(&buf, "btree update error: %s", bch2_err_str(ret));
			bch_err_ratelimited(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		if (ret) {
			op->error = ret;
			break;
		}
	}

	bch2_trans_put(trans);
}

static void __bch2_nocow_write_done(struct bch_write_op *op)
{
	if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) {
		op->error = -EIO;
	} else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN))
		bch2_nocow_write_convert_unwritten(op);
}

static CLOSURE_CALLBACK(bch2_nocow_write_done)
{
	closure_type(op, struct bch_write_op, cl);

	__bch2_nocow_write_done(op);
	bch2_write_done(cl);
}

struct bucket_to_lock {
	struct bpos		b;
	u8			gen;
	struct nocow_lock_bucket *l;
};

static void bch2_nocow_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	DARRAY_PREALLOCATED(struct bucket_to_lock, 3) buckets;
	u32 snapshot;
	struct bucket_to_lock *stale_at;
	int stale, ret;

	if (op->flags & BCH_WRITE_MOVE)
		return;

	darray_init(&buckets);
	trans = bch2_trans_get(c);
retry:
	bch2_trans_begin(trans);

	ret = bch2_subvolume_get_snapshot(trans, op->subvol, &snapshot);
	if (unlikely(ret))
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(op->pos.inode, op->pos.offset, snapshot),
			     BTREE_ITER_slots);
	while (1) {
		struct bio *bio = &op->wbio.bio;

		buckets.nr = 0;

		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		/* fall back to normal cow write path? */
		if (unlikely(k.k->p.snapshot != snapshot ||
			     !bch2_extent_is_writeable(op, k)))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					 op->inline_keys,
					 ARRAY_SIZE(op->inline_keys),
					 k.k->u64s))
			break;

		/* Get iorefs before dropping btree locks: */
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		bkey_for_each_ptr(ptrs, ptr) {
			struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
			if (unlikely(!ca))
				goto err_get_ioref;

			struct bpos b = PTR_BUCKET_POS(ca, ptr);
			struct nocow_lock_bucket *l =
				bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
			prefetch(l);

			/* XXX allocating memory with btree locks held - rare */
			darray_push_gfp(&buckets, ((struct bucket_to_lock) {
						   .b = b, .gen = ptr->gen, .l = l,
						   }), GFP_KERNEL|__GFP_NOFAIL);

			if (ptr->unwritten)
				op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
		}

		/* Unlock before taking nocow locks, doing IO: */
		bkey_reassemble(op->insert_keys.top, k);
		bch2_trans_unlock(trans);

		bch2_cut_front(op->pos, op->insert_keys.top);
		if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
			bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

		darray_for_each(buckets, i) {
			struct bch_dev *ca = bch2_dev_have_ref(c, i->b.inode);

			__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
						 bucket_to_u64(i->b),
						 BUCKET_NOCOW_LOCK_UPDATE);

			int gen = bucket_gen_get(ca, i->b.offset);
			stale = gen < 0 ? gen : gen_after(gen, i->gen);
			if (unlikely(stale)) {
				stale_at = i;
				goto err_bucket_stale;
			}
		}

		bio = &op->wbio.bio;
		if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {
			bio = bio_split(bio, k.k->p.offset - op->pos.offset,
					GFP_KERNEL, &c->bio_write);
			wbio_init(bio)->put_bio = true;
			bio->bi_opf = op->wbio.bio.bi_opf;
		} else {
			op->flags |= BCH_WRITE_SUBMITTED;
		}

		op->pos.offset += bio_sectors(bio);
		op->written += bio_sectors(bio);

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;
		closure_get(&op->cl);
		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  op->insert_keys.top, true);

		bch2_keylist_push(&op->insert_keys);
		if (op->flags & BCH_WRITE_SUBMITTED)
			break;
		bch2_btree_iter_advance(&iter);
	}
out:
	bch2_trans_iter_exit(trans, &iter);
err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_put(trans);
	darray_exit(&buckets);

	if (ret) {
		struct printbuf buf = PRINTBUF;
		bch2_write_op_error(&buf, op);
		prt_printf(&buf, "%s(): btree lookup error: %s", __func__, bch2_err_str(ret));
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
		op->error = ret;
		op->flags |= BCH_WRITE_SUBMITTED;
	}

	/* fallback to cow write path? */
	if (!(op->flags & BCH_WRITE_SUBMITTED)) {
		closure_sync(&op->cl);
		__bch2_nocow_write_done(op);
		op->insert_keys.top = op->insert_keys.keys;
	} else if (op->flags & BCH_WRITE_SYNC) {
		closure_sync(&op->cl);
		bch2_nocow_write_done(&op->cl.work);
	} else {
		/*
		 * XXX
		 * needs to run out of process context because ei_quota_lock is
		 * a mutex
		 */
		continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op));
	}
	return;
err_get_ioref:
	darray_for_each(buckets, i)
		percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);

	/* Fall back to COW path: */
	goto out;
err_bucket_stale:
	darray_for_each(buckets, i) {
		bch2_bucket_nocow_unlock(&c->nocow_locks, i->b, BUCKET_NOCOW_LOCK_UPDATE);
		if (i == stale_at)
			break;
	}

	struct printbuf buf = PRINTBUF;
	if (bch2_fs_inconsistent_on(stale < 0, c,
				    "pointer to invalid bucket in nocow path on device %llu\n %s",
				    stale_at->b.inode,
				    (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = -EIO;
	} else {
		/* We can retry this: */
		ret = -BCH_ERR_transaction_restart;
	}
	printbuf_exit(&buf);

	goto err_get_ioref;
}

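/*
 * Locking protocol in the nocow path above: btree locks are dropped before
 * the per-bucket nocow locks are taken and IO is issued, so bucket
 * generation numbers are rechecked after locking - a stale gen means the
 * bucket may have been reused in the window, and the operation is retried.
 * When an extent isn't nocow-writeable (or an ioref can't be taken), the
 * loop simply stops and the remainder of the write falls back to the normal
 * COW path.
 */
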
static void __bch2_write(struct bch_write_op *op)
{
	struct bch_fs *c = op->c;
	struct write_point *wp = NULL;
	struct bio *bio = NULL;
	unsigned nofs_flags;
	int ret;

	nofs_flags = memalloc_nofs_save();

	if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
		bch2_nocow_write(op);
		if (op->flags & BCH_WRITE_SUBMITTED)
			goto out_nofs_restore;
	}
again:
	memset(&op->failed, 0, sizeof(op->failed));

	do {
		struct bkey_i *key_to_write;
		unsigned key_to_write_offset = op->insert_keys.top_p -
			op->insert_keys.keys_p;

		/* +1 for possible cache device: */
		if (op->open_buckets.nr + op->nr_replicas + 1 >
		    ARRAY_SIZE(op->open_buckets.v))
			break;

		if (bch2_keylist_realloc(&op->insert_keys,
					op->inline_keys,
					ARRAY_SIZE(op->inline_keys),
					BKEY_EXTENT_U64s_MAX))
			break;

		/*
		 * The copygc thread is now global, which means it's no longer
		 * freeing up space on specific disks, which means that
		 * allocations for specific disks may hang arbitrarily long:
		 */
		ret = bch2_trans_run(c, lockrestart_do(trans,
			bch2_alloc_sectors_start_trans(trans,
				op->target,
				op->opts.erasure_code && !(op->flags & BCH_WRITE_CACHED),
				op->write_point,
				&op->devs_have,
				op->nr_replicas,
				op->nr_replicas_required,
				op->watermark,
				op->flags,
				&op->cl, &wp)));
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
				break;

			goto err;
		}

		EBUG_ON(!wp);

		bch2_open_bucket_get(c, wp, &op->open_buckets);
		ret = bch2_write_extent(op, wp, &bio);

		bch2_alloc_sectors_done_inlined(c, wp);
err:
		if (ret <= 0) {
			op->flags |= BCH_WRITE_SUBMITTED;

			if (unlikely(ret < 0)) {
				if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT)) {
					struct printbuf buf = PRINTBUF;
					bch2_write_op_error(&buf, op);
					prt_printf(&buf, "%s(): %s", __func__, bch2_err_str(ret));
					bch_err_ratelimited(c, "%s", buf.buf);
					printbuf_exit(&buf);
				}
				op->error = ret;
				break;
			}
		}

		bio->bi_end_io	= bch2_write_endio;
		bio->bi_private	= &op->cl;
		bio->bi_opf |= REQ_OP_WRITE;

		closure_get(bio->bi_private);

		key_to_write = (void *) (op->insert_keys.keys_p +
					 key_to_write_offset);

		bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user,
					  key_to_write, false);
	} while (ret);

	/*
	 * Sync or no?
	 *
	 * If we're running asynchronously, we may still want to block
	 * synchronously here if we weren't able to submit all of the IO at
	 * once, as that signals backpressure to the caller.
	 */
	if ((op->flags & BCH_WRITE_SYNC) ||
	    (!(op->flags & BCH_WRITE_SUBMITTED) &&
	     !(op->flags & BCH_WRITE_IN_WORKER))) {
		bch2_wait_on_allocator(c, &op->cl);

		__bch2_write_index(op);

		if (!(op->flags & BCH_WRITE_SUBMITTED))
			goto again;
		bch2_write_done(&op->cl);
	} else {
		bch2_write_queue(op, wp);
		continue_at(&op->cl, bch2_write_index, NULL);
	}
out_nofs_restore:
	memalloc_nofs_restore(nofs_flags);
}

static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
{
	struct bio *bio = &op->wbio.bio;
	struct bvec_iter iter;
	struct bkey_i_inline_data *id;
	unsigned sectors;
	int ret;

	memset(&op->failed, 0, sizeof(op->failed));

	op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
	op->flags |= BCH_WRITE_SUBMITTED;

	bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

	ret = bch2_keylist_realloc(&op->insert_keys, op->inline_keys,
				   ARRAY_SIZE(op->inline_keys),
				   BKEY_U64s + DIV_ROUND_UP(data_len, 8));
	if (ret) {
		op->error = ret;
		goto err;
	}

	sectors = bio_sectors(bio);
	op->pos.offset += sectors;

	id = bkey_inline_data_init(op->insert_keys.top);
	id->k.p		= op->pos;
	id->k.bversion	= op->version;
	id->k.size	= sectors;

	iter = bio->bi_iter;
	iter.bi_size = data_len;
	memcpy_from_bio(id->v.data, bio, iter);

	while (data_len & 7)
		id->v.data[data_len++] = '\0';
	set_bkey_val_bytes(&id->k, data_len);
	bch2_keylist_push(&op->insert_keys);

	__bch2_write_index(op);
err:
	bch2_write_done(&op->cl);
}

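/*
 * Example: a 13 byte inline write is zero-padded to data_len = 16 so the
 * value is a whole number of u64s, as bkey values must be; the keylist
 * reallocation above sized the key accordingly, at
 * BKEY_U64s + DIV_ROUND_UP(13, 8) = BKEY_U64s + 2 u64s.
 */
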
/**
 * bch2_write() - handle a write to a cache device or flash only volume
 * @cl:		&bch_write_op->cl
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data won't fit in a single open bucket, there will be
 * multiple keys); after the data is written it calls bch_journal, and after
 * the keys have been added to the next journal write they're inserted into
 * the btree.
 *
 * If op->discard is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
CLOSURE_CALLBACK(bch2_write)
{
	closure_type(op, struct bch_write_op, cl);
	struct bio *bio = &op->wbio.bio;
	struct bch_fs *c = op->c;
	unsigned data_len;

	EBUG_ON(op->cl.parent);
	BUG_ON(!op->nr_replicas);
	BUG_ON(!op->write_point.v);
	BUG_ON(bkey_eq(op->pos, POS_MAX));

	if (op->flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
		op->flags |= BCH_WRITE_ALLOC_NOWAIT;

	op->nr_replicas_required = min_t(unsigned, op->nr_replicas_required, op->nr_replicas);
	op->start_time = local_clock();
	bch2_keylist_init(&op->insert_keys, op->inline_keys);
	wbio_init(bio)->put_bio = false;

	if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
		struct printbuf buf = PRINTBUF;
		bch2_write_op_error(&buf, op);
		prt_printf(&buf, "misaligned write");
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);
		op->error = -EIO;
		goto err;
	}

	if (c->opts.nochanges) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	if (!(op->flags & BCH_WRITE_MOVE) &&
	    !bch2_write_ref_tryget(c, BCH_WRITE_REF_write)) {
		op->error = -BCH_ERR_erofs_no_writes;
		goto err;
	}

	this_cpu_add(c->counters[BCH_COUNTER_io_write], bio_sectors(bio));
	bch2_increment_clock(c, bio_sectors(bio), WRITE);

	data_len = min_t(u64, bio->bi_iter.bi_size,
			 op->new_i_size - (op->pos.offset << 9));

	if (c->opts.inline_data &&
	    data_len <= min(block_bytes(c) / 2, 1024U)) {
		bch2_write_data_inline(op, data_len);
		return;
	}

	__bch2_write(op);
	return;
err:
	bch2_disk_reservation_put(c, &op->res);

	closure_debug_destroy(&op->cl);
	if (op->end_io)
		op->end_io(op);
}

static const char * const bch2_write_flags[] = {
#define x(f)	#f,
	BCH_WRITE_FLAGS()
#undef x
	NULL
};

void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
{
	prt_str(out, "pos: ");
	bch2_bpos_to_text(out, op->pos);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_str(out, "started: ");
	bch2_pr_time_units(out, local_clock() - op->start_time);
	prt_newline(out);

	prt_str(out, "flags: ");
	prt_bitflags(out, bch2_write_flags, op->flags);
	prt_newline(out);

	prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));

	printbuf_indent_sub(out, 2);
}

void bch2_fs_io_write_exit(struct bch_fs *c)
{
	mempool_exit(&c->bio_bounce_pages);
	bioset_exit(&c->replica_set);
	bioset_exit(&c->bio_write);
}

int bch2_fs_io_write_init(struct bch_fs *c)
{
	if (bioset_init(&c->bio_write,   1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
	    bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
		return -BCH_ERR_ENOMEM_bio_write_init;

	if (mempool_init_page_pool(&c->bio_bounce_pages,
				   max_t(unsigned,
					 c->opts.btree_node_size,
					 c->opts.encoded_extent_max) /
				   PAGE_SIZE, 0))
		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;

	return 0;
}
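
/*
 * Example: with a 256 KiB btree_node_size, a 64 KiB encoded_extent_max and
 * 4 KiB pages, the bounce mempool above reserves max(256K, 64K) / 4K = 64
 * pages - enough to bounce a whole btree node or a maximally sized encoded
 * extent even under memory pressure.
 */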