/* linux.git: fs/bcachefs/data_update.c (Linux 6.14-rc3) */
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "compress.h"
#include "data_update.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "io_write.h"
#include "keylist.h"
#include "move.h"
#include "nocow_locking.h"
#include "rebalance.h"
#include "snapshot.h"
#include "subvolume.h"
#include "trace.h"

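/*
 * Helpers for taking and releasing a reference on every device a key points
 * to: a data update holds these refs for its duration so the devices can't
 * go away underneath it. bkey_get_dev_refs() unwinds the refs it has already
 * taken if any tryget fails.
 */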
static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr)
                bch2_dev_put(bch2_dev_have_ref(c, ptr->dev));
}

static bool bkey_get_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr) {
                if (!bch2_dev_tryget(c, ptr->dev)) {
                        bkey_for_each_ptr(ptrs, ptr2) {
                                if (ptr2 == ptr)
                                        break;
                                bch2_dev_put(bch2_dev_have_ref(c, ptr2->dev));
                        }
                        return false;
                }
        }
        return true;
}

static void bkey_nocow_unlock(struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
                struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

                bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
        }
}

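/*
 * Take the nocow lock on every bucket the key points into (paired with
 * bkey_nocow_unlock() above). With a moving_context we wait for in-flight
 * ios to drain while trylocking, then block on the lock if we still don't
 * have it; without one we only trylock, unwinding the locks already taken
 * on failure.
 */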
static bool bkey_nocow_lock(struct bch_fs *c, struct moving_context *ctxt, struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);

        bkey_for_each_ptr(ptrs, ptr) {
                struct bch_dev *ca = bch2_dev_have_ref(c, ptr->dev);
                struct bpos bucket = PTR_BUCKET_POS(ca, ptr);

                if (ctxt) {
                        bool locked;

                        move_ctxt_wait_event(ctxt,
                                (locked = bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) ||
                                list_empty(&ctxt->ios));

                        if (!locked)
                                bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);
                } else {
                        if (!bch2_bucket_nocow_trylock(&c->nocow_locks, bucket, 0)) {
                                bkey_for_each_ptr(ptrs, ptr2) {
                                        if (ptr2 == ptr)
                                                break;

                                        ca = bch2_dev_have_ref(c, ptr2->dev);
                                        bucket = PTR_BUCKET_POS(ca, ptr2);
                                        bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);
                                }
                                return false;
                        }
                }
        }
        return true;
}

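/*
 * Tracepoint helpers: render the data update, the key we wrote, and the key
 * we're inserting (or, on failure, which rewrites were found) into a
 * printbuf for the move_extent_finish/move_extent_fail events.
 */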
static noinline void trace_move_extent_finish2(struct data_update *u,
                                               struct bkey_i *new,
                                               struct bkey_i *insert)
{
        struct bch_fs *c = u->op.c;
        struct printbuf buf = PRINTBUF;

        prt_newline(&buf);

        bch2_data_update_to_text(&buf, u);
        prt_newline(&buf);

        prt_str_indented(&buf, "new replicas:\t");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
        prt_newline(&buf);

        prt_str_indented(&buf, "insert:\t");
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
        prt_newline(&buf);

        trace_move_extent_finish(c, buf.buf);
        printbuf_exit(&buf);
}

static void trace_move_extent_fail2(struct data_update *m,
                         struct bkey_s_c new,
                         struct bkey_s_c wrote,
                         struct bkey_i *insert,
                         const char *msg)
{
        struct bch_fs *c = m->op.c;
        struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
        struct printbuf buf = PRINTBUF;
        unsigned rewrites_found = 0;

        if (!trace_move_extent_fail_enabled())
                return;

        prt_str(&buf, msg);

        if (insert) {
                const union bch_extent_entry *entry;
                struct bch_extent_ptr *ptr;
                struct extent_ptr_decoded p;

                unsigned ptr_bit = 1;
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
                        if ((ptr_bit & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached)
                                rewrites_found |= ptr_bit;
                        ptr_bit <<= 1;
                }
        }

        prt_str(&buf, "rewrites found:\t");
        bch2_prt_u64_base2(&buf, rewrites_found);
        prt_newline(&buf);

        bch2_data_update_opts_to_text(&buf, c, &m->op.opts, &m->data_opts);

        prt_str(&buf, "\nold:    ");
        bch2_bkey_val_to_text(&buf, c, old);

        prt_str(&buf, "\nnew:    ");
        bch2_bkey_val_to_text(&buf, c, new);

        prt_str(&buf, "\nwrote:  ");
        bch2_bkey_val_to_text(&buf, c, wrote);

        if (insert) {
                prt_str(&buf, "\ninsert: ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
        }

        trace_move_extent_fail(c, buf.buf);
        printbuf_exit(&buf);
}

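/*
 * The index update half of a data update: splice the replicas we just wrote
 * into the extent we read from, one transaction commit per key in
 * op->insert_keys. For each btree slot covered by a new key we:
 *
 *  - verify the extent still matches the one we read (if not, we raced with
 *    another update and account the skipped range in keys_raced/sectors_raced)
 *  - mark the pointers we rewrote as cached, so they can be dropped
 *  - drop pointers in @new that conflict with non-cached pointers in @insert,
 *    and pointers in @insert on devices we just wrote to
 *  - drop excess replicas, then append the new pointers and commit
 */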
static int __bch2_data_update_index_update(struct btree_trans *trans,
                                           struct bch_write_op *op)
{
        struct bch_fs *c = op->c;
        struct btree_iter iter;
        struct data_update *m =
                container_of(op, struct data_update, op);
        struct keylist *keys = &op->insert_keys;
        struct bkey_buf _new, _insert;
        int ret = 0;

        bch2_bkey_buf_init(&_new);
        bch2_bkey_buf_init(&_insert);
        bch2_bkey_buf_realloc(&_insert, c, U8_MAX);

        bch2_trans_iter_init(trans, &iter, m->btree_id,
                             bkey_start_pos(&bch2_keylist_front(keys)->k),
                             BTREE_ITER_slots|BTREE_ITER_intent);

        while (1) {
                struct bkey_s_c k;
                struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
                struct bkey_i *insert = NULL;
                struct bkey_i_extent *new;
                const union bch_extent_entry *entry_c;
                union bch_extent_entry *entry;
                struct extent_ptr_decoded p;
                struct bch_extent_ptr *ptr;
                const struct bch_extent_ptr *ptr_c;
                struct bpos next_pos;
                bool should_check_enospc;
                s64 i_sectors_delta = 0, disk_sectors_delta = 0;
                unsigned rewrites_found = 0, durability, ptr_bit;

                bch2_trans_begin(trans);

                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                new = bkey_i_to_extent(bch2_keylist_front(keys));

                if (!bch2_extents_match(k, old)) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
                                                NULL, "no match:");
                        goto nowork;
                }

                bkey_reassemble(_insert.k, k);
                insert = _insert.k;

                bch2_bkey_buf_copy(&_new, c, bch2_keylist_front(keys));
                new = bkey_i_to_extent(_new.k);
                bch2_cut_front(iter.pos, &new->k_i);

                bch2_cut_front(iter.pos,        insert);
                bch2_cut_back(new->k.p,         insert);
                bch2_cut_back(insert->k.p,      &new->k_i);

                /*
                 * @old: extent that we read from
                 * @insert: key that we're going to update, initialized from
                 * extent currently in btree - same as @old unless we raced with
                 * other updates
                 * @new: extent with new pointers that we'll be adding to @insert
                 *
                 * First, drop rewrite_ptrs from @new:
                 */
                ptr_bit = 1;
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry_c) {
                        if ((ptr_bit & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached) {
                                bch2_extent_ptr_set_cached(c, &m->op.opts,
                                                           bkey_i_to_s(insert), ptr);
                                rewrites_found |= ptr_bit;
                        }
                        ptr_bit <<= 1;
                }

                if (m->data_opts.rewrite_ptrs &&
                    !rewrites_found &&
                    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
                        goto nowork;
                }

                /*
                 * A replica that we just wrote might conflict with a replica
                 * that we want to keep, due to racing with another move:
                 */
restart_drop_conflicting_replicas:
                extent_for_each_ptr(extent_i_to_s(new), ptr)
                        if ((ptr_c = bch2_bkey_has_device_c(bkey_i_to_s_c(insert), ptr->dev)) &&
                            !ptr_c->cached) {
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(&new->k_i), ptr);
                                goto restart_drop_conflicting_replicas;
                        }

                if (!bkey_val_u64s(&new->k)) {
                        trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
                        goto nowork;
                }

                /* Now, drop pointers that conflict with what we just wrote: */
                extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
                        if ((ptr = bch2_bkey_has_device(bkey_i_to_s(insert), p.ptr.dev)))
                                bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);

                durability = bch2_bkey_durability(c, bkey_i_to_s_c(insert)) +
                        bch2_bkey_durability(c, bkey_i_to_s_c(&new->k_i));

                /* Now, drop excess replicas: */
                rcu_read_lock();
restart_drop_extra_replicas:
                bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs(bkey_i_to_s(insert)), p, entry) {
                        unsigned ptr_durability = bch2_extent_ptr_durability(c, &p);

                        if (!p.ptr.cached &&
                            durability - ptr_durability >= m->op.opts.data_replicas) {
                                durability -= ptr_durability;

                                bch2_extent_ptr_set_cached(c, &m->op.opts,
                                                           bkey_i_to_s(insert), &entry->ptr);
                                goto restart_drop_extra_replicas;
                        }
                }
                rcu_read_unlock();

                /* Finally, add the pointers we just wrote: */
                extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
                        bch2_extent_ptr_decoded_append(insert, &p);

                bch2_bkey_narrow_crcs(insert, (struct bch_extent_crc_unpacked) { 0 });
                bch2_extent_normalize_by_opts(c, &m->op.opts, bkey_i_to_s(insert));

                ret = bch2_sum_sector_overwrites(trans, &iter, insert,
                                                 &should_check_enospc,
                                                 &i_sectors_delta,
                                                 &disk_sectors_delta);
                if (ret)
                        goto err;

                if (disk_sectors_delta > (s64) op->res.sectors) {
                        ret = bch2_disk_reservation_add(c, &op->res,
                                                disk_sectors_delta - op->res.sectors,
                                                !should_check_enospc
                                                ? BCH_DISK_RESERVATION_NOFAIL : 0);
                        if (ret)
                                goto out;
                }

                next_pos = insert->k.p;

                /*
                 * Check for nonce offset inconsistency:
                 * This is debug code - we've been seeing this bug rarely, and
                 * it's been hard to reproduce, so this should give us some more
                 * information when it does occur:
                 */
                int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert),
                                                 (struct bkey_validate_context) {
                                                        .btree  = m->btree_id,
                                                        .flags  = BCH_VALIDATE_commit,
                                                 });
                if (invalid) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "about to insert invalid key in data update path");
                        prt_str(&buf, "\nold: ");
                        bch2_bkey_val_to_text(&buf, c, old);
                        prt_str(&buf, "\nk:   ");
                        bch2_bkey_val_to_text(&buf, c, k);
                        prt_str(&buf, "\nnew: ");
                        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

                        bch2_print_string_as_lines(KERN_ERR, buf.buf);
                        printbuf_exit(&buf);

                        bch2_fatal_error(c);
                        ret = -EIO;
                        goto out;
                }

                if (trace_data_update_enabled()) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "\nold: ");
                        bch2_bkey_val_to_text(&buf, c, old);
                        prt_str(&buf, "\nk:   ");
                        bch2_bkey_val_to_text(&buf, c, k);
                        prt_str(&buf, "\nnew: ");
                        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));

                        trace_data_update(c, buf.buf);
                        printbuf_exit(&buf);
                }

                ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, bkey_start_pos(&insert->k)) ?:
                        bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, insert->k.p) ?:
                        bch2_bkey_set_needs_rebalance(c, &op->opts, insert) ?:
                        bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_internal_snapshot_node) ?:
                        bch2_trans_commit(trans, &op->res,
                                NULL,
                                BCH_TRANS_COMMIT_no_check_rw|
                                BCH_TRANS_COMMIT_no_enospc|
                                m->data_opts.btree_insert_flags);
                if (!ret) {
                        bch2_btree_iter_set_pos(&iter, next_pos);

                        this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
                        if (trace_move_extent_finish_enabled())
                                trace_move_extent_finish2(m, &new->k_i, insert);
                }
err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
                if (ret)
                        break;
next:
                while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
                        bch2_keylist_pop_front(keys);
                        if (bch2_keylist_empty(keys))
                                goto out;
                }
                continue;
nowork:
                if (m->stats) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
                        atomic64_inc(&m->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
                                     &m->stats->sectors_raced);
                }

                count_event(c, move_extent_fail);

                bch2_btree_iter_advance(&iter);
                goto next;
        }
out:
        bch2_trans_iter_exit(trans, &iter);
        bch2_bkey_buf_exit(&_insert, c);
        bch2_bkey_buf_exit(&_new, c);
        BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));
        return ret;
}

int bch2_data_update_index_update(struct bch_write_op *op)
{
        return bch2_trans_run(op->c, __bch2_data_update_index_update(trans, op));
}

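/*
 * Called when the read side of a data update completes: point the write op's
 * bio at the data we just read (the write bio must own the pages by this
 * point) and kick off the write.
 */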
void bch2_data_update_read_done(struct data_update *m,
                                struct bch_extent_crc_unpacked crc)
{
        /* write bio must own pages: */
        BUG_ON(!m->op.wbio.bio.bi_vcnt);

        m->op.crc = crc;
        m->op.wbio.bio.bi_iter.bi_size = crc.compressed_size << 9;

        closure_call(&m->op.cl, bch2_write, NULL, NULL);
}

void bch2_data_update_exit(struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bkey_s_c k = bkey_i_to_s_c(update->k.k);

        if (c->opts.nocow_enabled)
                bkey_nocow_unlock(c, k);
        bkey_put_dev_refs(c, k);
        bch2_bkey_buf_exit(&update->k, c);
        bch2_disk_reservation_put(c, &update->op.res);
        bch2_bio_free_pages_pool(c, &update->op.wbio.bio);
}

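/*
 * Unwritten extents have no data to read or move; instead, allocate new
 * space, build keys with unwritten pointers to it, and update the index
 * directly, looping until the whole extent is covered or we race with
 * another update.
 */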
static void bch2_update_unwritten_extent(struct btree_trans *trans,
                                  struct data_update *update)
{
        struct bch_fs *c = update->op.c;
        struct bio *bio = &update->op.wbio.bio;
        struct bkey_i_extent *e;
        struct write_point *wp;
        struct closure cl;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        closure_init_stack(&cl);
        bch2_keylist_init(&update->op.insert_keys, update->op.inline_keys);

        while (bio_sectors(bio)) {
                unsigned sectors = bio_sectors(bio);

                bch2_trans_begin(trans);

                bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
                                     BTREE_ITER_slots);
                ret = lockrestart_do(trans, ({
                        k = bch2_btree_iter_peek_slot(&iter);
                        bkey_err(k);
                }));
                bch2_trans_iter_exit(trans, &iter);

                if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
                        break;

                e = bkey_extent_init(update->op.insert_keys.top);
                e->k.p = update->op.pos;

                ret = bch2_alloc_sectors_start_trans(trans,
                                update->op.target,
                                false,
                                update->op.write_point,
                                &update->op.devs_have,
                                update->op.nr_replicas,
                                update->op.nr_replicas,
                                update->op.watermark,
                                0, &cl, &wp);
                if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) {
                        bch2_trans_unlock(trans);
                        closure_sync(&cl);
                        continue;
                }

                bch_err_fn_ratelimited(c, ret);

                if (ret)
                        return;

                sectors = min(sectors, wp->sectors_free);

                bch2_key_resize(&e->k, sectors);

                bch2_open_bucket_get(c, wp, &update->op.open_buckets);
                bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false);
                bch2_alloc_sectors_done(c, wp);

                bio_advance(bio, sectors << 9);
                update->op.pos.offset += sectors;

                extent_for_each_ptr(extent_i_to_s(e), ptr)
                        ptr->unwritten = true;
                bch2_keylist_push(&update->op.insert_keys);

                ret = __bch2_data_update_index_update(trans, &update->op);

                bch2_open_buckets_put(c, &update->op.open_buckets);

                if (ret)
                        break;
        }

        if (closure_nr_remaining(&cl) != 1) {
                bch2_trans_unlock(trans);
                closure_sync(&cl);
        }
}

void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
                                   struct bch_io_opts *io_opts,
                                   struct data_update_opts *data_opts)
{
        printbuf_tabstop_push(out, 20);

        prt_str_indented(out, "rewrite ptrs:\t");
        bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
        prt_newline(out);

        prt_str_indented(out, "kill ptrs:\t");
        bch2_prt_u64_base2(out, data_opts->kill_ptrs);
        prt_newline(out);

        prt_str_indented(out, "target:\t");
        bch2_target_to_text(out, c, data_opts->target);
        prt_newline(out);

        prt_str_indented(out, "compression:\t");
        bch2_compression_opt_to_text(out, io_opts->background_compression);
        prt_newline(out);

        prt_str_indented(out, "opts.replicas:\t");
        prt_u64(out, io_opts->data_replicas);
        prt_newline(out);

        prt_str_indented(out, "extra replicas:\t");
        prt_u64(out, data_opts->extra_replicas);
}

void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
        bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
        prt_newline(out);

        prt_str_indented(out, "old key:\t");
        bch2_bkey_val_to_text(out, m->op.c, bkey_i_to_s_c(m->k.k));
}

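/*
 * Drop the pointers selected by data_opts->kill_ptrs from @k and update the
 * btree in place: the degenerate case of a data update where nothing new
 * needs to be written.
 */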
int bch2_extent_drop_ptrs(struct btree_trans *trans,
                          struct btree_iter *iter,
                          struct bkey_s_c k,
                          struct bch_io_opts *io_opts,
                          struct data_update_opts *data_opts)
{
        struct bch_fs *c = trans->c;
        struct bkey_i *n;
        int ret;

        n = bch2_bkey_make_mut_noupdate(trans, k);
        ret = PTR_ERR_OR_ZERO(n);
        if (ret)
                return ret;

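        /*
         * kill_ptrs is a bitmask of pointer indices within the key; clear the
         * highest set bit each iteration, so that the indices of the
         * remaining pointers aren't shifted by earlier drops. E.g. with
         * kill_ptrs == 0b101 we drop pointer 2 first, then pointer 0.
         */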
        while (data_opts->kill_ptrs) {
                unsigned i = 0, drop = __fls(data_opts->kill_ptrs);

                bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, i++ == drop);
                data_opts->kill_ptrs ^= 1U << drop;
        }

        /*
         * If the new extent no longer has any pointers, bch2_extent_normalize()
         * will do the appropriate thing with it (turning it into a
         * KEY_TYPE_error key, or just a discard if it was a cached extent)
         */
        bch2_extent_normalize_by_opts(c, io_opts, bkey_i_to_s(n));

        /*
         * Since we're not inserting through an extent iterator
         * (BTREE_ITER_all_snapshots iterators aren't extent iterators),
         * we aren't using the extent overwrite path to delete, we're
         * just using the normal key deletion path:
         */
        if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
                n->k.size = 0;

        return bch2_trans_relock(trans) ?:
                bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
                bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

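/*
 * Set up a data update: take refs on the devices @k points to, take the
 * nocow locks if needed, initialize the write op, and compute how many new
 * replicas to write from the durability of the pointers being kept,
 * rewritten and killed. Returns -BCH_ERR_nocow_lock_blocked if the nocow
 * locks couldn't be taken, and -BCH_ERR_data_update_done if the update was
 * completed synchronously or turned out to be unnecessary.
 */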
int bch2_data_update_init(struct btree_trans *trans,
                          struct btree_iter *iter,
                          struct moving_context *ctxt,
                          struct data_update *m,
                          struct write_point_specifier wp,
                          struct bch_io_opts io_opts,
                          struct data_update_opts data_opts,
                          enum btree_id btree_id,
                          struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        unsigned reserve_sectors = k.k->size * data_opts.extra_replicas;
        int ret = 0;

        /*
         * fs is corrupt: we have a key for a snapshot node that doesn't
         * exist, and we have to check for this because we go rw before
         * repairing the snapshots table - just skip it, we can move it later.
         */
        if (unlikely(k.k->p.snapshot && !bch2_snapshot_exists(c, k.k->p.snapshot)))
                return -BCH_ERR_data_update_done;

        if (!bkey_get_dev_refs(c, k))
                return -BCH_ERR_data_update_done;

        if (c->opts.nocow_enabled &&
            !bkey_nocow_lock(c, ctxt, k)) {
                bkey_put_dev_refs(c, k);
                return -BCH_ERR_nocow_lock_blocked;
        }

        bch2_bkey_buf_init(&m->k);
        bch2_bkey_buf_reassemble(&m->k, c, k);
        m->btree_id     = btree_id;
        m->data_opts    = data_opts;
        m->ctxt         = ctxt;
        m->stats        = ctxt ? ctxt->stats : NULL;

        bch2_write_op_init(&m->op, c, io_opts);
        m->op.pos       = bkey_start_pos(k.k);
        m->op.version   = k.k->bversion;
        m->op.target    = data_opts.target;
        m->op.write_point = wp;
        m->op.nr_replicas = 0;
        m->op.flags     |= BCH_WRITE_PAGES_STABLE|
                BCH_WRITE_PAGES_OWNED|
                BCH_WRITE_DATA_ENCODED|
                BCH_WRITE_MOVE|
                m->data_opts.write_flags;
        m->op.compression_opt   = io_opts.background_compression;
        m->op.watermark         = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

        unsigned durability_have = 0, durability_removing = 0;

        unsigned ptr_bit = 1;
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                if (!p.ptr.cached) {
                        rcu_read_lock();
                        if (ptr_bit & m->data_opts.rewrite_ptrs) {
                                if (crc_is_compressed(p.crc))
                                        reserve_sectors += k.k->size;

                                m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
                                durability_removing += bch2_extent_ptr_desired_durability(c, &p);
                        } else if (!(ptr_bit & m->data_opts.kill_ptrs)) {
                                bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
                                durability_have += bch2_extent_ptr_durability(c, &p);
                        }
                        rcu_read_unlock();
                }

                /*
                 * op->csum_type is normally initialized from the fs/file's
                 * current options - but if an extent is encrypted, we require
                 * that it stays encrypted:
                 */
                if (bch2_csum_type_is_encryption(p.crc.csum_type)) {
                        m->op.nonce     = p.crc.nonce + p.crc.offset;
                        m->op.csum_type = p.crc.csum_type;
                }

                if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
                        m->op.incompressible = true;

                ptr_bit <<= 1;
        }

        unsigned durability_required = max(0, (int) (io_opts.data_replicas - durability_have));

        /*
         * If current extent durability is less than io_opts.data_replicas,
         * we're not trying to rereplicate the extent up to data_replicas here -
         * unless extra_replicas was specified
         *
         * Increasing replication is an explicit operation triggered by
         * rereplicate, currently, so that users don't get an unexpected -ENOSPC
         */
        m->op.nr_replicas = min(durability_removing, durability_required) +
                m->data_opts.extra_replicas;

        /*
         * If device(s) were set to durability=0 after data was written to them
         * we can end up with a durability=0 extent, and the normal algorithm
         * that tries not to increase durability doesn't work:
         */
        if (!(durability_have + durability_removing))
                m->op.nr_replicas = max((unsigned) m->op.nr_replicas, 1);

        m->op.nr_replicas_required = m->op.nr_replicas;

        /*
         * It might turn out that we don't need any new replicas, if the
         * replicas or durability settings have been changed since the extent
         * was written:
         */
        if (!m->op.nr_replicas) {
                m->data_opts.kill_ptrs |= m->data_opts.rewrite_ptrs;
                m->data_opts.rewrite_ptrs = 0;
                /* if iter == NULL, it's just a promote */
                if (iter)
                        ret = bch2_extent_drop_ptrs(trans, iter, k, &io_opts, &m->data_opts);
                goto out;
        }

        if (reserve_sectors) {
                ret = bch2_disk_reservation_add(c, &m->op.res, reserve_sectors,
                                m->data_opts.extra_replicas
                                ? 0
                                : BCH_DISK_RESERVATION_NOFAIL);
                if (ret)
                        goto out;
        }

        if (bkey_extent_is_unwritten(k)) {
                bch2_update_unwritten_extent(trans, m);
                goto out;
        }

        return 0;
out:
        bch2_data_update_exit(m);
        return ret ?: -BCH_ERR_data_update_done;
}

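/*
 * Rewriting a cached pointer gains nothing, since cached replicas can simply
 * be dropped: convert any requested rewrite of a cached pointer into a kill.
 */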
void bch2_data_update_opts_normalize(struct bkey_s_c k, struct data_update_opts *opts)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        unsigned ptr_bit = 1;

        bkey_for_each_ptr(ptrs, ptr) {
                if ((opts->rewrite_ptrs & ptr_bit) && ptr->cached) {
                        opts->kill_ptrs |= ptr_bit;
                        opts->rewrite_ptrs ^= ptr_bit;
                }

                ptr_bit <<= 1;
        }
}