// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_locking.h"
#include "buckets.h"
#include "debug.h"
#include "errcode.h"
#include "error.h"
#include "extents.h"
#include "keylist.h"
#include "snapshot.h"
#include "trace.h"

static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
                                         const struct btree_insert_entry *r)
{
        return   cmp_int(l->btree_id,   r->btree_id) ?:
                 cmp_int(l->cached,     r->cached) ?:
                 -cmp_int(l->level,     r->level) ?:
                 bpos_cmp(l->k->k.p,    r->k->k.p);
}
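
/*
 * Example: given two entries for the same btree and position, one at level
 * 1 and one at level 0, the negated level comparison sorts the level 1
 * (interior node) entry first; updates are ordered by btree id, then
 * cached, then descending level, then position.
 */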

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
                          struct bkey_i *, enum btree_iter_update_trigger_flags,
                          unsigned long ip);

static noinline int extent_front_merge(struct btree_trans *trans,
                                       struct btree_iter *iter,
                                       struct bkey_s_c k,
                                       struct bkey_i **insert,
                                       enum btree_iter_update_trigger_flags flags)
{
        struct bch_fs *c = trans->c;
        struct bkey_i *update;
        int ret;

        if (unlikely(trans->journal_replay_not_finished))
                return 0;

        update = bch2_bkey_make_mut_noupdate(trans, k);
        ret = PTR_ERR_OR_ZERO(update);
        if (ret)
                return ret;

        if (!bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(*insert)))
                return 0;

        ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p) ?:
                bch2_key_has_snapshot_overwrites(trans, iter->btree_id, (*insert)->k.p);
        if (ret < 0)
                return ret;
        if (ret)
                return 0;

        ret = bch2_btree_delete_at(trans, iter, flags);
        if (ret)
                return ret;

        *insert = update;
        return 0;
}

static noinline int extent_back_merge(struct btree_trans *trans,
                                      struct btree_iter *iter,
                                      struct bkey_i *insert,
                                      struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        int ret;

        if (unlikely(trans->journal_replay_not_finished))
                return 0;

        ret =   bch2_key_has_snapshot_overwrites(trans, iter->btree_id, insert->k.p) ?:
                bch2_key_has_snapshot_overwrites(trans, iter->btree_id, k.k->p);
        if (ret < 0)
                return ret;
        if (ret)
                return 0;

        bch2_bkey_merge(c, bkey_i_to_s(insert), k);
        return 0;
}

/*
 * When deleting, check if we need to emit a whiteout (because we're overwriting
 * something in an ancestor snapshot)
 */
static int need_whiteout_for_snapshot(struct btree_trans *trans,
                                      enum btree_id btree_id, struct bpos pos)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        u32 snapshot = pos.snapshot;
        int ret;

        if (!bch2_snapshot_parent(trans->c, pos.snapshot))
                return 0;

        pos.snapshot++;

        for_each_btree_key_norestart(trans, iter, btree_id, pos,
                           BTREE_ITER_all_snapshots|
                           BTREE_ITER_nopreserve, k, ret) {
                if (!bkey_eq(k.k->p, pos))
                        break;

                if (bch2_snapshot_is_ancestor(trans->c, snapshot,
                                              k.k->p.snapshot)) {
                        ret = !bkey_whiteout(k.k);
                        break;
                }
        }
        bch2_trans_iter_exit(trans, &iter);

        return ret;
}
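
/*
 * Example: deleting a key at (inode, offset, snapshot) when a live key
 * exists at the same (inode, offset) in an ancestor snapshot. A plain
 * KEY_TYPE_deleted key would let lookups in our snapshot fall through to
 * the ancestor's key, resurrecting it; need_whiteout_for_snapshot() returns
 * true for this case so the deletion is written as a KEY_TYPE_whiteout,
 * masking the ancestor's key.
 */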

int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
                                     enum btree_id id,
                                     struct bpos old_pos,
                                     struct bpos new_pos)
{
        struct bch_fs *c = trans->c;
        struct btree_iter old_iter, new_iter = { NULL };
        struct bkey_s_c old_k, new_k;
        snapshot_id_list s;
        struct bkey_i *update;
        int ret = 0;

        if (!bch2_snapshot_has_children(c, old_pos.snapshot))
                return 0;

        darray_init(&s);

        bch2_trans_iter_init(trans, &old_iter, id, old_pos,
                             BTREE_ITER_not_extents|
                             BTREE_ITER_all_snapshots);
        while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
               !(ret = bkey_err(old_k)) &&
               bkey_eq(old_pos, old_k.k->p)) {
                struct bpos whiteout_pos =
                        SPOS(new_pos.inode, new_pos.offset, old_k.k->p.snapshot);

                if (!bch2_snapshot_is_ancestor(c, old_k.k->p.snapshot, old_pos.snapshot) ||
                    snapshot_list_has_ancestor(c, &s, old_k.k->p.snapshot))
                        continue;

                new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
                                           BTREE_ITER_not_extents|
                                           BTREE_ITER_intent);
                ret = bkey_err(new_k);
                if (ret)
                        break;

                if (new_k.k->type == KEY_TYPE_deleted) {
                        update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
                        ret = PTR_ERR_OR_ZERO(update);
                        if (ret)
                                break;

                        bkey_init(&update->k);
                        update->k.p             = whiteout_pos;
                        update->k.type          = KEY_TYPE_whiteout;

                        ret = bch2_trans_update(trans, &new_iter, update,
                                                BTREE_UPDATE_internal_snapshot_node);
                }
                bch2_trans_iter_exit(trans, &new_iter);

                ret = snapshot_list_add(c, &s, old_k.k->p.snapshot);
                if (ret)
                        break;
        }
        bch2_trans_iter_exit(trans, &new_iter);
        bch2_trans_iter_exit(trans, &old_iter);
        darray_exit(&s);

        return ret;
}

int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
                                       struct btree_iter *iter,
                                       enum btree_iter_update_trigger_flags flags,
                                       struct bkey_s_c old,
                                       struct bkey_s_c new)
{
        enum btree_id btree_id = iter->btree_id;
        struct bkey_i *update;
        struct bpos new_start = bkey_start_pos(new.k);
        unsigned front_split = bkey_lt(bkey_start_pos(old.k), new_start);
        unsigned back_split  = bkey_gt(old.k->p, new.k->p);
        unsigned middle_split = (front_split || back_split) &&
                old.k->p.snapshot != new.k->p.snapshot;
        unsigned nr_splits = front_split + back_split + middle_split;
        int ret = 0, compressed_sectors;

        /*
         * If we're going to be splitting a compressed extent, note it
         * so that __bch2_trans_commit() can increase our disk
         * reservation:
         */
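        /*
         * E.g., with illustrative numbers: if old carries 8 compressed
         * sectors and all three splits occur (nr_splits == 3), we add
         * 8 * (3 - 1) == 16 sectors to extra_disk_res, covering the
         * additional references to the same compressed data that the
         * splits create.
         */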
        if (nr_splits > 1 &&
            (compressed_sectors = bch2_bkey_sectors_compressed(old)))
                trans->extra_disk_res += compressed_sectors * (nr_splits - 1);

        if (front_split) {
                update = bch2_bkey_make_mut_noupdate(trans, old);
                if ((ret = PTR_ERR_OR_ZERO(update)))
                        return ret;

                bch2_cut_back(new_start, update);

                ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
                                        old.k->p, update->k.p) ?:
                        bch2_btree_insert_nonextent(trans, btree_id, update,
                                        BTREE_UPDATE_internal_snapshot_node|flags);
                if (ret)
                        return ret;
        }

        /* If we're overwriting in a different snapshot - middle split: */
        if (middle_split) {
                update = bch2_bkey_make_mut_noupdate(trans, old);
                if ((ret = PTR_ERR_OR_ZERO(update)))
                        return ret;

                bch2_cut_front(new_start, update);
                bch2_cut_back(new.k->p, update);

                ret =   bch2_insert_snapshot_whiteouts(trans, btree_id,
                                        old.k->p, update->k.p) ?:
                        bch2_btree_insert_nonextent(trans, btree_id, update,
                                          BTREE_UPDATE_internal_snapshot_node|flags);
                if (ret)
                        return ret;
        }

        if (bkey_le(old.k->p, new.k->p)) {
                update = bch2_trans_kmalloc(trans, sizeof(*update));
                if ((ret = PTR_ERR_OR_ZERO(update)))
                        return ret;

                bkey_init(&update->k);
                update->k.p = old.k->p;
                update->k.p.snapshot = new.k->p.snapshot;

                if (new.k->p.snapshot != old.k->p.snapshot) {
                        update->k.type = KEY_TYPE_whiteout;
                } else if (btree_type_has_snapshots(btree_id)) {
                        ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
                        if (ret < 0)
                                return ret;
                        if (ret)
                                update->k.type = KEY_TYPE_whiteout;
                }

                ret = bch2_btree_insert_nonextent(trans, btree_id, update,
                                          BTREE_UPDATE_internal_snapshot_node|flags);
                if (ret)
                        return ret;
        }

        if (back_split) {
                update = bch2_bkey_make_mut_noupdate(trans, old);
                if ((ret = PTR_ERR_OR_ZERO(update)))
                        return ret;

                bch2_cut_front(new.k->p, update);

                ret = bch2_trans_update_by_path(trans, iter->path, update,
                                          BTREE_UPDATE_internal_snapshot_node|
                                          flags, _RET_IP_);
                if (ret)
                        return ret;
        }

        return 0;
}
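
/*
 * Example of the splits above (offsets illustrative), overwriting the
 * middle of an existing extent:
 *
 *        old:   |----------------------|        50..100
 *        new:          |--------|               60..80
 *
 * front_split reinserts the surviving 50..60 range and back_split the
 * 80..100 range; middle_split only arises when old and new are in
 * different snapshots, where the overwritten 60..80 range must also be
 * preserved in the old key's snapshot.
 */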

static int bch2_trans_update_extent(struct btree_trans *trans,
                                    struct btree_iter *orig_iter,
                                    struct bkey_i *insert,
                                    enum btree_iter_update_trigger_flags flags)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        enum btree_id btree_id = orig_iter->btree_id;
        int ret = 0;

        bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
                             BTREE_ITER_intent|
                             BTREE_ITER_with_updates|
                             BTREE_ITER_not_extents);
        k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
        if ((ret = bkey_err(k)))
                goto err;
        if (!k.k)
                goto out;

        if (bkey_eq(k.k->p, bkey_start_pos(&insert->k))) {
                if (bch2_bkey_maybe_mergable(k.k, &insert->k)) {
                        ret = extent_front_merge(trans, &iter, k, &insert, flags);
                        if (ret)
                                goto err;
                }

                goto next;
        }

        while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
                bool done = bkey_lt(insert->k.p, k.k->p);

                ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
                if (ret)
                        goto err;

                if (done)
                        goto out;
next:
                bch2_btree_iter_advance(&iter);
                k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
                if ((ret = bkey_err(k)))
                        goto err;
                if (!k.k)
                        goto out;
        }

        if (bch2_bkey_maybe_mergable(&insert->k, k.k)) {
                ret = extent_back_merge(trans, &iter, insert, k);
                if (ret)
                        goto err;
        }
out:
        if (!bkey_deleted(&insert->k))
                ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
err:
        bch2_trans_iter_exit(trans, &iter);

        return ret;
}

static noinline int flush_new_cached_update(struct btree_trans *trans,
                                            struct btree_insert_entry *i,
                                            enum btree_iter_update_trigger_flags flags,
                                            unsigned long ip)
{
        struct bkey k;
        int ret;

        btree_path_idx_t path_idx =
                bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
                              BTREE_ITER_intent, _THIS_IP_);
        ret = bch2_btree_path_traverse(trans, path_idx, 0);
        if (ret)
                goto out;

        struct btree_path *btree_path = trans->paths + path_idx;

        /*
         * The old key in the insert entry might actually refer to an existing
         * key in the btree that has been deleted from cache and not yet
         * flushed. Check for this and skip the flush so we don't run triggers
         * against a stale key.
         */
        bch2_btree_path_peek_slot_exact(btree_path, &k);
        if (!bkey_deleted(&k))
                goto out;

        i->key_cache_already_flushed = true;
        i->flags |= BTREE_TRIGGER_norun;

        btree_path_set_should_be_locked(trans, btree_path);
        ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
out:
        bch2_path_put(trans, path_idx, true);
        return ret;
}

static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
                          struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
                          unsigned long ip)
{
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i, n;
        int cmp = 0;

        struct btree_path *path = trans->paths + path_idx;
        EBUG_ON(!path->should_be_locked);
        EBUG_ON(trans->nr_updates >= trans->nr_paths);
        EBUG_ON(!bpos_eq(k->k.p, path->pos));

        n = (struct btree_insert_entry) {
                .flags          = flags,
                .bkey_type      = __btree_node_type(path->level, path->btree_id),
                .btree_id       = path->btree_id,
                .level          = path->level,
                .cached         = path->cached,
                .path           = path_idx,
                .k              = k,
                .ip_allocated   = ip,
        };

#ifdef CONFIG_BCACHEFS_DEBUG
        trans_for_each_update(trans, i)
                BUG_ON(i != trans->updates &&
                       btree_insert_entry_cmp(i - 1, i) >= 0);
#endif

        /*
         * Pending updates are kept sorted: first, find position of new update,
         * then delete/trim any updates the new update overwrites:
         */
        for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
                cmp = btree_insert_entry_cmp(&n, i);
                if (cmp <= 0)
                        break;
        }

        bool overwrite = !cmp && i < trans->updates + trans->nr_updates;

        if (overwrite) {
                EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);

                bch2_path_put(trans, i->path, true);
                i->flags        = n.flags;
                i->cached       = n.cached;
                i->k            = n.k;
                i->path         = n.path;
                i->ip_allocated = n.ip_allocated;
        } else {
                array_insert_item(trans->updates, trans->nr_updates,
                                  i - trans->updates, n);

                i->old_v = bch2_btree_path_peek_slot_exact(path, &i->old_k).v;
                i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;

                if (unlikely(trans->journal_replay_not_finished)) {
                        struct bkey_i *j_k =
                                bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);

                        if (j_k) {
                                i->old_k = j_k->k;
                                i->old_v = &j_k->v;
                        }
                }
        }

        __btree_path_get(trans, trans->paths + i->path, true);

        trace_update_by_path(trans, path, i, overwrite);

        /*
         * If a key is present in the key cache, it must also exist in the
         * btree - this is necessary for cache coherency. When iterating over
         * a btree that's cached in the key cache, the btree iter code checks
         * the key cache - but the key has to exist in the btree for that to
         * work:
         */
        if (path->cached && !i->old_btree_u64s)
                return flush_new_cached_update(trans, i, flags, ip);

        return 0;
}
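
/*
 * Illustrative scenario for the coherency rule above: the first update to a
 * position in a cached btree when nothing exists at that position in the
 * btree itself (old_btree_u64s == 0). Leaving such a key only in the key
 * cache would make it invisible to btree iteration, so
 * flush_new_cached_update() instead writes it via a btree path and marks
 * the key cache update as already flushed.
 */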

static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
                                                    struct btree_iter *iter,
                                                    struct btree_path *path)
{
        struct btree_path *key_cache_path = btree_iter_key_cache_path(trans, iter);

        if (!key_cache_path ||
            !key_cache_path->should_be_locked ||
            !bpos_eq(key_cache_path->pos, iter->pos)) {
                struct bkey_cached *ck;
                int ret;

                if (!iter->key_cache_path)
                        iter->key_cache_path =
                                bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
                                              BTREE_ITER_intent|
                                              BTREE_ITER_cached, _THIS_IP_);

                iter->key_cache_path =
                        bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
                                                iter->flags & BTREE_ITER_intent,
                                                _THIS_IP_);

                ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
                if (unlikely(ret))
                        return ret;

                ck = (void *) trans->paths[iter->key_cache_path].l[0].b;

                if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                        trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
                }

                btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
        }

        return 0;
}

int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
                                   struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
        btree_path_idx_t path_idx = iter->update_path ?: iter->path;
        int ret;

        if (iter->flags & BTREE_ITER_is_extents)
                return bch2_trans_update_extent(trans, iter, k, flags);

        if (bkey_deleted(&k->k) &&
            !(flags & BTREE_UPDATE_key_cache_reclaim) &&
            (iter->flags & BTREE_ITER_filter_snapshots)) {
                ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
                if (unlikely(ret < 0))
                        return ret;

                if (ret)
                        k->k.type = KEY_TYPE_whiteout;
        }

        /*
         * Ensure that updates to cached btrees go to the key cache:
         */
        struct btree_path *path = trans->paths + path_idx;
        if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
            !path->cached &&
            !path->level &&
            btree_id_cached(trans->c, path->btree_id)) {
                ret = bch2_trans_update_get_key_cache(trans, iter, path);
                if (ret)
                        return ret;

                path_idx = iter->key_cache_path;
        }

        return bch2_trans_update_by_path(trans, path_idx, k, flags, _RET_IP_);
}

int bch2_btree_insert_clone_trans(struct btree_trans *trans,
                                  enum btree_id btree,
                                  struct bkey_i *k)
{
        struct bkey_i *n = bch2_trans_kmalloc(trans, bkey_bytes(&k->k));
        int ret = PTR_ERR_OR_ZERO(n);
        if (ret)
                return ret;

        bkey_copy(n, k);
        return bch2_btree_insert_trans(trans, btree, n, 0);
}

struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
        unsigned new_top = trans->journal_entries_u64s + u64s;
        unsigned old_size = trans->journal_entries_size;

        if (new_top > trans->journal_entries_size) {
                trans->journal_entries_size = roundup_pow_of_two(new_top);

                btree_trans_stats(trans)->journal_entries_size = trans->journal_entries_size;
        }

        struct jset_entry *n =
                bch2_trans_kmalloc_nomemzero(trans,
                                trans->journal_entries_size * sizeof(u64));
        if (IS_ERR(n))
                return ERR_CAST(n);

        if (trans->journal_entries)
                memcpy(n, trans->journal_entries, old_size * sizeof(u64));
        trans->journal_entries = n;

        struct jset_entry *e = btree_trans_journal_entries_top(trans);
        trans->journal_entries_u64s = new_top;
        return e;
}
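
/*
 * E.g. (illustrative numbers): with journal_entries_size at 8 u64s and an
 * allocation pushing new_top to 11, the buffer regrows to
 * roundup_pow_of_two(11) == 16 u64s and the old entries are copied across;
 * amortized doubling, so a transaction accumulating many small entries
 * reallocates only O(log n) times.
 */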

int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
                             enum btree_id btree, struct bpos end)
{
        bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
        struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
        int ret = bkey_err(k);
        if (ret)
                goto err;

        bch2_btree_iter_advance(iter);
        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        BUG_ON(k.k->type != KEY_TYPE_deleted);

        if (bkey_gt(k.k->p, end)) {
                ret = -BCH_ERR_ENOSPC_btree_slot;
                goto err;
        }

        return 0;
err:
        bch2_trans_iter_exit(trans, iter);
        return ret;
}

void bch2_trans_commit_hook(struct btree_trans *trans,
                            struct btree_trans_commit_hook *h)
{
        h->next = trans->hooks;
        trans->hooks = h;
}
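
/*
 * Usage sketch (hypothetical hook; fn and next are the members of struct
 * btree_trans_commit_hook in btree_types.h):
 *
 *        static int my_hook_fn(struct btree_trans *trans,
 *                              struct btree_trans_commit_hook *h)
 *        {
 *                return 0;
 *        }
 *
 *        struct btree_trans_commit_hook hook = { .fn = my_hook_fn };
 *        bch2_trans_commit_hook(trans, &hook);
 *
 * Hooks are pushed onto a singly linked list headed at trans->hooks and run
 * at commit time, so the hook struct must stay live until the transaction
 * commits.
 */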

int bch2_btree_insert_nonextent(struct btree_trans *trans,
                                enum btree_id btree, struct bkey_i *k,
                                enum btree_iter_update_trigger_flags flags)
{
        struct btree_iter iter;
        int ret;

        bch2_trans_iter_init(trans, &iter, btree, k->k.p,
                             BTREE_ITER_cached|
                             BTREE_ITER_not_extents|
                             BTREE_ITER_intent);
        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, k, flags);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
                            struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
        struct btree_iter iter;
        bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
                             BTREE_ITER_intent|flags);
        int ret = bch2_btree_iter_traverse(&iter) ?:
                  bch2_trans_update(trans, &iter, k, flags);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}
/**
 * bch2_btree_insert - insert a key into the btree specified by @id
 * @c:                  pointer to struct bch_fs
 * @id:                 btree to insert into
 * @k:                  key to insert
 * @disk_res:           must be non-NULL whenever inserting or potentially
 *                      splitting data extents
 * @flags:              transaction commit flags
 * @iter_flags:         btree iter update trigger flags
 *
 * Returns:             0 on success, error code on failure
 */
int bch2_btree_insert(struct bch_fs *c, enum btree_id id, struct bkey_i *k,
                      struct disk_reservation *disk_res, int flags,
                      enum btree_iter_update_trigger_flags iter_flags)
{
        return bch2_trans_commit_do(c, disk_res, NULL, flags,
                             bch2_btree_insert_trans(trans, id, k, iter_flags));
}
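
/*
 * Usage sketch (hypothetical caller; error handling elided): insert a
 * marker key with no disk reservation and default commit behaviour:
 *
 *        struct bkey_i k;
 *
 *        bkey_init(&k.k);
 *        k.k.type = KEY_TYPE_set;
 *        k.k.p = POS(0, idx);
 *
 *        ret = bch2_btree_insert(c, btree, &k, NULL, 0, 0);
 */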

int bch2_btree_delete_at(struct btree_trans *trans,
                         struct btree_iter *iter, unsigned update_flags)
{
        struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
        int ret = PTR_ERR_OR_ZERO(k);
        if (ret)
                return ret;

        bkey_init(&k->k);
        k->k.p = iter->pos;
        return bch2_trans_update(trans, iter, k, update_flags);
}

int bch2_btree_delete(struct btree_trans *trans,
                      enum btree_id btree, struct bpos pos,
                      unsigned update_flags)
{
        struct btree_iter iter;
        int ret;

        bch2_trans_iter_init(trans, &iter, btree, pos,
                             BTREE_ITER_cached|
                             BTREE_ITER_intent);
        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_btree_delete_at(trans, &iter, update_flags);
        bch2_trans_iter_exit(trans, &iter);

        return ret;
}

int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
                                  struct bpos start, struct bpos end,
                                  unsigned update_flags,
                                  u64 *journal_seq)
{
        u32 restart_count = trans->restart_count;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
        while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
                struct disk_reservation disk_res =
                        bch2_disk_reservation_init(trans->c, 0);
                struct bkey_i delete;

                ret = bkey_err(k);
                if (ret)
                        goto err;

                bkey_init(&delete.k);

                /*
                 * This could probably be more efficient for extents:
                 */

                /*
                 * For extents, iter.pos won't necessarily be the same as
                 * bkey_start_pos(k.k) (for non extents they always will be the
                 * same). It's important that we delete starting from iter.pos
                 * because the range we want to delete could start in the middle
                 * of k.
                 *
                 * (bch2_btree_iter_peek() does guarantee that iter.pos >=
                 * bkey_start_pos(k.k)).
                 */
                delete.k.p = iter.pos;

                if (iter.flags & BTREE_ITER_is_extents)
                        bch2_key_resize(&delete.k,
                                        bpos_min(end, k.k->p).offset -
                                        iter.pos.offset);

                ret   = bch2_trans_update(trans, &iter, &delete, update_flags) ?:
                        bch2_trans_commit(trans, &disk_res, journal_seq,
                                          BCH_TRANS_COMMIT_no_enospc);
                bch2_disk_reservation_put(trans->c, &disk_res);
err:
                /*
                 * the bch2_trans_begin() call is in a weird place because we
                 * need to call it after every transaction commit, to avoid path
                 * overflow, but don't want to call it if the delete operation
                 * is a no-op and we have no work to do:
                 */
                bch2_trans_begin(trans);

                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        ret = 0;
                if (ret)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);

        return ret ?: trans_was_restarted(trans, restart_count);
}
/*
 * bch2_btree_delete_range - delete everything within a given range
 *
 * Range is a half open interval - [start, end)
 */
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
                            struct bpos start, struct bpos end,
                            unsigned update_flags,
                            u64 *journal_seq)
{
        int ret = bch2_trans_run(c,
                        bch2_btree_delete_range_trans(trans, id, start, end,
                                                      update_flags, journal_seq));
        if (ret == -BCH_ERR_transaction_restart_nested)
                ret = 0;
        return ret;
}
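
/*
 * Usage sketch (hypothetical caller): drop every extent belonging to inode
 * inum in snapshot snap:
 *
 *        ret = bch2_btree_delete_range(c, BTREE_ID_extents,
 *                                      SPOS(inum, 0, snap),
 *                                      SPOS(inum, U64_MAX, snap),
 *                                      0, NULL);
 */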

int bch2_btree_bit_mod_iter(struct btree_trans *trans, struct btree_iter *iter, bool set)
{
        struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
        int ret = PTR_ERR_OR_ZERO(k);
        if (ret)
                return ret;

        bkey_init(&k->k);
        k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
        k->k.p = iter->pos;
        if (iter->flags & BTREE_ITER_is_extents)
                bch2_key_resize(&k->k, 1);

        return bch2_trans_update(trans, iter, k, 0);
}

int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
                       struct bpos pos, bool set)
{
        struct btree_iter iter;
        bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

        int ret = bch2_btree_iter_traverse(&iter) ?:
                  bch2_btree_bit_mod_iter(trans, &iter, set);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
                                struct bpos pos, bool set)
{
        struct bkey_i k;

        bkey_init(&k.k);
        k.k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
        k.k.p = pos;

        return bch2_trans_update_buffered(trans, btree, &k);
}

int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf)
{
        unsigned u64s = DIV_ROUND_UP(buf->pos, sizeof(u64));
        prt_chars(buf, '\0', u64s * sizeof(u64) - buf->pos);

        int ret = buf->allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
        if (ret)
                return ret;

        struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
        ret = PTR_ERR_OR_ZERO(e);
        if (ret)
                return ret;

        struct jset_entry_log *l = container_of(e, struct jset_entry_log, entry);
        journal_entry_init(e, BCH_JSET_ENTRY_log, 0, 1, u64s);
        memcpy(l->d, buf->buf, buf->pos);
        return 0;
}

__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
                  va_list args)
{
        struct printbuf buf = PRINTBUF;
        prt_vprintf(&buf, fmt, args);

        unsigned u64s = DIV_ROUND_UP(buf.pos, sizeof(u64));
        prt_chars(&buf, '\0', u64s * sizeof(u64) - buf.pos);

        int ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
        if (ret)
                goto err;

        if (!test_bit(JOURNAL_running, &c->journal.flags)) {
                ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
                if (ret)
                        goto err;

                struct jset_entry_log *l = (void *) &darray_top(c->journal.early_journal_entries);
                journal_entry_init(&l->entry, BCH_JSET_ENTRY_log, 0, 1, u64s);
                memcpy(l->d, buf.buf, buf.pos);
                c->journal.early_journal_entries.nr += jset_u64s(u64s);
        } else {
                ret = bch2_trans_commit_do(c, NULL, NULL, commit_flags,
                        bch2_trans_log_msg(trans, &buf));
        }
err:
        printbuf_exit(&buf);
        return ret;
}

__printf(2, 3)
int bch2_fs_log_msg(struct bch_fs *c, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = __bch2_fs_log_msg(c, 0, fmt, args);
        va_end(args);
        return ret;
}

/*
 * Use for logging messages during recovery to enable reserved space and avoid
 * blocking.
 */
__printf(2, 3)
int bch2_journal_log_msg(struct bch_fs *c, const char *fmt, ...)
{
        va_list args;
        int ret;

        va_start(args, fmt);
        ret = __bch2_fs_log_msg(c, BCH_WATERMARK_reclaim, fmt, args);
        va_end(args);
        return ret;
}
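
/*
 * Usage sketch: both wrappers take printf-style arguments, e.g.
 *
 *        bch2_fs_log_msg(c, "running recovery pass %s", pass_name);
 *        bch2_journal_log_msg(c, "fixed %u errors", nr_fixed);
 *
 * the latter committing with BCH_WATERMARK_reclaim so it can dip into
 * reserved journal space rather than blocking during recovery.
 */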