// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
                        btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
        return iter->ip_allocated;
#else
        return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

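/*
 * Btree paths are kept sorted by (btree_id, cached, pos) ascending and level
 * descending; __btree_path_cmp() defines that order.  E.g. at equal pos, a
 * level-1 path sorts before a level-0 path.  This ordering must match the
 * order in which node locks are taken, so that lock ordering is consistent
 * across all of a transaction's paths.
 */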
static inline int __btree_path_cmp(const struct btree_path *l,
                                   enum btree_id        r_btree_id,
                                   bool                 r_cached,
                                   struct bpos          r_pos,
                                   unsigned             r_level)
{
        /*
         * Must match lock ordering as defined by __bch2_btree_node_lock:
         */
        return   cmp_int(l->btree_id,   r_btree_id) ?:
                 cmp_int((int) l->cached,       (int) r_cached) ?:
                 bpos_cmp(l->pos,       r_pos) ?:
                -cmp_int(l->level,      r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
                                 const struct btree_path *r)
{
        return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

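/*
 * bkey_successor()/bkey_predecessor(): step the iterator position by one key.
 * When iterating over keys in all snapshots the snapshot field participates
 * in the increment; otherwise the inode:offset part is stepped and the
 * snapshot field is pinned to the iterator's snapshot.
 */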
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
                p = bpos_successor(p);
        } else {
                p = bpos_nosnap_successor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
                p = bpos_predecessor(p);
        } else {
                p = bpos_nosnap_predecessor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

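/*
 * Extents are indexed by their end position: when searching on an extents
 * btree, start the search at the successor of the iterator position, so that
 * an extent ending exactly at pos (and thus not containing it) isn't found.
 */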
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
        struct bpos pos = iter->pos;

        if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
            !bkey_eq(pos, POS_MAX))
                pos = bkey_successor(iter, pos);
        return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
                                              struct btree *b)
{
        return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
                                             struct btree *b)
{
        return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
                                          struct btree *b)
{
        return path->btree_id == b->c.btree_id &&
                !btree_path_pos_before_node(path, b) &&
                !btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
                                          struct btree_path *path)
{
        struct bkey_cached *ck;
        bool locked = btree_node_locked(path, 0);

        if (!bch2_btree_node_relock(trans, path, 0))
                return;

        ck = (void *) path->l[0].b;
        BUG_ON(ck->key.btree_id != path->btree_id ||
               !bkey_eq(ck->key.pos, path->pos));

        if (!locked)
                btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
                                struct btree_path *path, unsigned level)
{
        struct btree_path_level *l;
        struct btree_node_iter tmp;
        bool locked;
        struct bkey_packed *p, *k;
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        struct printbuf buf3 = PRINTBUF;
        const char *msg;

        if (!bch2_debug_check_iterators)
                return;

        l       = &path->l[level];
        tmp     = l->iter;
        locked  = btree_node_locked(path, level);

        if (path->cached) {
                if (!level)
                        bch2_btree_path_verify_cached(trans, path);
                return;
        }

        if (!btree_path_node(path, level))
                return;

        if (!bch2_btree_node_relock_notrace(trans, path, level))
                return;

        BUG_ON(!btree_path_pos_in_node(path, l->b));

        bch2_btree_node_iter_verify(&l->iter, l->b);

        /*
         * For interior nodes, the iterator will have skipped past deleted keys:
         */
        p = level
                ? bch2_btree_node_iter_prev(&tmp, l->b)
                : bch2_btree_node_iter_prev_all(&tmp, l->b);
        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

        if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
                msg = "before";
                goto err;
        }

        if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                msg = "after";
                goto err;
        }

        if (!locked)
                btree_node_unlock(trans, path, level);
        return;
err:
        bch2_bpos_to_text(&buf1, path->pos);

        if (p) {
                struct bkey uk = bkey_unpack_key(l->b, p);

                bch2_bkey_to_text(&buf2, &uk);
        } else {
                prt_printf(&buf2, "(none)");
        }

        if (k) {
                struct bkey uk = bkey_unpack_key(l->b, k);

                bch2_bkey_to_text(&buf3, &uk);
        } else {
                prt_printf(&buf3, "(none)");
        }

        panic("path should be %s key at level %u:\n"
              "path pos %s\n"
              "prev key %s\n"
              "cur  key %s\n",
              msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
                                   struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        unsigned i;

        EBUG_ON(path->btree_id >= BTREE_ID_NR);

        for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
                if (!path->l[i].b) {
                        BUG_ON(!path->cached &&
                               bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
                        break;
                }

                bch2_btree_path_verify_level(trans, path, i);
        }

        bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
        struct btree_path *path;
        unsigned iter;

        trans_for_each_path(trans, path, iter)
                bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
        struct btree_trans *trans = iter->trans;

        BUG_ON(iter->btree_id >= BTREE_ID_NR);

        BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);

        BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
               (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

        BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
               (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
               !btree_type_has_snapshot_field(iter->btree_id));

        if (iter->update_path)
                bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
        bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
        BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
               !iter->pos.snapshot);

        BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
               iter->pos.snapshot != iter->snapshot);

        BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
               bkey_gt(iter->pos, iter->k.p));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
        struct btree_trans *trans = iter->trans;
        struct btree_iter copy;
        struct bkey_s_c prev;
        int ret = 0;

        if (!bch2_debug_check_iterators)
                return 0;

        if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
                return 0;

        if (bkey_err(k) || !k.k)
                return 0;

        BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
                                          iter->snapshot,
                                          k.k->p.snapshot));

        bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
                             BTREE_ITER_NOPRESERVE|
                             BTREE_ITER_ALL_SNAPSHOTS);
        prev = bch2_btree_iter_prev(&copy);
        if (!prev.k)
                goto out;

        ret = bkey_err(prev);
        if (ret)
                goto out;

        if (bkey_eq(prev.k->p, k.k->p) &&
            bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
                                      prev.k->p.snapshot) > 0) {
                struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

                bch2_bkey_to_text(&buf1, k.k);
                bch2_bkey_to_text(&buf2, prev.k);

                panic("iter snap %u\n"
                      "k    %s\n"
                      "prev %s\n",
                      iter->snapshot,
                      buf1.buf, buf2.buf);
        }
out:
        bch2_trans_iter_exit(trans, &copy);
        return ret;
}

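/*
 * bch2_assert_pos_locked(): debug assertion that the transaction holds a
 * lock covering @pos in btree @id - via a key cache path if @key_cache,
 * otherwise via a btree node whose range contains @pos.  Panics and dumps
 * the transaction's paths if no such locked path exists.
 */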
void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
                            struct bpos pos, bool key_cache)
{
        struct btree_path *path;
        struct trans_for_each_path_inorder_iter iter;
        struct printbuf buf = PRINTBUF;

        btree_trans_sort_paths(trans);

        trans_for_each_path_inorder(trans, path, iter) {
                int cmp = cmp_int(path->btree_id, id) ?:
                        cmp_int(path->cached, key_cache);

                if (cmp > 0)
                        break;
                if (cmp < 0)
                        continue;

                if (!btree_node_locked(path, 0) ||
                    !path->should_be_locked)
                        continue;

                if (!key_cache) {
                        if (bkey_ge(pos, path->l[0].b->data->min_key) &&
                            bkey_le(pos, path->l[0].b->key.k.p))
                                return;
                } else {
                        if (bkey_eq(pos, path->pos))
                                return;
                }
        }

        bch2_dump_trans_paths_updates(trans);
        bch2_bpos_to_text(&buf, pos);

        panic("not locked: %s %s%s\n",
              bch2_btree_id_str(id), buf.buf,
              key_cache ? " cached" : "");
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
                                                struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
                                          struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

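/*
 * Point the node iterator's set for bset @t at key @k, re-sorting the sets;
 * if the iterator doesn't currently have a set for @t, push a new one.
 */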
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
                                        struct btree *b,
                                        struct bset_tree *t,
                                        struct bkey_packed *k)
{
        struct btree_node_iter_set *set;

        btree_node_iter_for_each(iter, set)
                if (set->end == t->end_offset) {
                        set->k = __btree_node_key_to_offset(b, k);
                        bch2_btree_node_iter_sort(iter, b);
                        return;
                }

        bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
                                               struct btree *b,
                                               struct bkey_packed *where)
{
        struct btree_path_level *l = &path->l[b->c.level];

        if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
                return;

        if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
                bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
                                      struct btree *b,
                                      struct bkey_packed *where)
{
        struct btree_path *path;
        unsigned i;

        trans_for_each_path_with_node(trans, b, path, i) {
                __bch2_btree_path_fix_key_modified(path, b, where);
                bch2_btree_path_verify_level(trans, path, b->c.level);
        }
}

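/*
 * Fix up a node iterator after an insert or overwrite in @b: at @where,
 * @clobber_u64s worth of existing key(s) were replaced by @new_u64s worth of
 * new key, shifting everything after @where by (new_u64s - clobber_u64s).
 * Iterator offsets into the modified bset are adjusted to compensate.
 */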
static void __bch2_btree_node_iter_fix(struct btree_path *path,
                                       struct btree *b,
                                       struct btree_node_iter *node_iter,
                                       struct bset_tree *t,
                                       struct bkey_packed *where,
                                       unsigned clobber_u64s,
                                       unsigned new_u64s)
{
        const struct bkey_packed *end = btree_bkey_last(b, t);
        struct btree_node_iter_set *set;
        unsigned offset = __btree_node_key_to_offset(b, where);
        int shift = new_u64s - clobber_u64s;
        unsigned old_end = t->end_offset - shift;
        unsigned orig_iter_pos = node_iter->data[0].k;
        bool iter_current_key_modified =
                orig_iter_pos >= offset &&
                orig_iter_pos <= offset + clobber_u64s;

        btree_node_iter_for_each(node_iter, set)
                if (set->end == old_end)
                        goto found;

        /* didn't find the bset in the iterator - might have to re-add it: */
        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                bch2_btree_node_iter_push(node_iter, b, where, end);
                goto fixup_done;
        } else {
                /* Iterator is after key that changed */
                return;
        }
found:
        set->end = t->end_offset;

        /* Iterator hasn't gotten to the key that changed yet: */
        if (set->k < offset)
                return;

        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                set->k = offset;
        } else if (set->k < offset + clobber_u64s) {
                set->k = offset + new_u64s;
                if (set->k == set->end)
                        bch2_btree_node_iter_set_drop(node_iter, set);
        } else {
                /* Iterator is after key that changed */
                set->k = (int) set->k + shift;
                return;
        }

        bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
        if (node_iter->data[0].k != orig_iter_pos)
                iter_current_key_modified = true;

        /*
         * When a new key is added, and the node iterator now points to that
         * key, the iterator might have skipped past deleted keys that should
         * come after the key the iterator now points to. We have to rewind to
         * before those deleted keys - otherwise
         * bch2_btree_node_iter_prev_all() breaks:
         */
        if (!bch2_btree_node_iter_end(node_iter) &&
            iter_current_key_modified &&
            b->c.level) {
                struct bkey_packed *k, *k2, *p;

                k = bch2_btree_node_iter_peek_all(node_iter, b);

                for_each_bset(b, t) {
                        bool set_pos = false;

                        if (node_iter->data[0].end == t->end_offset)
                                continue;

                        k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

                        while ((p = bch2_bkey_prev_all(b, t, k2)) &&
                               bkey_iter_cmp(b, k, p) < 0) {
                                k2 = p;
                                set_pos = true;
                        }

                        if (set_pos)
                                btree_node_iter_set_set_pos(node_iter,
                                                            b, t, k2);
                }
        }
}

void bch2_btree_node_iter_fix(struct btree_trans *trans,
                              struct btree_path *path,
                              struct btree *b,
                              struct btree_node_iter *node_iter,
                              struct bkey_packed *where,
                              unsigned clobber_u64s,
                              unsigned new_u64s)
{
        struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
        struct btree_path *linked;
        unsigned i;

        if (node_iter != &path->l[b->c.level].iter) {
                __bch2_btree_node_iter_fix(path, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);

                if (bch2_debug_check_iterators)
                        bch2_btree_node_iter_verify(node_iter, b);
        }

        trans_for_each_path_with_node(trans, b, linked, i) {
                __bch2_btree_node_iter_fix(linked, b,
                                           &linked->l[b->c.level].iter, t,
                                           where, clobber_u64s, new_u64s);
                bch2_btree_path_verify_level(trans, linked, b->c.level);
        }
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
                                                  struct btree_path_level *l,
                                                  struct bkey *u,
                                                  struct bkey_packed *k)
{
        if (unlikely(!k)) {
                /*
                 * signal to bch2_btree_iter_peek_slot() that we're currently at
                 * a hole
                 */
                u->type = KEY_TYPE_deleted;
                return bkey_s_c_null;
        }

        return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
                                                        struct btree_path_level *l,
                                                        struct bkey *u)
{
        return __btree_iter_unpack(c, l, u,
                        bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
                        bch2_btree_node_iter_peek(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->key.k.p;
        trans->paths_sorted = false;
        bch2_btree_path_verify_level(trans, path, l - path->l);
        return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
                        bch2_btree_node_iter_prev(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->data->min_key;
        trans->paths_sorted = false;
        bch2_btree_path_verify_level(trans, path, l - path->l);
        return k;
}

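/*
 * Advance the node iterator until it's at or past path->pos; gives up and
 * returns false after @max_advance steps (if max_advance is positive), in
 * which case the caller reinitializes the iterator with a fresh search
 * instead.
 */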
static inline bool btree_path_advance_to_pos(struct btree_path *path,
                                             struct btree_path_level *l,
                                             int max_advance)
{
        struct bkey_packed *k;
        int nr_advanced = 0;

        while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
               bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                if (max_advance > 0 && nr_advanced >= max_advance)
                        return false;

                bch2_btree_node_iter_advance(&l->iter, l->b);
                nr_advanced++;
        }

        return true;
}

static inline void __btree_path_level_init(struct btree_path *path,
                                           unsigned level)
{
        struct btree_path_level *l = &path->l[level];

        bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

        /*
         * Iterators to interior nodes should always be pointed at the first
         * non-whiteout:
         */
        if (level)
                bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
                                struct btree_path *path,
                                struct btree *b)
{
        BUG_ON(path->cached);

        EBUG_ON(!btree_path_pos_in_node(path, b));

        path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
        path->l[b->c.level].b = b;
        __btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
        struct bch_fs *c = trans->c;

        trans_for_each_update(trans, i)
                if (!i->cached &&
                    i->level    == b->c.level &&
                    i->btree_id == b->c.btree_id &&
                    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
                    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
                        i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

                        if (unlikely(trans->journal_replay_not_finished)) {
                                struct bkey_i *j_k =
                                        bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
                                                                    i->k->k.p);

                                if (j_k) {
                                        i->old_k = j_k->k;
                                        i->old_v = &j_k->v;
                                }
                        }
                }
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
                         struct btree_path *path,
                         struct btree *b)
{
        struct btree_path *prev;

        BUG_ON(!btree_path_pos_in_node(path, b));

        while ((prev = prev_btree_path(trans, path)) &&
               btree_path_pos_in_node(prev, b))
                path = prev;

        for (;
             path && btree_path_pos_in_node(path, b);
             path = next_btree_path(trans, path))
                if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
                        enum btree_node_locked_type t =
                                btree_lock_want(path, b->c.level);

                        if (t != BTREE_NODE_UNLOCKED) {
                                btree_node_unlock(trans, path, b->c.level);
                                six_lock_increment(&b->c.lock, (enum six_lock_type) t);
                                mark_btree_node_locked(trans, path, b->c.level, t);
                        }

                        bch2_btree_path_level_init(trans, path, b);
                }

        bch2_trans_revalidate_updates_in_node(trans, b);
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
        struct btree_path *path;
        unsigned i;

        trans_for_each_path_with_node(trans, b, path, i)
                __btree_path_level_init(path, b->c.level);

        bch2_trans_revalidate_updates_in_node(trans, b);
}

/* Btree path: traverse, set_pos: */

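/*
 * Lock the root node for @path: reread the root pointer and retry until we
 * hold a lock on a node that is still the root - the root can be replaced
 * out from under us while we wait for the lock.  Returns 1 if the tree is
 * shallower than the depth the path wants.
 */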
static inline int btree_path_lock_root(struct btree_trans *trans,
                                       struct btree_path *path,
                                       unsigned depth_want,
                                       unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
        enum six_lock_type lock_type;
        unsigned i;
        int ret;

        EBUG_ON(path->nodes_locked);

        while (1) {
                b = READ_ONCE(*rootp);
                path->level = READ_ONCE(b->c.level);

                if (unlikely(path->level < depth_want)) {
                        /*
                         * the root is at a lower depth than the depth we want:
                         * got to the end of the btree, or we're walking nodes
                         * greater than some depth and there are no nodes >=
                         * that depth
                         */
                        path->level = depth_want;
                        for (i = path->level; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;
                        return 1;
                }

                lock_type = __btree_lock_want(path, path->level);
                ret = btree_node_lock(trans, path, &b->c,
                                      path->level, lock_type, trace_ip);
                if (unlikely(ret)) {
                        if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
                                continue;
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                                return ret;
                        BUG();
                }

                if (likely(b == READ_ONCE(*rootp) &&
                           b->c.level == path->level &&
                           !race_fault())) {
                        for (i = 0; i < path->level; i++)
                                path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
                        path->l[path->level].b = b;
                        for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;

                        mark_btree_node_locked(trans, path, path->level,
                                               (enum btree_node_locked_type) lock_type);
                        bch2_btree_path_level_init(trans, path, b);
                        return 0;
                }

                six_unlock_type(&b->c.lock, lock_type);
        }
}

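/*
 * Issue reads for the next few child nodes the path is likely to visit, so
 * they're already in the btree node cache when we descend to them.  More
 * nodes are prefetched during recovery, before BCH_FS_started is set.
 */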
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_started, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_node_iter_advance(&node_iter, l->b);
                k = bch2_btree_node_iter_peek(&node_iter, l->b);
                if (!k)
                        break;

                bch2_bkey_buf_unpack(&tmp, c, l->b, k);
                ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(trans, path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_and_journal_iter *jiter)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_started, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_and_journal_iter_advance(jiter);
                k = bch2_btree_and_journal_iter_peek(jiter);
                if (!k.k)
                        break;

                bch2_bkey_buf_reassemble(&tmp, c, k);
                ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(trans, path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
                                            struct btree_path *path,
                                            unsigned plevel, struct btree *b)
{
        struct btree_path_level *l = &path->l[plevel];
        bool locked = btree_node_locked(path, plevel);
        struct bkey_packed *k;
        struct bch_btree_ptr_v2 *bp;

        if (!bch2_btree_node_relock(trans, path, plevel))
                return;

        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
        BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

        bp = (void *) bkeyp_val(&l->b->format, k);
        bp->mem_ptr = (unsigned long)b;

        if (!locked)
                btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
                                                     struct btree_path *path,
                                                     unsigned flags,
                                                     struct bkey_buf *out)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_and_journal_iter jiter;
        struct bkey_s_c k;
        int ret = 0;

        __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

        k = bch2_btree_and_journal_iter_peek(&jiter);

        bch2_bkey_buf_reassemble(out, c, k);

        if ((flags & BTREE_ITER_PREFETCH) &&
            c->opts.btree_node_prefetch)
                ret = btree_path_prefetch_j(trans, path, &jiter);

        bch2_btree_and_journal_iter_exit(&jiter);
        return ret;
}

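/*
 * Descend one level: peek the child node pointer from the parent's node
 * iterator (merged with the journal if replay hasn't finished), get and
 * lock the child node, then point the path at it.
 */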
static __always_inline int btree_path_down(struct btree_trans *trans,
                                           struct btree_path *path,
                                           unsigned flags,
                                           unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree *b;
        unsigned level = path->level - 1;
        enum six_lock_type lock_type = __btree_lock_want(path, level);
        struct bkey_buf tmp;
        int ret;

        EBUG_ON(!btree_node_locked(path, path->level));

        bch2_bkey_buf_init(&tmp);

        if (unlikely(trans->journal_replay_not_finished)) {
                ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
                if (ret)
                        goto err;
        } else {
                bch2_bkey_buf_unpack(&tmp, c, l->b,
                                 bch2_btree_node_iter_peek(&l->iter, l->b));

                if ((flags & BTREE_ITER_PREFETCH) &&
                    c->opts.btree_node_prefetch) {
                        ret = btree_path_prefetch(trans, path);
                        if (ret)
                                goto err;
                }
        }

        b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
        ret = PTR_ERR_OR_ZERO(b);
        if (unlikely(ret))
                goto err;

        if (likely(!trans->journal_replay_not_finished &&
                   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
            unlikely(b != btree_node_mem_ptr(tmp.k)))
                btree_node_mem_ptr_set(trans, path, level + 1, b);

        if (btree_node_read_locked(path, level + 1))
                btree_node_unlock(trans, path, level + 1);

        mark_btree_node_locked(trans, path, level,
                               (enum btree_node_locked_type) lock_type);
        path->level = level;
        bch2_btree_path_level_init(trans, path, b);

        bch2_btree_path_verify_locks(path);
err:
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

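/*
 * Traverse every path in the transaction, in sorted (lock) order; used when
 * all traversals need to be redone from scratch, e.g. after a transaction
 * restart.  Traversing one path can allocate new paths, so the loop rescans
 * the sorted list until every path is uptodate.
 */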
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_path *path;
        unsigned long trace_ip = _RET_IP_;
        unsigned i;
        int ret = 0;

        if (trans->in_traverse_all)
                return -BCH_ERR_transaction_restart_in_traverse_all;

        trans->in_traverse_all = true;
retry_all:
        trans->restarted = 0;
        trans->last_restarted_ip = 0;

        trans_for_each_path(trans, path, i)
                path->should_be_locked = false;

        btree_trans_sort_paths(trans);

        bch2_trans_unlock(trans);
        cond_resched();

        if (unlikely(trans->memory_allocation_failure)) {
                struct closure cl;

                closure_init_stack(&cl);

                do {
                        ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                        closure_sync(&cl);
                } while (ret);
        }

        /* Now, redo traversals in correct order: */
        i = 0;
        while (i < trans->nr_sorted) {
                btree_path_idx_t idx = trans->sorted[i];

                /*
                 * Traversing a path can cause another path to be added at about
                 * the same position:
                 */
                if (trans->paths[idx].uptodate) {
                        __btree_path_get(&trans->paths[idx], false);
                        ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
                        __btree_path_put(&trans->paths[idx], false);

                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
                            bch2_err_matches(ret, ENOMEM))
                                goto retry_all;
                        if (ret)
                                goto err;
                } else {
                        i++;
                }
        }

        /*
         * We used to assert that all paths had been traversed here
         * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
         * path->should_be_locked is not set yet, we might have unlocked and
         * then failed to relock a path - that's fine.
         */
err:
        bch2_btree_cache_cannibalize_unlock(trans);

        trans->in_traverse_all = false;

        trace_and_count(c, trans_traverse_all, trans, trace_ip);
        return ret;
}

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
                                                unsigned l, int check_pos)
{
        if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
                return false;
        if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
                return false;
        return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
                                        struct btree_path *path,
                                        unsigned l, int check_pos)
{
        return is_btree_node(path, l) &&
                bch2_btree_node_relock(trans, path, l) &&
                btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
                                      struct btree_path *path,
                                      unsigned new_level)
{
        unsigned l;

        path->level = new_level;

        for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
                if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
                        btree_node_unlock(trans, path, l);

        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
        bch2_btree_path_verify(trans, path);
}

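/*
 * Walk up the path until we're at a node that's locked or can be relocked,
 * and that still contains the search position.  @check_pos is the direction
 * the position moved, if any: only that boundary of each node needs
 * rechecking (0 means the position is known to be within the node).
 */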
static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
                                                         struct btree_path *path,
                                                         int check_pos)
{
        unsigned i, l = path->level;
again:
        while (btree_path_node(path, l) &&
               !btree_path_good_node(trans, path, l, check_pos))
                __btree_path_set_level_up(trans, path, l++);

        /* If we need intent locks, take them too: */
        for (i = l + 1;
             i < path->locks_want && btree_path_node(path, i);
             i++)
                if (!bch2_btree_node_relock(trans, path, i)) {
                        while (l <= i)
                                __btree_path_set_level_up(trans, path, l++);
                        goto again;
                }

        return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
                                                     struct btree_path *path,
                                                     int check_pos)
{
        return likely(btree_node_locked(path, path->level) &&
                      btree_path_check_pos_in_node(path, path->level, check_pos))
                ? path->level
                : __btree_path_up_until_good_node(trans, path, check_pos);
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
                                 btree_path_idx_t path_idx,
                                 unsigned flags,
                                 unsigned long trace_ip)
{
        struct btree_path *path = &trans->paths[path_idx];
        unsigned depth_want = path->level;
        int ret = -((int) trans->restarted);

        if (unlikely(ret))
                goto out;

        if (unlikely(!trans->srcu_held))
                bch2_trans_srcu_lock(trans);

        /*
         * Ensure we obey path->should_be_locked: if it's set, we can't unlock
         * and re-traverse the path without a transaction restart:
         */
        if (path->should_be_locked) {
                ret = bch2_btree_path_relock(trans, path, trace_ip);
                goto out;
        }

        if (path->cached) {
                ret = bch2_btree_path_traverse_cached(trans, path, flags);
                goto out;
        }

        path = &trans->paths[path_idx];

        if (unlikely(path->level >= BTREE_MAX_DEPTH))
                goto out_uptodate;

        path->level = btree_path_up_until_good_node(trans, path, 0);

        EBUG_ON(btree_path_node(path, path->level) &&
                !btree_node_locked(path, path->level));

        /*
         * Note: path->nodes[path->level] may be temporarily NULL here - that
         * would indicate to other code that we got to the end of the btree,
         * here it indicates that relocking the root failed - it's critical that
         * btree_path_lock_root() comes next and that it can't fail
         */
        while (path->level > depth_want) {
                ret = btree_path_node(path, path->level)
                        ? btree_path_down(trans, path, flags, trace_ip)
                        : btree_path_lock_root(trans, path, depth_want, trace_ip);
                if (unlikely(ret)) {
                        if (ret == 1) {
                                /*
                                 * No nodes at this level - got to the end of
                                 * the btree:
                                 */
                                ret = 0;
                                goto out;
                        }

                        __bch2_btree_path_unlock(trans, path);
                        path->level = depth_want;
                        path->l[path->level].b = ERR_PTR(ret);
                        goto out;
                }
        }
out_uptodate:
        path->uptodate = BTREE_ITER_UPTODATE;
out:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
                panic("ret %s (%i) trans->restarted %s (%i)\n",
                      bch2_err_str(ret), ret,
                      bch2_err_str(trans->restarted), trans->restarted);
        bch2_btree_path_verify(trans, path);
        return ret;
}

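/*
 * Copy @src into @dst from the pos field onwards, leaving dst's earlier
 * fields (reference counts, sort index) intact, then take an extra count on
 * every node lock the copy now references via six_lock_increment() - both
 * paths hold those locks independently afterwards.
 */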
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
                            struct btree_path *src)
{
        unsigned i, offset = offsetof(struct btree_path, pos);

        memcpy((void *) dst + offset,
               (void *) src + offset,
               sizeof(struct btree_path) - offset);

        for (i = 0; i < BTREE_MAX_DEPTH; i++) {
                unsigned t = btree_node_locked_type(dst, i);

                if (t != BTREE_NODE_UNLOCKED)
                        six_lock_increment(&dst->l[i].b->c.lock, t);
        }
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
                                         bool intent)
{
        btree_path_idx_t new = btree_path_alloc(trans, src);
        btree_path_copy(trans, trans->paths + new, trans->paths + src);
        __btree_path_get(trans->paths + new, intent);
        return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
                        btree_path_idx_t path, bool intent, unsigned long ip)
{
        __btree_path_put(trans->paths + path, intent);
        path = btree_path_clone(trans, path, intent);
        trans->paths[path].preserve = false;
        return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
                          btree_path_idx_t path_idx, struct bpos new_pos,
                          bool intent, unsigned long ip)
{
        int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

        bch2_trans_verify_not_in_restart(trans);
        EBUG_ON(!trans->paths[path_idx].ref);

        path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

        struct btree_path *path = trans->paths + path_idx;
        path->pos               = new_pos;
        trans->paths_sorted     = false;

        if (unlikely(path->cached)) {
                btree_node_unlock(trans, path, 0);
                path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                goto out;
        }

        unsigned level = btree_path_up_until_good_node(trans, path, cmp);

        if (btree_path_node(path, level)) {
                struct btree_path_level *l = &path->l[level];

                BUG_ON(!btree_node_locked(path, level));
                /*
                 * We might have to skip over many keys, or just a few: try
                 * advancing the node iterator, and if we have to skip over too
                 * many keys just reinit it (or if we're rewinding, since that
                 * is expensive).
                 */
                if (cmp < 0 ||
                    !btree_path_advance_to_pos(path, l, 8))
                        bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

                /*
                 * Iterators to interior nodes should always be pointed at the
                 * first non-whiteout:
                 */
                if (unlikely(level))
                        bch2_btree_node_iter_peek(&l->iter, l->b);
        }

        if (unlikely(level != path->level)) {
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                __bch2_btree_path_unlock(trans, path);
        }
out:
        bch2_btree_path_verify(trans, path);
        return path_idx;
}

/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
        struct btree_path *sib;

        sib = prev_btree_path(trans, path);
        if (sib && !btree_path_cmp(sib, path))
                return sib;

        sib = next_btree_path(trans, path);
        if (sib && !btree_path_cmp(sib, path))
                return sib;

        return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
        struct btree_path *sib;

        sib = prev_btree_path(trans, path);
        if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
                return sib;

        sib = next_btree_path(trans, path);
        if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
                return sib;

        return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
        __bch2_btree_path_unlock(trans, trans->paths + path);
        btree_path_list_remove(trans, trans->paths + path);
        __clear_bit(path, trans->paths_allocated);
}

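/*
 * Drop a reference on a path.  When the last ref goes away the path is
 * freed, unless it's marked preserve and no duplicate path exists (one at
 * the same position, or on the same node) that can inherit its preserve and
 * should_be_locked flags; a path that should_be_locked is also kept if the
 * duplicate can't be relocked in its place.
 */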
1324 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1325 {
1326         struct btree_path *path = trans->paths + path_idx, *dup;
1327
1328         if (!__btree_path_put(path, intent))
1329                 return;
1330
1331         dup = path->preserve
1332                 ? have_path_at_pos(trans, path)
1333                 : have_node_at_pos(trans, path);
1334
1335         if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1336                 return;
1337
1338         if (path->should_be_locked &&
1339             !trans->restarted &&
1340             (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
1341                 return;
1342
1343         if (dup) {
1344                 dup->preserve           |= path->preserve;
1345                 dup->should_be_locked   |= path->should_be_locked;
1346         }
1347
1348         __bch2_path_free(trans, path_idx);
1349 }
1350
1351 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1352                                  bool intent)
1353 {
1354         if (!__btree_path_put(trans->paths + path, intent))
1355                 return;
1356
1357         __bch2_path_free(trans, path);
1358 }
1359
1360 void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1361 {
1362         panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1363               trans->restart_count, restart_count,
1364               (void *) trans->last_begin_ip);
1365 }
1366
1367 void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
1368 {
1369         panic("in transaction restart: %s, last restarted by %pS\n",
1370               bch2_err_str(trans->restarted),
1371               (void *) trans->last_restarted_ip);
1372 }
1373
1374 noinline __cold
1375 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1376 {
1377         prt_printf(buf, "transaction updates for %s journal seq %llu",
1378                trans->fn, trans->journal_res.seq);
1379         prt_newline(buf);
1380         printbuf_indent_add(buf, 2);
1381
1382         trans_for_each_update(trans, i) {
1383                 struct bkey_s_c old = { &i->old_k, i->old_v };
1384
1385                 prt_printf(buf, "update: btree=%s cached=%u %pS",
1386                        bch2_btree_id_str(i->btree_id),
1387                        i->cached,
1388                        (void *) i->ip_allocated);
1389                 prt_newline(buf);
1390
1391                 prt_printf(buf, "  old ");
1392                 bch2_bkey_val_to_text(buf, trans->c, old);
1393                 prt_newline(buf);
1394
1395                 prt_printf(buf, "  new ");
1396                 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1397                 prt_newline(buf);
1398         }
1399
1400         for (struct jset_entry *e = trans->journal_entries;
1401              e != btree_trans_journal_entries_top(trans);
1402              e = vstruct_next(e))
1403                 bch2_journal_entry_to_text(buf, trans->c, e);
1404
1405         printbuf_indent_sub(buf, 2);
1406 }
1407
1408 noinline __cold
1409 void bch2_dump_trans_updates(struct btree_trans *trans)
1410 {
1411         struct printbuf buf = PRINTBUF;
1412
1413         bch2_trans_updates_to_text(&buf, trans);
1414         bch2_print_string_as_lines(KERN_ERR, buf.buf);
1415         printbuf_exit(&buf);
1416 }
1417
1418 static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
1419 {
1420         struct btree_path *path = trans->paths + path_idx;
1421
1422         prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
1423                    path_idx, path->ref, path->intent_ref,
1424                    path->preserve ? 'P' : ' ',
1425                    path->should_be_locked ? 'S' : ' ',
1426                    bch2_btree_id_str(path->btree_id),
1427                    path->level);
1428         bch2_bpos_to_text(out, path->pos);
1429
1430         prt_printf(out, " locks %u", path->nodes_locked);
1431 #ifdef TRACK_PATH_ALLOCATED
1432         prt_printf(out, " %pS", (void *) path->ip_allocated);
1433 #endif
1434         prt_newline(out);
1435 }
1436
1437 static noinline __cold
1438 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1439                                 bool nosort)
1440 {
1441         struct trans_for_each_path_inorder_iter iter;
1442
1443         if (!nosort)
1444                 btree_trans_sort_paths(trans);
1445
1446         trans_for_each_path_idx_inorder(trans, iter)
1447                 bch2_btree_path_to_text(out, trans, iter.path_idx);
1448 }
1449
1450 noinline __cold
1451 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1452 {
1453         __bch2_trans_paths_to_text(out, trans, false);
1454 }
1455
1456 static noinline __cold
1457 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1458 {
1459         struct printbuf buf = PRINTBUF;
1460
1461         __bch2_trans_paths_to_text(&buf, trans, nosort);
1462         bch2_trans_updates_to_text(&buf, trans);
1463
1464         bch2_print_string_as_lines(KERN_ERR, buf.buf);
1465         printbuf_exit(&buf);
1466 }
1467
1468 noinline __cold
1469 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1470 {
1471         __bch2_dump_trans_paths_updates(trans, false);
1472 }
1473
1474 noinline __cold
1475 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1476 {
1477         struct btree_transaction_stats *s = btree_trans_stats(trans);
1478         struct printbuf buf = PRINTBUF;
1479         size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);
1480
1481         bch2_trans_paths_to_text(&buf, trans);
1482
1483         if (!buf.allocation_failure) {
1484                 mutex_lock(&s->lock);
1485                 if (nr > s->nr_max_paths) {
1486                         s->nr_max_paths = nr;
1487                         swap(s->max_paths_text, buf.buf);
1488                 }
1489                 mutex_unlock(&s->lock);
1490         }
1491
1492         printbuf_exit(&buf);
1493
1494         trans->nr_paths_max = nr;
1495 }
1496
1497 noinline __cold
1498 int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
1499 {
1500         if (trace_trans_restart_too_many_iters_enabled()) {
1501                 struct printbuf buf = PRINTBUF;
1502
1503                 bch2_trans_paths_to_text(&buf, trans);
1504                 trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
1505                 printbuf_exit(&buf);
1506         }
1507
1508         count_event(trans->c, trans_restart_too_many_iters);
1509
1510         return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
1511 }
1512
1513 static noinline void btree_path_overflow(struct btree_trans *trans)
1514 {
1515         bch2_dump_trans_paths_updates(trans);
1516         bch_err(trans->c, "trans path overflow");
1517 }
1518
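/*
 * Grow all the per-path arrays, which live in a single allocation: the
 * paths_allocated bitmap, struct btree_trans_paths, the paths array
 * itself, the sorted-index array (plus 8 bytes of padding for the
 * u64-granularity memmoves in btree_path_list_add/remove()), and the
 * updates array. The new arrays are published with rcu_assign_pointer()
 * so lockless readers see either the old or the new copy, and the old
 * allocation is freed via kfree_rcu_mightsleep() - unless it was the
 * initial array embedded in struct btree_trans.
 */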
1519 static noinline void btree_paths_realloc(struct btree_trans *trans)
1520 {
1521         unsigned nr = trans->nr_paths * 2;
1522
1523         void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
1524                           sizeof(struct btree_trans_paths) +
1525                           nr * sizeof(struct btree_path) +
1526                           nr * sizeof(btree_path_idx_t) + 8 +
1527                           nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);
1528
1529         unsigned long *paths_allocated = p;
1530         memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
1531         p += BITS_TO_LONGS(nr) * sizeof(unsigned long);
1532
1533         p += sizeof(struct btree_trans_paths);
1534         struct btree_path *paths = p;
1535         *trans_paths_nr(paths) = nr;
1536         memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
1537         p += nr * sizeof(struct btree_path);
1538
1539         btree_path_idx_t *sorted = p;
1540         memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
1541         p += nr * sizeof(btree_path_idx_t) + 8;
1542
1543         struct btree_insert_entry *updates = p;
1544         memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));
1545
1546         unsigned long *old = trans->paths_allocated;
1547
1548         rcu_assign_pointer(trans->paths_allocated,      paths_allocated);
1549         rcu_assign_pointer(trans->paths,                paths);
1550         rcu_assign_pointer(trans->sorted,               sorted);
1551         rcu_assign_pointer(trans->updates,              updates);
1552
1553         trans->nr_paths         = nr;
1554
1555         if (old != trans->_paths_allocated)
1556                 kfree_rcu_mightsleep(old);
1557 }
1558
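/*
 * Allocate a new path: find the first free slot in the paths_allocated
 * bitmap, doubling the arrays if they're full - up to BTREE_ITER_MAX, at
 * which point we dump the transaction's paths and return 0 (index 0 is
 * reserved, so 0 always means allocation failed).
 */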
1559 static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
1560                                                 btree_path_idx_t pos)
1561 {
1562         btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);
1563
1564         if (unlikely(idx == trans->nr_paths)) {
1565                 if (trans->nr_paths == BTREE_ITER_MAX) {
1566                         btree_path_overflow(trans);
1567                         return 0;
1568                 }
1569
1570                 btree_paths_realloc(trans);
1571         }
1572
1573         /*
1574          * Do this before marking the new path as allocated, since it won't be
1575          * initialized yet:
1576          */
1577         if (unlikely(idx > trans->nr_paths_max))
1578                 bch2_trans_update_max_paths(trans);
1579
1580         __set_bit(idx, trans->paths_allocated);
1581
1582         struct btree_path *path = &trans->paths[idx];
1583         path->ref               = 0;
1584         path->intent_ref        = 0;
1585         path->nodes_locked      = 0;
1586
1587         btree_path_list_add(trans, pos, idx);
1588         trans->paths_sorted = false;
1589         return idx;
1590 }
1591
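/*
 * Get a path to @pos in @btree_id: since paths are kept sorted, walk them
 * in order looking for an existing path with matching btree_id/cached/level
 * that we can reuse by taking a ref and repositioning it; otherwise,
 * allocate a fresh path, insert it after the closest preceding path, and
 * mark it as needing traversal.
 */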
1592 btree_path_idx_t bch2_path_get(struct btree_trans *trans,
1593                              enum btree_id btree_id, struct bpos pos,
1594                              unsigned locks_want, unsigned level,
1595                              unsigned flags, unsigned long ip)
1596 {
1597         struct btree_path *path;
1598         bool cached = flags & BTREE_ITER_CACHED;
1599         bool intent = flags & BTREE_ITER_INTENT;
1600         struct trans_for_each_path_inorder_iter iter;
1601         btree_path_idx_t path_pos = 0, path_idx;
1602
1603         bch2_trans_verify_not_in_restart(trans);
1604         bch2_trans_verify_locks(trans);
1605
1606         btree_trans_sort_paths(trans);
1607
1608         trans_for_each_path_inorder(trans, path, iter) {
1609                 if (__btree_path_cmp(path,
1610                                      btree_id,
1611                                      cached,
1612                                      pos,
1613                                      level) > 0)
1614                         break;
1615
1616                 path_pos = iter.path_idx;
1617         }
1618
1619         if (path_pos &&
1620             trans->paths[path_pos].cached       == cached &&
1621             trans->paths[path_pos].btree_id     == btree_id &&
1622             trans->paths[path_pos].level        == level) {
1623                 __btree_path_get(trans->paths + path_pos, intent);
1624                 path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1625                 path = trans->paths + path_idx;
1626         } else {
1627                 path_idx = btree_path_alloc(trans, path_pos);
1628                 path = trans->paths + path_idx;
1629
1630                 __btree_path_get(path, intent);
1631                 path->pos                       = pos;
1632                 path->btree_id                  = btree_id;
1633                 path->cached                    = cached;
1634                 path->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
1635                 path->should_be_locked          = false;
1636                 path->level                     = level;
1637                 path->locks_want                = locks_want;
1638                 path->nodes_locked              = 0;
1639                 for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
1640                         path->l[i].b            = ERR_PTR(-BCH_ERR_no_btree_node_init);
1641 #ifdef TRACK_PATH_ALLOCATED
1642                 path->ip_allocated              = ip;
1643 #endif
1644                 trans->paths_sorted             = false;
1645         }
1646
1647         if (!(flags & BTREE_ITER_NOPRESERVE))
1648                 path->preserve = true;
1649
1650         if (path->intent_ref)
1651                 locks_want = max(locks_want, level + 1);
1652
1653         /*
1654          * If the path already has locks_want greater than requested, we don't
1655          * downgrade it here: after a transaction restart (e.g. because a btree
1656          * node split needed to upgrade locks), we might put/get the iterator again.
1657          * Downgrading iterators only happens via bch2_trans_downgrade(), after
1658          * a successful transaction commit.
1659          */
1660
1661         locks_want = min(locks_want, BTREE_MAX_DEPTH);
1662         if (locks_want > path->locks_want)
1663                 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
1664
1665         return path_idx;
1666 }
1667
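/*
 * Return the key at path->pos: for cached paths this is the key cache
 * entry (or bkey_s_c_null if there's no valid entry); for btree node
 * paths, if there's no key at exactly path->pos we synthesize a deleted
 * "hole" key at that position in @u, returned with a NULL value.
 */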
1668 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1669 {
1670
1671         struct btree_path_level *l = path_l(path);
1672         struct bkey_packed *_k;
1673         struct bkey_s_c k;
1674
1675         if (unlikely(!l->b))
1676                 return bkey_s_c_null;
1677
1678         EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1679         EBUG_ON(!btree_node_locked(path, path->level));
1680
1681         if (!path->cached) {
1682                 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1683                 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1684
1685                 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1686
1687                 if (!k.k || !bpos_eq(path->pos, k.k->p))
1688                         goto hole;
1689         } else {
1690                 struct bkey_cached *ck = (void *) path->l[0].b;
1691
1692                 EBUG_ON(ck &&
1693                         (path->btree_id != ck->key.btree_id ||
1694                          !bkey_eq(path->pos, ck->key.pos)));
1695                 if (!ck || !ck->valid)
1696                         return bkey_s_c_null;
1697
1698                 *u = ck->k->k;
1699                 k = bkey_i_to_s_c(ck->k);
1700         }
1701
1702         return k;
1703 hole:
1704         bkey_init(u);
1705         u->p = path->pos;
1706         return (struct bkey_s_c) { u, NULL };
1707 }
1708
1709 /* Btree iterators: */
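/*
 * A sketch (not from this file) of the usual lookup-with-restart pattern
 * built on these iterators - bch2_trans_iter_init() being the header
 * wrapper around bch2_trans_iter_init_outlined() below:
 *
 *	bch2_trans_iter_init(trans, &iter, btree_id, pos, flags);
 * retry:
 *	bch2_trans_begin(trans);
 *	k = bch2_btree_iter_peek_upto(&iter, end);
 *	ret = bkey_err(k);
 *	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *		goto retry;
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 */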
1710
1711 int __must_check
1712 __bch2_btree_iter_traverse(struct btree_iter *iter)
1713 {
1714         return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1715 }
1716
1717 int __must_check
1718 bch2_btree_iter_traverse(struct btree_iter *iter)
1719 {
1720         struct btree_trans *trans = iter->trans;
1721         int ret;
1722
1723         iter->path = bch2_btree_path_set_pos(trans, iter->path,
1724                                         btree_iter_search_key(iter),
1725                                         iter->flags & BTREE_ITER_INTENT,
1726                                         btree_iter_ip_allocated(iter));
1727
1728         ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1729         if (ret)
1730                 return ret;
1731
1732         struct btree_path *path = btree_iter_path(trans, iter);
1733         if (btree_path_node(path, path->level))
1734                 btree_path_set_should_be_locked(path);
1735         return 0;
1736 }
1737
1738 /* Iterate across nodes (leaf and interior nodes) */
1739
1740 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1741 {
1742         struct btree_trans *trans = iter->trans;
1743         struct btree *b = NULL;
1744         int ret;
1745
1746         EBUG_ON(trans->paths[iter->path].cached);
1747         bch2_btree_iter_verify(iter);
1748
1749         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1750         if (ret)
1751                 goto err;
1752
1753         struct btree_path *path = btree_iter_path(trans, iter);
1754         b = btree_path_node(path, path->level);
1755         if (!b)
1756                 goto out;
1757
1758         BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1759
1760         bkey_init(&iter->k);
1761         iter->k.p = iter->pos = b->key.k.p;
1762
1763         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1764                                         iter->flags & BTREE_ITER_INTENT,
1765                                         btree_iter_ip_allocated(iter));
1766         btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1767 out:
1768         bch2_btree_iter_verify_entry_exit(iter);
1769         bch2_btree_iter_verify(iter);
1770
1771         return b;
1772 err:
1773         b = ERR_PTR(ret);
1774         goto out;
1775 }
1776
1777 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1778 {
1779         struct btree *b;
1780
1781         while (b = bch2_btree_iter_peek_node(iter),
1782                bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1783                 bch2_trans_begin(iter->trans);
1784
1785         return b;
1786 }
1787
1788 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1789 {
1790         struct btree_trans *trans = iter->trans;
1791         struct btree *b = NULL;
1792         int ret;
1793
1794         EBUG_ON(trans->paths[iter->path].cached);
1795         bch2_trans_verify_not_in_restart(trans);
1796         bch2_btree_iter_verify(iter);
1797
1798         struct btree_path *path = btree_iter_path(trans, iter);
1799
1800         /* already at end? */
1801         if (!btree_path_node(path, path->level))
1802                 return NULL;
1803
1804         /* no parent node? then we've finished iterating: */
1805         if (!btree_path_node(path, path->level + 1)) {
1806                 btree_path_set_level_up(trans, path);
1807                 return NULL;
1808         }
1809
1810         if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1811                 __bch2_btree_path_unlock(trans, path);
1812                 path->l[path->level].b          = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1813                 path->l[path->level + 1].b      = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1814                 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1815                 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1816                 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1817                 goto err;
1818         }
1819
1820         b = btree_path_node(path, path->level + 1);
1821
1822         if (bpos_eq(iter->pos, b->key.k.p)) {
1823                 __btree_path_set_level_up(trans, path, path->level++);
1824         } else {
1825                 /*
1826                  * Haven't gotten to the end of the parent node: go back down to
1827                  * the next child node
1828                  */
1829                 iter->path = bch2_btree_path_set_pos(trans, iter->path,
1830                                         bpos_successor(iter->pos),
1831                                         iter->flags & BTREE_ITER_INTENT,
1832                                         btree_iter_ip_allocated(iter));
1833
1834                 path = btree_iter_path(trans, iter);
1835                 btree_path_set_level_down(trans, path, iter->min_depth);
1836
1837                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1838                 if (ret)
1839                         goto err;
1840
1841                 path = btree_iter_path(trans, iter);
1842                 b = path->l[path->level].b;
1843         }
1844
1845         bkey_init(&iter->k);
1846         iter->k.p = iter->pos = b->key.k.p;
1847
1848         iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1849                                         iter->flags & BTREE_ITER_INTENT,
1850                                         btree_iter_ip_allocated(iter));
1851         btree_path_set_should_be_locked(btree_iter_path(trans, iter));
1852         EBUG_ON(btree_iter_path(trans, iter)->uptodate);
1853 out:
1854         bch2_btree_iter_verify_entry_exit(iter);
1855         bch2_btree_iter_verify(iter);
1856
1857         return b;
1858 err:
1859         b = ERR_PTR(ret);
1860         goto out;
1861 }
1862
1863 /* Iterate across keys (in leaf nodes only) */
1864
1865 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1866 {
1867         struct bpos pos = iter->k.p;
1868         bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1869                      ? bpos_eq(pos, SPOS_MAX)
1870                      : bkey_eq(pos, SPOS_MAX));
1871
1872         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1873                 pos = bkey_successor(iter, pos);
1874         bch2_btree_iter_set_pos(iter, pos);
1875         return ret;
1876 }
1877
1878 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1879 {
1880         struct bpos pos = bkey_start_pos(&iter->k);
1881         bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1882                      ? bpos_eq(pos, POS_MIN)
1883                      : bkey_eq(pos, POS_MIN));
1884
1885         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1886                 pos = bkey_predecessor(iter, pos);
1887         bch2_btree_iter_set_pos(iter, pos);
1888         return ret;
1889 }
1890
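/*
 * The three helpers below overlay the current transaction's uncommitted
 * updates onto whatever peek found in the btree, so that iteration with
 * BTREE_ITER_WITH_UPDATES sees the transaction's own writes: scan the
 * update list for entries on this btree between the iterator's position
 * and the key peek returned, and return the best match instead.
 */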
1891 static noinline
1892 void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
1893                                         struct bkey_s_c *k)
1894 {
1895         struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
1896
1897         trans_for_each_update(trans, i)
1898                 if (!i->key_cache_already_flushed &&
1899                     i->btree_id == iter->btree_id &&
1900                     bpos_le(i->k->k.p, iter->pos) &&
1901                     bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
1902                         iter->k = i->k->k;
1903                         *k = bkey_i_to_s_c(i->k);
1904                 }
1905 }
1906
1907 static noinline
1908 void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
1909                                    struct bkey_s_c *k)
1910 {
1911         struct btree_path *path = btree_iter_path(trans, iter);
1912         struct bpos end = path_l(path)->b->key.k.p;
1913
1914         trans_for_each_update(trans, i)
1915                 if (!i->key_cache_already_flushed &&
1916                     i->btree_id == iter->btree_id &&
1917                     bpos_ge(i->k->k.p, path->pos) &&
1918                     bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
1919                         iter->k = i->k->k;
1920                         *k = bkey_i_to_s_c(i->k);
1921                 }
1922 }
1923
1924 static noinline
1925 void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
1926                                         struct bkey_s_c *k)
1927 {
1928         trans_for_each_update(trans, i)
1929                 if (!i->key_cache_already_flushed &&
1930                     i->btree_id == iter->btree_id &&
1931                     bpos_eq(i->k->k.p, iter->pos)) {
1932                         iter->k = i->k->k;
1933                         *k = bkey_i_to_s_c(i->k);
1934                 }
1935 }
1936
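/*
 * BTREE_ITER_WITH_JOURNAL support: while keys are still only in the
 * journal (i.e. before journal replay has finished), they have to be
 * merged into what we read from the btree - this finds the next such key
 * for this btree/level between path->pos and @end_pos.
 */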
1937 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
1938                                               struct btree_iter *iter,
1939                                               struct bpos end_pos)
1940 {
1941         struct btree_path *path = btree_iter_path(trans, iter);
1942
1943         return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
1944                                            path->level,
1945                                            path->pos,
1946                                            end_pos,
1947                                            &iter->journal_idx);
1948 }
1949
1950 static noinline
1951 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
1952                                               struct btree_iter *iter)
1953 {
1954         struct btree_path *path = btree_iter_path(trans, iter);
1955         struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);
1956
1957         if (k) {
1958                 iter->k = k->k;
1959                 return bkey_i_to_s_c(k);
1960         } else {
1961                 return bkey_s_c_null;
1962         }
1963 }
1964
1965 static noinline
1966 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
1967                                          struct btree_iter *iter,
1968                                          struct bkey_s_c k)
1969 {
1970         struct btree_path *path = btree_iter_path(trans, iter);
1971         struct bkey_i *next_journal =
1972                 bch2_btree_journal_peek(trans, iter,
1973                                 k.k ? k.k->p : path_l(path)->b->key.k.p);
1974
1975         if (next_journal) {
1976                 iter->k = next_journal->k;
1977                 k = bkey_i_to_s_c(next_journal);
1978         }
1979
1980         return k;
1981 }
1982
1983 /*
1984  * Checks the btree key cache for a key at @pos, returning it if present, or
1985  * bkey_s_c_null:
1986  */
1987 static noinline
1988 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
1989 {
1990         struct btree_trans *trans = iter->trans;
1991         struct bch_fs *c = trans->c;
1992         struct bkey u;
1993         struct bkey_s_c k;
1994         int ret;
1995
1996         if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
1997             bpos_eq(iter->pos, pos))
1998                 return bkey_s_c_null;
1999
2000         if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2001                 return bkey_s_c_null;
2002
2003         if (!iter->key_cache_path)
2004                 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2005                                                      iter->flags & BTREE_ITER_INTENT, 0,
2006                                                      iter->flags|BTREE_ITER_CACHED|
2007                                                      BTREE_ITER_CACHED_NOFILL,
2008                                                      _THIS_IP_);
2009
2010         iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2011                                         iter->flags & BTREE_ITER_INTENT,
2012                                         btree_iter_ip_allocated(iter));
2013
2014         ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
2015                                          iter->flags|BTREE_ITER_CACHED) ?:
2016                 bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
2017         if (unlikely(ret))
2018                 return bkey_s_c_err(ret);
2019
2020         btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
2021
2022         k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
2023         if (k.k && !bkey_err(k)) {
2024                 iter->k = u;
2025                 k.k = &iter->k;
2026         }
2027         return k;
2028 }
2029
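/*
 * Core of peek: repeatedly traverse to the search key and peek the next
 * key from the current leaf, overlaying the key cache, journal keys and
 * pending updates as the iterator flags dictate. Whiteouts restart the
 * loop with an advanced search key; hitting the end of a leaf advances to
 * the next one, and the end of the btree returns bkey_s_c_null.
 */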
2030 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2031 {
2032         struct btree_trans *trans = iter->trans;
2033         struct bkey_s_c k, k2;
2034         int ret;
2035
2036         EBUG_ON(btree_iter_path(trans, iter)->cached);
2037         bch2_btree_iter_verify(iter);
2038
2039         while (1) {
2040                 struct btree_path_level *l;
2041
2042                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2043                                         iter->flags & BTREE_ITER_INTENT,
2044                                         btree_iter_ip_allocated(iter));
2045
2046                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2047                 if (unlikely(ret)) {
2048                         /* ensure that iter->k is consistent with iter->pos: */
2049                         bch2_btree_iter_set_pos(iter, iter->pos);
2050                         k = bkey_s_c_err(ret);
2051                         goto out;
2052                 }
2053
2054                 struct btree_path *path = btree_iter_path(trans, iter);
2055                 l = path_l(path);
2056
2057                 if (unlikely(!l->b)) {
2058                         /* No btree nodes at requested level: */
2059                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2060                         k = bkey_s_c_null;
2061                         goto out;
2062                 }
2063
2064                 btree_path_set_should_be_locked(path);
2065
2066                 k = btree_path_level_peek_all(trans->c, l, &iter->k);
2067
2068                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2069                     k.k &&
2070                     (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2071                         k = k2;
2072                         ret = bkey_err(k);
2073                         if (ret) {
2074                                 bch2_btree_iter_set_pos(iter, iter->pos);
2075                                 goto out;
2076                         }
2077                 }
2078
2079                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2080                         k = btree_trans_peek_journal(trans, iter, k);
2081
2082                 if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2083                              trans->nr_updates))
2084                         bch2_btree_trans_peek_updates(trans, iter, &k);
2085
2086                 if (k.k && bkey_deleted(k.k)) {
2087                         /*
2088                          * If we've got a whiteout, and it's after the search
2089                          * key, advance the search key to the whiteout instead
2090                          * of just after the whiteout - it might be a btree
2091                          * whiteout, with a real key at the same position, since
2092                          * in the btree, deleted keys sort before non-deleted.
2093                          */
2094                         search_key = !bpos_eq(search_key, k.k->p)
2095                                 ? k.k->p
2096                                 : bpos_successor(k.k->p);
2097                         continue;
2098                 }
2099
2100                 if (likely(k.k)) {
2101                         break;
2102                 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2103                         /* Advance to next leaf node: */
2104                         search_key = bpos_successor(l->b->key.k.p);
2105                 } else {
2106                         /* End of btree: */
2107                         bch2_btree_iter_set_pos(iter, SPOS_MAX);
2108                         k = bkey_s_c_null;
2109                         goto out;
2110                 }
2111         }
2112 out:
2113         bch2_btree_iter_verify(iter);
2114
2115         return k;
2116 }
2117
2118 /**
2119  * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
2120  * iterator's current position
2121  * @iter:       iterator to peek from
2122  * @end:        search limit: returns keys less than or equal to @end
2123  *
2124  * Returns:     key if found, or an error extractable with bkey_err().
2125  */
2126 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2127 {
2128         struct btree_trans *trans = iter->trans;
2129         struct bpos search_key = btree_iter_search_key(iter);
2130         struct bkey_s_c k;
2131         struct bpos iter_pos;
2132         int ret;
2133
2134         EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
2135
2136         if (iter->update_path) {
2137                 bch2_path_put_nokeep(trans, iter->update_path,
2138                                      iter->flags & BTREE_ITER_INTENT);
2139                 iter->update_path = 0;
2140         }
2141
2142         bch2_btree_iter_verify_entry_exit(iter);
2143
2144         while (1) {
2145                 k = __bch2_btree_iter_peek(iter, search_key);
2146                 if (unlikely(!k.k))
2147                         goto end;
2148                 if (unlikely(bkey_err(k)))
2149                         goto out_no_locked;
2150
2151                 /*
2152                  * We need to check against @end before FILTER_SNAPSHOTS because
2153                  * if we get to a different inode than requested we might be
2154                  * seeing keys for a different snapshot tree that will all be
2155                  * filtered out.
2156                  *
2157                  * But we can't do the full check here, because bkey_start_pos()
2158                  * isn't monotonically increasing before FILTER_SNAPSHOTS, and
2159                  * that's what we check against in extents mode:
2160                  */
2161                 if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
2162                              ? bkey_gt(k.k->p, end)
2163                              : k.k->p.inode > end.inode))
2164                         goto end;
2165
2166                 if (iter->update_path &&
2167                     !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
2168                         bch2_path_put_nokeep(trans, iter->update_path,
2169                                              iter->flags & BTREE_ITER_INTENT);
2170                         iter->update_path = 0;
2171                 }
2172
2173                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2174                     (iter->flags & BTREE_ITER_INTENT) &&
2175                     !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2176                     !iter->update_path) {
2177                         struct bpos pos = k.k->p;
2178
2179                         if (pos.snapshot < iter->snapshot) {
2180                                 search_key = bpos_successor(k.k->p);
2181                                 continue;
2182                         }
2183
2184                         pos.snapshot = iter->snapshot;
2185
2186                         /*
2187                          * advance, same as on exit for iter->path, but only up
2188                          * to snapshot
2189                          */
2190                         __btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
2191                         iter->update_path = iter->path;
2192
2193                         iter->update_path = bch2_btree_path_set_pos(trans,
2194                                                 iter->update_path, pos,
2195                                                 iter->flags & BTREE_ITER_INTENT,
2196                                                 _THIS_IP_);
2197                         ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2198                         if (unlikely(ret)) {
2199                                 k = bkey_s_c_err(ret);
2200                                 goto out_no_locked;
2201                         }
2202                 }
2203
2204                 /*
2205                  * We can never have a key in a leaf node at POS_MAX, so
2206                  * the successor() calls here can't overflow:
2207                  */
2208                 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2209                     !bch2_snapshot_is_ancestor(trans->c,
2210                                                iter->snapshot,
2211                                                k.k->p.snapshot)) {
2212                         search_key = bpos_successor(k.k->p);
2213                         continue;
2214                 }
2215
2216                 if (bkey_whiteout(k.k) &&
2217                     !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2218                         search_key = bkey_successor(iter, k.k->p);
2219                         continue;
2220                 }
2221
2222                 /*
2223                  * iter->pos should be monotonically increasing, and always be
2224                  * equal to the key we just returned - except extents can
2225                  * straddle iter->pos:
2226                  */
2227                 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2228                         iter_pos = k.k->p;
2229                 else
2230                         iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2231
2232                 if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
2233                              ? bkey_gt(iter_pos, end)
2234                              : bkey_ge(iter_pos, end)))
2235                         goto end;
2236
2237                 break;
2238         }
2239
2240         iter->pos = iter_pos;
2241
2242         iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2243                                 iter->flags & BTREE_ITER_INTENT,
2244                                 btree_iter_ip_allocated(iter));
2245
2246         btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2247 out_no_locked:
2248         if (iter->update_path) {
2249                 ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
2250                 if (unlikely(ret))
2251                         k = bkey_s_c_err(ret);
2252                 else
2253                         btree_path_set_should_be_locked(trans->paths + iter->update_path);
2254         }
2255
2256         if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2257                 iter->pos.snapshot = iter->snapshot;
2258
2259         ret = bch2_btree_iter_verify_ret(iter, k);
2260         if (unlikely(ret)) {
2261                 bch2_btree_iter_set_pos(iter, iter->pos);
2262                 k = bkey_s_c_err(ret);
2263         }
2264
2265         bch2_btree_iter_verify_entry_exit(iter);
2266
2267         return k;
2268 end:
2269         bch2_btree_iter_set_pos(iter, end);
2270         k = bkey_s_c_null;
2271         goto out_no_locked;
2272 }
2273
2274 /**
2275  * bch2_btree_iter_next() - returns first key greater than iterator's current
2276  * position
2277  * @iter:       iterator to peek from
2278  *
2279  * Returns:     key if found, or an error extractable with bkey_err().
2280  */
2281 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2282 {
2283         if (!bch2_btree_iter_advance(iter))
2284                 return bkey_s_c_null;
2285
2286         return bch2_btree_iter_peek(iter);
2287 }
2288
2289 /**
2290  * bch2_btree_iter_peek_prev() - returns first key less than or equal to
2291  * iterator's current position
2292  * @iter:       iterator to peek from
2293  *
2294  * Returns:     key if found, or an error extractable with bkey_err().
2295  */
2296 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2297 {
2298         struct btree_trans *trans = iter->trans;
2299         struct bpos search_key = iter->pos;
2300         struct bkey_s_c k;
2301         struct bkey saved_k;
2302         const struct bch_val *saved_v;
2303         btree_path_idx_t saved_path = 0;
2304         int ret;
2305
2306         EBUG_ON(btree_iter_path(trans, iter)->cached ||
2307                 btree_iter_path(trans, iter)->level);
2308
2309         if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2310                 return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);
2311
2312         bch2_btree_iter_verify(iter);
2313         bch2_btree_iter_verify_entry_exit(iter);
2314
2315         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2316                 search_key.snapshot = U32_MAX;
2317
2318         while (1) {
2319                 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2320                                                 iter->flags & BTREE_ITER_INTENT,
2321                                                 btree_iter_ip_allocated(iter));
2322
2323                 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2324                 if (unlikely(ret)) {
2325                         /* ensure that iter->k is consistent with iter->pos: */
2326                         bch2_btree_iter_set_pos(iter, iter->pos);
2327                         k = bkey_s_c_err(ret);
2328                         goto out_no_locked;
2329                 }
2330
2331                 struct btree_path *path = btree_iter_path(trans, iter);
2332
2333                 k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
2334                 if (!k.k ||
2335                     ((iter->flags & BTREE_ITER_IS_EXTENTS)
2336                      ? bpos_ge(bkey_start_pos(k.k), search_key)
2337                      : bpos_gt(k.k->p, search_key)))
2338                         k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
2339
2340                 if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2341                              trans->nr_updates))
2342                         bch2_btree_trans_peek_prev_updates(trans, iter, &k);
2343
2344                 if (likely(k.k)) {
2345                         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2346                                 if (k.k->p.snapshot == iter->snapshot)
2347                                         goto got_key;
2348
2349                                 /*
2350                                  * If we have a saved candidate, and we're no
2351                                  * longer at the same _key_ (not pos), return
2352                                  * that candidate
2353                                  */
2354                                 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2355                                         bch2_path_put_nokeep(trans, iter->path,
2356                                                       iter->flags & BTREE_ITER_INTENT);
2357                                         iter->path = saved_path;
2358                                         saved_path = 0;
2359                                         iter->k = saved_k;
2360                                         k.v     = saved_v;
2361                                         goto got_key;
2362                                 }
2363
2364                                 if (bch2_snapshot_is_ancestor(trans->c,
2365                                                               iter->snapshot,
2366                                                               k.k->p.snapshot)) {
2367                                         if (saved_path)
2368                                                 bch2_path_put_nokeep(trans, saved_path,
2369                                                       iter->flags & BTREE_ITER_INTENT);
2370                                         saved_path = btree_path_clone(trans, iter->path,
2371                                                                 iter->flags & BTREE_ITER_INTENT);
2372                                         path = btree_iter_path(trans, iter);
2373                                         saved_k = *k.k;
2374                                         saved_v = k.v;
2375                                 }
2376
2377                                 search_key = bpos_predecessor(k.k->p);
2378                                 continue;
2379                         }
2380 got_key:
2381                         if (bkey_whiteout(k.k) &&
2382                             !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2383                                 search_key = bkey_predecessor(iter, k.k->p);
2384                                 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2385                                         search_key.snapshot = U32_MAX;
2386                                 continue;
2387                         }
2388
2389                         btree_path_set_should_be_locked(path);
2390                         break;
2391                 } else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
2392                         /* Advance to previous leaf node: */
2393                         search_key = bpos_predecessor(path->l[0].b->data->min_key);
2394                 } else {
2395                         /* Start of btree: */
2396                         bch2_btree_iter_set_pos(iter, POS_MIN);
2397                         k = bkey_s_c_null;
2398                         goto out_no_locked;
2399                 }
2400         }
2401
2402         EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2403
2404         /* Extents can straddle iter->pos: */
2405         if (bkey_lt(k.k->p, iter->pos))
2406                 iter->pos = k.k->p;
2407
2408         if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2409                 iter->pos.snapshot = iter->snapshot;
2410 out_no_locked:
2411         if (saved_path)
2412                 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2413
2414         bch2_btree_iter_verify_entry_exit(iter);
2415         bch2_btree_iter_verify(iter);
2416
2417         return k;
2418 }
2419
2420 /**
2421  * bch2_btree_iter_prev() - returns first key less than iterator's current
2422  * position
2423  * @iter:       iterator to peek from
2424  *
2425  * Returns:     key if found, or an error extractable with bkey_err().
2426  */
2427 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2428 {
2429         if (!bch2_btree_iter_rewind(iter))
2430                 return bkey_s_c_null;
2431
2432         return bch2_btree_iter_peek_prev(iter);
2433 }
2434
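/*
 * peek_slot: return the key at the iterator's position, whether or not
 * anything is stored there. Cached and plain (non-extents, non-snapshots)
 * iterators do a point lookup, with the same update/journal/key cache
 * overlays as peek; extents/snapshots iterators peek forward instead, and
 * if nothing covers iter->pos we synthesize a deleted key (sized, for
 * extents, to span the hole up to the next key).
 */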
2435 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2436 {
2437         struct btree_trans *trans = iter->trans;
2438         struct bpos search_key;
2439         struct bkey_s_c k;
2440         int ret;
2441
2442         bch2_btree_iter_verify(iter);
2443         bch2_btree_iter_verify_entry_exit(iter);
2444         EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2445
2446         /* extents can't span inode numbers: */
2447         if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2448             unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2449                 if (iter->pos.inode == KEY_INODE_MAX)
2450                         return bkey_s_c_null;
2451
2452                 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2453         }
2454
2455         search_key = btree_iter_search_key(iter);
2456         iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2457                                         iter->flags & BTREE_ITER_INTENT,
2458                                         btree_iter_ip_allocated(iter));
2459
2460         ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2461         if (unlikely(ret)) {
2462                 k = bkey_s_c_err(ret);
2463                 goto out_no_locked;
2464         }
2465
2466         if ((iter->flags & BTREE_ITER_CACHED) ||
2467             !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2468                 k = bkey_s_c_null;
2469
2470                 if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2471                              trans->nr_updates)) {
2472                         bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2473                         if (k.k)
2474                                 goto out;
2475                 }
2476
2477                 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2478                     (k = btree_trans_peek_slot_journal(trans, iter)).k)
2479                         goto out;
2480
2481                 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2482                     (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2483                         if (!bkey_err(k))
2484                                 iter->k = *k.k;
2485                         /* We're not returning a key from iter->path: */
2486                         goto out_no_locked;
2487                 }
2488
2489                 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2490                 if (unlikely(!k.k))
2491                         goto out_no_locked;
2492         } else {
2493                 struct bpos next;
2494                 struct bpos end = iter->pos;
2495
2496                 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2497                         end.offset = U64_MAX;
2498
2499                 EBUG_ON(btree_iter_path(trans, iter)->level);
2500
2501                 if (iter->flags & BTREE_ITER_INTENT) {
2502                         struct btree_iter iter2;
2503
2504                         bch2_trans_copy_iter(&iter2, iter);
2505                         k = bch2_btree_iter_peek_upto(&iter2, end);
2506
2507                         if (k.k && !bkey_err(k)) {
2508                                 swap(iter->key_cache_path, iter2.key_cache_path);
2509                                 iter->k = iter2.k;
2510                                 k.k = &iter->k;
2511                         }
2512                         bch2_trans_iter_exit(trans, &iter2);
2513                 } else {
2514                         struct bpos pos = iter->pos;
2515
2516                         k = bch2_btree_iter_peek_upto(iter, end);
2517                         if (unlikely(bkey_err(k)))
2518                                 bch2_btree_iter_set_pos(iter, pos);
2519                         else
2520                                 iter->pos = pos;
2521                 }
2522
2523                 if (unlikely(bkey_err(k)))
2524                         goto out_no_locked;
2525
2526                 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2527
2528                 if (bkey_lt(iter->pos, next)) {
2529                         bkey_init(&iter->k);
2530                         iter->k.p = iter->pos;
2531
2532                         if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2533                                 bch2_key_resize(&iter->k,
2534                                                 min_t(u64, KEY_SIZE_MAX,
2535                                                       (next.inode == iter->pos.inode
2536                                                        ? next.offset
2537                                                        : KEY_OFFSET_MAX) -
2538                                                       iter->pos.offset));
2539                                 EBUG_ON(!iter->k.size);
2540                         }
2541
2542                         k = (struct bkey_s_c) { &iter->k, NULL };
2543                 }
2544         }
2545 out:
2546         btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2547 out_no_locked:
2548         bch2_btree_iter_verify_entry_exit(iter);
2549         bch2_btree_iter_verify(iter);
2550         ret = bch2_btree_iter_verify_ret(iter, k);
2551         if (unlikely(ret))
2552                 return bkey_s_c_err(ret);
2553
2554         return k;
2555 }
2556
2557 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2558 {
2559         if (!bch2_btree_iter_advance(iter))
2560                 return bkey_s_c_null;
2561
2562         return bch2_btree_iter_peek_slot(iter);
2563 }
2564
2565 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2566 {
2567         if (!bch2_btree_iter_rewind(iter))
2568                 return bkey_s_c_null;
2569
2570         return bch2_btree_iter_peek_slot(iter);
2571 }
2572
2573 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2574 {
2575         struct bkey_s_c k;
2576
2577         while (btree_trans_too_many_iters(iter->trans) ||
2578                (k = bch2_btree_iter_peek_type(iter, iter->flags),
2579                 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2580                 bch2_trans_begin(iter->trans);
2581
2582         return k;
2583 }
2584
2585 /* new transactional stuff: */
2586
2587 #ifdef CONFIG_BCACHEFS_DEBUG
2588 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2589 {
2590         struct btree_path *path;
2591         unsigned i;
2592
2593         BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2594
2595         trans_for_each_path(trans, path, i) {
2596                 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2597                 BUG_ON(trans->sorted[path->sorted_idx] != i);
2598         }
2599
2600         for (i = 0; i < trans->nr_sorted; i++) {
2601                 unsigned idx = trans->sorted[i];
2602
2603                 BUG_ON(!test_bit(idx, trans->paths_allocated));
2604                 BUG_ON(trans->paths[idx].sorted_idx != i);
2605         }
2606 }
2607
2608 static void btree_trans_verify_sorted(struct btree_trans *trans)
2609 {
2610         struct btree_path *path, *prev = NULL;
2611         struct trans_for_each_path_inorder_iter iter;
2612
2613         if (!bch2_debug_check_iterators)
2614                 return;
2615
2616         trans_for_each_path_inorder(trans, path, iter) {
2617                 if (prev && btree_path_cmp(prev, path) > 0) {
2618                         __bch2_dump_trans_paths_updates(trans, true);
2619                         panic("trans paths out of order!\n");
2620                 }
2621                 prev = path;
2622         }
2623 }
2624 #else
2625 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2626 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2627 #endif
2628
2629 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2630 {
2631         int i, l = 0, r = trans->nr_sorted, inc = 1;
2632         bool swapped;
2633
2634         btree_trans_verify_sorted_refs(trans);
2635
2636         if (trans->paths_sorted)
2637                 goto out;
2638
2639         /*
2640          * Cocktail shaker sort: this is efficient because iterators will be
2641          * mostly sorted.
2642          */
2643         do {
2644                 swapped = false;
2645
2646                 for (i = inc > 0 ? l : r - 2;
2647                      i + 1 < r && i >= l;
2648                      i += inc) {
2649                         if (btree_path_cmp(trans->paths + trans->sorted[i],
2650                                            trans->paths + trans->sorted[i + 1]) > 0) {
2651                                 swap(trans->sorted[i], trans->sorted[i + 1]);
2652                                 trans->paths[trans->sorted[i]].sorted_idx = i;
2653                                 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2654                                 swapped = true;
2655                         }
2656                 }
2657
2658                 if (inc > 0)
2659                         --r;
2660                 else
2661                         l++;
2662                 inc = -inc;
2663         } while (swapped);
2664
2665         trans->paths_sorted = true;
2666 out:
2667         btree_trans_verify_sorted(trans);
2668 }
2669
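/*
 * Maintaining the sorted-index list: btree_path_idx_t is smaller than a
 * u64, so where unaligned access is cheap the entries are shifted with
 * the u64-at-a-time memmove_u64s_*_small() helpers, whose rounded-up
 * length may touch a few bytes past the end - that's what the 8 bytes of
 * padding after the sorted array in btree_paths_realloc() are for.
 */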
2670 static inline void btree_path_list_remove(struct btree_trans *trans,
2671                                           struct btree_path *path)
2672 {
2673         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2674 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2675         trans->nr_sorted--;
2676         memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2677                                 trans->sorted + path->sorted_idx + 1,
2678                                 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2679                                              sizeof(u64) / sizeof(btree_path_idx_t)));
2680 #else
2681         array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2682 #endif
2683         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2684                 trans->paths[trans->sorted[i]].sorted_idx = i;
2685 }
2686
2687 static inline void btree_path_list_add(struct btree_trans *trans,
2688                                        btree_path_idx_t pos,
2689                                        btree_path_idx_t path_idx)
2690 {
2691         struct btree_path *path = trans->paths + path_idx;
2692
2693         path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2694
2695 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2696         memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2697                               trans->sorted + path->sorted_idx,
2698                               DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2699                                            sizeof(u64) / sizeof(btree_path_idx_t)));
2700         trans->nr_sorted++;
2701         trans->sorted[path->sorted_idx] = path_idx;
2702 #else
2703         array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2704 #endif
2705
2706         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2707                 trans->paths[trans->sorted[i]].sorted_idx = i;
2708
2709         btree_trans_verify_sorted_refs(trans);
2710 }
2711
2712 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2713 {
2714         if (iter->update_path)
2715                 bch2_path_put_nokeep(trans, iter->update_path,
2716                               iter->flags & BTREE_ITER_INTENT);
2717         if (iter->path)
2718                 bch2_path_put(trans, iter->path,
2719                               iter->flags & BTREE_ITER_INTENT);
2720         if (iter->key_cache_path)
2721                 bch2_path_put(trans, iter->key_cache_path,
2722                               iter->flags & BTREE_ITER_INTENT);
2723         iter->path              = 0;
2724         iter->update_path       = 0;
2725         iter->key_cache_path    = 0;
2726         iter->trans             = NULL;
2727 }
2728
2729 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2730                           struct btree_iter *iter,
2731                           enum btree_id btree_id, struct bpos pos,
2732                           unsigned flags)
2733 {
2734         bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2735                                bch2_btree_iter_flags(trans, btree_id, flags),
2736                                _RET_IP_);
2737 }
2738
2739 void bch2_trans_node_iter_init(struct btree_trans *trans,
2740                                struct btree_iter *iter,
2741                                enum btree_id btree_id,
2742                                struct bpos pos,
2743                                unsigned locks_want,
2744                                unsigned depth,
2745                                unsigned flags)
2746 {
2747         flags |= BTREE_ITER_NOT_EXTENTS;
2748         flags |= __BTREE_ITER_ALL_SNAPSHOTS;
2749         flags |= BTREE_ITER_ALL_SNAPSHOTS;
2750
2751         bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2752                                __bch2_btree_iter_flags(trans, btree_id, flags),
2753                                _RET_IP_);
2754
2755         iter->min_depth = depth;
2756
2757         struct btree_path *path = btree_iter_path(trans, iter);
2758         BUG_ON(path->locks_want  < min(locks_want, BTREE_MAX_DEPTH));
2759         BUG_ON(path->level      != depth);
2760         BUG_ON(iter->min_depth  != depth);
2761 }
2762
2763 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2764 {
2765         struct btree_trans *trans = src->trans;
2766
2767         *dst = *src;
2768 #ifdef TRACK_PATH_ALLOCATED
2769         dst->ip_allocated = _RET_IP_;
2770 #endif
2771         if (src->path)
2772                 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
2773         if (src->update_path)
2774                 __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
2775         dst->key_cache_path = 0;
2776 }
2777
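/*
 * Bump allocator for memory that lives until the end of the transaction
 * (callers normally use the bch2_trans_kmalloc() wrapper): allocations
 * are carved off the top of trans->mem, and mem_top is reset by
 * bch2_trans_begin(). If the buffer has to be grown mid-transaction we
 * return a transaction restart, since krealloc() may have moved existing
 * allocations that callers still hold pointers to.
 */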
2778 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2779 {
2780         struct bch_fs *c = trans->c;
2781         unsigned new_top = trans->mem_top + size;
2782         unsigned old_bytes = trans->mem_bytes;
2783         unsigned new_bytes = roundup_pow_of_two(new_top);
2784         int ret;
2785         void *new_mem;
2786         void *p;
2787
2788         WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2789
2790         struct btree_transaction_stats *s = btree_trans_stats(trans);
2791         s->max_mem = max(s->max_mem, new_bytes);
2792
2793         new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2794         if (unlikely(!new_mem)) {
2795                 bch2_trans_unlock(trans);
2796
2797                 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2798                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2799                         new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2800                         new_bytes = BTREE_TRANS_MEM_MAX;
2801                         kfree(trans->mem);
2802                 }
2803
2804                 if (!new_mem)
2805                         return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2806
2807                 trans->mem = new_mem;
2808                 trans->mem_bytes = new_bytes;
2809
2810                 ret = bch2_trans_relock(trans);
2811                 if (ret)
2812                         return ERR_PTR(ret);
2813         }
2814
2815         trans->mem = new_mem;
2816         trans->mem_bytes = new_bytes;
2817
2818         if (old_bytes) {
2819                 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2820                 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2821         }
2822
2823         p = trans->mem + trans->mem_top;
2824         trans->mem_top += size;
2825         memset(p, 0, size);
2826         return p;
2827 }
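     /*
      * Editor's note on the strategy above: the first krealloc() is
      * nonblocking (GFP_NOWAIT) so btree locks can stay held; only if it
      * fails do we unlock, retry with GFP_KERNEL (falling back to the
      * preallocated mempool), and relock. When an existing buffer was grown
      * (old_bytes != 0), pointers handed out earlier may now be stale, hence
      * the transaction restart. Callers just propagate the error; a
      * hypothetical sketch via the bch2_trans_kmalloc() wrapper:
      *
      *     struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
      *     if (IS_ERR(k))
      *             return PTR_ERR(k);   (may be a restart, to be retried)
      */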
2828
2829 static inline void check_srcu_held_too_long(struct btree_trans *trans)
2830 {
2831         WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
2832              "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
2833              (jiffies - trans->srcu_lock_time) / HZ);
2834 }
2835
2836 void bch2_trans_srcu_unlock(struct btree_trans *trans)
2837 {
2838         if (trans->srcu_held) {
2839                 struct bch_fs *c = trans->c;
2840                 struct btree_path *path;
2841                 unsigned i;
2842
2843                 trans_for_each_path(trans, path, i)
2844                         if (path->cached && !btree_node_locked(path, 0))
2845                                 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
2846
2847                 check_srcu_held_too_long(trans);
2848                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2849                 trans->srcu_held = false;
2850         }
2851 }
2852
2853 static void bch2_trans_srcu_lock(struct btree_trans *trans)
2854 {
2855         if (!trans->srcu_held) {
2856                 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
2857                 trans->srcu_lock_time   = jiffies;
2858                 trans->srcu_held = true;
2859         }
2860 }
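     /*
      * Editor's note: the btree_trans_barrier SRCU read lock pins btree node
      * and key cache memory the transaction may still hold pointers to;
      * that's why bch2_trans_srcu_unlock() first invalidates unlocked cached
      * paths, and why check_srcu_held_too_long() warns once the lock has been
      * blocking memory reclaim for ten seconds or more.
      */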
2861
2862 /**
2863  * bch2_trans_begin() - reset a transaction after an interrupted attempt
2864  * @trans: transaction to reset
2865  *
2866  * Returns:     current restart counter, to be used with trans_was_restarted()
2867  *
2868  * While iterating over nodes or updating nodes, an attempt to lock a btree
2869  * node may return BCH_ERR_transaction_restart when the trylock fails. When
2870  * this occurs, bch2_trans_begin() should be called and the transaction retried.
2871  */
2872 u32 bch2_trans_begin(struct btree_trans *trans)
2873 {
2874         struct btree_path *path;
2875         unsigned i;
2876         u64 now;
2877
2878         bch2_trans_reset_updates(trans);
2879
2880         trans->restart_count++;
2881         trans->mem_top                  = 0;
2882         trans->journal_entries          = NULL;
2883
2884         trans_for_each_path(trans, path, i) {
2885                 path->should_be_locked = false;
2886
2887                 /*
2888                  * If the transaction wasn't restarted, we're presuming to be
2889                  * doing something new: don't keep iterators except the ones
2890                  * that are in use - except for the subvolumes btree:
2891                  */
2892                 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2893                         path->preserve = false;
2894
2895                 /*
2896                  * XXX: we probably shouldn't be doing this if the transaction
2897                  * was restarted, but currently we still overflow transaction
2898                  * iterators if we do that
2899                  */
2900                 if (!path->ref && !path->preserve)
2901                         __bch2_path_free(trans, i);
2902                 else
2903                         path->preserve = false;
2904         }
2905
2906         now = local_clock();
2907
2908         if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
2909             time_after64(now, trans->last_begin_time + 10))
2910                 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
2911                                          trans->last_begin_time, now);
2912
2913         if (!trans->restarted &&
2914             (need_resched() ||
2915              time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
2916                 drop_locks_do(trans, (cond_resched(), 0));
2917                 now = local_clock();
2918         }
2919         trans->last_begin_time = now;
2920
2921         if (unlikely(trans->srcu_held &&
2922                      time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
2923                 bch2_trans_srcu_unlock(trans);
2924
2925         trans->last_begin_ip = _RET_IP_;
2926         if (trans->restarted) {
2927                 bch2_btree_path_traverse_all(trans);
2928                 trans->notrace_relock_fail = false;
2929         }
2930
2931         return trans->restart_count;
2932 }
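     /*
      * Editor's note: the canonical retry loop, written out (the
      * lockrestart_do() helper wraps this same pattern; do_whatever() is a
      * hypothetical operation):
      *
      *     int ret;
      *
      *     do {
      *             bch2_trans_begin(trans);
      *             ret = do_whatever(trans);
      *     } while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
      */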
2933
2934 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
2935
2936 unsigned bch2_trans_get_fn_idx(const char *fn)
2937 {
2938         for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
2939                 if (!bch2_btree_transaction_fns[i] ||
2940                     bch2_btree_transaction_fns[i] == fn) {
2941                         bch2_btree_transaction_fns[i] = fn;
2942                         return i;
2943                 }
2944
2945         pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
2946         return 0;
2947 }
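     /*
      * Editor's note: this isn't normally called directly; the
      * bch2_trans_get() macro caches the returned index in a static per call
      * site, roughly (a sketch of its shape):
      *
      *     static unsigned trans_fn_idx;
      *
      *     if (unlikely(!trans_fn_idx))
      *             trans_fn_idx = bch2_trans_get_fn_idx(__func__);
      *     return __bch2_trans_get(c, trans_fn_idx);
      *
      * giving each transaction call site a stable slot in the stats arrays.
      */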
2948
2949 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
2950         __acquires(&c->btree_trans_barrier)
2951 {
2952         struct btree_trans *trans;
2953
2954         if (IS_ENABLED(__KERNEL__)) {
2955                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
2956                 if (trans) {
2957                         memset(trans, 0, offsetof(struct btree_trans, list));
2958                         goto got_trans;
2959                 }
2960         }
2961
2962         trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
2963         memset(trans, 0, sizeof(*trans));
2964         closure_init_stack(&trans->ref);
2965
2966         seqmutex_lock(&c->btree_trans_lock);
2967         if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
2968                 struct btree_trans *pos;
2969                 pid_t pid = current->pid;
2970
2971                 trans->locking_wait.task = current;
2972
2973                 list_for_each_entry(pos, &c->btree_trans_list, list) {
2974                         struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
2975                         /*
2976                          * We'd much prefer to be stricter here and completely
2977                          * disallow multiple btree_trans in the same thread -
2978                          * but the data move path calls bch2_write when we
2979                          * already have a btree_trans initialized.
2980                          */
2981                         BUG_ON(pos_task &&
2982                                pid == pos_task->pid &&
2983                                bch2_trans_locked(pos));
2984
2985                         if (pos_task && pid < pos_task->pid) {
2986                                 list_add_tail(&trans->list, &pos->list);
2987                                 goto list_add_done;
2988                         }
2989                 }
2990         }
2991         list_add_tail(&trans->list, &c->btree_trans_list);
2992 list_add_done:
2993         seqmutex_unlock(&c->btree_trans_lock);
2994 got_trans:
2995         trans->c                = c;
2996         trans->last_begin_time  = local_clock();
2997         trans->fn_idx           = fn_idx;
2998         trans->locking_wait.task = current;
2999         trans->journal_replay_not_finished =
3000                 unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
3001                 atomic_inc_not_zero(&c->journal_keys.ref);
3002         trans->nr_paths         = ARRAY_SIZE(trans->_paths);
3003         trans->paths_allocated  = trans->_paths_allocated;
3004         trans->sorted           = trans->_sorted;
3005         trans->paths            = trans->_paths;
3006         trans->updates          = trans->_updates;
3007
3008         *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3009
3010         trans->paths_allocated[0] = 1;
3011
3012         if (fn_idx < BCH_TRANSACTIONS_NR) {
3013                 trans->fn = bch2_btree_transaction_fns[fn_idx];
3014
3015                 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3016
3017                 if (s->max_mem) {
3018                         unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3019
3020                         trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3021                         if (likely(trans->mem))
3022                                 trans->mem_bytes = expected_mem_bytes;
3023                 }
3024
3025                 trans->nr_paths_max = s->nr_max_paths;
3026                 trans->journal_entries_size = s->journal_entries_size;
3027         }
3028
3029         trans->srcu_idx         = srcu_read_lock(&c->btree_trans_barrier);
3030         trans->srcu_lock_time   = jiffies;
3031         trans->srcu_held        = true;
3032         return trans;
3033 }
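     /*
      * Editor's note: the this_cpu_xchg() fast path above reuses one cached
      * btree_trans per CPU (matching the xchg in bch2_trans_put() below);
      * only the fields before 'list' are cleared on reuse, so the
      * btree_trans_list linkage and the embedded closure survive while the
      * trans sits in the per-cpu buffer.
      */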
3034
3035 static void check_btree_paths_leaked(struct btree_trans *trans)
3036 {
3037 #ifdef CONFIG_BCACHEFS_DEBUG
3038         struct bch_fs *c = trans->c;
3039         struct btree_path *path;
3040         unsigned i;
3041
3042         trans_for_each_path(trans, path, i)
3043                 if (path->ref)
3044                         goto leaked;
3045         return;
3046 leaked:
3047         bch_err(c, "btree paths leaked from %s!", trans->fn);
3048         trans_for_each_path(trans, path, i)
3049                 if (path->ref)
3050                         printk(KERN_ERR "  btree %s %pS\n",
3051                                bch2_btree_id_str(path->btree_id),
3052                                (void *) path->ip_allocated);
3053         /* Be noisy about this: */
3054         bch2_fatal_error(c);
3055 #endif
3056 }
3057
3058 void bch2_trans_put(struct btree_trans *trans)
3059         __releases(&c->btree_trans_barrier)
3060 {
3061         struct bch_fs *c = trans->c;
3062
3063         bch2_trans_unlock(trans);
3064
3065         trans_for_each_update(trans, i)
3066                 __btree_path_put(trans->paths + i->path, true);
3067         trans->nr_updates       = 0;
3068         trans->locking_wait.task = NULL;
3069
3070         check_btree_paths_leaked(trans);
3071
3072         if (trans->srcu_held) {
3073                 check_srcu_held_too_long(trans);
3074                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3075         }
3076
3077         if (trans->fs_usage_deltas) {
3078                 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3079                     REPLICAS_DELTA_LIST_MAX)
3080                         mempool_free(trans->fs_usage_deltas,
3081                                      &c->replicas_delta_pool);
3082                 else
3083                         kfree(trans->fs_usage_deltas);
3084         }
3085
3086         if (unlikely(trans->journal_replay_not_finished))
3087                 bch2_journal_keys_put(c);
3088
3089         unsigned long *paths_allocated = trans->paths_allocated;
3090         trans->paths_allocated  = NULL;
3091         trans->paths            = NULL;
3092
3093         if (paths_allocated != trans->_paths_allocated)
3094                 kvfree_rcu_mightsleep(paths_allocated);
3095
3096         if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3097                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3098         else
3099                 kfree(trans->mem);
3100
3101         /* Userspace doesn't have a real percpu implementation: */
3102         if (IS_ENABLED(__KERNEL__))
3103                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3104
3105         if (trans) {
3106                 closure_sync(&trans->ref);
3107
3108                 seqmutex_lock(&c->btree_trans_lock);
3109                 list_del(&trans->list);
3110                 seqmutex_unlock(&c->btree_trans_lock);
3111
3112                 mempool_free(trans, &c->btree_trans_pool);
3113         }
3114 }
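     /*
      * Editor's note: get/put bracket every transaction, with the retry loop
      * in between; a sketch of the usual shape (the bch2_trans_run() helper
      * packages the same thing, do_whatever() again hypothetical):
      *
      *     struct btree_trans *trans = bch2_trans_get(c);
      *     int ret = lockrestart_do(trans, do_whatever(trans));
      *     bch2_trans_put(trans);
      */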
3115
3116 static void __maybe_unused
3117 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3118                                       struct btree_bkey_cached_common *b)
3119 {
3120         struct six_lock_count c = six_lock_counts(&b->lock);
3121         struct task_struct *owner;
3122         pid_t pid;
3123
3124         rcu_read_lock();
3125         owner = READ_ONCE(b->lock.owner);
3126         pid = owner ? owner->pid : 0;
3127         rcu_read_unlock();
3128
3129         prt_tab(out);
3130         prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3131                    b->level, bch2_btree_id_str(b->btree_id));
3132         bch2_bpos_to_text(out, btree_node_pos(b));
3133
3134         prt_tab(out);
3135         prt_printf(out, " locks %u:%u:%u held by pid %u",
3136                    c.n[0], c.n[1], c.n[2], pid);
3137 }
3138
3139 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3140 {
3141         struct btree_bkey_cached_common *b;
3142         static char lock_types[] = { 'r', 'i', 'w' };
3143         struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3144         unsigned l, idx;
3145
3146         /* before rcu_read_lock(): */
3147         bch2_printbuf_make_room(out, 4096);
3148
3149         if (!out->nr_tabstops) {
3150                 printbuf_tabstop_push(out, 16);
3151                 printbuf_tabstop_push(out, 32);
3152         }
3153
3154         prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3155
3156         /* trans->paths is rcu protected vs. freeing */
3157         rcu_read_lock();
3158         out->atomic++;
3159
3160         struct btree_path *paths = rcu_dereference(trans->paths);
3161         if (!paths)
3162                 goto out;
3163
3164         unsigned long *paths_allocated = trans_paths_allocated(paths);
3165
3166         trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3167                 struct btree_path *path = paths + idx;
3168                 if (!path->nodes_locked)
3169                         continue;
3170
3171                 prt_printf(out, "  path %u %c l=%u %s:",
3172                        idx,
3173                        path->cached ? 'c' : 'b',
3174                        path->level,
3175                        bch2_btree_id_str(path->btree_id));
3176                 bch2_bpos_to_text(out, path->pos);
3177                 prt_newline(out);
3178
3179                 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3180                         if (btree_node_locked(path, l) &&
3181                             !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3182                                 prt_printf(out, "    %c l=%u ",
3183                                            lock_types[btree_node_locked_type(path, l)], l);
3184                                 bch2_btree_bkey_cached_common_to_text(out, b);
3185                                 prt_newline(out);
3186                         }
3187                 }
3188         }
3189
3190         b = READ_ONCE(trans->locking);
3191         if (b) {
3192                 prt_printf(out, "  blocked for %lluus on",
3193                            div_u64(local_clock() - trans->locking_wait.start_time,
3194                                    1000));
3195                 prt_newline(out);
3196                 prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3197                 bch2_btree_bkey_cached_common_to_text(out, b);
3198                 prt_newline(out);
3199         }
3200 out:
3201         --out->atomic;
3202         rcu_read_unlock();
3203 }
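     /*
      * Editor's note: this formatter backs the debugfs btree transaction dump
      * and lock cycle reports. The printbuf is grown to 4096 bytes up front
      * because once out->atomic is set inside the RCU read section, the
      * printbuf must not allocate.
      */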
3204
3205 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3206 {
3207         struct btree_transaction_stats *s;
3208         struct btree_trans *trans;
3209         int cpu;
3210
3211         if (c->btree_trans_bufs)
3212                 for_each_possible_cpu(cpu) {
3213                         struct btree_trans *trans =
3214                                 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3215
3216                         if (trans) {
3217                                 closure_sync(&trans->ref);
3218
3219                                 seqmutex_lock(&c->btree_trans_lock);
3220                                 list_del(&trans->list);
3221                                 seqmutex_unlock(&c->btree_trans_lock);
3222                         }
3223                         kfree(trans);
3224                 }
3225         free_percpu(c->btree_trans_bufs);
3226
3227         trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3228         if (trans)
3229                 panic("%s leaked btree_trans\n", trans->fn);
3230
3231         for (s = c->btree_transaction_stats;
3232              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3233              s++) {
3234                 kfree(s->max_paths_text);
3235                 bch2_time_stats_exit(&s->lock_hold_times);
3236         }
3237
3238         if (c->btree_trans_barrier_initialized)
3239                 cleanup_srcu_struct(&c->btree_trans_barrier);
3240         mempool_exit(&c->btree_trans_mem_pool);
3241         mempool_exit(&c->btree_trans_pool);
3242 }
3243
3244 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3245 {
3246         struct btree_transaction_stats *s;
3247
3248         for (s = c->btree_transaction_stats;
3249              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3250              s++) {
3251                 bch2_time_stats_init(&s->duration);
3252                 bch2_time_stats_init(&s->lock_hold_times);
3253                 mutex_init(&s->lock);
3254         }
3255
3256         INIT_LIST_HEAD(&c->btree_trans_list);
3257         seqmutex_init(&c->btree_trans_lock);
3258 }
3259
3260 int bch2_fs_btree_iter_init(struct bch_fs *c)
3261 {
3262         int ret;
3263
3264         c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3265         if (!c->btree_trans_bufs)
3266                 return -ENOMEM;
3267
3268         ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3269                                           sizeof(struct btree_trans)) ?:
3270                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3271                                           BTREE_TRANS_MEM_MAX) ?:
3272                 init_srcu_struct(&c->btree_trans_barrier);
3273         if (!ret)
3274                 c->btree_trans_barrier_initialized = true;
3275         return ret;
3276 }
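     /*
      * Editor's note on lifecycle, as assumed from the code above:
      * bch2_fs_btree_iter_init_early() runs before anything that can fail,
      * this function does the allocations, and bch2_fs_btree_iter_exit()
      * tears it all down - the btree_trans_barrier_initialized flag makes
      * teardown safe even when init_srcu_struct() failed here.
      */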