// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
                        btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
        return iter->ip_allocated;
#else
        return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);

static inline int __btree_path_cmp(const struct btree_path *l,
                                   enum btree_id        r_btree_id,
                                   bool                 r_cached,
                                   struct bpos          r_pos,
                                   unsigned             r_level)
{
        /*
         * Must match lock ordering as defined by __bch2_btree_node_lock:
         */
        return   cmp_int(l->btree_id,   r_btree_id) ?:
                 cmp_int((int) l->cached,       (int) r_cached) ?:
                 bpos_cmp(l->pos,       r_pos) ?:
                -cmp_int(l->level,      r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
                                 const struct btree_path *r)
{
        return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_all_snapshots) {
                p = bpos_successor(p);
        } else {
                p = bpos_nosnap_successor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
        /* Are we iterating over keys in all snapshots? */
        if (iter->flags & BTREE_ITER_all_snapshots) {
                p = bpos_predecessor(p);
        } else {
                p = bpos_nosnap_predecessor(p);
                p.snapshot = iter->snapshot;
        }

        return p;
}

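/*
 * Extents are indexed by their end position: to find the extent covering
 * iter->pos, we search for the successor of iter->pos:
 */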
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
        struct bpos pos = iter->pos;

        if ((iter->flags & BTREE_ITER_is_extents) &&
            !bkey_eq(pos, POS_MAX))
                pos = bkey_successor(iter, pos);
        return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
                                              struct btree *b)
{
        return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
                                             struct btree *b)
{
        return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
                                          struct btree *b)
{
        return path->btree_id == b->c.btree_id &&
                !btree_path_pos_before_node(path, b) &&
                !btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
                                          struct btree_path *path)
{
        struct bkey_cached *ck;
        bool locked = btree_node_locked(path, 0);

        if (!bch2_btree_node_relock(trans, path, 0))
                return;

        ck = (void *) path->l[0].b;
        BUG_ON(ck->key.btree_id != path->btree_id ||
               !bkey_eq(ck->key.pos, path->pos));

        if (!locked)
                btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
                                struct btree_path *path, unsigned level)
{
        struct btree_path_level *l;
        struct btree_node_iter tmp;
        bool locked;
        struct bkey_packed *p, *k;
        struct printbuf buf1 = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        struct printbuf buf3 = PRINTBUF;
        const char *msg;

        if (!bch2_debug_check_iterators)
                return;

        l       = &path->l[level];
        tmp     = l->iter;
        locked  = btree_node_locked(path, level);

        if (path->cached) {
                if (!level)
                        bch2_btree_path_verify_cached(trans, path);
                return;
        }

        if (!btree_path_node(path, level))
                return;

        if (!bch2_btree_node_relock_notrace(trans, path, level))
                return;

        BUG_ON(!btree_path_pos_in_node(path, l->b));

        bch2_btree_node_iter_verify(&l->iter, l->b);

        /*
         * For interior nodes, the iterator will have skipped past deleted keys:
         */
        p = level
                ? bch2_btree_node_iter_prev(&tmp, l->b)
                : bch2_btree_node_iter_prev_all(&tmp, l->b);
        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

        if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
                msg = "before";
                goto err;
        }

        if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                msg = "after";
                goto err;
        }

        if (!locked)
                btree_node_unlock(trans, path, level);
        return;
err:
        bch2_bpos_to_text(&buf1, path->pos);

        if (p) {
                struct bkey uk = bkey_unpack_key(l->b, p);

                bch2_bkey_to_text(&buf2, &uk);
        } else {
                prt_printf(&buf2, "(none)");
        }

        if (k) {
                struct bkey uk = bkey_unpack_key(l->b, k);

                bch2_bkey_to_text(&buf3, &uk);
        } else {
                prt_printf(&buf3, "(none)");
        }

        panic("path should be %s key at level %u:\n"
              "path pos %s\n"
              "prev key %s\n"
              "cur  key %s\n",
              msg, level, buf1.buf, buf2.buf, buf3.buf);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
                                   struct btree_path *path)
{
        struct bch_fs *c = trans->c;

        for (unsigned i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
                if (!path->l[i].b) {
                        BUG_ON(!path->cached &&
                               bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
                        break;
                }

                bch2_btree_path_verify_level(trans, path, i);
        }

        bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
        struct btree_path *path;
        unsigned iter;

        trans_for_each_path(trans, path, iter)
                bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
        struct btree_trans *trans = iter->trans;

        BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

        BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
               (iter->flags & BTREE_ITER_all_snapshots));

        BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
               (iter->flags & BTREE_ITER_all_snapshots) &&
               !btree_type_has_snapshot_field(iter->btree_id));

        if (iter->update_path)
                bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
        bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
        BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
               !iter->pos.snapshot);

        BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
               iter->pos.snapshot != iter->snapshot);

        BUG_ON(iter->flags & BTREE_ITER_all_snapshots   ? !bpos_eq(iter->pos, iter->k.p) :
               !(iter->flags & BTREE_ITER_is_extents)   ? !bkey_eq(iter->pos, iter->k.p) :
               (bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
                bkey_gt(iter->pos, iter->k.p)));
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
        struct btree_trans *trans = iter->trans;
        struct btree_iter copy;
        struct bkey_s_c prev;
        int ret = 0;

        if (!bch2_debug_check_iterators)
                return 0;

        if (!(iter->flags & BTREE_ITER_filter_snapshots))
                return 0;

        if (bkey_err(k) || !k.k)
                return 0;

        BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
                                          iter->snapshot,
                                          k.k->p.snapshot));

        bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
                             BTREE_ITER_nopreserve|
                             BTREE_ITER_all_snapshots);
        prev = bch2_btree_iter_prev(&copy);
        if (!prev.k)
                goto out;

        ret = bkey_err(prev);
        if (ret)
                goto out;

        if (bkey_eq(prev.k->p, k.k->p) &&
            bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
                                      prev.k->p.snapshot) > 0) {
                struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

                bch2_bkey_to_text(&buf1, k.k);
                bch2_bkey_to_text(&buf2, prev.k);

                panic("iter snap %u\n"
                      "k    %s\n"
                      "prev %s\n",
                      iter->snapshot,
                      buf1.buf, buf2.buf);
        }
out:
        bch2_trans_iter_exit(trans, &copy);
        return ret;
}

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
                            struct bpos pos)
{
        bch2_trans_verify_not_unlocked_or_in_restart(trans);

        struct btree_path *path;
        struct trans_for_each_path_inorder_iter iter;
        struct printbuf buf = PRINTBUF;

        btree_trans_sort_paths(trans);

        trans_for_each_path_inorder(trans, path, iter) {
                if (path->btree_id != id ||
                    !btree_node_locked(path, 0) ||
                    !path->should_be_locked)
                        continue;

                if (!path->cached) {
                        if (bkey_ge(pos, path->l[0].b->data->min_key) &&
                            bkey_le(pos, path->l[0].b->key.k.p))
                                return;
                } else {
                        if (bkey_eq(pos, path->pos))
                                return;
                }
        }

        bch2_dump_trans_paths_updates(trans);
        bch2_bpos_to_text(&buf, pos);

        panic("not locked: %s %s\n", bch2_btree_id_str(id), buf.buf);
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
                                                struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
                                          struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

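/*
 * Point the node iterator at @k within bset @t, re-sorting the iterator's
 * heap; if the iterator doesn't yet have a position in @t, push one:
 */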
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
                                        struct btree *b,
                                        struct bset_tree *t,
                                        struct bkey_packed *k)
{
        struct btree_node_iter_set *set;

        btree_node_iter_for_each(iter, set)
                if (set->end == t->end_offset) {
                        set->k = __btree_node_key_to_offset(b, k);
                        bch2_btree_node_iter_sort(iter, b);
                        return;
                }

        bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
                                               struct btree *b,
                                               struct bkey_packed *where)
{
        struct btree_path_level *l = &path->l[b->c.level];

        if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
                return;

        if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
                bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
                                      struct btree *b,
                                      struct bkey_packed *where)
{
        struct btree_path *path;
        unsigned i;

        trans_for_each_path_with_node(trans, b, path, i) {
                __bch2_btree_path_fix_key_modified(path, b, where);
                bch2_btree_path_verify_level(trans, path, b->c.level);
        }
}

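/*
 * @clobber_u64s u64s worth of key(s) at @where were overwritten by @new_u64s
 * u64s: fix up this node iterator so it still points at the same position,
 * shifting offsets after @where by the size difference:
 */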
static void __bch2_btree_node_iter_fix(struct btree_path *path,
                                       struct btree *b,
                                       struct btree_node_iter *node_iter,
                                       struct bset_tree *t,
                                       struct bkey_packed *where,
                                       unsigned clobber_u64s,
                                       unsigned new_u64s)
{
        const struct bkey_packed *end = btree_bkey_last(b, t);
        struct btree_node_iter_set *set;
        unsigned offset = __btree_node_key_to_offset(b, where);
        int shift = new_u64s - clobber_u64s;
        unsigned old_end = t->end_offset - shift;
        unsigned orig_iter_pos = node_iter->data[0].k;
        bool iter_current_key_modified =
                orig_iter_pos >= offset &&
                orig_iter_pos <= offset + clobber_u64s;

        btree_node_iter_for_each(node_iter, set)
                if (set->end == old_end)
                        goto found;

        /* didn't find the bset in the iterator - might have to re-add it: */
        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                bch2_btree_node_iter_push(node_iter, b, where, end);
                goto fixup_done;
        } else {
                /* Iterator is after key that changed */
                return;
        }
found:
        set->end = t->end_offset;

        /* Iterator hasn't gotten to the key that changed yet: */
        if (set->k < offset)
                return;

        if (new_u64s &&
            bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
                set->k = offset;
        } else if (set->k < offset + clobber_u64s) {
                set->k = offset + new_u64s;
                if (set->k == set->end)
                        bch2_btree_node_iter_set_drop(node_iter, set);
        } else {
                /* Iterator is after key that changed */
                set->k = (int) set->k + shift;
                return;
        }

        bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
        if (node_iter->data[0].k != orig_iter_pos)
                iter_current_key_modified = true;

        /*
         * When a new key is added, and the node iterator now points to that
         * key, the iterator might have skipped past deleted keys that should
         * come after the key the iterator now points to. We have to rewind to
         * before those deleted keys - otherwise
         * bch2_btree_node_iter_prev_all() breaks:
         */
        if (!bch2_btree_node_iter_end(node_iter) &&
            iter_current_key_modified &&
            b->c.level) {
                struct bkey_packed *k, *k2, *p;

                k = bch2_btree_node_iter_peek_all(node_iter, b);

                for_each_bset(b, t) {
                        bool set_pos = false;

                        if (node_iter->data[0].end == t->end_offset)
                                continue;

                        k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

                        while ((p = bch2_bkey_prev_all(b, t, k2)) &&
                               bkey_iter_cmp(b, k, p) < 0) {
                                k2 = p;
                                set_pos = true;
                        }

                        if (set_pos)
                                btree_node_iter_set_set_pos(node_iter,
                                                            b, t, k2);
                }
        }
}

void bch2_btree_node_iter_fix(struct btree_trans *trans,
                              struct btree_path *path,
                              struct btree *b,
                              struct btree_node_iter *node_iter,
                              struct bkey_packed *where,
                              unsigned clobber_u64s,
                              unsigned new_u64s)
{
        struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
        struct btree_path *linked;
        unsigned i;

        if (node_iter != &path->l[b->c.level].iter) {
                __bch2_btree_node_iter_fix(path, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);

                if (bch2_debug_check_iterators)
                        bch2_btree_node_iter_verify(node_iter, b);
        }

        trans_for_each_path_with_node(trans, b, linked, i) {
                __bch2_btree_node_iter_fix(linked, b,
                                           &linked->l[b->c.level].iter, t,
                                           where, clobber_u64s, new_u64s);
                bch2_btree_path_verify_level(trans, linked, b->c.level);
        }
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
                                                  struct btree_path_level *l,
                                                  struct bkey *u,
                                                  struct bkey_packed *k)
{
        if (unlikely(!k)) {
                /*
                 * signal to bch2_btree_iter_peek_slot() that we're currently at
                 * a hole
                 */
                u->type = KEY_TYPE_deleted;
                return bkey_s_c_null;
        }

        return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
                                                        struct btree_path_level *l,
                                                        struct bkey *u)
{
        return __btree_iter_unpack(c, l, u,
                        bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
                        bch2_btree_node_iter_peek(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->key.k.p;
        trans->paths_sorted = false;
        bch2_btree_path_verify_level(trans, path, l - path->l);
        return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
                                                    struct btree_path *path,
                                                    struct btree_path_level *l,
                                                    struct bkey *u)
{
        struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
                        bch2_btree_node_iter_prev(&l->iter, l->b));

        path->pos = k.k ? k.k->p : l->b->data->min_key;
        trans->paths_sorted = false;
        bch2_btree_path_verify_level(trans, path, l - path->l);
        return k;
}

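/*
 * Advance the node iterator to the first key at or after path->pos; returns
 * false if that would take more than @max_advance steps, in which case the
 * caller reinitializes the iterator instead, which is cheaper when skipping
 * far ahead:
 */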
static inline bool btree_path_advance_to_pos(struct btree_path *path,
                                             struct btree_path_level *l,
                                             int max_advance)
{
        struct bkey_packed *k;
        int nr_advanced = 0;

        while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
               bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
                if (max_advance > 0 && nr_advanced >= max_advance)
                        return false;

                bch2_btree_node_iter_advance(&l->iter, l->b);
                nr_advanced++;
        }

        return true;
}

static inline void __btree_path_level_init(struct btree_path *path,
                                           unsigned level)
{
        struct btree_path_level *l = &path->l[level];

        bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

        /*
         * Iterators to interior nodes should always be pointed at the first
         * non-whiteout key:
         */
        if (level)
                bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
                                struct btree_path *path,
                                struct btree *b)
{
        BUG_ON(path->cached);

        EBUG_ON(!btree_path_pos_in_node(path, b));

        path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
        path->l[b->c.level].b = b;
        __btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
        struct bch_fs *c = trans->c;

        trans_for_each_update(trans, i)
                if (!i->cached &&
                    i->level    == b->c.level &&
                    i->btree_id == b->c.btree_id &&
                    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
                    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
                        i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

                        if (unlikely(trans->journal_replay_not_finished)) {
                                struct bkey_i *j_k =
                                        bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
                                                                    i->k->k.p);

                                if (j_k) {
                                        i->old_k = j_k->k;
                                        i->old_v = &j_k->v;
                                }
                        }
                }
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
                         struct btree_path *path,
                         struct btree *b)
{
        struct btree_path *prev;

        BUG_ON(!btree_path_pos_in_node(path, b));

        while ((prev = prev_btree_path(trans, path)) &&
               btree_path_pos_in_node(prev, b))
                path = prev;

        for (;
             path && btree_path_pos_in_node(path, b);
             path = next_btree_path(trans, path))
                if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
                        enum btree_node_locked_type t =
                                btree_lock_want(path, b->c.level);

                        if (t != BTREE_NODE_UNLOCKED) {
                                btree_node_unlock(trans, path, b->c.level);
                                six_lock_increment(&b->c.lock, (enum six_lock_type) t);
                                mark_btree_node_locked(trans, path, b->c.level, t);
                        }

                        bch2_btree_path_level_init(trans, path, b);
                }

        bch2_trans_revalidate_updates_in_node(trans, b);
}

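/*
 * A btree node is going away: unlock and invalidate every path that still
 * points to it:
 */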
void bch2_trans_node_drop(struct btree_trans *trans,
                          struct btree *b)
{
        struct btree_path *path;
        unsigned i, level = b->c.level;

        trans_for_each_path(trans, path, i)
                if (path->l[level].b == b) {
                        btree_node_unlock(trans, path, level);
                        path->l[level].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
                }
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
        struct btree_path *path;
        unsigned i;

        trans_for_each_path_with_node(trans, b, path, i)
                __btree_path_level_init(path, b->c.level);

        bch2_trans_revalidate_updates_in_node(trans, b);
}

/* Btree path: traverse, set_pos: */

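/*
 * Lock the btree root, re-checking after taking the lock that it's still the
 * current root - the root can be replaced concurrently (e.g. by a split
 * adding a new root):
 */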
static inline int btree_path_lock_root(struct btree_trans *trans,
                                       struct btree_path *path,
                                       unsigned depth_want,
                                       unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree_root *r = bch2_btree_id_root(c, path->btree_id);
        enum six_lock_type lock_type;
        unsigned i;
        int ret;

        EBUG_ON(path->nodes_locked);

        while (1) {
                struct btree *b = READ_ONCE(r->b);
                if (unlikely(!b)) {
                        BUG_ON(!r->error);
                        return r->error;
                }

                path->level = READ_ONCE(b->c.level);

                if (unlikely(path->level < depth_want)) {
                        /*
                         * the root is at a lower depth than the depth we want:
                         * got to the end of the btree, or we're walking nodes
                         * greater than some depth and there are no nodes >=
                         * that depth
                         */
                        path->level = depth_want;
                        for (i = path->level; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;
                        return 1;
                }

                lock_type = __btree_lock_want(path, path->level);
                ret = btree_node_lock(trans, path, &b->c,
                                      path->level, lock_type, trace_ip);
                if (unlikely(ret)) {
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                                return ret;
                        BUG();
                }

                if (likely(b == READ_ONCE(r->b) &&
                           b->c.level == path->level &&
                           !race_fault())) {
                        for (i = 0; i < path->level; i++)
                                path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
                        path->l[path->level].b = b;
                        for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
                                path->l[i].b = NULL;

                        mark_btree_node_locked(trans, path, path->level,
                                               (enum btree_node_locked_type) lock_type);
                        bch2_btree_path_level_init(trans, path, b);
                        return 0;
                }

                six_unlock_type(&b->c.lock, lock_type);
        }
}

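/*
 * Issue readahead for the next few child nodes at this level, to hide I/O
 * latency when walking the btree sequentially; we prefetch more aggressively
 * before the filesystem has finished starting up:
 */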
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_started, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_node_iter_advance(&node_iter, l->b);
                k = bch2_btree_node_iter_peek(&node_iter, l->b);
                if (!k)
                        break;

                bch2_bkey_buf_unpack(&tmp, c, l->b, k);
                ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(trans, path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_and_journal_iter *jiter)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        unsigned nr = test_bit(BCH_FS_started, &c->flags)
                ? (path->level > 1 ? 0 :  2)
                : (path->level > 1 ? 1 : 16);
        bool was_locked = btree_node_locked(path, path->level);
        int ret = 0;

        bch2_bkey_buf_init(&tmp);

        jiter->fail_if_too_many_whiteouts = true;

        while (nr-- && !ret) {
                if (!bch2_btree_node_relock(trans, path, path->level))
                        break;

                bch2_btree_and_journal_iter_advance(jiter);
                k = bch2_btree_and_journal_iter_peek(jiter);
                if (!k.k)
                        break;

                bch2_bkey_buf_reassemble(&tmp, c, k);
                ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
                                               path->level - 1);
        }

        if (!was_locked)
                btree_node_unlock(trans, path, path->level);

        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

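/*
 * Stash the in-memory address of the child node we just looked up in the
 * parent's btree_ptr_v2 key, so the next lookup of this node can skip the
 * btree node cache hash table:
 */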
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
                                            struct btree_path *path,
                                            unsigned plevel, struct btree *b)
{
        struct btree_path_level *l = &path->l[plevel];
        bool locked = btree_node_locked(path, plevel);
        struct bkey_packed *k;
        struct bch_btree_ptr_v2 *bp;

        if (!bch2_btree_node_relock(trans, path, plevel))
                return;

        k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
        BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

        bp = (void *) bkeyp_val(&l->b->format, k);
        bp->mem_ptr = (unsigned long)b;

        if (!locked)
                btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
                                                     struct btree_path *path,
                                                     unsigned flags,
                                                     struct bkey_buf *out)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree_and_journal_iter jiter;
        struct bkey_s_c k;
        int ret = 0;

        __bch2_btree_and_journal_iter_init_node_iter(trans, &jiter, l->b, l->iter, path->pos);

        k = bch2_btree_and_journal_iter_peek(&jiter);
        if (!k.k) {
                struct printbuf buf = PRINTBUF;

                prt_str(&buf, "node not found at pos ");
                bch2_bpos_to_text(&buf, path->pos);
                prt_str(&buf, " at btree ");
                bch2_btree_pos_to_text(&buf, c, l->b);

                ret = bch2_fs_topology_error(c, "%s", buf.buf);
                printbuf_exit(&buf);
                goto err;
        }

        bch2_bkey_buf_reassemble(out, c, k);

        if ((flags & BTREE_ITER_prefetch) &&
            c->opts.btree_node_prefetch)
                ret = btree_path_prefetch_j(trans, path, &jiter);

err:
        bch2_btree_and_journal_iter_exit(&jiter);
        return ret;
}

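/*
 * Walk down one level: read the pointer to the child node covering path->pos
 * from the node iterator at the current level (or from the journal, if
 * journal replay hasn't finished), get and lock the child node, and
 * initialize the path at the new level:
 */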
static __always_inline int btree_path_down(struct btree_trans *trans,
                                           struct btree_path *path,
                                           unsigned flags,
                                           unsigned long trace_ip)
{
        struct bch_fs *c = trans->c;
        struct btree_path_level *l = path_l(path);
        struct btree *b;
        unsigned level = path->level - 1;
        enum six_lock_type lock_type = __btree_lock_want(path, level);
        struct bkey_buf tmp;
        int ret;

        EBUG_ON(!btree_node_locked(path, path->level));

        bch2_bkey_buf_init(&tmp);

        if (unlikely(trans->journal_replay_not_finished)) {
                ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
                if (ret)
                        goto err;
        } else {
                struct bkey_packed *k = bch2_btree_node_iter_peek(&l->iter, l->b);
                if (!k) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "node not found at pos ");
                        bch2_bpos_to_text(&buf, path->pos);
                        prt_str(&buf, " within parent node ");
                        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&l->b->key));

                        bch2_fs_fatal_error(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                        ret = -BCH_ERR_btree_need_topology_repair;
                        goto err;
                }

                bch2_bkey_buf_unpack(&tmp, c, l->b, k);

                if ((flags & BTREE_ITER_prefetch) &&
                    c->opts.btree_node_prefetch) {
                        ret = btree_path_prefetch(trans, path);
                        if (ret)
                                goto err;
                }
        }

        b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
        ret = PTR_ERR_OR_ZERO(b);
        if (unlikely(ret))
                goto err;

        if (likely(!trans->journal_replay_not_finished &&
                   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
            unlikely(b != btree_node_mem_ptr(tmp.k)))
                btree_node_mem_ptr_set(trans, path, level + 1, b);

        if (btree_node_read_locked(path, level + 1))
                btree_node_unlock(trans, path, level + 1);

        mark_btree_node_locked(trans, path, level,
                               (enum btree_node_locked_type) lock_type);
        path->level = level;
        bch2_btree_path_level_init(trans, path, b);

        bch2_btree_path_verify_locks(path);
err:
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

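/*
 * Re-traverse every path in the transaction, in sorted order - i.e. in lock
 * order - so that taking the required locks cannot deadlock:
 */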
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_path *path;
        unsigned long trace_ip = _RET_IP_;
        unsigned i;
        int ret = 0;

        if (trans->in_traverse_all)
                return -BCH_ERR_transaction_restart_in_traverse_all;

        trans->in_traverse_all = true;
retry_all:
        trans->restarted = 0;
        trans->last_restarted_ip = 0;

        trans_for_each_path(trans, path, i)
                path->should_be_locked = false;

        btree_trans_sort_paths(trans);

        bch2_trans_unlock(trans);
        cond_resched();
        trans_set_locked(trans, false);

        if (unlikely(trans->memory_allocation_failure)) {
                struct closure cl;

                closure_init_stack(&cl);

                do {
                        ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
                        closure_sync(&cl);
                } while (ret);
        }

        /* Now, redo traversals in correct order: */
        i = 0;
        while (i < trans->nr_sorted) {
                btree_path_idx_t idx = trans->sorted[i];

                /*
                 * Traversing a path can cause another path to be added at about
                 * the same position:
                 */
                if (trans->paths[idx].uptodate) {
                        __btree_path_get(trans, &trans->paths[idx], false);
                        ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
                        __btree_path_put(trans, &trans->paths[idx], false);

                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
                            bch2_err_matches(ret, ENOMEM))
                                goto retry_all;
                        if (ret)
                                goto err;
                } else {
                        i++;
                }
        }

        /*
         * We used to assert that all paths had been traversed here
         * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
         * path->should_be_locked is not set yet, we might have unlocked and
         * then failed to relock a path - that's fine.
         */
err:
        bch2_btree_cache_cannibalize_unlock(trans);

        trans->in_traverse_all = false;

        trace_and_count(c, trans_traverse_all, trans, trace_ip);
        return ret;
}

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
                                                unsigned l, int check_pos)
{
        if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
                return false;
        if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
                return false;
        return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
                                        struct btree_path *path,
                                        unsigned l, int check_pos)
{
        return is_btree_node(path, l) &&
                bch2_btree_node_relock(trans, path, l) &&
                btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
                                      struct btree_path *path,
                                      unsigned new_level)
{
        unsigned l;

        path->level = new_level;

        for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
                if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
                        btree_node_unlock(trans, path, l);

        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
        bch2_btree_path_verify(trans, path);
}

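/*
 * Walk up the path until we're at a node that's locked (or can be relocked)
 * and still contains path->pos, dropping locks as we go; returns the level we
 * stopped at, after also retaking any intent locks the path wants above that
 * level:
 */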
static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
                                                         struct btree_path *path,
                                                         int check_pos)
{
        unsigned i, l = path->level;
again:
        while (btree_path_node(path, l) &&
               !btree_path_good_node(trans, path, l, check_pos))
                __btree_path_set_level_up(trans, path, l++);

        /* If we need intent locks, take them too: */
        for (i = l + 1;
             i < path->locks_want && btree_path_node(path, i);
             i++)
                if (!bch2_btree_node_relock(trans, path, i)) {
                        while (l <= i)
                                __btree_path_set_level_up(trans, path, l++);
                        goto again;
                }

        return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
                                                     struct btree_path *path,
                                                     int check_pos)
{
        return likely(btree_node_locked(path, path->level) &&
                      btree_path_check_pos_in_node(path, path->level, check_pos))
                ? path->level
                : __btree_path_up_until_good_node(trans, path, check_pos);
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, or a negative error code on failure: a transaction
 * restart error, or an error from reading in a btree node.
 *
 * On error, the error is also stashed in the path: the node pointer at the
 * level we failed at is set to an ERR_PTR.
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
                                 btree_path_idx_t path_idx,
                                 unsigned flags,
                                 unsigned long trace_ip)
{
        struct btree_path *path = &trans->paths[path_idx];
        unsigned depth_want = path->level;
        int ret = -((int) trans->restarted);

        if (unlikely(ret))
                goto out;

        if (unlikely(!trans->srcu_held))
                bch2_trans_srcu_lock(trans);

        trace_btree_path_traverse_start(trans, path);

        /*
         * Ensure we obey path->should_be_locked: if it's set, we can't unlock
         * and re-traverse the path without a transaction restart:
         */
        if (path->should_be_locked) {
                ret = bch2_btree_path_relock(trans, path, trace_ip);
                goto out;
        }

        if (path->cached) {
                ret = bch2_btree_path_traverse_cached(trans, path, flags);
                goto out;
        }

        path = &trans->paths[path_idx];

        if (unlikely(path->level >= BTREE_MAX_DEPTH))
                goto out_uptodate;

        path->level = btree_path_up_until_good_node(trans, path, 0);
        unsigned max_level = path->level;

        EBUG_ON(btree_path_node(path, path->level) &&
                !btree_node_locked(path, path->level));

        /*
         * Note: path->l[path->level].b may be temporarily NULL here - that
         * would indicate to other code that we got to the end of the btree,
         * here it indicates that relocking the root failed - it's critical that
         * btree_path_lock_root() comes next and that it can't fail
         */
        while (path->level > depth_want) {
                ret = btree_path_node(path, path->level)
                        ? btree_path_down(trans, path, flags, trace_ip)
                        : btree_path_lock_root(trans, path, depth_want, trace_ip);
                if (unlikely(ret)) {
                        if (ret == 1) {
                                /*
                                 * No nodes at this level - got to the end of
                                 * the btree:
                                 */
                                ret = 0;
                                goto out;
                        }

                        __bch2_btree_path_unlock(trans, path);
                        path->level = depth_want;
                        path->l[path->level].b = ERR_PTR(ret);
                        goto out;
                }
        }

        if (unlikely(max_level > path->level)) {
                struct btree_path *linked;
                unsigned iter;

                trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
                        for (unsigned j = path->level + 1; j < max_level; j++)
                                linked->l[j] = path->l[j];
        }

out_uptodate:
        path->uptodate = BTREE_ITER_UPTODATE;
        trace_btree_path_traverse_end(trans, path);
out:
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
                panic("ret %s (%i) trans->restarted %s (%i)\n",
                      bch2_err_str(ret), ret,
                      bch2_err_str(trans->restarted), trans->restarted);
        bch2_btree_path_verify(trans, path);
        return ret;
}

static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
                            struct btree_path *src)
{
        unsigned i, offset = offsetof(struct btree_path, pos);

        memcpy((void *) dst + offset,
               (void *) src + offset,
               sizeof(struct btree_path) - offset);

        for (i = 0; i < BTREE_MAX_DEPTH; i++) {
                unsigned t = btree_node_locked_type(dst, i);

                if (t != BTREE_NODE_UNLOCKED)
                        six_lock_increment(&dst->l[i].b->c.lock, t);
        }
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
                                         bool intent, unsigned long ip)
{
        btree_path_idx_t new = btree_path_alloc(trans, src);
        btree_path_copy(trans, trans->paths + new, trans->paths + src);
        __btree_path_get(trans, trans->paths + new, intent);
#ifdef TRACK_PATH_ALLOCATED
        trans->paths[new].ip_allocated = ip;
#endif
        return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
                        btree_path_idx_t path, bool intent, unsigned long ip)
{
        struct btree_path *old = trans->paths + path;
        __btree_path_put(trans, trans->paths + path, intent);
        path = btree_path_clone(trans, path, intent, ip);
        trace_btree_path_clone(trans, old, trans->paths + path);
        trans->paths[path].preserve = false;
        return path;
}

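/*
 * Move a path to a new position: the path is first made mutable (cloned, if
 * other code also holds a reference to it), then we walk up until we're at a
 * node that covers the new position and reposition the node iterator:
 */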
btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
                          btree_path_idx_t path_idx, struct bpos new_pos,
                          bool intent, unsigned long ip)
{
        int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

        bch2_trans_verify_not_unlocked_or_in_restart(trans);
        EBUG_ON(!trans->paths[path_idx].ref);

        trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);

        path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

        struct btree_path *path = trans->paths + path_idx;
        path->pos               = new_pos;
        trans->paths_sorted     = false;

        if (unlikely(path->cached)) {
                btree_node_unlock(trans, path, 0);
                path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                goto out;
        }

        unsigned level = btree_path_up_until_good_node(trans, path, cmp);

        if (btree_path_node(path, level)) {
                struct btree_path_level *l = &path->l[level];

                BUG_ON(!btree_node_locked(path, level));
                /*
                 * We might have to skip over many keys, or just a few: try
                 * advancing the node iterator, and if we have to skip over too
                 * many keys just reinit it (or if we're rewinding, since that
                 * is expensive).
                 */
                if (cmp < 0 ||
                    !btree_path_advance_to_pos(path, l, 8))
                        bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

                /*
                 * Iterators to interior nodes should always be pointed at the first
                 * non-whiteout key:
                 */
                if (unlikely(level))
                        bch2_btree_node_iter_peek(&l->iter, l->b);
        }

        if (unlikely(level != path->level)) {
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                __bch2_btree_path_unlock(trans, path);
        }
out:
        bch2_btree_path_verify(trans, path);
        return path_idx;
}

/* Btree path: main interface: */

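/*
 * Typical usage of this interface, as a rough sketch - error handling and
 * transaction restarts omitted:
 *
 *      btree_path_idx_t path = bch2_path_get(trans, btree_id, pos,
 *                                            locks_want, level, flags, ip);
 *      int ret = bch2_btree_path_traverse(trans, path, flags);
 *      struct bkey u;
 *      struct bkey_s_c k = bch2_btree_path_peek_slot(trans->paths + path, &u);
 *      ...
 *      bch2_path_put(trans, path, intent);
 */
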
static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
        struct btree_path *sib;

        sib = prev_btree_path(trans, path);
        if (sib && !btree_path_cmp(sib, path))
                return sib;

        sib = next_btree_path(trans, path);
        if (sib && !btree_path_cmp(sib, path))
                return sib;

        return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
        struct btree_path *sib;

        sib = prev_btree_path(trans, path);
        if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
                return sib;

        sib = next_btree_path(trans, path);
        if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
                return sib;

        return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
        __bch2_btree_path_unlock(trans, trans->paths + path);
        btree_path_list_remove(trans, trans->paths + path);
        __clear_bit(path, trans->paths_allocated);
}

static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
        unsigned l = path->level;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!is_btree_node(path, l))
                        return false;

                if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
                        return false;

                l++;
        } while (l < path->locks_want);

        return true;
}

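/*
 * Drop a reference to a path: if it was the last reference, free the path -
 * unless a duplicate path at the same position exists that can take over its
 * preserve/should_be_locked duties:
 */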
1400 void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
1401 {
1402         struct btree_path *path = trans->paths + path_idx, *dup;
1403
1404         if (!__btree_path_put(trans, path, intent))
1405                 return;
1406
1407         dup = path->preserve
1408                 ? have_path_at_pos(trans, path)
1409                 : have_node_at_pos(trans, path);
1410
1411         trace_btree_path_free(trans, path_idx, dup);
1412
1413         if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1414                 return;
1415
1416         if (path->should_be_locked && !trans->restarted) {
1417                 if (!dup)
1418                         return;
1419
1420                 if (!(trans->locked
1421                       ? bch2_btree_path_relock_norestart(trans, dup)
1422                       : bch2_btree_path_can_relock(trans, dup)))
1423                         return;
1424         }
1425
1426         if (dup) {
1427                 dup->preserve           |= path->preserve;
1428                 dup->should_be_locked   |= path->should_be_locked;
1429         }
1430
1431         __bch2_path_free(trans, path_idx);
1432 }
1433
1434 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
1435                                  bool intent)
1436 {
1437         if (!__btree_path_put(trans, trans->paths + path, intent))
1438                 return;
1439
1440         __bch2_path_free(trans, path);
1441 }
1442
void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

static void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct printbuf buf = PRINTBUF;
	bch2_prt_backtrace(&buf, &trans->last_restarted_trace);
	panic("in transaction restart: %s, last restarted by\n%s",
	      bch2_err_str(trans->restarted),
	      buf.buf);
#else
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
#endif
}

void __noreturn bch2_trans_unlocked_or_in_restart_error(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);

	if (!trans->locked)
		panic("trans should be locked, unlocked by %pS\n",
		      (void *) trans->last_unlock_ip);

	BUG();
}

noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
		   trans->nr_updates, trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_str(buf, "update: btree=");
		bch2_btree_id_to_text(buf, i->btree_id);
		prt_printf(buf, " cached=%u %pS\n",
			   i->cached,
			   (void *) i->ip_allocated);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}

noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   path->cached ? 'C' : 'B');
	bch2_btree_id_level_to_text(out, path->btree_id, path->level);
	prt_str(out, " pos ");
	bch2_bpos_to_text(out, path->pos);

	if (!path->cached && btree_node_locked(path, path->level)) {
		prt_char(out, ' ');
		struct btree *b = path_l(path)->b;
		bch2_bpos_to_text(out, b->data->min_key);
		prt_char(out, '-');
		bch2_bpos_to_text(out, b->key.k.p);
	}

#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}

static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
	switch (t) {
	case BTREE_NODE_UNLOCKED:
		return "unlocked";
	case BTREE_NODE_READ_LOCKED:
		return "read";
	case BTREE_NODE_INTENT_LOCKED:
		return "intent";
	case BTREE_NODE_WRITE_LOCKED:
		return "write";
	default:
		return NULL;
	}
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	bch2_btree_path_to_text_short(out, trans, path_idx);

	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
		prt_printf(out, "l=%u locks %s seq %u node ", l,
			   btree_node_locked_str(btree_node_locked_type(path, l)),
			   path->l[l].lock_seq);

		int ret = PTR_ERR_OR_ZERO(path->l[l].b);
		if (ret)
			prt_str(out, bch2_err_str(ret));
		else
			prt_printf(out, "%px", path->l[l].b);
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}

static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter) {
		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
		prt_newline(out);
	}
}

noinline __cold
void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	__bch2_trans_paths_to_text(out, trans, false);
}

static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_str(trans->c, buf.buf);
	printbuf_exit(&buf);
}

noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	__bch2_dump_trans_paths_updates(trans, false);
}

noinline __cold
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;
	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (nr > s->nr_max_paths) {
			s->nr_max_paths = nr;
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);

	trans->nr_paths_max = nr;
}

noinline __cold
int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (trace_trans_restart_too_many_iters_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_trans_paths_to_text(&buf, trans);
		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_too_many_iters);

	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}

static noinline void btree_path_overflow(struct btree_trans *trans)
{
	bch2_dump_trans_paths_updates(trans);
	bch_err(trans->c, "trans path overflow");
}

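/*
 * Double the number of paths this transaction supports. Everything is carved
 * out of a single allocation, laid out as:
 *
 *	paths_allocated bitmap | btree_trans_paths header | paths[] |
 *	sorted[] (plus 8 bytes of slack) | updates[]
 *
 * with the new path count stashed in the header via trans_paths_nr():
 */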
static noinline void btree_paths_realloc(struct btree_trans *trans)
{
	unsigned nr = trans->nr_paths * 2;

	void *p = kvzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			  sizeof(struct btree_trans_paths) +
			  nr * sizeof(struct btree_path) +
			  nr * sizeof(btree_path_idx_t) + 8 +
			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);

	unsigned long *paths_allocated = p;
	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	p += sizeof(struct btree_trans_paths);
	struct btree_path *paths = p;
	*trans_paths_nr(paths) = nr;
	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
	p += nr * sizeof(struct btree_path);

	btree_path_idx_t *sorted = p;
	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
	p += nr * sizeof(btree_path_idx_t) + 8;

	struct btree_insert_entry *updates = p;
	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));

	unsigned long *old = trans->paths_allocated;

	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
	rcu_assign_pointer(trans->paths,		paths);
	rcu_assign_pointer(trans->sorted,		sorted);
	rcu_assign_pointer(trans->updates,		updates);

	trans->nr_paths		= nr;

	if (old != trans->_paths_allocated)
		kfree_rcu_mightsleep(old);
}

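/*
 * Allocate a new path, doubling the paths array if all slots are in use;
 * returns 0 on overflow (slot 0 is reserved, so 0 never refers to a newly
 * allocated path):
 */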
static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
						btree_path_idx_t pos)
{
	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

	if (unlikely(idx == trans->nr_paths)) {
		if (trans->nr_paths == BTREE_ITER_MAX) {
			btree_path_overflow(trans);
			return 0;
		}

		btree_paths_realloc(trans);
	}

	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_paths_max))
		bch2_trans_update_max_paths(trans);

	__set_bit(idx, trans->paths_allocated);

	struct btree_path *path = &trans->paths[idx];
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, idx);
	trans->paths_sorted = false;
	return idx;
}

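/*
 * Find an existing path with the given btree/position/level that can be
 * shared, or allocate and initialize a new one; returns the path index with
 * a reference (and an intent reference, if BTREE_ITER_intent is set) held:
 */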
btree_path_idx_t bch2_path_get(struct btree_trans *trans,
			       enum btree_id btree_id, struct bpos pos,
			       unsigned locks_want, unsigned level,
			       unsigned flags, unsigned long ip)
{
	struct btree_path *path;
	bool cached = flags & BTREE_ITER_cached;
	bool intent = flags & BTREE_ITER_intent;
	struct trans_for_each_path_inorder_iter iter;
	btree_path_idx_t path_pos = 0, path_idx;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);
	bch2_trans_verify_locks(trans);

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (__btree_path_cmp(path,
				     btree_id,
				     cached,
				     pos,
				     level) > 0)
			break;

		path_pos = iter.path_idx;
	}

	if (path_pos &&
	    trans->paths[path_pos].cached	== cached &&
	    trans->paths[path_pos].btree_id	== btree_id &&
	    trans->paths[path_pos].level	== level) {
		trace_btree_path_get(trans, trans->paths + path_pos, &pos);

		__btree_path_get(trans, trans->paths + path_pos, intent);
		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = trans->paths + path_idx;
	} else {
		path_idx = btree_path_alloc(trans, path_pos);
		path = trans->paths + path_idx;

		__btree_path_get(trans, path, intent);
		path->pos			= pos;
		path->btree_id			= btree_id;
		path->cached			= cached;
		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked		= false;
		path->level			= level;
		path->locks_want		= locks_want;
		path->nodes_locked		= 0;
		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
		path->ip_allocated		= ip;
#endif
		trans->paths_sorted		= false;

		trace_btree_path_alloc(trans, path);
	}

	if (!(flags & BTREE_ITER_nopreserve))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't
	 * downgrade it here - if the transaction restarted because a btree
	 * node split needed to upgrade locks, we might be putting/getting the
	 * iterator again. Downgrading iterators only happens via
	 * bch2_trans_downgrade(), after a successful transaction commit.
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);

	return path_idx;
}

btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
					    enum btree_id btree_id,
					    unsigned level,
					    struct bpos pos)
{
	btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
			     BTREE_ITER_nopreserve|
			     BTREE_ITER_intent, _RET_IP_);
	path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

	struct btree_path *path = trans->paths + path_idx;
	bch2_btree_path_downgrade(trans, path);
	__bch2_btree_path_unlock(trans, path);
	return path_idx;
}

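/*
 * Peek the key at @path's current position, handling both cached (key cache)
 * and uncached paths; for uncached paths, a hole - a deleted key at
 * path->pos - is synthesized in @u if nothing is there:
 */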
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;
	struct bkey_s_c k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
			goto hole;
	} else {
		struct bkey_cached *ck = (void *) path->l[0].b;
		if (!ck)
			return bkey_s_c_null;

		EBUG_ON(path->btree_id != ck->key.btree_id ||
			!bkey_eq(path->pos, ck->key.pos));

		*u = ck->k->k;
		k = (struct bkey_s_c) { u, &ck->k->v };
	}

	return k;
hole:
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	if (!iter->path || trans->restarted)
		return;

	struct btree_path *path = btree_iter_path(trans, iter);
	path->preserve		= false;
	if (path->ref == 1)
		path->should_be_locked	= false;
}

/* Btree iterators: */

int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	iter->path = bch2_btree_path_set_pos(trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
	if (ret)
		return ret;

	struct btree_path *path = btree_iter_path(trans, iter);
	if (btree_path_node(path, path->level))
		btree_path_set_should_be_locked(trans, path);
	return 0;
}

/* Iterate across nodes (leaf and interior nodes) */

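/*
 * Example of walking a btree's nodes with the helpers below - an illustrative
 * sketch only, with transaction restart handling elided (real callers
 * normally use the for_each_btree_node() wrappers):
 *
 *	struct btree *b;
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		... use b ...
 *	}
 */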
struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);
	b = btree_path_node(path, path->level);
	if (!b)
		goto out;

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}

/* Only kept for -tools */
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(iter->trans);

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_trans_verify_not_unlocked_or_in_restart(trans);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))
		return NULL;

	/* got to end? */
	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);
		return NULL;
	}

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		goto err;
	}

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
	} else {
		if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, path->level + 1);

		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		iter->path = bch2_btree_path_set_pos(trans, iter->path,
					bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		path = btree_iter_path(trans, iter);
		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (ret)
			goto err;

		path = btree_iter_path(trans, iter);
		b = path->l[path->level].b;
	}

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}

/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, SPOS_MAX)
		     : bkey_eq(pos, SPOS_MAX));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_all_snapshots
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_is_extents))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

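/*
 * The helpers below overlay keys from the transaction's own unflushed state -
 * pending updates, and journal keys not yet written back to the btree - on
 * top of whatever the iterator found in the btree itself:
 */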
static noinline
void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_le(i->k->k.p, iter->pos) &&
		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bpos end = path_l(path)->b->key.k.p;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_ge(i->k->k.p, path->pos) &&
		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_eq(i->k->k.p, iter->pos)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
					      struct btree_iter *iter,
					      struct bpos end_pos)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	return bch2_journal_keys_peek_max(trans->c, iter->btree_id,
					   path->level,
					   path->pos,
					   end_pos,
					   &iter->journal_idx);
}

static noinline
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);

	if (k) {
		iter->k = k->k;
		return bkey_i_to_s_c(k);
	} else {
		return bkey_s_c_null;
	}
}

static noinline
void btree_trans_peek_journal(struct btree_trans *trans,
			      struct btree_iter *iter,
			      struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter,
				k->k ? k->k->p : path_l(path)->b->key.k.p);
	if (next_journal) {
		iter->k = next_journal->k;
		*k = bkey_i_to_s_c(next_journal);
	}
}

static struct bkey_i *bch2_btree_journal_peek_prev(struct btree_trans *trans,
					      struct btree_iter *iter,
					      struct bpos end_pos)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	return bch2_journal_keys_peek_prev_min(trans->c, iter->btree_id,
					   path->level,
					   path->pos,
					   end_pos,
					   &iter->journal_idx);
}

static noinline
void btree_trans_peek_prev_journal(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *next_journal =
		bch2_btree_journal_peek_prev(trans, iter,
				k->k ? k->k->p : path_l(path)->b->key.k.p);

	if (next_journal) {
		iter->k = next_journal->k;
		*k = bkey_i_to_s_c(next_journal);
	}
}

/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
static noinline
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k;
	int ret;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);

	if ((iter->flags & BTREE_ITER_key_cache_fill) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_intent, 0,
						     iter->flags|BTREE_ITER_cached|
						     BTREE_ITER_cached_nofill,
						     _THIS_IP_);

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret =	bch2_btree_path_traverse(trans, iter->key_cache_path,
					 iter->flags|BTREE_ITER_cached) ?:
		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
	if (!k.k)
		return k;

	if ((iter->flags & BTREE_ITER_all_snapshots) &&
	    !bpos_eq(pos, k.k->p))
		return bkey_s_c_null;

	iter->k = u;
	k.k = &iter->k;
	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
	return k;
}

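/*
 * Core of the forwards iterators: advance from @search_key, overlaying keys
 * from the key cache, journal and pending updates, and stepping past
 * whiteouts, until we find a live key or run off the end of the btree:
 */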
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k, k2;
	int ret;

	EBUG_ON(btree_iter_path(trans, iter)->cached);
	bch2_btree_iter_verify(iter);

	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			break;
		}

		struct btree_path *path = btree_iter_path(trans, iter);
		struct btree_path_level *l = path_l(path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			break;
		}

		btree_path_set_should_be_locked(trans, path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
		    k.k &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
			k = k2;
			if (bkey_err(k)) {
				bch2_btree_iter_set_pos(iter, iter->pos);
				break;
			}
		}

		if (unlikely(iter->flags & BTREE_ITER_with_journal))
			btree_trans_peek_journal(trans, iter, &k);

		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_updates(trans, iter, &k);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);
			continue;
		}

		if (likely(k.k)) {
			break;
		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
		} else {
			/* End of btree: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			break;
		}
	}

	bch2_btree_iter_verify(iter);
	return k;
}

/**
 * bch2_btree_iter_peek_max() - returns first key greater than or equal to
 * iterator's current position
 * @iter:	iterator to peek from
 * @end:	search limit: returns keys less than or equal to @end
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_s_c k;
	struct bpos iter_pos = iter->pos;
	int ret;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);
	bch2_btree_iter_verify_entry_exit(iter);
	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));

	ret = trans_maybe_inject_restart(trans, _RET_IP_);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	if (iter->update_path) {
		bch2_path_put_nokeep(trans, iter->update_path,
				     iter->flags & BTREE_ITER_intent);
		iter->update_path = 0;
	}

	while (1) {
		k = __bch2_btree_iter_peek(iter, search_key);
		if (unlikely(!k.k))
			goto end;
		if (unlikely(bkey_err(k)))
			goto out_no_locked;

		if (iter->flags & BTREE_ITER_filter_snapshots) {
			/*
			 * We need to check against @end before FILTER_SNAPSHOTS because
			 * if we get to a different inode than requested we might be
			 * seeing keys for a different snapshot tree that will all be
			 * filtered out.
			 *
			 * But we can't do the full check here, because bkey_start_pos()
			 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
			 * that's what we check against in extents mode:
			 */
			if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
				     ? bkey_gt(k.k->p, end)
				     : k.k->p.inode > end.inode))
				goto end;

			if (iter->update_path &&
			    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
				bch2_path_put_nokeep(trans, iter->update_path,
						     iter->flags & BTREE_ITER_intent);
				iter->update_path = 0;
			}

			if ((iter->flags & BTREE_ITER_intent) &&
			    !(iter->flags & BTREE_ITER_is_extents) &&
			    !iter->update_path) {
				struct bpos pos = k.k->p;

				if (pos.snapshot < iter->snapshot) {
					search_key = bpos_successor(k.k->p);
					continue;
				}

				pos.snapshot = iter->snapshot;

				/*
				 * advance, same as on exit for iter->path, but only up
				 * to snapshot
				 */
				__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
				iter->update_path = iter->path;

				iter->update_path = bch2_btree_path_set_pos(trans,
							iter->update_path, pos,
							iter->flags & BTREE_ITER_intent,
							_THIS_IP_);
				ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
				if (unlikely(ret)) {
					k = bkey_s_c_err(ret);
					goto out_no_locked;
				}
			}

			/*
			 * We can never have a key in a leaf node at POS_MAX, so
			 * we don't have to check these successor() calls:
			 */
			if (!bch2_snapshot_is_ancestor(trans->c,
						       iter->snapshot,
						       k.k->p.snapshot)) {
				search_key = bpos_successor(k.k->p);
				continue;
			}

			if (bkey_whiteout(k.k) &&
			    !(iter->flags & BTREE_ITER_key_cache_fill)) {
				search_key = bkey_successor(iter, k.k->p);
				continue;
			}
		}

		/*
		 * iter->pos should be monotonically increasing, and always be
		 * equal to the key we just returned - except extents can
		 * straddle iter->pos:
		 */
		if (!(iter->flags & BTREE_ITER_is_extents))
			iter_pos = k.k->p;
		else
			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_gt(iter_pos, end) :
			     iter->flags & BTREE_ITER_is_extents	? bkey_ge(iter_pos, end) :
									  bkey_gt(iter_pos, end)))
			goto end;

		break;
	}

	iter->pos = iter_pos;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
				iter->flags & BTREE_ITER_intent,
				btree_iter_ip_allocated(iter));

	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
	if (iter->update_path) {
		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
		if (unlikely(ret))
			k = bkey_s_c_err(ret);
		else
			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
	}

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		iter->pos.snapshot = iter->snapshot;

	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret)) {
		bch2_btree_iter_set_pos(iter, iter->pos);
		k = bkey_s_c_err(ret);
	}

	bch2_btree_iter_verify_entry_exit(iter);

	return k;
end:
	bch2_btree_iter_set_pos(iter, end);
	k = bkey_s_c_null;
	goto out_no_locked;
}

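/*
 * Example usage of bch2_btree_iter_peek_max() - an illustrative sketch only,
 * assuming a caller that already holds a btree_trans and handles transaction
 * restarts (real code typically uses the for_each_btree_key_max()-style
 * wrappers instead):
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, start, 0);
 *	while ((k = bch2_btree_iter_peek_max(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		... process k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */
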
/**
 * bch2_btree_iter_next() - returns first key greater than iterator's current
 * position
 * @iter:	iterator to peek from
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
}

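/*
 * Core of the backwards iterators: like __bch2_btree_iter_peek(), but walking
 * backwards from @search_key:
 */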
static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
{
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k, k2;

	bch2_btree_iter_verify(iter);

	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

		int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			break;
		}

		struct btree_path *path = btree_iter_path(trans, iter);
		struct btree_path_level *l = path_l(path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			break;
		}

		btree_path_set_should_be_locked(trans, path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);
		if (!k.k || bpos_gt(k.k->p, search_key)) {
			k = btree_path_level_prev(trans, path, l, &iter->k);

			BUG_ON(k.k && bpos_gt(k.k->p, search_key));
		}

		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
		    k.k &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
			k = k2;
			if (bkey_err(k2)) {
				bch2_btree_iter_set_pos(iter, iter->pos);
				break;
			}
		}

		if (unlikely(iter->flags & BTREE_ITER_with_journal))
			btree_trans_peek_prev_journal(trans, iter, &k);

		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_prev_updates(trans, iter, &k);

		if (likely(k.k && !bkey_deleted(k.k))) {
			break;
		} else if (k.k) {
			search_key = bpos_predecessor(k.k->p);
		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */
			search_key = bpos_predecessor(path->l[0].b->data->min_key);
		} else {
			/* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);
			k = bkey_s_c_null;
			break;
		}
	}

	bch2_btree_iter_verify(iter);
	return k;
}

/**
 * bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
 * iterator's current position
 * @iter:	iterator to peek from
 * @end:	search limit: returns keys greater than or equal to @end
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
{
	if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
	   !bkey_eq(iter->pos, POS_MAX)) {
		/*
		 * bkey_start_pos(), for extents, is not monotonically
		 * increasing until after filtering for snapshots:
		 *
		 * Thus, for extents we need to search forward until we find a
		 * real, visible extent - easiest to just use peek_slot()
		 * (which internally uses peek() for extents)
		 */
		struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
		if (bkey_err(k))
			return k;

		if (!bkey_deleted(k.k) &&
		    (!(iter->flags & BTREE_ITER_is_extents) ||
		     bkey_lt(bkey_start_pos(k.k), iter->pos)))
			return k;
	}

	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct bkey_s_c k;
	btree_path_idx_t saved_path = 0;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);
	bch2_btree_iter_verify_entry_exit(iter);
	EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bpos_eq(end, POS_MIN));

	int ret = trans_maybe_inject_restart(trans, _RET_IP_);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	while (1) {
		k = __bch2_btree_iter_peek_prev(iter, search_key);
		if (unlikely(!k.k))
			goto end;
		if (unlikely(bkey_err(k)))
			goto out_no_locked;

		if (iter->flags & BTREE_ITER_filter_snapshots) {
			struct btree_path *s = saved_path ? trans->paths + saved_path : NULL;
			if (s && bpos_lt(k.k->p, SPOS(s->pos.inode, s->pos.offset, iter->snapshot))) {
				/*
				 * If we have a saved candidate, and we're past
				 * the last possible snapshot overwrite, return
				 * it:
				 */
				bch2_path_put_nokeep(trans, iter->path,
					      iter->flags & BTREE_ITER_intent);
				iter->path = saved_path;
				saved_path = 0;
				k = bch2_btree_path_peek_slot(btree_iter_path(trans, iter), &iter->k);
				break;
			}

			/*
			 * We need to check against @end before FILTER_SNAPSHOTS because
			 * if we get to a different inode than requested we might be
			 * seeing keys for a different snapshot tree that will all be
			 * filtered out.
			 */
			if (unlikely(bkey_lt(k.k->p, end)))
				goto end;

			if (!bch2_snapshot_is_ancestor(trans->c, iter->snapshot, k.k->p.snapshot)) {
				search_key = bpos_predecessor(k.k->p);
				continue;
			}

			if (k.k->p.snapshot != iter->snapshot) {
				/*
				 * Have a key visible in iter->snapshot, but
				 * there might be overwrites: save it and keep
				 * searching. Unless it's a whiteout - then drop
				 * our previous saved candidate:
				 */
				if (saved_path) {
					bch2_path_put_nokeep(trans, saved_path,
					      iter->flags & BTREE_ITER_intent);
					saved_path = 0;
				}

				if (!bkey_whiteout(k.k)) {
					saved_path = btree_path_clone(trans, iter->path,
								iter->flags & BTREE_ITER_intent,
								_THIS_IP_);
					trace_btree_path_save_pos(trans,
								  trans->paths + iter->path,
								  trans->paths + saved_path);
				}

				search_key = bpos_predecessor(k.k->p);
				continue;
			}

			if (bkey_whiteout(k.k)) {
				search_key = bkey_predecessor(iter, k.k->p);
				search_key.snapshot = U32_MAX;
				continue;
			}
		}

		EBUG_ON(iter->flags & BTREE_ITER_all_snapshots		? bpos_gt(k.k->p, iter->pos) :
			iter->flags & BTREE_ITER_is_extents		? bkey_ge(bkey_start_pos(k.k), iter->pos) :
									  bkey_gt(k.k->p, iter->pos));

		if (unlikely(iter->flags & BTREE_ITER_all_snapshots	? bpos_lt(k.k->p, end) :
			     iter->flags & BTREE_ITER_is_extents	? bkey_le(k.k->p, end) :
									  bkey_lt(k.k->p, end)))
			goto end;

		break;
	}

	/* Extents can straddle iter->pos: */
	iter->pos = bpos_min(iter->pos, k.k->p);

	if (iter->flags & BTREE_ITER_filter_snapshots)
		iter->pos.snapshot = iter->snapshot;
out_no_locked:
	if (saved_path)
		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	return k;
end:
	bch2_btree_iter_set_pos(iter, end);
	k = bkey_s_c_null;
	goto out_no_locked;
}

/**
 * bch2_btree_iter_prev() - returns first key less than iterator's current
 * position
 * @iter:	iterator to peek from
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
}

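/*
 * Return the key at the iterator's current position, returning a hole (a
 * deleted key) if there's nothing there. In extents/snapshots mode the
 * covering key may not start exactly at iter->pos, so this searches forward
 * with bch2_btree_iter_peek_max() to find it:
 */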
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;

	bch2_trans_verify_not_unlocked_or_in_restart(trans);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
	EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));

	ret = trans_maybe_inject_restart(trans, _RET_IP_);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	/* extents can't span inode numbers: */
	if ((iter->flags & BTREE_ITER_is_extents) &&
	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	}

	search_key = btree_iter_search_key(iter);
	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	struct btree_path *path = btree_iter_path(trans, iter);
	if (unlikely(!btree_path_node(path, path->level)))
		return bkey_s_c_null;

	if ((iter->flags & BTREE_ITER_cached) ||
	    !(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
		k = bkey_s_c_null;

		if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
			     trans->nr_updates)) {
			bch2_btree_trans_peek_slot_updates(trans, iter, &k);
			if (k.k)
				goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
			goto out;

		if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
			if (!bkey_err(k))
				iter->k = *k.k;
			/* We're not returning a key from iter->path: */
			goto out_no_locked;
		}

		k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
		if (unlikely(!k.k))
			goto out_no_locked;

		if (unlikely(k.k->type == KEY_TYPE_whiteout &&
			     (iter->flags & BTREE_ITER_filter_snapshots) &&
			     !(iter->flags & BTREE_ITER_key_cache_fill)))
			iter->k.type = KEY_TYPE_deleted;
	} else {
		struct bpos next;
		struct bpos end = iter->pos;

		if (iter->flags & BTREE_ITER_is_extents)
			end.offset = U64_MAX;

		EBUG_ON(btree_iter_path(trans, iter)->level);

		if (iter->flags & BTREE_ITER_intent) {
			struct btree_iter iter2;

			bch2_trans_copy_iter(&iter2, iter);
			k = bch2_btree_iter_peek_max(&iter2, end);

			if (k.k && !bkey_err(k)) {
				swap(iter->key_cache_path, iter2.key_cache_path);
				iter->k = iter2.k;
				k.k = &iter->k;
			}
			bch2_trans_iter_exit(trans, &iter2);
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek_max(iter, end);
			if (unlikely(bkey_err(k)))
				bch2_btree_iter_set_pos(iter, pos);
			else
				iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			goto out_no_locked;
2858
2859                 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2860
2861                 if (bkey_lt(iter->pos, next)) {
2862                         bkey_init(&iter->k);
2863                         iter->k.p = iter->pos;
2864
2865                         if (iter->flags & BTREE_ITER_is_extents) {
2866                                 bch2_key_resize(&iter->k,
2867                                                 min_t(u64, KEY_SIZE_MAX,
2868                                                       (next.inode == iter->pos.inode
2869                                                        ? next.offset
2870                                                        : KEY_OFFSET_MAX) -
2871                                                       iter->pos.offset));
2872                                 EBUG_ON(!iter->k.size);
2873                         }
2874
2875                         k = (struct bkey_s_c) { &iter->k, NULL };
2876                 }
2877         }
2878 out:
2879         btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
2880 out_no_locked:
2881         bch2_btree_iter_verify_entry_exit(iter);
2882         bch2_btree_iter_verify(iter);
2883         ret = bch2_btree_iter_verify_ret(iter, k);
2884         if (unlikely(ret))
2885                 return bkey_s_c_err(ret);
2886
2887         return k;
2888 }
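/*
 * Caller-side sketch (illustrative): peek_slot() answers "what is at exactly
 * iter->pos?" - an empty slot comes back as a synthesized KEY_TYPE_deleted
 * key (sized to cover the hole on extents btrees, as built above), so
 * callers usually test bkey_deleted() rather than k.k == NULL:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && k.k && !bkey_deleted(k.k))
 *		...a live key exists at pos...
 *	bch2_trans_iter_exit(trans, &iter);
 */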
2889
2890 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2891 {
2892         if (!bch2_btree_iter_advance(iter))
2893                 return bkey_s_c_null;
2894
2895         return bch2_btree_iter_peek_slot(iter);
2896 }
2897
2898 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2899 {
2900         if (!bch2_btree_iter_rewind(iter))
2901                 return bkey_s_c_null;
2902
2903         return bch2_btree_iter_peek_slot(iter);
2904 }
2905
2906 /* Obsolete, but still used by the Rust wrapper in -tools */
2907 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2908 {
2909         struct bkey_s_c k;
2910
2911         while (btree_trans_too_many_iters(iter->trans) ||
2912                (k = bch2_btree_iter_peek_type(iter, iter->flags),
2913                 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2914                 bch2_trans_begin(iter->trans);
2915
2916         return k;
2917 }
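/*
 * The loop above is the standard restart idiom: on
 * BCH_ERR_transaction_restart the operation is retried from scratch after
 * bch2_trans_begin(), and btree_trans_too_many_iters() restarts proactively
 * when the path table is close to full instead of failing partway through.
 */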
2918
2919 /* new transactional stuff: */
2920
2921 #ifdef CONFIG_BCACHEFS_DEBUG
2922 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2923 {
2924         struct btree_path *path;
2925         unsigned i;
2926
2927         BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2928
2929         trans_for_each_path(trans, path, i) {
2930                 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2931                 BUG_ON(trans->sorted[path->sorted_idx] != i);
2932         }
2933
2934         for (i = 0; i < trans->nr_sorted; i++) {
2935                 unsigned idx = trans->sorted[i];
2936
2937                 BUG_ON(!test_bit(idx, trans->paths_allocated));
2938                 BUG_ON(trans->paths[idx].sorted_idx != i);
2939         }
2940 }
2941
2942 static void btree_trans_verify_sorted(struct btree_trans *trans)
2943 {
2944         struct btree_path *path, *prev = NULL;
2945         struct trans_for_each_path_inorder_iter iter;
2946
2947         if (!bch2_debug_check_iterators)
2948                 return;
2949
2950         trans_for_each_path_inorder(trans, path, iter) {
2951                 if (prev && btree_path_cmp(prev, path) > 0) {
2952                         __bch2_dump_trans_paths_updates(trans, true);
2953                         panic("trans paths out of order!\n");
2954                 }
2955                 prev = path;
2956         }
2957 }
2958 #else
2959 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2960 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2961 #endif
2962
2963 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2964 {
2965         int i, l = 0, r = trans->nr_sorted, inc = 1;
2966         bool swapped;
2967
2968         btree_trans_verify_sorted_refs(trans);
2969
2970         if (trans->paths_sorted)
2971                 goto out;
2972
2973         /*
2974          * Cocktail shaker sort: this is efficient because the paths will
2975          * already be mostly sorted.
2976          */
2977         do {
2978                 swapped = false;
2979
2980                 for (i = inc > 0 ? l : r - 2;
2981                      i + 1 < r && i >= l;
2982                      i += inc) {
2983                         if (btree_path_cmp(trans->paths + trans->sorted[i],
2984                                            trans->paths + trans->sorted[i + 1]) > 0) {
2985                                 swap(trans->sorted[i], trans->sorted[i + 1]);
2986                                 trans->paths[trans->sorted[i]].sorted_idx = i;
2987                                 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2988                                 swapped = true;
2989                         }
2990                 }
2991
2992                 if (inc > 0)
2993                         --r;
2994                 else
2995                         l++;
2996                 inc = -inc;
2997         } while (swapped);
2998
2999         trans->paths_sorted = true;
3000 out:
3001         btree_trans_verify_sorted(trans);
3002 }
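/*
 * Worked example (hypothetical ordering): if sorted[] compares as [A, C, B],
 * the first forward pass swaps C/B to give [A, B, C], sets swapped and
 * shrinks r; the return (backward) pass finds nothing to swap and the loop
 * exits.  On the mostly-sorted arrays seen here that's a couple of O(n)
 * passes, which is why this beats a general-purpose sort.
 */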
3003
3004 static inline void btree_path_list_remove(struct btree_trans *trans,
3005                                           struct btree_path *path)
3006 {
3007         EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3008 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3009         trans->nr_sorted--;
3010         memmove_u64s_down_small(trans->sorted + path->sorted_idx,
3011                                 trans->sorted + path->sorted_idx + 1,
3012                                 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3013                                              sizeof(u64) / sizeof(btree_path_idx_t)));
3014 #else
3015         array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3016 #endif
3017         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3018                 trans->paths[trans->sorted[i]].sorted_idx = i;
3019 }
3020
3021 static inline void btree_path_list_add(struct btree_trans *trans,
3022                                        btree_path_idx_t pos,
3023                                        btree_path_idx_t path_idx)
3024 {
3025         struct btree_path *path = trans->paths + path_idx;
3026
3027         path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
3028
3029 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3030         memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
3031                               trans->sorted + path->sorted_idx,
3032                               DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
3033                                            sizeof(u64) / sizeof(btree_path_idx_t)));
3034         trans->nr_sorted++;
3035         trans->sorted[path->sorted_idx] = path_idx;
3036 #else
3037         array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
3038 #endif
3039
3040         for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
3041                 trans->paths[trans->sorted[i]].sorted_idx = i;
3042
3043         btree_trans_verify_sorted_refs(trans);
3044 }
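/*
 * Both list helpers above shift the tail of sorted[] when inserting or
 * removing.  When unaligned access is cheap they do it with word-sized
 * copies: assuming btree_path_idx_t stays a single byte (so
 * sizeof(u64)/sizeof(btree_path_idx_t) == 8), moving n entries takes
 * DIV_ROUND_UP(n, 8) u64 copies - e.g. shifting 20 entries costs 3 word
 * moves instead of a byte-wise memmove.  Without
 * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS they fall back to the generic
 * array_insert_item()/array_remove_item() helpers.
 */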
3045
3046 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3047 {
3048         if (iter->update_path)
3049                 bch2_path_put_nokeep(trans, iter->update_path,
3050                               iter->flags & BTREE_ITER_intent);
3051         if (iter->path)
3052                 bch2_path_put(trans, iter->path,
3053                               iter->flags & BTREE_ITER_intent);
3054         if (iter->key_cache_path)
3055                 bch2_path_put(trans, iter->key_cache_path,
3056                               iter->flags & BTREE_ITER_intent);
3057         iter->path              = 0;
3058         iter->update_path       = 0;
3059         iter->key_cache_path    = 0;
3060         iter->trans             = NULL;
3061 }
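/*
 * Every successful bch2_trans_iter_init()/bch2_trans_node_iter_init() must
 * be paired with a bch2_trans_iter_exit(); paths still holding refs at
 * bch2_trans_put() time are reported by check_btree_paths_leaked() below.
 */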
3062
3063 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
3064                           struct btree_iter *iter,
3065                           enum btree_id btree_id, struct bpos pos,
3066                           unsigned flags)
3067 {
3068         bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
3069                                bch2_btree_iter_flags(trans, btree_id, 0, flags),
3070                                _RET_IP_);
3071 }
3072
3073 void bch2_trans_node_iter_init(struct btree_trans *trans,
3074                                struct btree_iter *iter,
3075                                enum btree_id btree_id,
3076                                struct bpos pos,
3077                                unsigned locks_want,
3078                                unsigned depth,
3079                                unsigned flags)
3080 {
3081         flags |= BTREE_ITER_not_extents;
3082         flags |= BTREE_ITER_snapshot_field;
3083         flags |= BTREE_ITER_all_snapshots;
3084
3085         if (!depth && btree_id_cached(trans->c, btree_id))
3086                 flags |= BTREE_ITER_with_key_cache;
3087
3088         bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
3089                                bch2_btree_iter_flags(trans, btree_id, depth, flags),
3090                                _RET_IP_);
3091
3092         iter->min_depth = depth;
3093
3094         struct btree_path *path = btree_iter_path(trans, iter);
3095         BUG_ON(path->locks_want  < min(locks_want, BTREE_MAX_DEPTH));
3096         BUG_ON(path->level      != depth);
3097         BUG_ON(iter->min_depth  != depth);
3098 }
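/*
 * Usage sketch (illustrative): node iterators walk whole btree nodes rather
 * than keys, e.g. for debug dumps of interior nodes at a given level:
 *
 *	struct btree *b;
 *
 *	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
 *				  0, level, 0);
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     !IS_ERR_OR_NULL(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		...inspect b...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * (Error handling elided: IS_ERR(b) includes transaction restarts.)
 */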
3099
3100 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3101 {
3102         struct btree_trans *trans = src->trans;
3103
3104         *dst = *src;
3105 #ifdef TRACK_PATH_ALLOCATED
3106         dst->ip_allocated = _RET_IP_;
3107 #endif
3108         if (src->path)
3109                 __btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
3110         if (src->update_path)
3111                 __btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
3112         dst->key_cache_path = 0;
3113 }
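/*
 * Note the asymmetry in the copy above: path and update_path are shared with
 * the source (their refcounts are bumped), but key_cache_path is not - the
 * copy allocates its own on demand.  bch2_btree_iter_peek_slot() relies on
 * this when it swaps key_cache_path back out of its temporary iterator.
 */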
3114
3115 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3116 {
3117         struct bch_fs *c = trans->c;
3118         unsigned new_top = trans->mem_top + size;
3119         unsigned old_bytes = trans->mem_bytes;
3120         unsigned new_bytes = roundup_pow_of_two(new_top);
3121         int ret;
3122         void *new_mem;
3123         void *p;
3124
3125         WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3126
3127         ret = trans_maybe_inject_restart(trans, _RET_IP_);
3128         if (ret)
3129                 return ERR_PTR(ret);
3130
3131         struct btree_transaction_stats *s = btree_trans_stats(trans);
3132         s->max_mem = max(s->max_mem, new_bytes);
3133
3134         if (trans->used_mempool) {
3135                 if (trans->mem_bytes >= new_bytes)
3136                         goto out_change_top;
3137
3138                 /* No more space in the mempool allocation, need to kmalloc a new one */
3139                 new_mem = kmalloc(new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3140                 if (unlikely(!new_mem)) {
3141                         bch2_trans_unlock(trans);
3142
3143                         new_mem = kmalloc(new_bytes, GFP_KERNEL);
3144                         if (!new_mem)
3145                                 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3146
3147                         ret = bch2_trans_relock(trans);
3148                         if (ret) {
3149                                 kfree(new_mem);
3150                                 return ERR_PTR(ret);
3151                         }
3152                 }
3153                 memcpy(new_mem, trans->mem, trans->mem_top);
3154                 trans->used_mempool = false;
3155                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3156                 goto out_new_mem;
3157         }
3158
3159         new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
3160         if (unlikely(!new_mem)) {
3161                 bch2_trans_unlock(trans);
3162
3163                 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
3164                 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3165                         new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3166                         new_bytes = BTREE_TRANS_MEM_MAX;
3167                         memcpy(new_mem, trans->mem, trans->mem_top);
3168                         trans->used_mempool = true;
3169                         kfree(trans->mem);
3170                 }
3171
3172                 if (!new_mem)
3173                         return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
3174
3175                 trans->mem = new_mem;
3176                 trans->mem_bytes = new_bytes;
3177
3178                 ret = bch2_trans_relock(trans);
3179                 if (ret)
3180                         return ERR_PTR(ret);
3181         }
3182 out_new_mem:
3183         trans->mem = new_mem;
3184         trans->mem_bytes = new_bytes;
3185
3186         if (old_bytes) {
3187                 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
3188                 return ERR_PTR(btree_trans_restart_ip(trans,
3189                                         BCH_ERR_transaction_restart_mem_realloced, _RET_IP_));
3190         }
3191 out_change_top:
3192         p = trans->mem + trans->mem_top;
3193         trans->mem_top += size;
3194         memset(p, 0, size);
3195         return p;
3196 }
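/*
 * Callers normally go through the bch2_trans_kmalloc() inline wrapper;
 * allocations are bump-allocated out of trans->mem and all freed together
 * when the transaction is reset or dropped.  Sketch of a caller:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	int ret = PTR_ERR_OR_ZERO(k);
 *	if (ret)
 *		return ret;	(may be a transaction restart, per above)
 *	bkey_init(&k->k);
 *
 * Note the restart when an already-used buffer had to be reallocated:
 * pointers handed out earlier would dangle, so the transaction restarts
 * instead of risking use-after-free.
 */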
3197
3198 static inline void check_srcu_held_too_long(struct btree_trans *trans)
3199 {
3200         WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
3201              "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
3202              (jiffies - trans->srcu_lock_time) / HZ);
3203 }
3204
3205 void bch2_trans_srcu_unlock(struct btree_trans *trans)
3206 {
3207         if (trans->srcu_held) {
3208                 struct bch_fs *c = trans->c;
3209                 struct btree_path *path;
3210                 unsigned i;
3211
3212                 trans_for_each_path(trans, path, i)
3213                         if (path->cached && !btree_node_locked(path, 0))
3214                                 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
3215
3216                 check_srcu_held_too_long(trans);
3217                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3218                 trans->srcu_held = false;
3219         }
3220 }
3221
3222 static void bch2_trans_srcu_lock(struct btree_trans *trans)
3223 {
3224         if (!trans->srcu_held) {
3225                 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
3226                 trans->srcu_lock_time   = jiffies;
3227                 trans->srcu_held = true;
3228         }
3229 }
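/*
 * The srcu read lock pins structures freed via call_srcu() on
 * btree_trans_barrier - in particular key cache entries referenced by cached
 * paths - which is why bch2_trans_srcu_unlock() above poisons cached paths'
 * node pointers before dropping it.  Holding it also stalls memory reclaim,
 * hence the ten-second warning and the periodic unlock in bch2_trans_begin()
 * below.
 */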
3230
3231 /**
3232  * bch2_trans_begin() - reset a transaction after an interrupted attempt
3233  * @trans: transaction to reset
3234  *
3235  * Returns:     current restart counter, to be used with trans_was_restarted()
3236  *
3237  * While iterating over nodes or updating nodes, an attempt to lock a btree
3238  * node may return BCH_ERR_transaction_restart when the trylock fails. When
3239  * this occurs, bch2_trans_begin() should be called and the transaction retried.
3240  */
3241 u32 bch2_trans_begin(struct btree_trans *trans)
3242 {
3243         struct btree_path *path;
3244         unsigned i;
3245         u64 now;
3246
3247         bch2_trans_reset_updates(trans);
3248
3249         trans->restart_count++;
3250         trans->mem_top                  = 0;
3251         trans->journal_entries          = NULL;
3252
3253         trans_for_each_path(trans, path, i) {
3254                 path->should_be_locked = false;
3255
3256                 /*
3257                  * If the transaction wasn't restarted, we're presumably
3258                  * doing something new: don't keep iterators except the ones
3259                  * that are in use - except for the subvolumes btree:
3260                  */
3261                 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3262                         path->preserve = false;
3263
3264                 /*
3265                  * XXX: we probably shouldn't be doing this if the transaction
3266                  * was restarted, but currently we still overflow transaction
3267                  * iterators if we do that
3268                  */
3269                 if (!path->ref && !path->preserve)
3270                         __bch2_path_free(trans, i);
3271                 else
3272                         path->preserve = false;
3273         }
3274
3275         now = local_clock();
3276
3277         if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
3278             time_after64(now, trans->last_begin_time + 10))
3279                 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
3280                                          trans->last_begin_time, now);
3281
3282         if (!trans->restarted &&
3283             (need_resched() ||
3284              time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
3285                 bch2_trans_unlock(trans);
3286                 cond_resched();
3287                 now = local_clock();
3288         }
3289         trans->last_begin_time = now;
3290
3291         if (unlikely(trans->srcu_held &&
3292                      time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
3293                 bch2_trans_srcu_unlock(trans);
3294
3295         trans->last_begin_ip = _RET_IP_;
3296
3297 #ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
3298         if (trans->restarted) {
3299                 trans->restart_count_this_trans++;
3300         } else {
3301                 trans->restart_count_this_trans = 0;
3302         }
3303 #endif
3304
3305         trans_set_locked(trans, false);
3306
3307         if (trans->restarted) {
3308                 bch2_btree_path_traverse_all(trans);
3309                 trans->notrace_relock_fail = false;
3310         }
3311
3312         bch2_trans_verify_not_unlocked_or_in_restart(trans);
3313         return trans->restart_count;
3314 }
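/*
 * The canonical retry loop built on bch2_trans_begin() (a sketch -
 * do_something() is hypothetical; real code mostly uses the
 * lockrestart_do()/commit_do() helpers rather than open-coding this):
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_something(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */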
3315
3316 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
3317
3318 unsigned bch2_trans_get_fn_idx(const char *fn)
3319 {
3320         for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
3321                 if (!bch2_btree_transaction_fns[i] ||
3322                     bch2_btree_transaction_fns[i] == fn) {
3323                         bch2_btree_transaction_fns[i] = fn;
3324                         return i;
3325                 }
3326
3327         pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
3328         return 0;
3329 }
3330
3331 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
3332         __acquires(&c->btree_trans_barrier)
3333 {
3334         struct btree_trans *trans;
3335
3336         if (IS_ENABLED(__KERNEL__)) {
3337                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
3338                 if (trans) {
3339                         memset(trans, 0, offsetof(struct btree_trans, list));
3340                         goto got_trans;
3341                 }
3342         }
3343
3344         trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
3345         memset(trans, 0, sizeof(*trans));
3346
3347         seqmutex_lock(&c->btree_trans_lock);
3348         if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
3349                 struct btree_trans *pos;
3350                 pid_t pid = current->pid;
3351
3352                 trans->locking_wait.task = current;
3353
3354                 list_for_each_entry(pos, &c->btree_trans_list, list) {
3355                         struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
3356                         /*
3357                          * We'd much prefer to be stricter here and completely
3358                          * disallow multiple btree_trans in the same thread -
3359                          * but the data move path calls bch2_write when we
3360                          * already have a btree_trans initialized.
3361                          */
3362                         BUG_ON(pos_task &&
3363                                pid == pos_task->pid &&
3364                                pos->locked);
3365                 }
3366         }
3367
3368         list_add(&trans->list, &c->btree_trans_list);
3369         seqmutex_unlock(&c->btree_trans_lock);
3370 got_trans:
3371         trans->c                = c;
3372         trans->last_begin_time  = local_clock();
3373         trans->fn_idx           = fn_idx;
3374         trans->locking_wait.task = current;
3375         trans->journal_replay_not_finished =
3376                 unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
3377                 atomic_inc_not_zero(&c->journal_keys.ref);
3378         trans->nr_paths         = ARRAY_SIZE(trans->_paths);
3379         trans->paths_allocated  = trans->_paths_allocated;
3380         trans->sorted           = trans->_sorted;
3381         trans->paths            = trans->_paths;
3382         trans->updates          = trans->_updates;
3383
3384         *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3385
3386         trans->paths_allocated[0] = 1;
3387
3388         static struct lock_class_key lockdep_key;
3389         lockdep_init_map(&trans->dep_map, "bcachefs_btree", &lockdep_key, 0);
3390
3391         if (fn_idx < BCH_TRANSACTIONS_NR) {
3392                 trans->fn = bch2_btree_transaction_fns[fn_idx];
3393
3394                 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3395
3396                 if (s->max_mem) {
3397                         unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3398
3399                         trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3400                         if (likely(trans->mem))
3401                                 trans->mem_bytes = expected_mem_bytes;
3402                 }
3403
3404                 trans->nr_paths_max = s->nr_max_paths;
3405                 trans->journal_entries_size = s->journal_entries_size;
3406         }
3407
3408         trans->srcu_idx         = srcu_read_lock(&c->btree_trans_barrier);
3409         trans->srcu_lock_time   = jiffies;
3410         trans->srcu_held        = true;
3411         trans_set_locked(trans, false);
3412
3413         closure_init_stack_release(&trans->ref);
3414         return trans;
3415 }
3416
3417 static void check_btree_paths_leaked(struct btree_trans *trans)
3418 {
3419 #ifdef CONFIG_BCACHEFS_DEBUG
3420         struct bch_fs *c = trans->c;
3421         struct btree_path *path;
3422         unsigned i;
3423
3424         trans_for_each_path(trans, path, i)
3425                 if (path->ref)
3426                         goto leaked;
3427         return;
3428 leaked:
3429         bch_err(c, "btree paths leaked from %s!", trans->fn);
3430         trans_for_each_path(trans, path, i)
3431                 if (path->ref)
3432                         printk(KERN_ERR "  btree %s %pS\n",
3433                                bch2_btree_id_str(path->btree_id),
3434                                (void *) path->ip_allocated);
3435         /* Be noisy about this: */
3436         bch2_fatal_error(c);
3437 #endif
3438 }
3439
3440 void bch2_trans_put(struct btree_trans *trans)
3441         __releases(&c->btree_trans_barrier)
3442 {
3443         struct bch_fs *c = trans->c;
3444
3445         if (trans->restarted)
3446                 bch2_trans_in_restart_error(trans);
3447
3448         bch2_trans_unlock(trans);
3449
3450         trans_for_each_update(trans, i)
3451                 __btree_path_put(trans, trans->paths + i->path, true);
3452         trans->nr_updates       = 0;
3453
3454         check_btree_paths_leaked(trans);
3455
3456         if (trans->srcu_held) {
3457                 check_srcu_held_too_long(trans);
3458                 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3459         }
3460
3461         if (unlikely(trans->journal_replay_not_finished))
3462                 bch2_journal_keys_put(c);
3463
3464         /*
3465          * trans->ref protects trans->locking_wait.task and the btree paths
3466          * array, both of which are used by the lock cycle detector
3467          */
3468         closure_return_sync(&trans->ref);
3469         trans->locking_wait.task = NULL;
3470
3471 #ifdef CONFIG_BCACHEFS_DEBUG
3472         darray_exit(&trans->last_restarted_trace);
3473 #endif
3474
3475         unsigned long *paths_allocated = trans->paths_allocated;
3476         trans->paths_allocated  = NULL;
3477         trans->paths            = NULL;
3478
3479         if (paths_allocated != trans->_paths_allocated)
3480                 kvfree_rcu_mightsleep(paths_allocated);
3481
3482         if (trans->used_mempool)
3483                 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3484         else
3485                 kfree(trans->mem);
3486
3487         /* Userspace doesn't have a real percpu implementation: */
3488         if (IS_ENABLED(__KERNEL__))
3489                 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3490
3491         if (trans) {
3492                 seqmutex_lock(&c->btree_trans_lock);
3493                 list_del(&trans->list);
3494                 seqmutex_unlock(&c->btree_trans_lock);
3495
3496                 mempool_free(trans, &c->btree_trans_pool);
3497         }
3498 }
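/*
 * Typical lifetime (sketch):
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *
 *	...iterator operations, commits, retries...
 *
 *	bch2_trans_put(trans);
 *
 * The single-entry percpu cache (btree_trans_bufs) makes this cheap for the
 * back-to-back get/put pattern common in the I/O paths.
 */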
3499
3500 bool bch2_current_has_btree_trans(struct bch_fs *c)
3501 {
3502         seqmutex_lock(&c->btree_trans_lock);
3503         struct btree_trans *trans;
3504         bool ret = false;
3505         list_for_each_entry(trans, &c->btree_trans_list, list)
3506                 if (trans->locking_wait.task == current &&
3507                     trans->locked) {
3508                         ret = true;
3509                         break;
3510                 }
3511         seqmutex_unlock(&c->btree_trans_lock);
3512         return ret;
3513 }
3514
3515 static void __maybe_unused
3516 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3517                                       struct btree_bkey_cached_common *b)
3518 {
3519         struct six_lock_count c = six_lock_counts(&b->lock);
3520         struct task_struct *owner;
3521         pid_t pid;
3522
3523         rcu_read_lock();
3524         owner = READ_ONCE(b->lock.owner);
3525         pid = owner ? owner->pid : 0;
3526         rcu_read_unlock();
3527
3528         prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
3529         bch2_btree_id_to_text(out, b->btree_id);
3530         prt_printf(out, " l=%u:", b->level);
3531         bch2_bpos_to_text(out, btree_node_pos(b));
3532
3533         prt_printf(out, "\t locks %u:%u:%u held by pid %u",
3534                    c.n[0], c.n[1], c.n[2], pid);
3535 }
3536
3537 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3538 {
3539         struct btree_bkey_cached_common *b;
3540         static char lock_types[] = { 'r', 'i', 'w' };
3541         struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3542         unsigned l, idx;
3543
3544         /* before rcu_read_lock(): */
3545         bch2_printbuf_make_room(out, 4096);
3546
3547         if (!out->nr_tabstops) {
3548                 printbuf_tabstop_push(out, 16);
3549                 printbuf_tabstop_push(out, 32);
3550         }
3551
3552         prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3553
3554         /* trans->paths is rcu protected vs. freeing */
3555         rcu_read_lock();
3556         out->atomic++;
3557
3558         struct btree_path *paths = rcu_dereference(trans->paths);
3559         if (!paths)
3560                 goto out;
3561
3562         unsigned long *paths_allocated = trans_paths_allocated(paths);
3563
3564         trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3565                 struct btree_path *path = paths + idx;
3566                 if (!path->nodes_locked)
3567                         continue;
3568
3569                 prt_printf(out, "  path %u %c ",
3570                            idx,
3571                            path->cached ? 'c' : 'b');
3572                 bch2_btree_id_to_text(out, path->btree_id);
3573                 prt_printf(out, " l=%u:", path->level);
3574                 bch2_bpos_to_text(out, path->pos);
3575                 prt_newline(out);
3576
3577                 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3578                         if (btree_node_locked(path, l) &&
3579                             !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3580                                 prt_printf(out, "    %c l=%u ",
3581                                            lock_types[btree_node_locked_type(path, l)], l);
3582                                 bch2_btree_bkey_cached_common_to_text(out, b);
3583                                 prt_newline(out);
3584                         }
3585                 }
3586         }
3587
3588         b = READ_ONCE(trans->locking);
3589         if (b) {
3590                 prt_printf(out, "  blocked for %lluus on\n",
3591                            div_u64(local_clock() - trans->locking_wait.start_time, 1000));
3592                 prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
3593                 bch2_btree_bkey_cached_common_to_text(out, b);
3594                 prt_newline(out);
3595         }
3596 out:
3597         --out->atomic;
3598         rcu_read_unlock();
3599 }
3600
3601 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3602 {
3603         struct btree_transaction_stats *s;
3604         struct btree_trans *trans;
3605         int cpu;
3606
3607         if (c->btree_trans_bufs)
3608                 for_each_possible_cpu(cpu) {
3609                         struct btree_trans *trans =
3610                                 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3611
3612                         if (trans) {
3613                                 seqmutex_lock(&c->btree_trans_lock);
3614                                 list_del(&trans->list);
3615                                 seqmutex_unlock(&c->btree_trans_lock);
3616                         }
3617                         kfree(trans);
3618                 }
3619         free_percpu(c->btree_trans_bufs);
3620
3621         trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3622         if (trans)
3623                 panic("%s leaked btree_trans\n", trans->fn);
3624
3625         for (s = c->btree_transaction_stats;
3626              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3627              s++) {
3628                 kfree(s->max_paths_text);
3629                 bch2_time_stats_exit(&s->lock_hold_times);
3630         }
3631
3632         if (c->btree_trans_barrier_initialized) {
3633                 synchronize_srcu_expedited(&c->btree_trans_barrier);
3634                 cleanup_srcu_struct(&c->btree_trans_barrier);
3635         }
3636         mempool_exit(&c->btree_trans_mem_pool);
3637         mempool_exit(&c->btree_trans_pool);
3638 }
3639
3640 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3641 {
3642         struct btree_transaction_stats *s;
3643
3644         for (s = c->btree_transaction_stats;
3645              s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3646              s++) {
3647                 bch2_time_stats_init(&s->duration);
3648                 bch2_time_stats_init(&s->lock_hold_times);
3649                 mutex_init(&s->lock);
3650         }
3651
3652         INIT_LIST_HEAD(&c->btree_trans_list);
3653         seqmutex_init(&c->btree_trans_lock);
3654 }
3655
3656 int bch2_fs_btree_iter_init(struct bch_fs *c)
3657 {
3658         int ret;
3659
3660         c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3661         if (!c->btree_trans_bufs)
3662                 return -ENOMEM;
3663
3664         ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3665                                           sizeof(struct btree_trans)) ?:
3666                 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3667                                           BTREE_TRANS_MEM_MAX) ?:
3668                 init_srcu_struct(&c->btree_trans_barrier);
3669         if (ret)
3670                 return ret;
3671
3672         /*
3673          * static annotation (hackily done) for lock ordering of reclaim vs.
3674          * btree node locks:
3675          */
3676 #ifdef CONFIG_LOCKDEP
3677         fs_reclaim_acquire(GFP_KERNEL);
3678         struct btree_trans *trans = bch2_trans_get(c);
3679         trans_set_locked(trans, false);
3680         bch2_trans_put(trans);
3681         fs_reclaim_release(GFP_KERNEL);
3682 #endif
3683
3684         c->btree_trans_barrier_initialized = true;
3685         return 0;
3686 }