/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "btree_types.h"

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					       const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
	return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
	unsigned long *v = trans_paths_nr(paths);

	return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
	for (_idx = _start;						\
	     (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr;	\
	     _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
	unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
	/*
	 * Open coded find_next_bit(), because
	 *  - this is a fast path, we can't afford the function call
	 *  - and we know that nr_paths is a multiple of BITS_PER_LONG
	 */
	while (*idx < trans->nr_paths) {
		unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
		if (v) {
			*idx += __ffs(v);
			return trans->paths + *idx;
		}

		*idx += BITS_PER_LONG;
		*idx &= ~(BITS_PER_LONG - 1);
		w++;
	}

	return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned by
 * another thread, for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = __trans_next_path((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path(_trans, _path, _idx)			\
	trans_for_each_path_from(_trans, _path, _idx, 1)

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)			\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = trans->sorted[_iter.sorted_idx],		\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
	btree_path_idx_t	sorted_idx;
	btree_path_idx_t	path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)		\
	for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };	\
	     (_iter.path_idx = trans->sorted[_iter.sorted_idx],		\
	      _path = (_trans)->paths + _iter.path_idx,			\
	      _iter.sorted_idx < (_trans)->nr_sorted);			\
	     _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = trans->nr_sorted - 1;					\
	     ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) >= 0;\
	     --_i)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned *idx)
{
	struct btree_path *path;

	while ((path = __trans_next_path(trans, idx)) &&
	       !__path_has_node(path, b))
		(*idx)++;

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)	\
	for (_iter = 1;							\
	     (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
	     _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
					    bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent,
			 unsigned long ip)
{
	if (trans->paths[path].ref > 1 ||
	    trans->paths[path].preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	trans->paths[path].should_be_locked = false;
	return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
			  struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			btree_path_idx_t path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	return !bpos_eq(new_pos, trans->paths[path].pos)
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
		: path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
					      btree_path_idx_t,
					      unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
							btree_path_idx_t path, unsigned flags)
{
	bch2_trans_verify_not_unlocked(trans);

	if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
					    unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

	if (k.k && bpos_eq(path->pos, k.k->p))
		return k;

	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count
		? -BCH_ERR_transaction_restart_nested
		: 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
	if (!trans->locked)
		bch2_trans_unlocked_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_trans *trans = iter->trans;

	if (unlikely(iter->update_path))
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_intent);
	iter->update_path = 0;

	if (!(iter->flags & BTREE_ITER_all_snapshots))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
	    btree_id_is_extents(btree_id))
		flags |= BTREE_ITER_is_extents;

	if (!(flags & BTREE_ITER_snapshot_field) &&
	    !btree_type_has_snapshot_field(btree_id))
		flags &= ~BTREE_ITER_all_snapshots;

	if (!(flags & BTREE_ITER_all_snapshots) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_filter_snapshots;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_with_journal;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_cached;
		flags &= ~BTREE_ITER_with_key_cache;
	} else if (!(flags & BTREE_ITER_cached))
		flags |= BTREE_ITER_with_key_cache;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned locks_want,
					       unsigned depth,
					       unsigned flags,
					       unsigned long ip)
{
	iter->trans		= trans;
	iter->update_path	= 0;
	iter->key_cache_path	= 0;
	iter->btree_id		= btree_id;
	iter->min_depth		= 0;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k			= POS_KEY(pos);
	iter->journal_idx	= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated	= ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
				   enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
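
/*
 * Illustrative sketch (not part of this interface): memory from
 * bch2_trans_kmalloc() comes from the transaction's bump allocator and is
 * reclaimed when the transaction is reset or freed, so there is no matching
 * free; an ERR_PTR is returned on failure:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	int ret = PTR_ERR_OR_ZERO(k);
 *	if (ret)
 *		return ret;
 *	bkey_init(&k->k);
 */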

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = round_up(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
						   struct btree_iter *iter,
						   unsigned btree_id, struct bpos pos,
						   unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
						 struct btree_iter *iter,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
				_btree_id, _pos, _flags, KEY_TYPE_##_type))

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
					    unsigned btree_id, struct bpos pos,
					    unsigned flags, unsigned type,
					    unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		if (unlikely(b < sizeof(*val)))
			memset((void *) val + b, 0, sizeof(*val) - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)
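
/*
 * Example (illustrative only; BTREE_ID_inodes and inode_v3 stand in for any
 * btree and key type): read a single typed value without keeping an iterator
 * around; short values are zero-padded:
 *
 *	struct bch_inode_v3 inode;
 *	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_inodes, pos, 0,
 *					  inode_v3, &inode);
 */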

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

/*
 * XXX
 * this does not handle transaction restarts from bch2_btree_iter_next_node()
 * correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),	\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)
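
/*
 * Sketch of a node walk (illustrative; buf and c are assumed to be in scope,
 * and note the XXX above - restarts from bch2_btree_iter_next_node() are not
 * handled):
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		bch2_btree_node_to_text(&buf, c, b);
 *	bch2_trans_iter_exit(trans, &iter);
 */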

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_slots))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
		return __bch2_btree_trans_too_many_iters(trans);

	return 0;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key2()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)					\
({									\
	__label__ transaction_restart;					\
	u32 _restart_count;						\
	int _ret2;							\
transaction_restart:							\
	_restart_count = bch2_trans_begin(_trans);			\
	_ret2 = (_do);							\
									\
	if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))	\
		goto transaction_restart;				\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
	_ret2;								\
})
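
/*
 * Sketch (do_update() is a hypothetical transaction helper): retry _do until
 * it completes without a transaction restart; the result of the statement
 * expression is _do's final result:
 *
 *	ret = lockrestart_do(trans, do_update(trans, arg));
 */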

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret2;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret2)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret2 ?: trans_was_restarted(_trans, _orig_restart_count);	\
})
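
/*
 * Sketch (update_subtree() is hypothetical): for code running inside a
 * caller's transaction - if we restarted, the caller's iterators are no
 * longer valid, so even when _do eventually succeeds we return
 * -BCH_ERR_transaction_restart_nested and the caller restarts too:
 *
 *	ret = nested_lockrestart_do(trans, update_subtree(trans, arg));
 */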

#define for_each_btree_key_upto_continue(_trans, _iter,			\
					 _end, _flags, _k, _do)		\
({									\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_upto_type(&(_iter),	\
						_end, (_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_advance(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)	\
	for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _do)		\
({									\
	bch2_trans_begin(_trans);					\
									\
	struct btree_iter _iter;					\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _do)			\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
				SPOS_MAX, _flags, _k, _do)
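
/*
 * Typical usage (illustrative): _do is evaluated once per key inside a
 * lockrestart_do() loop; a statement expression makes it easy to return 0 or
 * an error for each key:
 *
 *	ret = for_each_btree_key(trans, iter, BTREE_ID_xattrs, POS_MIN,
 *				 BTREE_ITER_prefetch, k, ({
 *		count_key(k);	// count_key() is hypothetical
 *		0;
 *	}));
 */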

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	struct btree_iter _iter;					\
	struct bkey_s_c _k;						\
	int _ret3 = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	do {								\
		_ret3 = lockrestart_do(_trans, ({			\
			(_k) = bch2_btree_iter_peek_prev_type(&(_iter),	\
							(_flags));	\
			if (!(_k).k)					\
				break;					\
									\
			bkey_err(_k) ?: (_do);				\
		}));							\
	} while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));		\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret3;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,	\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
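
/*
 * Sketch: iterate and commit once per key; _do runs inside the lockrestart
 * loop and a successful _do is followed by bch2_trans_commit()
 * (mark_key() is hypothetical):
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc, POS_MIN,
 *			BTREE_ITER_prefetch, k,
 *			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 *		mark_key(trans, &iter, k));
 */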

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
					  SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fastpath, without first trying _do in
 * nonblocking mode - it will cause excessive transaction restarts and
 * potentially livelocking:
 */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	_do ?: bch2_trans_relock(_trans);				\
})
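
/*
 * Sketch (blocking_op() is hypothetical): drop btree locks around a blocking
 * operation, then relock; the result is the op's error, or the relock error:
 *
 *	ret = drop_locks_do(trans, blocking_op(c));
 */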

#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})

#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
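
/*
 * Sketch (struct foo is a stand-in; ret must be declared by the caller): _do
 * is first evaluated with _gfp = GFP_NOWAIT|__GFP_NOWARN under btree locks;
 * on failure it is retried with GFP_KERNEL and locks dropped. _do must
 * allocate using the macro-provided _gfp:
 *
 *	struct foo *f = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*f), _gfp));
 */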

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)						\
({									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
	__bch2_trans_get(_c, trans_fn_idx);				\
})
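
/*
 * Typical lifecycle (illustrative; do_update() is hypothetical): the static
 * trans_fn_idx above keys per-callsite transaction statistics by __func__:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	ret = lockrestart_do(trans, do_update(trans));
 *	bch2_trans_put(trans);
 */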

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */