/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

static inline int __bkey_err(const struct bkey *k)
{
        return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)    __bkey_err((_k).k)
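
/*
 * Example (illustrative): peek operations encode errors in the returned key,
 * so callers check bkey_err() before using the key itself. Given an
 * initialized iterator:
 *
 *      struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 *      int ret = bkey_err(k);
 *      if (ret)
 *              return ret;
 */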

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
        path->ref++;
        path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
        EBUG_ON(!path->ref);
        EBUG_ON(!path->intent_ref && intent);
        path->intent_ref -= intent;
        return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
                                        enum btree_path_uptodate u)
{
        path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
                                            unsigned level)
{
        return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
                                        const struct btree *b, unsigned level)
{
        return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
                                              struct btree *b)
{
        return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
        if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
            trans->paths_sorted)
                return;
        __bch2_btree_trans_sort_paths(trans);
}

static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
        return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}

static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
        unsigned long *v = trans_paths_nr(paths);
        return v - BITS_TO_LONGS(*v);
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
        for (_idx = _start;                                             \
             (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr; \
             _idx++)

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{
        unsigned long *w = trans->paths_allocated + *idx / BITS_PER_LONG;
        /*
         * Open coded find_next_bit(), because
         *  - this is a fast path, we can't afford the function call
         *  - and we know that nr_paths is a multiple of BITS_PER_LONG
         */
        while (*idx < trans->nr_paths) {
                unsigned long v = *w >> (*idx & (BITS_PER_LONG - 1));
                if (v) {
                        *idx += __ffs(v);
                        return trans->paths + *idx;
                }

                *idx += BITS_PER_LONG;
                *idx &= ~(BITS_PER_LONG - 1);
                w++;
        }

        return NULL;
}

/*
 * This version is intended to be safe for use on a btree_trans that is owned
 * by another thread, for bch2_btree_trans_to_text().
 */
#define trans_for_each_path_from(_trans, _path, _idx, _start)           \
        for (_idx = _start;                                             \
             (_path = __trans_next_path((_trans), &_idx));              \
             _idx++)

#define trans_for_each_path(_trans, _path, _idx)                        \
        trans_for_each_path_from(_trans, _path, _idx, 1)
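
/*
 * Example (illustrative): walking every allocated path in a transaction;
 * iteration starts at index 1 because index 0 is reserved as a sentinel
 * (nr_extent_paths is a stand-in for whatever the caller computes):
 *
 *      struct btree_path *path;
 *      unsigned i, nr_extent_paths = 0;
 *
 *      trans_for_each_path(trans, path, i)
 *              if (path->btree_id == BTREE_ID_extents)
 *                      nr_extent_paths++;
 */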

static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
        unsigned idx = path ? path->sorted_idx + 1 : 0;

        EBUG_ON(idx > trans->nr_sorted);

        return idx < trans->nr_sorted
                ? trans->paths + trans->sorted[idx]
                : NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
        unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

        return idx
                ? trans->paths + trans->sorted[idx - 1]
                : NULL;
}

#define trans_for_each_path_idx_inorder(_trans, _iter)                  \
        for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };   \
             (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],      \
              _iter.sorted_idx < (_trans)->nr_sorted);                  \
             _iter.sorted_idx++)

struct trans_for_each_path_inorder_iter {
        btree_path_idx_t        sorted_idx;
        btree_path_idx_t        path_idx;
};

#define trans_for_each_path_inorder(_trans, _path, _iter)               \
        for (_iter = (struct trans_for_each_path_inorder_iter) { 0 };   \
             (_iter.path_idx = (_trans)->sorted[_iter.sorted_idx],      \
              _path = (_trans)->paths + _iter.path_idx,                 \
              _iter.sorted_idx < (_trans)->nr_sorted);                  \
             _iter.sorted_idx++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)          \
        for (_i = (_trans)->nr_sorted - 1;                              \
             ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
             --_i)
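
/*
 * Example (illustrative): the _inorder variants visit paths in sorted order
 * (by btree id and position) rather than allocation order:
 *
 *      struct btree_path *path;
 *      struct trans_for_each_path_inorder_iter iter;
 *
 *      trans_for_each_path_inorder(trans, path, iter)
 *              pr_info("btree %u pos %llu", path->btree_id, path->pos.offset);
 */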

static inline bool __path_has_node(const struct btree_path *path,
                                   const struct btree *b)
{
        return path->l[b->c.level].b == b &&
                btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
                            unsigned *idx)
{
        struct btree_path *path;

        while ((path = __trans_next_path(trans, idx)) &&
                !__path_has_node(path, b))
               (*idx)++;

        return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path, _iter)         \
        for (_iter = 1;                                                 \
             (_path = __trans_next_path_with_node((_trans), (_b), &_iter));\
             _iter++)

btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *, btree_path_idx_t,
                                            bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
                         btree_path_idx_t path, bool intent,
                         unsigned long ip)
{
        if (trans->paths[path].ref > 1 ||
            trans->paths[path].preserve)
                path = __bch2_btree_path_make_mut(trans, path, intent, ip);
        trans->paths[path].should_be_locked = false;
        return path;
}

btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *, btree_path_idx_t,
                          struct bpos, bool, unsigned long);

static inline btree_path_idx_t __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
                        btree_path_idx_t path, struct bpos new_pos,
                        bool intent, unsigned long ip)
{
        return !bpos_eq(new_pos, trans->paths[path].pos)
                ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip)
                : path;
}

int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
                                              btree_path_idx_t,
                                              unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
                                          btree_path_idx_t path, unsigned flags)
{
        bch2_trans_verify_not_unlocked(trans);

        if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
                return 0;

        return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
                                 unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
                                            unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
 * bch2_btree_path_peek_slot() for a cached iterator might return a key in a
 * different snapshot:
 */
static inline struct bkey_s_c bch2_btree_path_peek_slot_exact(struct btree_path *path, struct bkey *u)
{
        struct bkey_s_c k = bch2_btree_path_peek_slot(path, u);

        if (k.k && bpos_eq(path->pos, k.k->p))
                return k;

        bkey_init(u);
        u->p = path->pos;
        return (struct bkey_s_c) { u, NULL };
}

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
                                        struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
        return mutex_trylock(lock)
                ? 0
                : __bch2_trans_mutex_lock(trans, lock);
}
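
/*
 * Example (illustrative): the trylock fast path avoids dropping btree locks;
 * only on contention does the outlined version unlock the transaction, block
 * on the mutex and relock (some_lock is a stand-in for any fs mutex):
 *
 *      int ret = bch2_trans_mutex_lock(trans, &c->some_lock);
 *      if (ret)
 *              return ret;
 */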

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
                            struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
                                          struct bpos pos, bool key_cache) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
                                      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
                              struct btree *, struct btree_node_iter *,
                              struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, btree_path_idx_t, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
        return restart_count != trans->restart_count
                ? -BCH_ERR_transaction_restart_nested
                : 0;
}

void __noreturn bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
                                                   u32 restart_count)
{
        if (trans_was_restarted(trans, restart_count))
                bch2_trans_restart_error(trans, restart_count);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
        if (trans->restarted)
                bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
        if (!trans->locked)
                bch2_trans_unlocked_error(trans);
}

__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
        BUG_ON(err <= 0);
        BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

        trans->restarted = err;
        trans->last_restarted_ip = _THIS_IP_;
        return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
        btree_trans_restart_nounlock(trans, err);
        return -err;
}
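
/*
 * Example (illustrative): code that detects it must retry the whole
 * transaction returns a restart error, recording the reason and callsite
 * (race_detected is a stand-in condition):
 *
 *      if (race_detected)
 *              return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 */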

bool bch2_btree_node_upgrade(struct btree_trans *,
                             struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
                                             struct btree_path *path)
{
        unsigned new_locks_want = path->level + !!path->intent_ref;

        if (path->locks_want > new_locks_want)
                __bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
        return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}
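
/*
 * Example (illustrative): the low-level peek/advance pattern that the
 * for_each_btree_key*() macros below wrap; with bare peek calls the caller
 * is responsible for handling transaction restarts:
 *
 *      while ((k = bch2_btree_iter_peek(&iter)).k) {
 *              ret = bkey_err(k);
 *              if (ret)
 *                      break;
 *
 *              ... use k ...
 *              bch2_btree_iter_advance(&iter);
 *      }
 */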

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
        iter->k.type = KEY_TYPE_deleted;
        iter->k.p.inode         = iter->pos.inode       = new_pos.inode;
        iter->k.p.offset        = iter->pos.offset      = new_pos.offset;
        iter->k.p.snapshot      = iter->pos.snapshot    = new_pos.snapshot;
        iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
        struct btree_trans *trans = iter->trans;

        if (unlikely(iter->update_path))
                bch2_path_put(trans, iter->update_path,
                              iter->flags & BTREE_ITER_intent);
        iter->update_path = 0;

        if (!(iter->flags & BTREE_ITER_all_snapshots))
                new_pos.snapshot = iter->snapshot;

        __bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
        BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
        iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
        struct bpos pos = iter->pos;

        iter->snapshot = snapshot;
        pos.snapshot = snapshot;
        bch2_btree_iter_set_pos(iter, pos);
}

void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
                                               unsigned btree_id,
                                               unsigned flags)
{
        if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
            btree_id_is_extents(btree_id))
                flags |= BTREE_ITER_is_extents;

        if (!(flags & BTREE_ITER_snapshot_field) &&
            !btree_type_has_snapshot_field(btree_id))
                flags &= ~BTREE_ITER_all_snapshots;

        if (!(flags & BTREE_ITER_all_snapshots) &&
            btree_type_has_snapshots(btree_id))
                flags |= BTREE_ITER_filter_snapshots;

        if (trans->journal_replay_not_finished)
                flags |= BTREE_ITER_with_journal;

        return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
                                             unsigned btree_id,
                                             unsigned flags)
{
        if (!btree_id_cached(trans->c, btree_id)) {
                flags &= ~BTREE_ITER_cached;
                flags &= ~BTREE_ITER_with_key_cache;
        } else if (!(flags & BTREE_ITER_cached))
                flags |= BTREE_ITER_with_key_cache;

        return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
                                          struct btree_iter *iter,
                                          unsigned btree_id, struct bpos pos,
                                          unsigned locks_want,
                                          unsigned depth,
                                          unsigned flags,
                                          unsigned long ip)
{
        iter->trans             = trans;
        iter->update_path       = 0;
        iter->key_cache_path    = 0;
        iter->btree_id          = btree_id;
        iter->min_depth         = 0;
        iter->flags             = flags;
        iter->snapshot          = pos.snapshot;
        iter->pos               = pos;
        iter->k                 = POS_KEY(pos);
        iter->journal_idx       = 0;
#ifdef CONFIG_BCACHEFS_DEBUG
        iter->ip_allocated = ip;
#endif
        iter->path = bch2_path_get(trans, btree_id, iter->pos,
                                   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
                          enum btree_id, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
                          struct btree_iter *iter,
                          unsigned btree_id, struct bpos pos,
                          unsigned flags)
{
        if (__builtin_constant_p(btree_id) &&
            __builtin_constant_p(flags))
                bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
                                bch2_btree_iter_flags(trans, btree_id, flags),
                                _THIS_IP_);
        else
                bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
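
/*
 * Example (illustrative): the basic iterator lifecycle; every
 * bch2_trans_iter_init() must be paired with bch2_trans_iter_exit():
 *
 *      struct btree_iter iter;
 *
 *      bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *      struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 *      ret = bkey_err(k);
 *      ...
 *      bch2_trans_iter_exit(trans, &iter);
 */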

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
                               enum btree_id, struct bpos,
                               unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

void bch2_set_btree_iter_dontneed(struct btree_iter *);

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
        size = roundup(size, 8);

        if (likely(trans->mem_top + size <= trans->mem_bytes)) {
                void *p = trans->mem + trans->mem_top;

                trans->mem_top += size;
                memset(p, 0, size);
                return p;
        } else {
                return __bch2_trans_kmalloc(trans, size);
        }
}
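
/*
 * Example (illustrative): transaction-lifetime allocation; memory comes from
 * the transaction's bump allocator, is zeroed, and is released when the
 * transaction is reset or put, so there is no matching free:
 *
 *      struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new));
 *      int ret = PTR_ERR_OR_ZERO(new);
 *      if (ret)
 *              return ret;
 */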

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
        size = round_up(size, 8);

        if (likely(trans->mem_top + size <= trans->mem_bytes)) {
                void *p = trans->mem + trans->mem_top;

                trans->mem_top += size;
                return p;
        } else {
                return __bch2_trans_kmalloc(trans, size);
        }
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
                                struct btree_iter *iter,
                                unsigned btree_id, struct bpos pos,
                                unsigned flags, unsigned type)
{
        struct bkey_s_c k;

        bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
        k = bch2_btree_iter_peek_slot(iter);

        if (!bkey_err(k) && type && k.k->type != type)
                k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
        if (unlikely(bkey_err(k)))
                bch2_trans_iter_exit(trans, iter);
        return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
                                struct btree_iter *iter,
                                unsigned btree_id, struct bpos pos,
                                unsigned flags)
{
        return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
        bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,                 \
                                       _btree_id, _pos, _flags, KEY_TYPE_##_type))
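
/*
 * Example (illustrative): the _typed variant verifies the key type and
 * returns the corresponding typed wrapper; on error the iterator has already
 * been exited:
 *
 *      struct btree_iter iter;
 *      struct bkey_s_c_snapshot s =
 *              bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots,
 *                                       POS(0, id), 0, snapshot);
 *      int ret = bkey_err(s);
 *      if (!ret) {
 *              ...
 *              bch2_trans_iter_exit(trans, &iter);
 *      }
 */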

static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
                                unsigned btree_id, struct bpos pos,
                                unsigned flags, unsigned type,
                                unsigned val_size, void *val)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
        ret = bkey_err(k);
        if (!ret) {
                unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

                memcpy(val, k.v, b);
                if (unlikely(b < val_size))
                        memset((void *) val + b, 0, val_size - b);
                bch2_trans_iter_exit(trans, &iter);
        }

        return ret;
}

#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
        __bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,      \
                                  KEY_TYPE_##_type, sizeof(*_val), _val)

void bch2_trans_srcu_unlock(struct btree_trans *);

u32 bch2_trans_begin(struct btree_trans *);

/*
 * XXX
 * this does not handle transaction restarts from bch2_btree_iter_next_node()
 * correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,         \
                              _locks_want, _depth, _flags, _b, _ret)    \
        for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
                                _start, _locks_want, _depth, _flags);   \
             (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),    \
             !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);                   \
             (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,           \
                            _flags, _b, _ret)                           \
        __for_each_btree_node(_trans, _iter, _btree_id, _start,         \
                              0, 0, _flags, _b, _ret)
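
/*
 * Example (illustrative): walking the nodes of a btree rather than its keys
 * (buf is a stand-in printbuf):
 *
 *      struct btree_iter iter;
 *      struct btree *b;
 *      int ret;
 *
 *      for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *              bch2_btree_node_to_text(&buf, c, b);
 *      bch2_trans_iter_exit(trans, &iter);
 */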

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
                                                             unsigned flags)
{
        return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
                                                bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
                                                        unsigned flags)
{
        return  flags & BTREE_ITER_slots      ? bch2_btree_iter_peek_slot(iter) :
                                                bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
                                                             struct bpos end,
                                                             unsigned flags)
{
        if (!(flags & BTREE_ITER_slots))
                return bch2_btree_iter_peek_upto(iter, end);

        if (bkey_gt(iter->pos, end))
                return bkey_s_c_null;

        return bch2_btree_iter_peek_slot(iter);
}

int __bch2_btree_trans_too_many_iters(struct btree_trans *);

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
        if (bitmap_weight(trans->paths_allocated, trans->nr_paths) > BTREE_ITER_NORMAL_LIMIT - 8)
                return __bch2_btree_trans_too_many_iters(trans);

        return 0;
}

/*
 * goto instead of loop, so that when used inside for_each_btree_key()
 * break/continue work correctly
 */
#define lockrestart_do(_trans, _do)                                     \
({                                                                      \
        __label__ transaction_restart;                                  \
        u32 _restart_count;                                             \
        int _ret2;                                                      \
transaction_restart:                                                    \
        _restart_count = bch2_trans_begin(_trans);                      \
        _ret2 = (_do);                                                  \
                                                                        \
        if (bch2_err_matches(_ret2, BCH_ERR_transaction_restart))       \
                goto transaction_restart;                               \
                                                                        \
        if (!_ret2)                                                     \
                bch2_trans_verify_not_restarted(_trans, _restart_count);\
        _ret2;                                                          \
})
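
/*
 * Example (illustrative): wrapping a transactional operation so restarts are
 * retried transparently (do_op is a stand-in for any function that takes the
 * btree_trans and may return a transaction restart error):
 *
 *      ret = lockrestart_do(trans, do_op(trans, arg));
 */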

/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 *  - We don't call bch2_trans_begin() unless we had a transaction restart
 *  - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *    transaction restart
 */
#define nested_lockrestart_do(_trans, _do)                              \
({                                                                      \
        u32 _restart_count, _orig_restart_count;                        \
        int _ret2;                                                      \
                                                                        \
        _restart_count = _orig_restart_count = (_trans)->restart_count; \
                                                                        \
        while (bch2_err_matches(_ret2 = (_do), BCH_ERR_transaction_restart))\
                _restart_count = bch2_trans_begin(_trans);              \
                                                                        \
        if (!_ret2)                                                     \
                bch2_trans_verify_not_restarted(_trans, _restart_count);\
                                                                        \
        _ret2 ?: trans_was_restarted(_trans, _orig_restart_count);      \
})

#define for_each_btree_key_upto_continue(_trans, _iter,                 \
                                         _end, _flags, _k, _do)         \
({                                                                      \
        struct bkey_s_c _k;                                             \
        int _ret3 = 0;                                                  \
                                                                        \
        do {                                                            \
                _ret3 = lockrestart_do(_trans, ({                       \
                        (_k) = bch2_btree_iter_peek_upto_type(&(_iter), \
                                                _end, (_flags));        \
                        if (!(_k).k)                                    \
                                break;                                  \
                                                                        \
                        bkey_err(_k) ?: (_do);                          \
                }));                                                    \
        } while (!_ret3 && bch2_btree_iter_advance(&(_iter)));          \
                                                                        \
        bch2_trans_iter_exit((_trans), &(_iter));                       \
        _ret3;                                                          \
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do)     \
        for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id,               \
                                _start, _end, _flags, _k, _do)          \
({                                                                      \
        bch2_trans_begin(_trans);                                       \
                                                                        \
        struct btree_iter _iter;                                        \
        bch2_trans_iter_init((_trans), &(_iter), (_btree_id),           \
                             (_start), (_flags));                       \
                                                                        \
        for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})

#define for_each_btree_key(_trans, _iter, _btree_id,                    \
                           _start, _flags, _k, _do)                     \
        for_each_btree_key_upto(_trans, _iter, _btree_id, _start,       \
                                 SPOS_MAX, _flags, _k, _do)
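
/*
 * Example (illustrative): iterating every key in a btree; _do runs with _k
 * valid, restarts are handled internally, and the whole expression evaluates
 * to an error code (nr_inodes is a stand-in counter):
 *
 *      ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
 *                               BTREE_ITER_prefetch, k, ({
 *              nr_inodes++;
 *              0;
 *      }));
 */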

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,            \
                                   _start, _flags, _k, _do)             \
({                                                                      \
        struct btree_iter _iter;                                        \
        struct bkey_s_c _k;                                             \
        int _ret3 = 0;                                                  \
                                                                        \
        bch2_trans_iter_init((_trans), &(_iter), (_btree_id),           \
                             (_start), (_flags));                       \
                                                                        \
        do {                                                            \
                _ret3 = lockrestart_do(_trans, ({                       \
                        (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \
                                                        (_flags));      \
                        if (!(_k).k)                                    \
                                break;                                  \
                                                                        \
                        bkey_err(_k) ?: (_do);                          \
                }));                                                    \
        } while (!_ret3 && bch2_btree_iter_rewind(&(_iter)));           \
                                                                        \
        bch2_trans_iter_exit((_trans), &(_iter));                       \
        _ret3;                                                          \
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,             \
                                  _start, _iter_flags, _k,              \
                                  _disk_res, _journal_seq, _commit_flags,\
                                  _do)                                  \
        for_each_btree_key(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
                            (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
                                        (_journal_seq), (_commit_flags)))

#define for_each_btree_key_reverse_commit(_trans, _iter, _btree_id,     \
                                  _start, _iter_flags, _k,              \
                                  _disk_res, _journal_seq, _commit_flags,\
                                  _do)                                  \
        for_each_btree_key_reverse(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
                            (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
                                        (_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,        \
                                  _start, _end, _iter_flags, _k,        \
                                  _disk_res, _journal_seq, _commit_flags,\
                                  _do)                                  \
        for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
                            (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
                                        (_journal_seq), (_commit_flags)))

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
                                   struct btree_iter *iter, unsigned flags)
{
        struct bkey_s_c k;

        while (btree_trans_too_many_iters(trans) ||
               (k = bch2_btree_iter_peek_type(iter, flags),
                bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
                bch2_trans_begin(trans);

        return k;
}

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,     \
                           _start, _end, _flags, _k, _ret)              \
        for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),      \
                                  (_start), (_flags));                  \
             (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
             !((_ret) = bkey_err(_k)) && (_k).k;                        \
             bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
        for (;                                                                  \
             (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),     \
             !((_ret) = bkey_err(_k)) && (_k).k;                                \
             bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,          \
                           _start, _flags, _k, _ret)                    \
        for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
                                          SPOS_MAX, _flags, _k, _ret)

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)  \
        for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)

/*
 * This should not be used in a fast path without first trying _do in
 * nonblocking mode - it will cause excessive transaction restarts and
 * potentially livelock:
 */
#define drop_locks_do(_trans, _do)                                      \
({                                                                      \
        bch2_trans_unlock(_trans);                                      \
        _do ?: bch2_trans_relock(_trans);                               \
})

#define allocate_dropping_locks_errcode(_trans, _do)                    \
({                                                                      \
        gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;                           \
        int _ret = _do;                                                 \
                                                                        \
        if (bch2_err_matches(_ret, ENOMEM)) {                           \
                _gfp = GFP_KERNEL;                                      \
                _ret = drop_locks_do(_trans, _do);                      \
        }                                                               \
        _ret;                                                           \
})

#define allocate_dropping_locks(_trans, _ret, _do)                      \
({                                                                      \
        gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;                           \
        typeof(_do) _p = _do;                                           \
                                                                        \
        _ret = 0;                                                       \
        if (unlikely(!_p)) {                                            \
                _gfp = GFP_KERNEL;                                      \
                _ret = drop_locks_do(_trans, ((_p = _do), 0));          \
        }                                                               \
        _p;                                                             \
})
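
/*
 * Example (illustrative): _do must be an allocation expression that uses the
 * provided _gfp variable; the first attempt runs GFP_NOWAIT under btree
 * locks, and only on failure are locks dropped for a GFP_KERNEL retry
 * (struct foo is a stand-in type):
 *
 *      struct foo *p = allocate_dropping_locks(trans, ret,
 *                              kzalloc(sizeof(*p), _gfp));
 *      if (!ret && !p)
 *              ret = -ENOMEM;
 */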

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_get(_c)                                              \
({                                                                      \
        static unsigned trans_fn_idx;                                   \
                                                                        \
        if (unlikely(!trans_fn_idx))                                    \
                trans_fn_idx = bch2_trans_get_fn_idx(__func__);         \
        __bch2_trans_get(_c, trans_fn_idx);                             \
})
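
/*
 * Example (illustrative): transaction lifecycle; bch2_trans_get() latches a
 * per-callsite function name index for tracing on first use (do_op is a
 * stand-in operation):
 *
 *      struct btree_trans *trans = bch2_trans_get(c);
 *
 *      ret = lockrestart_do(trans, do_op(trans));
 *      bch2_trans_put(trans);
 */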

void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
void bch2_fs_btree_iter_init_early(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */