/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);

void bch2_trans_unlock_noassert(struct btree_trans *);

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

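/*
 * path->nodes_locked tracks lock state with two bits per level: extract the
 * bits for @level and bias by -1 so an empty slot maps to BTREE_NODE_UNLOCKED:
 */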
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

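/*
 * Update the lock state cached in path->nodes_locked for @level; this is pure
 * bookkeeping - it doesn't touch the six lock itself:
 */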
static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum btree_node_locked_type type)
{
	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = local_clock();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

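/*
 * The lock state a path wants at a given level: unlocked below path->level,
 * intent locked on levels covered by locks_want, read locked at path->level
 * itself when locks_want doesn't cover it, unlocked above:
 */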
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

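/* Record how long we held the lock at @level, if lock time stats are enabled: */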
static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	__bch2_time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
				 path->l[level].lock_taken_time,
				 local_clock());
#endif
}

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(lock_type == BTREE_NODE_WRITE_LOCKED);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;
	unsigned i;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);

	trans_for_each_path_with_node(trans, b, linked, i)
		linked->l[b->c.level].lock_seq++;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

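/*
 * Marking a transaction locked/unlocked: updates lockdep state, and sets
 * PF_MEMALLOC_NOFS while locked so that memory reclaim can't recurse into the
 * filesystem while we're holding btree node locks:
 */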
static inline void trans_set_locked(struct btree_trans *trans)
{
	if (!trans->locked) {
		lock_acquire_exclusive(&trans->dep_map, 0, 0, NULL, _THIS_IP_);
		trans->locked = true;
		trans->last_unlock_ip = 0;

		trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
		current->flags |= PF_MEMALLOC_NOFS;
	}
}

static inline void trans_set_unlocked(struct btree_trans *trans)
{
	if (trans->locked) {
		lock_release(&trans->dep_map, _THIS_IP_);
		trans->locked = false;
		trans->last_unlock_ip = _RET_IP_;

		if (!trans->pf_memalloc_nofs)
			current->flags &= ~PF_MEMALLOC_NOFS;
	}
}

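/*
 * Core lock path: point the deadlock cycle detector at this transaction (via
 * trans->locking and bch2_six_check_for_deadlock) for as long as we may block:
 */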
static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					   struct btree_bkey_cached_common *b,
					   enum six_lock_type type,
					   bool lock_may_not_fail,
					   unsigned long ip)
{
	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	int ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
				     bch2_six_check_for_deadlock, trans, ip);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);

	if (!ret)
		trace_btree_path_lock(trans, _THIS_IP_, b);
	return ret;
}

static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type,
		       unsigned long ip)
{
	return __btree_node_lock_nopath(trans, b, type, false, ip);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
						 struct btree_bkey_cached_common *b,
						 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, (enum six_lock_type) want);
			return true;
		}

	return false;
}

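/*
 * Lock a node, in order of preference: via trylock (the uncontended fast
 * path), by incrementing a lock we already hold via another path, and only
 * then by blocking with the full deadlock detector:
 */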
static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_bkey_cached_common *b,
				  unsigned level,
				  enum six_lock_type type,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	bch2_trans_verify_not_unlocked(trans);

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = local_clock();
#endif
	}

	return ret;
}

int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *,
				       struct btree_path *,
				       struct btree_bkey_cached_common *);

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
int __bch2_btree_path_relock(struct btree_trans *,
			     struct btree_path *, unsigned long);

static inline int bch2_btree_path_relock(struct btree_trans *trans,
					 struct btree_path *path, unsigned long trace_ip)
{
	return btree_node_locked(path, path->level)
		? 0
		: __bch2_btree_path_relock(trans, path, trace_ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

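/*
 * Relock a node at @level: a no-op if it's still locked; otherwise this only
 * succeeds if the node's lock sequence number still matches the one saved in
 * the path:
 */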
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

/* upgrade */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned,
			       struct get_locks_fail *);

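/*
 * Upgrade a path to hold intent locks on levels covered by @new_locks_want;
 * on failure, the transaction must be restarted:
 */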
static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	struct get_locks_fail f = {};
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
	    : path->nodes_locked)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want, &f);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}

/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
	trace_btree_path_should_be_locked(trans, path);
}

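/*
 * Unlock level @l and poison the node pointer, so the path no longer
 * references a valid node at that level:
 */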
static inline void __btree_path_set_level_up(struct btree_trans *trans,
					     struct btree_path *path,
					     unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
					   struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
						  struct btree_path *,
						  struct btree_bkey_cached_common *b,
						  unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */