9888c340 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
925baedd CM |
2 | /* |
3 | * Copyright (C) 2008 Oracle. All rights reserved. | |
925baedd CM |
4 | */ |
5 | ||
9888c340 DS |
6 | #ifndef BTRFS_LOCKING_H |
7 | #define BTRFS_LOCKING_H | |
925baedd | 8 | |
2992df73 NB |
9 | #include <linux/atomic.h> |
10 | #include <linux/wait.h> | |
11 | #include <linux/percpu_counter.h> | |
31f6e769 DS |
12 | #include "extent_io.h" |
13 | ||
/*
 * Lock-mode tokens returned/consumed by the locking helpers so a caller can
 * remember which flavor of lock it holds and drop it correctly later via
 * btrfs_tree_unlock_rw().
 */
#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2
#define BTRFS_WRITE_LOCK_BLOCKING 3
#define BTRFS_READ_LOCK_BLOCKING 4
fd7ba1c1 JB |
19 | /* |
20 | * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at | |
21 | * the time of this patch is 8, which is how many we use. Keep this in mind if | |
22 | * you decide you want to add another subclass. | |
23 | */ | |
24 | enum btrfs_lock_nesting { | |
25 | BTRFS_NESTING_NORMAL, | |
26 | ||
9631e4cc JB |
27 | /* |
28 | * When we COW a block we are holding the lock on the original block, | |
29 | * and since our lockdep maps are rootid+level, this confuses lockdep | |
30 | * when we lock the newly allocated COW'd block. Handle this by having | |
31 | * a subclass for COW'ed blocks so that lockdep doesn't complain. | |
32 | */ | |
33 | BTRFS_NESTING_COW, | |
34 | ||
bf77467a JB |
35 | /* |
36 | * Oftentimes we need to lock adjacent nodes on the same level while | |
37 | * still holding the lock on the original node we searched to, such as | |
38 | * for searching forward or for split/balance. | |
39 | * | |
40 | * Because of this we need to indicate to lockdep that this is | |
41 | * acceptable by having a different subclass for each of these | |
42 | * operations. | |
43 | */ | |
44 | BTRFS_NESTING_LEFT, | |
45 | BTRFS_NESTING_RIGHT, | |
46 | ||
bf59a5a2 JB |
47 | /* |
48 | * When splitting we will be holding a lock on the left/right node when | |
49 | * we need to cow that node, thus we need a new set of subclasses for | |
50 | * these two operations. | |
51 | */ | |
52 | BTRFS_NESTING_LEFT_COW, | |
53 | BTRFS_NESTING_RIGHT_COW, | |
54 | ||
4dff97e6 JB |
55 | /* |
56 | * When splitting we may push nodes to the left or right, but still use | |
57 | * the subsequent nodes in our path, keeping our locks on those adjacent | |
58 | * blocks. Thus when we go to allocate a new split block we've already | |
59 | * used up all of our available subclasses, so this subclass exists to | |
60 | * handle this case where we need to allocate a new split block. | |
61 | */ | |
62 | BTRFS_NESTING_SPLIT, | |
63 | ||
cf6f34aa JB |
64 | /* |
65 | * When promoting a new block to a root we need to have a special | |
66 | * subclass so we don't confuse lockdep, as it will appear that we are | |
67 | * locking a higher level node before a lower level one. Copying also | |
68 | * has this problem as it appears we're locking the same block again | |
69 | * when we make a snapshot of an existing root. | |
70 | */ | |
71 | BTRFS_NESTING_NEW_ROOT, | |
72 | ||
fd7ba1c1 JB |
73 | /* |
74 | * We are limited to MAX_LOCKDEP_SUBLCLASSES number of subclasses, so | |
75 | * add this in here and add a static_assert to keep us from going over | |
76 | * the limit. As of this writing we're limited to 8, and we're | |
77 | * definitely using 8, hence this check to keep us from messing up in | |
78 | * the future. | |
79 | */ | |
80 | BTRFS_NESTING_MAX, | |
81 | }; | |
82 | ||
83 | static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES, | |
84 | "too many lock subclasses defined"); | |
85 | ||
struct btrfs_path;

/* Write locking of a tree block, with an explicit lockdep nesting subclass. */
void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

/* Read locking; 'recurse' allows taking the read lock again on the same eb. */
void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest,
			    bool recurse);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);

/* Look up and lock the current root node of a tree. */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
						  bool recurse);

/*
 * Read lock the root node of @root without read-lock recursion.  Thin wrapper
 * around __btrfs_read_lock_root_node(root, false).
 */
static inline struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	const bool recurse = false;

	return __btrfs_read_lock_root_node(root, recurse);
}
f82c458a | 110 | |
#ifdef CONFIG_BTRFS_DEBUG
/* Debug builds: verify the caller really holds the write lock on @eb. */
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
	BUG_ON(!eb->write_locks);
}
#else
/* Non-debug builds: the assertion compiles away to nothing. */
static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
#endif
bd681513 | 118 | |
/* NOTE(review): presumably converts held spinning locks along @p to blocking
 * ones, and drops locks above @level -- confirm against the definitions in
 * the corresponding .c file. */
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
ed2b1d36 | 121 | |
bd681513 CM |
122 | static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) |
123 | { | |
124 | if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING) | |
125 | btrfs_tree_unlock(eb); | |
126 | else if (rw == BTRFS_READ_LOCK_BLOCKING) | |
127 | btrfs_tree_read_unlock_blocking(eb); | |
128 | else if (rw == BTRFS_READ_LOCK) | |
129 | btrfs_tree_read_unlock(eb); | |
130 | else | |
131 | BUG(); | |
132 | } | |
133 | ||
2992df73 NB |
134 | struct btrfs_drew_lock { |
135 | atomic_t readers; | |
136 | struct percpu_counter writers; | |
137 | wait_queue_head_t pending_writers; | |
138 | wait_queue_head_t pending_readers; | |
139 | }; | |
140 | ||
141 | int btrfs_drew_lock_init(struct btrfs_drew_lock *lock); | |
142 | void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock); | |
143 | void btrfs_drew_write_lock(struct btrfs_drew_lock *lock); | |
144 | bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock); | |
145 | void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock); | |
146 | void btrfs_drew_read_lock(struct btrfs_drew_lock *lock); | |
147 | void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock); | |
148 | ||
925baedd | 149 | #endif |