/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

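/*
 * Overview of the state manipulated below (a summary drawn from the helpers
 * in this file, not an authoritative design note): each extent_buffer
 * carries a rwlock (eb->lock) plus atomic counters for read_locks,
 * write_locks and the spinning/blocking reader and writer counts, along
 * with the read_lock_wq/write_lock_wq wait queues. A lock is taken in its
 * spinning form first (the rwlock is held) and may be converted to a
 * blocking form, which drops the rwlock but keeps the logical lock so the
 * holder can sleep; waiters sleep on the wait queues until the blocking
 * counters return to zero.
 */
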
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                /*
                 * atomic_dec_and_test implies a barrier for waitqueue_active
                 */
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                /*
                 * atomic_dec_and_test implies a barrier for waitqueue_active
                 */
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
}

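/*
 * Illustrative write-lock round trip (a minimal sketch, not lifted from a
 * specific caller): a holder takes the spinning lock, converts it to the
 * blocking form before doing work that may sleep, converts back, and then
 * unlocks. btrfs_tree_lock() and btrfs_tree_unlock() are defined later in
 * this file.
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... work that may sleep ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */
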
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

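/*
 * Note on the two non-waiting read-lock helpers: the _atomic variant above
 * still calls read_lock() and so may briefly spin on eb->lock itself, while
 * btrfs_try_tree_read_lock() below uses read_trylock() and gives up
 * immediately if the rwlock is contended. Both refuse the lock whenever
 * blocking writers are present.
 */
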
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}

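/*
 * Illustrative fallback pattern for the trylock helpers (a sketch only, not
 * taken from a specific caller): try the cheap non-waiting path first and
 * fall back to the waiting variant if it fails.
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */
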
/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /*
         * atomic_dec_and_test implies a barrier for waitqueue_active
         */
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

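/*
 * Illustrative read-lock lifecycle (a minimal sketch, not lifted from a
 * specific caller): a reader takes the spinning lock, converts it to the
 * blocking form before sleeping, and then releases the blocking lock
 * directly; a lock still in its spinning form would instead be dropped with
 * btrfs_tree_read_unlock().
 *
 *	btrfs_tree_read_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *	... work that may sleep ...
 *	btrfs_tree_read_unlock_blocking(eb);
 */
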
/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                /*
                 * Make sure counter is updated before we wake up waiters.
                 */
                smp_mb();
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}