/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

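/*
 * Overview of the lock state tracked in each extent_buffer (a summary
 * of the code below, as a reading aid):
 *
 *   read_locks/write_locks     - how many read/write locks are held
 *   spinning_readers/writers   - holders still spinning on eb->lock
 *   blocking_readers/writers   - holders that dropped eb->lock and may sleep
 *   lock_owner                 - pid of the write lock holder
 *   lock_nested                - the write lock holder also took a read lock
 *
 * Waiters sleep on read_lock_wq/write_lock_wq until the corresponding
 * blocking count drops to zero.
 */
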
/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
}

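/*
 * Typical conversion cycle (an illustrative sketch, not a specific
 * caller): code that must sleep while holding a tree lock converts it
 * to blocking first, so other threads are not stuck spinning on
 * eb->lock:
 *
 *      btrfs_tree_lock(eb);                    <- spinning write lock
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *      ...do work that may sleep...
 *      btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *      btrfs_tree_unlock(eb);
 *
 * The same pattern applies to read locks, using BTRFS_READ_LOCK and
 * BTRFS_READ_LOCK_BLOCKING.
 */
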
/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the
                 * same thread. btrfs_find_all_roots() depends on this as it
                 * may be called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        read_unlock(&eb->lock);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}

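/*
 * Illustration of the nested path above (a sketch, not taken from a
 * real caller): the thread holding a blocking write lock may take and
 * drop an extra read lock on the same eb without deadlocking:
 *
 *      btrfs_tree_lock(eb);                    <- lock_owner = our pid
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *      btrfs_tree_read_lock(eb);               <- same pid: only sets lock_nested
 *      btrfs_tree_read_unlock(eb);             <- nested: only clears lock_nested
 *      btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *      btrfs_tree_unlock(eb);
 */
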
/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}

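/*
 * Example trylock pattern (illustrative only): a caller that must not
 * sleep while holding other resources probes the lock first and backs
 * off on failure instead of waiting:
 *
 *      if (!btrfs_try_tree_read_lock(eb)) {
 *              ...release conflicting resources, then retry or fall
 *                 back to the blocking btrfs_tree_read_lock(eb)...
 *      }
 */
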
/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        eb->lock_nested = 0;
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        eb->lock_nested = 0;
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

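/*
 * Basic exclusive usage (an illustrative sketch): the spinning write
 * lock pairs directly with btrfs_tree_unlock(), which also handles the
 * case where the lock was converted to blocking in between:
 *
 *      btrfs_tree_lock(eb);
 *      ...modify the extent buffer, no sleeping...
 *      btrfs_tree_unlock(eb);
 */
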
/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                smp_mb();
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}