/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}
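
/*
 * Typical call sequence (an illustrative sketch, not taken from this
 * file): take the spinning lock, switch it to blocking around an
 * operation that may sleep, then switch back before unlocking:
 *
 *	btrfs_tree_lock(eb);
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	... do work that may sleep or reschedule ...
 *	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *	btrfs_tree_unlock(eb);
 */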

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers))
			wake_up(&eb->read_lock_wq);
	}
}

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread. We allow
		 * an additional read lock to be added because it's for the same
		 * thread. btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	read_unlock(&eb->lock);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}
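
/*
 * Illustrative nested-lock sequence (a sketch, not taken from this
 * file): a thread that already holds eb's write lock in blocking mode
 * may take a read lock on the same eb; it is granted through
 * eb->lock_nested instead of eb->lock:
 *
 *	btrfs_tree_lock(eb);			records lock_owner
 *	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *	btrfs_tree_read_lock(eb);		sets lock_nested = 1
 *	btrfs_tree_read_unlock(eb);		clears lock_nested
 */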

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}
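
/*
 * Typical caller pattern (a sketch, not taken from this file): try the
 * cheap non-blocking path first and fall back to the full lock, which
 * may wait on blockers:
 *
 *	if (!btrfs_try_tree_write_lock(eb))
 *		btrfs_tree_lock(eb);
 */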

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	if (eb->lock_nested) {
		read_lock(&eb->lock);
		if (eb->lock_nested && current->pid == eb->lock_owner) {
			eb->lock_nested = 0;
			read_unlock(&eb->lock);
			return;
		}
		read_unlock(&eb->lock);
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
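	/*
	 * Wait for blockers with eb->lock dropped so the current holders
	 * can make progress and release the lock.  After taking the
	 * rwlock, re-check both counters and retry if a new blocker
	 * slipped in between the wait and the write_lock().
	 */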
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	atomic_dec(&eb->write_locks);
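
	/*
	 * If the lock was switched to blocking, the rwlock itself was
	 * already dropped in btrfs_set_lock_blocking_rw(), so only the
	 * blocking count is cleared and waiters are woken; otherwise we
	 * still hold eb->lock and must write_unlock() it here.
	 */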
	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		smp_wmb();
		wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}