/*
 * Copyright (C) 2012-2017 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

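/*
 * wq        - workqueue the quiesce continuations are run on.
 * lock      - protects the rbtree of cells.
 * cells     - rbtree of in-use cells, ordered by cmp_keys() on their keys.
 * cell_pool - mempool the cells come from; MIN_CELLS entries are reserved.
 */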
struct dm_bio_prison_v2 {
	struct workqueue_struct *wq;

	spinlock_t lock;
	struct rb_root cells;
	mempool_t cell_pool;
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool is sized by MIN_CELLS, which should reflect the number
 * of cells expected to be in use _concurrently_; don't confuse it with the
 * number of distinct keys.
 */
struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
{
	struct dm_bio_prison_v2 *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
	int ret;

	if (!prison)
		return NULL;

	prison->wq = wq;
	spin_lock_init(&prison->lock);

	ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
	if (ret) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;

	return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create_v2);

void dm_bio_prison_destroy_v2(struct dm_bio_prison_v2 *prison)
{
	mempool_exit(&prison->cell_pool);
	kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy_v2);

struct dm_bio_prison_cell_v2 *dm_bio_prison_alloc_cell_v2(struct dm_bio_prison_v2 *prison, gfp_t gfp)
{
	return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell_v2);

void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
				struct dm_bio_prison_cell_v2 *cell)
{
	mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell_v2);

static void __setup_new_cell(struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell)
{
	memset(cell, 0, sizeof(*cell));
	memcpy(&cell->key, key, sizeof(cell->key));
	bio_list_init(&cell->bios);
}

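/*
 * Keys are compared by 'virtual', then by 'dev'; two keys whose
 * [block_begin, block_end) ranges overlap compare equal, so overlapping
 * ranges map to the same cell.
 */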
static int cmp_keys(struct dm_cell_key_v2 *lhs,
		    struct dm_cell_key_v2 *rhs)
{
	if (lhs->virtual < rhs->virtual)
		return -1;

	if (lhs->virtual > rhs->virtual)
		return 1;

	if (lhs->dev < rhs->dev)
		return -1;

	if (lhs->dev > rhs->dev)
		return 1;

	if (lhs->block_end <= rhs->block_begin)
		return -1;

	if (lhs->block_begin >= rhs->block_end)
		return 1;

	return 0;
}

/*
 * Returns true if a cell with a matching (overlapping) key was found and
 * sets *result to it; otherwise initialises and inserts cell_prealloc,
 * sets *result to that, and returns false.  Called with prison->lock held.
 */
static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
			     struct dm_cell_key_v2 *key,
			     struct dm_bio_prison_cell_v2 *cell_prealloc,
			     struct dm_bio_prison_cell_v2 **result)
{
	int r;
	struct rb_node **new = &prison->cells.rb_node, *parent = NULL;

	while (*new) {
		struct dm_bio_prison_cell_v2 *cell =
			rb_entry(*new, struct dm_bio_prison_cell_v2, node);

		r = cmp_keys(key, &cell->key);

		parent = *new;
		if (r < 0)
			new = &((*new)->rb_left);

		else if (r > 0)
			new = &((*new)->rb_right);

		else {
			*result = cell;
			return true;
		}
	}

	__setup_new_cell(key, cell_prealloc);
	*result = cell_prealloc;
	rb_link_node(&cell_prealloc->node, parent, new);
	rb_insert_color(&cell_prealloc->node, &prison->cells);

	return false;
}

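/*
 * Shared "get": returns true if the caller now holds a shared reference on
 * the cell.  Returns false if an exclusive lock is held at a level >=
 * lock_level, in which case the bio is queued on the cell and handed back
 * to whoever unlocks it.
 */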
static bool __get(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct bio *inmate,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell)
{
	if (__find_or_insert(prison, key, cell_prealloc, cell)) {
		if ((*cell)->exclusive_lock) {
			if (lock_level <= (*cell)->exclusive_level) {
				bio_list_add(&(*cell)->bios, inmate);
				return false;
			}
		}

		(*cell)->shared_count++;

	} else
		(*cell)->shared_count = 1;

	return true;
}

bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct bio *inmate,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __get(prison, key, lock_level, inmate, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_get_v2);

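/*
 * Drops a shared reference.  When the last shared holder goes away, either
 * the pending quiesce continuation (if an exclusive lock is waiting) is
 * queued, or the cell is removed from the prison.  Returns true if the
 * cell was removed and should be freed by the caller.
 */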
static bool __put(struct dm_bio_prison_v2 *prison,
		  struct dm_bio_prison_cell_v2 *cell)
{
	BUG_ON(!cell->shared_count);
	cell->shared_count--;

	// FIXME: shared locks granted above the lock level could starve this
	if (!cell->shared_count) {
		if (cell->exclusive_lock) {
			if (cell->quiesce_continuation) {
				queue_work(prison->wq, cell->quiesce_continuation);
				cell->quiesce_continuation = NULL;
			}
		} else {
			rb_erase(&cell->node, &prison->cells);
			return true;
		}
	}

	return false;
}

bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_bio_prison_cell_v2 *cell)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __put(prison, cell);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_put_v2);

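/*
 * Exclusive lock attempt.  Return values (mirrored by dm_cell_lock_v2):
 *   -EBUSY - the cell is already exclusively locked.
 *   0      - lock granted; there are no shared holders to wait for.
 *   1      - lock granted, but shared holders exist and must be quiesced
 *            (see dm_cell_quiesce_v2) before proceeding.
 */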
static int __lock(struct dm_bio_prison_v2 *prison,
		  struct dm_cell_key_v2 *key,
		  unsigned lock_level,
		  struct dm_bio_prison_cell_v2 *cell_prealloc,
		  struct dm_bio_prison_cell_v2 **cell_result)
{
	struct dm_bio_prison_cell_v2 *cell;

	if (__find_or_insert(prison, key, cell_prealloc, &cell)) {
		if (cell->exclusive_lock)
			return -EBUSY;

		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;

		// FIXME: we don't yet know what level these shared locks
		// were taken at, so have to quiesce them all.
		return cell->shared_count > 0;

	} else {
		cell = cell_prealloc;
		cell->shared_count = 0;
		cell->exclusive_lock = true;
		cell->exclusive_level = lock_level;
		*cell_result = cell;
	}

	return 0;
}

int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
		    struct dm_cell_key_v2 *key,
		    unsigned lock_level,
		    struct dm_bio_prison_cell_v2 *cell_prealloc,
		    struct dm_bio_prison_cell_v2 **cell_result)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __lock(prison, key, lock_level, cell_prealloc, cell_result);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_v2);

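/*
 * Quiesce: run 'continuation' once all shared holders of the cell have
 * dropped their references.  If there are none it is queued immediately;
 * otherwise it is queued by the final dm_cell_put_v2().
 */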
static void __quiesce(struct dm_bio_prison_v2 *prison,
		      struct dm_bio_prison_cell_v2 *cell,
		      struct work_struct *continuation)
{
	if (!cell->shared_count)
		queue_work(prison->wq, continuation);
	else
		cell->quiesce_continuation = continuation;
}

void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
			struct dm_bio_prison_cell_v2 *cell,
			struct work_struct *continuation)
{
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	__quiesce(prison, cell, continuation);
	spin_unlock_irqrestore(&prison->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);

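/*
 * Change the level of an existing exclusive lock.  Returns -EINVAL if the
 * cell isn't exclusively locked, 1 if shared holders remain and another
 * quiesce is needed, 0 otherwise.
 */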
static int __promote(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     unsigned new_lock_level)
{
	if (!cell->exclusive_lock)
		return -EINVAL;

	cell->exclusive_level = new_lock_level;
	return cell->shared_count > 0;
}

int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
			    struct dm_bio_prison_cell_v2 *cell,
			    unsigned new_lock_level)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __promote(prison, cell, new_lock_level);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_lock_promote_v2);

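/*
 * Drop the exclusive lock.  Any bios deferred on the cell are transferred
 * to 'bios' for the caller to reissue.  Returns true if the cell is now
 * empty (no shared holders) and has been removed from the prison, in which
 * case the caller should free it.
 */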
static bool __unlock(struct dm_bio_prison_v2 *prison,
		     struct dm_bio_prison_cell_v2 *cell,
		     struct bio_list *bios)
{
	BUG_ON(!cell->exclusive_lock);

	bio_list_merge(bios, &cell->bios);
	bio_list_init(&cell->bios);

	if (cell->shared_count) {
		cell->exclusive_lock = 0;
		return false;
	}

	rb_erase(&cell->node, &prison->cells);
	return true;
}

bool dm_cell_unlock_v2(struct dm_bio_prison_v2 *prison,
		       struct dm_bio_prison_cell_v2 *cell,
		       struct bio_list *bios)
{
	bool r;
	unsigned long flags;

	spin_lock_irqsave(&prison->lock, flags);
	r = __unlock(prison, cell, bios);
	spin_unlock_irqrestore(&prison->lock, flags);

	return r;
}
EXPORT_SYMBOL_GPL(dm_cell_unlock_v2);

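/*
 * Rough lifecycle sketch for an exclusive lock (illustration only; the key,
 * level, work item and bio_list names are hypothetical caller-side objects):
 *
 *	prealloc = dm_bio_prison_alloc_cell_v2(prison, GFP_NOIO);
 *
 *	r = dm_cell_lock_v2(prison, &key, level, prealloc, &cell);
 *	if (r < 0) {
 *		// -EBUSY: already exclusively locked; prealloc is still ours
 *	} else if (r > 0) {
 *		// granted, but shared holders remain: defer the work until
 *		// they have all called dm_cell_put_v2()
 *		dm_cell_quiesce_v2(prison, cell, &continuation_work);
 *	} else {
 *		// granted with no shared holders: proceed immediately
 *	}
 *
 *	// ... later, once the work is done ...
 *	if (dm_cell_unlock_v2(prison, cell, &deferred_bios))
 *		dm_bio_prison_free_cell_v2(prison, cell);
 *
 *	if (cell != prealloc)
 *		dm_bio_prison_free_cell_v2(prison, prealloc);
 */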
/*----------------------------------------------------------------*/

int __init dm_bio_prison_init_v2(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell_v2, 0);
	if (!_cell_cache)
		return -ENOMEM;

	return 0;
}

void dm_bio_prison_exit_v2(void)
{
	kmem_cache_destroy(_cell_cache);
	_cell_cache = NULL;
}