/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */
#include "dm-block-manager.h"
#include "dm-persistent-data-internal.h"
#include "../dm-bufio.h"

#include <linux/crc32c.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/device-mapper.h>
#include <linux/stacktrace.h>

#define DM_MSG_PREFIX "block manager"

/*----------------------------------------------------------------*/
#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING

/*
 * This is a read/write semaphore with a couple of differences.
 *
 * i) There is a restriction on the number of concurrent read locks that
 * may be held at once.  This is just an implementation detail.
 *
 * ii) Recursive locking attempts are detected and return EINVAL.  A stack
 * trace is also emitted for the previous lock acquisition.
 *
 * iii) Priority is given to write locks.
 */
#define MAX_HOLDERS 4
#define MAX_STACK 10

typedef unsigned long stack_entries[MAX_STACK];
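/*
 * Lock state is encoded in 'count' (a sketch of the invariants; only the
 * BUG_ON()s below enforce them):
 *
 *	count == -1	write locked
 *	count ==  0	unlocked
 *	count  >  0	read locked by 'count' tasks, up to MAX_HOLDERS
 */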
struct block_lock {
	spinlock_t lock;
	__s32 count;
	struct list_head waiters;
	struct task_struct *holders[MAX_HOLDERS];

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace traces[MAX_HOLDERS];
	stack_entries entries[MAX_HOLDERS];
#endif
};

struct waiter {
	struct list_head list;
	struct task_struct *task;
	int wants_write;
};
static unsigned __find_holder(struct block_lock *lock,
			      struct task_struct *task)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++)
		if (lock->holders[i] == task)
			break;

	BUG_ON(i == MAX_HOLDERS);
	return i;
}
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	struct stack_trace *t;
#endif

	get_task_struct(task);
	lock->holders[h] = task;

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	t = lock->traces + h;
	t->nr_entries = 0;
	t->max_entries = MAX_STACK;
	t->entries = lock->entries[h];
	t->skip = 2;
	save_stack_trace(t);
#endif
}
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
	unsigned h = __find_holder(lock, task);
	lock->holders[h] = NULL;
	put_task_struct(task);
}
static int __check_holder(struct block_lock *lock)
{
	unsigned i;

	for (i = 0; i < MAX_HOLDERS; i++) {
		if (lock->holders[i] == current) {
			DMERR("recursive lock detected in metadata");
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			DMERR("previously held here:");
			print_stack_trace(lock->traces + i, 4);

			DMERR("subsequent acquisition attempted here:");
			dump_stack();
#endif
			return -EINVAL;
		}
	}

	return 0;
}
static void __wait(struct waiter *w)
{
	for (;;) {
		set_task_state(current, TASK_UNINTERRUPTIBLE);

		if (!w->task)
			break;

		schedule();
	}

	set_task_state(current, TASK_RUNNING);
}
static void __wake_waiter(struct waiter *w)
{
	struct task_struct *task;

	list_del(&w->list);
	task = w->task;
	smp_mb();
	w->task = NULL;
	wake_up_process(task);
}
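/*
 * It is the NULL-ing of w->task above, not wake_up_process() itself, that
 * releases the waiter: __wait() rechecks w->task on each pass round its
 * loop, so a spurious wakeup is harmless.  The smp_mb() orders the
 * list_del() before the store that lets the waiter proceed.
 */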
/*
 * We either wake a few readers or a single writer.
 */
static void __wake_many(struct block_lock *lock)
{
	struct waiter *w, *tmp;

	BUG_ON(lock->count < 0);
	list_for_each_entry_safe(w, tmp, &lock->waiters, list) {
		if (lock->count >= MAX_HOLDERS)
			return;

		if (w->wants_write) {
			if (lock->count > 0)
				return; /* still read locked */

			lock->count = -1;
			__add_holder(lock, w->task);
			__wake_waiter(w);
			return;
		}

		lock->count++;
		__add_holder(lock, w->task);
		__wake_waiter(w);
	}
}
static void bl_init(struct block_lock *lock)
{
	int i;

	spin_lock_init(&lock->lock);
	lock->count = 0;
	INIT_LIST_HEAD(&lock->waiters);
	for (i = 0; i < MAX_HOLDERS; i++)
		lock->holders[i] = NULL;
}
static int __available_for_read(struct block_lock *lock)
{
	return lock->count >= 0 &&
		lock->count < MAX_HOLDERS &&
		list_empty(&lock->waiters);
}
static int bl_down_read(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);

	w.task = current;
	w.wants_write = 0;
	list_add_tail(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);
	return 0;
}
static int bl_down_read_nonblock(struct block_lock *lock)
{
	int r;

	spin_lock(&lock->lock);
	r = __check_holder(lock);
	if (r)
		goto out;

	if (__available_for_read(lock)) {
		lock->count++;
		__add_holder(lock, current);
		r = 0;
	} else
		r = -EWOULDBLOCK;

out:
	spin_unlock(&lock->lock);
	return r;
}
static void bl_up_read(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	BUG_ON(lock->count <= 0);
	__del_holder(lock, current);
	--lock->count;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
static int bl_down_write(struct block_lock *lock)
{
	int r;
	struct waiter w;

	spin_lock(&lock->lock);
	r = __check_holder(lock);

	if (r) {
		spin_unlock(&lock->lock);
		return r;
	}

	if (lock->count == 0 && list_empty(&lock->waiters)) {
		lock->count = -1;
		__add_holder(lock, current);
		spin_unlock(&lock->lock);
		return 0;
	}

	get_task_struct(current);
	w.task = current;
	w.wants_write = 1;

	/*
	 * Writers are given priority.  We know there's only one mutator in
	 * the system, so we ignore the ordering reversal.
	 */
	list_add(&w.list, &lock->waiters);
	spin_unlock(&lock->lock);

	__wait(&w);
	put_task_struct(current);

	return 0;
}
static void bl_up_write(struct block_lock *lock)
{
	spin_lock(&lock->lock);
	__del_holder(lock, current);
	lock->count = 0;
	if (!list_empty(&lock->waiters))
		__wake_many(lock);
	spin_unlock(&lock->lock);
}
static void report_recursive_bug(dm_block_t b, int r)
{
	if (r == -EINVAL)
		DMERR("recursive acquisition of block %llu requested.",
		      (unsigned long long) b);
}
#else  /* !CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
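/*
 * With the debug config off, the per-buffer lock tracking compiles away
 * completely: taking a lock always succeeds and unlocking is a no-op, so
 * production builds pay nothing for the recursion checks above.
 */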
#define bl_init(x) do { } while (0)
#define bl_down_read(x) 0
#define bl_down_read_nonblock(x) 0
#define bl_up_read(x) do { } while (0)
#define bl_down_write(x) 0
#define bl_up_write(x) do { } while (0)
#define report_recursive_bug(x, y) do { } while (0)

#endif /* CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING */
/*----------------------------------------------------------------*/

/*
 * Block manager is currently implemented using dm-bufio.  struct
 * dm_block_manager and struct dm_block map directly onto a couple of
 * structs in the bufio interface.  I want to retain the freedom to move
 * away from bufio in the future.  So these structs are just cast within
 * this .c file, rather than making it through to the public interface.
 */
static struct dm_buffer *to_buffer(struct dm_block *b)
{
	return (struct dm_buffer *) b;
}
dm_block_t dm_block_location(struct dm_block *b)
{
	return dm_bufio_get_block_number(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_location);

void *dm_block_data(struct dm_block *b)
{
	return dm_bufio_get_block_data(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_block_data);
struct buffer_aux {
	struct dm_block_validator *validator;
	int write_locked;

#ifdef CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING
	struct block_lock lock;
#endif
};
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	aux->validator = NULL;
	bl_init(&aux->lock);
}

static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
	struct buffer_aux *aux = dm_bufio_get_aux_data(buf);

	if (aux->validator) {
		aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
			 dm_bufio_get_block_size(dm_bufio_get_client(buf)));
	}
}
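/*
 * dm-bufio invokes the write callback just before a dirty buffer is
 * written back, so a validator's prepare_for_write() (typically stamping
 * a checksum) runs once per writeback rather than on every update.
 */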
/*----------------------------------------------------------------
 * Public interface
 *--------------------------------------------------------------*/
struct dm_block_manager {
	struct dm_bufio_client *bufio;
	bool read_only:1;
};
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
						 unsigned block_size,
						 unsigned cache_size,
						 unsigned max_held_per_thread)
{
	int r;
	struct dm_block_manager *bm;

	bm = kmalloc(sizeof(*bm), GFP_KERNEL);
	if (!bm) {
		r = -ENOMEM;
		goto bad;
	}

	bm->bufio = dm_bufio_client_create(bdev, block_size, max_held_per_thread,
					   sizeof(struct buffer_aux),
					   dm_block_manager_alloc_callback,
					   dm_block_manager_write_callback);
	if (IS_ERR(bm->bufio)) {
		r = PTR_ERR(bm->bufio);
		kfree(bm);
		goto bad;
	}

	bm->read_only = false;

	return bm;

bad:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_block_manager_create);
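/*
 * Example (a sketch, not part of this file): a metadata client creates one
 * block manager per metadata device and tears it down when done.  The
 * block size and lock count are illustrative values only; note that the
 * cache_size argument is not used by this implementation:
 *
 *	struct dm_block_manager *bm;
 *
 *	bm = dm_block_manager_create(bdev, 4096, 0, 1);
 *	if (IS_ERR(bm))
 *		return PTR_ERR(bm);
 *	...
 *	dm_block_manager_destroy(bm);
 */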
void dm_block_manager_destroy(struct dm_block_manager *bm)
{
	dm_bufio_client_destroy(bm->bufio);
	kfree(bm);
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);

unsigned dm_bm_block_size(struct dm_block_manager *bm)
{
	return dm_bufio_get_block_size(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_block_size);

dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm)
{
	return dm_bufio_get_device_size(bm->bufio);
}
static int dm_bm_validate_buffer(struct dm_block_manager *bm,
				 struct dm_buffer *buf,
				 struct buffer_aux *aux,
				 struct dm_block_validator *v)
{
	if (unlikely(!aux->validator)) {
		int r;
		if (!v)
			return 0;
		r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
		if (unlikely(r)) {
			DMERR_LIMIT("%s validator check failed for block %llu", v->name,
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return r;
		}
		aux->validator = v;
	} else {
		if (unlikely(aux->validator != v)) {
			DMERR_LIMIT("validator mismatch (old=%s vs new=%s) for block %llu",
				    aux->validator->name, v ? v->name : "NULL",
				    (unsigned long long) dm_bufio_get_block_number(buf));
			return -EINVAL;
		}
	}

	return 0;
}
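/*
 * A buffer is validated at most once while it sits in the cache: the first
 * locker's validator runs check() and is then remembered in the aux data,
 * and later lockers must present the same validator or get -EINVAL.
 */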
int dm_bm_read_lock(struct dm_block_manager *bm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read(&aux->lock);
	if (unlikely(r)) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_read_lock);
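/*
 * Example (a sketch, not part of this file): the usual pattern for reading
 * a metadata block; 'sb_validator' stands in for whatever validator the
 * caller's on-disk format defines:
 *
 *	struct dm_block *blk;
 *	int r = dm_bm_read_lock(bm, b, &sb_validator, &blk);
 *
 *	if (r)
 *		return r;
 *	do_something_with(dm_block_data(blk));
 *	dm_bm_unlock(blk);
 */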
int dm_bm_write_lock(struct dm_block_manager *bm,
		     dm_block_t b, struct dm_block_validator *v,
		     struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	if (bm->read_only)
		return -EPERM;

	p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}

	aux->write_locked = 1;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_write(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock);
int dm_bm_read_try_lock(struct dm_block_manager *bm,
			dm_block_t b, struct dm_block_validator *v,
			struct dm_block **result)
{
	struct buffer_aux *aux;
	void *p;
	int r;

	p = dm_bufio_get(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);
	if (unlikely(!p))
		return -EWOULDBLOCK;

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_read_nonblock(&aux->lock);
	if (r < 0) {
		dm_bufio_release(to_buffer(*result));
		report_recursive_bug(b, r);
		return r;
	}
	aux->write_locked = 0;

	r = dm_bm_validate_buffer(bm, to_buffer(*result), aux, v);
	if (unlikely(r)) {
		bl_up_read(&aux->lock);
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	return 0;
}
int dm_bm_write_lock_zero(struct dm_block_manager *bm,
			  dm_block_t b, struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	struct buffer_aux *aux;
	void *p;

	if (bm->read_only)
		return -EPERM;

	p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
	if (unlikely(IS_ERR(p)))
		return PTR_ERR(p);

	memset(p, 0, dm_bm_block_size(bm));

	aux = dm_bufio_get_aux_data(to_buffer(*result));
	r = bl_down_write(&aux->lock);
	if (r) {
		dm_bufio_release(to_buffer(*result));
		return r;
	}

	/* the block is freshly zeroed, so adopt the validator without checking */
	aux->write_locked = 1;
	aux->validator = v;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
void dm_bm_unlock(struct dm_block *b)
{
	struct buffer_aux *aux;
	aux = dm_bufio_get_aux_data(to_buffer(b));

	if (aux->write_locked) {
		dm_bufio_mark_buffer_dirty(to_buffer(b));
		bl_up_write(&aux->lock);
	} else
		bl_up_read(&aux->lock);

	dm_bufio_release(to_buffer(b));
}
EXPORT_SYMBOL_GPL(dm_bm_unlock);
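/*
 * Note that unlocking a write-locked block only marks the buffer dirty;
 * nothing reaches the disk until dm_bm_flush() below asks bufio to write
 * the dirty buffers out.
 */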
int dm_bm_flush(struct dm_block_manager *bm)
{
	if (bm->read_only)
		return -EPERM;

	return dm_bufio_write_dirty_buffers(bm->bufio);
}
EXPORT_SYMBOL_GPL(dm_bm_flush);
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
{
	dm_bufio_prefetch(bm->bufio, b, 1);
}

bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
	return bm->read_only;
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);

void dm_bm_set_read_only(struct dm_block_manager *bm)
{
	bm->read_only = true;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);

void dm_bm_set_read_write(struct dm_block_manager *bm)
{
	bm->read_only = false;
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
	return crc32c(~(u32) 0, data, len) ^ init_xor;
}
EXPORT_SYMBOL_GPL(dm_bm_checksum);
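/*
 * Example (a sketch, not part of this file): validators elsewhere in this
 * library checksum everything after the leading __le32 csum field and mix
 * in a per-format XOR salt so blocks of one type aren't mistaken for
 * another; the names below are illustrative:
 *
 *	disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
 *						block_size - sizeof(__le32),
 *						MY_FORMAT_CSUM_XOR));
 */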
/*----------------------------------------------------------------*/

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_DESCRIPTION("Immutable metadata library for dm");

/*----------------------------------------------------------------*/