1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
14 #include "dlm_internal.h"
15 #include "lockspace.h"
/* Debugfs hooks: real implementations when CONFIG_DLM_DEBUG is set,
 * no-op inline stubs otherwise.
 * NOTE(review): the `#else` and `#endif` lines are not visible in this
 * listing — confirm they exist between/after these definitions, otherwise
 * the declarations and stubs collide. */
25 #ifdef CONFIG_DLM_DEBUG
26 int dlm_create_debug_file(struct dlm_ls *ls);
27 void dlm_delete_debug_file(struct dlm_ls *ls);
29 static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
30 static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
/* Serializes lockspace creation/teardown (taken in dlm_new_lockspace and
 * around the final-release path in release_lockspace). */
34 static struct mutex ls_lock;
/* Global list of all lockspaces; entries are struct dlm_ls linked via
 * ls_list.  Guarded by lslist_lock. */
35 static struct list_head lslist;
36 static spinlock_t lslist_lock;
/* kthread started by dlm_scand_start(), stopped by dlm_scand_stop(). */
37 static struct task_struct * scand_task;
/* sysfs "control" attribute store handler: parses an integer from the
 * user-supplied buffer.  The remainder of the body (what value `n`
 * triggers) is not visible in this listing — TODO confirm against the
 * full source. */
40 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43 int n = simple_strtol(buf, NULL, 0);
/* sysfs "event_done" store handler: userspace writes the result of a
 * uevent here.  Records the value, sets LSFL_UEVENT_WAIT and wakes the
 * waiter sleeping in do_uevent(). */
58 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
60 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
61 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
62 wake_up(&ls->ls_uevent_wait);
/* sysfs "id" show handler: prints the lockspace's global id. */
66 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
68 return sprintf(buf, "%u\n", ls->ls_global_id);
/* sysfs "id" store handler: userspace assigns the lockspace's global id. */
71 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
73 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
/* Per-attribute wrapper (struct dlm_attr): embeds the generic sysfs
 * attribute plus lockspace-specific show/store callbacks, dispatched by
 * dlm_attr_show()/dlm_attr_store() via container_of(). */
78 struct attribute attr;
79 ssize_t (*show)(struct dlm_ls *, char *);
80 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
/* "control": write-only (S_IWUSR) command attribute. */
83 static struct dlm_attr dlm_attr_control = {
84 .attr = {.name = "control", .mode = S_IWUSR},
85 .store = dlm_control_store
/* "event_done": write-only; userspace acknowledges uevents here. */
88 static struct dlm_attr dlm_attr_event = {
89 .attr = {.name = "event_done", .mode = S_IWUSR},
90 .store = dlm_event_store
/* "id": readable and writable global lockspace id.  The .show/.store
 * initializer lines are not visible in this listing — presumably
 * dlm_id_show/dlm_id_store; confirm against the full source. */
93 static struct dlm_attr dlm_attr_id = {
94 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
/* Default attribute array installed via dlm_ktype.default_attrs.
 * (The &dlm_attr_id.attr entry and NULL terminator are outside this
 * excerpt.) */
99 static struct attribute *dlm_attrs[] = {
100 &dlm_attr_control.attr,
101 &dlm_attr_event.attr,
/* Generic sysfs show dispatcher: recovers the dlm_ls from the embedded
 * kobject and the dlm_attr from the generic attribute, then forwards to
 * the attribute's show callback (0 if it has none). */
106 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
109 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
110 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
111 return a->show ? a->show(ls, buf) : 0;
/* Generic sysfs store dispatcher, mirror of dlm_attr_show().  With no
 * store callback the write is silently accepted (returns len). */
114 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
115 const char *buf, size_t len)
117 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
118 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
119 return a->store ? a->store(ls, buf, len) : len;
/* sysfs_ops wiring the generic dispatchers above into the kobject type. */
122 static struct sysfs_ops dlm_attr_ops = {
123 .show = dlm_attr_show,
124 .store = dlm_attr_store,
/* kobj_type for per-lockspace kobjects (ls->ls_kobj). */
127 static struct kobj_type dlm_ktype = {
128 .default_attrs = dlm_attrs,
129 .sysfs_ops = &dlm_attr_ops,
/* Top-level "dlm" kset under the kernel subsystem; all lockspace
 * kobjects are registered beneath it (see kobject_setup()). */
132 static struct kset dlm_kset = {
133 .subsys = &kernel_subsys,
134 .kobj = {.name = "dlm",},
/* Prepare (but do not register) the lockspace's kobject: copy the
 * NUL-padded lockspace name into the kobject name and attach it to the
 * dlm kset/ktype.  Registration happens later in new_lockspace(). */
138 static int kobject_setup(struct dlm_ls *ls)
140 char lsname[DLM_LOCKSPACE_LEN];
143 memset(lsname, 0, DLM_LOCKSPACE_LEN);
/* assumes ls->ls_name is NUL-terminated — see the kzalloc/memcpy in
 * new_lockspace(); confirm ls_name has room for the terminator. */
144 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
146 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
150 ls->ls_kobj.kset = &dlm_kset;
151 ls->ls_kobj.ktype = &dlm_ktype;
/* Emit an ONLINE (in != 0, presumably — the branch lines are elided) or
 * OFFLINE uevent for the lockspace, then block until userspace writes
 * the result to the "event_done" sysfs file (dlm_event_store sets
 * LSFL_UEVENT_WAIT and wakes us).  Returns the userspace-supplied
 * result, or the interrupted-wait error. */
155 static int do_uevent(struct dlm_ls *ls, int in)
160 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
162 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
164 error = wait_event_interruptible(ls->ls_uevent_wait,
165 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
169 error = ls->ls_uevent_result;
/* Module-init-time setup: initialize the global lockspace bookkeeping
 * and register the "dlm" kset in sysfs.  Returns 0 or the
 * kset_register() error. */
175 int dlm_lockspace_init(void)
180 mutex_init(&ls_lock);
181 INIT_LIST_HEAD(&lslist);
182 spin_lock_init(&lslist_lock);
184 error = kset_register(&dlm_kset);
186 printk("dlm_lockspace_init: cannot register kset %d\n", error);
/* Module-exit counterpart of dlm_lockspace_init(): drop the "dlm" kset. */
190 void dlm_lockspace_exit(void)
192 kset_unregister(&dlm_kset);
/* Scanner kthread main loop: periodically walks every lockspace
 * (scan body elided from this listing) and sleeps scan_secs between
 * passes.
 * NOTE(review): lslist is traversed here without taking lslist_lock —
 * confirm whether lockspace add/remove can race with this walk. */
195 static int dlm_scand(void *data)
199 while (!kthread_should_stop()) {
200 list_for_each_entry(ls, &lslist, ls_list)
202 schedule_timeout_interruptible(dlm_config.scan_secs * HZ);
/* Start the scanner kthread; on success the task is recorded in
 * scand_task (assignment line elided from this listing). */
207 static int dlm_scand_start(void)
209 struct task_struct *p;
212 p = kthread_run(dlm_scand, NULL, "dlm_scand");
/* Stop the scanner kthread started by dlm_scand_start(). */
220 static void dlm_scand_stop(void)
222 kthread_stop(scand_task);
/* Look up a lockspace by name (exact length + bytewise compare) under
 * lslist_lock.  The found/not-found return handling is elided from this
 * listing; presumably returns the dlm_ls or NULL — confirm whether it
 * also bumps ls_count like the other find helpers. */
225 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
229 spin_lock(&lslist_lock);
231 list_for_each_entry(ls, &lslist, ls_list) {
232 if (ls->ls_namelen == namelen &&
233 memcmp(ls->ls_name, name, namelen) == 0)
238 spin_unlock(&lslist_lock);
/* Look up a lockspace by its global id under lslist_lock.  The
 * reference-count increment and return lines are elided from this
 * listing; callers are expected to balance with dlm_put_lockspace(). */
242 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
246 spin_lock(&lslist_lock);
248 list_for_each_entry(ls, &lslist, ls_list) {
249 if (ls->ls_global_id == id) {
256 spin_unlock(&lslist_lock);
/* "Local" lookup: the opaque lockspace handle is the dlm_ls pointer
 * itself.  Takes lslist_lock, presumably to validate the pointer and
 * bump ls_count (those lines are elided) — balance with
 * dlm_put_lockspace(). */
260 struct dlm_ls *dlm_find_lockspace_local(void *id)
262 struct dlm_ls *ls = id;
264 spin_lock(&lslist_lock);
266 spin_unlock(&lslist_lock);
/* Drop a reference taken by one of the dlm_find_lockspace_* helpers
 * (the ls_count decrement is elided from this listing). */
270 void dlm_put_lockspace(struct dlm_ls *ls)
272 spin_lock(&lslist_lock);
274 spin_unlock(&lslist_lock);
/* Unlink the lockspace from the global list once its reference count
 * has dropped to zero.  The surrounding retry loop (for the
 * ls_count != 0 case) is elided from this listing — presumably it
 * re-checks until no references remain. */
277 static void remove_lockspace(struct dlm_ls *ls)
280 spin_lock(&lslist_lock);
281 if (ls->ls_count == 0) {
282 list_del(&ls->ls_list);
283 spin_unlock(&lslist_lock);
286 spin_unlock(&lslist_lock);
/* Start the three service threads the DLM needs before any lockspace
 * can operate: the AST delivery thread, the scanner, and the lowcomms
 * messaging layer.  Error-unwind paths are elided from this listing. */
291 static int threads_start(void)
295 /* Thread which processes lock requests for all lockspaces */
296 error = dlm_astd_start();
298 log_print("cannot start dlm_astd thread %d", error);
302 error = dlm_scand_start();
304 log_print("cannot start dlm_scand thread %d", error);
308 /* Thread for sending/receiving messages for all lockspaces */
309 error = dlm_lowcomms_start();
311 log_print("cannot start dlm lowcomms %d", error);
/* Counterpart of threads_start(); body not visible in this listing —
 * presumably stops astd, scand and lowcomms in reverse order. */
325 static void threads_stop(void)
/* Create and fully initialize a new lockspace:
 *   - validate name length and LVB length (must be non-zero multiple of 8)
 *   - reject duplicate names (dlm_find_lockspace_name)
 *   - allocate the dlm_ls plus its rsb/lkb/dir hash tables
 *   - initialize all lists, locks and recovery state
 *   - start dlm_recoverd, publish on lslist, register sysfs kobject,
 *     and emit the "online" uevent
 * On success *lockspace presumably receives the ls pointer (assignment
 * line elided).  The goto-unwind labels for each failure point are
 * partially elided from this listing. */
332 static int new_lockspace(char *name, int namelen, void **lockspace,
333 uint32_t flags, int lvblen)
336 int i, size, error = -ENOMEM;
338 if (namelen > DLM_LOCKSPACE_LEN)
341 if (!lvblen || (lvblen % 8))
/* Pin the module for the lifetime of the lockspace; dropped in
 * release_lockspace() or on any failure below. */
344 if (!try_module_get(THIS_MODULE))
347 ls = dlm_find_lockspace_name(name, namelen);
350 module_put(THIS_MODULE);
/* +namelen: ls_name is a trailing array in struct dlm_ls; kzalloc
 * guarantees the name stays NUL-terminated — TODO confirm the declared
 * ls_name size leaves room for the terminator. */
354 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
357 memcpy(ls->ls_name, name, namelen);
358 ls->ls_namelen = namelen;
359 ls->ls_exflags = flags;
360 ls->ls_lvblen = lvblen;
/* Resource (rsb) hash table: active list, tossed list, rwlock per bucket. */
364 size = dlm_config.rsbtbl_size;
365 ls->ls_rsbtbl_size = size;
367 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
370 for (i = 0; i < size; i++) {
371 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
372 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
373 rwlock_init(&ls->ls_rsbtbl[i].lock);
/* Lock (lkb) hash table; per-bucket counter seeds lock-id generation. */
376 size = dlm_config.lkbtbl_size;
377 ls->ls_lkbtbl_size = size;
379 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
382 for (i = 0; i < size; i++) {
383 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
384 rwlock_init(&ls->ls_lkbtbl[i].lock);
385 ls->ls_lkbtbl[i].counter = 1;
/* Directory hash table. */
388 size = dlm_config.dirtbl_size;
389 ls->ls_dirtbl_size = size;
391 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
394 for (i = 0; i < size; i++) {
395 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
396 rwlock_init(&ls->ls_dirtbl[i].lock);
399 INIT_LIST_HEAD(&ls->ls_waiters);
400 mutex_init(&ls->ls_waiters_mutex);
/* Membership state starts empty; filled in during recovery. */
402 INIT_LIST_HEAD(&ls->ls_nodes);
403 INIT_LIST_HEAD(&ls->ls_nodes_gone);
404 ls->ls_num_nodes = 0;
405 ls->ls_low_nodeid = 0;
406 ls->ls_total_weight = 0;
407 ls->ls_node_array = NULL;
409 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
410 ls->ls_stub_rsb.res_ls = ls;
412 ls->ls_debug_dentry = NULL;
414 init_waitqueue_head(&ls->ls_uevent_wait);
415 ls->ls_uevent_result = 0;
/* Recovery machinery. */
417 ls->ls_recoverd_task = NULL;
418 mutex_init(&ls->ls_recoverd_active);
419 spin_lock_init(&ls->ls_recover_lock);
420 ls->ls_recover_status = 0;
421 ls->ls_recover_seq = 0;
422 ls->ls_recover_args = NULL;
423 init_rwsem(&ls->ls_in_recovery);
424 INIT_LIST_HEAD(&ls->ls_requestqueue);
425 mutex_init(&ls->ls_requestqueue_mutex);
427 ls->ls_recover_buf = kmalloc(dlm_config.buffer_size, GFP_KERNEL);
428 if (!ls->ls_recover_buf)
431 INIT_LIST_HEAD(&ls->ls_recover_list);
432 spin_lock_init(&ls->ls_recover_list_lock);
433 ls->ls_recover_list_count = 0;
434 init_waitqueue_head(&ls->ls_wait_general);
435 INIT_LIST_HEAD(&ls->ls_root_list);
436 init_rwsem(&ls->ls_root_sem);
/* Hold the lockspace "in recovery" until the first recovery completes. */
438 down_write(&ls->ls_in_recovery);
440 error = dlm_recoverd_start(ls);
442 log_error(ls, "can't start dlm_recoverd %d", error);
/* Publish on the global list before sysfs/uevent so lookups can see it. */
446 spin_lock(&lslist_lock);
447 list_add(&ls->ls_list, &lslist);
448 spin_unlock(&lslist_lock);
450 dlm_create_debug_file(ls);
452 error = kobject_setup(ls);
456 error = kobject_register(&ls->ls_kobj);
/* Tell userspace the lockspace is coming online and wait for its ack. */
460 error = do_uevent(ls, 1);
/* --- error unwind: tear down in reverse order of construction --- */
468 kobject_unregister(&ls->ls_kobj);
470 dlm_delete_debug_file(ls);
471 spin_lock(&lslist_lock);
472 list_del(&ls->ls_list);
473 spin_unlock(&lslist_lock);
474 dlm_recoverd_stop(ls);
476 kfree(ls->ls_recover_buf);
478 kfree(ls->ls_dirtbl);
480 kfree(ls->ls_lkbtbl);
482 kfree(ls->ls_rsbtbl);
486 module_put(THIS_MODULE);
/* Public entry point for lockspace creation.  Serializes against other
 * create/release operations with ls_lock, lazily starts the shared
 * service threads (presumably only for the first lockspace — the count
 * check is elided from this listing), then delegates to new_lockspace(). */
490 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
491 uint32_t flags, int lvblen)
495 mutex_lock(&ls_lock);
497 error = threads_start();
501 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
505 mutex_unlock(&ls_lock);
509 /* Return 1 if the lockspace still has active remote locks,
510 * 2 if the lockspace still has active local locks.
/* Scan the lkb table to decide whether the lockspace still holds locks.
 * Per the header comment above: returns 1 for remote locks only, 2 for
 * local locks (an lkb with lkb_nodeid == 0 is local).  The
 * lkb_found/return bookkeeping lines are elided from this listing. */
512 static int lockspace_busy(struct dlm_ls *ls)
514 int i, lkb_found = 0;
517 /* NOTE: We check the lockidtbl here rather than the resource table.
518 This is because there may be LKBs queued as ASTs that have been
519 unlinked from their RSBs and are pending deletion once the AST has
522 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
523 read_lock(&ls->ls_lkbtbl[i].lock);
524 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
526 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
/* nodeid 0 => lock mastered locally: report "local locks" (2). */
528 if (!lkb->lkb_nodeid) {
529 read_unlock(&ls->ls_lkbtbl[i].lock);
534 read_unlock(&ls->ls_lkbtbl[i].lock);
/* Tear down a lockspace.  `force` relaxes the busy check (see the
 * comment above dlm_release_lockspace); the busy/force decision branch
 * is elided from this listing.  Stops recovery, unlinks from the global
 * list, then frees every table and list the lockspace owns, drops the
 * sysfs kobject, and releases the module reference taken at creation. */
539 static int release_lockspace(struct dlm_ls *ls, int force)
543 struct list_head *head;
545 int busy = lockspace_busy(ls);
553 dlm_recoverd_stop(ls);
555 remove_lockspace(ls);
557 dlm_delete_debug_file(ls);
561 kfree(ls->ls_recover_buf);
564 * Free direntry structs.
568 kfree(ls->ls_dirtbl);
571 * Free all lkb's on lkbtbl[] lists.
574 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
575 head = &ls->ls_lkbtbl[i].list;
576 while (!list_empty(head)) {
577 lkb = list_entry(head->next, struct dlm_lkb,
580 list_del(&lkb->lkb_idtbl_list);
/* Master-copy lkbs own their LVB buffer; free it with the lkb. */
584 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
585 free_lvb(lkb->lkb_lvbptr);
592 kfree(ls->ls_lkbtbl);
595 * Free all rsb's on rsbtbl[] lists
598 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
599 head = &ls->ls_rsbtbl[i].list;
600 while (!list_empty(head)) {
601 rsb = list_entry(head->next, struct dlm_rsb,
604 list_del(&rsb->res_hashchain);
/* Tossed (inactive, cached) rsbs are freed the same way. */
608 head = &ls->ls_rsbtbl[i].toss;
609 while (!list_empty(head)) {
610 rsb = list_entry(head->next, struct dlm_rsb,
612 list_del(&rsb->res_hashchain);
617 kfree(ls->ls_rsbtbl);
620 * Free structures on any other lists
623 kfree(ls->ls_recover_args);
624 dlm_clear_free_entries(ls);
625 dlm_clear_members(ls);
626 dlm_clear_members_gone(ls);
627 kfree(ls->ls_node_array);
628 kobject_unregister(&ls->ls_kobj);
/* Under ls_lock: presumably stop the shared service threads when the
 * last lockspace goes away (the body between lock/unlock is elided). */
631 mutex_lock(&ls_lock);
635 mutex_unlock(&ls_lock);
637 module_put(THIS_MODULE);
642 * Called when a system has released all its locks and is not going to use the
643 * lockspace any longer. We free everything we're managing for this lockspace.
644 * Remaining nodes will go through the recovery process as if we'd died. The
645 * lockspace must continue to function as usual, participating in recoveries,
646 * until this returns.
648 * Force has 4 possible values:
649 * 0 - don't destroy lockspace if it has any LKBs
650 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
651 * 2 - destroy lockspace regardless of LKBs
652 * 3 - destroy lockspace as part of a forced shutdown
/* Public entry point for lockspace release (contract documented in the
 * comment block above).  Resolves the opaque handle, drops the lookup
 * reference, and delegates to release_lockspace().  Error handling for
 * a failed lookup is elided from this listing. */
655 int dlm_release_lockspace(void *lockspace, int force)
659 ls = dlm_find_lockspace_local(lockspace);
662 dlm_put_lockspace(ls);
663 return release_lockspace(ls, force);