/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2010 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/* Central locking logic has four stages:

   dlm_lock()
   dlm_unlock()

   request_lock(ls, lkb)
   convert_lock(ls, lkb)
   unlock_lock(ls, lkb)
   cancel_lock(ls, lkb)

   _request_lock(r, lkb)
   _convert_lock(r, lkb)
   _unlock_lock(r, lkb)
   _cancel_lock(r, lkb)

   do_request(r, lkb)
   do_convert(r, lkb)
   do_unlock(r, lkb)
   do_cancel(r, lkb)

   Stage 1 (lock, unlock) is mainly about checking input args and
   splitting into one of the four main operations:

   dlm_lock          = request_lock
   dlm_lock+CONVERT  = convert_lock
   dlm_unlock        = unlock_lock
   dlm_unlock+CANCEL = cancel_lock

   Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
   provided to the next stage.

   Stage 3, _xxxx_lock(), determines if the operation is local or remote.
   When remote, it calls send_xxxx(), when local it calls do_xxxx().

   Stage 4, do_xxxx(), is the guts of the operation.  It manipulates the
   given rsb and lkb and queues callbacks.

   For remote operations, send_xxxx() results in the corresponding do_xxxx()
   function being executed on the remote node.  The connecting send/receive
   calls on local (L) and remote (R) nodes:

   L: send_xxxx()              ->  R: receive_xxxx()
                                   R: do_xxxx()
   L: receive_xxxx_reply()     <-  R: send_xxxx_reply()
*/

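/* An illustrative caller-side sketch (not part of this file; it assumes a
   lockspace handle "ls" from dlm_new_lockspace() and a caller-supplied
   completion callback "my_ast", both hypothetical names):

	struct dlm_lksb lksb;
	int error;

	error = dlm_lock(ls, DLM_LOCK_EX, &lksb, 0, "myres", 5,
			 0, my_ast, &lksb, NULL);

   This enters stage 1, which dispatches to request_lock(); stage 3 then
   runs do_request() if this node masters the rsb, or send_request() to
   the master node otherwise. */
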
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "dlm_internal.h"
#include <linux/dlm_device.h>
#include "memory.h"
#include "lowcomms.h"
#include "requestqueue.h"
#include "util.h"
#include "dir.h"
#include "member.h"
#include "lockspace.h"
#include "ast.h"
#include "lock.h"
#include "rcom.h"
#include "recover.h"
#include "lvb_table.h"
#include "user.h"
#include "config.h"

static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode);
static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int send_remove(struct dlm_rsb *r);
static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb);
static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
				    struct dlm_message *ms);
static int receive_extralen(struct dlm_message *ms);
static void do_purge(struct dlm_ls *ls, int nodeid, int pid);
static void del_timeout(struct dlm_lkb *lkb);
static void toss_rsb(struct kref *kref);

/*
 * Lock compatibility matrix - thanks Steve
 * UN = Unlocked state. Not really a state, used as a flag
 * PD = Padding. Used to make the matrix a nice power of two in size
 * Other states are the same as the VMS DLM.
 * Usage: matrix[grmode+1][rqmode+1] (although m[rq+1][gr+1] is the same)
 */

static const int __dlm_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* UN */
	{1, 1, 1, 1, 1, 1, 1, 0},	/* NL */
	{1, 1, 1, 1, 1, 1, 0, 0},	/* CR */
	{1, 1, 1, 1, 0, 0, 0, 0},	/* CW */
	{1, 1, 1, 0, 1, 0, 0, 0},	/* PR */
	{1, 1, 1, 0, 0, 0, 0, 0},	/* PW */
	{1, 1, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};

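/* For example, using the mode values from <linux/dlmconstants.h>
   (DLM_LOCK_PR == 3, DLM_LOCK_EX == 5): PR is compatible with another PR
   but not with EX,

	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_PR + 1] == 1
	__dlm_compat_matrix[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 0
*/
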
/*
 * This defines the direction of transfer of LVB data.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 *  1 = LVB is returned to the caller
 *  0 = LVB is written to the resource
 * -1 = nothing happens to the LVB
 */

const int dlm_lvb_operations[8][8] = {
        /* UN   NL  CR  CW  PR  PW  EX  PD*/
        {  -1,  1,  1,  1,  1,  1,  1, -1 }, /* UN */
        {  -1,  1,  1,  1,  1,  1,  1,  0 }, /* NL */
        {  -1, -1,  1,  1,  1,  1,  1,  0 }, /* CR */
        {  -1, -1, -1,  1,  1,  1,  1,  0 }, /* CW */
        {  -1, -1, -1, -1,  1,  1,  1,  0 }, /* PR */
        {  -1,  0,  0,  0,  0,  0,  1,  0 }, /* PW */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }, /* EX */
        {  -1,  0,  0,  0,  0,  0,  0,  0 }  /* PD */
};

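/* For example: granting a PR -> EX conversion gives
   dlm_lvb_operations[DLM_LOCK_PR + 1][DLM_LOCK_EX + 1] == 1, so the
   resource's LVB is copied out to the caller, while an EX -> NL
   down-conversion gives [DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] == 0, so the
   caller's LVB is written into the resource (see set_lvb_lock() below). */
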
#define modes_compat(gr, rq) \
	__dlm_compat_matrix[(gr)->lkb_grmode + 1][(rq)->lkb_rqmode + 1]

int dlm_modes_compat(int mode1, int mode2)
{
	return __dlm_compat_matrix[mode1 + 1][mode2 + 1];
}

/*
 * Compatibility matrix for conversions with QUECVT set.
 * Granted mode is the row; requested mode is the column.
 * Usage: matrix[grmode+1][rqmode+1]
 */

static const int __quecvt_compat_matrix[8][8] = {
      /* UN NL CR CW PR PW EX PD */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* UN */
	{0, 0, 1, 1, 1, 1, 1, 0},	/* NL */
	{0, 0, 0, 1, 1, 1, 1, 0},	/* CR */
	{0, 0, 0, 0, 1, 1, 1, 0},	/* CW */
	{0, 0, 0, 1, 0, 1, 1, 0},	/* PR */
	{0, 0, 0, 0, 0, 0, 1, 0},	/* PW */
	{0, 0, 0, 0, 0, 0, 0, 0},	/* EX */
	{0, 0, 0, 0, 0, 0, 0, 0}	/* PD */
};

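/* For example: __quecvt_compat_matrix[DLM_LOCK_NL + 1][DLM_LOCK_EX + 1]
   is 1 while [DLM_LOCK_EX + 1][DLM_LOCK_NL + 1] is 0, i.e. (reading the
   rows above) an NL -> EX conversion is a valid QUECVT request while a
   down-conversion is not. */
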
void dlm_print_lkb(struct dlm_lkb *lkb)
{
	printk(KERN_ERR "lkb: nodeid %d id %x remid %x exflags %x flags %x "
	       "sts %d rq %d gr %d wait_type %d wait_nodeid %d seq %llu\n",
	       lkb->lkb_nodeid, lkb->lkb_id, lkb->lkb_remid, lkb->lkb_exflags,
	       lkb->lkb_flags, lkb->lkb_status, lkb->lkb_rqmode,
	       lkb->lkb_grmode, lkb->lkb_wait_type, lkb->lkb_wait_nodeid,
	       (unsigned long long)lkb->lkb_recover_seq);
}

static void dlm_print_rsb(struct dlm_rsb *r)
{
	printk(KERN_ERR "rsb: nodeid %d master %d dir %d flags %lx first %x "
	       "rlc %d name %s\n",
	       r->res_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
	       r->res_flags, r->res_first_lkid, r->res_recover_locks_count,
	       r->res_name);
}

void dlm_dump_rsb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb;

	dlm_print_rsb(r);

	printk(KERN_ERR "rsb: root_list empty %d recover_list empty %d\n",
	       list_empty(&r->res_root_list), list_empty(&r->res_recover_list));
	printk(KERN_ERR "rsb lookup list\n");
	list_for_each_entry(lkb, &r->res_lookup, lkb_rsb_lookup)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb grant queue:\n");
	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb convert queue:\n");
	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
	printk(KERN_ERR "rsb wait queue:\n");
	list_for_each_entry(lkb, &r->res_waitqueue, lkb_statequeue)
		dlm_print_lkb(lkb);
}

/* Threads cannot use the lockspace while it's being recovered */

static inline void dlm_lock_recovery(struct dlm_ls *ls)
{
	down_read(&ls->ls_in_recovery);
}

void dlm_unlock_recovery(struct dlm_ls *ls)
{
	up_read(&ls->ls_in_recovery);
}

int dlm_lock_recovery_try(struct dlm_ls *ls)
{
	return down_read_trylock(&ls->ls_in_recovery);
}

static inline int can_be_queued(struct dlm_lkb *lkb)
{
	return !(lkb->lkb_exflags & DLM_LKF_NOQUEUE);
}

static inline int force_blocking_asts(struct dlm_lkb *lkb)
{
	return (lkb->lkb_exflags & DLM_LKF_NOQUEUEBAST);
}

static inline int is_demoted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_DEMOTED);
}

static inline int is_altmode(struct dlm_lkb *lkb)
{
	return (lkb->lkb_sbflags & DLM_SBF_ALTMODE);
}

static inline int is_granted(struct dlm_lkb *lkb)
{
	return (lkb->lkb_status == DLM_LKSTS_GRANTED);
}

static inline int is_remote(struct dlm_rsb *r)
{
	DLM_ASSERT(r->res_nodeid >= 0, dlm_print_rsb(r););
	return !!r->res_nodeid;
}

static inline int is_process_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_nodeid && !(lkb->lkb_flags & DLM_IFL_MSTCPY));
}

static inline int is_master_copy(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & DLM_IFL_MSTCPY) ? 1 : 0;
}

static inline int middle_conversion(struct dlm_lkb *lkb)
{
	if ((lkb->lkb_grmode==DLM_LOCK_PR && lkb->lkb_rqmode==DLM_LOCK_CW) ||
	    (lkb->lkb_rqmode==DLM_LOCK_PR && lkb->lkb_grmode==DLM_LOCK_CW))
		return 1;
	return 0;
}

static inline int down_conversion(struct dlm_lkb *lkb)
{
	return (!middle_conversion(lkb) && lkb->lkb_rqmode < lkb->lkb_grmode);
}

static inline int is_overlap_unlock(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_UNLOCK;
}

static inline int is_overlap_cancel(struct dlm_lkb *lkb)
{
	return lkb->lkb_flags & DLM_IFL_OVERLAP_CANCEL;
}

static inline int is_overlap(struct dlm_lkb *lkb)
{
	return (lkb->lkb_flags & (DLM_IFL_OVERLAP_UNLOCK |
				  DLM_IFL_OVERLAP_CANCEL));
}

static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
{
	if (is_master_copy(lkb))
		return;

	del_timeout(lkb);

	DLM_ASSERT(lkb->lkb_lksb, dlm_print_lkb(lkb););

	/* if the operation was a cancel, then return -DLM_ECANCEL, if a
	   timeout caused the cancel then return -ETIMEDOUT */
	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_TIMEOUT_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_TIMEOUT_CANCEL;
		rv = -ETIMEDOUT;
	}

	if (rv == -DLM_ECANCEL && (lkb->lkb_flags & DLM_IFL_DEADLOCK_CANCEL)) {
		lkb->lkb_flags &= ~DLM_IFL_DEADLOCK_CANCEL;
		rv = -EDEADLK;
	}

	dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}

static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	queue_cast(r, lkb,
		   is_overlap_unlock(lkb) ? -DLM_EUNLOCK : -DLM_ECANCEL);
}

static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
{
	if (is_master_copy(lkb)) {
		send_bast(r, lkb, rqmode);
	} else {
		dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
	}
}

/*
 * Basic operations on rsb's and lkb's
 */

/* This is only called to add a reference when the code already holds
   a valid reference to the rsb, so there's no need for locking. */

static inline void hold_rsb(struct dlm_rsb *r)
{
	kref_get(&r->res_ref);
}

void dlm_hold_rsb(struct dlm_rsb *r)
{
	hold_rsb(r);
}

/* When all references to the rsb are gone it's transferred to
   the tossed list for later disposal. */

static void put_rsb(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	uint32_t bucket = r->res_bucket;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	kref_put(&r->res_ref, toss_rsb);
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
}

void dlm_put_rsb(struct dlm_rsb *r)
{
	put_rsb(r);
}

static int pre_rsb_struct(struct dlm_ls *ls)
{
	struct dlm_rsb *r1, *r2;
	int count = 0;

	spin_lock(&ls->ls_new_rsb_spin);
	if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
		spin_unlock(&ls->ls_new_rsb_spin);
		return 0;
	}
	spin_unlock(&ls->ls_new_rsb_spin);

	r1 = dlm_allocate_rsb(ls);
	r2 = dlm_allocate_rsb(ls);

	spin_lock(&ls->ls_new_rsb_spin);
	if (r1) {
		list_add(&r1->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	if (r2) {
		list_add(&r2->res_hashchain, &ls->ls_new_rsb);
		ls->ls_new_rsb_count++;
	}
	count = ls->ls_new_rsb_count;
	spin_unlock(&ls->ls_new_rsb_spin);

	if (!count)
		return -ENOMEM;
	return 0;
}

/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
   unlock any spinlocks, go back and call pre_rsb_struct again.
   Otherwise, take an rsb off the list and return it. */

static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
			  struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r;
	int count;

	spin_lock(&ls->ls_new_rsb_spin);
	if (list_empty(&ls->ls_new_rsb)) {
		count = ls->ls_new_rsb_count;
		spin_unlock(&ls->ls_new_rsb_spin);
		log_debug(ls, "find_rsb retry %d %d %s",
			  count, dlm_config.ci_new_rsb_count, name);
		return -EAGAIN;
	}

	r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
	list_del(&r->res_hashchain);
	/* Convert the empty list_head to a NULL rb_node for tree usage: */
	memset(&r->res_hashnode, 0, sizeof(struct rb_node));
	ls->ls_new_rsb_count--;
	spin_unlock(&ls->ls_new_rsb_spin);

	r->res_ls = ls;
	r->res_length = len;
	memcpy(r->res_name, name, len);
	mutex_init(&r->res_mutex);

	INIT_LIST_HEAD(&r->res_lookup);
	INIT_LIST_HEAD(&r->res_grantqueue);
	INIT_LIST_HEAD(&r->res_convertqueue);
	INIT_LIST_HEAD(&r->res_waitqueue);
	INIT_LIST_HEAD(&r->res_root_list);
	INIT_LIST_HEAD(&r->res_recover_list);

	*r_ret = r;
	return 0;
}

static int rsb_cmp(struct dlm_rsb *r, const char *name, int nlen)
{
	char maxname[DLM_RESNAME_MAXLEN];

	memset(maxname, 0, DLM_RESNAME_MAXLEN);
	memcpy(maxname, name, nlen);
	return memcmp(r->res_name, maxname, DLM_RESNAME_MAXLEN);
}

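/* For example: with nlen == 3, comparing "foo" against an rsb named
   "foobar" zero-pads "foo" to DLM_RESNAME_MAXLEN before the memcmp, so
   a name that is a prefix of another still compares as different. */
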
int dlm_search_rsb_tree(struct rb_root *tree, char *name, int len,
			struct dlm_rsb **r_ret)
{
	struct rb_node *node = tree->rb_node;
	struct dlm_rsb *r;
	int rc;

	while (node) {
		r = rb_entry(node, struct dlm_rsb, res_hashnode);
		rc = rsb_cmp(r, name, len);
		if (rc < 0)
			node = node->rb_left;
		else if (rc > 0)
			node = node->rb_right;
		else
			goto found;
	}
	*r_ret = NULL;
	return -EBADR;

 found:
	*r_ret = r;
	return 0;
}

static int rsb_insert(struct dlm_rsb *rsb, struct rb_root *tree)
{
	struct rb_node **newn = &tree->rb_node;
	struct rb_node *parent = NULL;
	int rc;

	while (*newn) {
		struct dlm_rsb *cur = rb_entry(*newn, struct dlm_rsb,
					       res_hashnode);

		parent = *newn;
		rc = rsb_cmp(cur, rsb->res_name, rsb->res_length);
		if (rc < 0)
			newn = &parent->rb_left;
		else if (rc > 0)
			newn = &parent->rb_right;
		else {
			log_print("rsb_insert match");
			dlm_dump_rsb(rsb);
			dlm_dump_rsb(cur);
			return -EEXIST;
		}
	}

	rb_link_node(&rsb->res_hashnode, parent, newn);
	rb_insert_color(&rsb->res_hashnode, tree);
	return 0;
}

/*
 * Find rsb in rsbtbl and potentially create/add one
 *
 * Delaying the release of rsb's has a similar benefit to applications keeping
 * NL locks on an rsb, but without the guarantee that the cached master value
 * will still be valid when the rsb is reused.  Apps aren't always smart enough
 * to keep NL locks on an rsb that they may lock again shortly; this can lead
 * to excessive master lookups and removals if we don't delay the release.
 *
 * Searching for an rsb means looking through both the normal list and toss
 * list.  When found on the toss list the rsb is moved to the normal list with
 * ref count of 1; when found on normal list the ref count is incremented.
 *
 * rsb's on the keep list are being used locally and refcounted.
 * rsb's on the toss list are not being used locally, and are not refcounted.
 *
 * The toss list rsb's were either
 * - previously used locally but not any more (were on keep list, then
 *   moved to toss list when last refcount dropped)
 * - created and put on toss list as a directory record for a lookup
 *   (we are the dir node for the res, but are not using the res right now,
 *   but some other node is)
 *
 * The purpose of find_rsb() is to return a refcounted rsb for local use.
 * So, if the given rsb is on the toss list, it is moved to the keep list
 * before being returned.
 *
 * toss_rsb() happens when all local usage of the rsb is done, i.e. no
 * more refcounts exist, so the rsb is moved from the keep list to the
 * toss list.
 *
 * rsb's on both keep and toss lists are used for doing name to master
 * lookups.  rsb's that are in use locally (and being refcounted) are on
 * the keep list, rsb's that are not in use locally (not refcounted) and
 * only exist for name/master lookups are on the toss list.
 *
 * rsb's on the toss list whose dir_nodeid is not local can have stale
 * name/master mappings.  So, remote requests on such rsb's can potentially
 * return with an error, which means the mapping is stale and needs to
 * be updated with a new lookup.  (The idea behind MASTER UNCERTAIN and
 * first_lkid is to keep only a single outstanding request on an rsb
 * while that rsb has a potentially stale master.)
 */

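/* An illustrative lifecycle (assuming the default config): a first local
   request creates the rsb on the keep list with one reference; when the
   last lkb is detached, the final kref_put() moves it to the toss list
   (toss_rsb() below); if it is not reused within dlm_config.ci_toss_secs,
   shrink_bucket() frees it, first telling the directory node to remove
   its record when we were the master. */
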
static int find_rsb_dir(struct dlm_ls *ls, char *name, int len,
			uint32_t hash, uint32_t b,
			int dir_nodeid, int from_nodeid,
			unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int from_local = 0;
	int from_other = 0;
	int from_dir = 0;
	int create = 0;
	int error;

	if (flags & R_RECEIVE_REQUEST) {
		if (from_nodeid == dir_nodeid)
			from_dir = 1;
		else
			from_other = 1;
	} else if (flags & R_REQUEST) {
		from_local = 1;
	}

	/*
	 * flags & R_RECEIVE_RECOVER is from dlm_recover_master_copy, so
	 * from_nodeid has sent us a lock in dlm_recover_locks, believing
	 * we're the new master.  Our local recovery may not have set
	 * res_master_nodeid to our_nodeid yet, so allow either.  Don't
	 * create the rsb; dlm_recover_process_copy() will handle EBADR
	 * by resending.
	 *
	 * If someone sends us a request, we are the dir node, and we do
	 * not find the rsb anywhere, then recreate it.  This happens if
	 * someone sends us a request after we have removed/freed an rsb
	 * from our toss list.  (They sent a request instead of lookup
	 * because they are using an rsb from their toss list.)
	 */

	if (from_local || from_dir ||
	    (from_other && (dir_nodeid == our_nodeid))) {
		create = 1;
	}

 retry:
	if (create) {
		error = pre_rsb_struct(ls);
		if (error < 0)
			goto out;
	}

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive (master_nodeid may be out of date unless
	 * we are the dir_nodeid or were the master).  No other thread
	 * is using this rsb because it's on the toss list, so we can
	 * look at or update res_master_nodeid without lock_rsb.
	 */

	if ((r->res_master_nodeid != our_nodeid) && from_other) {
		/* our rsb was not master, and another node (not the dir node)
		   has sent us a request */
		log_debug(ls, "find_rsb toss from_other %d master %d dir %d %s",
			  from_nodeid, r->res_master_nodeid, dir_nodeid,
			  r->res_name);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if ((r->res_master_nodeid != our_nodeid) && from_dir) {
		/* don't think this should ever happen */
		log_error(ls, "find_rsb toss from_dir %d master %d",
			  from_nodeid, r->res_master_nodeid);
		dlm_print_rsb(r);
		/* fix it and go on */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	if (from_local && (r->res_master_nodeid != our_nodeid)) {
		/* Because we have held no locks on this rsb,
		   res_master_nodeid could have become stale. */
		rsb_set_flag(r, RSB_MASTER_UNCERTAIN);
		r->res_first_lkid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	if (error == -EBADR && !create)
		goto out_unlock;

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	kref_init(&r->res_ref);

	if (from_dir) {
		/* want to see how often this happens */
		log_debug(ls, "find_rsb new from_dir %d recreate %s",
			  from_nodeid, r->res_name);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
		goto out_add;
	}

	if (from_other && (dir_nodeid != our_nodeid)) {
		/* should never happen */
		log_error(ls, "find_rsb new from_other %d dir %d our %d %s",
			  from_nodeid, dir_nodeid, our_nodeid, r->res_name);
		dlm_free_rsb(r);
		r = NULL;
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (from_other) {
		log_debug(ls, "find_rsb new from_other %d dir %d %s",
			  from_nodeid, dir_nodeid, r->res_name);
	}

	if (dir_nodeid == our_nodeid) {
		/* When we are the dir nodeid, we can set the master
		   node immediately */
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	} else {
		/* set_master will send_lookup to dir_nodeid */
		r->res_master_nodeid = 0;
		r->res_nodeid = -1;
	}

 out_add:
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

/* During recovery, other nodes can send us new MSTCPY locks (from
   dlm_recover_locks) before we've made ourselves master (in
   dlm_recover_masters). */

static int find_rsb_nodir(struct dlm_ls *ls, char *name, int len,
			  uint32_t hash, uint32_t b,
			  int dir_nodeid, int from_nodeid,
			  unsigned int flags, struct dlm_rsb **r_ret)
{
	struct dlm_rsb *r = NULL;
	int our_nodeid = dlm_our_nodeid();
	int recover = (flags & R_RECEIVE_RECOVER);
	int error;

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		goto out;

	spin_lock(&ls->ls_rsbtbl[b].lock);

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (error)
		goto do_toss;

	/*
	 * rsb is active, so we can't check master_nodeid without lock_rsb.
	 */

	kref_get(&r->res_ref);
	goto out_unlock;


 do_toss:
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto do_new;

	/*
	 * rsb found inactive.  No other thread is using this rsb because
	 * it's on the toss list, so we can look at or update
	 * res_master_nodeid without lock_rsb.
	 */

	if (!recover && (r->res_master_nodeid != our_nodeid) && from_nodeid) {
		/* our rsb is not master, and another node has sent us a
		   request; this should never happen */
		log_error(ls, "find_rsb toss from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		error = -ENOTBLK;
		goto out_unlock;
	}

	if (!recover && (r->res_master_nodeid != our_nodeid) &&
	    (dir_nodeid == our_nodeid)) {
		/* our rsb is not master, and we are dir; may as well fix it;
		   this should never happen */
		log_error(ls, "find_rsb toss our %d master %d dir %d",
			  our_nodeid, r->res_master_nodeid, dir_nodeid);
		dlm_print_rsb(r);
		r->res_master_nodeid = our_nodeid;
		r->res_nodeid = 0;
	}

	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
	goto out_unlock;


 do_new:
	/*
	 * rsb not found
	 */

	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = dir_nodeid;
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = (dir_nodeid == our_nodeid) ? 0 : dir_nodeid;
	kref_init(&r->res_ref);

	error = rsb_insert(r, &ls->ls_rsbtbl[b].keep);
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
 out:
	*r_ret = r;
	return error;
}

static int find_rsb(struct dlm_ls *ls, char *name, int len, int from_nodeid,
		    unsigned int flags, struct dlm_rsb **r_ret)
{
	uint32_t hash, b;
	int dir_nodeid;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);

	if (dlm_no_directory(ls))
		return find_rsb_nodir(ls, name, len, hash, b, dir_nodeid,
				      from_nodeid, flags, r_ret);
	else
		return find_rsb_dir(ls, name, len, hash, b, dir_nodeid,
				    from_nodeid, flags, r_ret);
}

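/* For example: a local request arrives here from request_lock() as
   find_rsb(ls, name, len, 0, R_REQUEST, &r), while receive_request()
   passes the sender's nodeid with R_RECEIVE_REQUEST; that is how
   find_rsb_dir() distinguishes the from_local, from_dir and from_other
   cases above. */
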
/* we have received a request and found that res_master_nodeid != our_nodeid,
   so we need to return an error or make ourselves the master */

static int validate_master_nodeid(struct dlm_ls *ls, struct dlm_rsb *r,
				  int from_nodeid)
{
	if (dlm_no_directory(ls)) {
		log_error(ls, "find_rsb keep from_nodeid %d master %d dir %d",
			  from_nodeid, r->res_master_nodeid,
			  r->res_dir_nodeid);
		dlm_print_rsb(r);
		return -ENOTBLK;
	}

	if (from_nodeid != r->res_dir_nodeid) {
		/* our rsb is not master, and another node (not the dir node)
		   has sent us a request.  this is much more common when our
		   master_nodeid is zero, so limit debug to non-zero.  */

		if (r->res_master_nodeid) {
			log_debug(ls, "validate master from_other %d master %d "
				  "dir %d first %x %s", from_nodeid,
				  r->res_master_nodeid, r->res_dir_nodeid,
				  r->res_first_lkid, r->res_name);
		}
		return -ENOTBLK;
	} else {
		/* our rsb is not master, but the dir nodeid has sent us a
		   request; this could happen with master 0 / res_nodeid -1 */

		if (r->res_master_nodeid) {
			log_error(ls, "validate master from_dir %d master %d "
				  "first %x %s",
				  from_nodeid, r->res_master_nodeid,
				  r->res_first_lkid, r->res_name);
		}

		r->res_master_nodeid = dlm_our_nodeid();
		r->res_nodeid = 0;
		return 0;
	}
}

/*
 * We're the dir node for this res and another node wants to know the
 * master nodeid.  During normal operation (non recovery) this is only
 * called from receive_lookup(); master lookups when the local node is
 * the dir node are done by find_rsb().
 *
 * normal operation, we are the dir node for a resource
 * . _request_lock
 * . set_master
 * . send_lookup
 * . receive_lookup
 * . dlm_master_lookup flags 0
 *
 * recover directory, we are rebuilding dir for all resources
 * . dlm_recover_directory
 * . dlm_rcom_names
 *   remote node sends back the rsb names it is master of and we are dir of
 * . dlm_master_lookup RECOVER_DIR (fix_master 0, from_master 1)
 *   we either create new rsb setting remote node as master, or find existing
 *   rsb and set master to be the remote node.
 *
 * recover masters, we are finding the new master for resources
 * . dlm_recover_masters
 * . recover_master
 * . dlm_send_rcom_lookup
 * . receive_rcom_lookup
 * . dlm_master_lookup RECOVER_MASTER (fix_master 1, from_master 0)
 */

int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, char *name, int len,
		      unsigned int flags, int *r_nodeid, int *result)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int from_master = (flags & DLM_LU_RECOVER_DIR);
	int fix_master = (flags & DLM_LU_RECOVER_MASTER);
	int our_nodeid = dlm_our_nodeid();
	int dir_nodeid, error, toss_list = 0;

	if (len > DLM_RESNAME_MAXLEN)
		return -EINVAL;

	if (from_nodeid == our_nodeid) {
		log_error(ls, "dlm_master_lookup from our_nodeid %d flags %x",
			  our_nodeid, flags);
		return -EINVAL;
	}

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	dir_nodeid = dlm_hash2nodeid(ls, hash);
	if (dir_nodeid != our_nodeid) {
		log_error(ls, "dlm_master_lookup from %d dir %d our %d h %x %d",
			  from_nodeid, dir_nodeid, our_nodeid, hash,
			  ls->ls_num_nodes);
		*r_nodeid = -1;
		return -EINVAL;
	}

 retry:
	error = pre_rsb_struct(ls);
	if (error < 0)
		return error;

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error) {
		/* because the rsb is active, we need to lock_rsb before
		   checking/changing res_master_nodeid */

		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		lock_rsb(r);
		goto found;
	}

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto not_found;

	/* because the rsb is inactive (on toss list), it's not refcounted
	   and lock_rsb is not used, but is protected by the rsbtbl lock */

	toss_list = 1;
 found:
	if (r->res_dir_nodeid != our_nodeid) {
		/* should not happen, but may as well fix it and carry on */
		log_error(ls, "dlm_master_lookup res_dir %d our %d %s",
			  r->res_dir_nodeid, our_nodeid, r->res_name);
		r->res_dir_nodeid = our_nodeid;
	}

	if (fix_master && dlm_is_removed(ls, r->res_master_nodeid)) {
		/* Recovery uses this function to set a new master when
		   the previous master failed.  Setting NEW_MASTER will
		   force dlm_recover_masters to call recover_master on this
		   rsb even though the res_nodeid is no longer removed. */

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);

		if (toss_list) {
			/* I don't think we should ever find it on toss list. */
			log_error(ls, "dlm_master_lookup fix_master on toss");
			dlm_dump_rsb(r);
		}
	}

	if (from_master && (r->res_master_nodeid != from_nodeid)) {
		/* this will happen if from_nodeid became master during
		   a previous recovery cycle, and we aborted the previous
		   cycle before recovering this master value */

		log_limit(ls, "dlm_master_lookup from_master %d "
			  "master_nodeid %d res_nodeid %d first %x %s",
			  from_nodeid, r->res_master_nodeid, r->res_nodeid,
			  r->res_first_lkid, r->res_name);

		if (r->res_master_nodeid == our_nodeid) {
			log_error(ls, "from_master %d our_master", from_nodeid);
			dlm_dump_rsb(r);
			goto out_found;
		}

		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
		rsb_set_flag(r, RSB_NEW_MASTER);
	}

	if (!r->res_master_nodeid) {
		/* this will happen if recovery happens while we're looking
		   up the master for this rsb */

		log_debug(ls, "dlm_master_lookup master 0 to %d first %x %s",
			  from_nodeid, r->res_first_lkid, r->res_name);
		r->res_master_nodeid = from_nodeid;
		r->res_nodeid = from_nodeid;
	}

	if (!from_master && !fix_master &&
	    (r->res_master_nodeid == from_nodeid)) {
		/* this can happen when the master sends remove, the dir node
		   finds the rsb on the keep list and ignores the remove,
		   and the former master sends a lookup */

		log_limit(ls, "dlm_master_lookup from master %d flags %x "
			  "first %x %s", from_nodeid, flags,
			  r->res_first_lkid, r->res_name);
	}

 out_found:
	*r_nodeid = r->res_master_nodeid;
	if (result)
		*result = DLM_LU_MATCH;

	if (toss_list) {
		r->res_toss_time = jiffies;
		/* the rsb was inactive (on toss list) */
		spin_unlock(&ls->ls_rsbtbl[b].lock);
	} else {
		/* the rsb was active */
		unlock_rsb(r);
		put_rsb(r);
	}
	return 0;

 not_found:
	error = get_rsb_struct(ls, name, len, &r);
	if (error == -EAGAIN) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}
	if (error)
		goto out_unlock;

	r->res_hash = hash;
	r->res_bucket = b;
	r->res_dir_nodeid = our_nodeid;
	r->res_master_nodeid = from_nodeid;
	r->res_nodeid = from_nodeid;
	kref_init(&r->res_ref);
	r->res_toss_time = jiffies;

	error = rsb_insert(r, &ls->ls_rsbtbl[b].toss);
	if (error) {
		/* should never happen */
		dlm_free_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		goto retry;
	}

	if (result)
		*result = DLM_LU_ADD;
	*r_nodeid = from_nodeid;
	error = 0;
 out_unlock:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
	return error;
}

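/* For example: when a remote node sends DLM_MSG_LOOKUP for a resource we
   are the directory node of, receive_lookup() calls this with flags 0.
   If an rsb already records master nodeid 3, we return *r_nodeid = 3 with
   DLM_LU_MATCH; if nothing is found, we create a toss-list directory
   record naming the sender as master and return DLM_LU_ADD. */
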
static void dlm_dump_rsb_hash(struct dlm_ls *ls, uint32_t hash)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			if (r->res_hash == hash)
				dlm_dump_rsb(r);
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
}

void dlm_dump_rsb_name(struct dlm_ls *ls, char *name, int len)
{
	struct dlm_rsb *r = NULL;
	uint32_t hash, b;
	int error;

	hash = jhash(name, len, 0);
	b = hash & (ls->ls_rsbtbl_size - 1);

	spin_lock(&ls->ls_rsbtbl[b].lock);
	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
	if (!error)
		goto out_dump;

	error = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
	if (error)
		goto out;
 out_dump:
	dlm_dump_rsb(r);
 out:
	spin_unlock(&ls->ls_rsbtbl[b].lock);
}

static void toss_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);
	struct dlm_ls *ls = r->res_ls;

	DLM_ASSERT(list_empty(&r->res_root_list), dlm_print_rsb(r););
	kref_init(&r->res_ref);
	rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[r->res_bucket].keep);
	rsb_insert(r, &ls->ls_rsbtbl[r->res_bucket].toss);
	r->res_toss_time = jiffies;
	ls->ls_rsbtbl[r->res_bucket].flags |= DLM_RTF_SHRINK;
	if (r->res_lvbptr) {
		dlm_free_lvb(r->res_lvbptr);
		r->res_lvbptr = NULL;
	}
}

/* See comment for unhold_lkb */

static void unhold_rsb(struct dlm_rsb *r)
{
	int rv;
	rv = kref_put(&r->res_ref, toss_rsb);
	DLM_ASSERT(!rv, dlm_dump_rsb(r););
}

static void kill_rsb(struct kref *kref)
{
	struct dlm_rsb *r = container_of(kref, struct dlm_rsb, res_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the remove and free. */

	DLM_ASSERT(list_empty(&r->res_lookup), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_grantqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_convertqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_waitqueue), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_root_list), dlm_dump_rsb(r););
	DLM_ASSERT(list_empty(&r->res_recover_list), dlm_dump_rsb(r););
}

/* Attaching/detaching lkb's from rsb's is for rsb reference counting.
   The rsb must exist as long as any lkb's for it do. */

static void attach_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	hold_rsb(r);
	lkb->lkb_resource = r;
}

static void detach_lkb(struct dlm_lkb *lkb)
{
	if (lkb->lkb_resource) {
		put_rsb(lkb->lkb_resource);
		lkb->lkb_resource = NULL;
	}
}

static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;
	int rv;

	lkb = dlm_allocate_lkb(ls);
	if (!lkb)
		return -ENOMEM;

	lkb->lkb_nodeid = -1;
	lkb->lkb_grmode = DLM_LOCK_IV;
	kref_init(&lkb->lkb_ref);
	INIT_LIST_HEAD(&lkb->lkb_ownqueue);
	INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
	INIT_LIST_HEAD(&lkb->lkb_time_list);
	INIT_LIST_HEAD(&lkb->lkb_cb_list);
	mutex_init(&lkb->lkb_cb_mutex);
	INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	rv = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
	if (rv >= 0)
		lkb->lkb_id = rv;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();

	if (rv < 0) {
		log_error(ls, "create_lkb idr error %d", rv);
		dlm_free_lkb(lkb);
		return rv;
	}

	*lkb_ret = lkb;
	return 0;
}

static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
	struct dlm_lkb *lkb;

	spin_lock(&ls->ls_lkbidr_spin);
	lkb = idr_find(&ls->ls_lkbidr, lkid);
	if (lkb)
		kref_get(&lkb->lkb_ref);
	spin_unlock(&ls->ls_lkbidr_spin);

	*lkb_ret = lkb;
	return lkb ? 0 : -ENOENT;
}

static void kill_lkb(struct kref *kref)
{
	struct dlm_lkb *lkb = container_of(kref, struct dlm_lkb, lkb_ref);

	/* All work is done after the return from kref_put() so we
	   can release the write_lock before the detach_lkb */

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););
}

/* __put_lkb() is used when an lkb may not have an rsb attached to
   it so we need to provide the lockspace explicitly */

static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	uint32_t lkid = lkb->lkb_id;

	spin_lock(&ls->ls_lkbidr_spin);
	if (kref_put(&lkb->lkb_ref, kill_lkb)) {
		idr_remove(&ls->ls_lkbidr, lkid);
		spin_unlock(&ls->ls_lkbidr_spin);

		detach_lkb(lkb);

		/* for local/process lkbs, lvbptr points to caller's lksb */
		if (lkb->lkb_lvbptr && is_master_copy(lkb))
			dlm_free_lvb(lkb->lkb_lvbptr);
		dlm_free_lkb(lkb);
		return 1;
	} else {
		spin_unlock(&ls->ls_lkbidr_spin);
		return 0;
	}
}

int dlm_put_lkb(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls;

	DLM_ASSERT(lkb->lkb_resource, dlm_print_lkb(lkb););
	DLM_ASSERT(lkb->lkb_resource->res_ls, dlm_print_lkb(lkb););

	ls = lkb->lkb_resource->res_ls;
	return __put_lkb(ls, lkb);
}

/* This is only called to add a reference when the code already holds
   a valid reference to the lkb, so there's no need for locking. */

static inline void hold_lkb(struct dlm_lkb *lkb)
{
	kref_get(&lkb->lkb_ref);
}

/* This is called when we need to remove a reference and are certain
   it's not the last ref.  e.g. del_lkb is always called between a
   find_lkb/put_lkb and is always the inverse of a previous add_lkb.
   put_lkb would work fine, but would involve unnecessary locking */

static inline void unhold_lkb(struct dlm_lkb *lkb)
{
	int rv;
	rv = kref_put(&lkb->lkb_ref, kill_lkb);
	DLM_ASSERT(!rv, dlm_print_lkb(lkb););
}

static void lkb_add_ordered(struct list_head *new, struct list_head *head,
			    int mode)
{
	struct dlm_lkb *lkb = NULL;

	list_for_each_entry(lkb, head, lkb_statequeue)
		if (lkb->lkb_rqmode < mode)
			break;

	__list_add(new, lkb->lkb_statequeue.prev, &lkb->lkb_statequeue);
}

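/* For example: inserting a mode 5 (EX) entry into a queue holding modes
   [5, 3, 0] breaks at the first entry with a lower mode and produces
   [5, 5, 3, 0], keeping the queue in descending mode order. */
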
/* add/remove lkb to rsb's grant/convert/wait queue */

static void add_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int status)
{
	kref_get(&lkb->lkb_ref);

	DLM_ASSERT(!lkb->lkb_status, dlm_print_lkb(lkb););

	lkb->lkb_timestamp = ktime_get();

	lkb->lkb_status = status;

	switch (status) {
	case DLM_LKSTS_WAITING:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_waitqueue);
		else
			list_add_tail(&lkb->lkb_statequeue, &r->res_waitqueue);
		break;
	case DLM_LKSTS_GRANTED:
		/* convention says granted locks kept in order of grmode */
		lkb_add_ordered(&lkb->lkb_statequeue, &r->res_grantqueue,
				lkb->lkb_grmode);
		break;
	case DLM_LKSTS_CONVERT:
		if (lkb->lkb_exflags & DLM_LKF_HEADQUE)
			list_add(&lkb->lkb_statequeue, &r->res_convertqueue);
		else
			list_add_tail(&lkb->lkb_statequeue,
				      &r->res_convertqueue);
		break;
	default:
		DLM_ASSERT(0, dlm_print_lkb(lkb); printk("sts=%d\n", status););
	}
}

static void del_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	lkb->lkb_status = 0;
	list_del(&lkb->lkb_statequeue);
	unhold_lkb(lkb);
}

static void move_lkb(struct dlm_rsb *r, struct dlm_lkb *lkb, int sts)
{
	hold_lkb(lkb);
	del_lkb(r, lkb);
	add_lkb(r, lkb, sts);
	unhold_lkb(lkb);
}

static int msg_reply_type(int mstype)
{
	switch (mstype) {
	case DLM_MSG_REQUEST:
		return DLM_MSG_REQUEST_REPLY;
	case DLM_MSG_CONVERT:
		return DLM_MSG_CONVERT_REPLY;
	case DLM_MSG_UNLOCK:
		return DLM_MSG_UNLOCK_REPLY;
	case DLM_MSG_CANCEL:
		return DLM_MSG_CANCEL_REPLY;
	case DLM_MSG_LOOKUP:
		return DLM_MSG_LOOKUP_REPLY;
	}
	return -1;
}

static int nodeid_warned(int nodeid, int num_nodes, int *warned)
{
	int i;

	for (i = 0; i < num_nodes; i++) {
		if (!warned[i]) {
			warned[i] = nodeid;
			return 0;
		}
		if (warned[i] == nodeid)
			return 1;
	}
	return 0;
}

void dlm_scan_waiters(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	s64 us;
	s64 debug_maxus = 0;
	u32 debug_scanned = 0;
	u32 debug_expired = 0;
	int num_nodes = 0;
	int *warned = NULL;

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (!lkb->lkb_wait_time)
			continue;

		debug_scanned++;

		us = ktime_to_us(ktime_sub(ktime_get(), lkb->lkb_wait_time));

		if (us < dlm_config.ci_waitwarn_us)
			continue;

		lkb->lkb_wait_time = 0;

		debug_expired++;
		if (us > debug_maxus)
			debug_maxus = us;

		if (!num_nodes) {
			num_nodes = ls->ls_num_nodes;
			warned = kcalloc(num_nodes, sizeof(int), GFP_KERNEL);
		}
		if (!warned)
			continue;
		if (nodeid_warned(lkb->lkb_wait_nodeid, num_nodes, warned))
			continue;

		log_error(ls, "waitwarn %x %lld %d us check connection to "
			  "node %d", lkb->lkb_id, (long long)us,
			  dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(warned);

	if (debug_expired)
		log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
			  debug_scanned, debug_expired,
			  dlm_config.ci_waitwarn_us, (long long)debug_maxus);
}

/* add/remove lkb from global waiters list of lkb's waiting for
   a reply from a remote node */

static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error = 0;

	mutex_lock(&ls->ls_waiters_mutex);

	if (is_overlap_unlock(lkb) ||
	    (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
		error = -EINVAL;
		goto out;
	}

	if (lkb->lkb_wait_type || is_overlap_cancel(lkb)) {
		switch (mstype) {
		case DLM_MSG_UNLOCK:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
			break;
		case DLM_MSG_CANCEL:
			lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
			break;
		default:
			error = -EBUSY;
			goto out;
		}
		lkb->lkb_wait_count++;
		hold_lkb(lkb);

		log_debug(ls, "addwait %x cur %d overlap %d count %d f %x",
			  lkb->lkb_id, lkb->lkb_wait_type, mstype,
			  lkb->lkb_wait_count, lkb->lkb_flags);
		goto out;
	}

	DLM_ASSERT(!lkb->lkb_wait_count,
		   dlm_print_lkb(lkb);
		   printk("wait_count %d\n", lkb->lkb_wait_count););

	lkb->lkb_wait_count++;
	lkb->lkb_wait_type = mstype;
	lkb->lkb_wait_time = ktime_get();
	lkb->lkb_wait_nodeid = to_nodeid; /* for debugging */
	hold_lkb(lkb);
	list_add(&lkb->lkb_wait_reply, &ls->ls_waiters);
 out:
	if (error)
		log_error(ls, "addwait error %x %d flags %x %d %d %s",
			  lkb->lkb_id, error, lkb->lkb_flags, mstype,
			  lkb->lkb_wait_type, lkb->lkb_resource->res_name);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* We clear the RESEND flag because we might be taking an lkb off the waiters
   list as part of process_requestqueue (e.g. a lookup that has an optimized
   request reply on the requestqueue) between dlm_recover_waiters_pre() which
   set RESEND and dlm_recover_waiters_post() */

static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
				struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int overlap_done = 0;

	if (is_overlap_unlock(lkb) && (mstype == DLM_MSG_UNLOCK_REPLY)) {
		log_debug(ls, "remwait %x unlock_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		overlap_done = 1;
		goto out_del;
	}

	if (is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL_REPLY)) {
		log_debug(ls, "remwait %x cancel_reply overlap", lkb->lkb_id);
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		overlap_done = 1;
		goto out_del;
	}

	/* Cancel state was preemptively cleared by a successful convert,
	   see next comment, nothing to do. */

	if ((mstype == DLM_MSG_CANCEL_REPLY) &&
	    (lkb->lkb_wait_type != DLM_MSG_CANCEL)) {
		log_debug(ls, "remwait %x cancel_reply wait_type %d",
			  lkb->lkb_id, lkb->lkb_wait_type);
		return -1;
	}

	/* Remove for the convert reply, and preemptively remove for the
	   cancel reply.  A convert has been granted while there's still
	   an outstanding cancel on it (the cancel is moot and the result
	   in the cancel reply should be 0).  We preempt the cancel reply
	   because the app gets the convert result and then can follow up
	   with another op, like convert.  This subsequent op would see the
	   lingering state of the cancel and fail with -EBUSY. */

	if ((mstype == DLM_MSG_CONVERT_REPLY) &&
	    (lkb->lkb_wait_type == DLM_MSG_CONVERT) &&
	    is_overlap_cancel(lkb) && ms && !ms->m_result) {
		log_debug(ls, "remwait %x convert_reply zap overlap_cancel",
			  lkb->lkb_id);
		lkb->lkb_wait_type = 0;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_count--;
		goto out_del;
	}

	/* N.B. type of reply may not always correspond to type of original
	   msg due to lookup->request optimization, verify others? */

	if (lkb->lkb_wait_type) {
		lkb->lkb_wait_type = 0;
		goto out_del;
	}

	log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
		  lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
		  mstype, lkb->lkb_flags);
	return -1;

 out_del:
	/* the force-unlock/cancel has completed and we haven't received a
	   reply to the op that was in progress prior to the unlock/cancel;
	   we give up on any reply to the earlier op.  FIXME: not sure
	   when/how this would happen */

	if (overlap_done && lkb->lkb_wait_type) {
		log_error(ls, "remwait error %x reply %d wait_type %d overlap",
			  lkb->lkb_id, mstype, lkb->lkb_wait_type);
		lkb->lkb_wait_count--;
		lkb->lkb_wait_type = 0;
	}

	DLM_ASSERT(lkb->lkb_wait_count, dlm_print_lkb(lkb););

	lkb->lkb_flags &= ~DLM_IFL_RESEND;
	lkb->lkb_wait_count--;
	if (!lkb->lkb_wait_count)
		list_del_init(&lkb->lkb_wait_reply);
	unhold_lkb(lkb);
	return 0;
}

static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, mstype, NULL);
	mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* Handles situations where we might be processing a "fake" or "stub" reply in
   which we can't try to take waiters_mutex again. */

static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int error;

	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_lock(&ls->ls_waiters_mutex);
	error = _remove_from_waiters(lkb, ms->m_type, ms);
	if (ms->m_flags != DLM_IFL_STUB_MS)
		mutex_unlock(&ls->ls_waiters_mutex);
	return error;
}

/* If there's an rsb for the same resource being removed, ensure
   that the remove message is sent before the new lookup message.
   It should be rare to need a delay here, but if not, then it may
   be worthwhile to add a proper wait mechanism rather than a delay. */

static void wait_pending_remove(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
 restart:
	spin_lock(&ls->ls_remove_spin);
	if (ls->ls_remove_len &&
	    !rsb_cmp(r, ls->ls_remove_name, ls->ls_remove_len)) {
		log_debug(ls, "delay lookup for remove dir %d %s",
			  r->res_dir_nodeid, r->res_name);
		spin_unlock(&ls->ls_remove_spin);
		msleep(1);
		goto restart;
	}
	spin_unlock(&ls->ls_remove_spin);
}

/*
 * ls_remove_spin protects ls_remove_name and ls_remove_len which are
 * read by other threads in wait_pending_remove.  ls_remove_names
 * and ls_remove_lens are only used by the scan thread, so they do
 * not need protection.
 */

static void shrink_bucket(struct dlm_ls *ls, int b)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	char *name;
	int our_nodeid = dlm_our_nodeid();
	int remote_count = 0;
	int need_shrink = 0;
	int i, len, rv;

	memset(&ls->ls_remove_lens, 0, sizeof(int) * DLM_REMOVE_NAMES_MAX);

	spin_lock(&ls->ls_rsbtbl[b].lock);

	if (!(ls->ls_rsbtbl[b].flags & DLM_RTF_SHRINK)) {
		spin_unlock(&ls->ls_rsbtbl[b].lock);
		return;
	}

	for (n = rb_first(&ls->ls_rsbtbl[b].toss); n; n = next) {
		next = rb_next(n);
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		/* If we're the directory record for this rsb, and
		   we're not the master of it, then we need to wait
		   for the master node to send us a dir remove before
		   we remove the dir record. */

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid != our_nodeid) &&
		    (dlm_dir_nodeid(r) == our_nodeid)) {
			continue;
		}

		need_shrink = 1;

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			continue;
		}

		if (!dlm_no_directory(ls) &&
		    (r->res_master_nodeid == our_nodeid) &&
		    (dlm_dir_nodeid(r) != our_nodeid)) {

			/* We're the master of this rsb but we're not
			   the directory record, so we need to tell the
			   dir node to remove the dir record. */

			ls->ls_remove_lens[remote_count] = r->res_length;
			memcpy(ls->ls_remove_names[remote_count], r->res_name,
			       DLM_RESNAME_MAXLEN);
			remote_count++;

			if (remote_count >= DLM_REMOVE_NAMES_MAX)
				break;
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			log_error(ls, "tossed rsb in use %s", r->res_name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
		dlm_free_rsb(r);
	}

	if (need_shrink)
		ls->ls_rsbtbl[b].flags |= DLM_RTF_SHRINK;
	else
		ls->ls_rsbtbl[b].flags &= ~DLM_RTF_SHRINK;
	spin_unlock(&ls->ls_rsbtbl[b].lock);

	/*
	 * While searching for rsb's to free, we found some that require
	 * remote removal.  We leave them in place and find them again here
	 * so there is a very small gap between removing them from the toss
	 * list and sending the removal.  Keeping this gap small is
	 * important to keep us (the master node) from being out of sync
	 * with the remote dir node for very long.
	 *
	 * From the time the rsb is removed from toss until just after
	 * send_remove, the rsb name is saved in ls_remove_name.  A new
	 * lookup checks this to ensure that a new lookup message for the
	 * same resource name is not sent just before the remove message.
	 */

	for (i = 0; i < remote_count; i++) {
		name = ls->ls_remove_names[i];
		len = ls->ls_remove_lens[i];

		spin_lock(&ls->ls_rsbtbl[b].lock);
		rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
		if (rv) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name not toss %s", name);
			continue;
		}

		if (r->res_master_nodeid != our_nodeid) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name master %d dir %d our %d %s",
				  r->res_master_nodeid, r->res_dir_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (r->res_dir_nodeid == our_nodeid) {
			/* should never happen */
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name dir %d master %d our %d %s",
				  r->res_dir_nodeid, r->res_master_nodeid,
				  our_nodeid, name);
			continue;
		}

		if (!time_after_eq(jiffies, r->res_toss_time +
				   dlm_config.ci_toss_secs * HZ)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_debug(ls, "remove_name toss_time %lu now %lu %s",
				  r->res_toss_time, jiffies, name);
			continue;
		}

		if (!kref_put(&r->res_ref, kill_rsb)) {
			spin_unlock(&ls->ls_rsbtbl[b].lock);
			log_error(ls, "remove_name in use %s", name);
			continue;
		}

		rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);

		/* block lookup of same name until we've sent remove */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = len;
		memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);
		spin_unlock(&ls->ls_rsbtbl[b].lock);

		send_remove(r);

		/* allow lookup of name again */
		spin_lock(&ls->ls_remove_spin);
		ls->ls_remove_len = 0;
		memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
		spin_unlock(&ls->ls_remove_spin);

		dlm_free_rsb(r);
	}
}

void dlm_scan_rsbs(struct dlm_ls *ls)
{
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		shrink_bucket(ls, i);
		if (dlm_locking_stopped(ls))
			break;
		cond_resched();
	}
}

static void add_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	if (is_master_copy(lkb))
		return;

	if (test_bit(LSFL_TIMEWARN, &ls->ls_flags) &&
	    !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
		lkb->lkb_flags |= DLM_IFL_WATCH_TIMEWARN;
		goto add_it;
	}
	if (lkb->lkb_exflags & DLM_LKF_TIMEOUT)
		goto add_it;
	return;

 add_it:
	DLM_ASSERT(list_empty(&lkb->lkb_time_list), dlm_print_lkb(lkb););
	mutex_lock(&ls->ls_timeout_mutex);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_time_list, &ls->ls_timeout);
	mutex_unlock(&ls->ls_timeout_mutex);
}

static void del_timeout(struct dlm_lkb *lkb)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;

	mutex_lock(&ls->ls_timeout_mutex);
	if (!list_empty(&lkb->lkb_time_list)) {
		list_del_init(&lkb->lkb_time_list);
		unhold_lkb(lkb);
	}
	mutex_unlock(&ls->ls_timeout_mutex);
}

/* FIXME: is it safe to look at lkb_exflags, lkb_flags, lkb_timestamp, and
   lkb_lksb_timeout without lock_rsb?  Note: we can't lock timeout_mutex
   and then lock rsb because of lock ordering in add_timeout.  We may need
   to specify some special timeout-related bits in the lkb that are just to
   be accessed under the timeout_mutex. */

void dlm_scan_timeout(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	int do_cancel, do_warn;
	s64 wait_us;

	for (;;) {
		if (dlm_locking_stopped(ls))
			break;

		do_cancel = 0;
		do_warn = 0;
		mutex_lock(&ls->ls_timeout_mutex);
		list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list) {

			wait_us = ktime_to_us(ktime_sub(ktime_get(),
							lkb->lkb_timestamp));

			if ((lkb->lkb_exflags & DLM_LKF_TIMEOUT) &&
			    wait_us >= (lkb->lkb_timeout_cs * 10000))
				do_cancel = 1;

			if ((lkb->lkb_flags & DLM_IFL_WATCH_TIMEWARN) &&
			    wait_us >= dlm_config.ci_timewarn_cs * 10000)
				do_warn = 1;

			if (!do_cancel && !do_warn)
				continue;
			hold_lkb(lkb);
			break;
		}
		mutex_unlock(&ls->ls_timeout_mutex);

		if (!do_cancel && !do_warn)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		if (do_warn) {
			/* clear flag so we only warn once */
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			if (!(lkb->lkb_exflags & DLM_LKF_TIMEOUT))
				del_timeout(lkb);
			dlm_timeout_warn(lkb);
		}

		if (do_cancel) {
			log_debug(ls, "timeout cancel %x node %d %s",
				  lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
			lkb->lkb_flags &= ~DLM_IFL_WATCH_TIMEWARN;
			lkb->lkb_flags |= DLM_IFL_TIMEOUT_CANCEL;
			del_timeout(lkb);
			_cancel_lock(r, lkb);
		}

		unlock_rsb(r);
		unhold_rsb(r);
		dlm_put_lkb(lkb);
	}
}

/* This is only called by dlm_recoverd, and we rely on dlm_ls_stop() stopping
   dlm_recoverd before checking/setting ls_recover_begin. */

void dlm_adjust_timeouts(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	u64 adj_us = jiffies_to_usecs(jiffies - ls->ls_recover_begin);

	ls->ls_recover_begin = 0;
	mutex_lock(&ls->ls_timeout_mutex);
	list_for_each_entry(lkb, &ls->ls_timeout, lkb_time_list)
		lkb->lkb_timestamp = ktime_add_us(lkb->lkb_timestamp, adj_us);
	mutex_unlock(&ls->ls_timeout_mutex);

	if (!dlm_config.ci_waitwarn_us)
		return;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (ktime_to_us(lkb->lkb_wait_time))
			lkb->lkb_wait_time = ktime_get();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
}

/* lkb is master or local copy */

static void set_lvb_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int b, len = r->res_ls->ls_lvblen;

	/* b=1 lvb returned to caller
	   b=0 lvb written to rsb or invalidated
	   b=-1 do nothing */

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];

	if (b == 1) {
		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			return;

		memcpy(lkb->lkb_lvbptr, r->res_lvbptr, len);
		lkb->lkb_lvbseq = r->res_lvbseq;

	} else if (b == 0) {
		if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
			rsb_set_flag(r, RSB_VALNOTVALID);
			return;
		}

		if (!lkb->lkb_lvbptr)
			return;

		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			return;

		if (!r->res_lvbptr)
			r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

		if (!r->res_lvbptr)
			return;

		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, len);
		r->res_lvbseq++;
		lkb->lkb_lvbseq = r->res_lvbseq;
		rsb_clear_flag(r, RSB_VALNOTVALID);
	}

	if (rsb_flag(r, RSB_VALNOTVALID))
		lkb->lkb_sbflags |= DLM_SBF_VALNOTVALID;
}

static void set_lvb_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode < DLM_LOCK_PW)
		return;

	if (lkb->lkb_exflags & DLM_LKF_IVVALBLK) {
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	if (!r->res_lvbptr)
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);

	if (!r->res_lvbptr)
		return;

	memcpy(r->res_lvbptr, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
	r->res_lvbseq++;
	rsb_clear_flag(r, RSB_VALNOTVALID);
}

/* lkb is process copy (pc) */

static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			    struct dlm_message *ms)
{
	int b;

	if (!lkb->lkb_lvbptr)
		return;

	if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
		return;

	b = dlm_lvb_operations[lkb->lkb_grmode + 1][lkb->lkb_rqmode + 1];
	if (b == 1) {
		int len = receive_extralen(ms);
		if (len > r->res_ls->ls_lvblen)
			len = r->res_ls->ls_lvblen;
		memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
		lkb->lkb_lvbseq = ms->m_lvbseq;
	}
}

/* Manipulate lkb's on rsb's convert/granted/waiting queues
   remove_lock -- used for unlock, removes lkb from granted
   revert_lock -- used for cancel, moves lkb from convert to granted
   grant_lock  -- used for request and convert, adds lkb to granted or
                  moves lkb from convert or waiting to granted

   Each of these is used for master or local copy lkb's.  There is
   also a _pc() variation used to make the corresponding change on
   a process copy (pc) lkb. */

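/* For example: a successful unlock runs remove_lock() on the master copy
   in do_unlock(), and remove_lock_pc() on the originating node's process
   copy when the unlock reply is received. */
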
static void _remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	del_lkb(r, lkb);
	lkb->lkb_grmode = DLM_LOCK_IV;
	/* this unhold undoes the original ref from create_lkb()
	   so this leads to the lkb being freed */
	unhold_lkb(lkb);
}

static void remove_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_unlock(r, lkb);
	_remove_lock(r, lkb);
}

static void remove_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	_remove_lock(r, lkb);
}

/* returns: 0 did nothing
	    1 moved lock to granted
	   -1 removed lock */

static int revert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	int rv = 0;

	lkb->lkb_rqmode = DLM_LOCK_IV;

	switch (lkb->lkb_status) {
	case DLM_LKSTS_GRANTED:
		break;
	case DLM_LKSTS_CONVERT:
		move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		rv = 1;
		break;
	case DLM_LKSTS_WAITING:
		del_lkb(r, lkb);
		lkb->lkb_grmode = DLM_LOCK_IV;
		/* this unhold undoes the original ref from create_lkb()
		   so this leads to the lkb being freed */
		unhold_lkb(lkb);
		rv = -1;
		break;
	default:
		log_print("invalid status for revert %d", lkb->lkb_status);
	}
	return rv;
}

static int revert_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	return revert_lock(r, lkb);
}

static void _grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	if (lkb->lkb_grmode != lkb->lkb_rqmode) {
		lkb->lkb_grmode = lkb->lkb_rqmode;
		if (lkb->lkb_status)
			move_lkb(r, lkb, DLM_LKSTS_GRANTED);
		else
			add_lkb(r, lkb, DLM_LKSTS_GRANTED);
	}

	lkb->lkb_rqmode = DLM_LOCK_IV;
	lkb->lkb_highbast = 0;
}

static void grant_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	set_lvb_lock(r, lkb);
	_grant_lock(r, lkb);
}

static void grant_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
			  struct dlm_message *ms)
{
	set_lvb_lock_pc(r, lkb, ms);
	_grant_lock(r, lkb);
}

/* called by grant_pending_locks() which means an async grant message must
   be sent to the requesting node in addition to granting the lock if the
   lkb belongs to a remote node. */

static void grant_lock_pending(struct dlm_rsb *r, struct dlm_lkb *lkb)
{
	grant_lock(r, lkb);
	if (is_master_copy(lkb))
		send_grant(r, lkb);
	else
		queue_cast(r, lkb, 0);
}

/* The special CONVDEADLK, ALTPR and ALTCW flags allow the master to
   change the granted/requested modes.  We're munging things accordingly in
   the process copy.
   CONVDEADLK: our grmode may have been forced down to NL to resolve a
   conversion deadlock
   ALTPR/ALTCW: our rqmode may have been changed to PR or CW to become
   compatible with other granted locks */

static void munge_demoted(struct dlm_lkb *lkb)
{
	if (lkb->lkb_rqmode == DLM_LOCK_IV || lkb->lkb_grmode == DLM_LOCK_IV) {
		log_print("munge_demoted %x invalid modes gr %d rq %d",
			  lkb->lkb_id, lkb->lkb_grmode, lkb->lkb_rqmode);
		return;
	}

	lkb->lkb_grmode = DLM_LOCK_NL;
}

static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
{
	if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
	    ms->m_type != DLM_MSG_GRANT) {
		log_print("munge_altmode %x invalid reply type %d",
			  lkb->lkb_id, ms->m_type);
		return;
	}

	if (lkb->lkb_exflags & DLM_LKF_ALTPR)
		lkb->lkb_rqmode = DLM_LOCK_PR;
	else if (lkb->lkb_exflags & DLM_LKF_ALTCW)
		lkb->lkb_rqmode = DLM_LOCK_CW;
	else {
		log_print("munge_altmode invalid exflags %x", lkb->lkb_exflags);
		dlm_print_lkb(lkb);
	}
}

static inline int first_in_list(struct dlm_lkb *lkb, struct list_head *head)
{
	struct dlm_lkb *first = list_entry(head->next, struct dlm_lkb,
					   lkb_statequeue);
	if (lkb->lkb_id == first->lkb_id)
		return 1;
	return 0;
}

/* Check if the given lkb conflicts with another lkb on the queue. */

static int queue_conflict(struct list_head *head, struct dlm_lkb *lkb)
{
	struct dlm_lkb *this;

	list_for_each_entry(this, head, lkb_statequeue) {
		if (this == lkb)
			continue;
		if (!modes_compat(this, lkb))
			return 1;
	}
	return 0;
}

2218 * "A conversion deadlock arises with a pair of lock requests in the converting
2219 * queue for one resource. The granted mode of each lock blocks the requested
2220 * mode of the other lock."
2222 * Part 2: if the granted mode of lkb is preventing an earlier lkb in the
2223 * convert queue from being granted, then deadlk/demote lkb.
2226 * Granted Queue: empty
2227 * Convert Queue: NL->EX (first lock)
2228 * PR->EX (second lock)
2230 * The first lock can't be granted because of the granted mode of the second
2231 * lock and the second lock can't be granted because it's not first in the
2232 * list. We either cancel lkb's conversion (PR->EX) and return EDEADLK, or we
2233 * demote the granted mode of lkb (from PR to NL) if it has the CONVDEADLK
2234 * flag set and return DEMOTED in the lksb flags.
2236 * Originally, this function detected conv-deadlk in a more limited scope:
2237 * - if !modes_compat(lkb1, lkb2) && !modes_compat(lkb2, lkb1), or
2238 * - if lkb1 was the first entry in the queue (not just earlier), and was
2239 * blocked by the granted mode of lkb2, and there was nothing on the
2240 * granted queue preventing lkb1 from being granted immediately, i.e.
2241 * lkb2 was the only thing preventing lkb1 from being granted.
2243 * That second condition meant we'd only say there was conv-deadlk if
2244 * resolving it (by demotion) would lead to the first lock on the convert
2245 * queue being granted right away. It allowed conversion deadlocks to exist
2246 * between locks on the convert queue while they couldn't be granted anyway.
2248 * Now, we detect and take action on conversion deadlocks immediately when
2249 * they're created, even if they may not be immediately consequential. If
2250 * lkb1 exists anywhere in the convert queue and lkb2 comes in with a granted
2251 * mode that would prevent lkb1's conversion from being granted, we do a
2252 * deadlk/demote on lkb2 right away and don't let it onto the convert queue.
2253 * I think this means that the lkb_is_ahead condition below should always
2254 * be zero, i.e. there will never be conv-deadlk between two locks that are
2255 * both already on the convert queue.
static int conversion_deadlock_detect(struct dlm_rsb *r, struct dlm_lkb *lkb2)
{
	struct dlm_lkb *lkb1;
	int lkb_is_ahead = 0;

	list_for_each_entry(lkb1, &r->res_convertqueue, lkb_statequeue) {
		if (lkb1 == lkb2) {
			lkb_is_ahead = 1;
			continue;
		}

		if (!lkb_is_ahead) {
			if (!modes_compat(lkb2, lkb1))
				return 1;
		} else {
			if (!modes_compat(lkb2, lkb1) &&
			    !modes_compat(lkb1, lkb2))
				return 1;
		}
	}
	return 0;
}

/*
 * Return 1 if the lock can be granted, 0 otherwise.
 * Also detect and resolve conversion deadlocks.
 *
 * lkb is the lock to be granted
 *
 * now is 1 if the function is being called in the context of the
 * immediate request, it is 0 if called later, after the lock has been
 * queued.
 *
 * recover is 1 if dlm_recover_grant() is trying to grant conversions
 * after recovery.
 *
 * References are from chapter 6 of "VAXcluster Principles" by Roy Davis
 */

2297 static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2300 int8_t conv = (lkb->lkb_grmode != DLM_LOCK_IV);
2303 * 6-10: Version 5.4 introduced an option to address the phenomenon of
2304 * a new request for a NL mode lock being blocked.
2306 * 6-11: If the optional EXPEDITE flag is used with the new NL mode
2307 * request, then it would be granted. In essence, the use of this flag
2308 * tells the Lock Manager to expedite this request by not considering
2309 * what may be in the CONVERTING or WAITING queues... As of this
2310 * writing, the EXPEDITE flag can be used only with new requests for NL
2311 * mode locks. This flag is not valid for conversion requests.
2313 * A shortcut. Earlier checks return an error if EXPEDITE is used in a
2314 * conversion or used with a non-NL requested mode. We also know an
2315 * EXPEDITE request is always granted immediately, so now must always
2316 * be 1. The full condition to grant an expedite request: (now &&
2317 * !conv && lkb->rqmode == DLM_LOCK_NL && (flags & EXPEDITE)) can
2318 * therefore be shortened to just checking the flag.
2321 if (lkb->lkb_exflags & DLM_LKF_EXPEDITE)
2325 * A shortcut. Without this, !queue_conflict(grantqueue, lkb) would be
2326 * added to the remaining conditions.
2329 if (queue_conflict(&r->res_grantqueue, lkb))
2333 * 6-3: By default, a conversion request is immediately granted if the
2334 * requested mode is compatible with the modes of all other granted
2338 if (queue_conflict(&r->res_convertqueue, lkb))
2342 * The RECOVER_GRANT flag means dlm_recover_grant() is granting
2343 * locks for a recovered rsb, on which lkb's have been rebuilt.
2344 * The lkb's may have been rebuilt on the queues in a different
2345 * order than they were in on the previous master. So, granting
2346 * queued conversions in order after recovery doesn't make sense
2347 * since the order hasn't been preserved anyway. The new order
2348 * could also have created a new "in place" conversion deadlock.
2349 * (e.g. old, failed master held granted EX, with PR->EX, NL->EX.
2350 * After recovery, there would be no granted locks, and possibly
2351 * NL->EX, PR->EX, an in-place conversion deadlock.) So, after
2352 * recovery, grant conversions without considering order.
2355 if (conv && recover)
2359 * 6-5: But the default algorithm for deciding whether to grant or
2360 * queue conversion requests does not by itself guarantee that such
2361 * requests are serviced on a "first come first serve" basis. This, in
2362 * turn, can lead to a phenomenon known as "indefinite postponement".
2364 * 6-7: This issue is dealt with by using the optional QUECVT flag with
2365 * the system service employed to request a lock conversion. This flag
2366 * forces certain conversion requests to be queued, even if they are
2367 * compatible with the granted modes of other locks on the same
2368 * resource. Thus, the use of this flag results in conversion requests
2369 * being ordered on a "first come first serve" basis.
2371 * DCT: This condition is all about new conversions being able to occur
2372 * "in place" while the lock remains on the granted queue (assuming
2373 * nothing else conflicts.) IOW if QUECVT isn't set, a conversion
2374 * doesn't _have_ to go onto the convert queue where it's processed in
2375 * order. The "now" variable distinguishes converts being received and
2376 * processed for the first time from converts already queued, because
2377 * once a convert is moved to the convert queue, the condition below
2378 * applies, requiring fifo granting.
2381 if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT))
2385 * Even if the convert is compat with all granted locks,
2386 * QUECVT forces it behind other locks on the convert queue.
2389 if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) {
2390 if (list_empty(&r->res_convertqueue))
2397 * The NOORDER flag is set to avoid the standard vms rules on grant
2401 if (lkb->lkb_exflags & DLM_LKF_NOORDER)
2405 * 6-3: Once in that queue [CONVERTING], a conversion request cannot be
2406 * granted until all other conversion requests ahead of it are granted
2410 if (!now && conv && first_in_list(lkb, &r->res_convertqueue))
2414 * 6-4: By default, a new request is immediately granted only if all
2415 * three of the following conditions are satisfied when the request is
2417 * - The queue of ungranted conversion requests for the resource is
2419 * - The queue of ungranted new requests for the resource is empty.
2420 * - The mode of the new request is compatible with the most
2421 * restrictive mode of all granted locks on the resource.
2424 if (now && !conv && list_empty(&r->res_convertqueue) &&
2425 list_empty(&r->res_waitqueue))
2429 * 6-4: Once a lock request is in the queue of ungranted new requests,
2430 * it cannot be granted until the queue of ungranted conversion
2431 * requests is empty, all ungranted new requests ahead of it are
2432 * granted and/or canceled, and it is compatible with the granted mode
2433 * of the most restrictive lock granted on the resource.
2436 if (!now && !conv && list_empty(&r->res_convertqueue) &&
2437 first_in_list(lkb, &r->res_waitqueue))
2443 static int can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now,
2444 int recover, int *err)
2447 int8_t alt = 0, rqmode = lkb->lkb_rqmode;
2448 int8_t is_convert = (lkb->lkb_grmode != DLM_LOCK_IV);
2453 rv = _can_be_granted(r, lkb, now, recover);
2458 * The CONVDEADLK flag is non-standard and tells the dlm to resolve
2459 * conversion deadlocks by demoting grmode to NL, otherwise the dlm
2460 * cancels one of the locks.
2463 if (is_convert && can_be_queued(lkb) &&
2464 conversion_deadlock_detect(r, lkb)) {
2465 if (lkb->lkb_exflags & DLM_LKF_CONVDEADLK) {
2466 lkb->lkb_grmode = DLM_LOCK_NL;
2467 lkb->lkb_sbflags |= DLM_SBF_DEMOTED;
2471 log_print("can_be_granted deadlock %x now %d",
2479 * The ALTPR and ALTCW flags are non-standard and tell the dlm to try
2480 * to grant a request in a mode other than the normal rqmode. It's a
2481 * simple way to provide a big optimization to applications that can use them.
2485 if (rqmode != DLM_LOCK_PR && (lkb->lkb_exflags & DLM_LKF_ALTPR))
2487 else if (rqmode != DLM_LOCK_CW && (lkb->lkb_exflags & DLM_LKF_ALTCW))
2491 lkb->lkb_rqmode = alt;
2492 rv = _can_be_granted(r, lkb, now, 0);
2494 lkb->lkb_sbflags |= DLM_SBF_ALTMODE;
2496 lkb->lkb_rqmode = rqmode;
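/*
 * For example (a sketch): a request for PW with DLM_LKF_ALTPR set is
 * first tried as PW; if PW cannot be granted, it is retried with
 * rqmode PR, and if that succeeds the caller sees DLM_SBF_ALTMODE in
 * the lksb flags.
 */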
2502 /* Returns the highest requested mode of all blocked conversions; sets
2503 cw if there's a blocked conversion to DLM_LOCK_CW. */
2505 static int grant_pending_convert(struct dlm_rsb *r, int high, int *cw,
2506 unsigned int *count)
2508 struct dlm_lkb *lkb, *s;
2509 int recover = rsb_flag(r, RSB_RECOVER_GRANT);
2510 int hi, demoted, quit, grant_restart, demote_restart;
2519 list_for_each_entry_safe(lkb, s, &r->res_convertqueue, lkb_statequeue) {
2520 demoted = is_demoted(lkb);
2523 if (can_be_granted(r, lkb, 0, recover, &deadlk)) {
2524 grant_lock_pending(r, lkb);
2531 if (!demoted && is_demoted(lkb)) {
2532 log_print("WARN: pending demoted %x node %d %s",
2533 lkb->lkb_id, lkb->lkb_nodeid, r->res_name);
2540 * If the DLM_LKF_NODLCKWT flag is set and conversion
2541 * deadlock is detected, we send a blocking AST so the
2542 * holder can down-convert (or cancel) the conversion.
2544 if (lkb->lkb_exflags & DLM_LKF_NODLCKWT) {
2545 if (lkb->lkb_highbast < lkb->lkb_rqmode) {
2546 queue_bast(r, lkb, lkb->lkb_rqmode);
2547 lkb->lkb_highbast = lkb->lkb_rqmode;
2550 log_print("WARN: pending deadlock %x node %d %s",
2551 lkb->lkb_id, lkb->lkb_nodeid,
2558 hi = max_t(int, lkb->lkb_rqmode, hi);
2560 if (cw && lkb->lkb_rqmode == DLM_LOCK_CW)
2566 if (demote_restart && !quit) {
2571 return max_t(int, high, hi);
2574 static int grant_pending_wait(struct dlm_rsb *r, int high, int *cw,
2575 unsigned int *count)
2577 struct dlm_lkb *lkb, *s;
2579 list_for_each_entry_safe(lkb, s, &r->res_waitqueue, lkb_statequeue) {
2580 if (can_be_granted(r, lkb, 0, 0, NULL)) {
2581 grant_lock_pending(r, lkb);
2585 high = max_t(int, lkb->lkb_rqmode, high);
2586 if (lkb->lkb_rqmode == DLM_LOCK_CW)
2594 /* cw of 1 means there's a lock with a rqmode of DLM_LOCK_CW that's blocked
2595 on either the convert or waiting queue.
2596 high is the largest rqmode of all locks blocked on the convert or waiting queue.
2599 static int lock_requires_bast(struct dlm_lkb *gr, int high, int cw)
2601 if (gr->lkb_grmode == DLM_LOCK_PR && cw) {
2602 if (gr->lkb_highbast < DLM_LOCK_EX)
2607 if (gr->lkb_highbast < high &&
2608 !__dlm_compat_matrix[gr->lkb_grmode+1][high+1])
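/*
 * The explicit PR/CW test above handles the case the generic matrix
 * check misses: a granted PR lock is compatible with a blocked PR
 * request (so a "high" of PR alone would not trigger a bast), but it
 * still blocks a queued CW request, so the PR holder must be basted
 * when cw is set.
 */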
2613 static void grant_pending_locks(struct dlm_rsb *r, unsigned int *count)
2615 struct dlm_lkb *lkb, *s;
2616 int high = DLM_LOCK_IV;
2619 if (!is_master(r)) {
2620 log_print("grant_pending_locks r nodeid %d", r->res_nodeid);
2625 high = grant_pending_convert(r, high, &cw, count);
2626 high = grant_pending_wait(r, high, &cw, count);
2628 if (high == DLM_LOCK_IV)
2632 * If there are locks left on the wait/convert queue then send blocking
2633 * ASTs to granted locks based on the largest requested mode (high)
2637 list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
2638 if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
2639 if (cw && high == DLM_LOCK_PR &&
2640 lkb->lkb_grmode == DLM_LOCK_PR)
2641 queue_bast(r, lkb, DLM_LOCK_CW);
2643 queue_bast(r, lkb, high);
2644 lkb->lkb_highbast = high;
2649 static int modes_require_bast(struct dlm_lkb *gr, struct dlm_lkb *rq)
2651 if ((gr->lkb_grmode == DLM_LOCK_PR && rq->lkb_rqmode == DLM_LOCK_CW) ||
2652 (gr->lkb_grmode == DLM_LOCK_CW && rq->lkb_rqmode == DLM_LOCK_PR)) {
2653 if (gr->lkb_highbast < DLM_LOCK_EX)
2658 if (gr->lkb_highbast < rq->lkb_rqmode && !modes_compat(gr, rq))
2663 static void send_bast_queue(struct dlm_rsb *r, struct list_head *head,
2664 struct dlm_lkb *lkb)
2668 list_for_each_entry(gr, head, lkb_statequeue) {
2669 /* skip self when sending basts to convertqueue */
2672 if (gr->lkb_bastfn && modes_require_bast(gr, lkb)) {
2673 queue_bast(r, gr, lkb->lkb_rqmode);
2674 gr->lkb_highbast = lkb->lkb_rqmode;
2679 static void send_blocking_asts(struct dlm_rsb *r, struct dlm_lkb *lkb)
2681 send_bast_queue(r, &r->res_grantqueue, lkb);
2684 static void send_blocking_asts_all(struct dlm_rsb *r, struct dlm_lkb *lkb)
2686 send_bast_queue(r, &r->res_grantqueue, lkb);
2687 send_bast_queue(r, &r->res_convertqueue, lkb);
2690 /* set_master(r, lkb) -- set the master nodeid of a resource
2692 The purpose of this function is to set the nodeid field in the given
2693 lkb using the nodeid field in the given rsb. If the rsb's nodeid is
2694 known, it can just be copied to the lkb and the function will return
2695 0. If the rsb's nodeid is _not_ known, it needs to be looked up
2696 before it can be copied to the lkb.
2698 When the rsb nodeid is being looked up remotely, the initial lkb
2699 causing the lookup is kept on the ls_waiters list waiting for the
2700 lookup reply. Other lkb's waiting for the same rsb lookup are kept
2701 on the rsb's res_lookup list until the master is verified.
2704 0: nodeid is set in rsb/lkb and the caller should go ahead and use it
2705 1: the rsb master is not available and the lkb has been placed on
2709 static int set_master(struct dlm_rsb *r, struct dlm_lkb *lkb)
2711 int our_nodeid = dlm_our_nodeid();
2713 if (rsb_flag(r, RSB_MASTER_UNCERTAIN)) {
2714 rsb_clear_flag(r, RSB_MASTER_UNCERTAIN);
2715 r->res_first_lkid = lkb->lkb_id;
2716 lkb->lkb_nodeid = r->res_nodeid;
2720 if (r->res_first_lkid && r->res_first_lkid != lkb->lkb_id) {
2721 list_add_tail(&lkb->lkb_rsb_lookup, &r->res_lookup);
2725 if (r->res_master_nodeid == our_nodeid) {
2726 lkb->lkb_nodeid = 0;
2730 if (r->res_master_nodeid) {
2731 lkb->lkb_nodeid = r->res_master_nodeid;
2735 if (dlm_dir_nodeid(r) == our_nodeid) {
2736 /* This is a somewhat unusual case; find_rsb will usually
2737 have set res_master_nodeid when dir nodeid is local, but
2738 there are cases where we become the dir node after we've
2739 passed find_rsb and go through _request_lock again.
2740 confirm_master() or process_lookup_list() needs to be
2741 called after this. */
2742 log_debug(r->res_ls, "set_master %x self master %d dir %d %s",
2743 lkb->lkb_id, r->res_master_nodeid, r->res_dir_nodeid,
2745 r->res_master_nodeid = our_nodeid;
2747 lkb->lkb_nodeid = 0;
2751 wait_pending_remove(r);
2753 r->res_first_lkid = lkb->lkb_id;
2754 send_lookup(r, lkb);
2758 static void process_lookup_list(struct dlm_rsb *r)
2760 struct dlm_lkb *lkb, *safe;
2762 list_for_each_entry_safe(lkb, safe, &r->res_lookup, lkb_rsb_lookup) {
2763 list_del_init(&lkb->lkb_rsb_lookup);
2764 _request_lock(r, lkb);
2769 /* confirm_master -- confirm (or deny) an rsb's master nodeid */
2771 static void confirm_master(struct dlm_rsb *r, int error)
2773 struct dlm_lkb *lkb;
2775 if (!r->res_first_lkid)
2781 r->res_first_lkid = 0;
2782 process_lookup_list(r);
2788 /* the remote request failed and won't be retried (it was
2789 a NOQUEUE, or has been canceled/unlocked); make a waiting
2790 lkb the first_lkid */
2792 r->res_first_lkid = 0;
2794 if (!list_empty(&r->res_lookup)) {
2795 lkb = list_entry(r->res_lookup.next, struct dlm_lkb,
2797 list_del_init(&lkb->lkb_rsb_lookup);
2798 r->res_first_lkid = lkb->lkb_id;
2799 _request_lock(r, lkb);
2804 log_error(r->res_ls, "confirm_master unknown error %d", error);
2808 static int set_lock_args(int mode, struct dlm_lksb *lksb, uint32_t flags,
2809 int namelen, unsigned long timeout_cs,
2810 void (*ast) (void *astparam),
2812 void (*bast) (void *astparam, int mode),
2813 struct dlm_args *args)
2817 /* check for invalid arg usage */
2819 if (mode < 0 || mode > DLM_LOCK_EX)
2822 if (!(flags & DLM_LKF_CONVERT) && (namelen > DLM_RESNAME_MAXLEN))
2825 if (flags & DLM_LKF_CANCEL)
2828 if (flags & DLM_LKF_QUECVT && !(flags & DLM_LKF_CONVERT))
2831 if (flags & DLM_LKF_CONVDEADLK && !(flags & DLM_LKF_CONVERT))
2834 if (flags & DLM_LKF_CONVDEADLK && flags & DLM_LKF_NOQUEUE)
2837 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_CONVERT)
2840 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_QUECVT)
2843 if (flags & DLM_LKF_EXPEDITE && flags & DLM_LKF_NOQUEUE)
2846 if (flags & DLM_LKF_EXPEDITE && mode != DLM_LOCK_NL)
2852 if (flags & DLM_LKF_VALBLK && !lksb->sb_lvbptr)
2855 if (flags & DLM_LKF_CONVERT && !lksb->sb_lkid)
2858 /* these args will be copied to the lkb in validate_lock_args;
2859 it cannot be done now because when converting locks, fields in
2860 an active lkb cannot be modified before locking the rsb */
2862 args->flags = flags;
2864 args->astparam = astparam;
2865 args->bastfn = bast;
2866 args->timeout = timeout_cs;
2874 static int set_unlock_args(uint32_t flags, void *astarg, struct dlm_args *args)
2876 if (flags & ~(DLM_LKF_CANCEL | DLM_LKF_VALBLK | DLM_LKF_IVVALBLK |
2877 DLM_LKF_FORCEUNLOCK))
2880 if (flags & DLM_LKF_CANCEL && flags & DLM_LKF_FORCEUNLOCK)
2883 args->flags = flags;
2884 args->astparam = astarg;
2888 static int validate_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
2889 struct dlm_args *args)
2893 if (args->flags & DLM_LKF_CONVERT) {
2894 if (lkb->lkb_flags & DLM_IFL_MSTCPY)
2897 if (args->flags & DLM_LKF_QUECVT &&
2898 !__quecvt_compat_matrix[lkb->lkb_grmode+1][args->mode+1])
2902 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
2905 if (lkb->lkb_wait_type)
2908 if (is_overlap(lkb))
2912 lkb->lkb_exflags = args->flags;
2913 lkb->lkb_sbflags = 0;
2914 lkb->lkb_astfn = args->astfn;
2915 lkb->lkb_astparam = args->astparam;
2916 lkb->lkb_bastfn = args->bastfn;
2917 lkb->lkb_rqmode = args->mode;
2918 lkb->lkb_lksb = args->lksb;
2919 lkb->lkb_lvbptr = args->lksb->sb_lvbptr;
2920 lkb->lkb_ownpid = (int) current->pid;
2921 lkb->lkb_timeout_cs = args->timeout;
2925 log_debug(ls, "validate_lock_args %d %x %x %x %d %d %s",
2926 rv, lkb->lkb_id, lkb->lkb_flags, args->flags,
2927 lkb->lkb_status, lkb->lkb_wait_type,
2928 lkb->lkb_resource->res_name);
2932 /* when dlm_unlock() sees -EBUSY with CANCEL/FORCEUNLOCK it returns 0 for success */
2935 /* note: it's valid for lkb_nodeid/res_nodeid to be -1 when we get here
2936 because there may be a lookup in progress and it's valid to do
2937 cancel/unlockf on it */
2939 static int validate_unlock_args(struct dlm_lkb *lkb, struct dlm_args *args)
2941 struct dlm_ls *ls = lkb->lkb_resource->res_ls;
2944 if (lkb->lkb_flags & DLM_IFL_MSTCPY) {
2945 log_error(ls, "unlock on MSTCPY %x", lkb->lkb_id);
2950 /* an lkb may still exist even though the lock is EOL'ed due to a
2951 cancel, unlock or failed noqueue request; an app can't use these
2952 locks; return same error as if the lkid had not been found at all */
2954 if (lkb->lkb_flags & DLM_IFL_ENDOFLIFE) {
2955 log_debug(ls, "unlock on ENDOFLIFE %x", lkb->lkb_id);
2960 /* an lkb may be waiting for an rsb lookup to complete where the
2961 lookup was initiated by another lock */
2963 if (!list_empty(&lkb->lkb_rsb_lookup)) {
2964 if (args->flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)) {
2965 log_debug(ls, "unlock on rsb_lookup %x", lkb->lkb_id);
2966 list_del_init(&lkb->lkb_rsb_lookup);
2967 queue_cast(lkb->lkb_resource, lkb,
2968 args->flags & DLM_LKF_CANCEL ?
2969 -DLM_ECANCEL : -DLM_EUNLOCK);
2970 unhold_lkb(lkb); /* undoes create_lkb() */
2972 /* caller changes -EBUSY to 0 for CANCEL and FORCEUNLOCK */
2977 /* cancel not allowed with another cancel/unlock in progress */
2979 if (args->flags & DLM_LKF_CANCEL) {
2980 if (lkb->lkb_exflags & DLM_LKF_CANCEL)
2983 if (is_overlap(lkb))
2986 /* don't let scand try to do a cancel */
2989 if (lkb->lkb_flags & DLM_IFL_RESEND) {
2990 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
2995 /* there's nothing to cancel */
2996 if (lkb->lkb_status == DLM_LKSTS_GRANTED &&
2997 !lkb->lkb_wait_type) {
3002 switch (lkb->lkb_wait_type) {
3003 case DLM_MSG_LOOKUP:
3004 case DLM_MSG_REQUEST:
3005 lkb->lkb_flags |= DLM_IFL_OVERLAP_CANCEL;
3008 case DLM_MSG_UNLOCK:
3009 case DLM_MSG_CANCEL:
3012 /* add_to_waiters() will set OVERLAP_CANCEL */
3016 /* do we need to allow a force-unlock if there's a normal unlock
3017 already in progress? in what conditions could the normal unlock
3018 fail such that we'd want to send a force-unlock to be sure? */
3020 if (args->flags & DLM_LKF_FORCEUNLOCK) {
3021 if (lkb->lkb_exflags & DLM_LKF_FORCEUNLOCK)
3024 if (is_overlap_unlock(lkb))
3027 /* don't let scand try to do a cancel */
3030 if (lkb->lkb_flags & DLM_IFL_RESEND) {
3031 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3036 switch (lkb->lkb_wait_type) {
3037 case DLM_MSG_LOOKUP:
3038 case DLM_MSG_REQUEST:
3039 lkb->lkb_flags |= DLM_IFL_OVERLAP_UNLOCK;
3042 case DLM_MSG_UNLOCK:
3045 /* add_to_waiters() will set OVERLAP_UNLOCK */
3049 /* normal unlock not allowed if there's any op in progress */
3051 if (lkb->lkb_wait_type || lkb->lkb_wait_count)
3055 /* an overlapping op shouldn't blow away exflags from other op */
3056 lkb->lkb_exflags |= args->flags;
3057 lkb->lkb_sbflags = 0;
3058 lkb->lkb_astparam = args->astparam;
3062 log_debug(ls, "validate_unlock_args %d %x %x %x %x %d %s", rv,
3063 lkb->lkb_id, lkb->lkb_flags, lkb->lkb_exflags,
3064 args->flags, lkb->lkb_wait_type,
3065 lkb->lkb_resource->res_name);
3070 * Four stage 4 varieties:
3071 * do_request(), do_convert(), do_unlock(), do_cancel()
3072 * These are called on the master node for the given lock and
3073 * from the central locking logic.
3076 static int do_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3080 if (can_be_granted(r, lkb, 1, 0, NULL)) {
3082 queue_cast(r, lkb, 0);
3086 if (can_be_queued(lkb)) {
3087 error = -EINPROGRESS;
3088 add_lkb(r, lkb, DLM_LKSTS_WAITING);
3094 queue_cast(r, lkb, -EAGAIN);
3099 static void do_request_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3104 if (force_blocking_asts(lkb))
3105 send_blocking_asts_all(r, lkb);
3108 send_blocking_asts(r, lkb);
3113 static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3118 /* changing an existing lock may allow others to be granted */
3120 if (can_be_granted(r, lkb, 1, 0, &deadlk)) {
3122 queue_cast(r, lkb, 0);
3126 /* can_be_granted() detected that this lock would block in a conversion
3127 deadlock, so we leave it on the granted queue and return EDEADLK in
3128 the ast for the convert. */
3130 if (deadlk && !(lkb->lkb_exflags & DLM_LKF_NODLCKWT)) {
3131 /* it's left on the granted queue */
3132 revert_lock(r, lkb);
3133 queue_cast(r, lkb, -EDEADLK);
3138 /* is_demoted() means the can_be_granted() above set the grmode
3139 to NL, and left us on the granted queue. This auto-demotion
3140 (due to CONVDEADLK) might mean other locks, and/or this lock, are
3141 now grantable. We have to try to grant other converting locks
3142 before we try again to grant this one. */
3144 if (is_demoted(lkb)) {
3145 grant_pending_convert(r, DLM_LOCK_IV, NULL, NULL);
3146 if (_can_be_granted(r, lkb, 1, 0)) {
3148 queue_cast(r, lkb, 0);
3151 /* else fall through and move to convert queue */
3154 if (can_be_queued(lkb)) {
3155 error = -EINPROGRESS;
3157 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
3163 queue_cast(r, lkb, -EAGAIN);
3168 static void do_convert_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3173 grant_pending_locks(r, NULL);
3174 /* grant_pending_locks also sends basts */
3177 if (force_blocking_asts(lkb))
3178 send_blocking_asts_all(r, lkb);
3181 send_blocking_asts(r, lkb);
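/* returns: -DLM_EUNLOCK always; the lock is removed and a cast queued */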
3186 static int do_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3188 remove_lock(r, lkb);
3189 queue_cast(r, lkb, -DLM_EUNLOCK);
3190 return -DLM_EUNLOCK;
3193 static void do_unlock_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3196 grant_pending_locks(r, NULL);
3199 /* returns: 0 did nothing, -DLM_ECANCEL canceled lock */
3201 static int do_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3205 error = revert_lock(r, lkb);
3207 queue_cast(r, lkb, -DLM_ECANCEL);
3208 return -DLM_ECANCEL;
3213 static void do_cancel_effects(struct dlm_rsb *r, struct dlm_lkb *lkb,
3217 grant_pending_locks(r, NULL);
3221 * Four stage 3 varieties:
3222 * _request_lock(), _convert_lock(), _unlock_lock(), _cancel_lock()
3225 /* add a new lkb to a possibly new rsb, called by requesting process */
3227 static int _request_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3231 /* set_master: sets lkb nodeid from r */
3233 error = set_master(r, lkb);
3242 /* receive_request() calls do_request() on remote node */
3243 error = send_request(r, lkb);
3245 error = do_request(r, lkb);
3246 /* for remote locks the request_reply is sent
3247 between do_request and do_request_effects */
3248 do_request_effects(r, lkb, error);
3254 /* change some property of an existing lkb, e.g. mode */
3256 static int _convert_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3261 /* receive_convert() calls do_convert() on remote node */
3262 error = send_convert(r, lkb);
3264 error = do_convert(r, lkb);
3265 /* for remote locks the convert_reply is sent
3266 between do_convert and do_convert_effects */
3267 do_convert_effects(r, lkb, error);
3273 /* remove an existing lkb from the granted queue */
3275 static int _unlock_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3280 /* receive_unlock() calls do_unlock() on remote node */
3281 error = send_unlock(r, lkb);
3283 error = do_unlock(r, lkb);
3284 /* for remote locks the unlock_reply is sent
3285 between do_unlock and do_unlock_effects */
3286 do_unlock_effects(r, lkb, error);
3292 /* remove an existing lkb from the convert or wait queue */
3294 static int _cancel_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3299 /* receive_cancel() calls do_cancel() on remote node */
3300 error = send_cancel(r, lkb);
3302 error = do_cancel(r, lkb);
3303 /* for remote locks the cancel_reply is sent
3304 between do_cancel and do_cancel_effects */
3305 do_cancel_effects(r, lkb, error);
3312 * Four stage 2 varieties:
3313 * request_lock(), convert_lock(), unlock_lock(), cancel_lock()
3316 static int request_lock(struct dlm_ls *ls, struct dlm_lkb *lkb, char *name,
3317 int len, struct dlm_args *args)
3322 error = validate_lock_args(ls, lkb, args);
3326 error = find_rsb(ls, name, len, 0, R_REQUEST, &r);
3333 lkb->lkb_lksb->sb_lkid = lkb->lkb_id;
3335 error = _request_lock(r, lkb);
3342 static int convert_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3343 struct dlm_args *args)
3348 r = lkb->lkb_resource;
3353 error = validate_lock_args(ls, lkb, args);
3357 error = _convert_lock(r, lkb);
3364 static int unlock_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3365 struct dlm_args *args)
3370 r = lkb->lkb_resource;
3375 error = validate_unlock_args(lkb, args);
3379 error = _unlock_lock(r, lkb);
3386 static int cancel_lock(struct dlm_ls *ls, struct dlm_lkb *lkb,
3387 struct dlm_args *args)
3392 r = lkb->lkb_resource;
3397 error = validate_unlock_args(lkb, args);
3401 error = _cancel_lock(r, lkb);
3409 * Two stage 1 varieties: dlm_lock() and dlm_unlock()
3412 int dlm_lock(dlm_lockspace_t *lockspace,
3414 struct dlm_lksb *lksb,
3417 unsigned int namelen,
3418 uint32_t parent_lkid,
3419 void (*ast) (void *astarg),
3421 void (*bast) (void *astarg, int mode))
3424 struct dlm_lkb *lkb;
3425 struct dlm_args args;
3426 int error, convert = flags & DLM_LKF_CONVERT;
3428 ls = dlm_find_lockspace_local(lockspace);
3432 dlm_lock_recovery(ls);
3435 error = find_lkb(ls, lksb->sb_lkid, &lkb);
3437 error = create_lkb(ls, &lkb);
3442 error = set_lock_args(mode, lksb, flags, namelen, 0, ast,
3443 astarg, bast, &args);
3448 error = convert_lock(ls, lkb, &args);
3450 error = request_lock(ls, lkb, name, namelen, &args);
3452 if (error == -EINPROGRESS)
3455 if (convert || error)
3457 if (error == -EAGAIN || error == -EDEADLK)
3460 dlm_unlock_recovery(ls);
3461 dlm_put_lockspace(ls);
3465 int dlm_unlock(dlm_lockspace_t *lockspace,
3468 struct dlm_lksb *lksb,
3472 struct dlm_lkb *lkb;
3473 struct dlm_args args;
3476 ls = dlm_find_lockspace_local(lockspace);
3480 dlm_lock_recovery(ls);
3482 error = find_lkb(ls, lkid, &lkb);
3486 error = set_unlock_args(flags, astarg, &args);
3490 if (flags & DLM_LKF_CANCEL)
3491 error = cancel_lock(ls, lkb, &args);
3493 error = unlock_lock(ls, lkb, &args);
3495 if (error == -DLM_EUNLOCK || error == -DLM_ECANCEL)
3497 if (error == -EBUSY && (flags & (DLM_LKF_CANCEL | DLM_LKF_FORCEUNLOCK)))
3502 dlm_unlock_recovery(ls);
3503 dlm_put_lockspace(ls);
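/*
 * Usage sketch for the two stage 1 entry points above. This is
 * illustrative only (hence the #if 0) and not part of the dlm; the
 * example_* names are hypothetical. A real caller must keep the lksb
 * stable until the completion ast fires and read the result from
 * lksb.sb_status there.
 */
#if 0
static struct dlm_lksb example_lksb;

static void example_ast(void *astarg)
{
	/* request completed; result is in example_lksb.sb_status */
}

static void example_locking(dlm_lockspace_t *ls)
{
	int error;

	/* 0 means the request is in progress; completion comes via ast */
	error = dlm_lock(ls, DLM_LOCK_EX, &example_lksb, 0, "example_res",
			 strlen("example_res"), 0, example_ast, NULL, NULL);
	if (error)
		return;

	/* ... wait for example_ast, use the resource ... */

	dlm_unlock(ls, example_lksb.sb_lkid, 0, &example_lksb, NULL);
}
#endif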
3508 * send/receive routines for remote operations and replies
3512 * send_request receive_request
3513 * send_convert receive_convert
3514 * send_unlock receive_unlock
3515 * send_cancel receive_cancel
3516 * send_grant receive_grant
3517 * send_bast receive_bast
3518 * send_lookup receive_lookup
3519 * send_remove receive_remove
3522 * receive_request_reply send_request_reply
3523 * receive_convert_reply send_convert_reply
3524 * receive_unlock_reply send_unlock_reply
3525 * receive_cancel_reply send_cancel_reply
3526 * receive_lookup_reply send_lookup_reply
3529 static int _create_message(struct dlm_ls *ls, int mb_len,
3530 int to_nodeid, int mstype,
3531 struct dlm_message **ms_ret,
3532 struct dlm_mhandle **mh_ret)
3534 struct dlm_message *ms;
3535 struct dlm_mhandle *mh;
3538 /* get_buffer gives us a message handle (mh) that we need to
3539 pass into lowcomms_commit and a message buffer (mb) that we
3540 write our data into */
3542 mh = dlm_lowcomms_get_buffer(to_nodeid, mb_len, GFP_NOFS, &mb);
3546 memset(mb, 0, mb_len);
3548 ms = (struct dlm_message *) mb;
3550 ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
3551 ms->m_header.h_lockspace = ls->ls_global_id;
3552 ms->m_header.h_nodeid = dlm_our_nodeid();
3553 ms->m_header.h_length = mb_len;
3554 ms->m_header.h_cmd = DLM_MSG;
3556 ms->m_type = mstype;
3563 static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
3564 int to_nodeid, int mstype,
3565 struct dlm_message **ms_ret,
3566 struct dlm_mhandle **mh_ret)
3568 int mb_len = sizeof(struct dlm_message);
3571 case DLM_MSG_REQUEST:
3572 case DLM_MSG_LOOKUP:
3573 case DLM_MSG_REMOVE:
3574 mb_len += r->res_length;
3576 case DLM_MSG_CONVERT:
3577 case DLM_MSG_UNLOCK:
3578 case DLM_MSG_REQUEST_REPLY:
3579 case DLM_MSG_CONVERT_REPLY:
3581 if (lkb && lkb->lkb_lvbptr)
3582 mb_len += r->res_ls->ls_lvblen;
3586 return _create_message(r->res_ls, mb_len, to_nodeid, mstype,
3590 /* further lowcomms enhancements or alternate implementations may make
3591 the return value from this function useful at some point */
3593 static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
3595 dlm_message_out(ms);
3596 dlm_lowcomms_commit_buffer(mh);
3600 static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
3601 struct dlm_message *ms)
3603 ms->m_nodeid = lkb->lkb_nodeid;
3604 ms->m_pid = lkb->lkb_ownpid;
3605 ms->m_lkid = lkb->lkb_id;
3606 ms->m_remid = lkb->lkb_remid;
3607 ms->m_exflags = lkb->lkb_exflags;
3608 ms->m_sbflags = lkb->lkb_sbflags;
3609 ms->m_flags = lkb->lkb_flags;
3610 ms->m_lvbseq = lkb->lkb_lvbseq;
3611 ms->m_status = lkb->lkb_status;
3612 ms->m_grmode = lkb->lkb_grmode;
3613 ms->m_rqmode = lkb->lkb_rqmode;
3614 ms->m_hash = r->res_hash;
3616 /* m_result and m_bastmode are set from function args,
3617 not from lkb fields */
3619 if (lkb->lkb_bastfn)
3620 ms->m_asts |= DLM_CB_BAST;
3622 ms->m_asts |= DLM_CB_CAST;
3624 /* compare with switch in create_message; send_remove() doesn't use send_args() */
3627 switch (ms->m_type) {
3628 case DLM_MSG_REQUEST:
3629 case DLM_MSG_LOOKUP:
3630 memcpy(ms->m_extra, r->res_name, r->res_length);
3632 case DLM_MSG_CONVERT:
3633 case DLM_MSG_UNLOCK:
3634 case DLM_MSG_REQUEST_REPLY:
3635 case DLM_MSG_CONVERT_REPLY:
3637 if (!lkb->lkb_lvbptr)
3639 memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
3644 static int send_common(struct dlm_rsb *r, struct dlm_lkb *lkb, int mstype)
3646 struct dlm_message *ms;
3647 struct dlm_mhandle *mh;
3648 int to_nodeid, error;
3650 to_nodeid = r->res_nodeid;
3652 error = add_to_waiters(lkb, mstype, to_nodeid);
3656 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3660 send_args(r, lkb, ms);
3662 error = send_message(mh, ms);
3668 remove_from_waiters(lkb, msg_reply_type(mstype));
3672 static int send_request(struct dlm_rsb *r, struct dlm_lkb *lkb)
3674 return send_common(r, lkb, DLM_MSG_REQUEST);
3677 static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
3681 error = send_common(r, lkb, DLM_MSG_CONVERT);
3683 /* down conversions go without a reply from the master; they are always granted immediately, so we fake the reply locally with a stub message */
3684 if (!error && down_conversion(lkb)) {
3685 remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
3686 r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
3687 r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
3688 r->res_ls->ls_stub_ms.m_result = 0;
3689 __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
3695 /* FIXME: if this lkb is the only lock we hold on the rsb, then set
3696 MASTER_UNCERTAIN to force the next request on the rsb to confirm
3697 that the master is still correct. */
3699 static int send_unlock(struct dlm_rsb *r, struct dlm_lkb *lkb)
3701 return send_common(r, lkb, DLM_MSG_UNLOCK);
3704 static int send_cancel(struct dlm_rsb *r, struct dlm_lkb *lkb)
3706 return send_common(r, lkb, DLM_MSG_CANCEL);
3709 static int send_grant(struct dlm_rsb *r, struct dlm_lkb *lkb)
3711 struct dlm_message *ms;
3712 struct dlm_mhandle *mh;
3713 int to_nodeid, error;
3715 to_nodeid = lkb->lkb_nodeid;
3717 error = create_message(r, lkb, to_nodeid, DLM_MSG_GRANT, &ms, &mh);
3721 send_args(r, lkb, ms);
3725 error = send_message(mh, ms);
3730 static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
3732 struct dlm_message *ms;
3733 struct dlm_mhandle *mh;
3734 int to_nodeid, error;
3736 to_nodeid = lkb->lkb_nodeid;
3738 error = create_message(r, NULL, to_nodeid, DLM_MSG_BAST, &ms, &mh);
3742 send_args(r, lkb, ms);
3744 ms->m_bastmode = mode;
3746 error = send_message(mh, ms);
3751 static int send_lookup(struct dlm_rsb *r, struct dlm_lkb *lkb)
3753 struct dlm_message *ms;
3754 struct dlm_mhandle *mh;
3755 int to_nodeid, error;
3757 to_nodeid = dlm_dir_nodeid(r);
3759 error = add_to_waiters(lkb, DLM_MSG_LOOKUP, to_nodeid);
3763 error = create_message(r, NULL, to_nodeid, DLM_MSG_LOOKUP, &ms, &mh);
3767 send_args(r, lkb, ms);
3769 error = send_message(mh, ms);
3775 remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
3779 static int send_remove(struct dlm_rsb *r)
3781 struct dlm_message *ms;
3782 struct dlm_mhandle *mh;
3783 int to_nodeid, error;
3785 to_nodeid = dlm_dir_nodeid(r);
3787 error = create_message(r, NULL, to_nodeid, DLM_MSG_REMOVE, &ms, &mh);
3791 memcpy(ms->m_extra, r->res_name, r->res_length);
3792 ms->m_hash = r->res_hash;
3794 error = send_message(mh, ms);
3799 static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
3802 struct dlm_message *ms;
3803 struct dlm_mhandle *mh;
3804 int to_nodeid, error;
3806 to_nodeid = lkb->lkb_nodeid;
3808 error = create_message(r, lkb, to_nodeid, mstype, &ms, &mh);
3812 send_args(r, lkb, ms);
3816 error = send_message(mh, ms);
3821 static int send_request_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3823 return send_common_reply(r, lkb, DLM_MSG_REQUEST_REPLY, rv);
3826 static int send_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3828 return send_common_reply(r, lkb, DLM_MSG_CONVERT_REPLY, rv);
3831 static int send_unlock_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3833 return send_common_reply(r, lkb, DLM_MSG_UNLOCK_REPLY, rv);
3836 static int send_cancel_reply(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
3838 return send_common_reply(r, lkb, DLM_MSG_CANCEL_REPLY, rv);
3841 static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
3842 int ret_nodeid, int rv)
3844 struct dlm_rsb *r = &ls->ls_stub_rsb;
3845 struct dlm_message *ms;
3846 struct dlm_mhandle *mh;
3847 int error, nodeid = ms_in->m_header.h_nodeid;
3849 error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
3853 ms->m_lkid = ms_in->m_lkid;
3855 ms->m_nodeid = ret_nodeid;
3857 error = send_message(mh, ms);
3862 /* which args we save from a received message depends heavily on the type
3863 of message, unlike the send side where we can safely send everything about
3864 the lkb for any type of message */
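/* Only the low 16 bits of lkb_flags are carried in m_flags on the wire;
   the high 16 bits are node-local state, so the masks below preserve
   them when copying flags from a received message. */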
3866 static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
3868 lkb->lkb_exflags = ms->m_exflags;
3869 lkb->lkb_sbflags = ms->m_sbflags;
3870 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3871 (ms->m_flags & 0x0000FFFF);
3874 static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
3876 if (ms->m_flags == DLM_IFL_STUB_MS)
3879 lkb->lkb_sbflags = ms->m_sbflags;
3880 lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
3881 (ms->m_flags & 0x0000FFFF);
3884 static int receive_extralen(struct dlm_message *ms)
3886 return (ms->m_header.h_length - sizeof(struct dlm_message));
3889 static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
3890 struct dlm_message *ms)
3894 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3895 if (!lkb->lkb_lvbptr)
3896 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3897 if (!lkb->lkb_lvbptr)
3899 len = receive_extralen(ms);
3900 if (len > ls->ls_lvblen)
3901 len = ls->ls_lvblen;
3902 memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
3907 static void fake_bastfn(void *astparam, int mode)
3909 log_print("fake_bastfn should not be called");
3912 static void fake_astfn(void *astparam)
3914 log_print("fake_astfn should not be called");
3917 static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3918 struct dlm_message *ms)
3920 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3921 lkb->lkb_ownpid = ms->m_pid;
3922 lkb->lkb_remid = ms->m_lkid;
3923 lkb->lkb_grmode = DLM_LOCK_IV;
3924 lkb->lkb_rqmode = ms->m_rqmode;
3926 lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
3927 lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
3929 if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
3930 /* lkb was just created so there won't be an lvb yet */
3931 lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
3932 if (!lkb->lkb_lvbptr)
3939 static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3940 struct dlm_message *ms)
3942 if (lkb->lkb_status != DLM_LKSTS_GRANTED)
3945 if (receive_lvb(ls, lkb, ms))
3948 lkb->lkb_rqmode = ms->m_rqmode;
3949 lkb->lkb_lvbseq = ms->m_lvbseq;
3954 static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
3955 struct dlm_message *ms)
3957 if (receive_lvb(ls, lkb, ms))
3962 /* We fill in the stub-lkb fields with the info that send_xxxx_reply()
3963 uses to send a reply and that the remote end uses to process the reply. */
3965 static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
3967 struct dlm_lkb *lkb = &ls->ls_stub_lkb;
3968 lkb->lkb_nodeid = ms->m_header.h_nodeid;
3969 lkb->lkb_remid = ms->m_lkid;
3972 /* This is called after the rsb is locked so that we can safely inspect
3973 fields in the lkb. */
3975 static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
3977 int from = ms->m_header.h_nodeid;
3980 switch (ms->m_type) {
3981 case DLM_MSG_CONVERT:
3982 case DLM_MSG_UNLOCK:
3983 case DLM_MSG_CANCEL:
3984 if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
3988 case DLM_MSG_CONVERT_REPLY:
3989 case DLM_MSG_UNLOCK_REPLY:
3990 case DLM_MSG_CANCEL_REPLY:
3993 if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
3997 case DLM_MSG_REQUEST_REPLY:
3998 if (!is_process_copy(lkb))
4000 else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
4009 log_error(lkb->lkb_resource->res_ls,
4010 "ignore invalid message %d from %d %x %x %x %d",
4011 ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
4012 lkb->lkb_flags, lkb->lkb_nodeid);
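/*
 * Note: a REQUEST_REPLY is allowed with lkb_nodeid == -1 above because
 * the dir node may have turned our lookup into a request (see
 * receive_request_reply), in which case the reply arrives before we
 * have set the master nodeid.
 */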
4016 static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
4018 char name[DLM_RESNAME_MAXLEN + 1];
4019 struct dlm_message *ms;
4020 struct dlm_mhandle *mh;
4025 memset(name, 0, sizeof(name));
4026 memcpy(name, ms_name, len);
4028 hash = jhash(name, len, 0);
4029 b = hash & (ls->ls_rsbtbl_size - 1);
4031 dir_nodeid = dlm_hash2nodeid(ls, hash);
4033 log_error(ls, "send_repeat_remove dir %d %s", dir_nodeid, name);
4035 spin_lock(&ls->ls_rsbtbl[b].lock);
4036 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4038 spin_unlock(&ls->ls_rsbtbl[b].lock);
4039 log_error(ls, "repeat_remove on keep %s", name);
4043 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4045 spin_unlock(&ls->ls_rsbtbl[b].lock);
4046 log_error(ls, "repeat_remove on toss %s", name);
4050 /* use ls->remove_name2 to avoid conflict with shrink? */
4052 spin_lock(&ls->ls_remove_spin);
4053 ls->ls_remove_len = len;
4054 memcpy(ls->ls_remove_name, name, DLM_RESNAME_MAXLEN);
4055 spin_unlock(&ls->ls_remove_spin);
4056 spin_unlock(&ls->ls_rsbtbl[b].lock);
4058 rv = _create_message(ls, sizeof(struct dlm_message) + len,
4059 dir_nodeid, DLM_MSG_REMOVE, &ms, &mh);
4063 memcpy(ms->m_extra, name, len);
4066 send_message(mh, ms);
4068 spin_lock(&ls->ls_remove_spin);
4069 ls->ls_remove_len = 0;
4070 memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN);
4071 spin_unlock(&ls->ls_remove_spin);
4074 static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
4076 struct dlm_lkb *lkb;
4079 int error, namelen = 0;
4081 from_nodeid = ms->m_header.h_nodeid;
4083 error = create_lkb(ls, &lkb);
4087 receive_flags(lkb, ms);
4088 lkb->lkb_flags |= DLM_IFL_MSTCPY;
4089 error = receive_request_args(ls, lkb, ms);
4095 /* The dir node is the authority on whether we are the master
4096 for this rsb or not, so if the dir node sends us a request, we should
4097 recreate the rsb if we've destroyed it. This race happens when we
4098 send a remove message to the dir node at the same time that the dir
4099 node sends us a request for the rsb. */
4101 namelen = receive_extralen(ms);
4103 error = find_rsb(ls, ms->m_extra, namelen, from_nodeid,
4104 R_RECEIVE_REQUEST, &r);
4112 if (r->res_master_nodeid != dlm_our_nodeid()) {
4113 error = validate_master_nodeid(ls, r, from_nodeid);
4123 error = do_request(r, lkb);
4124 send_request_reply(r, lkb, error);
4125 do_request_effects(r, lkb, error);
4130 if (error == -EINPROGRESS)
4137 /* TODO: instead of returning ENOTBLK, add the lkb to res_lookup
4138 and do this receive_request again from process_lookup_list once
4139 we get the lookup reply. This would avoid many repeated
4140 ENOTBLK request failures when the lookup reply designating us
4141 as master is delayed. */
4143 /* We could repeatedly return -EBADR here if our send_remove() is
4144 delayed in being sent/arriving/being processed on the dir node.
4145 Another node would repeatedly look up the master, and the dir
4146 node would continue returning our nodeid until our send_remove took effect.
4149 We send another remove message in case our previous send_remove
4150 was lost/ignored/missed somehow. */
4152 if (error != -ENOTBLK) {
4153 log_limit(ls, "receive_request %x from %d %d",
4154 ms->m_lkid, from_nodeid, error);
4157 if (namelen && error == -EBADR) {
4158 send_repeat_remove(ls, ms->m_extra, namelen);
4162 setup_stub_lkb(ls, ms);
4163 send_request_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4167 static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
4169 struct dlm_lkb *lkb;
4171 int error, reply = 1;
4173 error = find_lkb(ls, ms->m_remid, &lkb);
4177 if (lkb->lkb_remid != ms->m_lkid) {
4178 log_error(ls, "receive_convert %x remid %x recover_seq %llu "
4179 "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
4180 (unsigned long long)lkb->lkb_recover_seq,
4181 ms->m_header.h_nodeid, ms->m_lkid);
4187 r = lkb->lkb_resource;
4192 error = validate_message(lkb, ms);
4196 receive_flags(lkb, ms);
4198 error = receive_convert_args(ls, lkb, ms);
4200 send_convert_reply(r, lkb, error);
4204 reply = !down_conversion(lkb);
4206 error = do_convert(r, lkb);
4208 send_convert_reply(r, lkb, error);
4209 do_convert_effects(r, lkb, error);
4217 setup_stub_lkb(ls, ms);
4218 send_convert_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4222 static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
4224 struct dlm_lkb *lkb;
4228 error = find_lkb(ls, ms->m_remid, &lkb);
4232 if (lkb->lkb_remid != ms->m_lkid) {
4233 log_error(ls, "receive_unlock %x remid %x remote %d %x",
4234 lkb->lkb_id, lkb->lkb_remid,
4235 ms->m_header.h_nodeid, ms->m_lkid);
4241 r = lkb->lkb_resource;
4246 error = validate_message(lkb, ms);
4250 receive_flags(lkb, ms);
4252 error = receive_unlock_args(ls, lkb, ms);
4254 send_unlock_reply(r, lkb, error);
4258 error = do_unlock(r, lkb);
4259 send_unlock_reply(r, lkb, error);
4260 do_unlock_effects(r, lkb, error);
4268 setup_stub_lkb(ls, ms);
4269 send_unlock_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4273 static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
4275 struct dlm_lkb *lkb;
4279 error = find_lkb(ls, ms->m_remid, &lkb);
4283 receive_flags(lkb, ms);
4285 r = lkb->lkb_resource;
4290 error = validate_message(lkb, ms);
4294 error = do_cancel(r, lkb);
4295 send_cancel_reply(r, lkb, error);
4296 do_cancel_effects(r, lkb, error);
4304 setup_stub_lkb(ls, ms);
4305 send_cancel_reply(&ls->ls_stub_rsb, &ls->ls_stub_lkb, error);
4309 static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
4311 struct dlm_lkb *lkb;
4315 error = find_lkb(ls, ms->m_remid, &lkb);
4319 r = lkb->lkb_resource;
4324 error = validate_message(lkb, ms);
4328 receive_flags_reply(lkb, ms);
4329 if (is_altmode(lkb))
4330 munge_altmode(lkb, ms);
4331 grant_lock_pc(r, lkb, ms);
4332 queue_cast(r, lkb, 0);
4340 static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
4342 struct dlm_lkb *lkb;
4346 error = find_lkb(ls, ms->m_remid, &lkb);
4350 r = lkb->lkb_resource;
4355 error = validate_message(lkb, ms);
4359 queue_bast(r, lkb, ms->m_bastmode);
4360 lkb->lkb_highbast = ms->m_bastmode;
4368 static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
4370 int len, error, ret_nodeid, from_nodeid, our_nodeid;
4372 from_nodeid = ms->m_header.h_nodeid;
4373 our_nodeid = dlm_our_nodeid();
4375 len = receive_extralen(ms);
4377 error = dlm_master_lookup(ls, from_nodeid, ms->m_extra, len, 0,
4380 /* Optimization: we're master so treat lookup as a request */
4381 if (!error && ret_nodeid == our_nodeid) {
4382 receive_request(ls, ms);
4385 send_lookup_reply(ls, ms, ret_nodeid, error);
4388 static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
4390 char name[DLM_RESNAME_MAXLEN+1];
4393 int rv, len, dir_nodeid, from_nodeid;
4395 from_nodeid = ms->m_header.h_nodeid;
4397 len = receive_extralen(ms);
4399 if (len > DLM_RESNAME_MAXLEN) {
4400 log_error(ls, "receive_remove from %d bad len %d",
4405 dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
4406 if (dir_nodeid != dlm_our_nodeid()) {
4407 log_error(ls, "receive_remove from %d bad nodeid %d",
4408 from_nodeid, dir_nodeid);
4412 /* Look for name on rsbtbl.toss, if it's there, kill it.
4413 If it's on rsbtbl.keep, it's being used, and we should ignore this
4414 message. This is an expected race between the dir node sending a
4415 request to the master node at the same time as the master node sends
4416 a remove to the dir node. The resolution to that race is for the
4417 dir node to ignore the remove message, and the master node to
4418 recreate the master rsb when it gets a request from the dir node for
4419 an rsb it doesn't have. */
4421 memset(name, 0, sizeof(name));
4422 memcpy(name, ms->m_extra, len);
4424 hash = jhash(name, len, 0);
4425 b = hash & (ls->ls_rsbtbl_size - 1);
4427 spin_lock(&ls->ls_rsbtbl[b].lock);
4429 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].toss, name, len, &r);
4431 /* verify the rsb is on keep list per comment above */
4432 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[b].keep, name, len, &r);
4434 /* should not happen */
4435 log_error(ls, "receive_remove from %d not found %s",
4437 spin_unlock(&ls->ls_rsbtbl[b].lock);
4440 if (r->res_master_nodeid != from_nodeid) {
4441 /* should not happen */
4442 log_error(ls, "receive_remove keep from %d master %d",
4443 from_nodeid, r->res_master_nodeid);
4445 spin_unlock(&ls->ls_rsbtbl[b].lock);
4449 log_debug(ls, "receive_remove from %d master %d first %x %s",
4450 from_nodeid, r->res_master_nodeid, r->res_first_lkid,
4452 spin_unlock(&ls->ls_rsbtbl[b].lock);
4456 if (r->res_master_nodeid != from_nodeid) {
4457 log_error(ls, "receive_remove toss from %d master %d",
4458 from_nodeid, r->res_master_nodeid);
4460 spin_unlock(&ls->ls_rsbtbl[b].lock);
4464 if (kref_put(&r->res_ref, kill_rsb)) {
4465 rb_erase(&r->res_hashnode, &ls->ls_rsbtbl[b].toss);
4466 spin_unlock(&ls->ls_rsbtbl[b].lock);
4469 log_error(ls, "receive_remove from %d rsb ref error",
4472 spin_unlock(&ls->ls_rsbtbl[b].lock);
4476 static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
4478 do_purge(ls, ms->m_nodeid, ms->m_pid);
4481 static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
4483 struct dlm_lkb *lkb;
4485 int error, mstype, result;
4486 int from_nodeid = ms->m_header.h_nodeid;
4488 error = find_lkb(ls, ms->m_remid, &lkb);
4492 r = lkb->lkb_resource;
4496 error = validate_message(lkb, ms);
4500 mstype = lkb->lkb_wait_type;
4501 error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
4503 log_error(ls, "receive_request_reply %x remote %d %x result %d",
4504 lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
4509 /* Optimization: the dir node was also the master, so it took our
4510 lookup as a request and sent request reply instead of lookup reply */
4511 if (mstype == DLM_MSG_LOOKUP) {
4512 r->res_master_nodeid = from_nodeid;
4513 r->res_nodeid = from_nodeid;
4514 lkb->lkb_nodeid = from_nodeid;
4517 /* this is the value returned from do_request() on the master */
4518 result = ms->m_result;
4522 /* request would block (be queued) on remote master */
4523 queue_cast(r, lkb, -EAGAIN);
4524 confirm_master(r, -EAGAIN);
4525 unhold_lkb(lkb); /* undoes create_lkb() */
4530 /* request was queued or granted on remote master */
4531 receive_flags_reply(lkb, ms);
4532 lkb->lkb_remid = ms->m_lkid;
4533 if (is_altmode(lkb))
4534 munge_altmode(lkb, ms);
4536 add_lkb(r, lkb, DLM_LKSTS_WAITING);
4539 grant_lock_pc(r, lkb, ms);
4540 queue_cast(r, lkb, 0);
4542 confirm_master(r, result);
4547 /* find_rsb failed to find rsb or rsb wasn't master */
4548 log_limit(ls, "receive_request_reply %x from %d %d "
4549 "master %d dir %d first %x %s", lkb->lkb_id,
4550 from_nodeid, result, r->res_master_nodeid,
4551 r->res_dir_nodeid, r->res_first_lkid, r->res_name);
4553 if (r->res_dir_nodeid != dlm_our_nodeid() &&
4554 r->res_master_nodeid != dlm_our_nodeid()) {
4555 /* cause _request_lock->set_master->send_lookup */
4556 r->res_master_nodeid = 0;
4558 lkb->lkb_nodeid = -1;
4561 if (is_overlap(lkb)) {
4562 /* we'll ignore error in cancel/unlock reply */
4563 queue_cast_overlap(r, lkb);
4564 confirm_master(r, result);
4565 unhold_lkb(lkb); /* undoes create_lkb() */
4567 _request_lock(r, lkb);
4569 if (r->res_master_nodeid == dlm_our_nodeid())
4570 confirm_master(r, 0);
4575 log_error(ls, "receive_request_reply %x error %d",
4576 lkb->lkb_id, result);
4579 if (is_overlap_unlock(lkb) && (result == 0 || result == -EINPROGRESS)) {
4580 log_debug(ls, "receive_request_reply %x result %d unlock",
4581 lkb->lkb_id, result);
4582 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4583 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4584 send_unlock(r, lkb);
4585 } else if (is_overlap_cancel(lkb) && (result == -EINPROGRESS)) {
4586 log_debug(ls, "receive_request_reply %x cancel", lkb->lkb_id);
4587 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4588 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4589 send_cancel(r, lkb);
4591 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
4592 lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
4601 static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
4602 struct dlm_message *ms)
4604 /* this is the value returned from do_convert() on the master */
4605 switch (ms->m_result) {
4607 /* convert would block (be queued) on remote master */
4608 queue_cast(r, lkb, -EAGAIN);
4612 receive_flags_reply(lkb, ms);
4613 revert_lock_pc(r, lkb);
4614 queue_cast(r, lkb, -EDEADLK);
4618 /* convert was queued on remote master */
4619 receive_flags_reply(lkb, ms);
4620 if (is_demoted(lkb))
4623 add_lkb(r, lkb, DLM_LKSTS_CONVERT);
4628 /* convert was granted on remote master */
4629 receive_flags_reply(lkb, ms);
4630 if (is_demoted(lkb))
4632 grant_lock_pc(r, lkb, ms);
4633 queue_cast(r, lkb, 0);
4637 log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
4638 lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
4645 static void _receive_convert_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4647 struct dlm_rsb *r = lkb->lkb_resource;
4653 error = validate_message(lkb, ms);
4657 /* stub reply can happen with waiters_mutex held */
4658 error = remove_from_waiters_ms(lkb, ms);
4662 __receive_convert_reply(r, lkb, ms);
4668 static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
4670 struct dlm_lkb *lkb;
4673 error = find_lkb(ls, ms->m_remid, &lkb);
4677 _receive_convert_reply(lkb, ms);
4682 static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4684 struct dlm_rsb *r = lkb->lkb_resource;
4690 error = validate_message(lkb, ms);
4694 /* stub reply can happen with waiters_mutex held */
4695 error = remove_from_waiters_ms(lkb, ms);
4699 /* this is the value returned from do_unlock() on the master */
4701 switch (ms->m_result) {
4703 receive_flags_reply(lkb, ms);
4704 remove_lock_pc(r, lkb);
4705 queue_cast(r, lkb, -DLM_EUNLOCK);
4710 log_error(r->res_ls, "receive_unlock_reply %x error %d",
4711 lkb->lkb_id, ms->m_result);
4718 static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
4720 struct dlm_lkb *lkb;
4723 error = find_lkb(ls, ms->m_remid, &lkb);
4727 _receive_unlock_reply(lkb, ms);
4732 static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
4734 struct dlm_rsb *r = lkb->lkb_resource;
4740 error = validate_message(lkb, ms);
4744 /* stub reply can happen with waiters_mutex held */
4745 error = remove_from_waiters_ms(lkb, ms);
4749 /* this is the value returned from do_cancel() on the master */
4751 switch (ms->m_result) {
4753 receive_flags_reply(lkb, ms);
4754 revert_lock_pc(r, lkb);
4755 queue_cast(r, lkb, -DLM_ECANCEL);
4760 log_error(r->res_ls, "receive_cancel_reply %x error %d",
4761 lkb->lkb_id, ms->m_result);
4768 static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
4770 struct dlm_lkb *lkb;
4773 error = find_lkb(ls, ms->m_remid, &lkb);
4777 _receive_cancel_reply(lkb, ms);
4782 static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
4784 struct dlm_lkb *lkb;
4786 int error, ret_nodeid;
4787 int do_lookup_list = 0;
4789 error = find_lkb(ls, ms->m_lkid, &lkb);
4791 log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
4795 /* ms->m_result is the value returned by dlm_master_lookup on dir node
4796 FIXME: will a non-zero error ever be returned? */
4798 r = lkb->lkb_resource;
4802 error = remove_from_waiters(lkb, DLM_MSG_LOOKUP_REPLY);
4806 ret_nodeid = ms->m_nodeid;
4808 /* We sometimes receive a request from the dir node for this
4809 rsb before we've received the dir node's lookup_reply for it.
4810 The request from the dir node implies we're the master, so we set
4811 ourself as master in receive_request_reply, and verify here that
4812 we are indeed the master. */
4814 if (r->res_master_nodeid && (r->res_master_nodeid != ret_nodeid)) {
4815 /* This should never happen */
4816 log_error(ls, "receive_lookup_reply %x from %d ret %d "
4817 "master %d dir %d our %d first %x %s",
4818 lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
4819 r->res_master_nodeid, r->res_dir_nodeid,
4820 dlm_our_nodeid(), r->res_first_lkid, r->res_name);
4823 if (ret_nodeid == dlm_our_nodeid()) {
4824 r->res_master_nodeid = ret_nodeid;
4827 r->res_first_lkid = 0;
4828 } else if (ret_nodeid == -1) {
4829 /* the remote node doesn't believe it's the dir node */
4830 log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
4831 lkb->lkb_id, ms->m_header.h_nodeid);
4832 r->res_master_nodeid = 0;
4834 lkb->lkb_nodeid = -1;
4836 /* set_master() will set lkb_nodeid from r */
4837 r->res_master_nodeid = ret_nodeid;
4838 r->res_nodeid = ret_nodeid;
4841 if (is_overlap(lkb)) {
4842 log_debug(ls, "receive_lookup_reply %x unlock %x",
4843 lkb->lkb_id, lkb->lkb_flags);
4844 queue_cast_overlap(r, lkb);
4845 unhold_lkb(lkb); /* undoes create_lkb() */
4849 _request_lock(r, lkb);
4853 process_lookup_list(r);
4860 static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
4863 int error = 0, noent = 0;
4865 if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
4866 log_limit(ls, "receive %d from non-member %d %x %x %d",
4867 ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
4868 ms->m_remid, ms->m_result);
4872 switch (ms->m_type) {
4874 /* messages sent to a master node */
4876 case DLM_MSG_REQUEST:
4877 error = receive_request(ls, ms);
4880 case DLM_MSG_CONVERT:
4881 error = receive_convert(ls, ms);
4884 case DLM_MSG_UNLOCK:
4885 error = receive_unlock(ls, ms);
4888 case DLM_MSG_CANCEL:
4890 error = receive_cancel(ls, ms);
4893 /* messages sent from a master node (replies to above) */
4895 case DLM_MSG_REQUEST_REPLY:
4896 error = receive_request_reply(ls, ms);
4899 case DLM_MSG_CONVERT_REPLY:
4900 error = receive_convert_reply(ls, ms);
4903 case DLM_MSG_UNLOCK_REPLY:
4904 error = receive_unlock_reply(ls, ms);
4907 case DLM_MSG_CANCEL_REPLY:
4908 error = receive_cancel_reply(ls, ms);
4911 /* messages sent from a master node (only two types of async msg) */
4915 error = receive_grant(ls, ms);
4920 error = receive_bast(ls, ms);
4923 /* messages sent to a dir node */
4925 case DLM_MSG_LOOKUP:
4926 receive_lookup(ls, ms);
4929 case DLM_MSG_REMOVE:
4930 receive_remove(ls, ms);
4933 /* messages sent from a dir node (remove has no reply) */
4935 case DLM_MSG_LOOKUP_REPLY:
4936 receive_lookup_reply(ls, ms);
4939 /* other messages */
4942 receive_purge(ls, ms);
4946 log_error(ls, "unknown message type %d", ms->m_type);
4950 * When checking for ENOENT, we're checking the result of
4951 * find_lkb(m_remid):
4953 * The lock id referenced in the message wasn't found. This may
4954 * happen in normal usage for the async messages and cancel, so
4955 * only use log_debug for them.
4957 * Some errors are expected and normal.
4960 if (error == -ENOENT && noent) {
4961 log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
4962 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4963 ms->m_lkid, saved_seq);
4964 } else if (error == -ENOENT) {
4965 log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
4966 ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
4967 ms->m_lkid, saved_seq);
4969 if (ms->m_type == DLM_MSG_CONVERT)
4970 dlm_dump_rsb_hash(ls, ms->m_hash);
4973 if (error == -EINVAL) {
4974 log_error(ls, "receive %d inval from %d lkid %x remid %x "
4976 ms->m_type, ms->m_header.h_nodeid,
4977 ms->m_lkid, ms->m_remid, saved_seq);

/* If the lockspace is in recovery mode (locking stopped), then normal
   messages are saved on the requestqueue for processing after recovery is
   done.  When not in recovery mode, we wait for dlm_recoverd to drain saved
   messages off the requestqueue before we process new ones.  This occurs
   right after recovery completes when we transition from saving all messages
   on requestqueue, to processing all the saved messages, to processing new
   messages as they arrive. */

static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
				int nodeid)
{
	if (dlm_locking_stopped(ls)) {
		/* If we were a member of this lockspace, left, and rejoined,
		   other nodes may still be sending us messages from the
		   lockspace generation before we left. */
		if (!ls->ls_generation) {
			log_limit(ls, "receive %d from %d ignore old gen",
				  ms->m_type, nodeid);
			return;
		}

		dlm_add_requestqueue(ls, nodeid, ms);
	} else {
		dlm_wait_requestqueue(ls);
		_receive_message(ls, ms, 0);
	}
}
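
/* Illustrative sequence (an explanatory sketch using names from this file
   and requestqueue.c, not code that runs here): a message ms arriving while
   locking is stopped is parked, then drained by dlm_recoverd once recovery
   completes:

	dlm_receive_buffer(p, nodeid)
	  dlm_receive_message(ls, ms, nodeid)
	    dlm_add_requestqueue(ls, nodeid, ms)       (locking stopped)

	dlm_recoverd: dlm_process_requestqueue(ls)
	  dlm_receive_message_saved(ls, ms, saved_seq)
	    _receive_message(ls, ms, saved_seq)
*/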

/* This is called by dlm_recoverd to process messages that were saved on
   the requestqueue. */

void dlm_receive_message_saved(struct dlm_ls *ls, struct dlm_message *ms,
			       uint32_t saved_seq)
{
	_receive_message(ls, ms, saved_seq);
}

/* This is called by the midcomms layer when something is received for
   the lockspace.  It could be either a MSG (normal message sent as part of
   standard locking activity) or an RCOM (recovery message sent as part of
   lockspace recovery). */

void dlm_receive_buffer(union dlm_packet *p, int nodeid)
{
	struct dlm_header *hd = &p->header;
	struct dlm_ls *ls;
	int type = 0;

	switch (hd->h_cmd) {
	case DLM_MSG:
		dlm_message_in(&p->message);
		type = p->message.m_type;
		break;
	case DLM_RCOM:
		dlm_rcom_in(&p->rcom);
		type = p->rcom.rc_type;
		break;
	default:
		log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
		return;
	}

	if (hd->h_nodeid != nodeid) {
		log_print("invalid h_nodeid %d from %d lockspace %x",
			  hd->h_nodeid, nodeid, hd->h_lockspace);
		return;
	}

	ls = dlm_find_lockspace_global(hd->h_lockspace);
	if (!ls) {
		if (dlm_config.ci_log_debug) {
			printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
					   "%u from %d cmd %d type %d\n",
					   hd->h_lockspace, nodeid,
					   hd->h_cmd, type);
		}

		if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
			dlm_send_ls_not_ready(nodeid, &p->rcom);
		return;
	}

	/* this rwsem allows dlm_ls_stop() to wait for all dlm_recv threads to
	   be inactive (in this ls) before transitioning to recovery mode */

	down_read(&ls->ls_recv_active);
	if (hd->h_cmd == DLM_MSG)
		dlm_receive_message(ls, &p->message, nodeid);
	else
		dlm_receive_rcom(ls, &p->rcom, nodeid);
	up_read(&ls->ls_recv_active);

	dlm_put_lockspace(ls);
}

static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
				   struct dlm_message *ms_stub)
{
	if (middle_conversion(lkb)) {
		hold_lkb(lkb);
		memset(ms_stub, 0, sizeof(struct dlm_message));
		ms_stub->m_flags = DLM_IFL_STUB_MS;
		ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
		ms_stub->m_result = -EINPROGRESS;
		ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
		_receive_convert_reply(lkb, ms_stub);

		/* Same special case as in receive_rcom_lock_args() */
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(lkb->lkb_resource, RSB_RECOVER_CONVERT);
		unhold_lkb(lkb);

	} else if (lkb->lkb_rqmode >= lkb->lkb_grmode) {
		lkb->lkb_flags |= DLM_IFL_RESEND;
	}

	/* lkb->lkb_rqmode < lkb->lkb_grmode shouldn't happen since down
	   conversions are async; there's no reply from the remote master */
}

/* A waiting lkb needs recovery if the master node has failed, or
   the master node is changing (only when no directory is used) */

static int waiter_needs_recovery(struct dlm_ls *ls, struct dlm_lkb *lkb,
				 int dir_nodeid)
{
	if (dlm_no_directory(ls))
		return 1;

	if (dlm_is_removed(ls, lkb->lkb_wait_nodeid))
		return 1;

	return 0;
}

/* Recovery for locks that are waiting for replies from nodes that are now
   gone.  We can just complete unlocks and cancels by faking a reply from the
   dead node.  Requests and up-conversions we flag to be resent after
   recovery.  Down-conversions can just be completed with a fake reply like
   unlocks.  Conversions between PR and CW need special attention. */

void dlm_recover_waiters_pre(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	struct dlm_message *ms_stub;
	int wait_type, stub_unlock_result, stub_cancel_result;
	int dir_nodeid;

	ms_stub = kmalloc(sizeof(*ms_stub), GFP_KERNEL);
	if (!ms_stub)
		return;

	mutex_lock(&ls->ls_waiters_mutex);

	list_for_each_entry_safe(lkb, safe, &ls->ls_waiters, lkb_wait_reply) {

		dir_nodeid = dlm_dir_nodeid(lkb->lkb_resource);

		/* exclude debug messages about unlocks because there can be so
		   many and they aren't very interesting */

		if (lkb->lkb_wait_type != DLM_MSG_UNLOCK) {
			log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
				  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d",
				  lkb->lkb_id,
				  lkb->lkb_remid,
				  lkb->lkb_wait_type,
				  lkb->lkb_resource->res_nodeid,
				  lkb->lkb_nodeid,
				  lkb->lkb_wait_nodeid,
				  dir_nodeid);
		}

		/* all outstanding lookups, regardless of destination will be
		   resent after recovery is done */

		if (lkb->lkb_wait_type == DLM_MSG_LOOKUP) {
			lkb->lkb_flags |= DLM_IFL_RESEND;
			continue;
		}

		if (!waiter_needs_recovery(ls, lkb, dir_nodeid))
			continue;

		wait_type = lkb->lkb_wait_type;
		stub_unlock_result = -DLM_EUNLOCK;
		stub_cancel_result = -DLM_ECANCEL;

		/* Main reply may have been received leaving a zero wait_type,
		   but a reply for the overlapping op may not have been
		   received.  In that case we need to fake the appropriate
		   reply for the overlap op. */

		if (!wait_type) {
			if (is_overlap_cancel(lkb)) {
				wait_type = DLM_MSG_CANCEL;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_cancel_result = 0;
			}
			if (is_overlap_unlock(lkb)) {
				wait_type = DLM_MSG_UNLOCK;
				if (lkb->lkb_grmode == DLM_LOCK_IV)
					stub_unlock_result = -ENOENT;
			}

			log_debug(ls, "rwpre overlap %x %x %d %d %d",
				  lkb->lkb_id, lkb->lkb_flags, wait_type,
				  stub_cancel_result, stub_unlock_result);
		}

		switch (wait_type) {

		case DLM_MSG_REQUEST:
			lkb->lkb_flags |= DLM_IFL_RESEND;
			break;

		case DLM_MSG_CONVERT:
			recover_convert_waiter(ls, lkb, ms_stub);
			break;

		case DLM_MSG_UNLOCK:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = DLM_IFL_STUB_MS;
			ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
			ms_stub->m_result = stub_unlock_result;
			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
			_receive_unlock_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		case DLM_MSG_CANCEL:
			hold_lkb(lkb);
			memset(ms_stub, 0, sizeof(struct dlm_message));
			ms_stub->m_flags = DLM_IFL_STUB_MS;
			ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
			ms_stub->m_result = stub_cancel_result;
			ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
			_receive_cancel_reply(lkb, ms_stub);
			dlm_put_lkb(lkb);
			break;

		default:
			log_error(ls, "invalid lkb wait_type %d %d",
				  lkb->lkb_wait_type, wait_type);
		}
		schedule();
	}
	mutex_unlock(&ls->ls_waiters_mutex);
	kfree(ms_stub);
}
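
/* Example: an lkb waiting on DLM_MSG_UNLOCK_REPLY from a failed master is
   completed locally by the stub above; _receive_unlock_reply() sees
   m_result == -DLM_EUNLOCK and queues the unlock cast as if the dead node
   had replied.  A waiter on DLM_MSG_REQUEST is instead just flagged
   RESEND and reissued by dlm_recover_waiters_post(). */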

static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	int found = 0;

	mutex_lock(&ls->ls_waiters_mutex);
	list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
		if (lkb->lkb_flags & DLM_IFL_RESEND) {
			hold_lkb(lkb);
			found = 1;
			break;
		}
	}
	mutex_unlock(&ls->ls_waiters_mutex);

	if (!found)
		lkb = NULL;
	return lkb;
}

/* Deal with lookups and lkb's marked RESEND from _pre.  We may now be the
   master or dir-node for r.  Processing the lkb may result in it being
   placed back on the waiters list. */

/* We do this after normal locking has been enabled and any saved messages
   (in requestqueue) have been processed.  We should be confident that at
   this point we won't get or process a reply to any of these waiting
   operations.  But, new ops may be coming in on the rsbs/locks here from
   userspace or remotely. */

/* there may have been an overlap unlock/cancel prior to recovery or after
   recovery.  if before, the lkb may still have a positive wait_count; if
   after, the overlap flag would just have been set and nothing new sent.
   we can be confident here that any replies to either the initial op or
   overlap ops prior to recovery have been received. */

int dlm_recover_waiters_post(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *r;
	int error = 0, mstype, err, oc, ou;

	while (1) {
		if (dlm_locking_stopped(ls)) {
			log_debug(ls, "recover_waiters_post aborted");
			error = -EINTR;
			break;
		}

		lkb = find_resend_waiter(ls);
		if (!lkb)
			break;

		r = lkb->lkb_resource;
		hold_rsb(r);
		lock_rsb(r);

		mstype = lkb->lkb_wait_type;
		oc = is_overlap_cancel(lkb);
		ou = is_overlap_unlock(lkb);
		err = 0;

		log_debug(ls, "waiter %x remote %x msg %d r_nodeid %d "
			  "lkb_nodeid %d wait_nodeid %d dir_nodeid %d "
			  "overlap %d %d", lkb->lkb_id, lkb->lkb_remid, mstype,
			  r->res_nodeid, lkb->lkb_nodeid, lkb->lkb_wait_nodeid,
			  dlm_dir_nodeid(r), oc, ou);

		/* At this point we assume that we won't get a reply to any
		   previous op or overlap op on this lock.  First, do a big
		   remove_from_waiters() for all previous ops. */

		lkb->lkb_flags &= ~DLM_IFL_RESEND;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_UNLOCK;
		lkb->lkb_flags &= ~DLM_IFL_OVERLAP_CANCEL;
		lkb->lkb_wait_type = 0;
		lkb->lkb_wait_count = 0;
		mutex_lock(&ls->ls_waiters_mutex);
		list_del_init(&lkb->lkb_wait_reply);
		mutex_unlock(&ls->ls_waiters_mutex);
		unhold_lkb(lkb); /* for waiters list */

		if (oc || ou) {
			/* do an unlock or cancel instead of resending */
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				queue_cast(r, lkb, ou ? -DLM_EUNLOCK :
							-DLM_ECANCEL);
				unhold_lkb(lkb); /* undoes create_lkb() */
				break;
			case DLM_MSG_CONVERT:
				if (oc) {
					queue_cast(r, lkb, -DLM_ECANCEL);
				} else {
					lkb->lkb_exflags |= DLM_LKF_FORCEUNLOCK;
					_unlock_lock(r, lkb);
				}
				break;
			default:
				err = 1;
			}
		} else {
			switch (mstype) {
			case DLM_MSG_LOOKUP:
			case DLM_MSG_REQUEST:
				_request_lock(r, lkb);
				if (is_master(r))
					confirm_master(r, 0);
				break;
			case DLM_MSG_CONVERT:
				_convert_lock(r, lkb);
				break;
			default:
				err = 1;
			}
		}

		if (err) {
			log_error(ls, "waiter %x msg %d r_nodeid %d "
				  "dir_nodeid %d overlap %d %d",
				  lkb->lkb_id, mstype, r->res_nodeid,
				  dlm_dir_nodeid(r), oc, ou);
		}
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
	}

	return error;
}
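
/* Remove master-copy lkbs that were rebuilt by an earlier, aborted
   recovery; lkbs added during the current ls_recover_seq are kept. */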

static void purge_mstcpy_list(struct dlm_ls *ls, struct dlm_rsb *r,
			      struct list_head *list)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
		if (!is_master_copy(lkb))
			continue;

		/* don't purge lkbs we've added in recover_master_copy for
		   the current recovery seq */

		if (lkb->lkb_recover_seq == ls->ls_recover_seq)
			continue;

		del_lkb(r, lkb);

		/* this put should free the lkb */
		if (!dlm_put_lkb(lkb))
			log_error(ls, "purged mstcpy lkb not released");
	}
}

void dlm_purge_mstcpy_locks(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	purge_mstcpy_list(ls, r, &r->res_grantqueue);
	purge_mstcpy_list(ls, r, &r->res_convertqueue);
	purge_mstcpy_list(ls, r, &r->res_waitqueue);
}

static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
			    struct list_head *list,
			    int nodeid_gone, unsigned int *count)
{
	struct dlm_lkb *lkb, *safe;

	list_for_each_entry_safe(lkb, safe, list, lkb_statequeue) {
		if (!is_master_copy(lkb))
			continue;

		if ((lkb->lkb_nodeid == nodeid_gone) ||
		    dlm_is_removed(ls, lkb->lkb_nodeid)) {

			/* tell recover_lvb to invalidate the lvb
			   because a node holding EX/PW failed */
			if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
			    (lkb->lkb_grmode >= DLM_LOCK_PW)) {
				rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
			}

			del_lkb(r, lkb);

			/* this put should free the lkb */
			if (!dlm_put_lkb(lkb))
				log_error(ls, "purged dead lkb not released");

			rsb_set_flag(r, RSB_RECOVER_GRANT);

			(*count)++;
		}
	}
}

/* Get rid of locks held by nodes that are gone. */

void dlm_recover_purge(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	struct dlm_member *memb;
	int nodes_count = 0;
	int nodeid_gone = 0;
	unsigned int lkb_count = 0;

	/* cache one removed nodeid to optimize the common
	   case of a single node removed */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		nodes_count++;
		nodeid_gone = memb->nodeid;
	}

	if (!nodes_count)
		return;

	down_write(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		hold_rsb(r);
		lock_rsb(r);
		if (is_master(r)) {
			purge_dead_list(ls, r, &r->res_grantqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_convertqueue,
					nodeid_gone, &lkb_count);
			purge_dead_list(ls, r, &r->res_waitqueue,
					nodeid_gone, &lkb_count);
		}
		unlock_rsb(r);
		unhold_rsb(r);
		cond_resched();
	}
	up_write(&ls->ls_root_sem);

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_purge %u locks for %u nodes",
			  lkb_count, nodes_count);
}
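
/* Find the next rsb in this hash bucket that is flagged RECOVER_GRANT and
   that we master; it's returned held.  The flag is cleared on rsbs we
   don't master, since only the master grants locks. */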

static struct dlm_rsb *find_grant_rsb(struct dlm_ls *ls, int bucket)
{
	struct rb_node *n;
	struct dlm_rsb *r;

	spin_lock(&ls->ls_rsbtbl[bucket].lock);
	for (n = rb_first(&ls->ls_rsbtbl[bucket].keep); n; n = rb_next(n)) {
		r = rb_entry(n, struct dlm_rsb, res_hashnode);

		if (!rsb_flag(r, RSB_RECOVER_GRANT))
			continue;
		if (!is_master(r)) {
			rsb_clear_flag(r, RSB_RECOVER_GRANT);
			continue;
		}
		hold_rsb(r);
		spin_unlock(&ls->ls_rsbtbl[bucket].lock);
		return r;
	}
	spin_unlock(&ls->ls_rsbtbl[bucket].lock);
	return NULL;
}

/*
 * Attempt to grant locks on resources that we are the master of.
 * Locks may have become grantable during recovery because locks
 * from departed nodes have been purged (or not rebuilt), allowing
 * previously blocked locks to now be granted.  The subset of rsb's
 * we are interested in are those with lkb's on either the convert or
 * waiting queues.
 *
 * Simplest would be to go through each master rsb and check for non-empty
 * convert or waiting queues, and attempt to grant on those rsbs.
 * Checking the queues requires lock_rsb, though, for which we'd need
 * to release the rsbtbl lock.  This would make iterating through all
 * rsb's very inefficient.  So, we rely on earlier recovery routines
 * to set RECOVER_GRANT on any rsb's that we should attempt to grant
 * locks on.
 */

void dlm_recover_grant(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int bucket = 0;
	unsigned int count = 0;
	unsigned int rsb_count = 0;
	unsigned int lkb_count = 0;

	while (1) {
		r = find_grant_rsb(ls, bucket);
		if (!r) {
			if (bucket == ls->ls_rsbtbl_size - 1)
				break;
			bucket++;
			continue;
		}
		rsb_count++;
		count = 0;
		lock_rsb(r);
		/* the RECOVER_GRANT flag is checked in the grant path */
		grant_pending_locks(r, &count);
		rsb_clear_flag(r, RSB_RECOVER_GRANT);
		lkb_count += count;
		confirm_master(r, 0);
		unlock_rsb(r);
		put_rsb(r);
		cond_resched();
	}

	if (lkb_count)
		log_rinfo(ls, "dlm_recover_grant %u locks on %u resources",
			  lkb_count, rsb_count);
}
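
/* A remote node's lock id (remid) together with its nodeid uniquely
   identifies a master-copy lkb on this rsb, so all three queues are
   searched for that pair. */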

static struct dlm_lkb *search_remid_list(struct list_head *head, int nodeid,
					 uint32_t remid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		if (lkb->lkb_nodeid == nodeid && lkb->lkb_remid == remid)
			return lkb;
	}
	return NULL;
}

static struct dlm_lkb *search_remid(struct dlm_rsb *r, int nodeid,
				    uint32_t remid)
{
	struct dlm_lkb *lkb;

	lkb = search_remid_list(&r->res_grantqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_convertqueue, nodeid, remid);
	if (lkb)
		return lkb;
	lkb = search_remid_list(&r->res_waitqueue, nodeid, remid);
	if (lkb)
		return lkb;
	return NULL;
}

/* needs at least dlm_rcom + rcom_lock */
static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
				  struct dlm_rsb *r, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;

	lkb->lkb_nodeid = rc->rc_header.h_nodeid;
	lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
	lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
	lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
	lkb->lkb_flags = le32_to_cpu(rl->rl_flags) & 0x0000FFFF;
	lkb->lkb_flags |= DLM_IFL_MSTCPY;
	lkb->lkb_lvbseq = le32_to_cpu(rl->rl_lvbseq);
	lkb->lkb_rqmode = rl->rl_rqmode;
	lkb->lkb_grmode = rl->rl_grmode;
	/* don't set lkb_status because add_lkb wants to itself */

	lkb->lkb_bastfn = (rl->rl_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
	lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;

	if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
		int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
			 sizeof(struct rcom_lock);
		if (lvblen > ls->ls_lvblen)
			return -EINVAL;
		lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
		if (!lkb->lkb_lvbptr)
			return -ENOMEM;
		memcpy(lkb->lkb_lvbptr, rl->rl_lvb, lvblen);
	}

	/* Conversions between PR and CW (middle modes) need special handling.
	   The real granted mode of these converting locks cannot be
	   determined until all locks have been rebuilt on the rsb
	   (recover_conversion) */

	if (rl->rl_wait_type == cpu_to_le16(DLM_MSG_CONVERT) &&
	    middle_conversion(lkb)) {
		rl->rl_status = DLM_LKSTS_CONVERT;
		lkb->lkb_grmode = DLM_LOCK_IV;
		rsb_set_flag(r, RSB_RECOVER_CONVERT);
	}

	return 0;
}

/* This lkb may have been recovered in a previous aborted recovery so we need
   to check if the rsb already has an lkb with the given remote nodeid/lkid.
   If so we just send back a standard reply.  If not, we create a new lkb with
   the given values and send back our lkid.  We send back our lkid by sending
   back the rcom_lock struct we got but with the remid field filled in. */

/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t remid = 0;
	int from_nodeid = rc->rc_header.h_nodeid;
	int error;

	if (rl->rl_parent_lkid) {
		error = -EOPNOTSUPP;
		goto out;
	}

	remid = le32_to_cpu(rl->rl_lkid);

	/* In general we expect the rsb returned to be R_MASTER, but we don't
	   have to require it.  Recovery of masters on one node can overlap
	   recovery of locks on another node, so one node can send us MSTCPY
	   locks before we've made ourselves master of this rsb.  We can still
	   add new MSTCPY locks that we receive here without any harm; when
	   we make ourselves master, dlm_recover_masters() won't touch the
	   MSTCPY locks we've received early. */

	error = find_rsb(ls, rl->rl_name, le16_to_cpu(rl->rl_namelen),
			 from_nodeid, R_RECEIVE_RECOVER, &r);
	if (error)
		goto out;

	lock_rsb(r);

	if (dlm_no_directory(ls) && (dlm_dir_nodeid(r) != dlm_our_nodeid())) {
		log_error(ls, "dlm_recover_master_copy remote %d %x not dir",
			  from_nodeid, remid);
		error = -EBADR;
		goto out_unlock;
	}

	lkb = search_remid(r, from_nodeid, remid);
	if (lkb) {
		error = -EEXIST;
		goto out_remid;
	}

	error = create_lkb(ls, &lkb);
	if (error)
		goto out_unlock;

	error = receive_rcom_lock_args(ls, lkb, r, rc);
	if (error) {
		__put_lkb(ls, lkb);
		goto out_unlock;
	}

	add_lkb(r, lkb, rl->rl_status);
	error = 0;
	ls->ls_recover_locks_in++;

	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);

 out_remid:
	/* this is the new value returned to the lock holder for
	   saving in its process-copy lkb */
	rl->rl_remid = cpu_to_le32(lkb->lkb_id);

	lkb->lkb_recover_seq = ls->ls_recover_seq;

 out_unlock:
	unlock_rsb(r);
	put_rsb(r);
 out:
	if (error && error != -EEXIST)
		log_rinfo(ls, "dlm_recover_master_copy remote %d %x error %d",
			  from_nodeid, remid, error);
	rl->rl_result = cpu_to_le32(error);
	return error;
}
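
/* Example of the id exchange above: lock holder A sends rcom_lock with
   rl_lkid set to A's lkid; new master B finds or creates the MSTCPY lkb
   and returns rl_remid set to B's lkid.  A then saves that remid in its
   process-copy lkb via dlm_recover_process_copy() below, so later
   messages between the two nodes can reference each other's ids
   directly. */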

/* needs at least dlm_rcom + rcom_lock */
int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
	struct dlm_rsb *r;
	struct dlm_lkb *lkb;
	uint32_t lkid, remid;
	int error, result;

	lkid = le32_to_cpu(rl->rl_lkid);
	remid = le32_to_cpu(rl->rl_remid);
	result = le32_to_cpu(rl->rl_result);

	error = find_lkb(ls, lkid, &lkb);
	if (error) {
		log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		return error;
	}

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	if (!is_process_copy(lkb)) {
		log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		dlm_dump_rsb(r);
		unlock_rsb(r);
		put_rsb(r);
		dlm_put_lkb(lkb);
		return -EINVAL;
	}

	switch (result) {
	case -EBADR:
		/* There's a chance the new master received our lock before
		   dlm_recover_master_reply(), this wouldn't happen if we did
		   a barrier between recover_masters and recover_locks. */

		log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
			  lkid, rc->rc_header.h_nodeid, remid, result);
		dlm_send_rcom_lock(r, lkb);
		goto out;
	case -EEXIST:
	case 0:
		lkb->lkb_remid = remid;
		break;
	default:
		log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
			  lkid, rc->rc_header.h_nodeid, remid, result);
	}

	/* an ack for dlm_recover_locks() which waits for replies from
	   all the locks it sends to new masters */
	dlm_recovered_lock(r);
 out:
	unlock_rsb(r);
	put_rsb(r);
	dlm_put_lkb(lkb);

	return 0;
}

int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	int error;

	dlm_lock_recovery(ls);

	error = create_lkb(ls, &lkb);
	if (error) {
		kfree(ua);
		goto out;
	}

	if (flags & DLM_LKF_VALBLK) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			kfree(ua);
			__put_lkb(ls, lkb);
			error = -ENOMEM;
			goto out;
		}
	}
	error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
	if (error) {
		kfree(ua->lksb.sb_lvbptr);
		ua->lksb.sb_lvbptr = NULL;
		kfree(ua);
		__put_lkb(ls, lkb);
		goto out;
	}

	/* After ua is attached to lkb it will be freed by dlm_free_lkb().
	   When DLM_IFL_USER is set, the dlm knows that this is a userspace
	   lock and that lkb_astparam is the dlm_user_args structure. */
	lkb->lkb_flags |= DLM_IFL_USER;
	error = request_lock(ls, lkb, name, namelen, &args);

	switch (error) {
	case 0:
		break;
	case -EINPROGRESS:
		error = 0;
		break;
	case -EAGAIN:
		error = 0;
		/* fall through */
	default:
		__put_lkb(ls, lkb);
		goto out;
	}

	/* add this new lkb to the per-process list of locks */
	spin_lock(&ua->proc->locks_spin);
	hold_lkb(lkb);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	dlm_unlock_recovery(ls);
	return error;
}
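
/* Illustrative caller sketch (cf. device_write in fs/dlm/user.c, the real
   caller; "proc" and "params" here are hypothetical names).  The ua must
   be separately allocated because it's attached to the lkb and only freed
   with it by dlm_free_lkb():

	struct dlm_user_args *ua = kzalloc(sizeof(*ua), GFP_NOFS);
	if (!ua)
		return -ENOMEM;
	ua->proc = proc;
	ua->user_lksb = params->lksb;	(userspace lksb address)
	error = dlm_user_request(ls, ua, params->mode, params->flags,
				 params->name, params->namelen, 0);
*/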

int dlm_user_convert(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, uint32_t lkid, char *lvb_in,
		     unsigned long timeout_cs)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	/* user can change the params on its lock when it converts it, or
	   add an lvb that didn't exist before */

	ua = lkb->lkb_ua;

	if (flags & DLM_LKF_VALBLK && !ua->lksb.sb_lvbptr) {
		ua->lksb.sb_lvbptr = kzalloc(DLM_USER_LVB_LEN, GFP_NOFS);
		if (!ua->lksb.sb_lvbptr) {
			error = -ENOMEM;
			goto out_put;
		}
	}
	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);

	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_lock_args(mode, &ua->lksb, flags, 0, timeout_cs,
			      fake_astfn, ua, fake_bastfn, &args);
	if (error)
		goto out_put;

	error = convert_lock(ls, lkb, &args);

	if (error == -EINPROGRESS || error == -EAGAIN || error == -EDEADLK)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

/*
 * The caller asks for an orphan lock on a given resource with a given mode.
 * If a matching lock exists, it's moved to the owner's list of locks and
 * the lkid is returned.
 */

int dlm_user_adopt_orphan(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		     int mode, uint32_t flags, void *name, unsigned int namelen,
		     unsigned long timeout_cs, uint32_t *lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_user_args *ua;
	int found_other_mode = 0;
	int found = 0;
	int rv = 0;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry(lkb, &ls->ls_orphans, lkb_ownqueue) {
		if (lkb->lkb_resource->res_length != namelen)
			continue;
		if (memcmp(lkb->lkb_resource->res_name, name, namelen))
			continue;
		if (lkb->lkb_grmode != mode) {
			found_other_mode = 1;
			continue;
		}

		found = 1;
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags &= ~DLM_IFL_ORPHAN;
		*lkid = lkb->lkb_id;
		break;
	}
	mutex_unlock(&ls->ls_orphans_mutex);

	if (!found && found_other_mode) {
		rv = -EAGAIN;
		goto out;
	}

	if (!found) {
		rv = -ENOENT;
		goto out;
	}

	lkb->lkb_exflags = flags;
	lkb->lkb_ownpid = (int) current->pid;

	ua = lkb->lkb_ua;

	ua->proc = ua_tmp->proc;
	ua->xid = ua_tmp->xid;
	ua->castparam = ua_tmp->castparam;
	ua->castaddr = ua_tmp->castaddr;
	ua->bastparam = ua_tmp->bastparam;
	ua->bastaddr = ua_tmp->bastaddr;
	ua->user_lksb = ua_tmp->user_lksb;

	/*
	 * The lkb reference from the ls_orphans list was not
	 * removed above, and is now considered the reference
	 * for the proc locks list.
	 */

	spin_lock(&ua->proc->locks_spin);
	list_add_tail(&lkb->lkb_ownqueue, &ua->proc->locks);
	spin_unlock(&ua->proc->locks_spin);
 out:
	kfree(ua_tmp);
	return rv;
}

int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid, char *lvb_in)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;

	if (lvb_in && ua->lksb.sb_lvbptr)
		memcpy(ua->lksb.sb_lvbptr, lvb_in, DLM_USER_LVB_LEN);
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = unlock_lock(ls, lkb, &args);

	if (error == -DLM_EUNLOCK)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY && (flags & DLM_LKF_FORCEUNLOCK))
		error = 0;
	if (error)
		goto out_put;

	spin_lock(&ua->proc->locks_spin);
	/* dlm_user_add_cb() may have already taken lkb off the proc list */
	if (!list_empty(&lkb->lkb_ownqueue))
		list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
	spin_unlock(&ua->proc->locks_spin);
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
		    uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;
	if (ua_tmp->castparam)
		ua->castparam = ua_tmp->castparam;
	ua->user_lksb = ua_tmp->user_lksb;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	error = cancel_lock(ls, lkb, &args);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	kfree(ua_tmp);
	return error;
}

int dlm_user_deadlock(struct dlm_ls *ls, uint32_t flags, uint32_t lkid)
{
	struct dlm_lkb *lkb;
	struct dlm_args args;
	struct dlm_user_args *ua;
	struct dlm_rsb *r;
	int error;

	dlm_lock_recovery(ls);

	error = find_lkb(ls, lkid, &lkb);
	if (error)
		goto out;

	ua = lkb->lkb_ua;

	error = set_unlock_args(flags, ua, &args);
	if (error)
		goto out_put;

	/* same as cancel_lock(), but set DEADLOCK_CANCEL after lock_rsb */

	r = lkb->lkb_resource;
	hold_rsb(r);
	lock_rsb(r);

	error = validate_unlock_args(lkb, &args);
	if (error)
		goto out_r;
	lkb->lkb_flags |= DLM_IFL_DEADLOCK_CANCEL;

	error = _cancel_lock(r, lkb);
 out_r:
	unlock_rsb(r);
	put_rsb(r);

	if (error == -DLM_ECANCEL)
		error = 0;
	/* from validate_unlock_args() */
	if (error == -EBUSY)
		error = 0;
 out_put:
	dlm_put_lkb(lkb);
 out:
	dlm_unlock_recovery(ls);
	return error;
}

/* lkb's that are removed from the waiters list by revert are just left on the
   orphans list with the granted orphan locks, to be freed by purge */

static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	hold_lkb(lkb); /* reference for the ls_orphans list */
	mutex_lock(&ls->ls_orphans_mutex);
	list_add_tail(&lkb->lkb_ownqueue, &ls->ls_orphans);
	mutex_unlock(&ls->ls_orphans_mutex);

	set_unlock_args(0, lkb->lkb_ua, &args);

	error = cancel_lock(ls, lkb, &args);
	if (error == -DLM_ECANCEL)
		error = 0;
	return error;
}

/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
   granted.  Regardless of what rsb queue the lock is on, it's removed and
   freed.  The IVVALBLK flag causes the lvb on the resource to be invalidated
   if our lock is PW/EX (it's ignored if our granted mode is smaller). */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
	struct dlm_args args;
	int error;

	set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
			lkb->lkb_ua, &args);

	error = unlock_lock(ls, lkb, &args);
	if (error == -DLM_EUNLOCK)
		error = 0;
	return error;
}
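
/* Example: purging a dead holder's PW lock goes through here; FORCEUNLOCK
   frees it from whichever queue it's on, and IVVALBLK invalidates the
   rsb's lvb since the granted mode was PW or EX.  For a PR holder the
   lvb is left intact. */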

/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
   (which does lock_rsb) due to deadlock with receiving a message that does
   lock_rsb followed by dlm_user_add_cb() */

static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
				     struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb = NULL;

	mutex_lock(&ls->ls_clear_proc_locks);
	if (list_empty(&proc->locks))
		goto out;

	lkb = list_entry(proc->locks.next, struct dlm_lkb, lkb_ownqueue);
	list_del_init(&lkb->lkb_ownqueue);

	if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
		lkb->lkb_flags |= DLM_IFL_ORPHAN;
	else
		lkb->lkb_flags |= DLM_IFL_DEAD;
 out:
	mutex_unlock(&ls->ls_clear_proc_locks);
	return lkb;
}

/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
   1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
   which we clear here. */

/* proc CLOSING flag is set so no more device_reads should look at proc->asts
   list, and no more device_writes should add lkb's to proc->locks list; so we
   shouldn't need to take asts_spin or locks_spin here.  this assumes that
   device reads/writes/closes are serialized -- FIXME: we may need to
   serialize them ourself. */

void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	dlm_lock_recovery(ls);

	while (1) {
		lkb = del_proc_lock(ls, proc);
		if (!lkb)
			break;
		del_timeout(lkb);
		if (lkb->lkb_exflags & DLM_LKF_PERSISTENT)
			orphan_proc_lock(ls, lkb);
		else
			unlock_proc_lock(ls, lkb);

		/* this removes the reference for the proc->locks list
		   added by dlm_user_request, it may result in the lkb
		   being freed */

		dlm_put_lkb(lkb);
	}

	mutex_lock(&ls->ls_clear_proc_locks);

	/* in-progress unlocks */
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}

	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		memset(&lkb->lkb_callbacks, 0,
		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}

	mutex_unlock(&ls->ls_clear_proc_locks);
	dlm_unlock_recovery(ls);
}

static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
{
	struct dlm_lkb *lkb, *safe;

	while (1) {
		lkb = NULL;
		spin_lock(&proc->locks_spin);
		if (!list_empty(&proc->locks)) {
			lkb = list_entry(proc->locks.next, struct dlm_lkb,
					 lkb_ownqueue);
			list_del_init(&lkb->lkb_ownqueue);
		}
		spin_unlock(&proc->locks_spin);

		if (!lkb)
			break;

		lkb->lkb_flags |= DLM_IFL_DEAD;
		unlock_proc_lock(ls, lkb);
		dlm_put_lkb(lkb); /* ref from proc->locks list */
	}

	spin_lock(&proc->locks_spin);
	list_for_each_entry_safe(lkb, safe, &proc->unlocking, lkb_ownqueue) {
		list_del_init(&lkb->lkb_ownqueue);
		lkb->lkb_flags |= DLM_IFL_DEAD;
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->locks_spin);

	spin_lock(&proc->asts_spin);
	list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
		memset(&lkb->lkb_callbacks, 0,
		       sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
		list_del_init(&lkb->lkb_cb_list);
		dlm_put_lkb(lkb);
	}
	spin_unlock(&proc->asts_spin);
}

/* pid of 0 means purge all orphans */

static void do_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_lkb *lkb, *safe;

	mutex_lock(&ls->ls_orphans_mutex);
	list_for_each_entry_safe(lkb, safe, &ls->ls_orphans, lkb_ownqueue) {
		if (pid && lkb->lkb_ownpid != pid)
			continue;
		unlock_proc_lock(ls, lkb);
		list_del_init(&lkb->lkb_ownqueue);
		dlm_put_lkb(lkb);
	}
	mutex_unlock(&ls->ls_orphans_mutex);
}

static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
{
	struct dlm_message *ms;
	struct dlm_mhandle *mh;
	int error;

	error = _create_message(ls, sizeof(struct dlm_message), nodeid,
				DLM_MSG_PURGE, &ms, &mh);
	if (error)
		return error;
	ms->m_nodeid = nodeid;
	ms->m_pid = pid;

	return send_message(mh, ms);
}

int dlm_user_purge(struct dlm_ls *ls, struct dlm_user_proc *proc,
		   int nodeid, int pid)
{
	int error = 0;

	if (nodeid && (nodeid != dlm_our_nodeid())) {
		error = send_purge(ls, nodeid, pid);
	} else {
		dlm_lock_recovery(ls);
		if (pid == current->pid)
			purge_proc_locks(ls, proc);
		else
			do_purge(ls, nodeid, pid);
		dlm_unlock_recovery(ls);
	}
	return error;
}
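
/* Example: when a libdlm process exits, its non-PERSISTENT locks are
   unlocked by dlm_clear_proc_locks(); PERSISTENT ones move to ls_orphans.
   A successor process can reclaim one with dlm_user_adopt_orphan(), or a
   purge request for the old pid (local, or remote via DLM_MSG_PURGE)
   frees them through do_purge(). */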