1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Code which implements an OCFS2 specific interface to our DLM.
8 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public
12 * License as published by the Free Software Foundation; either
13 * version 2 of the License, or (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
20 * You should have received a copy of the GNU General Public
21 * License along with this program; if not, write to the
22 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23 * Boston, MA 02111-1307, USA.
26 #include <linux/types.h>
27 #include <linux/slab.h>
28 #include <linux/highmem.h>
30 #include <linux/kthread.h>
31 #include <linux/pagemap.h>
32 #include <linux/debugfs.h>
33 #include <linux/seq_file.h>
34 #include <linux/time.h>
35 #include <linux/quotaops.h>
37 #define MLOG_MASK_PREFIX ML_DLM_GLUE
38 #include <cluster/masklog.h>
41 #include "ocfs2_lockingver.h"
46 #include "extent_map.h"
48 #include "heartbeat.h"
51 #include "stackglue.h"
56 #include "refcounttree.h"
58 #include "buffer_head_io.h"
60 struct ocfs2_mask_waiter {
61 struct list_head mw_item;
63 struct completion mw_complete;
64 unsigned long mw_mask;
65 unsigned long mw_goal;
66 #ifdef CONFIG_OCFS2_FS_STATS
67 ktime_t mw_lock_start;
71 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
72 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
73 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
74 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
77 * Return value from ->downconvert_worker functions.
79 * These control the precise actions of ocfs2_unblock_lock()
80 * and ocfs2_process_blocked_lock()
83 enum ocfs2_unblock_action {
84 UNBLOCK_CONTINUE = 0, /* Continue downconvert */
85 UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
86 * ->post_unlock callback */
87 UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
88 * ->post_unlock() callback. */
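/*
 * Illustrative sketch (not part of the original file, names hypothetical):
 * a ->downconvert_worker() picks one of the actions above, e.g.
 *
 *	static int example_convert_worker(struct ocfs2_lock_res *lockres,
 *					  int blocking)
 *	{
 *		if (blocking == DLM_LOCK_PR)
 *			return UNBLOCK_CONTINUE;
 *
 *		... do any flushing needed before the level is dropped ...
 *
 *		return UNBLOCK_CONTINUE_POST;
 *	}
 *
 * UNBLOCK_CONTINUE_POST additionally has the lock type's ->post_unlock()
 * fired once the downconvert has completed.
 */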
91 struct ocfs2_unblock_ctl {
93 enum ocfs2_unblock_action unblock_action;
96 /* Lockdep class keys */
97 struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
99 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
101 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
103 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
106 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
109 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
110 struct ocfs2_lock_res *lockres);
112 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
114 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
116 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
119 #define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
121 /* This aids in debugging situations where a bad LVB might be involved. */
122 static void ocfs2_dump_meta_lvb_info(u64 level,
123 const char *function,
125 struct ocfs2_lock_res *lockres)
127 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
129 mlog(level, "LVB information for %s (called from %s:%u):\n",
130 lockres->l_name, function, line);
131 mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
132 lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
133 be32_to_cpu(lvb->lvb_igeneration));
134 mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
135 (unsigned long long)be64_to_cpu(lvb->lvb_isize),
136 be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
137 be16_to_cpu(lvb->lvb_imode));
138 mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
139 "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
140 (long long)be64_to_cpu(lvb->lvb_iatime_packed),
141 (long long)be64_to_cpu(lvb->lvb_ictime_packed),
142 (long long)be64_to_cpu(lvb->lvb_imtime_packed),
143 be32_to_cpu(lvb->lvb_iattr));
148 * OCFS2 Lock Resource Operations
150 * These fine tune the behavior of the generic dlmglue locking infrastructure.
152 * The most basic of lock types can point ->l_priv to their respective
153 * struct ocfs2_super and allow the default actions to manage things.
155 * Right now, each lock type also needs to implement an init function,
156 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
157 * should be called when the lock is no longer needed (i.e., object
160 struct ocfs2_lock_res_ops {
162 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
163 * this callback if ->l_priv is not an ocfs2_super pointer
165 struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
168 * Optionally called in the downconvert thread after a
169 * successful downconvert. The lockres will not be referenced
170 * after this callback is called, so it is safe to free
173 * The exact semantics of when this is called are controlled
174 * by ->downconvert_worker()
176 void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
179 * Allow a lock type to add checks to determine whether it is
180 * safe to downconvert a lock. Return 0 to re-queue the
181 * downconvert at a later time, nonzero to continue.
183 * For most locks, the default checks that there are no
184 * incompatible holders are sufficient.
186 * Called with the lockres spinlock held.
188 int (*check_downconvert)(struct ocfs2_lock_res *, int);
191 * Allows a lock type to populate the lock value block. This
192 * is called on downconvert, and when we drop a lock.
194 * Locks that want to use this should set LOCK_TYPE_USES_LVB
195 * in the flags field.
197 * Called with the lockres spinlock held.
199 void (*set_lvb)(struct ocfs2_lock_res *);
202 * Called from the downconvert thread when it is determined
203 * that a lock will be downconverted. This is called without
204 * any locks held so the function can do work that might
205 * schedule (syncing out data, etc).
207 * This should return any one of the ocfs2_unblock_action
208 * values, depending on what it wants the thread to do.
210 int (*downconvert_worker)(struct ocfs2_lock_res *, int);
213 * LOCK_TYPE_* flags which describe the specific requirements
214 * of a lock type. Descriptions of each individual flag follow.
220 * Some locks want to "refresh" potentially stale data when a
221 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
222 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
223 * individual lockres l_flags member from the ast function. It is
224 * expected that the locking wrapper will clear the
225 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
227 #define LOCK_TYPE_REQUIRES_REFRESH 0x1
230 * Indicate that a lock type makes use of the lock value block. The
231 * ->set_lvb lock type callback must be defined.
233 #define LOCK_TYPE_USES_LVB 0x2
235 static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
236 .get_osb = ocfs2_get_inode_osb,
240 static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
241 .get_osb = ocfs2_get_inode_osb,
242 .check_downconvert = ocfs2_check_meta_downconvert,
243 .set_lvb = ocfs2_set_meta_lvb,
244 .downconvert_worker = ocfs2_data_convert_worker,
245 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
248 static struct ocfs2_lock_res_ops ocfs2_super_lops = {
249 .flags = LOCK_TYPE_REQUIRES_REFRESH,
252 static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
256 static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
260 static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
261 .flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
264 static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
265 .get_osb = ocfs2_get_dentry_osb,
266 .post_unlock = ocfs2_dentry_post_unlock,
267 .downconvert_worker = ocfs2_dentry_convert_worker,
271 static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
272 .get_osb = ocfs2_get_inode_osb,
276 static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
277 .get_osb = ocfs2_get_file_osb,
281 static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
282 .set_lvb = ocfs2_set_qinfo_lvb,
283 .get_osb = ocfs2_get_qinfo_osb,
284 .flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
287 static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
288 .check_downconvert = ocfs2_check_refcount_downconvert,
289 .downconvert_worker = ocfs2_refcount_convert_worker,
293 static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
295 return lockres->l_type == OCFS2_LOCK_TYPE_META ||
296 lockres->l_type == OCFS2_LOCK_TYPE_RW ||
297 lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
300 static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
302 return container_of(lksb, struct ocfs2_lock_res, l_lksb);
305 static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
307 BUG_ON(!ocfs2_is_inode_lock(lockres));
309 return (struct inode *) lockres->l_priv;
312 static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
314 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
316 return (struct ocfs2_dentry_lock *)lockres->l_priv;
319 static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
321 BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
323 return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
326 static inline struct ocfs2_refcount_tree *
327 ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
329 return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
332 static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
334 if (lockres->l_ops->get_osb)
335 return lockres->l_ops->get_osb(lockres);
337 return (struct ocfs2_super *)lockres->l_priv;
340 static int ocfs2_lock_create(struct ocfs2_super *osb,
341 struct ocfs2_lock_res *lockres,
344 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
346 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
347 struct ocfs2_lock_res *lockres,
348 int level, unsigned long caller_ip);
349 static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
350 struct ocfs2_lock_res *lockres,
353 __ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
356 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
357 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
358 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
359 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
360 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
361 struct ocfs2_lock_res *lockres);
362 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
364 #define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
365 if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
366 mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
367 _err, _func, _lockres->l_name); \
369 mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
370 _err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
371 (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
373 static int ocfs2_downconvert_thread(void *arg);
374 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
375 struct ocfs2_lock_res *lockres);
376 static int ocfs2_inode_lock_update(struct inode *inode,
377 struct buffer_head **bh);
378 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
379 static inline int ocfs2_highest_compat_lock_level(int level);
380 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
382 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
383 struct ocfs2_lock_res *lockres,
386 unsigned int generation);
387 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
388 struct ocfs2_lock_res *lockres);
389 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
390 struct ocfs2_lock_res *lockres);
393 static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
400 BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
402 len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
403 ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
404 (long long)blkno, generation);
406 BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
408 mlog(0, "built lock resource with name: %s\n", name);
411 static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
413 static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
414 struct ocfs2_dlm_debug *dlm_debug)
416 mlog(0, "Add tracking for lockres %s\n", res->l_name);
418 spin_lock(&ocfs2_dlm_tracking_lock);
419 list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
420 spin_unlock(&ocfs2_dlm_tracking_lock);
423 static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
425 spin_lock(&ocfs2_dlm_tracking_lock);
426 if (!list_empty(&res->l_debug_list))
427 list_del_init(&res->l_debug_list);
428 spin_unlock(&ocfs2_dlm_tracking_lock);
431 #ifdef CONFIG_OCFS2_FS_STATS
432 static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
434 res->l_lock_refresh = 0;
435 memset(&res->l_lock_prmode, 0, sizeof(struct ocfs2_lock_stats));
436 memset(&res->l_lock_exmode, 0, sizeof(struct ocfs2_lock_stats));
439 static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
440 struct ocfs2_mask_waiter *mw, int ret)
444 struct ocfs2_lock_stats *stats;
446 if (level == LKM_PRMODE)
447 stats = &res->l_lock_prmode;
448 else if (level == LKM_EXMODE)
449 stats = &res->l_lock_exmode;
453 kt = ktime_sub(ktime_get(), mw->mw_lock_start);
454 usec = ktime_to_us(kt);
457 stats->ls_total += ktime_to_ns(kt);
459 if (unlikely(stats->ls_gets == 0)) {
461 stats->ls_total = ktime_to_ns(kt);
464 if (stats->ls_max < usec)
465 stats->ls_max = usec;
471 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
473 lockres->l_lock_refresh++;
476 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
478 mw->mw_lock_start = ktime_get();
481 static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
484 static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
485 int level, struct ocfs2_mask_waiter *mw, int ret)
488 static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
491 static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
496 static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
497 struct ocfs2_lock_res *res,
498 enum ocfs2_lock_type type,
499 struct ocfs2_lock_res_ops *ops,
506 res->l_level = DLM_LOCK_IV;
507 res->l_requested = DLM_LOCK_IV;
508 res->l_blocking = DLM_LOCK_IV;
509 res->l_action = OCFS2_AST_INVALID;
510 res->l_unlock_action = OCFS2_UNLOCK_INVALID;
512 res->l_flags = OCFS2_LOCK_INITIALIZED;
514 ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
516 ocfs2_init_lock_stats(res);
517 #ifdef CONFIG_DEBUG_LOCK_ALLOC
518 if (type != OCFS2_LOCK_TYPE_OPEN)
519 lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
520 &lockdep_keys[type], 0);
522 res->l_lockdep_map.key = NULL;
526 void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
528 /* This also clears out the lock status block */
529 memset(res, 0, sizeof(struct ocfs2_lock_res));
530 spin_lock_init(&res->l_lock);
531 init_waitqueue_head(&res->l_event);
532 INIT_LIST_HEAD(&res->l_blocked_list);
533 INIT_LIST_HEAD(&res->l_mask_waiters);
536 void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
537 enum ocfs2_lock_type type,
538 unsigned int generation,
541 struct ocfs2_lock_res_ops *ops;
544 case OCFS2_LOCK_TYPE_RW:
545 ops = &ocfs2_inode_rw_lops;
547 case OCFS2_LOCK_TYPE_META:
548 ops = &ocfs2_inode_inode_lops;
550 case OCFS2_LOCK_TYPE_OPEN:
551 ops = &ocfs2_inode_open_lops;
554 mlog_bug_on_msg(1, "type: %d\n", type);
555 ops = NULL; /* thanks, gcc */
559 ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
560 generation, res->l_name);
561 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
564 static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
566 struct inode *inode = ocfs2_lock_res_inode(lockres);
568 return OCFS2_SB(inode->i_sb);
571 static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
573 struct ocfs2_mem_dqinfo *info = lockres->l_priv;
575 return OCFS2_SB(info->dqi_gi.dqi_sb);
578 static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
580 struct ocfs2_file_private *fp = lockres->l_priv;
582 return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
585 static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
587 __be64 inode_blkno_be;
589 memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
592 return be64_to_cpu(inode_blkno_be);
595 static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
597 struct ocfs2_dentry_lock *dl = lockres->l_priv;
599 return OCFS2_SB(dl->dl_inode->i_sb);
602 void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
603 u64 parent, struct inode *inode)
606 u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
607 __be64 inode_blkno_be = cpu_to_be64(inode_blkno);
608 struct ocfs2_lock_res *lockres = &dl->dl_lockres;
610 ocfs2_lock_res_init_once(lockres);
613 * Unfortunately, the standard lock naming scheme won't work
614 * here because we have two 16 byte values to use. Instead,
615 * we'll stuff the inode number as a binary value. We still
616 * want error prints to show something without garbling the
617 * display, so drop a null byte in there before the inode
618 * number. A future version of OCFS2 will likely use all
619 * binary lock names. The stringified names have been a
620 * tremendous aid in debugging, but now that the debugfs
621 * interface exists, we can mangle things there if need be.
623 * NOTE: We also drop the standard "pad" value (the total lock
624 * name size stays the same though - the last part is all
625 * zeros due to the memset in ocfs2_lock_res_init_once()
627 len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
629 ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
632 BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
634 memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
637 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
638 OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
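/*
 * Illustrative layout of a dentry lock name (not from the original source),
 * assuming OCFS2_DENTRY_LOCK_INO_START is 18 and 'N' is the dentry lock
 * type character:
 *
 *	byte  0       : lock type character ('N')
 *	bytes 1  - 16 : parent directory block number, 16 hex digits
 *	byte  17      : '\0' left by snprintf(), keeps error prints readable
 *	bytes 18 - 25 : child inode block number as a big-endian binary u64
 *	remaining     : zeros from the memset in ocfs2_lock_res_init_once()
 *
 * ocfs2_get_dentry_lock_ino() above simply copies bytes 18-25 back out and
 * byte-swaps them.
 */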
642 static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
643 struct ocfs2_super *osb)
645 /* Superblock lockres doesn't come from a slab so we call init
646 * once on it manually. */
647 ocfs2_lock_res_init_once(res);
648 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
650 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
651 &ocfs2_super_lops, osb);
654 static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
655 struct ocfs2_super *osb)
657 /* Rename lockres doesn't come from a slab so we call init
658 * once on it manually. */
659 ocfs2_lock_res_init_once(res);
660 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
661 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
662 &ocfs2_rename_lops, osb);
665 static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
666 struct ocfs2_super *osb)
668 /* nfs_sync lockres doesn't come from a slab so we call init
669 * once on it manually. */
670 ocfs2_lock_res_init_once(res);
671 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
672 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
673 &ocfs2_nfs_sync_lops, osb);
676 static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
677 struct ocfs2_super *osb)
679 ocfs2_lock_res_init_once(res);
680 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
681 ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
682 &ocfs2_orphan_scan_lops, osb);
685 void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
686 struct ocfs2_file_private *fp)
688 struct inode *inode = fp->fp_file->f_mapping->host;
689 struct ocfs2_inode_info *oi = OCFS2_I(inode);
691 ocfs2_lock_res_init_once(lockres);
692 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
693 inode->i_generation, lockres->l_name);
694 ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
695 OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
697 lockres->l_flags |= OCFS2_LOCK_NOCACHE;
700 void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
701 struct ocfs2_mem_dqinfo *info)
703 ocfs2_lock_res_init_once(lockres);
704 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
706 ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
707 OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
711 void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
712 struct ocfs2_super *osb, u64 ref_blkno,
713 unsigned int generation)
715 ocfs2_lock_res_init_once(lockres);
716 ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
717 generation, lockres->l_name);
718 ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
719 &ocfs2_refcount_block_lops, osb);
722 void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
724 if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
727 ocfs2_remove_lockres_tracking(res);
729 mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
730 "Lockres %s is on the blocked list\n",
732 mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
733 "Lockres %s has mask waiters pending\n",
735 mlog_bug_on_msg(spin_is_locked(&res->l_lock),
736 "Lockres %s is locked\n",
738 mlog_bug_on_msg(res->l_ro_holders,
739 "Lockres %s has %u ro holders\n",
740 res->l_name, res->l_ro_holders);
741 mlog_bug_on_msg(res->l_ex_holders,
742 "Lockres %s has %u ex holders\n",
743 res->l_name, res->l_ex_holders);
745 /* Need to clear out the lock status block for the dlm */
746 memset(&res->l_lksb, 0, sizeof(res->l_lksb));
751 static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
758 lockres->l_ex_holders++;
761 lockres->l_ro_holders++;
768 static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
775 BUG_ON(!lockres->l_ex_holders);
776 lockres->l_ex_holders--;
779 BUG_ON(!lockres->l_ro_holders);
780 lockres->l_ro_holders--;
787 /* WARNING: This function lives in a world where the only three lock
788 * levels are EX, PR, and NL. It *will* have to be adjusted when more
789 * lock types are added. */
790 static inline int ocfs2_highest_compat_lock_level(int level)
792 int new_level = DLM_LOCK_EX;
794 if (level == DLM_LOCK_EX)
795 new_level = DLM_LOCK_NL;
796 else if (level == DLM_LOCK_PR)
797 new_level = DLM_LOCK_PR;
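/*
 * Illustrative summary (not from the original source): if the blocking
 * request is EX we can keep at most NL, if it is PR we can still keep PR,
 * and a blocking NL request leaves EX compatible.  Callers such as
 * ocfs2_may_continue_on_blocked_lock() below use it as
 *
 *	wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking)
 *
 * to decide whether a requested level can coexist with the pending
 * downconvert.
 */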
801 static void lockres_set_flags(struct ocfs2_lock_res *lockres,
802 unsigned long newflags)
804 struct ocfs2_mask_waiter *mw, *tmp;
806 assert_spin_locked(&lockres->l_lock);
808 lockres->l_flags = newflags;
810 list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
811 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
814 list_del_init(&mw->mw_item);
816 complete(&mw->mw_complete);
819 static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
821 lockres_set_flags(lockres, lockres->l_flags | or);
823 static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
826 lockres_set_flags(lockres, lockres->l_flags & ~clear);
829 static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
831 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
832 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
833 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
834 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
836 lockres->l_level = lockres->l_requested;
837 if (lockres->l_level <=
838 ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
839 lockres->l_blocking = DLM_LOCK_NL;
840 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
842 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
845 static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
847 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
848 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
850 /* Convert from RO to EX doesn't really need anything as our
851 * information is already up to date. Convert from NL to
852 * *anything* however should mark ourselves as needing an
854 if (lockres->l_level == DLM_LOCK_NL &&
855 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
856 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
858 lockres->l_level = lockres->l_requested;
861 * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
862 * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
863 * downconverting the lock before the upconvert has fully completed.
864 * Do not prevent the dc thread from downconverting if NONBLOCK lock
865 * had already returned.
867 if (!(lockres->l_flags & OCFS2_LOCK_NONBLOCK_FINISHED))
868 lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
870 lockres_clear_flags(lockres, OCFS2_LOCK_NONBLOCK_FINISHED);
872 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
875 static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
877 BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
878 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
880 if (lockres->l_requested > DLM_LOCK_NL &&
881 !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
882 lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
883 lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
885 lockres->l_level = lockres->l_requested;
886 lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
887 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
890 static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
893 int needs_downconvert = 0;
895 assert_spin_locked(&lockres->l_lock);
897 if (level > lockres->l_blocking) {
898 /* only schedule a downconvert if we haven't already scheduled
899 * one that goes low enough to satisfy the level we're
900 * blocking. this also catches the case where we get
902 if (ocfs2_highest_compat_lock_level(level) <
903 ocfs2_highest_compat_lock_level(lockres->l_blocking))
904 needs_downconvert = 1;
906 lockres->l_blocking = level;
909 mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
910 lockres->l_name, level, lockres->l_level, lockres->l_blocking,
913 if (needs_downconvert)
914 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
915 mlog(0, "needs_downconvert = %d\n", needs_downconvert);
916 return needs_downconvert;
920 * OCFS2_LOCK_PENDING and l_pending_gen.
922 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
923 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
924 * for more details on the race.
926 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
927 * a race on itself. In o2dlm, we can get the ast before ocfs2_dlm_lock()
928 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
929 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
930 * the caller is going to try to clear PENDING again. If nothing else is
931 * happening, __lockres_clear_pending() sees PENDING is unset and does
934 * But what if another path (eg downconvert thread) has just started a
935 * new locking action? The other path has re-set PENDING. Our path
936 * cannot clear PENDING, because that will re-open the original race
942 * ocfs2_cluster_lock()
947 * ocfs2_locking_ast() ocfs2_downconvert_thread()
948 * clear PENDING ocfs2_unblock_lock()
951 * ocfs2_prepare_downconvert()
961 * So as you can see, we now have a window where l_lock is not held,
962 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
964 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
965 * set by ocfs2_prepare_downconvert(). That wasn't nice.
967 * To solve this we introduce l_pending_gen. A call to
968 * lockres_clear_pending() will only do so when it is passed a generation
969 * number that matches the lockres. lockres_set_pending() will return the
970 * current generation number. When ocfs2_cluster_lock() goes to clear
971 * PENDING, it passes the generation it got from set_pending(). In our
972 * example above, the generation numbers will *not* match. Thus,
973 * ocfs2_cluster_lock() will not clear the PENDING set by
974 * ocfs2_prepare_downconvert().
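/*
 * Illustrative sketch of the resulting pattern (not from the original
 * source); the real users are ocfs2_lock_create() and
 * __ocfs2_cluster_lock() below:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(...);
 *
 *	lockres_clear_pending(lockres, gen, osb);
 *
 * If the ast or a downconvert ran in between and bumped l_pending_gen, the
 * final clear is a no-op and the newer PENDING survives.
 */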
977 /* Unlocked version for ocfs2_locking_ast() */
978 static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
979 unsigned int generation,
980 struct ocfs2_super *osb)
982 assert_spin_locked(&lockres->l_lock);
985 * The ast and locking functions can race us here. The winner
986 * will clear pending, the loser will not.
988 if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
989 (lockres->l_pending_gen != generation))
992 lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
993 lockres->l_pending_gen++;
996 * The downconvert thread may have skipped us because we
997 * were PENDING. Wake it up.
999 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1000 ocfs2_wake_downconvert_thread(osb);
1003 /* Locked version for callers of ocfs2_dlm_lock() */
1004 static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
1005 unsigned int generation,
1006 struct ocfs2_super *osb)
1008 unsigned long flags;
1010 spin_lock_irqsave(&lockres->l_lock, flags);
1011 __lockres_clear_pending(lockres, generation, osb);
1012 spin_unlock_irqrestore(&lockres->l_lock, flags);
1015 static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
1017 assert_spin_locked(&lockres->l_lock);
1018 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
1020 lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
1022 return lockres->l_pending_gen;
1025 static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
1027 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1028 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1029 int needs_downconvert;
1030 unsigned long flags;
1032 BUG_ON(level <= DLM_LOCK_NL);
1034 mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
1035 "type %s\n", lockres->l_name, level, lockres->l_level,
1036 ocfs2_lock_type_string(lockres->l_type));
1039 * We can skip the bast for locks which don't enable caching -
1040 * they'll be dropped at the earliest possible time anyway.
1042 if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
1045 spin_lock_irqsave(&lockres->l_lock, flags);
1046 needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
1047 if (needs_downconvert)
1048 ocfs2_schedule_blocked_lock(osb, lockres);
1049 spin_unlock_irqrestore(&lockres->l_lock, flags);
1051 wake_up(&lockres->l_event);
1053 ocfs2_wake_downconvert_thread(osb);
1056 static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
1058 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1059 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1060 unsigned long flags;
1063 spin_lock_irqsave(&lockres->l_lock, flags);
1065 status = ocfs2_dlm_lock_status(&lockres->l_lksb);
1067 if (status == -EAGAIN) {
1068 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1073 mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
1074 lockres->l_name, status);
1075 spin_unlock_irqrestore(&lockres->l_lock, flags);
1079 mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
1080 "level %d => %d\n", lockres->l_name, lockres->l_action,
1081 lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
1083 switch(lockres->l_action) {
1084 case OCFS2_AST_ATTACH:
1085 ocfs2_generic_handle_attach_action(lockres);
1086 lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
1088 case OCFS2_AST_CONVERT:
1089 ocfs2_generic_handle_convert_action(lockres);
1091 case OCFS2_AST_DOWNCONVERT:
1092 ocfs2_generic_handle_downconvert_action(lockres);
1095 mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
1096 "flags 0x%lx, unlock: %u\n",
1097 lockres->l_name, lockres->l_action, lockres->l_flags,
1098 lockres->l_unlock_action);
1102 /* set it to something invalid so if we get called again we
1104 lockres->l_action = OCFS2_AST_INVALID;
1106 /* Did we try to cancel this lock? Clear that state */
1107 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
1108 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1111 * We may have beaten the locking functions here. We certainly
1112 * know that dlm_lock() has been called :-)
1113 * Because we can't have two lock calls in flight at once, we
1114 * can use lockres->l_pending_gen.
1116 __lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
1118 wake_up(&lockres->l_event);
1119 spin_unlock_irqrestore(&lockres->l_lock, flags);
1122 static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
1124 struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
1125 unsigned long flags;
1127 mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
1128 lockres->l_name, lockres->l_unlock_action);
1130 spin_lock_irqsave(&lockres->l_lock, flags);
1132 mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
1133 "unlock_action %d\n", error, lockres->l_name,
1134 lockres->l_unlock_action);
1135 spin_unlock_irqrestore(&lockres->l_lock, flags);
1139 switch(lockres->l_unlock_action) {
1140 case OCFS2_UNLOCK_CANCEL_CONVERT:
1141 mlog(0, "Cancel convert success for %s\n", lockres->l_name);
1142 lockres->l_action = OCFS2_AST_INVALID;
1143 /* Downconvert thread may have requeued this lock, we
1144 * need to wake it. */
1145 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
1146 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
1148 case OCFS2_UNLOCK_DROP_LOCK:
1149 lockres->l_level = DLM_LOCK_IV;
1155 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1156 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1157 wake_up(&lockres->l_event);
1158 spin_unlock_irqrestore(&lockres->l_lock, flags);
1162 * This is the filesystem locking protocol. It provides the lock handling
1163 * hooks for the underlying DLM. It has a maximum version number.
1164 * The version number allows interoperability with systems running at
1165 * the same major number and an equal or smaller minor number.
1167 * Whenever the filesystem does new things with locks (adds or removes a
1168 * lock, orders them differently, does different things underneath a lock),
1169 * the version must be changed. The protocol is negotiated when joining
1170 * the dlm domain. A node may join the domain if its major version is
1171 * identical to all other nodes and its minor version is greater than
1172 * or equal to all other nodes. When its minor version is greater than
1173 * the other nodes, it will run at the minor version specified by the
1176 * If a locking change is made that will not be compatible with older
1177 * versions, the major number must be increased and the minor version set
1178 * to zero. If a change merely adds a behavior that can be disabled when
1179 * speaking to older versions, the minor version must be increased. If a
1180 * change adds a fully backwards compatible change (eg, LVB changes that
1181 * are just ignored by older versions), the version does not need to be
1184 static struct ocfs2_locking_protocol lproto = {
1186 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
1187 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
1189 .lp_lock_ast = ocfs2_locking_ast,
1190 .lp_blocking_ast = ocfs2_blocking_ast,
1191 .lp_unlock_ast = ocfs2_unlock_ast,
1194 void ocfs2_set_locking_protocol(void)
1196 ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
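/*
 * Worked example (illustrative, not from the original source): a node
 * speaking protocol 1.2 may join a cluster running 1.1 - the majors match
 * and its minor is greater - but it must then run at 1.1.  A node at 2.0
 * cannot join a 1.x cluster at all, and a node at 1.0 cannot join a
 * cluster that is already running at 1.1.
 */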
1199 static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
1202 unsigned long flags;
1204 spin_lock_irqsave(&lockres->l_lock, flags);
1205 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
1206 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1208 lockres->l_action = OCFS2_AST_INVALID;
1210 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
1211 spin_unlock_irqrestore(&lockres->l_lock, flags);
1213 wake_up(&lockres->l_event);
1216 /* Note: If we detect another process working on the lock (i.e.,
1217 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
1218 * to do the right thing in that case.
1220 static int ocfs2_lock_create(struct ocfs2_super *osb,
1221 struct ocfs2_lock_res *lockres,
1226 unsigned long flags;
1229 mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
1232 spin_lock_irqsave(&lockres->l_lock, flags);
1233 if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
1234 (lockres->l_flags & OCFS2_LOCK_BUSY)) {
1235 spin_unlock_irqrestore(&lockres->l_lock, flags);
1239 lockres->l_action = OCFS2_AST_ATTACH;
1240 lockres->l_requested = level;
1241 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1242 gen = lockres_set_pending(lockres);
1243 spin_unlock_irqrestore(&lockres->l_lock, flags);
1245 ret = ocfs2_dlm_lock(osb->cconn,
1250 OCFS2_LOCK_ID_MAX_LEN - 1);
1251 lockres_clear_pending(lockres, gen, osb);
1253 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1254 ocfs2_recover_from_dlm_error(lockres, 1);
1257 mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
1263 static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
1266 unsigned long flags;
1269 spin_lock_irqsave(&lockres->l_lock, flags);
1270 ret = lockres->l_flags & flag;
1271 spin_unlock_irqrestore(&lockres->l_lock, flags);
1276 static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
1279 wait_event(lockres->l_event,
1280 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
1283 static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
1286 wait_event(lockres->l_event,
1287 !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
1290 /* predict what lock level we'll be dropping down to on behalf
1291 * of another node, and return true if the currently wanted
1292 * level will be compatible with it. */
1293 static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
1296 BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
1298 return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
1301 static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
1303 INIT_LIST_HEAD(&mw->mw_item);
1304 init_completion(&mw->mw_complete);
1305 ocfs2_init_start_time(mw);
1308 static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
1310 wait_for_completion(&mw->mw_complete);
1311 /* Re-arm the completion in case we want to wait on it again */
1312 reinit_completion(&mw->mw_complete);
1313 return mw->mw_status;
1316 static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
1317 struct ocfs2_mask_waiter *mw,
1321 BUG_ON(!list_empty(&mw->mw_item));
1323 assert_spin_locked(&lockres->l_lock);
1325 list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
1330 /* returns 0 if the mw that was removed was already satisfied, -EBUSY
1331 * if the mask still hadn't reached its goal */
1332 static int __lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1333 struct ocfs2_mask_waiter *mw)
1337 assert_spin_locked(&lockres->l_lock);
1338 if (!list_empty(&mw->mw_item)) {
1339 if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
1342 list_del_init(&mw->mw_item);
1343 init_completion(&mw->mw_complete);
1349 static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
1350 struct ocfs2_mask_waiter *mw)
1352 unsigned long flags;
1355 spin_lock_irqsave(&lockres->l_lock, flags);
1356 ret = __lockres_remove_mask_waiter(lockres, mw);
1357 spin_unlock_irqrestore(&lockres->l_lock, flags);
1363 static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
1364 struct ocfs2_lock_res *lockres)
1368 ret = wait_for_completion_interruptible(&mw->mw_complete);
1370 lockres_remove_mask_waiter(lockres, mw);
1372 ret = mw->mw_status;
1373 /* Re-arm the completion in case we want to wait on it again */
1374 reinit_completion(&mw->mw_complete);
1378 static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
1379 struct ocfs2_lock_res *lockres,
1384 unsigned long caller_ip)
1386 struct ocfs2_mask_waiter mw;
1387 int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
1388 int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
1389 unsigned long flags;
1391 int noqueue_attempted = 0;
1394 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
1395 mlog_errno(-EINVAL);
1399 ocfs2_init_mask_waiter(&mw);
1401 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
1402 lkm_flags |= DLM_LKF_VALBLK;
1407 spin_lock_irqsave(&lockres->l_lock, flags);
1409 if (catch_signals && signal_pending(current)) {
1414 mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
1415 "Cluster lock called on freeing lockres %s! flags "
1416 "0x%lx\n", lockres->l_name, lockres->l_flags);
1418 /* We only compare against the currently granted level
1419 * here. If the lock is blocked waiting on a downconvert,
1420 * we'll get caught below. */
1421 if (lockres->l_flags & OCFS2_LOCK_BUSY &&
1422 level > lockres->l_level) {
1423 /* is someone sitting in dlm_lock? If so, wait on
1425 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1430 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
1432 * We've upconverted. If the lock now has a level we can
1433 * work with, we take it. If, however, the lock is not at the
1434 * required level, we go thru the full cycle. One way this could
1435 * happen is if a process requesting an upconvert to PR is
1436 * closely followed by another requesting upconvert to an EX.
1437 * If the process requesting EX lands here, we want it to
1438 * continue attempting to upconvert and let the process
1439 * requesting PR take the lock.
1440 * If multiple processes request upconvert to PR, the first one
1441 * here will take the lock. The others will have to go thru the
1442 * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
1443 * downconvert request.
1445 if (level <= lockres->l_level)
1446 goto update_holders;
1449 if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
1450 !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
1451 /* is the lock currently blocked on behalf of
1453 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
1458 if (level > lockres->l_level) {
1459 if (noqueue_attempted > 0) {
1463 if (lkm_flags & DLM_LKF_NOQUEUE)
1464 noqueue_attempted = 1;
1466 if (lockres->l_action != OCFS2_AST_INVALID)
1467 mlog(ML_ERROR, "lockres %s has action %u pending\n",
1468 lockres->l_name, lockres->l_action);
1470 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1471 lockres->l_action = OCFS2_AST_ATTACH;
1472 lkm_flags &= ~DLM_LKF_CONVERT;
1474 lockres->l_action = OCFS2_AST_CONVERT;
1475 lkm_flags |= DLM_LKF_CONVERT;
1478 lockres->l_requested = level;
1479 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1480 gen = lockres_set_pending(lockres);
1481 spin_unlock_irqrestore(&lockres->l_lock, flags);
1483 BUG_ON(level == DLM_LOCK_IV);
1484 BUG_ON(level == DLM_LOCK_NL);
1486 mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
1487 lockres->l_name, lockres->l_level, level);
1489 /* call dlm_lock to upgrade lock now */
1490 ret = ocfs2_dlm_lock(osb->cconn,
1495 OCFS2_LOCK_ID_MAX_LEN - 1);
1496 lockres_clear_pending(lockres, gen, osb);
1498 if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
1500 ocfs2_log_dlm_error("ocfs2_dlm_lock",
1503 ocfs2_recover_from_dlm_error(lockres, 1);
1508 mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
1511 /* At this point we've gone inside the dlm and need to
1512 * complete our work regardless. */
1515 /* wait for busy to clear and carry on */
1520 /* Ok, if we get here then we're good to go. */
1521 ocfs2_inc_holders(lockres, level);
1525 lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
1527 spin_unlock_irqrestore(&lockres->l_lock, flags);
1530 * This is helping work around a lock inversion between the page lock
1531 * and dlm locks. One path holds the page lock while calling aops
1532 * which block acquiring dlm locks. The downconvert thread holds dlm
1533 * locks while it acquires page locks to downconvert data locks.
1534 * This block is helping an aop path notice the inversion and back
1535 * off to unlock its page lock before trying the dlm lock again.
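/*
 * Illustrative caller-side sketch (not from the original source, shape
 * approximate): an aop holding a page lock would pass OCFS2_LOCK_NONBLOCK
 * in arg_flags and treat -EAGAIN as "drop the page lock and retry" rather
 * than sleeping here with the page held, roughly:
 *
 *	ret = ocfs2_inode_lock_full(inode, NULL, 0, OCFS2_LOCK_NONBLOCK);
 *	if (ret == -EAGAIN) {
 *		unlock_page(page);
 *		return AOP_TRUNCATED_PAGE;
 *	}
 */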
1537 if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
1538 mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
1540 spin_lock_irqsave(&lockres->l_lock, flags);
1541 if (__lockres_remove_mask_waiter(lockres, &mw)) {
1543 lockres_or_flags(lockres,
1544 OCFS2_LOCK_NONBLOCK_FINISHED);
1545 spin_unlock_irqrestore(&lockres->l_lock, flags);
1548 spin_unlock_irqrestore(&lockres->l_lock, flags);
1553 ret = ocfs2_wait_for_mask(&mw);
1558 ocfs2_update_lock_stats(lockres, level, &mw, ret);
1560 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1561 if (!ret && lockres->l_lockdep_map.key != NULL) {
1562 if (level == DLM_LOCK_PR)
1563 rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
1564 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1567 rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
1568 !!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
1575 static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
1576 struct ocfs2_lock_res *lockres,
1581 return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
1586 static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
1587 struct ocfs2_lock_res *lockres,
1589 unsigned long caller_ip)
1591 unsigned long flags;
1593 spin_lock_irqsave(&lockres->l_lock, flags);
1594 ocfs2_dec_holders(lockres, level);
1595 ocfs2_downconvert_on_unlock(osb, lockres);
1596 spin_unlock_irqrestore(&lockres->l_lock, flags);
1597 #ifdef CONFIG_DEBUG_LOCK_ALLOC
1598 if (lockres->l_lockdep_map.key != NULL)
1599 rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
1603 static int ocfs2_create_new_lock(struct ocfs2_super *osb,
1604 struct ocfs2_lock_res *lockres,
1608 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1609 unsigned long flags;
1610 u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
1612 spin_lock_irqsave(&lockres->l_lock, flags);
1613 BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
1614 lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
1615 spin_unlock_irqrestore(&lockres->l_lock, flags);
1617 return ocfs2_lock_create(osb, lockres, level, lkm_flags);
1620 /* Grants us an EX lock on the data and metadata resources, skipping
1621 * the normal cluster directory lookup. Use this ONLY on newly created
1622 * inodes which other nodes can't possibly see, and which haven't been
1623 * hashed in the inode hash yet. This can give us a good performance
1624 * increase as it'll skip the network broadcast normally associated
1625 * with creating a new lock resource. */
1626 int ocfs2_create_new_inode_locks(struct inode *inode)
1629 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1632 BUG_ON(!ocfs2_inode_is_new(inode));
1634 mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
1636 /* NOTE: We don't increment any of the holder counts, nor
1637 * do we add anything to a journal handle. Since this is
1638 * supposed to be a new inode which the cluster doesn't know
1639 * about yet, there is no need to. As far as the LVB handling
1640 * is concerned, this is basically like acquiring an EX lock
1641 * on a resource which has an invalid one -- we'll set it
1642 * valid when we release the EX. */
1644 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
1651 * We don't want to use DLM_LKF_LOCAL on a meta data lock as they
1652 * don't use a generation in their lock names.
1654 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
1660 ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
1670 int ocfs2_rw_lock(struct inode *inode, int write)
1673 struct ocfs2_lock_res *lockres;
1674 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1678 mlog(0, "inode %llu take %s RW lock\n",
1679 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1680 write ? "EXMODE" : "PRMODE");
1682 if (ocfs2_mount_local(osb))
1685 lockres = &OCFS2_I(inode)->ip_rw_lockres;
1687 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1689 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
1697 void ocfs2_rw_unlock(struct inode *inode, int write)
1699 int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1700 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
1701 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1703 mlog(0, "inode %llu drop %s RW lock\n",
1704 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1705 write ? "EXMODE" : "PRMODE");
1707 if (!ocfs2_mount_local(osb))
1708 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
1712 * ocfs2_open_lock always gets a PR mode lock.
1714 int ocfs2_open_lock(struct inode *inode)
1717 struct ocfs2_lock_res *lockres;
1718 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1722 mlog(0, "inode %llu take PRMODE open lock\n",
1723 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1725 if (ocfs2_is_hard_readonly(osb) || ocfs2_mount_local(osb))
1728 lockres = &OCFS2_I(inode)->ip_open_lockres;
1730 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1739 int ocfs2_try_open_lock(struct inode *inode, int write)
1741 int status = 0, level;
1742 struct ocfs2_lock_res *lockres;
1743 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1747 mlog(0, "inode %llu try to take %s open lock\n",
1748 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1749 write ? "EXMODE" : "PRMODE");
1751 if (ocfs2_is_hard_readonly(osb)) {
1757 if (ocfs2_mount_local(osb))
1760 lockres = &OCFS2_I(inode)->ip_open_lockres;
1762 level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
1765 * The file system may already be holding a PRMODE/EXMODE open lock.
1766 * Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
1767 * other nodes and the -EAGAIN will indicate to the caller that
1768 * this inode is still in use.
1770 status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
1771 level, DLM_LKF_NOQUEUE, 0);
1778 * ocfs2_open_unlock unlocks PR and EX mode open locks.
1780 void ocfs2_open_unlock(struct inode *inode)
1782 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
1783 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1785 mlog(0, "inode %llu drop open lock\n",
1786 (unsigned long long)OCFS2_I(inode)->ip_blkno);
1788 if (ocfs2_mount_local(osb))
1791 if(lockres->l_ro_holders)
1792 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1794 if(lockres->l_ex_holders)
1795 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
1802 static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
1806 struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
1807 unsigned long flags;
1808 struct ocfs2_mask_waiter mw;
1810 ocfs2_init_mask_waiter(&mw);
1813 spin_lock_irqsave(&lockres->l_lock, flags);
1814 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
1815 ret = ocfs2_prepare_cancel_convert(osb, lockres);
1817 spin_unlock_irqrestore(&lockres->l_lock, flags);
1818 ret = ocfs2_cancel_convert(osb, lockres);
1825 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1826 spin_unlock_irqrestore(&lockres->l_lock, flags);
1828 ocfs2_wait_for_mask(&mw);
1834 * We may still have gotten the lock, in which case there's no
1835 * point to restarting the syscall.
1837 if (lockres->l_level == level)
1840 mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
1841 lockres->l_flags, lockres->l_level, lockres->l_action);
1843 spin_unlock_irqrestore(&lockres->l_lock, flags);
1850 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
1851 * flock() calls. The locking approach this requires is sufficiently
1852 * different from all other cluster lock types that we implement a
1853 * separate path to the "low-level" dlm calls. In particular:
1855 * - No optimization of lock levels is done - we take exactly
1856 * what's been requested.
1858 * - No lock caching is employed. We immediately downconvert to
1859 * no-lock at unlock time. This also means flock locks never go on
1860 * the blocking list.
1862 * - Since userspace can trivially deadlock itself with flock, we make
1863 * sure to allow cancellation of a misbehaving application's flock()
1866 * - Access to any flock lockres doesn't require concurrency, so we
1867 * can simplify the code by requiring the caller to guarantee
1868 * serialization of dlmglue flock calls.
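/*
 * Illustrative caller-side sketch (not from the original source): a
 * flock() handler would typically map the request onto this pair, e.g.
 *
 *	int trylock = !(fl->fl_flags & FL_SLEEP);
 *	int ex = (fl->fl_type == F_WRLCK);
 *
 *	ret = ocfs2_file_lock(file, ex, trylock);
 *	if (ret == -EAGAIN && trylock)
 *		... report to userspace that the lock is busy ...
 *
 * with ocfs2_file_unlock(file) handling the F_UNLCK case.
 */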
1870 int ocfs2_file_lock(struct file *file, int ex, int trylock)
1872 int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
1873 unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
1874 unsigned long flags;
1875 struct ocfs2_file_private *fp = file->private_data;
1876 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1877 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1878 struct ocfs2_mask_waiter mw;
1880 ocfs2_init_mask_waiter(&mw);
1882 if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
1883 (lockres->l_level > DLM_LOCK_NL)) {
1885 "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
1886 "level: %u\n", lockres->l_name, lockres->l_flags,
1891 spin_lock_irqsave(&lockres->l_lock, flags);
1892 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
1893 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1894 spin_unlock_irqrestore(&lockres->l_lock, flags);
1897 * Get the lock at NLMODE to start - that way we
1898 * can cancel the upconvert request if need be.
1900 ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
1906 ret = ocfs2_wait_for_mask(&mw);
1911 spin_lock_irqsave(&lockres->l_lock, flags);
1914 lockres->l_action = OCFS2_AST_CONVERT;
1915 lkm_flags |= DLM_LKF_CONVERT;
1916 lockres->l_requested = level;
1917 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
1919 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1920 spin_unlock_irqrestore(&lockres->l_lock, flags);
1922 ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
1923 lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
1925 if (!trylock || (ret != -EAGAIN)) {
1926 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
1930 ocfs2_recover_from_dlm_error(lockres, 1);
1931 lockres_remove_mask_waiter(lockres, &mw);
1935 ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
1936 if (ret == -ERESTARTSYS) {
1938 * Userspace can cause deadlock itself with
1939 * flock(). Current behavior locally is to allow the
1940 * deadlock, but abort the system call if a signal is
1941 * received. We follow this example, otherwise a
1942 * poorly written program could sit in kernel until
1945 * Handling this is a bit more complicated for Ocfs2
1946 * though. We can't exit this function with an
1947 * outstanding lock request, so a cancel convert is
1948 * required. We intentionally overwrite 'ret' - if the
1949 * cancel fails and the lock was granted, it's easier
1950 * to just bubble success back up to the user.
1952 ret = ocfs2_flock_handle_signal(lockres, level);
1953 } else if (!ret && (level > lockres->l_level)) {
1954 /* Trylock failed asynchronously */
1961 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
1962 lockres->l_name, ex, trylock, ret);
1966 void ocfs2_file_unlock(struct file *file)
1970 unsigned long flags;
1971 struct ocfs2_file_private *fp = file->private_data;
1972 struct ocfs2_lock_res *lockres = &fp->fp_flock;
1973 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
1974 struct ocfs2_mask_waiter mw;
1976 ocfs2_init_mask_waiter(&mw);
1978 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
1981 if (lockres->l_level == DLM_LOCK_NL)
1984 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
1985 lockres->l_name, lockres->l_flags, lockres->l_level,
1988 spin_lock_irqsave(&lockres->l_lock, flags);
1990 * Fake a blocking ast for the downconvert code.
1992 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
1993 lockres->l_blocking = DLM_LOCK_EX;
1995 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
1996 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
1997 spin_unlock_irqrestore(&lockres->l_lock, flags);
1999 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
2005 ret = ocfs2_wait_for_mask(&mw);
2010 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
2011 struct ocfs2_lock_res *lockres)
2015 /* If we know that another node is waiting on our lock, kick
2016 * the downconvert thread pre-emptively when we reach a release
2018 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
2019 switch(lockres->l_blocking) {
2021 if (!lockres->l_ex_holders && !lockres->l_ro_holders)
2025 if (!lockres->l_ex_holders)
2034 ocfs2_wake_downconvert_thread(osb);
2037 #define OCFS2_SEC_BITS 34
2038 #define OCFS2_SEC_SHIFT (64 - 34)
2039 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
2041 /* LVB only has room for 64 bits of time here so we pack it for
2043 static u64 ocfs2_pack_timespec(struct timespec *spec)
2046 u64 sec = spec->tv_sec;
2047 u32 nsec = spec->tv_nsec;
2049 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
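/*
 * Worked example (illustrative, not from the original source): with
 * OCFS2_SEC_SHIFT == 30, a time of 1 second and 5 nanoseconds packs to
 *
 *	(1ULL << 30) | 5 == 0x40000005
 *
 * and ocfs2_unpack_timespec() below recovers tv_sec = packed >> 30 and
 * tv_nsec = packed & OCFS2_NSEC_MASK.  Seconds get 34 bits, so the packed
 * format is good until roughly the year 2514.
 */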
2054 /* Call this with the lockres locked. I am reasonably sure we don't
2055 * need ip_lock in this function as anyone who would be changing those
2056 * values is supposed to be blocked in ocfs2_inode_lock right now. */
2057 static void __ocfs2_stuff_meta_lvb(struct inode *inode)
2059 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2060 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2061 struct ocfs2_meta_lvb *lvb;
2063 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2066 * Invalidate the LVB of a deleted inode - this way other
2067 * nodes are forced to go to disk and discover the new inode
2070 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2071 lvb->lvb_version = 0;
2075 lvb->lvb_version = OCFS2_LVB_VERSION;
2076 lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
2077 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
2078 lvb->lvb_iuid = cpu_to_be32(i_uid_read(inode));
2079 lvb->lvb_igid = cpu_to_be32(i_gid_read(inode));
2080 lvb->lvb_imode = cpu_to_be16(inode->i_mode);
2081 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
2082 lvb->lvb_iatime_packed =
2083 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
2084 lvb->lvb_ictime_packed =
2085 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
2086 lvb->lvb_imtime_packed =
2087 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
2088 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
2089 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
2090 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
2093 mlog_meta_lvb(0, lockres);
2096 static void ocfs2_unpack_timespec(struct timespec *spec,
2099 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
2100 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
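/*
 * Illustrative userspace sketch (not part of this file) of the
 * 34-bit-second / 30-bit-nanosecond packing done by
 * ocfs2_pack_timespec()/ocfs2_unpack_timespec() above.  Nanoseconds never
 * exceed 999,999,999, which fits in the low 30 bits, so the round trip is
 * lossless; the example values are arbitrary.
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	#define SEC_SHIFT	(64 - 34)
 *	#define NSEC_MASK	((1ULL << SEC_SHIFT) - 1)
 *
 *	int main(void)
 *	{
 *		uint64_t sec = 1234567890ULL;
 *		uint32_t nsec = 987654321;
 *		uint64_t packed = (sec << SEC_SHIFT) | (nsec & NSEC_MASK);
 *
 *		assert((packed >> SEC_SHIFT) == sec);		// seconds in the top 34 bits
 *		assert((uint32_t)(packed & NSEC_MASK) == nsec);	// nsecs in the low 30 bits
 *		return 0;
 *	}
 */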
2103 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
2105 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2106 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2107 struct ocfs2_meta_lvb *lvb;
2109 mlog_meta_lvb(0, lockres);
2111 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2113 /* We're safe here without the lockres lock... */
2114 spin_lock(&oi->ip_lock);
2115 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
2116 i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
2118 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
2119 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
2120 ocfs2_set_inode_flags(inode);
2122 /* fast-symlinks are a special case */
2123 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
2124 inode->i_blocks = 0;
2126 inode->i_blocks = ocfs2_inode_sector_count(inode);
2128 i_uid_write(inode, be32_to_cpu(lvb->lvb_iuid));
2129 i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
2130 inode->i_mode = be16_to_cpu(lvb->lvb_imode);
2131 set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
2132 ocfs2_unpack_timespec(&inode->i_atime,
2133 be64_to_cpu(lvb->lvb_iatime_packed));
2134 ocfs2_unpack_timespec(&inode->i_mtime,
2135 be64_to_cpu(lvb->lvb_imtime_packed));
2136 ocfs2_unpack_timespec(&inode->i_ctime,
2137 be64_to_cpu(lvb->lvb_ictime_packed));
2138 spin_unlock(&oi->ip_lock);
2141 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
2142 struct ocfs2_lock_res *lockres)
2144 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2146 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
2147 && lvb->lvb_version == OCFS2_LVB_VERSION
2148 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
2153 /* Determine whether a lock resource needs to be refreshed, and
2154 * arbitrate who gets to refresh it.
2156 * 0 means no refresh needed.
2158 * > 0 means you need to refresh this and you MUST call
2159 * ocfs2_complete_lock_res_refresh afterwards. */
2160 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
2162 unsigned long flags;
2166 spin_lock_irqsave(&lockres->l_lock, flags);
2167 if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
2168 spin_unlock_irqrestore(&lockres->l_lock, flags);
2172 if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
2173 spin_unlock_irqrestore(&lockres->l_lock, flags);
2175 ocfs2_wait_on_refreshing_lock(lockres);
2179 /* Ok, I'll be the one to refresh this lock. */
2180 lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
2181 spin_unlock_irqrestore(&lockres->l_lock, flags);
2185 mlog(0, "status %d\n", status);
2189 /* If status is non-zero, I'll mark it as not being in refresh
2190 * anymore, but I won't clear the needs refresh flag. */
2191 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2194 unsigned long flags;
2196 spin_lock_irqsave(&lockres->l_lock, flags);
2197 lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2199 lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2200 spin_unlock_irqrestore(&lockres->l_lock, flags);
2202 wake_up(&lockres->l_event);
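/*
 * Minimal usage sketch of the refresh arbitration above, following the
 * pattern the superblock and quota info locks use later in this file.
 * my_repopulate_from_disk() is a hypothetical stand-in for whatever rebuilds
 * the cached state; only the caller that wins the refresh runs it, and the
 * result must always be reported back:
 *
 *	if (ocfs2_should_refresh_lock_res(lockres)) {
 *		status = my_repopulate_from_disk(osb);
 *		ocfs2_complete_lock_res_refresh(lockres, status);
 *		if (status < 0)
 *			mlog_errno(status);
 *	}
 */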
2205 /* may or may not return a bh if it went to disk. */
2206 static int ocfs2_inode_lock_update(struct inode *inode,
2207 struct buffer_head **bh)
2210 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2211 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2212 struct ocfs2_dinode *fe;
2213 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2215 if (ocfs2_mount_local(osb))
2218 spin_lock(&oi->ip_lock);
2219 if (oi->ip_flags & OCFS2_INODE_DELETED) {
2220 mlog(0, "Orphaned inode %llu was deleted while we "
2221 "were waiting on a lock. ip_flags = 0x%x\n",
2222 (unsigned long long)oi->ip_blkno, oi->ip_flags);
2223 spin_unlock(&oi->ip_lock);
2227 spin_unlock(&oi->ip_lock);
2229 if (!ocfs2_should_refresh_lock_res(lockres))
2232 /* This will discard any caching information we might have had
2233 * for the inode metadata. */
2234 ocfs2_metadata_cache_purge(INODE_CACHE(inode));
2236 ocfs2_extent_map_trunc(inode, 0);
2238 if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2239 mlog(0, "Trusting LVB on inode %llu\n",
2240 (unsigned long long)oi->ip_blkno);
2241 ocfs2_refresh_inode_from_lvb(inode);
2243 /* Boo, we have to go to disk. */
2244 /* read bh, cast, ocfs2_refresh_inode */
2245 status = ocfs2_read_inode_block(inode, bh);
2250 fe = (struct ocfs2_dinode *) (*bh)->b_data;
2252 /* This is a good chance to make sure we're not
2253 * locking an invalid object. ocfs2_read_inode_block()
2254 * already checked that the inode block is sane.
2256 * We bug on a stale inode here because we checked
2257 * above whether it was wiped from disk. The wiping
2258 * node provides a guarantee that we receive that
2259 * message and can mark the inode before dropping any
2260 * locks associated with it. */
2261 mlog_bug_on_msg(inode->i_generation !=
2262 le32_to_cpu(fe->i_generation),
2263 "Invalid dinode %llu disk generation: %u "
2264 "inode->i_generation: %u\n",
2265 (unsigned long long)oi->ip_blkno,
2266 le32_to_cpu(fe->i_generation),
2267 inode->i_generation);
2268 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
2269 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
2270 "Stale dinode %llu dtime: %llu flags: 0x%x\n",
2271 (unsigned long long)oi->ip_blkno,
2272 (unsigned long long)le64_to_cpu(fe->i_dtime),
2273 le32_to_cpu(fe->i_flags));
2275 ocfs2_refresh_inode(inode, fe);
2276 ocfs2_track_lock_refresh(lockres);
2281 ocfs2_complete_lock_res_refresh(lockres, status);
2286 static int ocfs2_assign_bh(struct inode *inode,
2287 struct buffer_head **ret_bh,
2288 struct buffer_head *passed_bh)
2293 /* Ok, the update went to disk for us, use the
2295 *ret_bh = passed_bh;
2301 status = ocfs2_read_inode_block(inode, ret_bh);
2309 * returns < 0 error if the callback will never be called, otherwise
2310 * the result of the lock will be communicated via the callback.
2312 int ocfs2_inode_lock_full_nested(struct inode *inode,
2313 struct buffer_head **ret_bh,
2318 int status, level, acquired;
2320 struct ocfs2_lock_res *lockres = NULL;
2321 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2322 struct buffer_head *local_bh = NULL;
2326 mlog(0, "inode %llu, take %s META lock\n",
2327 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2328 ex ? "EXMODE" : "PRMODE");
2332 /* We'll allow faking a readonly metadata lock for
2334 if (ocfs2_is_hard_readonly(osb)) {
2340 if (ocfs2_mount_local(osb))
2343 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2344 ocfs2_wait_for_recovery(osb);
2346 lockres = &OCFS2_I(inode)->ip_inode_lockres;
2347 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2349 if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
2350 dlm_flags |= DLM_LKF_NOQUEUE;
2352 status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
2353 arg_flags, subclass, _RET_IP_);
2355 if (status != -EAGAIN)
2360 /* Notify the error cleanup path to drop the cluster lock. */
2363 /* We wait twice because a node may have died while we were in
2364 * the lower dlm layers. The second time though, we've
2365 * committed to owning this lock so we don't allow signals to
2366 * abort the operation. */
2367 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
2368 ocfs2_wait_for_recovery(osb);
2372 * We only see this flag if we're being called from
2373 * ocfs2_read_locked_inode(). It means we're locking an inode
2374 * which hasn't been populated yet, so clear the refresh flag
2375 * and let the caller handle it.
2377 if (inode->i_state & I_NEW) {
2380 ocfs2_complete_lock_res_refresh(lockres, 0);
2384 /* This is fun. The caller may want a bh back, or it may
2385 * not. ocfs2_inode_lock_update definitely wants one in, but
2386 * may or may not read one, depending on what's in the
2387 * LVB. The result of all of this is that we've *only* gone to
2388 * disk if we have to, so the complexity is worthwhile. */
2389 status = ocfs2_inode_lock_update(inode, &local_bh);
2391 if (status != -ENOENT)
2397 status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2406 if (ret_bh && (*ret_bh)) {
2411 ocfs2_inode_unlock(inode, ex);
2421 * This is working around a lock inversion between tasks acquiring DLM
2422 * locks while holding a page lock and the downconvert thread which
2423 * blocks dlm lock acquisition while acquiring page locks.
2425 * ** These _with_page variants are only intended to be called from aop
2426 * methods that hold page locks and return a very specific *positive* error
2427 * code that aop methods pass up to the VFS -- test for errors with != 0. **
2429 * The DLM is called such that it returns -EAGAIN if it would have
2430 * blocked waiting for the downconvert thread. In that case we unlock
2431 * our page so the downconvert thread can make progress. Once we've
2432 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2433 * that called us can bubble that back up into the VFS who will then
2434 * immediately retry the aop call.
2436 * We do a blocking lock and immediate unlock before returning, though, so that
2437 * the lock has a great chance of being cached on this node by the time the VFS
2438 * calls back to retry the aop. This has the potential to livelock as nodes
2439 * ping locks back and forth, but that's a risk we're willing to take in
2440 * order to keep the fix for the lock inversion simple.
2442 int ocfs2_inode_lock_with_page(struct inode *inode,
2443 struct buffer_head **ret_bh,
2449 ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2450 if (ret == -EAGAIN) {
2452 if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2453 ocfs2_inode_unlock(inode, ex);
2454 ret = AOP_TRUNCATED_PAGE;
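/*
 * Hedged sketch of a caller in an aop method; the real ocfs2 aops may
 * structure their error handling differently.  The key points are that the
 * return value is tested with != 0 and that in the AOP_TRUNCATED_PAGE case
 * the page has already been unlocked for us (see the comment above), so it
 * must not be unlocked again:
 *
 *	ret = ocfs2_inode_lock_with_page(inode, NULL, 0, page);
 *	if (ret != 0) {
 *		if (ret != AOP_TRUNCATED_PAGE)
 *			mlog_errno(ret);
 *		return ret;	// VFS retries on AOP_TRUNCATED_PAGE
 *	}
 *	// ... do the aop work with the cluster lock held ...
 *	ocfs2_inode_unlock(inode, 0);
 */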
2460 int ocfs2_inode_lock_atime(struct inode *inode,
2461 struct vfsmount *vfsmnt,
2466 ret = ocfs2_inode_lock(inode, NULL, 0);
2473 * If we should update atime, we will get EX lock,
2474 * otherwise we just get PR lock.
2476 if (ocfs2_should_update_atime(inode, vfsmnt)) {
2477 struct buffer_head *bh = NULL;
2479 ocfs2_inode_unlock(inode, 0);
2480 ret = ocfs2_inode_lock(inode, &bh, 1);
2486 if (ocfs2_should_update_atime(inode, vfsmnt))
2487 ocfs2_update_inode_atime(inode, bh);
2496 void ocfs2_inode_unlock(struct inode *inode,
2499 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2500 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
2501 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2503 mlog(0, "inode %llu drop %s META lock\n",
2504 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2505 ex ? "EXMODE" : "PRMODE");
2507 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
2508 !ocfs2_mount_local(osb))
2509 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
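/*
 * Typical usage sketch of the lock/unlock pair above (error handling
 * trimmed).  The ex argument passed to ocfs2_inode_unlock() must match the
 * one used when taking the lock, and a returned buffer_head is the caller's
 * to brelse():
 *
 *	struct buffer_head *di_bh = NULL;
 *
 *	status = ocfs2_inode_lock(inode, &di_bh, 1);	// 1 == EX
 *	if (status < 0)
 *		goto bail;
 *	// ... update inode metadata under the EX cluster lock ...
 *	ocfs2_inode_unlock(inode, 1);
 *	brelse(di_bh);
 */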
2512 int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
2514 struct ocfs2_lock_res *lockres;
2515 struct ocfs2_orphan_scan_lvb *lvb;
2518 if (ocfs2_is_hard_readonly(osb))
2521 if (ocfs2_mount_local(osb))
2524 lockres = &osb->osb_orphan_scan.os_lockres;
2525 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2529 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2530 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
2531 lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
2532 *seqno = be32_to_cpu(lvb->lvb_os_seqno);
2534 *seqno = osb->osb_orphan_scan.os_seqno + 1;
2539 void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
2541 struct ocfs2_lock_res *lockres;
2542 struct ocfs2_orphan_scan_lvb *lvb;
2544 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
2545 lockres = &osb->osb_orphan_scan.os_lockres;
2546 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2547 lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
2548 lvb->lvb_os_seqno = cpu_to_be32(seqno);
2549 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
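/*
 * Sketch of the sequence-number handoff the pair above implements for the
 * periodic orphan scan (the real worker lives outside this file; this is
 * only illustrative).  Whichever node scans next reads the cluster-wide
 * sequence number under the EX lock and publishes the value it acted on
 * through the LVB when dropping it, letting other nodes skip scans that
 * have already been done:
 *
 *	u32 seqno = 0;
 *
 *	if (ocfs2_orphan_scan_lock(osb, &seqno) < 0)
 *		return;
 *	// ... scan or skip, based on seqno vs. the locally cached value ...
 *	ocfs2_orphan_scan_unlock(osb, seqno);
 */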
2553 int ocfs2_super_lock(struct ocfs2_super *osb,
2557 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2558 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2560 if (ocfs2_is_hard_readonly(osb))
2563 if (ocfs2_mount_local(osb))
2566 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
2572 /* The super block lock path is really in the best position to
2573 * know when resources covered by the lock need to be
2574 * refreshed, so we do it here. Of course, making sense of
2575 * everything is up to the caller :) */
2576 status = ocfs2_should_refresh_lock_res(lockres);
2578 status = ocfs2_refresh_slot_info(osb);
2580 ocfs2_complete_lock_res_refresh(lockres, status);
2583 ocfs2_cluster_unlock(osb, lockres, level);
2586 ocfs2_track_lock_refresh(lockres);
2592 void ocfs2_super_unlock(struct ocfs2_super *osb,
2595 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2596 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
2598 if (!ocfs2_mount_local(osb))
2599 ocfs2_cluster_unlock(osb, lockres, level);
2602 int ocfs2_rename_lock(struct ocfs2_super *osb)
2605 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2607 if (ocfs2_is_hard_readonly(osb))
2610 if (ocfs2_mount_local(osb))
2613 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
2620 void ocfs2_rename_unlock(struct ocfs2_super *osb)
2622 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
2624 if (!ocfs2_mount_local(osb))
2625 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
2628 int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
2631 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2633 if (ocfs2_is_hard_readonly(osb))
2636 if (ocfs2_mount_local(osb))
2639 status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
2642 mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
2647 void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
2649 struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
2651 if (!ocfs2_mount_local(osb))
2652 ocfs2_cluster_unlock(osb, lockres,
2653 ex ? LKM_EXMODE : LKM_PRMODE);
2656 int ocfs2_dentry_lock(struct dentry *dentry, int ex)
2659 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2660 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2661 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2665 if (ocfs2_is_hard_readonly(osb)) {
2671 if (ocfs2_mount_local(osb))
2674 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
2681 void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
2683 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
2684 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
2685 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
2687 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
2688 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
2691 /* Reference counting of the dlm debug structure. We want this because
2692 * open references on the debug inodes can live on after a mount, so
2693 * we can't rely on the ocfs2_super to always exist. */
2694 static void ocfs2_dlm_debug_free(struct kref *kref)
2696 struct ocfs2_dlm_debug *dlm_debug;
2698 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
2703 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
2706 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
2709 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
2711 kref_get(&debug->d_refcnt);
2714 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
2716 struct ocfs2_dlm_debug *dlm_debug;
2718 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
2720 mlog_errno(-ENOMEM);
2724 kref_init(&dlm_debug->d_refcnt);
2725 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
2726 dlm_debug->d_locking_state = NULL;
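/*
 * Resulting lifetime rules, assuming the usual kref conventions: the
 * allocation above starts with one reference from kref_init(), every
 * additional user (for example each open of the locking_state file below)
 * takes its own with ocfs2_get_dlm_debug(), and the structure is freed by
 * whichever ocfs2_put_dlm_debug() drops the last one:
 *
 *	dlm_debug = ocfs2_new_dlm_debug();	// refcount == 1
 *	ocfs2_get_dlm_debug(dlm_debug);		// refcount == 2 (e.g. debugfs open)
 *	...
 *	ocfs2_put_dlm_debug(dlm_debug);		// refcount == 1
 *	ocfs2_put_dlm_debug(dlm_debug);		// refcount == 0 -> ocfs2_dlm_debug_free()
 */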
2731 /* Access to this is arbitrated for us via seq_file->lock. */
2732 struct ocfs2_dlm_seq_priv {
2733 struct ocfs2_dlm_debug *p_dlm_debug;
2734 struct ocfs2_lock_res p_iter_res;
2735 struct ocfs2_lock_res p_tmp_res;
2738 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
2739 struct ocfs2_dlm_seq_priv *priv)
2741 struct ocfs2_lock_res *iter, *ret = NULL;
2742 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
2744 assert_spin_locked(&ocfs2_dlm_tracking_lock);
2746 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
2747 /* discover the head of the list */
2748 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
2749 mlog(0, "End of list found, %p\n", ret);
2753 /* We track our "dummy" iteration lockres' by a NULL
2755 if (iter->l_ops != NULL) {
2764 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
2766 struct ocfs2_dlm_seq_priv *priv = m->private;
2767 struct ocfs2_lock_res *iter;
2769 spin_lock(&ocfs2_dlm_tracking_lock);
2770 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
2772 /* Since lockres' have the lifetime of their container
2773 * (which can be inodes, ocfs2_supers, etc) we want to
2774 * copy this out to a temporary lockres while still
2775 * under the spinlock. Obviously after this we can't
2776 * trust any pointers on the copy returned, but that's
2777 * ok as the information we want isn't typically held
2779 priv->p_tmp_res = *iter;
2780 iter = &priv->p_tmp_res;
2782 spin_unlock(&ocfs2_dlm_tracking_lock);
2787 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
2791 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
2793 struct ocfs2_dlm_seq_priv *priv = m->private;
2794 struct ocfs2_lock_res *iter = v;
2795 struct ocfs2_lock_res *dummy = &priv->p_iter_res;
2797 spin_lock(&ocfs2_dlm_tracking_lock);
2798 iter = ocfs2_dlm_next_res(iter, priv);
2799 list_del_init(&dummy->l_debug_list);
2801 list_add(&dummy->l_debug_list, &iter->l_debug_list);
2802 priv->p_tmp_res = *iter;
2803 iter = &priv->p_tmp_res;
2805 spin_unlock(&ocfs2_dlm_tracking_lock);
2811 * Version is used by debugfs.ocfs2 to determine the format being used
2814 * - Lock stats printed
2816 * - Max time in lock stats is in usecs (instead of nsecs)
2818 #define OCFS2_DLM_DEBUG_STR_VERSION 3
2819 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
2823 struct ocfs2_lock_res *lockres = v;
2828 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
2830 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
2831 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
2833 (unsigned int)ocfs2_get_dentry_lock_ino(lockres));
2835 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
2837 seq_printf(m, "%d\t"
2848 lockres->l_unlock_action,
2849 lockres->l_ro_holders,
2850 lockres->l_ex_holders,
2851 lockres->l_requested,
2852 lockres->l_blocking);
2854 /* Dump the raw LVB */
2855 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
2856 for(i = 0; i < DLM_LVB_LEN; i++)
2857 seq_printf(m, "0x%x\t", lvb[i]);
2859 #ifdef CONFIG_OCFS2_FS_STATS
2860 # define lock_num_prmode(_l) ((_l)->l_lock_prmode.ls_gets)
2861 # define lock_num_exmode(_l) ((_l)->l_lock_exmode.ls_gets)
2862 # define lock_num_prmode_failed(_l) ((_l)->l_lock_prmode.ls_fail)
2863 # define lock_num_exmode_failed(_l) ((_l)->l_lock_exmode.ls_fail)
2864 # define lock_total_prmode(_l) ((_l)->l_lock_prmode.ls_total)
2865 # define lock_total_exmode(_l) ((_l)->l_lock_exmode.ls_total)
2866 # define lock_max_prmode(_l) ((_l)->l_lock_prmode.ls_max)
2867 # define lock_max_exmode(_l) ((_l)->l_lock_exmode.ls_max)
2868 # define lock_refresh(_l) ((_l)->l_lock_refresh)
2870 # define lock_num_prmode(_l) (0)
2871 # define lock_num_exmode(_l) (0)
2872 # define lock_num_prmode_failed(_l) (0)
2873 # define lock_num_exmode_failed(_l) (0)
2874 # define lock_total_prmode(_l) (0ULL)
2875 # define lock_total_exmode(_l) (0ULL)
2876 # define lock_max_prmode(_l) (0)
2877 # define lock_max_exmode(_l) (0)
2878 # define lock_refresh(_l) (0)
2880 /* The following seq_printf was added in version 2 of this output */
2881 seq_printf(m, "%u\t"
2890 lock_num_prmode(lockres),
2891 lock_num_exmode(lockres),
2892 lock_num_prmode_failed(lockres),
2893 lock_num_exmode_failed(lockres),
2894 lock_total_prmode(lockres),
2895 lock_total_exmode(lockres),
2896 lock_max_prmode(lockres),
2897 lock_max_exmode(lockres),
2898 lock_refresh(lockres));
2901 seq_printf(m, "\n");
2905 static const struct seq_operations ocfs2_dlm_seq_ops = {
2906 .start = ocfs2_dlm_seq_start,
2907 .stop = ocfs2_dlm_seq_stop,
2908 .next = ocfs2_dlm_seq_next,
2909 .show = ocfs2_dlm_seq_show,
2912 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
2914 struct seq_file *seq = file->private_data;
2915 struct ocfs2_dlm_seq_priv *priv = seq->private;
2916 struct ocfs2_lock_res *res = &priv->p_iter_res;
2918 ocfs2_remove_lockres_tracking(res);
2919 ocfs2_put_dlm_debug(priv->p_dlm_debug);
2920 return seq_release_private(inode, file);
2923 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
2925 struct ocfs2_dlm_seq_priv *priv;
2926 struct ocfs2_super *osb;
2928 priv = __seq_open_private(file, &ocfs2_dlm_seq_ops, sizeof(*priv));
2930 mlog_errno(-ENOMEM);
2934 osb = inode->i_private;
2935 ocfs2_get_dlm_debug(osb->osb_dlm_debug);
2936 priv->p_dlm_debug = osb->osb_dlm_debug;
2937 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
2939 ocfs2_add_lockres_tracking(&priv->p_iter_res,
2945 static const struct file_operations ocfs2_dlm_debug_fops = {
2946 .open = ocfs2_dlm_debug_open,
2947 .release = ocfs2_dlm_debug_release,
2949 .llseek = seq_lseek,
2952 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
2955 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2957 dlm_debug->d_locking_state = debugfs_create_file("locking_state",
2959 osb->osb_debug_root,
2961 &ocfs2_dlm_debug_fops);
2962 if (!dlm_debug->d_locking_state) {
2965 "Unable to create locking state debugfs file.\n");
2969 ocfs2_get_dlm_debug(dlm_debug);
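/*
 * Usage note (path assumes the standard debugfs mount point): the file
 * created above is expected to appear as
 * /sys/kernel/debug/ocfs2/<uuid>/locking_state, and debugfs.ocfs2 parses it
 * according to OCFS2_DLM_DEBUG_STR_VERSION defined earlier.
 */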
2974 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
2976 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
2979 debugfs_remove(dlm_debug->d_locking_state);
2980 ocfs2_put_dlm_debug(dlm_debug);
2984 int ocfs2_dlm_init(struct ocfs2_super *osb)
2987 struct ocfs2_cluster_connection *conn = NULL;
2989 if (ocfs2_mount_local(osb)) {
2994 status = ocfs2_dlm_init_debug(osb);
3000 /* launch downconvert thread */
3001 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
3002 if (IS_ERR(osb->dc_task)) {
3003 status = PTR_ERR(osb->dc_task);
3004 osb->dc_task = NULL;
3009 /* for now, uuid == domain */
3010 status = ocfs2_cluster_connect(osb->osb_cluster_stack,
3011 osb->osb_cluster_name,
3012 strlen(osb->osb_cluster_name),
3014 strlen(osb->uuid_str),
3015 &lproto, ocfs2_do_node_down, osb,
3022 status = ocfs2_cluster_this_node(conn, &osb->node_num);
3026 "could not find this host's node number\n");
3027 ocfs2_cluster_disconnect(conn, 0);
3032 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
3033 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
3034 ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
3035 ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
3042 ocfs2_dlm_shutdown_debug(osb);
3044 kthread_stop(osb->dc_task);
3050 void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
3053 ocfs2_drop_osb_locks(osb);
3056 * Now that we have dropped all locks and ocfs2_dismount_volume()
3057 * has disabled recovery, the DLM won't be talking to us. It's
3058 * safe to tear things down before disconnecting the cluster.
3062 kthread_stop(osb->dc_task);
3063 osb->dc_task = NULL;
3066 ocfs2_lock_res_free(&osb->osb_super_lockres);
3067 ocfs2_lock_res_free(&osb->osb_rename_lockres);
3068 ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
3069 ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
3071 ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
3074 ocfs2_dlm_shutdown_debug(osb);
3077 static int ocfs2_drop_lock(struct ocfs2_super *osb,
3078 struct ocfs2_lock_res *lockres)
3081 unsigned long flags;
3084 /* We didn't get anywhere near actually using this lockres. */
3085 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
3088 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
3089 lkm_flags |= DLM_LKF_VALBLK;
3091 spin_lock_irqsave(&lockres->l_lock, flags);
3093 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
3094 "lockres %s, flags 0x%lx\n",
3095 lockres->l_name, lockres->l_flags);
3097 while (lockres->l_flags & OCFS2_LOCK_BUSY) {
3098 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
3099 "%u, unlock_action = %u\n",
3100 lockres->l_name, lockres->l_flags, lockres->l_action,
3101 lockres->l_unlock_action);
3103 spin_unlock_irqrestore(&lockres->l_lock, flags);
3105 /* XXX: Today we just wait on any busy
3106 * locks... Perhaps we need to cancel converts in the
3108 ocfs2_wait_on_busy_lock(lockres);
3110 spin_lock_irqsave(&lockres->l_lock, flags);
3113 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3114 if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
3115 lockres->l_level == DLM_LOCK_EX &&
3116 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3117 lockres->l_ops->set_lvb(lockres);
3120 if (lockres->l_flags & OCFS2_LOCK_BUSY)
3121 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
3123 if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
3124 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
3126 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
3127 spin_unlock_irqrestore(&lockres->l_lock, flags);
3131 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
3133 /* make sure we never get here while waiting for an ast to
3135 BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
3137 /* is this necessary? */
3138 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3139 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
3140 spin_unlock_irqrestore(&lockres->l_lock, flags);
3142 mlog(0, "lock %s\n", lockres->l_name);
3144 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
3146 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3147 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
3148 ocfs2_dlm_dump_lksb(&lockres->l_lksb);
3151 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
3154 ocfs2_wait_on_busy_lock(lockres);
3159 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3160 struct ocfs2_lock_res *lockres);
3162 /* Mark the lockres as being dropped. It will no longer be
3163 * queued if blocking, but we still may have to wait on it
3164 * being dequeued from the downconvert thread before we can consider
3167 * You can *not* attempt to call cluster_lock on this lockres anymore. */
3168 void ocfs2_mark_lockres_freeing(struct ocfs2_super *osb,
3169 struct ocfs2_lock_res *lockres)
3172 struct ocfs2_mask_waiter mw;
3173 unsigned long flags, flags2;
3175 ocfs2_init_mask_waiter(&mw);
3177 spin_lock_irqsave(&lockres->l_lock, flags);
3178 lockres->l_flags |= OCFS2_LOCK_FREEING;
3179 if (lockres->l_flags & OCFS2_LOCK_QUEUED && current == osb->dc_task) {
3181 * We know the downconvert is queued but not in progress
3182 * because we are the downconvert thread and processing
3183 * a different lock. So we can just remove the lock from the
3184 * queue. This is not only an optimization but also a way
3185 * to avoid the following deadlock:
3186 * ocfs2_dentry_post_unlock()
3187 * ocfs2_dentry_lock_put()
3188 * ocfs2_drop_dentry_lock()
3190 * ocfs2_evict_inode()
3191 * ocfs2_clear_inode()
3192 * ocfs2_mark_lockres_freeing()
3193 * ... blocks waiting for OCFS2_LOCK_QUEUED
3194 * since we are the downconvert thread which
3195 * should clear the flag.
3197 spin_unlock_irqrestore(&lockres->l_lock, flags);
3198 spin_lock_irqsave(&osb->dc_task_lock, flags2);
3199 list_del_init(&lockres->l_blocked_list);
3200 osb->blocked_lock_count--;
3201 spin_unlock_irqrestore(&osb->dc_task_lock, flags2);
3203 * Warn if we recurse into another post_unlock call. Strictly
3204 * speaking it isn't a problem but we need to be careful if
3205 * that happens (stack overflow, deadlocks, ...) so warn if
3206 * ocfs2 grows a path for which this can happen.
3208 WARN_ON_ONCE(lockres->l_ops->post_unlock);
3209 /* Since the lock is freeing we don't do much in the fn below */
3210 ocfs2_process_blocked_lock(osb, lockres);
3213 while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
3214 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
3215 spin_unlock_irqrestore(&lockres->l_lock, flags);
3217 mlog(0, "Waiting on lockres %s\n", lockres->l_name);
3219 status = ocfs2_wait_for_mask(&mw);
3223 spin_lock_irqsave(&lockres->l_lock, flags);
3225 spin_unlock_irqrestore(&lockres->l_lock, flags);
3228 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
3229 struct ocfs2_lock_res *lockres)
3233 ocfs2_mark_lockres_freeing(osb, lockres);
3234 ret = ocfs2_drop_lock(osb, lockres);
3239 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
3241 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
3242 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
3243 ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
3244 ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
3247 int ocfs2_drop_inode_locks(struct inode *inode)
3251 /* No need to call ocfs2_mark_lockres_freeing here -
3252 * ocfs2_clear_inode has done it for us. */
3254 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3255 &OCFS2_I(inode)->ip_open_lockres);
3261 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3262 &OCFS2_I(inode)->ip_inode_lockres);
3265 if (err < 0 && !status)
3268 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
3269 &OCFS2_I(inode)->ip_rw_lockres);
3272 if (err < 0 && !status)
3278 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
3281 assert_spin_locked(&lockres->l_lock);
3283 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
3285 if (lockres->l_level <= new_level) {
3286 mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
3287 "type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
3288 "block %d, pgen %d\n", lockres->l_name, lockres->l_level,
3289 new_level, list_empty(&lockres->l_blocked_list),
3290 list_empty(&lockres->l_mask_waiters), lockres->l_type,
3291 lockres->l_flags, lockres->l_ro_holders,
3292 lockres->l_ex_holders, lockres->l_action,
3293 lockres->l_unlock_action, lockres->l_requested,
3294 lockres->l_blocking, lockres->l_pending_gen);
3298 mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
3299 lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
3301 lockres->l_action = OCFS2_AST_DOWNCONVERT;
3302 lockres->l_requested = new_level;
3303 lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
3304 return lockres_set_pending(lockres);
3307 static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
3308 struct ocfs2_lock_res *lockres,
3311 unsigned int generation)
3314 u32 dlm_flags = DLM_LKF_CONVERT;
3316 mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
3317 lockres->l_level, new_level);
3320 dlm_flags |= DLM_LKF_VALBLK;
3322 ret = ocfs2_dlm_lock(osb->cconn,
3327 OCFS2_LOCK_ID_MAX_LEN - 1);
3328 lockres_clear_pending(lockres, generation, osb);
3330 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
3331 ocfs2_recover_from_dlm_error(lockres, 1);
3340 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
3341 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
3342 struct ocfs2_lock_res *lockres)
3344 assert_spin_locked(&lockres->l_lock);
3346 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
3347 /* If we're already trying to cancel a lock conversion
3348 * then just drop the spinlock and allow the caller to
3349 * requeue this lock. */
3350 mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
3354 /* were we in a convert when we got the bast fire? */
3355 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
3356 lockres->l_action != OCFS2_AST_DOWNCONVERT);
3357 /* set things up for the unlockast to know to just
3358 * clear out the ast_action and unset busy, etc. */
3359 lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3361 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3362 "lock %s, invalid flags: 0x%lx\n",
3363 lockres->l_name, lockres->l_flags);
3365 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3370 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3371 struct ocfs2_lock_res *lockres)
3375 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3378 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3379 ocfs2_recover_from_dlm_error(lockres, 0);
3382 mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
3387 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3388 struct ocfs2_lock_res *lockres,
3389 struct ocfs2_unblock_ctl *ctl)
3391 unsigned long flags;
3399 spin_lock_irqsave(&lockres->l_lock, flags);
3403 * Is it still blocking? If not, we have no more work to do.
3405 if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
3406 BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
3407 spin_unlock_irqrestore(&lockres->l_lock, flags);
3412 if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3414 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3415 * exists entirely for one reason - another thread has set
3416 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3418 * If we do ocfs2_cancel_convert() before the other thread
3419 * calls dlm_lock(), our cancel will do nothing. We will
3420 * get no ast, and we will have no way of knowing the
3421 * cancel failed. Meanwhile, the other thread will call
3422 * into dlm_lock() and wait...forever.
3424 * Why forever? Because another node has asked for the
3425 * lock first; that's why we're here in unblock_lock().
3427 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3428 * set, we just requeue the unblock. Only when the other
3429 * thread has called dlm_lock() and cleared PENDING will
3430 * we then cancel their request.
3432 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3433 * at the same time they set OCFS2_LOCK_BUSY. They must
3434 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
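 *
 * A sketch of that caller-side protocol, mirroring the downconvert path
 * above (ocfs2_prepare_downconvert() under the spinlock, then
 * ocfs2_downconvert_lock()); error handling omitted:
 *
 *	spin_lock_irqsave(&lockres->l_lock, flags);
 *	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
 *	gen = lockres_set_pending(lockres);	// sets OCFS2_LOCK_PENDING
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *
 *	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
 *			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
 *	lockres_clear_pending(lockres, gen, osb);	// a cancel can work now
 */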
3436 if (lockres->l_flags & OCFS2_LOCK_PENDING) {
3437 mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
3443 ret = ocfs2_prepare_cancel_convert(osb, lockres);
3444 spin_unlock_irqrestore(&lockres->l_lock, flags);
3446 ret = ocfs2_cancel_convert(osb, lockres);
3454 * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
3455 * set when the ast is received for an upconvert just before the
3456 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
3457 * on the heels of the ast, we want to delay the downconvert just
3458 * enough to allow the up requestor to do its task. Because this
3459 * lock is in the blocked queue, the lock will be downconverted
3460 * as soon as the requestor is done with the lock.
3462 if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
3466 * How can we block and yet be at NL? We were trying to upconvert
3467 * from NL and got canceled. The code comes back here, and now
3468 * we notice and clear BLOCKING.
3470 if (lockres->l_level == DLM_LOCK_NL) {
3471 BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
3472 mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
3473 lockres->l_blocking = DLM_LOCK_NL;
3474 lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
3475 spin_unlock_irqrestore(&lockres->l_lock, flags);
3479 /* if we're blocking an exclusive and we have *any* holders,
3481 if ((lockres->l_blocking == DLM_LOCK_EX)
3482 && (lockres->l_ex_holders || lockres->l_ro_holders)) {
3483 mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
3484 lockres->l_name, lockres->l_ex_holders,
3485 lockres->l_ro_holders);
3489 /* If it's a PR we're blocking, then only
3490 * requeue if we've got any EX holders */
3491 if (lockres->l_blocking == DLM_LOCK_PR &&
3492 lockres->l_ex_holders) {
3493 mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
3494 lockres->l_name, lockres->l_ex_holders);
3499 * Can we get a lock in this state if the holder counts are
3500 * zero? The metadata unblock code used to check this.
3502 if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3503 && (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
3504 mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
3509 new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3511 if (lockres->l_ops->check_downconvert
3512 && !lockres->l_ops->check_downconvert(lockres, new_level)) {
3513 mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
3518 /* If we get here, then we know that there are no more
3519 * incompatible holders (and anyone asking for an incompatible
3520 * lock is blocked). We can now downconvert the lock */
3521 if (!lockres->l_ops->downconvert_worker)
3524 /* Some lockres types want to do a bit of work before
3525 * downconverting a lock. Allow that here. The worker function
3526 * may sleep, so we save off a copy of what we're blocking as
3527 * it may change while we're not holding the spin lock. */
3528 blocking = lockres->l_blocking;
3529 level = lockres->l_level;
3530 spin_unlock_irqrestore(&lockres->l_lock, flags);
3532 ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3534 if (ctl->unblock_action == UNBLOCK_STOP_POST) {
3535 mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
3540 spin_lock_irqsave(&lockres->l_lock, flags);
3541 if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
3542 /* If this changed underneath us, then we can't drop
3544 mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
3545 "Recheck\n", lockres->l_name, blocking,
3546 lockres->l_blocking, level, lockres->l_level);
3553 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3554 if (lockres->l_level == DLM_LOCK_EX)
3558 * We only set the lvb if the lock has been fully
3559 * refreshed - otherwise we risk setting stale
3560 * data. If we don't set it, there's no need to actually clear
3561 * out the lvb here as its value is still valid.
3563 if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3564 lockres->l_ops->set_lvb(lockres);
3567 gen = ocfs2_prepare_downconvert(lockres, new_level);
3568 spin_unlock_irqrestore(&lockres->l_lock, flags);
3569 ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3578 spin_unlock_irqrestore(&lockres->l_lock, flags);
3584 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3587 struct inode *inode;
3588 struct address_space *mapping;
3589 struct ocfs2_inode_info *oi;
3591 inode = ocfs2_lock_res_inode(lockres);
3592 mapping = inode->i_mapping;
3594 if (S_ISDIR(inode->i_mode)) {
3595 oi = OCFS2_I(inode);
3596 oi->ip_dir_lock_gen++;
3597 mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
3601 if (!S_ISREG(inode->i_mode))
3605 * We need this before the filemap_fdatawrite() so that it can
3606 * transfer the dirty bit from the PTE to the
3607 * page. Unfortunately this means that even for EX->PR
3608 * downconverts, we'll lose our mappings and have to build
3611 unmap_mapping_range(mapping, 0, 0, 0);
3613 if (filemap_fdatawrite(mapping)) {
3614 mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3615 (unsigned long long)OCFS2_I(inode)->ip_blkno);
3617 sync_mapping_buffers(mapping);
3618 if (blocking == DLM_LOCK_EX) {
3619 truncate_inode_pages(mapping, 0);
3621 /* We only need to wait on the I/O if we're not also
3622 * truncating pages because truncate_inode_pages waits
3623 * for us above. We don't truncate pages if we're
3624 * blocking anything < EXMODE because we want to keep
3625 * them around in that case. */
3626 filemap_fdatawait(mapping);
3630 return UNBLOCK_CONTINUE;
3633 static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
3634 struct ocfs2_lock_res *lockres,
3637 int checkpointed = ocfs2_ci_fully_checkpointed(ci);
3639 BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3640 BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3645 ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
3649 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3652 struct inode *inode = ocfs2_lock_res_inode(lockres);
3654 return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
3657 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3659 struct inode *inode = ocfs2_lock_res_inode(lockres);
3661 __ocfs2_stuff_meta_lvb(inode);
3665 * Does the final reference drop on our dentry lock. Right now this
3666 * happens in the downconvert thread, but we could choose to simplify the
3667 * dlmglue API and push these off to the ocfs2_wq in the future.
3669 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3670 struct ocfs2_lock_res *lockres)
3672 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3673 ocfs2_dentry_lock_put(osb, dl);
3677 * d_delete() matching dentries before the lock downconvert.
3679 * At this point, any process waiting to destroy the
3680 * dentry_lock due to last ref count is stopped by the
3681 * OCFS2_LOCK_QUEUED flag.
3683 * We have two potential problems
3685 * 1) If we do the last reference drop on our dentry_lock (via dput)
3686 * we'll wind up in ocfs2_release_dentry_lock(), waiting on
3687 * the downconvert to finish. Instead we take an elevated
3688 * reference and push the drop until after we've completed our
3689 * unblock processing.
3691 * 2) There might be another process with a final reference,
3692 * waiting on us to finish processing. If this is the case, we
3693 * detect it and exit out - there are no more dentries anyway.
3695 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3698 struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3699 struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3700 struct dentry *dentry;
3701 unsigned long flags;
3705 * This node is blocking another node from getting a read
3706 * lock. This happens when we've renamed within a
3707 * directory. We've forced the other nodes to d_delete(), but
3708 * we never actually dropped our lock because it's still
3709 * valid. The downconvert code will retain a PR for this node,
3710 * so there's no further work to do.
3712 if (blocking == DLM_LOCK_PR)
3713 return UNBLOCK_CONTINUE;
3716 * Mark this inode as potentially orphaned. The code in
3717 * ocfs2_delete_inode() will figure out whether it actually
3718 * needs to be freed or not.
3720 spin_lock(&oi->ip_lock);
3721 oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3722 spin_unlock(&oi->ip_lock);
3725 * Yuck. We need to make sure however that the check of
3726 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3727 * respect to a reference decrement or the setting of that
3730 spin_lock_irqsave(&lockres->l_lock, flags);
3731 spin_lock(&dentry_attach_lock);
3732 if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
3737 spin_unlock(&dentry_attach_lock);
3738 spin_unlock_irqrestore(&lockres->l_lock, flags);
3740 mlog(0, "extra_ref = %d\n", extra_ref);
3743 * We have a process waiting on us in ocfs2_dentry_iput(),
3744 * which means we can't have any more outstanding
3745 * aliases. There's no need to do any more work.
3748 return UNBLOCK_CONTINUE;
3750 spin_lock(&dentry_attach_lock);
3752 dentry = ocfs2_find_local_alias(dl->dl_inode,
3753 dl->dl_parent_blkno, 1);
3756 spin_unlock(&dentry_attach_lock);
3758 if (S_ISDIR(dl->dl_inode->i_mode))
3759 shrink_dcache_parent(dentry);
3761 mlog(0, "d_delete(%pd);\n", dentry);
3764 * The following dcache calls may do an
3765 * iput(). Normally we don't want that from the
3766 * downconverting thread, but in this case it's ok
3767 * because the requesting node already has an
3768 * exclusive lock on the inode, so it can't be queued
3769 * for a downconvert.
3774 spin_lock(&dentry_attach_lock);
3776 spin_unlock(&dentry_attach_lock);
3779 * If we are the last holder of this dentry lock, there is no
3780 * reason to downconvert so skip straight to the unlock.
3782 if (dl->dl_count == 1)
3783 return UNBLOCK_STOP_POST;
3785 return UNBLOCK_CONTINUE_POST;
3788 static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
3791 struct ocfs2_refcount_tree *tree =
3792 ocfs2_lock_res_refcount_tree(lockres);
3794 return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
3797 static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
3800 struct ocfs2_refcount_tree *tree =
3801 ocfs2_lock_res_refcount_tree(lockres);
3803 ocfs2_metadata_cache_purge(&tree->rf_ci);
3805 return UNBLOCK_CONTINUE;
3808 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
3810 struct ocfs2_qinfo_lvb *lvb;
3811 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
3812 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3813 oinfo->dqi_gi.dqi_type);
3815 lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3816 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
3817 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
3818 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
3819 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
3820 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
3821 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
3822 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
3825 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3827 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3828 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3829 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3831 if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3832 ocfs2_cluster_unlock(osb, lockres, level);
3835 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3837 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3838 oinfo->dqi_gi.dqi_type);
3839 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3840 struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3841 struct buffer_head *bh = NULL;
3842 struct ocfs2_global_disk_dqinfo *gdinfo;
3845 if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
3846 lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3847 info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3848 info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3849 oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3850 oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3851 oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3852 oinfo->dqi_gi.dqi_free_entry =
3853 be32_to_cpu(lvb->lvb_free_entry);
3855 status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
3856 oinfo->dqi_giblk, &bh);
3861 gdinfo = (struct ocfs2_global_disk_dqinfo *)
3862 (bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3863 info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3864 info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3865 oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3866 oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3867 oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3868 oinfo->dqi_gi.dqi_free_entry =
3869 le32_to_cpu(gdinfo->dqi_free_entry);
3871 ocfs2_track_lock_refresh(lockres);
3878 /* Lock quota info, this function expects at least shared lock on the quota file
3879 * so that we can safely refresh quota info from disk. */
3880 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3882 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3883 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3884 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3887 /* On RO devices, locking really isn't needed... */
3888 if (ocfs2_is_hard_readonly(osb)) {
3893 if (ocfs2_mount_local(osb))
3896 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3901 if (!ocfs2_should_refresh_lock_res(lockres))
3903 /* OK, we have the lock but we need to refresh the quota info */
3904 status = ocfs2_refresh_qinfo(oinfo);
3906 ocfs2_qinfo_unlock(oinfo, ex);
3907 ocfs2_complete_lock_res_refresh(lockres, status);
3912 int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
3915 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3916 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3917 struct ocfs2_super *osb = lockres->l_priv;
3920 if (ocfs2_is_hard_readonly(osb))
3923 if (ocfs2_mount_local(osb))
3926 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3933 void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
3935 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3936 struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
3937 struct ocfs2_super *osb = lockres->l_priv;
3939 if (!ocfs2_mount_local(osb))
3940 ocfs2_cluster_unlock(osb, lockres, level);
3943 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
3944 struct ocfs2_lock_res *lockres)
3947 struct ocfs2_unblock_ctl ctl = {0, 0,};
3948 unsigned long flags;
3950 /* Our reference to the lockres in this function can be
3951 * considered valid until we remove the OCFS2_LOCK_QUEUED
3955 BUG_ON(!lockres->l_ops);
3957 mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
3959 /* Detect whether a lock has been marked as going away while
3960 * the downconvert thread was processing other things. A lock can
3961 * still be marked with OCFS2_LOCK_FREEING after this check,
3962 * but short circuiting here will still save us some
3964 spin_lock_irqsave(&lockres->l_lock, flags);
3965 if (lockres->l_flags & OCFS2_LOCK_FREEING)
3967 spin_unlock_irqrestore(&lockres->l_lock, flags);
3969 status = ocfs2_unblock_lock(osb, lockres, &ctl);
3973 spin_lock_irqsave(&lockres->l_lock, flags);
3975 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
3976 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
3978 ocfs2_schedule_blocked_lock(osb, lockres);
3980 mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
3981 ctl.requeue ? "yes" : "no");
3982 spin_unlock_irqrestore(&lockres->l_lock, flags);
3984 if (ctl.unblock_action != UNBLOCK_CONTINUE
3985 && lockres->l_ops->post_unlock)
3986 lockres->l_ops->post_unlock(osb, lockres);
3989 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
3990 struct ocfs2_lock_res *lockres)
3992 unsigned long flags;
3994 assert_spin_locked(&lockres->l_lock);
3996 if (lockres->l_flags & OCFS2_LOCK_FREEING) {
3997 /* Do not schedule a lock for downconvert when it's on
3998 * the way to destruction - any nodes wanting access
3999 * to the resource will get it soon. */
4000 mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
4001 lockres->l_name, lockres->l_flags);
4005 lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
4007 spin_lock_irqsave(&osb->dc_task_lock, flags);
4008 if (list_empty(&lockres->l_blocked_list)) {
4009 list_add_tail(&lockres->l_blocked_list,
4010 &osb->blocked_lock_list);
4011 osb->blocked_lock_count++;
4013 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4016 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4018 unsigned long processed;
4019 unsigned long flags;
4020 struct ocfs2_lock_res *lockres;
4022 spin_lock_irqsave(&osb->dc_task_lock, flags);
4023 /* grab this early so we know to try again if a state change and
4024 * wake happens part-way through our work */
4025 osb->dc_work_sequence = osb->dc_wake_sequence;
4027 processed = osb->blocked_lock_count;
4029 BUG_ON(list_empty(&osb->blocked_lock_list));
4031 lockres = list_entry(osb->blocked_lock_list.next,
4032 struct ocfs2_lock_res, l_blocked_list);
4033 list_del_init(&lockres->l_blocked_list);
4034 osb->blocked_lock_count--;
4035 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4040 ocfs2_process_blocked_lock(osb, lockres);
4042 spin_lock_irqsave(&osb->dc_task_lock, flags);
4044 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4047 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
4050 unsigned long flags;
4052 spin_lock_irqsave(&osb->dc_task_lock, flags);
4053 if (list_empty(&osb->blocked_lock_list))
4056 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4060 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
4062 int should_wake = 0;
4063 unsigned long flags;
4065 spin_lock_irqsave(&osb->dc_task_lock, flags);
4066 if (osb->dc_work_sequence != osb->dc_wake_sequence)
4068 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4073 static int ocfs2_downconvert_thread(void *arg)
4076 struct ocfs2_super *osb = arg;
4078 /* only quit once we've been asked to stop and there is no more
4080 while (!(kthread_should_stop() &&
4081 ocfs2_downconvert_thread_lists_empty(osb))) {
4083 wait_event_interruptible(osb->dc_event,
4084 ocfs2_downconvert_thread_should_wake(osb) ||
4085 kthread_should_stop());
4087 mlog(0, "downconvert_thread: awoken\n");
4089 ocfs2_downconvert_thread_do_work(osb);
4092 osb->dc_task = NULL;
4096 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
4098 unsigned long flags;
4100 spin_lock_irqsave(&osb->dc_task_lock, flags);
4101 /* make sure the downconvert thread gets a swipe at whatever changes
4102 * the caller may have made to the blocked lock queues and lockres state */
4103 osb->dc_wake_sequence++;
4104 spin_unlock_irqrestore(&osb->dc_task_lock, flags);
4105 wake_up(&osb->dc_event);
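/*
 * Worked example of the wake/work sequence pair used above, showing why a
 * wakeup that races with the downconvert thread is not lost:
 *
 *	dc_wake_sequence == dc_work_sequence == 0	(thread idle)
 *	caller:  dc_wake_sequence = 1; wake_up()
 *	thread:  dc_work_sequence = 1; starts draining blocked_lock_list
 *	caller:  dc_wake_sequence = 2; wake_up()	(thread isn't sleeping yet)
 *	thread:  finishes the pass, sees work(1) != wake(2) in
 *		 ocfs2_downconvert_thread_should_wake(), and runs another
 *		 pass instead of sleeping through the missed wakeup.
 */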