// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2010 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"
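
/* kref release function: frees a dlm_callback once its last reference is dropped */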
void dlm_release_callback(struct kref *ref)
{
	struct dlm_callback *cb = container_of(ref, struct dlm_callback, ref);

	dlm_free_cb(cb);
}
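
/* repoint *from to to, dropping the old reference and taking a new one */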
void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}
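
/* drop every queued callback on the lkb and reset its callback state */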
void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
{
	struct dlm_callback *cb, *safe;

	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
		list_del(&cb->list);
		kref_put(&cb->ref, dlm_release_callback);
	}

	lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;

	/* invalidate */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
	lkb->lkb_last_bast_mode = -1;
}
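
/*
 * Queue a new callback on the lkb, filtering out redundant basts.  Returns
 * DLM_ENQUEUE_CALLBACK_NEED_SCHED when the lkb needs to be scheduled on the
 * callback workqueue, _SUCCESS when the callback was queued (or skipped),
 * and _FAILURE when allocation fails.
 */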
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;

	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
		 * Don't even add a bast if the callback just before it
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}
	}

	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}

	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

out:
	return rv;
}
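
/* pop the oldest queued callback and report whether it was the last one */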
int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* the oldest undelivered cb is the first entry on the callbacks list */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	/* remove it from callbacks so the others shift down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}
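
/*
 * Entry point for queueing a completion or blocking callback on an lkb.
 * User-space locks are handed to dlm_user_add_ast(); kernel locks are queued
 * and, if needed, scheduled on the lockspace callback workqueue (or parked on
 * ls_cb_delay while callbacks are suspended).
 */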
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	int rv;

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
	spin_unlock(&lkb->lkb_cb_lock);
}
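
/*
 * Workqueue function: delivers every queued callback for the lkb, then clears
 * DLM_IFL_CB_PENDING and drops the reference taken in dlm_add_cb().
 */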
void dlm_callback_work(struct work_struct *work)
{
	struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback *cb;
	int rv;

	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		goto out;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}
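
/* create the per-lockspace callback workqueue */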
int dlm_callback_start(struct dlm_ls *ls)
{
	ls->ls_callback_wq = alloc_workqueue("dlm_callback",
					     WQ_HIGHPRI | WQ_MEM_RECLAIM, 0);
	if (!ls->ls_callback_wq) {
		log_print("can't start dlm_callback workqueue");
		return -ENOMEM;
	}
	return 0;
}

void dlm_callback_stop(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq)
		destroy_workqueue(ls->ls_callback_wq);
}
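
/*
 * Suspend callback delivery: newly scheduled lkbs are parked on ls_cb_delay
 * (via LSFL_CB_DELAY) and any in-flight callback work is flushed.
 */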
void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
}

#define MAX_CB_QUEUE 25
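
/*
 * Resume callback delivery: requeue the delayed lkbs onto the callback
 * workqueue in batches of MAX_CB_QUEUE, rescheduling between batches.
 */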
void dlm_callback_resume(struct dlm_ls *ls)
{
	struct dlm_lkb *lkb, *safe;
	int count = 0, sum = 0;
	bool empty;

	if (!ls->ls_callback_wq)
		return;

more:
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		count++;
		if (count == MAX_CB_QUEUE)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
		count = 0;
		cond_resched();
		goto more;
	}

	if (sum)
		log_rinfo(ls, "%s %d", __func__, sum);
}