// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright 2004-2011 Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/dlm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/sched/signal.h>

#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "recovery.h"
#include "util.h"
#include "sys.h"
#include "trace_gfs2.h"

/**
 * gfs2_update_stats - Update time based stats
 * @s: The stats to update (local or global)
 * @index: The index inside @s
 * @sample: New data to include
 */
static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
				     s64 sample)
{
	/*
	 * @delta is the difference between the current rtt sample and the
	 * running average srtt. We add 1/8 of that to the srtt in order to
	 * update the current srtt estimate. The variance estimate is a bit
	 * more complicated. We subtract the current variance estimate from
	 * the abs value of the @delta and add 1/4 of that to the running
	 * total. That's equivalent to 3/4 of the current variance
	 * estimate plus 1/4 of the abs of @delta.
	 *
	 * Note that the index points at the array entry containing the
	 * smoothed mean value, and the variance is always in the following
	 * entry.
	 *
	 * Reference: TCP/IP Illustrated, vol 2, p. 831,832
	 * All times are in units of integer nanoseconds. Unlike the TCP/IP
	 * case, they are not scaled fixed point.
	 */
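	/*
	 * Illustrative example (hypothetical numbers, not from the source):
	 * with a current srtt of 8000ns and a new sample of 16000ns,
	 * delta = 8000ns, so the new srtt is 8000 + (8000 >> 3) = 9000ns,
	 * and the variance entry moves 1/4 of the way towards abs(delta).
	 */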

	s64 delta = sample - s->stats[index];
	s->stats[index] += (delta >> 3);
	index++;
	s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
}

/**
 * gfs2_update_reply_times - Update locking statistics
 * @gl: The glock to update
 *
 * This assumes that gl->gl_dstamp has been set earlier.
 *
 * The rtt (lock round trip time) is an estimate of the time
 * taken to perform a dlm lock request. We update it on each
 * reply from the dlm.
 *
 * The blocking flag is set on the glock for all dlm requests
 * which may potentially block due to lock requests from other nodes.
 * DLM requests where the current lock state is exclusive, where the
 * requested state is null (or unlocked), or where the TRY or
 * TRY_1CB flags are set are classified as non-blocking. All
 * other DLM requests are counted as (potentially) blocking.
 */
static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
			 GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
	s64 rtt;

	preempt_disable();
	rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, index, rtt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], index, rtt);	/* Global */
	preempt_enable();

	trace_gfs2_glock_lock_time(gl, rtt);
}

/**
 * gfs2_update_request_times - Update locking statistics
 * @gl: The glock to update
 *
 * The irt (lock inter-request times) measures the average time
 * between requests to the dlm. It is updated immediately before
 * each dlm call.
 */

static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
	struct gfs2_pcpu_lkstats *lks;
	const unsigned gltype = gl->gl_name.ln_type;
	ktime_t dstamp;
	s64 irt;

	preempt_disable();
	dstamp = gl->gl_dstamp;
	gl->gl_dstamp = ktime_get_real();
	irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
	lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
	gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt);		/* Local */
	gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt);	/* Global */
	preempt_enable();
}

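/*
 * gdlm_ast - completion callback (AST) from the dlm, invoked when a
 * dlm_lock() or dlm_unlock() request submitted for this glock finishes.
 * The result of the request is found in gl->gl_lksb.sb_status.
 */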
static void gdlm_ast(void *arg)
{
	struct gfs2_glock *gl = arg;
	unsigned ret = gl->gl_state;

	/* If the glock is dead, we only react to a dlm_unlock() reply. */
	if (__lockref_is_dead(&gl->gl_lockref) &&
	    gl->gl_lksb.sb_status != -DLM_EUNLOCK)
		return;

	gfs2_update_reply_times(gl);
	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

	if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
		memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);

	switch (gl->gl_lksb.sb_status) {
	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
		if (gl->gl_ops->go_unlocked)
			gl->gl_ops->go_unlocked(gl);
		gfs2_glock_free(gl);
		return;
	case -DLM_ECANCEL: /* Cancel while getting lock */
		ret |= LM_OUT_CANCELED;
		goto out;
	case -EAGAIN: /* Try lock fails */
	case -EDEADLK: /* Deadlock detected */
		goto out;
	case -ETIMEDOUT: /* Canceled due to timeout */
		ret |= LM_OUT_ERROR;
		goto out;
	case 0: /* Success */
		break;
	default: /* Something unexpected */
		BUG();
	}

	ret = gl->gl_req;
	if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
		if (gl->gl_req == LM_ST_SHARED)
			ret = LM_ST_DEFERRED;
		else if (gl->gl_req == LM_ST_DEFERRED)
			ret = LM_ST_SHARED;
		else
			BUG();
	}

	/*
	 * The GLF_INITIAL flag is initially set for new glocks. Upon the
	 * first successful new (non-conversion) request, we clear this flag to
	 * indicate that a DLM lock exists and that gl->gl_lksb.sb_lkid is the
	 * identifier to use for identifying it.
	 *
	 * Any failed initial requests do not create a DLM lock, so we ignore
	 * the gl->gl_lksb.sb_lkid values that come with such requests.
	 */

	clear_bit(GLF_INITIAL, &gl->gl_flags);
	gfs2_glock_complete(gl, ret);
	return;
out:
	if (test_bit(GLF_INITIAL, &gl->gl_flags))
		gl->gl_lksb.sb_lkid = 0;
	gfs2_glock_complete(gl, ret);
}

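/*
 * gdlm_bast - blocking callback (BAST) from the dlm, invoked when a lock
 * held by this node is blocking a request from another node. @mode is the
 * DLM mode the other node has requested; we ask glock.c to demote our lock
 * to a compatible state.
 */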
static void gdlm_bast(void *arg, int mode)
{
	struct gfs2_glock *gl = arg;

	if (__lockref_is_dead(&gl->gl_lockref))
		return;

	switch (mode) {
	case DLM_LOCK_EX:
		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
		break;
	case DLM_LOCK_CW:
		gfs2_glock_cb(gl, LM_ST_DEFERRED);
		break;
	case DLM_LOCK_PR:
		gfs2_glock_cb(gl, LM_ST_SHARED);
		break;
	default:
		fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
		BUG();
	}
}

/* convert gfs lock-state to dlm lock-mode */

static int make_mode(struct gfs2_sbd *sdp, const unsigned int lmstate)
{
	switch (lmstate) {
	case LM_ST_UNLOCKED:
		return DLM_LOCK_NL;
	case LM_ST_EXCLUSIVE:
		return DLM_LOCK_EX;
	case LM_ST_DEFERRED:
		return DLM_LOCK_CW;
	case LM_ST_SHARED:
		return DLM_LOCK_PR;
	}
	fs_err(sdp, "unknown LM state %d\n", lmstate);
	BUG();
	return -1;
}

/* Taken from fs/dlm/lock.c. */

static bool middle_conversion(int cur, int req)
{
	return (cur == DLM_LOCK_PR && req == DLM_LOCK_CW) ||
	       (cur == DLM_LOCK_CW && req == DLM_LOCK_PR);
}

static bool down_conversion(int cur, int req)
{
	return !middle_conversion(cur, req) && req < cur;
}
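/*
 * Note on mode ordering (based on the dlm's compatibility matrix): PR and
 * CW are mutually incompatible, so converting between them is neither a
 * pure upgrade nor a pure downgrade even though CW has the lower numeric
 * value; such "middle" conversions are excluded from down_conversion().
 */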

static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
		      const int cur, const int req)
{
	u32 lkf = 0;

	if (gl->gl_lksb.sb_lvbptr)
		lkf |= DLM_LKF_VALBLK;

	if (gfs_flags & LM_FLAG_TRY)
		lkf |= DLM_LKF_NOQUEUE;

	if (gfs_flags & LM_FLAG_TRY_1CB) {
		lkf |= DLM_LKF_NOQUEUE;
		lkf |= DLM_LKF_NOQUEUEBAST;
	}

	if (gfs_flags & LM_FLAG_ANY) {
		if (req == DLM_LOCK_PR)
			lkf |= DLM_LKF_ALTCW;
		else if (req == DLM_LOCK_CW)
			lkf |= DLM_LKF_ALTPR;
		else
			BUG();
	}

	if (!test_bit(GLF_INITIAL, &gl->gl_flags)) {
		lkf |= DLM_LKF_CONVERT;

		/*
		 * The DLM_LKF_QUECVT flag needs to be set for "first come,
		 * first served" semantics, but it must only be set for
		 * "upward" lock conversions or else DLM will reject the
		 * request as invalid.
		 */
		if (!down_conversion(cur, req))
			lkf |= DLM_LKF_QUECVT;
	}

	return lkf;
}

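/*
 * gfs2_reverse_hex - write @value in hex, ending at @c and working
 * backwards (least significant digit first). The caller guarantees enough
 * space in front of @c; a value of zero leaves a single '0' at @c.
 */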
static void gfs2_reverse_hex(char *c, u64 value)
{
	*c = '0';
	while (value) {
		*c-- = hex_asc[value & 0x0f];
		value >>= 4;
	}
}

static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
		     unsigned int flags)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	int cur, req;
	u32 lkf;
	char strname[GDLM_STRNAME_BYTES] = "";
	int error;

	cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
	req = make_mode(gl->gl_name.ln_sbd, req_state);
	lkf = make_flags(gl, flags, cur, req);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
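	/*
	 * For an initial request, build the resource name: 8 hex digits of
	 * glock type followed by 16 hex digits of block number, space
	 * padded (the same "%8x%16x" layout that sync_lock() uses below).
	 */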
	if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
		memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
		strname[GDLM_STRNAME_BYTES - 1] = '\0';
		gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
		gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
		gl->gl_dstamp = ktime_get_real();
	} else {
		gfs2_update_request_times(gl);
	}
	/*
	 * Submit the actual lock request.
	 */

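	/*
	 * dlm_lock() returns -EBUSY while a previous operation on this lock
	 * is still in progress; back off briefly and retry.
	 */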
again:
	error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
	if (error == -EBUSY) {
		msleep(20);
		goto again;
	}
	return error;
}

static void gdlm_put_lock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	BUG_ON(!__lockref_is_dead(&gl->gl_lockref));

	if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
		gfs2_glock_free(gl);
		return;
	}

	clear_bit(GLF_BLOCKING, &gl->gl_flags);
	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
	gfs2_update_request_times(gl);

	/* don't want to call dlm if we've unmounted the lock protocol */
	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
		gfs2_glock_free(gl);
		return;
	}

	/*
	 * When the lockspace is released, all remaining glocks will be
	 * unlocked automatically. This is more efficient than unlocking them
	 * individually, but when the lock is held in DLM_LOCK_EX or
	 * DLM_LOCK_PW mode, the lock value block (LVB) will be lost.
	 */

	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
	    (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) {
		gfs2_glock_free_later(gl);
		return;
	}

again:
	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
			   NULL, gl);
	if (error == -EBUSY) {
		msleep(20);
		goto again;
	}

	if (error) {
		fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
		       gl->gl_name.ln_type,
		       (unsigned long long)gl->gl_name.ln_number, error);
	}
}

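/*
 * gdlm_cancel - cancel an in-flight lock request. DLM_LKF_CANCEL aborts a
 * pending request; the outcome is reported back through gdlm_ast() with an
 * sb_status of -DLM_ECANCEL.
 */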
static void gdlm_cancel(struct gfs2_glock *gl)
{
	struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
	dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
}

/*
 * dlm/gfs2 recovery coordination using dlm_recover callbacks
 *
 *  0. gfs2 checks for another cluster node withdraw, needing journal replay
 *  1. dlm_controld sees lockspace members change
 *  2. dlm_controld blocks dlm-kernel locking activity
 *  3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
 *  4. dlm_controld starts and finishes its own user level recovery
 *  5. dlm_controld starts dlm-kernel dlm_recoverd to do kernel recovery
 *  6. dlm_recoverd notifies gfs2 of failed nodes (recover_slot)
 *  7. dlm_recoverd does its own lock recovery
 *  8. dlm_recoverd unblocks dlm-kernel locking activity
 *  9. dlm_recoverd notifies gfs2 when done (recover_done with new generation)
 * 10. gfs2_control updates control_lock lvb with new generation and jid bits
 * 11. gfs2_control enqueues journals for gfs2_recover to recover (maybe none)
 * 12. gfs2_recover dequeues and recovers journals of failed nodes
 * 13. gfs2_recover provides recovery results to gfs2_control (recovery_result)
 * 14. gfs2_control updates control_lock lvb jid bits for recovered journals
 * 15. gfs2_control unblocks normal locking when all journals are recovered
 *
 * - failures during recovery
 *
 * recover_prep() may set BLOCK_LOCKS (step 3) again before gfs2_control
 * clears BLOCK_LOCKS (step 15), e.g. another node fails while still
 * recovering for a prior failure. gfs2_control needs a way to detect
 * this so it can leave BLOCK_LOCKS set in step 15. This is managed using
 * the recover_block and recover_start values.
 *
 * recover_done() provides a new lockspace generation number each time it
 * is called (step 9). This generation number is saved as recover_start.
 * When recover_prep() is called, it sets BLOCK_LOCKS and sets
 * recover_block = recover_start. So, while recover_block is equal to
 * recover_start, BLOCK_LOCKS should remain set. (recover_spin must
 * be held around the BLOCK_LOCKS/recover_block/recover_start logic.)
 *
 * - more specific gfs2 steps in sequence above
 *
 *  3. recover_prep sets BLOCK_LOCKS and sets recover_block = recover_start
 *  6. recover_slot records any failed jids (maybe none)
 *  9. recover_done sets recover_start = new generation number
 * 10. gfs2_control sets control_lock lvb = new gen + bits for failed jids
 * 12. gfs2_recover does journal recoveries for failed jids identified above
 * 14. gfs2_control clears control_lock lvb bits for recovered jids
 * 15. gfs2_control checks if recover_block == recover_start (step 3 occurred
 *     again) then do nothing, otherwise if recover_start > recover_block
 *     then clear BLOCK_LOCKS.
 *
 * - parallel recovery steps across all nodes
 *
 * All nodes attempt to update the control_lock lvb with the new generation
 * number and jid bits, but only the first to get the control_lock EX will
 * do so; others will see that it's already done (lvb already contains new
 * generation number.)
 *
 * . All nodes get the same recover_prep/recover_slot/recover_done callbacks
 * . All nodes attempt to set control_lock lvb gen + bits for the new gen
 * . One node gets control_lock first and writes the lvb, others see it's done
 * . All nodes attempt to recover jids for which they see control_lock bits set
 * . One node succeeds for a jid, and that one clears the jid bit in the lvb
 * . All nodes will eventually see all lvb bits clear and unblock locks
 *
 * - is there a problem with clearing an lvb bit that should be set
 *   and missing a journal recovery?
 *
 * 1. jid fails
 * 2. lvb bit set for step 1
 * 3. jid recovered for step 1
 * 4. jid taken again (new mount)
 * 5. jid fails (for step 4)
 * 6. lvb bit set for step 5 (will already be set)
 * 7. lvb bit cleared for step 3
 *
 * This is not a problem because the failure in step 5 does not
 * require recovery, because the mount in step 4 could not have
 * progressed far enough to unblock locks and access the fs. The
 * control_mount() function waits for all recoveries to be complete
 * for the latest lockspace generation before ever unblocking locks
 * and returning. The mount in step 4 waits until the recovery in
 * step 1 is done.
 *
 * - special case of first mounter: first node to mount the fs
 *
 * The first node to mount a gfs2 fs needs to check all the journals
 * and recover any that need recovery before other nodes are allowed
 * to mount the fs. (Others may begin mounting, but they must wait
 * for the first mounter to be done before taking locks on the fs
 * or accessing the fs.) This has two parts:
 *
 * 1. The mounted_lock tells a node it's the first to mount the fs.
 *    Each node holds the mounted_lock in PR while it's mounted.
 *    Each node tries to acquire the mounted_lock in EX when it mounts.
 *    If a node is granted the mounted_lock EX it means there are no
 *    other mounted nodes (no PR locks exist), and it is the first mounter.
 *    The mounted_lock is demoted to PR when first recovery is done, so
 *    others will fail to get an EX lock, but will get a PR lock.
 *
 * 2. The control_lock blocks others in control_mount() while the first
 *    mounter is doing first mount recovery of all journals.
 *    A mounting node needs to acquire control_lock in EX mode before
 *    it can proceed. The first mounter holds control_lock in EX while doing
 *    the first mount recovery, blocking mounts from other nodes, then demotes
 *    control_lock to NL when it's done (others_may_mount/first_done),
 *    allowing other nodes to continue mounting.
 *
 * first mounter:
 * control_lock EX/NOQUEUE success
 * mounted_lock EX/NOQUEUE success (no other PR, so no other mounters)
 * set first=1
 * do first mounter recovery
 * mounted_lock EX->PR
 * control_lock EX->NL, write lvb generation
 *
 * other mounter:
 * control_lock EX/NOQUEUE success (if fail -EAGAIN, retry)
 * mounted_lock EX/NOQUEUE fail -EAGAIN (expected due to other mounters' PR)
 * mounted_lock PR/NOQUEUE success
 * read lvb generation
 * control_lock EX->NL
 * set first=0
 *
 * - mount during recovery
 *
 * If a node mounts while others are doing recovery (not first mounter),
 * the mounting node will get its initial recover_done() callback without
 * having seen any previous failures/callbacks.
 *
 * It must wait for all recoveries preceding its mount to be finished
 * before it unblocks locks. It does this by repeating the "other mounter"
 * steps above until the lvb generation number is >= its mount generation
 * number (from initial recover_done) and all lvb bits are clear.
 *
 * - control_lock lvb format
 *
 * 4 bytes generation number: the latest dlm lockspace generation number
 * from recover_done callback. Indicates the jid bitmap has been updated
 * to reflect all slot failures through that generation.
 * 4 bytes unused.
 * GDLM_LVB_SIZE-8 bytes of jid bit map. If bit N is set, it indicates
 * that jid N needs recovery.
 */

#define JID_BITMAP_OFFSET 8 /* 4 byte generation number + 4 byte unused */

static void control_lvb_read(struct lm_lockstruct *ls, uint32_t *lvb_gen,
			     char *lvb_bits)
{
	__le32 gen;
	memcpy(lvb_bits, ls->ls_control_lvb, GDLM_LVB_SIZE);
	memcpy(&gen, lvb_bits, sizeof(__le32));
	*lvb_gen = le32_to_cpu(gen);
}

static void control_lvb_write(struct lm_lockstruct *ls, uint32_t lvb_gen,
			      char *lvb_bits)
{
	__le32 gen;
	memcpy(ls->ls_control_lvb, lvb_bits, GDLM_LVB_SIZE);
	gen = cpu_to_le32(lvb_gen);
	memcpy(ls->ls_control_lvb, &gen, sizeof(__le32));
}

static int all_jid_bits_clear(char *lvb)
{
	return !memchr_inv(lvb + JID_BITMAP_OFFSET, 0,
			GDLM_LVB_SIZE - JID_BITMAP_OFFSET);
}

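/*
 * The helpers below turn the asynchronous dlm_lock()/dlm_unlock() calls
 * into synchronous operations for the control_lock and mounted_lock: the
 * completion callback fires sync_wait_cb(), and the caller sleeps on
 * ls_sync_wait until the request has completed.
 */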
static void sync_wait_cb(void *arg)
{
	struct lm_lockstruct *ls = arg;
	complete(&ls->ls_sync_wait);
}

static int sync_unlock(struct gfs2_sbd *sdp, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	error = dlm_unlock(ls->ls_dlm, lksb->sb_lkid, 0, lksb, ls);
	if (error) {
		fs_err(sdp, "%s lkid %x error %d\n",
		       name, lksb->sb_lkid, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	if (lksb->sb_status != -DLM_EUNLOCK) {
		fs_err(sdp, "%s lkid %x status %d\n",
		       name, lksb->sb_lkid, lksb->sb_status);
		return -1;
	}
	return 0;
}

static int sync_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags,
		     unsigned int num, struct dlm_lksb *lksb, char *name)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char strname[GDLM_STRNAME_BYTES];
	int error, status;

	memset(strname, 0, GDLM_STRNAME_BYTES);
	snprintf(strname, GDLM_STRNAME_BYTES, "%8x%16x", LM_TYPE_NONDISK, num);

	error = dlm_lock(ls->ls_dlm, mode, lksb, flags,
			 strname, GDLM_STRNAME_BYTES - 1,
			 0, sync_wait_cb, ls, NULL);
	if (error) {
		fs_err(sdp, "%s lkid %x flags %x mode %d error %d\n",
		       name, lksb->sb_lkid, flags, mode, error);
		return error;
	}

	wait_for_completion(&ls->ls_sync_wait);

	status = lksb->sb_status;

	if (status && status != -EAGAIN) {
		fs_err(sdp, "%s lkid %x flags %x mode %d status %d\n",
		       name, lksb->sb_lkid, flags, mode, status);
	}

	return status;
}

static int mounted_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_mounted_lksb, "mounted_lock");
}

static int mounted_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_MOUNTED_LOCK,
			 &ls->ls_mounted_lksb, "mounted_lock");
}

static int control_unlock(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_unlock(sdp, &ls->ls_control_lksb, "control_lock");
}

static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sync_lock(sdp, mode, flags, GFS2_CONTROL_LOCK,
			 &ls->ls_control_lksb, "control_lock");
}

/**
 * remote_withdraw - react to a node withdrawing from the file system
 * @sdp: The superblock
 */
static void remote_withdraw(struct gfs2_sbd *sdp)
{
	struct gfs2_jdesc *jd;
	int ret = 0, count = 0;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
			continue;
		ret = gfs2_recover_journal(jd, true);
		if (ret)
			break;
		count++;
	}

	/* Report on the journals we checked for the other nodes. */
	fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
}

static void gfs2_control_func(struct work_struct *work)
{
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t block_gen, start_gen, lvb_gen, flags;
	int recover_set = 0;
	int write_lvb = 0;
	int recover_size;
	int i, error;

	/* First check for other nodes that may have done a withdraw. */
	if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
		remote_withdraw(sdp);
		clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
		return;
	}

	spin_lock(&ls->ls_recover_spin);
	/*
	 * No MOUNT_DONE means we're still mounting; control_mount()
	 * will set this flag, after which this thread will take over
	 * all further clearing of BLOCK_LOCKS.
	 *
	 * FIRST_MOUNT means this node is doing first mounter recovery,
	 * for which recovery control is handled by
	 * control_mount()/control_first_done(), not this thread.
	 */
	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	spin_unlock(&ls->ls_recover_spin);

	/*
	 * Equal block_gen and start_gen implies we are between
	 * recover_prep and recover_done callbacks, which means
	 * dlm recovery is in progress and dlm locking is blocked.
	 * There's no point trying to do any work until recover_done.
	 */

	if (block_gen == start_gen)
		return;

	/*
	 * Propagate recover_submit[] and recover_result[] to lvb:
	 * dlm_recoverd adds to recover_submit[] jids needing recovery
	 * gfs2_recover adds to recover_result[] journal recovery results
	 *
	 * set lvb bit for jids in recover_submit[] if the lvb has not
	 * yet been updated for the generation of the failure
	 *
	 * clear lvb bit for jids in recover_result[] if the result of
	 * the journal recovery is SUCCESS
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control lock EX error %d\n", error);
		return;
	}

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	spin_lock(&ls->ls_recover_spin);
	if (block_gen != ls->ls_recover_block ||
	    start_gen != ls->ls_recover_start) {
		fs_info(sdp, "recover generation %u block1 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
		control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		return;
	}

	recover_size = ls->ls_recover_size;

	if (lvb_gen <= start_gen) {
		/*
		 * Clear lvb bits for jids we've successfully recovered.
		 * Because all nodes attempt to recover failed journals,
		 * a journal can be recovered multiple times successfully
		 * in succession. Only the first will really do recovery,
		 * the others find it clean, but still report a successful
		 * recovery. So, another node may have already recovered
		 * the jid and cleared the lvb bit for it.
		 */
		for (i = 0; i < recover_size; i++) {
			if (ls->ls_recover_result[i] != LM_RD_SUCCESS)
				continue;

			ls->ls_recover_result[i] = 0;

			if (!test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET))
				continue;

			__clear_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			write_lvb = 1;
		}
	}

	if (lvb_gen == start_gen) {
		/*
		 * Failed slots before start_gen are already set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < lvb_gen)
				ls->ls_recover_submit[i] = 0;
		}
	} else if (lvb_gen < start_gen) {
		/*
		 * Failed slots before start_gen are not yet set in lvb.
		 */
		for (i = 0; i < recover_size; i++) {
			if (!ls->ls_recover_submit[i])
				continue;
			if (ls->ls_recover_submit[i] < start_gen) {
				ls->ls_recover_submit[i] = 0;
				__set_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET);
			}
		}
		/* even if there are no bits to set, we need to write the
		   latest generation to the lvb */
		write_lvb = 1;
	} else {
		/*
		 * we should be getting a recover_done() for lvb_gen soon
		 */
	}
	spin_unlock(&ls->ls_recover_spin);

	if (write_lvb) {
		control_lvb_write(ls, start_gen, ls->ls_lvb_bits);
		flags = DLM_LKF_CONVERT | DLM_LKF_VALBLK;
	} else {
		flags = DLM_LKF_CONVERT;
	}

	error = control_lock(sdp, DLM_LOCK_NL, flags);
	if (error) {
		fs_err(sdp, "control lock NL error %d\n", error);
		return;
	}

	/*
	 * Everyone will see jid bits set in the lvb, run gfs2_recover_set(),
	 * and clear a jid bit in the lvb if the recovery is a success.
	 * Eventually all journals will be recovered, all jid bits will
	 * be cleared in the lvb, and everyone will clear BLOCK_LOCKS.
	 */

	for (i = 0; i < recover_size; i++) {
		if (test_bit_le(i, ls->ls_lvb_bits + JID_BITMAP_OFFSET)) {
			fs_info(sdp, "recover generation %u jid %d\n",
				start_gen, i);
			gfs2_recover_set(sdp, i);
			recover_set++;
		}
	}
	if (recover_set)
		return;

	/*
	 * No more jid bits set in lvb, all recovery is done, unblock locks
	 * (unless a new recover_prep callback has occurred blocking locks
	 * again while working above)
	 */

	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_block == block_gen &&
	    ls->ls_recover_start == start_gen) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "recover generation %u done\n", start_gen);
		gfs2_glock_thaw(sdp);
	} else {
		fs_info(sdp, "recover generation %u block2 %u %u\n",
			start_gen, block_gen, ls->ls_recover_block);
		spin_unlock(&ls->ls_recover_spin);
	}
}

static int control_mount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen, mount_gen, lvb_gen;
	int mounted_mode;
	int retries = 0;
	int error;

	memset(&ls->ls_mounted_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lksb, 0, sizeof(struct dlm_lksb));
	memset(&ls->ls_control_lvb, 0, GDLM_LVB_SIZE);
	ls->ls_control_lksb.sb_lvbptr = ls->ls_control_lvb;
	init_completion(&ls->ls_sync_wait);

	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_VALBLK);
	if (error) {
		fs_err(sdp, "control_mount control_lock NL error %d\n", error);
		return error;
	}

	error = mounted_lock(sdp, DLM_LOCK_NL, 0);
	if (error) {
		fs_err(sdp, "control_mount mounted_lock NL error %d\n", error);
		control_unlock(sdp);
		return error;
	}
	mounted_mode = DLM_LOCK_NL;

restart:
	if (retries++ && signal_pending(current)) {
		error = -EINTR;
		goto fail;
	}

	/*
	 * We always start with both locks in NL. control_lock is
	 * demoted to NL below so we don't need to do it here.
	 */

	if (mounted_mode != DLM_LOCK_NL) {
		error = mounted_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
		if (error)
			goto fail;
		mounted_mode = DLM_LOCK_NL;
	}

	/*
	 * Other nodes need to do some work in dlm recovery and gfs2_control
	 * before the recover_done and control_lock will be ready for us below.
	 * A delay here is not required but often avoids having to retry.
	 */

	msleep_interruptible(500);

	/*
	 * Acquire control_lock in EX and mounted_lock in either EX or PR.
	 * control_lock lvb keeps track of any pending journal recoveries.
	 * mounted_lock indicates if any other nodes have the fs mounted.
	 */

	error = control_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE|DLM_LKF_VALBLK);
	if (error == -EAGAIN) {
		goto restart;
	} else if (error) {
		fs_err(sdp, "control_mount control_lock EX error %d\n", error);
		goto fail;
	}

	/*
	 * If we're a spectator, we don't want to take the lock in EX because
	 * we cannot do the first-mount responsibility it implies: recovery.
	 */
	if (sdp->sd_args.ar_spectator)
		goto locks_done;

	error = mounted_lock(sdp, DLM_LOCK_EX, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_EX;
		goto locks_done;
	} else if (error != -EAGAIN) {
		fs_err(sdp, "control_mount mounted_lock EX error %d\n", error);
		goto fail;
	}

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT|DLM_LKF_NOQUEUE);
	if (!error) {
		mounted_mode = DLM_LOCK_PR;
		goto locks_done;
	} else {
		/* not even -EAGAIN should happen here */
		fs_err(sdp, "control_mount mounted_lock PR error %d\n", error);
		goto fail;
	}

locks_done:
	/*
	 * If we got both locks above in EX, then we're the first mounter.
	 * If not, then we need to wait for the control_lock lvb to be
	 * updated by other mounted nodes to reflect our mount generation.
	 *
	 * In simple first mounter cases, first mounter will see zero lvb_gen,
	 * but in cases where all existing nodes leave/fail before mounting
	 * nodes finish control_mount, then all nodes will be mounting and
	 * lvb_gen will be non-zero.
	 */

	control_lvb_read(ls, &lvb_gen, ls->ls_lvb_bits);

	if (lvb_gen == 0xFFFFFFFF) {
		/* special value to force mount attempts to fail */
		fs_err(sdp, "control_mount control_lock disabled\n");
		error = -EINVAL;
		goto fail;
	}

	if (mounted_mode == DLM_LOCK_EX) {
		/* first mounter, keep both EX while doing first recovery */
		spin_lock(&ls->ls_recover_spin);
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
		set_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "first mounter control generation %u\n", lvb_gen);
		return 0;
	}

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT);
	if (error)
		goto fail;

	/*
	 * We are not first mounter, now we need to wait for the control_lock
	 * lvb generation to be >= the generation from our first recover_done
	 * and all lvb bits to be clear (no pending journal recoveries.)
	 */

	if (!all_jid_bits_clear(ls->ls_lvb_bits)) {
		/* journals need recovery, wait until all are clear */
		fs_info(sdp, "control_mount wait for journal recovery\n");
		goto restart;
	}

	spin_lock(&ls->ls_recover_spin);
	block_gen = ls->ls_recover_block;
	start_gen = ls->ls_recover_start;
	mount_gen = ls->ls_recover_mount;

	if (lvb_gen < mount_gen) {
		/* wait for mounted nodes to update control_lock lvb to our
		   generation, which might include new recovery bits set */
		if (sdp->sd_args.ar_spectator) {
			fs_info(sdp, "Recovery is required. Waiting for a "
				"non-spectator to mount.\n");
			msleep_interruptible(1000);
		} else {
			fs_info(sdp, "control_mount wait1 block %u start %u "
				"mount %u lvb %u flags %lx\n", block_gen,
				start_gen, mount_gen, lvb_gen,
				ls->ls_recover_flags);
		}
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (lvb_gen != start_gen) {
		/* wait for mounted nodes to update control_lock lvb to the
		   latest recovery generation */
		fs_info(sdp, "control_mount wait2 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	if (block_gen == start_gen) {
		/* dlm recovery in progress, wait for it to finish */
		fs_info(sdp, "control_mount wait3 block %u start %u mount %u "
			"lvb %u flags %lx\n", block_gen, start_gen, mount_gen,
			lvb_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		goto restart;
	}

	clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	set_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);
	return 0;

fail:
	mounted_unlock(sdp);
	control_unlock(sdp);
	return error;
}

static int control_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t start_gen, block_gen;
	int error;

restart:
	spin_lock(&ls->ls_recover_spin);
	start_gen = ls->ls_recover_start;
	block_gen = ls->ls_recover_block;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags) ||
	    !test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	    !test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		/* sanity check, should not happen */
		fs_err(sdp, "control_first_done start %u block %u flags %lx\n",
		       start_gen, block_gen, ls->ls_recover_flags);
		spin_unlock(&ls->ls_recover_spin);
		control_unlock(sdp);
		return -1;
	}

	if (start_gen == block_gen) {
		/*
		 * Wait for the end of a dlm recovery cycle to switch from
		 * first mounter recovery. We can ignore any recover_slot
		 * callbacks between the recover_prep and next recover_done
		 * because we are still the first mounter and any failed nodes
		 * have not fully mounted, so they don't need recovery.
		 */
		spin_unlock(&ls->ls_recover_spin);
		fs_info(sdp, "control_first_done wait gen %u\n", start_gen);

		wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}

	clear_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	set_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags);
	memset(ls->ls_recover_submit, 0, ls->ls_recover_size*sizeof(uint32_t));
	memset(ls->ls_recover_result, 0, ls->ls_recover_size*sizeof(uint32_t));
	spin_unlock(&ls->ls_recover_spin);

	memset(ls->ls_lvb_bits, 0, GDLM_LVB_SIZE);
	control_lvb_write(ls, start_gen, ls->ls_lvb_bits);

	error = mounted_lock(sdp, DLM_LOCK_PR, DLM_LKF_CONVERT);
	if (error)
		fs_err(sdp, "control_first_done mounted PR error %d\n", error);

	error = control_lock(sdp, DLM_LOCK_NL, DLM_LKF_CONVERT|DLM_LKF_VALBLK);
	if (error)
		fs_err(sdp, "control_first_done control NL error %d\n", error);

	return error;
}

/*
 * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
 * to accommodate the largest slot number. (NB dlm slot numbers start at 1,
 * gfs2 jids start at 0, so jid = slot - 1)
 */

#define RECOVER_SIZE_INC 16

static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
			    int num_slots)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	uint32_t *submit = NULL;
	uint32_t *result = NULL;
	uint32_t old_size, new_size;
	int i, max_jid;

	if (!ls->ls_lvb_bits) {
		ls->ls_lvb_bits = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
		if (!ls->ls_lvb_bits)
			return -ENOMEM;
	}

	max_jid = 0;
	for (i = 0; i < num_slots; i++) {
		if (max_jid < slots[i].slot - 1)
			max_jid = slots[i].slot - 1;
	}

	old_size = ls->ls_recover_size;
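	/* Grow in RECOVER_SIZE_INC steps until the arrays can hold
	   max_jid + 1 entries. */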
	new_size = old_size;
	while (new_size < max_jid + 1)
		new_size += RECOVER_SIZE_INC;
	if (new_size == old_size)
		return 0;

	submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
	if (!submit || !result) {
		kfree(submit);
		kfree(result);
		return -ENOMEM;
	}

	spin_lock(&ls->ls_recover_spin);
	memcpy(submit, ls->ls_recover_submit, old_size * sizeof(uint32_t));
	memcpy(result, ls->ls_recover_result, old_size * sizeof(uint32_t));
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = submit;
	ls->ls_recover_result = result;
	ls->ls_recover_size = new_size;
	spin_unlock(&ls->ls_recover_spin);
	return 0;
}

static void free_recover_size(struct lm_lockstruct *ls)
{
	kfree(ls->ls_lvb_bits);
	kfree(ls->ls_recover_submit);
	kfree(ls->ls_recover_result);
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_recover_size = 0;
	ls->ls_lvb_bits = NULL;
}

/* dlm calls before it does lock recovery */

static void gdlm_recover_prep(void *arg)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		fs_err(sdp, "recover_prep ignored due to withdraw.\n");
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_block = ls->ls_recover_start;
	set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);

	if (!test_bit(DFL_MOUNT_DONE, &ls->ls_recover_flags) ||
	     test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_prep has been completed on all lockspace members;
   identifies slot/jid of failed member */

static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int jid = slot->slot - 1;

	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	spin_lock(&ls->ls_recover_spin);
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
		       jid, ls->ls_recover_block, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	if (ls->ls_recover_submit[jid]) {
		fs_info(sdp, "recover_slot jid %d gen %u prev %u\n",
			jid, ls->ls_recover_block, ls->ls_recover_submit[jid]);
	}
	ls->ls_recover_submit[jid] = ls->ls_recover_block;
	spin_unlock(&ls->ls_recover_spin);
}

/* dlm calls after recover_slot and after it completes lock recovery */

static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
			      int our_slot, uint32_t generation)
{
	struct gfs2_sbd *sdp = arg;
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		fs_err(sdp, "recover_done ignored due to withdraw.\n");
		return;
	}
	/* ensure the ls jid arrays are large enough */
	set_recover_size(sdp, slots, num_slots);

	spin_lock(&ls->ls_recover_spin);
	ls->ls_recover_start = generation;

	if (!ls->ls_recover_mount) {
		ls->ls_recover_mount = generation;
		ls->ls_jid = our_slot - 1;
	}

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);

	clear_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
	smp_mb__after_atomic();
	wake_up_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY);
	spin_unlock(&ls->ls_recover_spin);
}

/* gfs2_recover thread has a journal recovery result */

static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
				 unsigned int result)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (gfs2_withdrawing_or_withdrawn(sdp)) {
		fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
		       jid);
		return;
	}
	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	/* don't care about the recovery of our own journal during mount */
	if (jid == ls->ls_jid)
		return;

	spin_lock(&ls->ls_recover_spin);
	if (test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags)) {
		spin_unlock(&ls->ls_recover_spin);
		return;
	}
	if (ls->ls_recover_size < jid + 1) {
		fs_err(sdp, "recovery_result jid %d short size %d\n",
		       jid, ls->ls_recover_size);
		spin_unlock(&ls->ls_recover_spin);
		return;
	}

	fs_info(sdp, "recover jid %d result %s\n", jid,
		result == LM_RD_GAVEUP ? "busy" : "success");

	ls->ls_recover_result[jid] = result;

	/* GAVEUP means another node is recovering the journal; delay our
	   next attempt to recover it, to give the other node a chance to
	   finish before trying again */

	if (!test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
		queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work,
				   result == LM_RD_GAVEUP ? HZ : 0);
	spin_unlock(&ls->ls_recover_spin);
}

static const struct dlm_lockspace_ops gdlm_lockspace_ops = {
	.recover_prep = gdlm_recover_prep,
	.recover_slot = gdlm_recover_slot,
	.recover_done = gdlm_recover_done,
};

static int gdlm_mount(struct gfs2_sbd *sdp, const char *table)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	char cluster[GFS2_LOCKNAME_LEN];
	const char *fsname;
	uint32_t flags;
	int error, ops_result;

	/*
	 * initialize everything
	 */

	INIT_DELAYED_WORK(&sdp->sd_control_work, gfs2_control_func);
	spin_lock_init(&ls->ls_recover_spin);
	ls->ls_recover_flags = 0;
	ls->ls_recover_mount = 0;
	ls->ls_recover_start = 0;
	ls->ls_recover_block = 0;
	ls->ls_recover_size = 0;
	ls->ls_recover_submit = NULL;
	ls->ls_recover_result = NULL;
	ls->ls_lvb_bits = NULL;

	error = set_recover_size(sdp, NULL, 0);
	if (error)
		goto fail;

	/*
	 * prepare dlm_new_lockspace args
	 */

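	/* The table name has the form "clustername:fsname"; split it into
	   its two parts here. */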
	fsname = strchr(table, ':');
	if (!fsname) {
		fs_info(sdp, "no fsname found\n");
		error = -EINVAL;
		goto fail_free;
	}
	memset(cluster, 0, sizeof(cluster));
	memcpy(cluster, table, strlen(table) - strlen(fsname));
	fsname++;

	flags = DLM_LSFL_NEWEXCL;

	/*
	 * create/join lockspace
	 */

	error = dlm_new_lockspace(fsname, cluster, flags, GDLM_LVB_SIZE,
				  &gdlm_lockspace_ops, sdp, &ops_result,
				  &ls->ls_dlm);
	if (error) {
		fs_err(sdp, "dlm_new_lockspace error %d\n", error);
		goto fail_free;
	}

	if (ops_result < 0) {
		/*
		 * dlm does not support ops callbacks,
		 * old dlm_controld/gfs_controld are used, try without ops.
		 */
		fs_info(sdp, "dlm lockspace ops not used\n");
		free_recover_size(ls);
		set_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags);
		return 0;
	}

	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags)) {
		fs_err(sdp, "dlm lockspace ops disallow jid preset\n");
		error = -EINVAL;
		goto fail_release;
	}

	/*
	 * control_mount() uses control_lock to determine first mounter,
	 * and for later mounts, waits for any recoveries to be cleared.
	 */

	error = control_mount(sdp);
	if (error) {
		fs_err(sdp, "mount control error %d\n", error);
		goto fail_release;
	}

	ls->ls_first = !!test_bit(DFL_FIRST_MOUNT, &ls->ls_recover_flags);
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
	return 0;

fail_release:
	dlm_release_lockspace(ls->ls_dlm, 2);
fail_free:
	free_recover_size(ls);
fail:
	return error;
}

static void gdlm_first_done(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int error;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		return;

	error = control_first_done(sdp);
	if (error)
		fs_err(sdp, "mount first_done error %d\n", error);
}

static void gdlm_unmount(struct gfs2_sbd *sdp)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
		goto release;

	/* wait for gfs2_control_wq to be done with this mount */

	spin_lock(&ls->ls_recover_spin);
	set_bit(DFL_UNMOUNT, &ls->ls_recover_flags);
	spin_unlock(&ls->ls_recover_spin);
	flush_delayed_work(&sdp->sd_control_work);

	/* mounted_lock and control_lock will be purged in dlm recovery */
release:
	if (ls->ls_dlm) {
		dlm_release_lockspace(ls->ls_dlm, 2);
		ls->ls_dlm = NULL;
	}

	free_recover_size(ls);
}

static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};

const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_first_done = gdlm_first_done,
	.lm_recovery_result = gdlm_recovery_result,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};