/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They need
 * to abort if the lockspace is stopped, indicating that a node has failed
 * (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timeout.  This uses a timeout so it can check periodically if the wait
 * should abort due to node failure (which doesn't cause a wake_up).
 * This should only be called by the dlm_recoverd thread.
 */

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
	int error = 0;
	int rv;

	while (1) {
		rv = wait_event_timeout(ls->ls_wait_general,
					testfn(ls) || dlm_recovery_stopped(ls),
					dlm_config.ci_recover_timer * HZ);
		if (rv)
			break;
	}

	if (dlm_recovery_stopped(ls)) {
		log_debug(ls, "dlm_wait_function aborted");
		error = -EINTR;
	}
	return error;
}
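
/*
 * Example (a sketch drawn from the callers later in this file): a recovery
 * step passes its completion predicate here, and the reply handlers wake
 * ls_wait_general once the last reply has arrived:
 *
 *	error = dlm_wait_function(ls, &recover_list_empty);
 *	if (error)
 *		recover_list_clear(ls);
 */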

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
	uint32_t status;
	spin_lock(&ls->ls_recover_lock);
	status = ls->ls_recover_status;
	spin_unlock(&ls->ls_recover_lock);
	return status;
}

static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	ls->ls_recover_status |= status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
	spin_lock(&ls->ls_recover_lock);
	_set_recover_status(ls, status);
	spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
			   int save_slots)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct dlm_member *memb;
	int error = 0, delay;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		delay = 0;
		for (;;) {
			if (dlm_recovery_stopped(ls)) {
				error = -EINTR;
				goto out;
			}

			error = dlm_rcom_status(ls, memb->nodeid, 0);
			if (error)
				goto out;

			if (save_slots)
				dlm_slot_save(ls, rc, memb);

			if (rc->rc_result & wait_status)
				break;
			if (delay < 1000)
				delay += 20;
			msleep(delay);
		}
	}
 out:
	return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
			   uint32_t status_flags)
{
	struct dlm_rcom *rc = ls->ls_recover_buf;
	int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

	for (;;) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			goto out;
		}

		error = dlm_rcom_status(ls, nodeid, status_flags);
		if (error)
			break;

		if (rc->rc_result & wait_status)
			break;
		if (delay < 1000)
			delay += 20;
		msleep(delay);
	}
 out:
	return error;
}
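
/*
 * Both pollers above back off linearly: the sleep between dlm_rcom_status()
 * retries grows in 20ms steps and is capped at one second.
 */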

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
	uint32_t status_all = status << 1;
	int error;

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, status, 0);
		if (!error)
			dlm_set_recover_status(ls, status_all);
	} else
		error = wait_status_low(ls, status_all, 0);

	return error;
}
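
/*
 * Note: status_all = status << 1 above relies on the DLM_RS_* recovery
 * status flags (in dlm_internal.h) being laid out so that each X_ALL flag
 * is the corresponding X flag shifted left by one, e.g. DLM_RS_NODES_ALL
 * directly follows DLM_RS_NODES.
 */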

int dlm_recover_members_wait(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int num_slots, slots_size;
	int error, rv;
	uint32_t gen;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		memb->slot = -1;
		memb->generation = 0;
	}

	if (ls->ls_low_nodeid == dlm_our_nodeid()) {
		error = wait_status_all(ls, DLM_RS_NODES, 1);
		if (error)
			goto out;

		/* slots array is sparse, slots_size may be > num_slots */

		rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
		if (!rv) {
			spin_lock(&ls->ls_recover_lock);
			_set_recover_status(ls, DLM_RS_NODES_ALL);
			ls->ls_num_slots = num_slots;
			ls->ls_slots_size = slots_size;
			ls->ls_slots = slots;
			ls->ls_generation = gen;
			spin_unlock(&ls->ls_recover_lock);
		} else {
			dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
		}
	} else {
		error = wait_status_low(ls, DLM_RS_NODES_ALL, DLM_RSF_NEED_SLOTS);
		if (error)
			goto out;

		dlm_slots_copy_in(ls);
	}
 out:
	return error;
}
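
/*
 * Flow summary for the function above (restating the code): the low nodeid
 * polls every member with save_slots set, so each node's slot info is
 * captured from its status reply; it then assigns slots and publishes
 * DLM_RS_NODES_ALL.  Every other node waits for DLM_RS_NODES_ALL on the low
 * nodeid (requesting slot info with DLM_RSF_NEED_SLOTS) and copies the
 * resulting slot assignments in.
 */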

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
	return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
	int empty;

	spin_lock(&ls->ls_recover_list_lock);
	empty = list_empty(&ls->ls_recover_list);
	spin_unlock(&ls->ls_recover_list_lock);

	return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	if (list_empty(&r->res_recover_list)) {
		list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
		ls->ls_recover_list_count++;
		dlm_hold_rsb(r);
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_list_lock);
	list_del_init(&r->res_recover_list);
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_list_lock);

	dlm_put_rsb(r);
}

static void recover_list_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *s;

	spin_lock(&ls->ls_recover_list_lock);
	list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
		list_del_init(&r->res_recover_list);
		r->res_recover_locks_count = 0;
		dlm_put_rsb(r);
		ls->ls_recover_list_count--;
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_list_lock);
}

static int recover_idr_empty(struct dlm_ls *ls)
{
	int empty = 1;

	spin_lock(&ls->ls_recover_idr_lock);
	if (ls->ls_recover_list_count)
		empty = 0;
	spin_unlock(&ls->ls_recover_idr_lock);

	return empty;
}

static int recover_idr_add(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	int rv, id;

	rv = idr_pre_get(&ls->ls_recover_idr, GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	spin_lock(&ls->ls_recover_idr_lock);
	if (r->res_id) {
		spin_unlock(&ls->ls_recover_idr_lock);
		return -1;
	}
	rv = idr_get_new_above(&ls->ls_recover_idr, r, 1, &id);
	if (rv) {
		spin_unlock(&ls->ls_recover_idr_lock);
		return rv;
	}
	r->res_id = id;
	ls->ls_recover_list_count++;
	dlm_hold_rsb(r);
	spin_unlock(&ls->ls_recover_idr_lock);
	return 0;
}
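
/*
 * Note: recover_idr_add() uses the old two-step idr API (idr_pre_get()
 * followed by idr_get_new_above()); kernels from v3.9 onward express this
 * pattern as a single idr_alloc() call.
 */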

static void recover_idr_del(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;

	spin_lock(&ls->ls_recover_idr_lock);
	idr_remove(&ls->ls_recover_idr, r->res_id);
	r->res_id = 0;
	ls->ls_recover_list_count--;
	spin_unlock(&ls->ls_recover_idr_lock);

	dlm_put_rsb(r);
}

static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
{
	struct dlm_rsb *r;

	spin_lock(&ls->ls_recover_idr_lock);
	r = idr_find(&ls->ls_recover_idr, (int)id);
	spin_unlock(&ls->ls_recover_idr_lock);
	return r;
}

static void recover_idr_clear(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int id;

	spin_lock(&ls->ls_recover_idr_lock);

	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
		idr_remove(&ls->ls_recover_idr, id);
		r->res_id = 0;
		r->res_recover_locks_count = 0;
		ls->ls_recover_list_count--;

		dlm_put_rsb(r);
	}

	if (ls->ls_recover_list_count != 0) {
		log_error(ls, "warning: recover_list_count %d",
			  ls->ls_recover_list_count);
		ls->ls_recover_list_count = 0;
	}
	spin_unlock(&ls->ls_recover_idr_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup -> receive_rcom_lookup
                           dlm_dir_lookup
   receive_rcom_lookup_reply <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
	struct dlm_lkb *lkb;

	list_for_each_entry(lkb, queue, lkb_statequeue) {
		if (!(lkb->lkb_flags & DLM_IFL_MSTCPY)) {
			lkb->lkb_nodeid = nodeid;
			lkb->lkb_remid = 0;
		}
	}
}

static void set_master_lkbs(struct dlm_rsb *r)
{
	set_lock_master(&r->res_grantqueue, r->res_nodeid);
	set_lock_master(&r->res_convertqueue, r->res_nodeid);
	set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r)
{
	set_master_lkbs(r);
	rsb_set_flag(r, RSB_NEW_MASTER);
	rsb_set_flag(r, RSB_NEW_MASTER2);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 *
 * Another node recovering the master may have sent us a rcom lookup,
 * and our dlm_master_lookup() set it as the new master, along with
 * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
 * equals our_nodeid below).
 */

static int recover_master(struct dlm_rsb *r, unsigned int *count)
{
	struct dlm_ls *ls = r->res_ls;
	int our_nodeid, dir_nodeid;
	int is_removed = 0;
	int error;

	if (is_master(r))
		return 0;

	is_removed = dlm_is_removed(ls, r->res_nodeid);

	if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
		return 0;

	our_nodeid = dlm_our_nodeid();
	dir_nodeid = dlm_dir_nodeid(r);

	if (dir_nodeid == our_nodeid) {
		if (is_removed) {
			r->res_master_nodeid = our_nodeid;
			r->res_nodeid = 0;
		}

		/* set master of lkbs to ourself when is_removed, or to
		   another new master which we set along with NEW_MASTER
		   in dlm_master_lookup */
		set_new_master(r);
		error = 0;
	} else {
		recover_idr_add(r);
		error = dlm_send_rcom_lookup(r, dir_nodeid);
	}

	(*count)++;
	return error;
}
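
/*
 * Summary of the cases handled above (restating the code):
 *
 *	master alive, no NEW_MASTER set:  nothing to do
 *	we are the dir node:              take or keep mastery locally and
 *	                                  run set_new_master()
 *	dir node is remote:               fire an async rcom lookup and
 *	                                  track the rsb in recover_idr
 */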

/*
 * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
 * This is necessary because recovery can be started, aborted and restarted,
 * causing the master nodeid to briefly change during the aborted recovery, and
 * change back to the original value in the second recovery.  The MSTCPY locks
 * may or may not have been purged during the aborted recovery.  Another node
 * with an outstanding request on the waiters list and a request reply saved
 * in the requestqueue cannot know whether it should ignore the reply and
 * resend the request, or accept the reply and complete the request.  It must
 * do the former if the remote node purged MSTCPY locks, and it must do the
 * latter if the remote node did not.  This is solved by always purging MSTCPY
 * locks, in which case the request reply would always be ignored and the
 * request resent.
 */

static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
{
	int dir_nodeid = dlm_dir_nodeid(r);
	int new_master = dir_nodeid;

	if (dir_nodeid == dlm_our_nodeid())
		new_master = 0;

	dlm_purge_mstcpy_locks(r);
	r->res_master_nodeid = dir_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	(*count)++;
	return 0;
}

/*
 * Go through local root resources and for each rsb which has a master which
 * has departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int total = 0;
	unsigned int count = 0;
	int nodir = dlm_no_directory(ls);
	int error;

	log_debug(ls, "dlm_recover_masters");

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (dlm_recovery_stopped(ls)) {
			up_read(&ls->ls_root_sem);
			error = -EINTR;
			goto out;
		}

		lock_rsb(r);
		if (nodir)
			error = recover_master_static(r, &count);
		else
			error = recover_master(r, &count);
		unlock_rsb(r);
		cond_resched();
		total++;

		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_masters %u of %u", count, total);

	error = dlm_wait_function(ls, &recover_idr_empty);
 out:
	if (error)
		recover_idr_clear(ls);
	return error;
}
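
/*
 * The lookups fired above complete asynchronously: each reply comes back
 * through dlm_recover_master_reply() below, which drops the rsb from
 * recover_idr and wakes ls_wait_general once the idr is empty.
 */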

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_rsb *r;
	int ret_nodeid, new_master;

	r = recover_idr_find(ls, rc->rc_id);
	if (!r) {
		log_error(ls, "dlm_recover_master_reply no id %llx",
			  (unsigned long long)rc->rc_id);
		goto out;
	}

	ret_nodeid = rc->rc_result;

	if (ret_nodeid == dlm_our_nodeid())
		new_master = 0;
	else
		new_master = ret_nodeid;

	lock_rsb(r);
	r->res_master_nodeid = ret_nodeid;
	r->res_nodeid = new_master;
	set_new_master(r);
	unlock_rsb(r);
	recover_idr_del(r);

	if (recover_idr_empty(ls))
		wake_up(&ls->ls_wait_general);
 out:
	return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock -> receive_rcom_lock
                         dlm_recover_master_copy
   receive_rcom_lock_reply <-
   dlm_recover_process_copy
*/


/*
 * Keep a count of the number of lkb's we send to the new master; when we get
 * an equal number of replies then recovery for the rsb is done.
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
	struct dlm_lkb *lkb;
	int error = 0;

	list_for_each_entry(lkb, head, lkb_statequeue) {
		error = dlm_send_rcom_lock(r, lkb);
		if (error)
			break;
		r->res_recover_locks_count++;
	}

	return error;
}
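
/*
 * res_recover_locks_count is incremented here once per lkb sent;
 * dlm_recovered_lock() below decrements it as each reply arrives, and the
 * rsb leaves the recover_list when the count returns to zero.
 */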

static int recover_locks(struct dlm_rsb *r)
{
	int error = 0;

	lock_rsb(r);

	DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

	error = recover_locks_queue(r, &r->res_grantqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_convertqueue);
	if (error)
		goto out;
	error = recover_locks_queue(r, &r->res_waitqueue);
	if (error)
		goto out;

	if (r->res_recover_locks_count)
		recover_list_add(r);
	else
		rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
	unlock_rsb(r);
	return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	int error, count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		if (is_master(r)) {
			rsb_clear_flag(r, RSB_NEW_MASTER);
			continue;
		}

		if (!rsb_flag(r, RSB_NEW_MASTER))
			continue;

		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			up_read(&ls->ls_root_sem);
			goto out;
		}

		error = recover_locks(r);
		if (error) {
			up_read(&ls->ls_root_sem);
			goto out;
		}

		count += r->res_recover_locks_count;
	}
	up_read(&ls->ls_root_sem);

	log_debug(ls, "dlm_recover_locks %d out", count);

	error = dlm_wait_function(ls, &recover_list_empty);
 out:
	if (error)
		recover_list_clear(ls);
	return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
	DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

	r->res_recover_locks_count--;
	if (!r->res_recover_locks_count) {
		rsb_clear_flag(r, RSB_NEW_MASTER);
		recover_list_del(r);
	}

	if (recover_list_empty(r->res_ls))
		wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set in two cases:
 *
 * 1. we are master, but not new, and we purged an EX/PW lock held by a
 *    failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
 *
 * 2. we are a new master, and there are only NL/CR locks left.
 *    (We could probably improve this by only invalidating in this way when
 *    the previous master left uncleanly.  VMS docs mention that.)
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
 * from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
	struct dlm_lkb *lkb, *high_lkb = NULL;
	uint32_t high_seq = 0;
	int lock_lvb_exists = 0;
	int big_lock_exists = 0;
	int lvblen = r->res_ls->ls_lvblen;

	if (!rsb_flag(r, RSB_NEW_MASTER2) &&
	    rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
		/* case 1 above */
		rsb_set_flag(r, RSB_VALNOTVALID);
		return;
	}

	if (!rsb_flag(r, RSB_NEW_MASTER2))
		return;

	/* we are the new master, so figure out if VALNOTVALID should
	   be set, and set the rsb lvb from the best lkb available. */

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
			continue;

		lock_lvb_exists = 1;

		if (lkb->lkb_grmode > DLM_LOCK_CR) {
			big_lock_exists = 1;
			goto setflag;
		}

		if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
			high_lkb = lkb;
			high_seq = lkb->lkb_lvbseq;
		}
	}

 setflag:
	if (!lock_lvb_exists)
		goto out;

	/* lvb is invalidated if only NL/CR locks remain */
	if (!big_lock_exists)
		rsb_set_flag(r, RSB_VALNOTVALID);

	if (!r->res_lvbptr) {
		r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
		if (!r->res_lvbptr)
			goto out;
	}

	if (big_lock_exists) {
		r->res_lvbseq = lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
	} else if (high_lkb) {
		r->res_lvbseq = high_lkb->lkb_lvbseq;
		memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
	} else {
		r->res_lvbseq = 0;
		memset(r->res_lvbptr, 0, lvblen);
	}
 out:
	return;
}
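
/*
 * The ((int)lkb_lvbseq - (int)high_seq) >= 0 comparisons above select the
 * most recent sequence number using a signed difference, so the choice stays
 * correct even when the 32-bit lvbseq counter wraps around.
 */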

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
	struct dlm_ls *ls = r->res_ls;
	struct dlm_lkb *lkb;
	int grmode = -1;

	list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
		if (lkb->lkb_grmode == DLM_LOCK_PR ||
		    lkb->lkb_grmode == DLM_LOCK_CW) {
			grmode = lkb->lkb_grmode;
			break;
		}
	}

	list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
		if (lkb->lkb_grmode != DLM_LOCK_IV)
			continue;
		if (grmode == -1) {
			log_debug(ls, "recover_conversion %x set gr to rq %d",
				  lkb->lkb_id, lkb->lkb_rqmode);
			lkb->lkb_grmode = lkb->lkb_rqmode;
		} else {
			log_debug(ls, "recover_conversion %x set gr %d",
				  lkb->lkb_id, grmode);
			lkb->lkb_grmode = grmode;
		}
	}
}
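
/*
 * Background (hedged, from the companion code in lock.c rather than this
 * file): PR and CW are mutually incompatible granted modes, and a lock
 * caught mid-conversion between them is rebuilt on the new master with
 * grmode IV, so its real granted mode has to be inferred here from the
 * other granted locks.
 */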

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_recover_grant() due to locks that may have
   existed from a removed node. */

static void recover_grant(struct dlm_rsb *r)
{
	if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
		rsb_set_flag(r, RSB_RECOVER_GRANT);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
	struct dlm_rsb *r;
	unsigned int count = 0;

	down_read(&ls->ls_root_sem);
	list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
		lock_rsb(r);
		if (is_master(r)) {
			if (rsb_flag(r, RSB_RECOVER_CONVERT))
				recover_conversion(r);

			/* recover lvb before granting locks so the updated
			   lvb/VALNOTVALID is presented in the completion */
			recover_lvb(r);

			if (rsb_flag(r, RSB_NEW_MASTER2))
				recover_grant(r);
			count++;
		} else {
			rsb_clear_flag(r, RSB_VALNOTVALID);
		}
		rsb_clear_flag(r, RSB_RECOVER_CONVERT);
		rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
		rsb_clear_flag(r, RSB_NEW_MASTER2);
		unlock_rsb(r);
	}
	up_read(&ls->ls_root_sem);

	if (count)
		log_debug(ls, "dlm_recover_rsbs %d done", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
	struct rb_node *n;
	struct dlm_rsb *r;
	int i, error = 0;

	down_write(&ls->ls_root_sem);
	if (!list_empty(&ls->ls_root_list)) {
		log_error(ls, "root list not empty");
		error = -EINVAL;
		goto out;
	}

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].keep); n; n = rb_next(n)) {
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			list_add(&r->res_root_list, &ls->ls_root_list);
			dlm_hold_rsb(r);
		}

		if (!RB_EMPTY_ROOT(&ls->ls_rsbtbl[i].toss))
			log_error(ls, "dlm_create_root_list toss not empty");
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}
 out:
	up_write(&ls->ls_root_sem);
	return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
	struct dlm_rsb *r, *safe;

	down_write(&ls->ls_root_sem);
	list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
		list_del_init(&r->res_root_list);
		dlm_put_rsb(r);
	}
	up_write(&ls->ls_root_sem);
}

void dlm_clear_toss(struct dlm_ls *ls)
{
	struct rb_node *n, *next;
	struct dlm_rsb *r;
	unsigned int count = 0;
	int i;

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		spin_lock(&ls->ls_rsbtbl[i].lock);
		for (n = rb_first(&ls->ls_rsbtbl[i].toss); n; n = next) {
			next = rb_next(n);
			r = rb_entry(n, struct dlm_rsb, res_hashnode);
			rb_erase(n, &ls->ls_rsbtbl[i].toss);
			dlm_free_rsb(r);
			count++;
		}
		spin_unlock(&ls->ls_rsbtbl[i].lock);
	}

	if (count)
		log_debug(ls, "dlm_clear_toss %u done", count);
}
956 |