[linux.git] / fs / dlm / recover.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
4 **
5 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
6 **  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
7 **
8 **
9 *******************************************************************************
10 ******************************************************************************/
11
12 #include "dlm_internal.h"
13 #include "lockspace.h"
14 #include "dir.h"
15 #include "config.h"
16 #include "ast.h"
17 #include "memory.h"
18 #include "rcom.h"
19 #include "lock.h"
20 #include "lowcomms.h"
21 #include "member.h"
22 #include "recover.h"
23
24
25 /*
26  * Recovery waiting routines: these functions wait for a particular reply from
27  * a remote node, or for the remote node to report a certain status.  They need
28  * to abort if the lockspace is stopped, indicating a node has failed (perhaps
29  * the one being waited for).
30  */
31
32 /*
33  * Wait until given function returns non-zero or lockspace is stopped
34  * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
35  * function thinks it could have completed the waited-on task, it should wake
36  * up ls_wait_general to get an immediate response rather than waiting for the
37  * timeout.  This uses a timeout so it can check periodically if the wait
38  * should abort due to node failure (which doesn't cause a wake_up).
39  * This should only be called by the dlm_recoverd thread.
40  */
41
42 int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
43 {
44         int error = 0;
45         int rv;
46
47         while (1) {
48                 rv = wait_event_timeout(ls->ls_wait_general,
49                                         testfn(ls) || dlm_recovery_stopped(ls),
50                                         dlm_config.ci_recover_timer * HZ);
51                 if (rv)
52                         break;
53                 if (test_bit(LSFL_RCOM_WAIT, &ls->ls_flags)) {
54                         log_debug(ls, "dlm_wait_function timed out");
55                         return -ETIMEDOUT;
56                 }
57         }
58
59         if (dlm_recovery_stopped(ls)) {
60                 log_debug(ls, "dlm_wait_function aborted");
61                 error = -EINTR;
62         }
63         return error;
64 }
65
66 /*
67  * An efficient way for all nodes to wait for all others to have a certain
68  * status.  The node with the lowest nodeid polls all the others for their
69  * status (wait_status_all) and all the others poll the node with the low id
70  * for its accumulated result (wait_status_low).  When all nodes have set
71  * status flag X, then status flag X_ALL will be set on the low nodeid.
72  */
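/*
 * For illustration (assuming the DLM_RS_* flags in dlm_internal.h keep
 * their usual layout): each X_ALL flag is the corresponding X flag
 * shifted left by one bit, e.g. DLM_RS_DIR -> DLM_RS_DIR_ALL, which is
 * what wait_status() below relies on when it computes "status << 1".
 */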
73
74 uint32_t dlm_recover_status(struct dlm_ls *ls)
75 {
76         uint32_t status;
77         spin_lock_bh(&ls->ls_recover_lock);
78         status = ls->ls_recover_status;
79         spin_unlock_bh(&ls->ls_recover_lock);
80         return status;
81 }
82
83 static void _set_recover_status(struct dlm_ls *ls, uint32_t status)
84 {
85         ls->ls_recover_status |= status;
86 }
87
88 void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
89 {
90         spin_lock_bh(&ls->ls_recover_lock);
91         _set_recover_status(ls, status);
92         spin_unlock_bh(&ls->ls_recover_lock);
93 }
94
95 static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
96                            int save_slots, uint64_t seq)
97 {
98         struct dlm_rcom *rc = ls->ls_recover_buf;
99         struct dlm_member *memb;
100         int error = 0, delay;
101
102         list_for_each_entry(memb, &ls->ls_nodes, list) {
103                 delay = 0;
104                 for (;;) {
105                         if (dlm_recovery_stopped(ls)) {
106                                 error = -EINTR;
107                                 goto out;
108                         }
109
110                         error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
111                         if (error)
112                                 goto out;
113
114                         if (save_slots)
115                                 dlm_slot_save(ls, rc, memb);
116
117                         if (le32_to_cpu(rc->rc_result) & wait_status)
118                                 break;
119                         if (delay < 1000)
120                                 delay += 20;
121                         msleep(delay);
122                 }
123         }
124  out:
125         return error;
126 }
127
128 static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
129                            uint32_t status_flags, uint64_t seq)
130 {
131         struct dlm_rcom *rc = ls->ls_recover_buf;
132         int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;
133
134         for (;;) {
135                 if (dlm_recovery_stopped(ls)) {
136                         error = -EINTR;
137                         goto out;
138                 }
139
140                 error = dlm_rcom_status(ls, nodeid, status_flags, seq);
141                 if (error)
142                         break;
143
144                 if (le32_to_cpu(rc->rc_result) & wait_status)
145                         break;
146                 if (delay < 1000)
147                         delay += 20;
148                 msleep(delay);
149         }
150  out:
151         return error;
152 }
153
154 static int wait_status(struct dlm_ls *ls, uint32_t status, uint64_t seq)
155 {
156         uint32_t status_all = status << 1;
157         int error;
158
159         if (ls->ls_low_nodeid == dlm_our_nodeid()) {
160                 error = wait_status_all(ls, status, 0, seq);
161                 if (!error)
162                         dlm_set_recover_status(ls, status_all);
163         } else
164                 error = wait_status_low(ls, status_all, 0, seq);
165
166         return error;
167 }
168
169 int dlm_recover_members_wait(struct dlm_ls *ls, uint64_t seq)
170 {
171         struct dlm_member *memb;
172         struct dlm_slot *slots;
173         int num_slots, slots_size;
174         int error, rv;
175         uint32_t gen;
176
177         list_for_each_entry(memb, &ls->ls_nodes, list) {
178                 memb->slot = -1;
179                 memb->generation = 0;
180         }
181
182         if (ls->ls_low_nodeid == dlm_our_nodeid()) {
183                 error = wait_status_all(ls, DLM_RS_NODES, 1, seq);
184                 if (error)
185                         goto out;
186
187                 /* slots array is sparse, slots_size may be > num_slots */
188
189                 rv = dlm_slots_assign(ls, &num_slots, &slots_size, &slots, &gen);
190                 if (!rv) {
191                         spin_lock_bh(&ls->ls_recover_lock);
192                         _set_recover_status(ls, DLM_RS_NODES_ALL);
193                         ls->ls_num_slots = num_slots;
194                         ls->ls_slots_size = slots_size;
195                         ls->ls_slots = slots;
196                         ls->ls_generation = gen;
197                         spin_unlock_bh(&ls->ls_recover_lock);
198                 } else {
199                         dlm_set_recover_status(ls, DLM_RS_NODES_ALL);
200                 }
201         } else {
202                 error = wait_status_low(ls, DLM_RS_NODES_ALL,
203                                         DLM_RSF_NEED_SLOTS, seq);
204                 if (error)
205                         goto out;
206
207                 dlm_slots_copy_in(ls);
208         }
209  out:
210         return error;
211 }
212
213 int dlm_recover_directory_wait(struct dlm_ls *ls, uint64_t seq)
214 {
215         return wait_status(ls, DLM_RS_DIR, seq);
216 }
217
218 int dlm_recover_locks_wait(struct dlm_ls *ls, uint64_t seq)
219 {
220         return wait_status(ls, DLM_RS_LOCKS, seq);
221 }
222
223 int dlm_recover_done_wait(struct dlm_ls *ls, uint64_t seq)
224 {
225         return wait_status(ls, DLM_RS_DONE, seq);
226 }
227
228 /*
229  * The recover_list contains all the rsb's for which we've requested the new
230  * master nodeid.  As replies are returned from the resource directories the
231  * rsb's are removed from the list.  When the list is empty we're done.
232  *
233  * The recover_list is later similarly used for all rsb's for which we've sent
234  * new lkb's and need to receive new corresponding lkid's.
235  *
236  * We use the address of the rsb struct as a simple local identifier for the
237  * rsb so we can match an rcom reply with the rsb it was sent for.
238  */
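/*
 * Note: in this file the list-based variant (recover_list_*) is used for
 * lock recovery (recover_locks() below), while master lookups use the
 * idr-based variant (recover_idr_*); both share ls_recover_list_count as
 * the count of outstanding requests.
 */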
239
240 static int recover_list_empty(struct dlm_ls *ls)
241 {
242         int empty;
243
244         spin_lock_bh(&ls->ls_recover_list_lock);
245         empty = list_empty(&ls->ls_recover_list);
246         spin_unlock_bh(&ls->ls_recover_list_lock);
247
248         return empty;
249 }
250
251 static void recover_list_add(struct dlm_rsb *r)
252 {
253         struct dlm_ls *ls = r->res_ls;
254
255         spin_lock_bh(&ls->ls_recover_list_lock);
256         if (list_empty(&r->res_recover_list)) {
257                 list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
258                 ls->ls_recover_list_count++;
259                 dlm_hold_rsb(r);
260         }
261         spin_unlock_bh(&ls->ls_recover_list_lock);
262 }
263
264 static void recover_list_del(struct dlm_rsb *r)
265 {
266         struct dlm_ls *ls = r->res_ls;
267
268         spin_lock_bh(&ls->ls_recover_list_lock);
269         list_del_init(&r->res_recover_list);
270         ls->ls_recover_list_count--;
271         spin_unlock_bh(&ls->ls_recover_list_lock);
272
273         dlm_put_rsb(r);
274 }
275
276 static void recover_list_clear(struct dlm_ls *ls)
277 {
278         struct dlm_rsb *r, *s;
279
280         spin_lock_bh(&ls->ls_recover_list_lock);
281         list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
282                 list_del_init(&r->res_recover_list);
283                 r->res_recover_locks_count = 0;
284                 dlm_put_rsb(r);
285                 ls->ls_recover_list_count--;
286         }
287
288         if (ls->ls_recover_list_count != 0) {
289                 log_error(ls, "warning: recover_list_count %d",
290                           ls->ls_recover_list_count);
291                 ls->ls_recover_list_count = 0;
292         }
293         spin_unlock_bh(&ls->ls_recover_list_lock);
294 }
295
296 static int recover_idr_empty(struct dlm_ls *ls)
297 {
298         int empty = 1;
299
300         spin_lock_bh(&ls->ls_recover_idr_lock);
301         if (ls->ls_recover_list_count)
302                 empty = 0;
303         spin_unlock_bh(&ls->ls_recover_idr_lock);
304
305         return empty;
306 }
307
308 static int recover_idr_add(struct dlm_rsb *r)
309 {
310         struct dlm_ls *ls = r->res_ls;
311         int rv;
312
313         spin_lock_bh(&ls->ls_recover_idr_lock);
314         if (r->res_id) {
315                 rv = -1;
316                 goto out_unlock;
317         }
318         rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
319         if (rv < 0)
320                 goto out_unlock;
321
322         r->res_id = rv;
323         ls->ls_recover_list_count++;
324         dlm_hold_rsb(r);
325         rv = 0;
326 out_unlock:
327         spin_unlock_bh(&ls->ls_recover_idr_lock);
328         return rv;
329 }
330
331 static void recover_idr_del(struct dlm_rsb *r)
332 {
333         struct dlm_ls *ls = r->res_ls;
334
335         spin_lock_bh(&ls->ls_recover_idr_lock);
336         idr_remove(&ls->ls_recover_idr, r->res_id);
337         r->res_id = 0;
338         ls->ls_recover_list_count--;
339         spin_unlock_bh(&ls->ls_recover_idr_lock);
340
341         dlm_put_rsb(r);
342 }
343
344 static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
345 {
346         struct dlm_rsb *r;
347
348         spin_lock_bh(&ls->ls_recover_idr_lock);
349         r = idr_find(&ls->ls_recover_idr, (int)id);
350         spin_unlock_bh(&ls->ls_recover_idr_lock);
351         return r;
352 }
353
354 static void recover_idr_clear(struct dlm_ls *ls)
355 {
356         struct dlm_rsb *r;
357         int id;
358
359         spin_lock_bh(&ls->ls_recover_idr_lock);
360
361         idr_for_each_entry(&ls->ls_recover_idr, r, id) {
362                 idr_remove(&ls->ls_recover_idr, id);
363                 r->res_id = 0;
364                 r->res_recover_locks_count = 0;
365                 ls->ls_recover_list_count--;
366
367                 dlm_put_rsb(r);
368         }
369
370         if (ls->ls_recover_list_count != 0) {
371                 log_error(ls, "warning: recover_list_count %d",
372                           ls->ls_recover_list_count);
373                 ls->ls_recover_list_count = 0;
374         }
375         spin_unlock_bh(&ls->ls_recover_idr_lock);
376 }
377
378
379 /* Master recovery: find new master node for rsb's that were
380    mastered on nodes that have been removed.
381
382    dlm_recover_masters
383    recover_master
384    dlm_send_rcom_lookup            ->  receive_rcom_lookup
385                                        dlm_dir_lookup
386    receive_rcom_lookup_reply       <-
387    dlm_recover_master_reply
388    set_new_master
389    set_master_lkbs
390    set_lock_master
391 */
392
393 /*
394  * Set the lock master for all LKBs in a lock queue.
395  * If we are the new master of the rsb, we may have received new
396  * MSTCPY locks from other nodes already, which we need to ignore
397  * when setting the new nodeid.
398  */
399
400 static void set_lock_master(struct list_head *queue, int nodeid)
401 {
402         struct dlm_lkb *lkb;
403
404         list_for_each_entry(lkb, queue, lkb_statequeue) {
405                 if (!test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags)) {
406                         lkb->lkb_nodeid = nodeid;
407                         lkb->lkb_remid = 0;
408                 }
409         }
410 }
411
412 static void set_master_lkbs(struct dlm_rsb *r)
413 {
414         set_lock_master(&r->res_grantqueue, r->res_nodeid);
415         set_lock_master(&r->res_convertqueue, r->res_nodeid);
416         set_lock_master(&r->res_waitqueue, r->res_nodeid);
417 }
418
419 /*
420  * Propagate the new master nodeid to locks
421  * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
422  * The NEW_MASTER2 flag tells recover_lvb() and recover_grant() which
423  * rsb's to consider.
424  */
425
426 static void set_new_master(struct dlm_rsb *r)
427 {
428         set_master_lkbs(r);
429         rsb_set_flag(r, RSB_NEW_MASTER);
430         rsb_set_flag(r, RSB_NEW_MASTER2);
431 }
432
433 /*
434  * We do async lookups on rsb's that need new masters.  The rsb's
435  * waiting for a lookup reply are kept in the recover_idr.
436  *
437  * Another node recovering the master may have sent us a rcom lookup,
438  * and our dlm_master_lookup() set it as the new master, along with
439  * NEW_MASTER so that we'll recover it here (this implies dir_nodeid
440  * equals our_nodeid below).
441  */
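/*
 * A sketch of the request/reply matching, based on the code below:
 * recover_master() adds the rsb to the idr and sends the lookup with
 * dlm_send_rcom_lookup(); dlm_recover_master_reply() then matches the
 * reply back to the rsb with recover_idr_find() on rc_id (presumably the
 * res_id that recover_idr_add() assigned).
 */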
442
443 static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
444 {
445         struct dlm_ls *ls = r->res_ls;
446         int our_nodeid, dir_nodeid;
447         int is_removed = 0;
448         int error;
449
450         if (is_master(r))
451                 return 0;
452
453         is_removed = dlm_is_removed(ls, r->res_nodeid);
454
455         if (!is_removed && !rsb_flag(r, RSB_NEW_MASTER))
456                 return 0;
457
458         our_nodeid = dlm_our_nodeid();
459         dir_nodeid = dlm_dir_nodeid(r);
460
461         if (dir_nodeid == our_nodeid) {
462                 if (is_removed) {
463                         r->res_master_nodeid = our_nodeid;
464                         r->res_nodeid = 0;
465                 }
466
467                 /* set master of lkbs to ourself when is_removed, or to
468                    another new master which we set along with NEW_MASTER
469                    in dlm_master_lookup */
470                 set_new_master(r);
471                 error = 0;
472         } else {
473                 recover_idr_add(r);
474                 error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
475         }
476
477         (*count)++;
478         return error;
479 }
480
481 /*
482  * All MSTCPY locks are purged and rebuilt, even if the master stayed the same.
483  * This is necessary because recovery can be started, aborted and restarted,
484  * causing the master nodeid to briefly change during the aborted recovery, and
485  * change back to the original value in the second recovery.  The MSTCPY locks
486  * may or may not have been purged during the aborted recovery.  Another node
487  * with an outstanding request in waiters list and a request reply saved in the
488  * requestqueue, cannot know whether it should ignore the reply and resend the
489  * request, or accept the reply and complete the request.  It must do the
490  * former if the remote node purged MSTCPY locks, and it must do the latter if
491  * the remote node did not.  This is solved by always purging MSTCPY locks, in
492  * which case, the request reply would always be ignored and the request
493  * resent.
494  */
495
496 static int recover_master_static(struct dlm_rsb *r, unsigned int *count)
497 {
498         int dir_nodeid = dlm_dir_nodeid(r);
499         int new_master = dir_nodeid;
500
501         if (dir_nodeid == dlm_our_nodeid())
502                 new_master = 0;
503
504         dlm_purge_mstcpy_locks(r);
505         r->res_master_nodeid = dir_nodeid;
506         r->res_nodeid = new_master;
507         set_new_master(r);
508         (*count)++;
509         return 0;
510 }
511
512 /*
513  * Go through local root resources and for each rsb which has a master which
514  * has departed, get the new master nodeid from the directory.  The dir will
515  * assign mastery to the first node to look up the new master.  That means
516  * we'll discover in this lookup if we're the new master of any rsb's.
517  *
518  * We fire off all the dir lookup requests individually and asynchronously to
519  * the correct dir node.
520  */
521
522 int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
523                         const struct list_head *root_list)
524 {
525         struct dlm_rsb *r;
526         unsigned int total = 0;
527         unsigned int count = 0;
528         int nodir = dlm_no_directory(ls);
529         int error;
530
531         log_rinfo(ls, "dlm_recover_masters");
532
533         list_for_each_entry(r, root_list, res_root_list) {
534                 if (dlm_recovery_stopped(ls)) {
535                         error = -EINTR;
536                         goto out;
537                 }
538
539                 lock_rsb(r);
540                 if (nodir)
541                         error = recover_master_static(r, &count);
542                 else
543                         error = recover_master(r, &count, seq);
544                 unlock_rsb(r);
545                 cond_resched();
546                 total++;
547
548                 if (error)
549                         goto out;
550         }
551
552         log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
553
554         error = dlm_wait_function(ls, &recover_idr_empty);
555  out:
556         if (error)
557                 recover_idr_clear(ls);
558         return error;
559 }
560
561 int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
562 {
563         struct dlm_rsb *r;
564         int ret_nodeid, new_master;
565
566         r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
567         if (!r) {
568                 log_error(ls, "dlm_recover_master_reply no id %llx",
569                           (unsigned long long)le64_to_cpu(rc->rc_id));
570                 goto out;
571         }
572
573         ret_nodeid = le32_to_cpu(rc->rc_result);
574
575         if (ret_nodeid == dlm_our_nodeid())
576                 new_master = 0;
577         else
578                 new_master = ret_nodeid;
579
580         lock_rsb(r);
581         r->res_master_nodeid = ret_nodeid;
582         r->res_nodeid = new_master;
583         set_new_master(r);
584         unlock_rsb(r);
585         recover_idr_del(r);
586
587         if (recover_idr_empty(ls))
588                 wake_up(&ls->ls_wait_general);
589  out:
590         return 0;
591 }
592
593
594 /* Lock recovery: rebuild the process-copy locks we hold on a
595    remastered rsb on the new rsb master.
596
597    dlm_recover_locks
598    recover_locks
599    recover_locks_queue
600    dlm_send_rcom_lock              ->  receive_rcom_lock
601                                        dlm_recover_master_copy
602    receive_rcom_lock_reply         <-
603    dlm_recover_process_copy
604 */
605
606
607 /*
608  * keep a count of the number of lkb's we send to the new master; when we get
609  * an equal number of replies then recovery for the rsb is done
610  */
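/*
 * (res_recover_locks_count is incremented for each lkb sent in
 * recover_locks_queue() and decremented in dlm_recovered_lock() as each
 * reply is processed; when it drops to zero the rsb is removed from the
 * recover_list.)
 */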
611
612 static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head,
613                                uint64_t seq)
614 {
615         struct dlm_lkb *lkb;
616         int error = 0;
617
618         list_for_each_entry(lkb, head, lkb_statequeue) {
619                 error = dlm_send_rcom_lock(r, lkb, seq);
620                 if (error)
621                         break;
622                 r->res_recover_locks_count++;
623         }
624
625         return error;
626 }
627
628 static int recover_locks(struct dlm_rsb *r, uint64_t seq)
629 {
630         int error = 0;
631
632         lock_rsb(r);
633
634         DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););
635
636         error = recover_locks_queue(r, &r->res_grantqueue, seq);
637         if (error)
638                 goto out;
639         error = recover_locks_queue(r, &r->res_convertqueue, seq);
640         if (error)
641                 goto out;
642         error = recover_locks_queue(r, &r->res_waitqueue, seq);
643         if (error)
644                 goto out;
645
646         if (r->res_recover_locks_count)
647                 recover_list_add(r);
648         else
649                 rsb_clear_flag(r, RSB_NEW_MASTER);
650  out:
651         unlock_rsb(r);
652         return error;
653 }
654
655 int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
656                       const struct list_head *root_list)
657 {
658         struct dlm_rsb *r;
659         int error, count = 0;
660
661         list_for_each_entry(r, root_list, res_root_list) {
662                 if (is_master(r)) {
663                         rsb_clear_flag(r, RSB_NEW_MASTER);
664                         continue;
665                 }
666
667                 if (!rsb_flag(r, RSB_NEW_MASTER))
668                         continue;
669
670                 if (dlm_recovery_stopped(ls)) {
671                         error = -EINTR;
672                         goto out;
673                 }
674
675                 error = recover_locks(r, seq);
676                 if (error)
677                         goto out;
678
679                 count += r->res_recover_locks_count;
680         }
681
682         log_rinfo(ls, "dlm_recover_locks %d out", count);
683
684         error = dlm_wait_function(ls, &recover_list_empty);
685  out:
686         if (error)
687                 recover_list_clear(ls);
688         return error;
689 }
690
691 void dlm_recovered_lock(struct dlm_rsb *r)
692 {
693         DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););
694
695         r->res_recover_locks_count--;
696         if (!r->res_recover_locks_count) {
697                 rsb_clear_flag(r, RSB_NEW_MASTER);
698                 recover_list_del(r);
699         }
700
701         if (recover_list_empty(r->res_ls))
702                 wake_up(&r->res_ls->ls_wait_general);
703 }
704
705 /*
706  * The lvb needs to be recovered on all master rsb's.  This includes setting
707  * the VALNOTVALID flag if necessary, and determining the correct lvb contents
708  * based on the lvb's of the locks held on the rsb.
709  *
710  * RSB_VALNOTVALID is set in two cases:
711  *
712  * 1. we are master, but not new, and we purged an EX/PW lock held by a
713  * failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
714  *
715  * 2. we are a new master, and there are only NL/CR locks left.
716  * (We could probably improve this by only invalidating in this way when
717  * the previous master left uncleanly.  VMS docs mention that.)
718  *
719  * The LVB contents are only considered for changing when this is a new master
720  * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
721  * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are taken
722  * from the lkb with the largest lvb sequence number.
723  */
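/*
 * The lvbseq comparisons below use signed subtraction rather than a
 * plain "greater than" test, presumably so the 32-bit sequence number
 * still orders correctly if it wraps around.
 */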
724
725 static void recover_lvb(struct dlm_rsb *r)
726 {
727         struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
728         uint32_t high_seq = 0;
729         int lock_lvb_exists = 0;
730         int lvblen = r->res_ls->ls_lvblen;
731
732         if (!rsb_flag(r, RSB_NEW_MASTER2) &&
733             rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
734                 /* case 1 above */
735                 rsb_set_flag(r, RSB_VALNOTVALID);
736                 return;
737         }
738
739         if (!rsb_flag(r, RSB_NEW_MASTER2))
740                 return;
741
742         /* we are the new master, so figure out if VALNOTVALID should
743            be set, and set the rsb lvb from the best lkb available. */
744
745         list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
746                 if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
747                         continue;
748
749                 lock_lvb_exists = 1;
750
751                 if (iter->lkb_grmode > DLM_LOCK_CR) {
752                         big_lkb = iter;
753                         goto setflag;
754                 }
755
756                 if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
757                         high_lkb = iter;
758                         high_seq = iter->lkb_lvbseq;
759                 }
760         }
761
762         list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
763                 if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
764                         continue;
765
766                 lock_lvb_exists = 1;
767
768                 if (iter->lkb_grmode > DLM_LOCK_CR) {
769                         big_lkb = iter;
770                         goto setflag;
771                 }
772
773                 if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
774                         high_lkb = iter;
775                         high_seq = iter->lkb_lvbseq;
776                 }
777         }
778
779  setflag:
780         if (!lock_lvb_exists)
781                 goto out;
782
783         /* lvb is invalidated if only NL/CR locks remain */
784         if (!big_lkb)
785                 rsb_set_flag(r, RSB_VALNOTVALID);
786
787         if (!r->res_lvbptr) {
788                 r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
789                 if (!r->res_lvbptr)
790                         goto out;
791         }
792
793         if (big_lkb) {
794                 r->res_lvbseq = big_lkb->lkb_lvbseq;
795                 memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
796         } else if (high_lkb) {
797                 r->res_lvbseq = high_lkb->lkb_lvbseq;
798                 memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
799         } else {
800                 r->res_lvbseq = 0;
801                 memset(r->res_lvbptr, 0, lvblen);
802         }
803  out:
804         return;
805 }
806
807 /* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
808    converting PR->CW or CW->PR need to have their lkb_grmode set. */
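/*
 * Concretely (see the loops below): an in-flight conversion shows up on
 * the new master with lkb_grmode == DLM_LOCK_IV; its granted mode is
 * restored from another PR/CW lock found on the grant queue, or, if none
 * exists, falls back to the lock's own request mode.
 */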
809
810 static void recover_conversion(struct dlm_rsb *r)
811 {
812         struct dlm_ls *ls = r->res_ls;
813         struct dlm_lkb *lkb;
814         int grmode = -1;
815
816         list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
817                 if (lkb->lkb_grmode == DLM_LOCK_PR ||
818                     lkb->lkb_grmode == DLM_LOCK_CW) {
819                         grmode = lkb->lkb_grmode;
820                         break;
821                 }
822         }
823
824         list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
825                 if (lkb->lkb_grmode != DLM_LOCK_IV)
826                         continue;
827                 if (grmode == -1) {
828                         log_debug(ls, "recover_conversion %x set gr to rq %d",
829                                   lkb->lkb_id, lkb->lkb_rqmode);
830                         lkb->lkb_grmode = lkb->lkb_rqmode;
831                 } else {
832                         log_debug(ls, "recover_conversion %x set gr %d",
833                                   lkb->lkb_id, grmode);
834                         lkb->lkb_grmode = grmode;
835                 }
836         }
837 }
838
839 /* We've become the new master for this rsb and waiting/converting locks may
840    need to be granted in dlm_recover_grant() due to locks that may have
841    existed from a removed node. */
842
843 static void recover_grant(struct dlm_rsb *r)
844 {
845         if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
846                 rsb_set_flag(r, RSB_RECOVER_GRANT);
847 }
848
849 void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
850 {
851         struct dlm_rsb *r;
852         unsigned int count = 0;
853
854         list_for_each_entry(r, root_list, res_root_list) {
855                 lock_rsb(r);
856                 if (is_master(r)) {
857                         if (rsb_flag(r, RSB_RECOVER_CONVERT))
858                                 recover_conversion(r);
859
860                         /* recover lvb before granting locks so the updated
861                            lvb/VALNOTVALID is presented in the completion */
862                         recover_lvb(r);
863
864                         if (rsb_flag(r, RSB_NEW_MASTER2))
865                                 recover_grant(r);
866                         count++;
867                 } else {
868                         rsb_clear_flag(r, RSB_VALNOTVALID);
869                 }
870                 rsb_clear_flag(r, RSB_RECOVER_CONVERT);
871                 rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
872                 rsb_clear_flag(r, RSB_NEW_MASTER2);
873                 unlock_rsb(r);
874         }
875
876         if (count)
877                 log_rinfo(ls, "dlm_recover_rsbs %d done", count);
878 }
879
880 /* Clear the toss list: remove and free all toss rsb's before recovery */
881
882 void dlm_clear_toss(struct dlm_ls *ls)
883 {
884         struct dlm_rsb *r, *safe;
885         unsigned int count = 0;
886
887         write_lock_bh(&ls->ls_rsbtbl_lock);
888         list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
889                 list_del(&r->res_rsbs_list);
890                 rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
891                                        dlm_rhash_rsb_params);
892
893                 /* remove it from the toss queue if it's part of it */
894                 if (!list_empty(&r->res_toss_q_list))
895                         list_del_init(&r->res_toss_q_list);
896
897                 free_toss_rsb(r);
898                 count++;
899         }
900         write_unlock_bh(&ls->ls_rsbtbl_lock);
901
902         if (count)
903                 log_rinfo(ls, "dlm_clear_toss %u done", count);
904 }
905