/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "dir.h"
#include "ast.h"
#include "recover.h"
#include "lowcomms.h"
#include "lock.h"
#include "requestqueue.h"
#include "recoverd.h"


/* If the start for which we're re-enabling locking (seq) has been superseded
   by a newer stop (ls_recover_seq), we need to leave locking disabled.

   We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees
   locking stopped and b) adds a message to the requestqueue, but dlm_recoverd
   enables locking and clears the requestqueue between a and b. */

static int enable_locking(struct dlm_ls *ls, uint64_t seq)
{
	int error = -EINTR;

	down_write(&ls->ls_recv_active);

	spin_lock(&ls->ls_recover_lock);
	if (ls->ls_recover_seq == seq) {
		set_bit(LSFL_RUNNING, &ls->ls_flags);
		/* unblocks processes waiting to enter the dlm */
		up_write(&ls->ls_in_recovery);
		error = 0;
	}
	spin_unlock(&ls->ls_recover_lock);

	up_write(&ls->ls_recv_active);
	return error;
}

static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
{
	unsigned long start;
	int error, neg = 0;

	log_debug(ls, "recover %llx", (unsigned long long)rv->seq);

	mutex_lock(&ls->ls_recoverd_active);

23e8e1aa | 61 | dlm_callback_suspend(ls); |
e7fd4179 DT |
62 | |
63 | /* | |
85f0379a DT |
64 | * Free non-master tossed rsb's. Master rsb's are kept on toss |
65 | * list and put on root list to be included in resdir recovery. | |
e7fd4179 DT |
66 | */ |
67 | ||
85f0379a | 68 | dlm_clear_toss_list(ls); |
e7fd4179 DT |
69 | |
70 | /* | |
85f0379a DT |
71 | * This list of root rsb's will be the basis of most of the recovery |
72 | * routines. | |
e7fd4179 DT |
73 | */ |
74 | ||
85f0379a | 75 | dlm_create_root_list(ls); |
e7fd4179 DT |
76 | |
77 | /* | |
78 | * Add or remove nodes from the lockspace's ls_nodes list. | |
79 | * Also waits for all nodes to complete dlm_recover_members. | |
80 | */ | |
81 | ||
82 | error = dlm_recover_members(ls, rv, &neg); | |
83 | if (error) { | |
8ec68867 | 84 | log_debug(ls, "recover_members failed %d", error); |
e7fd4179 DT |
85 | goto fail; |
86 | } | |
87 | start = jiffies; | |
88 | ||
89 | /* | |
90 | * Rebuild our own share of the directory by collecting from all other | |
91 | * nodes their master rsb names that hash to us. | |
92 | */ | |
93 | ||
94 | error = dlm_recover_directory(ls); | |
95 | if (error) { | |
8ec68867 | 96 | log_debug(ls, "recover_directory failed %d", error); |
e7fd4179 DT |
97 | goto fail; |
98 | } | |
99 | ||
e7fd4179 DT |
100 | /* |
101 | * Wait for all nodes to complete directory rebuild. | |
102 | */ | |
103 | ||
104 | error = dlm_recover_directory_wait(ls); | |
105 | if (error) { | |
8ec68867 | 106 | log_debug(ls, "recover_directory_wait failed %d", error); |
e7fd4179 DT |
107 | goto fail; |
108 | } | |
109 | ||
110 | /* | |
111 | * We may have outstanding operations that are waiting for a reply from | |
112 | * a failed node. Mark these to be resent after recovery. Unlock and | |
113 | * cancel ops can just be completed. | |
114 | */ | |
115 | ||
116 | dlm_recover_waiters_pre(ls); | |
117 | ||
118 | error = dlm_recovery_stopped(ls); | |
119 | if (error) | |
120 | goto fail; | |
121 | ||
122 | if (neg || dlm_no_directory(ls)) { | |
123 | /* | |
124 | * Clear lkb's for departed nodes. | |
125 | */ | |
126 | ||
127 | dlm_purge_locks(ls); | |
128 | ||
129 | /* | |
130 | * Get new master nodeid's for rsb's that were mastered on | |
131 | * departed nodes. | |
132 | */ | |
133 | ||
134 | error = dlm_recover_masters(ls); | |
135 | if (error) { | |
8ec68867 | 136 | log_debug(ls, "recover_masters failed %d", error); |
e7fd4179 DT |
137 | goto fail; |
138 | } | |
139 | ||
140 | /* | |
141 | * Send our locks on remastered rsb's to the new masters. | |
142 | */ | |
143 | ||
144 | error = dlm_recover_locks(ls); | |
145 | if (error) { | |
8ec68867 | 146 | log_debug(ls, "recover_locks failed %d", error); |
e7fd4179 DT |
147 | goto fail; |
148 | } | |
149 | ||
150 | error = dlm_recover_locks_wait(ls); | |
151 | if (error) { | |
8ec68867 | 152 | log_debug(ls, "recover_locks_wait failed %d", error); |
e7fd4179 DT |
153 | goto fail; |
154 | } | |
155 | ||
156 | /* | |
157 | * Finalize state in master rsb's now that all locks can be | |
158 | * checked. This includes conversion resolution and lvb | |
159 | * settings. | |
160 | */ | |
161 | ||
162 | dlm_recover_rsbs(ls); | |
91c0dc93 DT |
163 | } else { |
164 | /* | |
165 | * Other lockspace members may be going through the "neg" steps | |
166 | * while also adding us to the lockspace, in which case they'll | |
4b77f2c9 | 167 | * be doing the recover_locks (RS_LOCKS) barrier. |
91c0dc93 DT |
168 | */ |
169 | dlm_set_recover_status(ls, DLM_RS_LOCKS); | |
4b77f2c9 DT |
170 | |
171 | error = dlm_recover_locks_wait(ls); | |
172 | if (error) { | |
8ec68867 | 173 | log_debug(ls, "recover_locks_wait failed %d", error); |
4b77f2c9 DT |
174 | goto fail; |
175 | } | |
e7fd4179 DT |
176 | } |
177 | ||
178 | dlm_release_root_list(ls); | |
179 | ||
2896ee37 DT |
180 | /* |
181 | * Purge directory-related requests that are saved in requestqueue. | |
182 | * All dir requests from before recovery are invalid now due to the dir | |
183 | * rebuild and will be resent by the requesting nodes. | |
184 | */ | |
185 | ||
186 | dlm_purge_requestqueue(ls); | |
187 | ||
e7fd4179 DT |
188 | dlm_set_recover_status(ls, DLM_RS_DONE); |
189 | error = dlm_recover_done_wait(ls); | |
190 | if (error) { | |
8ec68867 | 191 | log_debug(ls, "recover_done_wait failed %d", error); |
e7fd4179 DT |
192 | goto fail; |
193 | } | |
194 | ||
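	/*
	 * Recovery is done on all nodes: drop the saved records of departed
	 * members, adjust lock timeout timestamps for the time spent in
	 * recovery, and let lock callbacks flow again.
	 */
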
	dlm_clear_members_gone(ls);

	dlm_adjust_timeouts(ls);

	dlm_callback_resume(ls);

	error = enable_locking(ls, rv->seq);
	if (error) {
		log_debug(ls, "enable_locking failed %d", error);
		goto fail;
	}

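	/*
	 * Process the messages that were saved on the requestqueue while
	 * locking was stopped.
	 */
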
	error = dlm_process_requestqueue(ls);
	if (error) {
		log_debug(ls, "process_requestqueue failed %d", error);
		goto fail;
	}

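	/*
	 * Resend the outstanding operations that dlm_recover_waiters_pre()
	 * marked above.
	 */
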
	error = dlm_recover_waiters_post(ls);
	if (error) {
		log_debug(ls, "recover_waiters_post failed %d", error);
		goto fail;
	}

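	/*
	 * Locks that were blocked behind locks purged from departed nodes
	 * may now be grantable.
	 */
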
	dlm_grant_after_purge(ls);

	log_debug(ls, "recover %llx done: %u ms",
		  (unsigned long long)rv->seq,
		  jiffies_to_msecs(jiffies - start));
	mutex_unlock(&ls->ls_recoverd_active);

	return 0;

 fail:
	dlm_release_root_list(ls);
	log_debug(ls, "recover %llx error %d",
		  (unsigned long long)rv->seq, error);
	mutex_unlock(&ls->ls_recoverd_active);
	return error;
}

/* The dlm_ls_start() that created the rv we take here may already have been
   stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP
   flag set. */

static void do_ls_recovery(struct dlm_ls *ls)
{
	struct dlm_recover *rv = NULL;

	spin_lock(&ls->ls_recover_lock);
	rv = ls->ls_recover_args;
	ls->ls_recover_args = NULL;
	if (rv && ls->ls_recover_seq == rv->seq)
		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
	spin_unlock(&ls->ls_recover_lock);

	if (rv) {
		ls_recover(ls, rv);
		kfree(rv->nodeids);
		kfree(rv->new);
		kfree(rv);
	}
}

static int dlm_recoverd(void *arg)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(arg);
	if (!ls) {
		log_print("dlm_recoverd: no lockspace %p", arg);
		return -1;
	}

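	/*
	 * Sleep until dlm_recoverd_kick() sets LSFL_WORK and wakes us, then
	 * run one recovery pass per kick until the lockspace is torn down.
	 */
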
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(LSFL_WORK, &ls->ls_flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
			do_ls_recovery(ls);
	}

	dlm_put_lockspace(ls);
	return 0;
}

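/* Called when the lockspace needs recovery: flag the work and wake the
   per-lockspace dlm_recoverd thread. */
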
void dlm_recoverd_kick(struct dlm_ls *ls)
{
	set_bit(LSFL_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
}

int dlm_recoverd_start(struct dlm_ls *ls)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_recoverd, ls, "dlm_recoverd");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		ls->ls_recoverd_task = p;
	return error;
}

void dlm_recoverd_stop(struct dlm_ls *ls)
{
	kthread_stop(ls->ls_recoverd_task);
}

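/* Wake anything sleeping on ls_wait_general, then hold ls_recoverd_active so
   dlm_recoverd cannot run another recovery pass until dlm_recoverd_resume()
   releases it. */
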
void dlm_recoverd_suspend(struct dlm_ls *ls)
{
	wake_up(&ls->ls_wait_general);
	mutex_lock(&ls->ls_recoverd_active);
}

void dlm_recoverd_resume(struct dlm_ls *ls)
{
	mutex_unlock(&ls->ls_recoverd_active);
}