Diffstat (limited to 'fs/dlm/recover.c')
-rw-r--r--   fs/dlm/recover.c   765
1 file changed, 765 insertions, 0 deletions
diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
new file mode 100644
index 000000000000..a5e6d184872e
--- /dev/null
+++ b/fs/dlm/recover.c
@@ -0,0 +1,765 @@
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "dir.h"
#include "config.h"
#include "ast.h"
#include "memory.h"
#include "rcom.h"
#include "lock.h"
#include "lowcomms.h"
#include "member.h"
#include "recover.h"

/*
 * Recovery waiting routines: these functions wait for a particular reply from
 * a remote node, or for the remote node to report a certain status.  They
 * need to abort if the lockspace is stopped, indicating that a node has
 * failed (perhaps the one being waited for).
 */

/*
 * Wait until the given function returns non-zero or the lockspace is stopped
 * (LS_RECOVERY_STOP set due to failure of a node in ls_nodes).  When another
 * function thinks it could have completed the waited-on task, it should wake
 * up ls_wait_general to get an immediate response rather than waiting for the
 * timer to detect the result.  A timer wakes us up periodically while waiting
 * to see if we should abort due to a node failure.  This should only be
 * called by the dlm_recoverd thread.
 */

static void dlm_wait_timer_fn(unsigned long data)
{
        struct dlm_ls *ls = (struct dlm_ls *) data;
        mod_timer(&ls->ls_timer, jiffies + (dlm_config.recover_timer * HZ));
        wake_up(&ls->ls_wait_general);
}

int dlm_wait_function(struct dlm_ls *ls, int (*testfn) (struct dlm_ls *ls))
{
        int error = 0;

        init_timer(&ls->ls_timer);
        ls->ls_timer.function = dlm_wait_timer_fn;
        ls->ls_timer.data = (long) ls;
        ls->ls_timer.expires = jiffies + (dlm_config.recover_timer * HZ);
        add_timer(&ls->ls_timer);

        wait_event(ls->ls_wait_general, testfn(ls) || dlm_recovery_stopped(ls));
        del_timer_sync(&ls->ls_timer);

        if (dlm_recovery_stopped(ls)) {
                log_debug(ls, "dlm_wait_function aborted");
                error = -EINTR;
        }
        return error;
}
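
/*
 * Illustrative sketch, not part of the original file: dlm_recoverd uses this
 * helper with a predicate, e.g.
 *
 *      error = dlm_wait_function(ls, &recover_list_empty);
 *
 * The timer callback above only re-arms itself and wakes ls_wait_general, so
 * the wait_event() condition (testfn or dlm_recovery_stopped) is re-checked
 * at least once every recover_timer seconds even without an explicit wake_up.
 */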

/*
 * An efficient way for all nodes to wait for all others to have a certain
 * status.  The node with the lowest nodeid polls all the others for their
 * status (wait_status_all) and all the others poll the node with the low id
 * for its accumulated result (wait_status_low).  When all nodes have set
 * status flag X, then status flag X_ALL will be set on the low nodeid.
 */

uint32_t dlm_recover_status(struct dlm_ls *ls)
{
        uint32_t status;
        spin_lock(&ls->ls_recover_lock);
        status = ls->ls_recover_status;
        spin_unlock(&ls->ls_recover_lock);
        return status;
}

void dlm_set_recover_status(struct dlm_ls *ls, uint32_t status)
{
        spin_lock(&ls->ls_recover_lock);
        ls->ls_recover_status |= status;
        spin_unlock(&ls->ls_recover_lock);
}

static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status)
{
        struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
        struct dlm_member *memb;
        int error = 0, delay;

        list_for_each_entry(memb, &ls->ls_nodes, list) {
                delay = 0;
                for (;;) {
                        if (dlm_recovery_stopped(ls)) {
                                error = -EINTR;
                                goto out;
                        }

                        error = dlm_rcom_status(ls, memb->nodeid);
                        if (error)
                                goto out;

                        if (rc->rc_result & wait_status)
                                break;
                        if (delay < 1000)
                                delay += 20;
                        msleep(delay);
                }
        }
 out:
        return error;
}

static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status)
{
        struct dlm_rcom *rc = (struct dlm_rcom *) ls->ls_recover_buf;
        int error = 0, delay = 0, nodeid = ls->ls_low_nodeid;

        for (;;) {
                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        goto out;
                }

                error = dlm_rcom_status(ls, nodeid);
                if (error)
                        break;

                if (rc->rc_result & wait_status)
                        break;
                if (delay < 1000)
                        delay += 20;
                msleep(delay);
        }
 out:
        return error;
}

static int wait_status(struct dlm_ls *ls, uint32_t status)
{
        uint32_t status_all = status << 1;
        int error;

        if (ls->ls_low_nodeid == dlm_our_nodeid()) {
                error = wait_status_all(ls, status);
                if (!error)
                        dlm_set_recover_status(ls, status_all);
        } else
                error = wait_status_low(ls, status_all);

        return error;
}
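
/*
 * Illustrative sketch, not part of the original file: the "status << 1"
 * trick above assumes each X_ALL flag is the bit directly above X in
 * ls_recover_status, along these lines (exact values are an assumption):
 *
 *      #define DLM_RS_NODES            0x00000001
 *      #define DLM_RS_NODES_ALL        0x00000002
 *      #define DLM_RS_DIR              0x00000004
 *      #define DLM_RS_DIR_ALL          0x00000008
 *      #define DLM_RS_LOCKS            0x00000010
 *      #define DLM_RS_LOCKS_ALL        0x00000020
 *      #define DLM_RS_DONE             0x00000040
 *      #define DLM_RS_DONE_ALL         0x00000080
 *
 * The low nodeid sets X_ALL once every member reports X; the other nodes
 * poll the low nodeid until X_ALL appears in rc_result.
 */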

int dlm_recover_members_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_NODES);
}

int dlm_recover_directory_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DIR);
}

int dlm_recover_locks_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_LOCKS);
}

int dlm_recover_done_wait(struct dlm_ls *ls)
{
        return wait_status(ls, DLM_RS_DONE);
}

/*
 * The recover_list contains all the rsb's for which we've requested the new
 * master nodeid.  As replies are returned from the resource directories the
 * rsb's are removed from the list.  When the list is empty we're done.
 *
 * The recover_list is later similarly used for all rsb's for which we've sent
 * new lkb's and need to receive new corresponding lkid's.
 *
 * We use the address of the rsb struct as a simple local identifier for the
 * rsb so we can match an rcom reply with the rsb it was sent for.
 */

static int recover_list_empty(struct dlm_ls *ls)
{
        int empty;

        spin_lock(&ls->ls_recover_list_lock);
        empty = list_empty(&ls->ls_recover_list);
        spin_unlock(&ls->ls_recover_list_lock);

        return empty;
}

static void recover_list_add(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        if (list_empty(&r->res_recover_list)) {
                list_add_tail(&r->res_recover_list, &ls->ls_recover_list);
                ls->ls_recover_list_count++;
                dlm_hold_rsb(r);
        }
        spin_unlock(&ls->ls_recover_list_lock);
}

static void recover_list_del(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;

        spin_lock(&ls->ls_recover_list_lock);
        list_del_init(&r->res_recover_list);
        ls->ls_recover_list_count--;
        spin_unlock(&ls->ls_recover_list_lock);

        dlm_put_rsb(r);
}

static struct dlm_rsb *recover_list_find(struct dlm_ls *ls, uint64_t id)
{
        struct dlm_rsb *r = NULL;

        spin_lock(&ls->ls_recover_list_lock);

        list_for_each_entry(r, &ls->ls_recover_list, res_recover_list) {
                if (id == (unsigned long) r)
                        goto out;
        }
        r = NULL;
 out:
        spin_unlock(&ls->ls_recover_list_lock);
        return r;
}
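
/*
 * Illustrative sketch, not part of the original file: matching replies by
 * rsb address presumably means dlm_send_rcom_lookup() stamps the request
 * with something like
 *
 *      rc->rc_id = (unsigned long) r;
 *
 * so that recover_list_find() can recover the rsb from the reply's rc_id
 * with the "id == (unsigned long) r" comparison above.
 */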

static void recover_list_clear(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *s;

        spin_lock(&ls->ls_recover_list_lock);
        list_for_each_entry_safe(r, s, &ls->ls_recover_list, res_recover_list) {
                list_del_init(&r->res_recover_list);
                dlm_put_rsb(r);
                ls->ls_recover_list_count--;
        }

        if (ls->ls_recover_list_count != 0) {
                log_error(ls, "warning: recover_list_count %d",
                          ls->ls_recover_list_count);
                ls->ls_recover_list_count = 0;
        }
        spin_unlock(&ls->ls_recover_list_lock);
}


/* Master recovery: find new master node for rsb's that were
   mastered on nodes that have been removed.

   dlm_recover_masters
   recover_master
   dlm_send_rcom_lookup            ->  receive_rcom_lookup
                                       dlm_dir_lookup
   receive_rcom_lookup_reply       <-
   dlm_recover_master_reply
   set_new_master
   set_master_lkbs
   set_lock_master
*/

/*
 * Set the lock master for all LKBs in a lock queue.
 * If we are the new master of the rsb, we may have received new
 * MSTCPY locks from other nodes already which we need to ignore
 * when setting the new nodeid.
 */

static void set_lock_master(struct list_head *queue, int nodeid)
{
        struct dlm_lkb *lkb;

        list_for_each_entry(lkb, queue, lkb_statequeue)
                if (!(lkb->lkb_flags & DLM_IFL_MSTCPY))
                        lkb->lkb_nodeid = nodeid;
}

static void set_master_lkbs(struct dlm_rsb *r)
{
        set_lock_master(&r->res_grantqueue, r->res_nodeid);
        set_lock_master(&r->res_convertqueue, r->res_nodeid);
        set_lock_master(&r->res_waitqueue, r->res_nodeid);
}

/*
 * Propagate the new master nodeid to locks.
 * The NEW_MASTER flag tells dlm_recover_locks() which rsb's to consider.
 * The NEW_MASTER2 flag tells recover_lvb() and set_locks_purged() which
 * rsb's to consider.
 */

static void set_new_master(struct dlm_rsb *r, int nodeid)
{
        lock_rsb(r);
        r->res_nodeid = nodeid;
        set_master_lkbs(r);
        rsb_set_flag(r, RSB_NEW_MASTER);
        rsb_set_flag(r, RSB_NEW_MASTER2);
        unlock_rsb(r);
}

/*
 * We do async lookups on rsb's that need new masters.  The rsb's
 * waiting for a lookup reply are kept on the recover_list.
 */

static int recover_master(struct dlm_rsb *r)
{
        struct dlm_ls *ls = r->res_ls;
        int error, dir_nodeid, ret_nodeid, our_nodeid = dlm_our_nodeid();

        dir_nodeid = dlm_dir_nodeid(r);

        if (dir_nodeid == our_nodeid) {
                error = dlm_dir_lookup(ls, our_nodeid, r->res_name,
                                       r->res_length, &ret_nodeid);
                if (error)
                        log_error(ls, "recover dir lookup error %d", error);

                if (ret_nodeid == our_nodeid)
                        ret_nodeid = 0;
                set_new_master(r, ret_nodeid);
        } else {
                recover_list_add(r);
                error = dlm_send_rcom_lookup(r, dir_nodeid);
        }

        return error;
}

/*
 * When not using a directory, most resource names will hash to a new static
 * master nodeid and the resource will need to be remastered.
 */

static int recover_master_static(struct dlm_rsb *r)
{
        int master = dlm_dir_nodeid(r);

        if (master == dlm_our_nodeid())
                master = 0;

        if (r->res_nodeid != master) {
                if (is_master(r))
                        dlm_purge_mstcpy_locks(r);
                set_new_master(r, master);
                return 1;
        }
        return 0;
}

/*
 * Go through local root resources and, for each rsb whose master has
 * departed, get the new master nodeid from the directory.  The dir will
 * assign mastery to the first node to look up the new master.  That means
 * we'll discover in this lookup if we're the new master of any rsb's.
 *
 * We fire off all the dir lookup requests individually and asynchronously to
 * the correct dir node.
 */

int dlm_recover_masters(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error = 0, count = 0;

        log_debug(ls, "dlm_recover_masters");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (dlm_recovery_stopped(ls)) {
                        up_read(&ls->ls_root_sem);
                        error = -EINTR;
                        goto out;
                }

                if (dlm_no_directory(ls))
                        count += recover_master_static(r);
                else if (!is_master(r) && dlm_is_removed(ls, r->res_nodeid)) {
                        recover_master(r);
                        count++;
                }

                schedule();
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_masters %d resources", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        return error;
}

int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
{
        struct dlm_rsb *r;
        int nodeid;

        r = recover_list_find(ls, rc->rc_id);
        if (!r) {
                log_error(ls, "dlm_recover_master_reply no id %llx",
                          (unsigned long long)rc->rc_id);
                goto out;
        }

        nodeid = rc->rc_result;
        if (nodeid == dlm_our_nodeid())
                nodeid = 0;

        set_new_master(r, nodeid);
        recover_list_del(r);

        if (recover_list_empty(ls))
                wake_up(&ls->ls_wait_general);
 out:
        return 0;
}


/* Lock recovery: rebuild the process-copy locks we hold on a
   remastered rsb on the new rsb master.

   dlm_recover_locks
   recover_locks
   recover_locks_queue
   dlm_send_rcom_lock              ->  receive_rcom_lock
                                       dlm_recover_master_copy
   receive_rcom_lock_reply         <-
   dlm_recover_process_copy
*/

/*
 * Keep a count of the number of lkb's we send to the new master; when we
 * receive an equal number of replies, recovery for the rsb is done.
 */

static int recover_locks_queue(struct dlm_rsb *r, struct list_head *head)
{
        struct dlm_lkb *lkb;
        int error = 0;

        list_for_each_entry(lkb, head, lkb_statequeue) {
                error = dlm_send_rcom_lock(r, lkb);
                if (error)
                        break;
                r->res_recover_locks_count++;
        }

        return error;
}

static int recover_locks(struct dlm_rsb *r)
{
        int error = 0;

        lock_rsb(r);

        DLM_ASSERT(!r->res_recover_locks_count, dlm_dump_rsb(r););

        error = recover_locks_queue(r, &r->res_grantqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_convertqueue);
        if (error)
                goto out;
        error = recover_locks_queue(r, &r->res_waitqueue);
        if (error)
                goto out;

        if (r->res_recover_locks_count)
                recover_list_add(r);
        else
                rsb_clear_flag(r, RSB_NEW_MASTER);
 out:
        unlock_rsb(r);
        return error;
}

int dlm_recover_locks(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int error, count = 0;

        log_debug(ls, "dlm_recover_locks");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                if (is_master(r)) {
                        rsb_clear_flag(r, RSB_NEW_MASTER);
                        continue;
                }

                if (!rsb_flag(r, RSB_NEW_MASTER))
                        continue;

                if (dlm_recovery_stopped(ls)) {
                        error = -EINTR;
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                error = recover_locks(r);
                if (error) {
                        up_read(&ls->ls_root_sem);
                        goto out;
                }

                count += r->res_recover_locks_count;
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_locks %d locks", count);

        error = dlm_wait_function(ls, &recover_list_empty);
 out:
        if (error)
                recover_list_clear(ls);
        else
                dlm_set_recover_status(ls, DLM_RS_LOCKS);
        return error;
}

void dlm_recovered_lock(struct dlm_rsb *r)
{
        DLM_ASSERT(rsb_flag(r, RSB_NEW_MASTER), dlm_dump_rsb(r););

        r->res_recover_locks_count--;
        if (!r->res_recover_locks_count) {
                rsb_clear_flag(r, RSB_NEW_MASTER);
                recover_list_del(r);
        }

        if (recover_list_empty(r->res_ls))
                wake_up(&r->res_ls->ls_wait_general);
}

/*
 * The lvb needs to be recovered on all master rsb's.  This includes setting
 * the VALNOTVALID flag if necessary, and determining the correct lvb contents
 * based on the lvb's of the locks held on the rsb.
 *
 * RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb.  If it
 * was already set prior to recovery, it's not cleared, regardless of locks.
 *
 * The LVB contents are only considered for changing when this is a new master
 * of the rsb (NEW_MASTER2).  Then, the rsb's lvb is taken from any lkb with
 * mode > CR.  If no lkb's exist with mode above CR, the lvb contents are
 * taken from the lkb with the largest lvb sequence number.
 */

static void recover_lvb(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb, *high_lkb = NULL;
        uint32_t high_seq = 0;
        int lock_lvb_exists = 0;
        int big_lock_exists = 0;
        int lvblen = r->res_ls->ls_lvblen;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
                        continue;

                lock_lvb_exists = 1;

                if (lkb->lkb_grmode > DLM_LOCK_CR) {
                        big_lock_exists = 1;
                        goto setflag;
                }

                if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
                        high_lkb = lkb;
                        high_seq = lkb->lkb_lvbseq;
                }
        }

 setflag:
        if (!lock_lvb_exists)
                goto out;

        if (!big_lock_exists)
                rsb_set_flag(r, RSB_VALNOTVALID);

        /* don't mess with the lvb unless we're the new master */
        if (!rsb_flag(r, RSB_NEW_MASTER2))
                goto out;

        if (!r->res_lvbptr) {
                r->res_lvbptr = allocate_lvb(r->res_ls);
                if (!r->res_lvbptr)
                        goto out;
        }

        if (big_lock_exists) {
                r->res_lvbseq = lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
        } else if (high_lkb) {
                r->res_lvbseq = high_lkb->lkb_lvbseq;
                memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);
        } else {
                r->res_lvbseq = 0;
                memset(r->res_lvbptr, 0, lvblen);
        }
 out:
        return;
}
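
/*
 * Illustrative note, not part of the original file: the signed subtraction
 * "((int)lkb->lkb_lvbseq - (int)high_seq) >= 0" above is a wraparound-safe
 * sequence comparison.  For example:
 *
 *      uint32_t a = 0x00000002, b = 0xfffffffe;
 *      ((int)a - (int)b) >= 0;         // true: a is 4 steps after b,
 *                                      // even though a < b numerically
 *
 * so an lvbseq that recently wrapped still beats an older, larger value.
 */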

/* All master rsb's flagged RECOVER_CONVERT need to be looked at.  The locks
   converting PR->CW or CW->PR need to have their lkb_grmode set. */

static void recover_conversion(struct dlm_rsb *r)
{
        struct dlm_lkb *lkb;
        int grmode = -1;

        list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
                if (lkb->lkb_grmode == DLM_LOCK_PR ||
                    lkb->lkb_grmode == DLM_LOCK_CW) {
                        grmode = lkb->lkb_grmode;
                        break;
                }
        }

        list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
                if (lkb->lkb_grmode != DLM_LOCK_IV)
                        continue;
                if (grmode == -1)
                        lkb->lkb_grmode = lkb->lkb_rqmode;
                else
                        lkb->lkb_grmode = grmode;
        }
}
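
/*
 * Illustrative note, not part of the original file: PR and CW are mutually
 * incompatible modes, so while any lock on the grant queue holds PR or CW,
 * a lock converting between those two modes cannot be granted and its lost
 * grmode (seen here as DLM_LOCK_IV) must equal the mode already granted on
 * the rsb; with no PR/CW lock granted, the requested mode is the fallback.
 */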

/* We've become the new master for this rsb and waiting/converting locks may
   need to be granted in dlm_grant_after_purge() because locks from a removed
   node may have been purged. */

static void set_locks_purged(struct dlm_rsb *r)
{
        if (!list_empty(&r->res_waitqueue) || !list_empty(&r->res_convertqueue))
                rsb_set_flag(r, RSB_LOCKS_PURGED);
}

void dlm_recover_rsbs(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int count = 0;

        log_debug(ls, "dlm_recover_rsbs");

        down_read(&ls->ls_root_sem);
        list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
                lock_rsb(r);
                if (is_master(r)) {
                        if (rsb_flag(r, RSB_RECOVER_CONVERT))
                                recover_conversion(r);
                        if (rsb_flag(r, RSB_NEW_MASTER2))
                                set_locks_purged(r);
                        recover_lvb(r);
                        count++;
                }
                rsb_clear_flag(r, RSB_RECOVER_CONVERT);
                rsb_clear_flag(r, RSB_NEW_MASTER2);
                unlock_rsb(r);
        }
        up_read(&ls->ls_root_sem);

        log_debug(ls, "dlm_recover_rsbs %d rsbs", count);
}

/* Create a single list of all root rsb's to be used during recovery */

int dlm_create_root_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r;
        int i, error = 0;

        down_write(&ls->ls_root_sem);
        if (!list_empty(&ls->ls_root_list)) {
                log_error(ls, "root list not empty");
                error = -EINVAL;
                goto out;
        }

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                read_lock(&ls->ls_rsbtbl[i].lock);
                list_for_each_entry(r, &ls->ls_rsbtbl[i].list, res_hashchain) {
                        list_add(&r->res_root_list, &ls->ls_root_list);
                        dlm_hold_rsb(r);
                }
                read_unlock(&ls->ls_rsbtbl[i].lock);
        }
 out:
        up_write(&ls->ls_root_sem);
        return error;
}

void dlm_release_root_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *safe;

        down_write(&ls->ls_root_sem);
        list_for_each_entry_safe(r, safe, &ls->ls_root_list, res_root_list) {
                list_del_init(&r->res_root_list);
                dlm_put_rsb(r);
        }
        up_write(&ls->ls_root_sem);
}

void dlm_clear_toss_list(struct dlm_ls *ls)
{
        struct dlm_rsb *r, *safe;
        int i;

        for (i = 0; i < ls->ls_rsbtbl_size; i++) {
                write_lock(&ls->ls_rsbtbl[i].lock);
                list_for_each_entry_safe(r, safe, &ls->ls_rsbtbl[i].toss,
                                         res_hashchain) {
                        list_del(&r->res_hashchain);
                        free_rsb(r);
                }
                write_unlock(&ls->ls_rsbtbl[i].lock);
        }
}