author    Kurt Hackel <kurt.hackel@oracle.com>    2006-05-01 16:47:50 -0400
committer Mark Fasheh <mark.fasheh@oracle.com>    2006-06-26 17:43:08 -0400
commit    c8df412e1c746dd21094966d04b3a79aad0f4d08 (patch)
tree      b662c0c5b9ff4e5a607d53adb92a8c38acce940e /fs/ocfs2/dlm/dlmlock.c
parent    36407488b1cbc4d84bc2bd14e03f3f9b768090d9 (diff)
ocfs2: special case recovery lock in dlmlock_remote()
If the previous master of the recovery lock dies, let calc_usage take it
down completely and let the caller completely redo the dlmlock() call.
Otherwise, there will never be an opportunity to re-master the lockres and
recovery won't be able to progress.
Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
Diffstat (limited to 'fs/ocfs2/dlm/dlmlock.c')
-rw-r--r--    fs/ocfs2/dlm/dlmlock.c    33
1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 0ff934874942..20b38dc18736 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -227,7 +227,16 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
 	res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
 	lock->lock_pending = 0;
 	if (status != DLM_NORMAL) {
-		if (status != DLM_NOTQUEUED) {
+		if (status == DLM_RECOVERING &&
+		    dlm_is_recovery_lock(res->lockname.name,
+					 res->lockname.len)) {
+			/* recovery lock was mastered by dead node.
+			 * we need to have calc_usage shoot down this
+			 * lockres and completely remaster it. */
+			mlog(0, "%s: recovery lock was owned by "
+			     "dead node %u, remaster it now.\n",
+			     dlm->name, res->owner);
+		} else if (status != DLM_NOTQUEUED) {
 			/*
 			 * DO NOT call calc_usage, as this would unhash
 			 * the remote lockres before we ever get to use
@@ -691,18 +700,22 @@ retry_lock:
 			msleep(100);
 			/* no waiting for dlm_reco_thread */
 			if (recovery) {
-				if (status == DLM_RECOVERING) {
-					mlog(0, "%s: got RECOVERING "
-					     "for $REOCVERY lock, master "
-					     "was %u\n", dlm->name,
-					     res->owner);
-					dlm_wait_for_node_death(dlm, res->owner,
-							DLM_NODE_DEATH_WAIT_MAX);
-				}
+				if (status != DLM_RECOVERING)
+					goto retry_lock;
+
+				mlog(0, "%s: got RECOVERING "
+				     "for $RECOVERY lock, master "
+				     "was %u\n", dlm->name,
+				     res->owner);
+				/* wait to see the node go down, then
+				 * drop down and allow the lockres to
+				 * get cleaned up. need to remaster. */
+				dlm_wait_for_node_death(dlm, res->owner,
+						DLM_NODE_DEATH_WAIT_MAX);
 			} else {
 				dlm_wait_for_recovery(dlm);
+				goto retry_lock;
 			}
-			goto retry_lock;
 		}
 
 		if (status != DLM_NORMAL) {
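
As a companion to the commit message, here is a minimal, self-contained sketch of the control flow this patch establishes. It is ordinary user-space C, not kernel code: dlm_lock_stub() and dlmlock_remote_stub() are made-up stand-ins, while the DLM_* status values and the "tear down and redo dlmlock()" behavior come from the patch itself. The point it illustrates: when the recovery lock's master dies, the lock path no longer loops in place against the stale lockres; it waits for the dead node, returns so calc_usage can shoot the lockres down, and the caller redoes the whole dlmlock() call, which re-masters the lock.

/* Illustrative model only; simplified stand-ins for the ocfs2 DLM retry
 * path touched by this patch, not the actual kernel implementation. */
#include <stdio.h>

enum dlm_status { DLM_NORMAL, DLM_RECOVERING, DLM_NOTQUEUED };

/* Stub remote attempt: the first try hits the dead recovery-lock master;
 * the retry, made after the lockres has been re-mastered, succeeds. */
static enum dlm_status dlmlock_remote_stub(int *attempts)
{
	return (*attempts)++ == 0 ? DLM_RECOVERING : DLM_NORMAL;
}

/* Rough analogue of the $RECOVERY-lock path in dlm_lock() after this patch. */
static enum dlm_status dlm_lock_stub(int *attempts)
{
	enum dlm_status status;

retry_lock:
	status = dlmlock_remote_stub(attempts);
	if (status != DLM_NORMAL) {
		if (status != DLM_RECOVERING)
			goto retry_lock;	/* transient failure: retry in place */
		/* master died: after waiting for the node to be confirmed
		 * down, return instead of retrying, so the stale lockres can
		 * be cleaned up and completely re-mastered on the next call. */
		printf("recovery lock master died; dropping out for remaster\n");
	}
	return status;
}

int main(void)
{
	int attempts = 0;
	enum dlm_status status;

	/* Caller-level redo of the whole dlmlock() call, as described in
	 * the commit message. */
	do {
		status = dlm_lock_stub(&attempts);
	} while (status != DLM_NORMAL);

	printf("recovery lock acquired after %d attempt(s)\n", attempts);
	return 0;
}

Under the old code, the DLM_RECOVERING case would loop back to retry_lock against the same lockres still owned by the dead node, so the re-master never happened; the sketch's "return and redo" branch is the behavior the patch introduces.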