-rw-r--r--   fs/ocfs2/dlm/dlmlock.c     14
-rw-r--r--   fs/ocfs2/dlm/dlmthread.c   14
2 files changed, 23 insertions, 5 deletions
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 675123c30852..0ff934874942 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -227,14 +227,18 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
         lock->lock_pending = 0;
         if (status != DLM_NORMAL) {
-                if (status != DLM_NOTQUEUED)
+                if (status != DLM_NOTQUEUED) {
+                        /*
+                         * DO NOT call calc_usage, as this would unhash
+                         * the remote lockres before we ever get to use
+                         * it. treat as if we never made any change to
+                         * the lockres.
+                         */
+                        lockres_changed = 0;
                         dlm_error(status);
+                }
                 dlm_revert_pending_lock(res, lock);
                 dlm_lock_put(lock);
-                /* do NOT call calc_usage, as this would unhash the remote
-                 * lockres before we ever get to use it. treat as if we
-                 * never made any change to the lockres. */
-                lockres_changed = 0;
         } else if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                 /* special case for the $RECOVERY lock.
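Note on the dlmlock.c hunk, as read from the diff alone: after this change, lockres_changed is cleared only when the remote lock attempt fails with something other than DLM_NOTQUEUED, so a DLM_NOTQUEUED failure still allows the caller to recalculate usage on the lockres, while any other error leaves the lockres untouched so it is not unhashed before it can be reused. A minimal userspace sketch of that decision follows; the status values are stand-ins for illustration only, not the real ocfs2 definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in status codes; the real ones live in ocfs2's DLM headers. */
enum dlm_status { DLM_NORMAL, DLM_NOTQUEUED, DLM_BADARGS };

/*
 * Mirrors the post-patch error path in dlmlock_remote(): usage is
 * recalculated (lockres_changed stays set) only for DLM_NORMAL and
 * DLM_NOTQUEUED; any other error skips calc_usage so the remote
 * lockres is not unhashed prematurely.
 */
static bool recalc_usage_after_remote_lock(enum dlm_status status)
{
        bool lockres_changed = true;

        if (status != DLM_NORMAL) {
                if (status != DLM_NOTQUEUED)
                        lockres_changed = false;
        }
        return lockres_changed;
}

int main(void)
{
        printf("DLM_NORMAL    -> %d\n", recalc_usage_after_remote_lock(DLM_NORMAL));
        printf("DLM_NOTQUEUED -> %d\n", recalc_usage_after_remote_lock(DLM_NOTQUEUED));
        printf("DLM_BADARGS   -> %d\n", recalc_usage_after_remote_lock(DLM_BADARGS));
        return 0;
}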
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 610dc76a851b..c1c10fd1a5a7 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -106,6 +106,20 @@ void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
         assert_spin_locked(&res->spinlock);
 
         if (__dlm_lockres_unused(res)){
+                /* For now, just keep any resource we master */
+                if (res->owner == dlm->node_num)
+                {
+                        if (!list_empty(&res->purge)) {
+                                mlog(0, "we master %s:%.*s, but it is on "
+                                     "the purge list. Removing\n",
+                                     dlm->name, res->lockname.len,
+                                     res->lockname.name);
+                                list_del_init(&res->purge);
+                                dlm->purge_count--;
+                        }
+                        return;
+                }
+
                 if (list_empty(&res->purge)) {
                         mlog(0, "putting lockres %.*s from purge list\n",
                              res->lockname.len, res->lockname.name);
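The dlmthread.c hunk keeps any lockres that the local node masters: if such a resource was already queued for purging, it is pulled back off the purge list with list_del_init() and the purge count is dropped before the function returns. Below is a small userspace sketch of why list_del_init() (rather than a plain unlink) fits this pattern: it reinitializes the node so a later list_empty(&res->purge) test, like the one used in __dlm_lockres_calc_usage(), again reports "not queued". The list helpers here are simplified stand-ins for the kernel's <linux/list.h>, written for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

/*
 * Unlink the entry and point it back at itself, so a later
 * list_empty(&entry) on this node returns true ("not on any list").
 */
static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);
}

int main(void)
{
        struct list_head purge_list, res_purge;

        INIT_LIST_HEAD(&purge_list);
        INIT_LIST_HEAD(&res_purge);

        list_add_tail(&res_purge, &purge_list);   /* lockres queued for purge */
        printf("queued:  list_empty = %d\n", list_empty(&res_purge));

        list_del_init(&res_purge);                /* mastered locally: take it back off */
        printf("removed: list_empty = %d\n", list_empty(&res_purge));
        return 0;
}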