author | David Teigland <teigland@redhat.com> | 2012-08-02 12:08:21 -0400
committer | David Teigland <teigland@redhat.com> | 2012-08-08 12:33:49 -0400
commit | 475f230c6072fb2186f48b23943afcd0ee3a8343 (patch)
tree | 42c7979e644138ed93f30f2cd8cf2c33bb849078 /fs/dlm/recoverd.c
parent | 6ad2291624824c1de19dbbbbb6d4f9f601b60781 (diff)
dlm: fix unlock balance warnings
The in_recovery rw_semaphore has always been acquired and
released by different threads by design. To work around
the "BUG: bad unlock balance detected!" messages, adjust
things so the dlm_recoverd thread always does both down_write
and up_write.
Signed-off-by: David Teigland <teigland@redhat.com>
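For context, the "bad unlock balance" message is lockdep complaining when up_write() runs in a task other than the one that did the matching down_write(). The patch avoids that by having other threads only set a request bit and wake dlm_recoverd, which then performs both the down_write and the eventual up_write itself. Below is a minimal sketch of that pattern; it is not dlm code, and the struct, flag names, and helpers are invented for illustration (it assumes the rw_semaphore was set up with init_rwsem() and the task pointer filled in by kthread_run()):

```c
/*
 * Illustrative sketch only (not dlm code): one kernel thread "owns" a
 * rw_semaphore.  Other threads never call down_write()/up_write()
 * themselves; they set a request bit and wake the owner, so acquire
 * and release always happen in the same task and lockdep's
 * unlock-balance check stays happy.
 */
#include <linux/bitops.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

struct owner_ctx {			/* hypothetical example struct */
	struct rw_semaphore sem;	/* assumed initialized with init_rwsem() */
	unsigned long flags;
	struct task_struct *task;	/* assumed set by kthread_run() */
};

#define FL_TAKE_SEM	0		/* request: owner should down_write() */
#define FL_SEM_HELD	1		/* state: owner currently holds sem */

/* called from any thread instead of calling down_write(&ctx->sem) directly */
static void request_sem(struct owner_ctx *ctx)
{
	set_bit(FL_TAKE_SEM, &ctx->flags);
	wake_up_process(ctx->task);
}

static int owner_thread(void *arg)
{
	struct owner_ctx *ctx = arg;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!test_bit(FL_TAKE_SEM, &ctx->flags))
			schedule();
		set_current_state(TASK_RUNNING);

		if (test_and_clear_bit(FL_TAKE_SEM, &ctx->flags)) {
			/* acquire and release in this thread only */
			down_write(&ctx->sem);
			set_bit(FL_SEM_HELD, &ctx->flags);
			/* ... critical work ... */
			up_write(&ctx->sem);
			clear_bit(FL_SEM_HELD, &ctx->flags);
		}
	}

	/* if we still hold the semaphore at shutdown, release it here */
	if (test_and_clear_bit(FL_SEM_HELD, &ctx->flags))
		up_write(&ctx->sem);

	return 0;
}
```

In the patch itself, LSFL_RECOVER_DOWN plays the "request" role, LSFL_RECOVER_LOCK tracks whether dlm_recoverd currently holds ls_in_recovery (so it knows whether to up_write() on exit), and the old LSFL_WORK wakeup becomes LSFL_RECOVER_WORK.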
Diffstat (limited to 'fs/dlm/recoverd.c')
-rw-r--r-- | fs/dlm/recoverd.c | 27
1 file changed, 18 insertions, 9 deletions
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index 88ce65ff021e..32f9f8926ec3 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -41,6 +41,7 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 		set_bit(LSFL_RUNNING, &ls->ls_flags);
 		/* unblocks processes waiting to enter the dlm */
 		up_write(&ls->ls_in_recovery);
+		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
 		error = 0;
 	}
 	spin_unlock(&ls->ls_recover_lock);
@@ -262,7 +263,7 @@ static void do_ls_recovery(struct dlm_ls *ls)
 	rv = ls->ls_recover_args;
 	ls->ls_recover_args = NULL;
 	if (rv && ls->ls_recover_seq == rv->seq)
-		clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags);
+		clear_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
 	spin_unlock(&ls->ls_recover_lock);
 
 	if (rv) {
@@ -282,26 +283,34 @@ static int dlm_recoverd(void *arg)
 		return -1;
 	}
 
+	down_write(&ls->ls_in_recovery);
+	set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
+	wake_up(&ls->ls_recover_lock_wait);
+
 	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!test_bit(LSFL_WORK, &ls->ls_flags))
+		if (!test_bit(LSFL_RECOVER_WORK, &ls->ls_flags) &&
+		    !test_bit(LSFL_RECOVER_DOWN, &ls->ls_flags))
 			schedule();
 		set_current_state(TASK_RUNNING);
 
-		if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags))
+		if (test_and_clear_bit(LSFL_RECOVER_DOWN, &ls->ls_flags)) {
+			down_write(&ls->ls_in_recovery);
+			set_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
+			wake_up(&ls->ls_recover_lock_wait);
+		}
+
+		if (test_and_clear_bit(LSFL_RECOVER_WORK, &ls->ls_flags))
 			do_ls_recovery(ls);
 	}
 
+	if (test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags))
+		up_write(&ls->ls_in_recovery);
+
 	dlm_put_lockspace(ls);
 	return 0;
 }
 
-void dlm_recoverd_kick(struct dlm_ls *ls)
-{
-	set_bit(LSFL_WORK, &ls->ls_flags);
-	wake_up_process(ls->ls_recoverd_task);
-}
-
 int dlm_recoverd_start(struct dlm_ls *ls)
 {
 	struct task_struct *p;