author     Wengang Wang <wen.gang.wang@oracle.com>    2010-05-17 08:20:44 -0400
committer  Joel Becker <joel.becker@oracle.com>       2010-05-18 19:41:34 -0400
commit     d9ef75221a6247b758e1d7e18edb661996e4b7cf (patch)
tree       a1a1355da23b7448afdc851f7a211f2b3492d6e5 /fs
parent     d5a7df0649fa6a1e7800785d760e2c7d7a3204de (diff)
ocfs2:dlm: avoid dlm->ast_lock lockres->spinlock dependency break
Currently we process a dirty lockres with lockres->spinlock held. During that
processing we may need to take dlm->ast_lock, which violates the established
lock ordering of dlm->ast_lock (taken first) and lockres->spinlock (taken
second) and so can deadlock. This patch fixes the problem.
Since we can't drop lockres->spinlock partway through, we instead take
dlm->ast_lock just before taking lockres->spinlock and release it only after
lockres->spinlock has been released, and we switch dlm_shuffle_lists() to the
nolock variants __dlm_queue_bast()/__dlm_queue_ast(). A lockres rarely carries
many locks, so holding dlm->ast_lock for the whole shuffle does no real
performance harm.
Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
Signed-off-by: Joel Becker <joel.becker@oracle.com>
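To see the inversion concretely, here is a condensed sketch of the old locking
order (illustrative only, not the literal kernel code): dlm_shuffle_lists() was
called under lockres->spinlock, and the dlm_queue_bast()/dlm_queue_ast() calls
inside it took dlm->ast_lock, i.e. the outer lock was acquired while the inner
one was already held:

/* Illustrative sketch only -- condensed from this patch's description. */
spin_lock(&res->spinlock);	/* inner lock taken first... */
dlm_shuffle_lists(dlm, res);	/* ...then dlm_queue_bast()/dlm_queue_ast()
				 * inside it took dlm->ast_lock: inverted
				 * order, deadlock-prone against paths that
				 * take ast_lock first */
spin_unlock(&res->spinlock);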
Diffstat (limited to 'fs')
-rw-r--r--	fs/ocfs2/dlm/dlmast.c	 4
-rw-r--r--	fs/ocfs2/dlm/dlmcommon.h	 2
-rw-r--r--	fs/ocfs2/dlm/dlmthread.c	16
3 files changed, 14 insertions, 8 deletions
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 390a887c4df3..7ec61d91b6a4 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -89,7 +89,7 @@ static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 	return 0;
 }
 
-static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
@@ -146,7 +146,7 @@ void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 }
 
 
-static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
 {
 	mlog_entry_void();
 
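Dropping the static qualifier here makes the nolock variants callable from
dlm_shuffle_lists(). The split follows the usual kernel pattern in which foo()
takes the lock and __foo() assumes the caller already holds it; a simplified
sketch of that convention (the real dlm_queue_ast() body also carries logging
and sanity checks):

/* Simplified sketch of the locked/nolock convention; real body differs. */
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
	spin_lock(&dlm->ast_lock);
	__dlm_queue_ast(dlm, lock);	/* variant that expects ast_lock held */
	spin_unlock(&dlm->ast_lock);
}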
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 40115681d5b0..4b6ae2c13b47 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -904,6 +904,8 @@ void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
 
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
+void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_do_local_ast(struct dlm_ctxt *dlm,
 		      struct dlm_lock_resource *res,
 		      struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 52ec020ea78b..0bdd28e1d4d9 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -310,6 +310,7 @@ static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
 	 * spinlock, and because we know that it is not migrating/
 	 * recovering/in-progress, it is fine to reserve asts and
 	 * basts right before queueing them all throughout */
+	assert_spin_locked(&dlm->ast_lock);
 	assert_spin_locked(&res->spinlock);
 	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
 			      DLM_LOCK_RES_RECOVERING|
@@ -338,7 +339,7 @@ converting:
 			/* queue the BAST if not already */
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			/* update the highest_blocked if needed */
 			if (lock->ml.highest_blocked < target->ml.convert_type)
@@ -356,7 +357,7 @@ converting:
 				can_grant = 0;
 				if (lock->ml.highest_blocked == LKM_IVMODE) {
 					__dlm_lockres_reserve_ast(res);
-					dlm_queue_bast(dlm, lock);
+					__dlm_queue_bast(dlm, lock);
 				}
 				if (lock->ml.highest_blocked < target->ml.convert_type)
 					lock->ml.highest_blocked =
@@ -384,7 +385,7 @@ converting:
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -403,7 +404,7 @@ blocked:
 			can_grant = 0;
 			if (lock->ml.highest_blocked == LKM_IVMODE) {
 				__dlm_lockres_reserve_ast(res);
-				dlm_queue_bast(dlm, lock);
+				__dlm_queue_bast(dlm, lock);
 			}
 			if (lock->ml.highest_blocked < target->ml.type)
 				lock->ml.highest_blocked = target->ml.type;
@@ -419,7 +420,7 @@ blocked:
 				can_grant = 0;
 				if (lock->ml.highest_blocked == LKM_IVMODE) {
 					__dlm_lockres_reserve_ast(res);
-					dlm_queue_bast(dlm, lock);
+					__dlm_queue_bast(dlm, lock);
 				}
 				if (lock->ml.highest_blocked < target->ml.type)
 					lock->ml.highest_blocked = target->ml.type;
@@ -445,7 +446,7 @@ blocked:
 		spin_unlock(&target->spinlock);
 
 		__dlm_lockres_reserve_ast(res);
-		dlm_queue_ast(dlm, target);
+		__dlm_queue_ast(dlm, target);
 		/* go back and check for more */
 		goto converting;
 	}
@@ -675,6 +676,7 @@ static int dlm_thread(void *data)
 			/* lockres can be re-dirtied/re-added to the
 			 * dirty_list in this gap, but that is ok */
 
+			spin_lock(&dlm->ast_lock);
 			spin_lock(&res->spinlock);
 			if (res->owner != dlm->node_num) {
 				__dlm_print_one_lock_resource(res);
@@ -695,6 +697,7 @@ static int dlm_thread(void *data)
 				/* move it to the tail and keep going */
 				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
+				spin_unlock(&dlm->ast_lock);
 				mlog(0, "delaying list shuffling for in-"
 				     "progress lockres %.*s, state=%d\n",
 				     res->lockname.len, res->lockname.name,
@@ -716,6 +719,7 @@ static int dlm_thread(void *data)
 			dlm_shuffle_lists(dlm, res);
 			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
+			spin_unlock(&dlm->ast_lock);
 
 			dlm_lockres_calc_usage(dlm, res);
 
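Taken together, the dlmthread.c hunks leave dlm_thread() acquiring both locks
up front and releasing them in reverse order, with dlm_shuffle_lists()
asserting that contract on entry. A condensed sketch of the resulting
discipline (illustrative, not the complete loop):

/* Condensed from the hunks above; illustrative, not the complete loop. */
spin_lock(&dlm->ast_lock);	/* outer lock: always taken first now */
spin_lock(&res->spinlock);	/* inner lock */

dlm_shuffle_lists(dlm, res);	/* asserts both locks are held; queues
				 * asts/basts via the nolock
				 * __dlm_queue_ast()/__dlm_queue_bast() */
res->state &= ~DLM_LOCK_RES_DIRTY;

spin_unlock(&res->spinlock);	/* release inner first... */
spin_unlock(&dlm->ast_lock);	/* ...then outer */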