path: root/fs/ocfs2/dlm/dlmmaster.c
author		Kurt Hackel <kurt.hackel@oracle.com>	2007-01-17 17:53:37 -0500
committer	Mark Fasheh <mark.fasheh@oracle.com>	2007-02-07 15:03:02 -0500
commit		1cd04dbe3364be71b93e3aaf4545daa1e261aaa1 (patch)
tree		e155456258e0700303bf2bca0326124ddbc2c327 /fs/ocfs2/dlm/dlmmaster.c
parent		e17e75ecb86b8ce9b51b219b5348517561031f80 (diff)
ocfs2_dlm: Flush dlm workqueue before starting to migrate
This prevents a race in which a previously queued assert_master fires
after we have started the migration. Migration now ensures the dlm
workqueue is flushed before proceeding to migrate the lock resource to
another node. The condition is typically encountered during parallel
umounts.

Signed-off-by: Kurt Hackel <kurt.hackel@oracle.com>
Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Signed-off-by: Mark Fasheh <mark.fasheh@oracle.com>
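The fix relies on a standard workqueue-draining pattern: before a node
publishes a state change that remote nodes act on, it drains any work
items that were queued under the old state, so a stale assert_master
cannot fire mid-migration. Below is a minimal, self-contained sketch of
that pattern using the generic kernel workqueue API; the names demo_wq,
demo_assert_work, and demo_migrate are hypothetical stand-ins (for
dlm->dlm_worker, the queued assert_master work, and the migration path),
not code from ocfs2.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;     /* stands in for dlm->dlm_worker */
static struct work_struct demo_assert_work;  /* a queued "assert" work item */

static void demo_assert_fn(struct work_struct *work)
{
	/* Work queued before migration began; it must complete before
	 * the migration path proceeds. */
	pr_info("demo: assert work ran\n");
}

static int demo_migrate(void)
{
	/* Drain everything queued so far: flush_workqueue() returns only
	 * after all work items pending on demo_wq have finished. */
	flush_workqueue(demo_wq);

	/* ...now it is safe to set a MIGRATING-style flag and migrate... */
	return 0;
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_assert_work, demo_assert_fn);
	queue_work(demo_wq, &demo_assert_work);

	return demo_migrate();
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");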
Diffstat (limited to 'fs/ocfs2/dlm/dlmmaster.c')
-rw-r--r--	fs/ocfs2/dlm/dlmmaster.c	36
1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a65a87726d6a..b36cce034ea0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1507,10 +1507,11 @@ way_up_top:
 
 	/* take care of the easy cases up front */
 	spin_lock(&res->spinlock);
-	if (res->state & DLM_LOCK_RES_RECOVERING) {
+	if (res->state & (DLM_LOCK_RES_RECOVERING|
+			  DLM_LOCK_RES_MIGRATING)) {
 		spin_unlock(&res->spinlock);
 		mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
-		     "being recovered\n");
+		     "being recovered/migrated\n");
 		response = DLM_MASTER_RESP_ERROR;
 		if (mle)
 			kmem_cache_free(dlm_mle_cache, mle);
@@ -2493,6 +2494,9 @@ fail:
 	 * the lockres
 	 */
 
+	/* now that remote nodes are spinning on the MIGRATING flag,
+	 * ensure that all assert_master work is flushed. */
+	flush_workqueue(dlm->dlm_worker);
 
 	/* get an extra reference on the mle.
 	 * otherwise the assert_master from the new
@@ -2547,7 +2551,8 @@ fail:
 		    res->owner == target)
 			break;
 
-		mlog(0, "timed out during migration\n");
+		mlog(0, "%s:%.*s: timed out during migration\n",
+		     dlm->name, res->lockname.len, res->lockname.name);
 		/* avoid hang during shutdown when migrating lockres
 		 * to a node which also goes down */
 		if (dlm_is_node_dead(dlm, target)) {
@@ -2555,20 +2560,19 @@ fail:
2555 "target %u is no longer up, restarting\n", 2560 "target %u is no longer up, restarting\n",
2556 dlm->name, res->lockname.len, 2561 dlm->name, res->lockname.len,
2557 res->lockname.name, target); 2562 res->lockname.name, target);
2558 ret = -ERESTARTSYS; 2563 ret = -EINVAL;
2564 /* migration failed, detach and clean up mle */
2565 dlm_mle_detach_hb_events(dlm, mle);
2566 dlm_put_mle(mle);
2567 dlm_put_mle_inuse(mle);
2568 spin_lock(&res->spinlock);
2569 res->state &= ~DLM_LOCK_RES_MIGRATING;
2570 spin_unlock(&res->spinlock);
2571 goto leave;
2559 } 2572 }
2560 } 2573 } else
2561 if (ret == -ERESTARTSYS) { 2574 mlog(0, "%s:%.*s: caught signal during migration\n",
2562 /* migration failed, detach and clean up mle */ 2575 dlm->name, res->lockname.len, res->lockname.name);
2563 dlm_mle_detach_hb_events(dlm, mle);
2564 dlm_put_mle(mle);
2565 dlm_put_mle_inuse(mle);
2566 spin_lock(&res->spinlock);
2567 res->state &= ~DLM_LOCK_RES_MIGRATING;
2568 spin_unlock(&res->spinlock);
2569 goto leave;
2570 }
2571 /* TODO: if node died: stop, clean up, return error */
2572 } 2576 }
2573 2577
2574 /* all done, set the owner, clear the flag */ 2578 /* all done, set the owner, clear the flag */
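For context, the loop reworked by the last two hunks is built on
wait_event_interruptible_timeout(), which returns a negative value when
a signal is caught, zero on timeout, and a positive value when the
condition became true. A condensed, hypothetical sketch of the resulting
control flow follows; demo_wait_for_assert and its parameters are
illustrative stand-ins, not the actual ocfs2 function.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>

/* Condensed illustration of the post-patch wait loop: keep waiting for
 * the new master to assert, handle timeouts by checking whether the
 * migration target died, and treat a signal as a logged, retried event. */
static int demo_wait_for_assert(wait_queue_head_t *wq, atomic_t *woken,
				int target_dead)
{
	long ret;

	while (1) {
		ret = wait_event_interruptible_timeout(*wq,
					atomic_read(woken) == 1,
					msecs_to_jiffies(5000));
		if (ret >= 0) {
			if (atomic_read(woken) == 1)
				break;		/* new master asserted */
			/* timed out: if the target died, clean up and
			 * fail instead of waiting forever */
			if (target_dead)
				return -EINVAL;
		} else {
			/* signal caught: log it and wait again */
		}
	}
	return 0;
}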