Diffstat (limited to 'fs')
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h |  1
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 16
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 30
3 files changed, 33 insertions, 14 deletions
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 04048bb1a1bd..e95ecb2aaf14 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -223,6 +223,7 @@ static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
 #define DLM_LOCK_RES_IN_PROGRESS          0x00000010
 #define DLM_LOCK_RES_MIGRATING            0x00000020
 #define DLM_LOCK_RES_DROPPING_REF         0x00000040
+#define DLM_LOCK_RES_BLOCK_DIRTY          0x00001000
 
 /* max milliseconds to wait to sync up a network failure with a node death */
 #define DLM_NODE_DEATH_WAIT_MAX (5 * 1000)
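
The new flag is only ever manipulated under res->spinlock: dlmmaster.c sets and clears it around the migration wait, and dlmthread.c tests it (together with DLM_LOCK_RES_MIGRATING) before queueing a lockres on the dirty list. As a hypothetical illustration only (this helper is not part of the patch), the check amounts to:

/* Hypothetical helper, not introduced by this patch: the test that the
 * reworked __dlm_dirty_lockres() performs under res->spinlock. */
static inline int __dlm_lockres_dirty_blocked(struct dlm_lock_resource *res)
{
	assert_spin_locked(&res->spinlock);
	return !!(res->state & (DLM_LOCK_RES_MIGRATING |
				DLM_LOCK_RES_BLOCK_DIRTY));
}
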
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 251c48028ea3..a65a87726d6a 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2707,8 +2707,15 @@ static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
 	__dlm_lockres_reserve_ast(res);
 	spin_unlock(&res->spinlock);
 
-	/* now flush all the pending asts.. hang out for a bit */
+	/* now flush all the pending asts */
 	dlm_kick_thread(dlm, res);
+	/* before waiting on DIRTY, block processes which may
+	 * try to dirty the lockres before MIGRATING is set */
+	spin_lock(&res->spinlock);
+	BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
+	res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
+	spin_unlock(&res->spinlock);
+	/* now wait on any pending asts and the DIRTY state */
 	wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
 	dlm_lockres_release_ast(dlm, res);
 
@@ -2734,6 +2741,13 @@ again:
 		mlog(0, "trying again...\n");
 		goto again;
 	}
+	/* now that we are sure the MIGRATING state is there, drop
+	 * the unneeded state which blocked threads trying to DIRTY */
+	spin_lock(&res->spinlock);
+	BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
+	BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
+	res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
+	spin_unlock(&res->spinlock);
 
 	/* did the target go down or die? */
 	spin_lock(&dlm->spinlock);
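
The two dlmmaster.c hunks establish a strict ordering in dlm_mark_lockres_migrating(): BLOCK_DIRTY is set under res->spinlock before the wait for the lockres to go clean, and it is dropped only after MIGRATING is known to be set (the BUG_ON above). Since the reworked __dlm_dirty_lockres() in dlmthread.c below refuses to queue a lockres while either bit is set, nothing can re-dirty the resource in the window between the flush completing and MIGRATING becoming visible. The following stand-alone user-space sketch models just that handshake, with a mutex and condition variable standing in for the spinlock and dlm->ast_wq; every name in it is invented for illustration and none of it is the actual ocfs2/dlm code.

/* Stand-alone model of the BLOCK_DIRTY handshake added by this patch. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define RES_DIRTY        0x1
#define RES_MIGRATING    0x2
#define RES_BLOCK_DIRTY  0x4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;  /* plays dlm->ast_wq */
static unsigned int    state = RES_DIRTY;                /* lockres starts out dirty */

/* Mirrors the reworked __dlm_dirty_lockres(): refuse to (re)dirty the
 * resource while it is migrating or while dirtying is blocked. */
static void dirty_res(void)
{
	pthread_mutex_lock(&lock);
	if (!(state & (RES_MIGRATING | RES_BLOCK_DIRTY)))
		state |= RES_DIRTY;
	pthread_mutex_unlock(&lock);
}

/* Plays the dlm thread: finish the pending work and clear DIRTY. */
static void *flusher(void *arg)
{
	(void)arg;
	usleep(10000);
	pthread_mutex_lock(&lock);
	state &= ~RES_DIRTY;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, flusher, NULL);

	/* Plays dlm_mark_lockres_migrating(): block dirtying *before* waiting
	 * for DIRTY to clear, so nothing can re-dirty the lockres in the
	 * window before MIGRATING becomes visible, then drop the block. */
	pthread_mutex_lock(&lock);
	state |= RES_BLOCK_DIRTY;
	while (state & RES_DIRTY)
		pthread_cond_wait(&wq, &lock);
	state |= RES_MIGRATING;
	state &= ~RES_BLOCK_DIRTY;
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);

	dirty_res();            /* too late: refused because MIGRATING is set */
	printf("final state = 0x%x (DIRTY not set again)\n", state);
	return 0;
}

Built with cc -pthread, the sketch prints final state = 0x2: the late dirty_res() call can no longer set the DIRTY bit once migration is in force.
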
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index baa99979904c..3b94e4dec351 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -95,7 +95,7 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
 int __dlm_lockres_unused(struct dlm_lock_resource *res)
 {
 	if (!__dlm_lockres_has_locks(res) &&
-	    list_empty(&res->dirty)) {
+	    (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) {
 		/* try not to scan the bitmap unless the first two
 		 * conditions are already true */
 		int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
@@ -455,12 +455,17 @@ void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 	assert_spin_locked(&res->spinlock);
 
 	/* don't shuffle secondary queues */
-	if ((res->owner == dlm->node_num) &&
-	    !(res->state & DLM_LOCK_RES_DIRTY)) {
-		/* ref for dirty_list */
-		dlm_lockres_get(res);
-		list_add_tail(&res->dirty, &dlm->dirty_list);
-		res->state |= DLM_LOCK_RES_DIRTY;
+	if ((res->owner == dlm->node_num)) {
+		if (res->state & (DLM_LOCK_RES_MIGRATING |
+				  DLM_LOCK_RES_BLOCK_DIRTY))
+			return;
+
+		if (list_empty(&res->dirty)) {
+			/* ref for dirty_list */
+			dlm_lockres_get(res);
+			list_add_tail(&res->dirty, &dlm->dirty_list);
+			res->state |= DLM_LOCK_RES_DIRTY;
+		}
 	}
 }
 
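Pieced together from the hunk above (eliding whatever the function does before the shown context), __dlm_dirty_lockres() after this change reads roughly as follows. The two behavioural points are that it now bails out for a lockres whose state carries MIGRATING or the new BLOCK_DIRTY bit, and that it only takes the dirty_list reference when the lockres is not already queued.

void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	/* ... earlier statements not shown in the hunk ... */

	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if ((res->owner == dlm->node_num)) {
		/* a migrating or dirty-blocked lockres must not be queued */
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}
}
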
@@ -639,7 +644,7 @@ static int dlm_thread(void *data)
 			dlm_lockres_get(res);
 
 			spin_lock(&res->spinlock);
-			res->state &= ~DLM_LOCK_RES_DIRTY;
+			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
 			list_del_init(&res->dirty);
 			spin_unlock(&res->spinlock);
 			spin_unlock(&dlm->spinlock);
@@ -663,10 +668,11 @@ static int dlm_thread(void *data)
 			/* it is now ok to move lockreses in these states
 			 * to the dirty list, assuming that they will only be
 			 * dirty for a short while. */
+			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
 			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
-					  DLM_LOCK_RES_MIGRATING |
 					  DLM_LOCK_RES_RECOVERING)) {
 				/* move it to the tail and keep going */
+				res->state &= ~DLM_LOCK_RES_DIRTY;
 				spin_unlock(&res->spinlock);
 				mlog(0, "delaying list shuffling for in-"
 				     "progress lockres %.*s, state=%d\n",
@@ -687,6 +693,7 @@ static int dlm_thread(void *data)
 
 			/* called while holding lockres lock */
 			dlm_shuffle_lists(dlm, res);
+			res->state &= ~DLM_LOCK_RES_DIRTY;
 			spin_unlock(&res->spinlock);
 
 			dlm_lockres_calc_usage(dlm, res);
@@ -697,11 +704,8 @@ in_progress:
 			/* if the lock was in-progress, stick
 			 * it on the back of the list */
 			if (delay) {
-				/* ref for dirty_list */
-				dlm_lockres_get(res);
 				spin_lock(&res->spinlock);
-				list_add_tail(&res->dirty, &dlm->dirty_list);
-				res->state |= DLM_LOCK_RES_DIRTY;
+				__dlm_dirty_lockres(dlm, res);
 				spin_unlock(&res->spinlock);
 			}
 			dlm_lockres_put(res);
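
Taken together, the dlm_thread() hunks change when DLM_LOCK_RES_DIRTY is dropped: the flag now survives the list_del_init() and is cleared only once dlm_shuffle_lists() has run, or when shuffling is postponed, and the postponed "delay" path re-queues through __dlm_dirty_lockres() so the MIGRATING/BLOCK_DIRTY checks are honoured there too. Combined with the __dlm_lockres_unused() change, a lockres that is momentarily off dirty_list but still marked DIRTY is no longer treated as unused. Pieced together from the hunks above, with unrelated lines elided, the relevant part of dlm_thread() after the patch looks roughly like this:

		spin_lock(&res->spinlock);
		/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
		list_del_init(&res->dirty);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);

		/* ... */

		BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
		if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
				  DLM_LOCK_RES_RECOVERING)) {
			/* postponed: drop DIRTY here; the delay path re-dirties
			 * via __dlm_dirty_lockres() if that is still allowed */
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);
			/* ... shuffling is skipped for this pass ... */
		}

		/* called while holding lockres lock */
		dlm_shuffle_lists(dlm, res);
		res->state &= ~DLM_LOCK_RES_DIRTY;
		spin_unlock(&res->spinlock);
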