about summary refs log tree commit diff stats
path: root/fs
diff options
context:
space:
mode:
authorSunil Mushran <sunil.mushran@oracle.com>2011-07-24 13:29:54 -0400
committerSunil Mushran <sunil.mushran@oracle.com>2011-07-24 13:29:54 -0400
commitff0a522e7db79625aa27a433467eb94c5e255718 (patch)
treeb404bbd397b3f495792db561a4a09fa66aa82be0 /fs
parented8625c6fb93d750ed022db571a8a7b7a6724b3b (diff)
ocfs2/dlm: Take inflight reference count for remotely mastered resources too
The inflight reference count, in the lock resource, is taken to pin the resource in memory. We take it when a new resource is created and release it after a lock is attached to it. We do this to prevent the resource from getting purged prematurely. Earlier this reference count was being taken for locally mastered resources only. This patch extends the same functionality to remotely mastered ones. We are doing this because the same premature purging could occur for remotely mastered resources if the remote node were to die before completion of the create lock.

Fix for Oracle bug#12405575.

Signed-off-by: Sunil Mushran <sunil.mushran@oracle.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/ocfs2/dlm/dlmlock.c12
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c47
-rw-r--r--fs/ocfs2/dlm/dlmthread.c12
3 files changed, 32 insertions, 39 deletions
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index c7f3e22bda1e..3ef2c1adfb8f 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -183,10 +183,6 @@ static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
183 kick_thread = 1; 183 kick_thread = 1;
184 } 184 }
185 } 185 }
186 /* reduce the inflight count, this may result in the lockres
187 * being purged below during calc_usage */
188 if (lock->ml.node == dlm->node_num)
189 dlm_lockres_drop_inflight_ref(dlm, res);
190 186
191 spin_unlock(&res->spinlock); 187 spin_unlock(&res->spinlock);
192 wake_up(&res->wq); 188 wake_up(&res->wq);
@@ -737,6 +733,14 @@ retry_lock:
737 } 733 }
738 } 734 }
739 735
736 /* Inflight taken in dlm_get_lock_resource() is dropped here */
737 spin_lock(&res->spinlock);
738 dlm_lockres_drop_inflight_ref(dlm, res);
739 spin_unlock(&res->spinlock);
740
741 dlm_lockres_calc_usage(dlm, res);
742 dlm_kick_thread(dlm, res);
743
740 if (status != DLM_NORMAL) { 744 if (status != DLM_NORMAL) {
741 lock->lksb->flags &= ~DLM_LKSB_GET_LVB; 745 lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
742 if (status != DLM_NOTQUEUED) 746 if (status != DLM_NOTQUEUED)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 11e446ffb67a..005261c333b0 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -659,11 +659,8 @@ void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
659{ 659{
660 assert_spin_locked(&res->spinlock); 660 assert_spin_locked(&res->spinlock);
661 661
662 if (!test_bit(dlm->node_num, res->refmap)) {
663 BUG_ON(res->inflight_locks != 0);
664 dlm_lockres_set_refmap_bit(dlm, res, dlm->node_num);
665 }
666 res->inflight_locks++; 662 res->inflight_locks++;
663
667 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name, 664 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
668 res->lockname.len, res->lockname.name, res->inflight_locks, 665 res->lockname.len, res->lockname.name, res->inflight_locks,
669 __builtin_return_address(0)); 666 __builtin_return_address(0));
@@ -677,12 +674,11 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
677 BUG_ON(res->inflight_locks == 0); 674 BUG_ON(res->inflight_locks == 0);
678 675
679 res->inflight_locks--; 676 res->inflight_locks--;
677
680 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name, 678 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
681 res->lockname.len, res->lockname.name, res->inflight_locks, 679 res->lockname.len, res->lockname.name, res->inflight_locks,
682 __builtin_return_address(0)); 680 __builtin_return_address(0));
683 681
684 if (res->inflight_locks == 0)
685 dlm_lockres_clear_refmap_bit(dlm, res, dlm->node_num);
686 wake_up(&res->wq); 682 wake_up(&res->wq);
687} 683}
688 684
@@ -716,7 +712,6 @@ struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
716 unsigned int hash; 712 unsigned int hash;
717 int tries = 0; 713 int tries = 0;
718 int bit, wait_on_recovery = 0; 714 int bit, wait_on_recovery = 0;
719 int drop_inflight_if_nonlocal = 0;
720 715
721 BUG_ON(!lockid); 716 BUG_ON(!lockid);
722 717
@@ -728,36 +723,33 @@ lookup:
728 spin_lock(&dlm->spinlock); 723 spin_lock(&dlm->spinlock);
729 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash); 724 tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
730 if (tmpres) { 725 if (tmpres) {
731 int dropping_ref = 0;
732
733 spin_unlock(&dlm->spinlock); 726 spin_unlock(&dlm->spinlock);
734
735 spin_lock(&tmpres->spinlock); 727 spin_lock(&tmpres->spinlock);
736 /* We wait for the other thread that is mastering the resource */ 728 /* Wait on the thread that is mastering the resource */
737 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { 729 if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
738 __dlm_wait_on_lockres(tmpres); 730 __dlm_wait_on_lockres(tmpres);
739 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN); 731 BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
732 spin_unlock(&tmpres->spinlock);
733 dlm_lockres_put(tmpres);
734 tmpres = NULL;
735 goto lookup;
740 } 736 }
741 737
742 if (tmpres->owner == dlm->node_num) { 738 /* Wait on the resource purge to complete before continuing */
743 BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF); 739 if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
744 dlm_lockres_grab_inflight_ref(dlm, tmpres); 740 BUG_ON(tmpres->owner == dlm->node_num);
745 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) 741 __dlm_wait_on_lockres_flags(tmpres,
746 dropping_ref = 1; 742 DLM_LOCK_RES_DROPPING_REF);
747 spin_unlock(&tmpres->spinlock);
748
749 /* wait until done messaging the master, drop our ref to allow
750 * the lockres to be purged, start over. */
751 if (dropping_ref) {
752 spin_lock(&tmpres->spinlock);
753 __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
754 spin_unlock(&tmpres->spinlock); 743 spin_unlock(&tmpres->spinlock);
755 dlm_lockres_put(tmpres); 744 dlm_lockres_put(tmpres);
756 tmpres = NULL; 745 tmpres = NULL;
757 goto lookup; 746 goto lookup;
758 } 747 }
759 748
760 mlog(0, "found in hash!\n"); 749 /* Grab inflight ref to pin the resource */
750 dlm_lockres_grab_inflight_ref(dlm, tmpres);
751
752 spin_unlock(&tmpres->spinlock);
761 if (res) 753 if (res)
762 dlm_lockres_put(res); 754 dlm_lockres_put(res);
763 res = tmpres; 755 res = tmpres;
@@ -863,14 +855,11 @@ lookup:
863 /* finally add the lockres to its hash bucket */ 855 /* finally add the lockres to its hash bucket */
864 __dlm_insert_lockres(dlm, res); 856 __dlm_insert_lockres(dlm, res);
865 857
858 /* Grab inflight ref to pin the resource */
866 spin_lock(&res->spinlock); 859 spin_lock(&res->spinlock);
867 dlm_lockres_grab_inflight_ref(dlm, res); 860 dlm_lockres_grab_inflight_ref(dlm, res);
868 spin_unlock(&res->spinlock); 861 spin_unlock(&res->spinlock);
869 862
870 /* if this node does not become the master make sure to drop
871 * this inflight reference below */
872 drop_inflight_if_nonlocal = 1;
873
874 /* get an extra ref on the mle in case this is a BLOCK 863 /* get an extra ref on the mle in case this is a BLOCK
875 * if so, the creator of the BLOCK may try to put the last 864 * if so, the creator of the BLOCK may try to put the last
876 * ref at this time in the assert master handler, so we 865 * ref at this time in the assert master handler, so we
@@ -973,8 +962,6 @@ wait:
973 962
974wake_waiters: 963wake_waiters:
975 spin_lock(&res->spinlock); 964 spin_lock(&res->spinlock);
976 if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
977 dlm_lockres_drop_inflight_ref(dlm, res);
978 res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 965 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
979 spin_unlock(&res->spinlock); 966 spin_unlock(&res->spinlock);
980 wake_up(&res->wq); 967 wake_up(&res->wq);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 4eff65e7e59e..e73c833fc2a1 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -94,24 +94,26 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
94{ 94{
95 int bit; 95 int bit;
96 96
97 assert_spin_locked(&res->spinlock);
98
97 if (__dlm_lockres_has_locks(res)) 99 if (__dlm_lockres_has_locks(res))
98 return 0; 100 return 0;
99 101
102 /* Locks are in the process of being created */
103 if (res->inflight_locks)
104 return 0;
105
100 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) 106 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
101 return 0; 107 return 0;
102 108
103 if (res->state & DLM_LOCK_RES_RECOVERING) 109 if (res->state & DLM_LOCK_RES_RECOVERING)
104 return 0; 110 return 0;
105 111
112 /* Another node has this resource with this node as the master */
106 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); 113 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
107 if (bit < O2NM_MAX_NODES) 114 if (bit < O2NM_MAX_NODES)
108 return 0; 115 return 0;
109 116
110 /*
111 * since the bit for dlm->node_num is not set, inflight_locks better
112 * be zero
113 */
114 BUG_ON(res->inflight_locks != 0);
115 return 1; 117 return 1;
116} 118}
117 119