author		Steven Whitehouse <swhiteho@redhat.com>	2010-01-29 10:21:27 -0500
committer	Steven Whitehouse <swhiteho@redhat.com>	2010-02-03 04:56:21 -0500
commit		8f05228ee7c8f409ae3c6f9c3e13d7ccb9c18360 (patch)
tree		34e8cf87485edf4ecb6878ade96704975e5d5bf5 /fs/gfs2/lock_dlm.c
parent		e402746a945ceb9d0486a8e3d5917c9228fa4404 (diff)
GFS2: Extend umount wait coverage to full glock lifetime
Although all glocks are, by the time of the umount glock wait,
scheduled for demotion, some of them have not yet made it far
enough through that process for the original waiting code to
catch them.

This extends the ref count to cover the whole glock lifetime in
order to ensure that the wait really does catch all glocks. It
makes the change a bit more invasive, but it seems the only
sensible solution at the moment.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
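
As a rough illustration of the pattern this patch completes (this is a
userspace sketch, not GFS2 code; glock_disposal and glock_wait stand in
loosely for sd_glock_disposal and sd_glock_wait): every glock is counted
from the moment it is created, the final free decrements the count and
wakes the waiter, and the "umount" path sleeps until the count reaches
zero.

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static int glock_disposal;		/* glocks still alive */
	static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t glock_wait = PTHREAD_COND_INITIALIZER;

	static void glock_create(void)
	{
		pthread_mutex_lock(&lk);
		glock_disposal++;		/* counted from creation ... */
		pthread_mutex_unlock(&lk);
	}

	static void glock_free(void)
	{
		pthread_mutex_lock(&lk);
		if (--glock_disposal == 0)	/* ... down to the final free */
			pthread_cond_broadcast(&glock_wait);
		pthread_mutex_unlock(&lk);
	}

	static void *teardown(void *arg)
	{
		usleep(10000);			/* glock still being torn down */
		glock_free();
		return NULL;
	}

	int main(void)
	{
		pthread_t t[8];
		int i;

		for (i = 0; i < 8; i++) {
			glock_create();
			pthread_create(&t[i], NULL, teardown, NULL);
		}

		/* "umount": wait until every glock has been disposed of */
		pthread_mutex_lock(&lk);
		while (glock_disposal > 0)
			pthread_cond_wait(&glock_wait, &lk);
		pthread_mutex_unlock(&lk);

		for (i = 0; i < 8; i++)
			pthread_join(t[i], NULL);
		printf("all glocks gone, safe to unmount\n");
		return 0;
	}

Build with -pthread. The point is only that the decrement-and-wake has to be
tied to the very last thing that happens to a glock, which is exactly what
the hunk below does on the early-free path in gdlm_put_lock().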
Diffstat (limited to 'fs/gfs2/lock_dlm.c')
-rw-r--r--	fs/gfs2/lock_dlm.c	| 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index cdd0755d7823..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -167,15 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
 	return LM_OUT_ASYNC;
 }
 
-static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
 {
-	struct gfs2_glock *gl = ptr;
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
 		kmem_cache_free(cachep, gl);
+		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+			wake_up(&sdp->sd_glock_wait);
 		return;
 	}
 
@@ -187,7 +188,6 @@ static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
 		       (unsigned long long)gl->gl_name.ln_number, error);
 		return;
 	}
-	atomic_inc(&sdp->sd_glock_disposal);
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
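
For context, the waiting side of this mechanism lives in fs/gfs2/glock.c and
is therefore outside this diffstat. As introduced by the parent commit, the
umount path sleeps on sd_glock_wait until the disposal count drops to zero,
roughly:

	wait_event(sdp->sd_glock_wait,
		   atomic_read(&sdp->sd_glock_disposal) == 0);

With this patch the matching increment is taken when a glock is created
rather than only when it is handed to the DLM for unlocking, so the count
covers the full glock lifetime: the hunks above remove the old atomic_inc()
and add the decrement/wake on the path where a glock is freed without ever
having held a DLM lock.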