aboutsummaryrefslogtreecommitdiffstats
path: root/fs/gfs2/glock.c
diff options
context:
space:
mode:
authorSteven Whitehouse <swhiteho@redhat.com>2006-04-28 10:46:21 -0400
committerSteven Whitehouse <swhiteho@redhat.com>2006-04-28 10:46:21 -0400
commit363275216c1a1b0b82c8419310c194b8c26b9c27 (patch)
tree476bb04ff32ac1724afa192f4950f4a39a0237f2 /fs/gfs2/glock.c
parentd26046bb0aff707aac38a9bf3dd56fa39b28a399 (diff)
[GFS2] Reordering in deallocation to avoid recursive locking
Despite my earlier careful search, there was a recursive lock left in the deallocation code. This removes it. It also should speed up deallocation by reducing the number of locking operations which take place, by using two "try lock" operations on the two locks involved in inode deallocation, which allows us to grab the locks out of order (compared with NFS, which grabs the inode lock first and the iopen lock later). It is ok for us to fail while doing this since, if it does fail, it means that someone else is still using the inode and thus it wouldn't be possible to deallocate anyway. This fixes the bug reported to me by Rob Kenna. Cc: Rob Kenna <rkenna@redhat.com> Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--fs/gfs2/glock.c6
1 files changed, 3 insertions, 3 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 17d474fab5ab..f82ecc0cc8fb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1814,7 +1814,7 @@ void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
1814 if (atomic_read(&ip->i_count)) 1814 if (atomic_read(&ip->i_count))
1815 goto out_unlock; 1815 goto out_unlock;
1816 1816
1817 gfs2_inode_destroy(ip); 1817 gfs2_inode_destroy(ip, 1);
1818 1818
1819 out_unlock: 1819 out_unlock:
1820 gfs2_glmutex_unlock(gl); 1820 gfs2_glmutex_unlock(gl);
@@ -1940,7 +1940,7 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
1940 if (gl->gl_ops == &gfs2_inode_glops) { 1940 if (gl->gl_ops == &gfs2_inode_glops) {
1941 struct gfs2_inode *ip = gl->gl_object; 1941 struct gfs2_inode *ip = gl->gl_object;
1942 if (ip && !atomic_read(&ip->i_count)) 1942 if (ip && !atomic_read(&ip->i_count))
1943 gfs2_inode_destroy(ip); 1943 gfs2_inode_destroy(ip, 1);
1944 } 1944 }
1945 if (queue_empty(gl, &gl->gl_holders) && 1945 if (queue_empty(gl, &gl->gl_holders) &&
1946 gl->gl_state != LM_ST_UNLOCKED && 1946 gl->gl_state != LM_ST_UNLOCKED &&
@@ -2083,7 +2083,7 @@ static void clear_glock(struct gfs2_glock *gl)
2083 if (gl->gl_ops == &gfs2_inode_glops) { 2083 if (gl->gl_ops == &gfs2_inode_glops) {
2084 struct gfs2_inode *ip = gl->gl_object; 2084 struct gfs2_inode *ip = gl->gl_object;
2085 if (ip && !atomic_read(&ip->i_count)) 2085 if (ip && !atomic_read(&ip->i_count))
2086 gfs2_inode_destroy(ip); 2086 gfs2_inode_destroy(ip, 1);
2087 } 2087 }
2088 if (queue_empty(gl, &gl->gl_holders) && 2088 if (queue_empty(gl, &gl->gl_holders) &&
2089 gl->gl_state != LM_ST_UNLOCKED) 2089 gl->gl_state != LM_ST_UNLOCKED)