author    Steven Whitehouse <swhiteho@redhat.com>  2006-08-24 17:03:05 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-08-24 17:03:05 -0400
commit    a2242db0906445491d9ac50bfa756b0de0a25d45
tree      ed530c05eaae5293fd6b14cfb05bdee655adc586  /fs/gfs2/glock.c
parent    166afccd71fbb7bd758ab9fc770eef4924081077
[GFS2] Speed up scanning of glocks
I noticed that gfs2_scand seemed to be taking a lot of CPU, so in order to cut that down a bit, here is a patch. Firstly, the type of a glock is constant during its lifetime, so it is possible to check it without needing any locking; I've moved the (common) case of testing for an inode glock outside of the glmutex lock. Also, there was a mutex left over from when the glock cache was master of the inode cache. That isn't required any more, so I've removed it too. There is probably scope for further speed-ups in this area in the future.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
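The key observation is that gl->gl_ops never changes once a glock has been created, so reading it before taking the glmutex is safe and lets the scanner skip the common inode-glock case without any locking. A minimal user-space sketch of that pattern is shown below; all names in it (obj, OBJ_INODE, scan_obj) are illustrative only, not the actual GFS2 code.

/*
 * Sketch of the pattern in this patch: a field that is constant for an
 * object's lifetime can be tested before taking the per-object lock,
 * so the common "skip this object" case needs no locking at all.
 * Illustrative names only, not the kernel's API.
 */
#include <pthread.h>
#include <stdio.h>

enum obj_type { OBJ_INODE, OBJ_OTHER };

struct obj {
	enum obj_type type;	/* never changes after creation */
	pthread_mutex_t lock;	/* protects the mutable fields below */
	int busy;		/* mutable, must be read under the lock */
};

static void scan_obj(struct obj *o)
{
	if (o->type == OBJ_INODE)	/* safe without the lock: immutable */
		return;

	if (pthread_mutex_trylock(&o->lock) == 0) {
		if (!o->busy)
			printf("idle object, candidate for demotion\n");
		pthread_mutex_unlock(&o->lock);
	}
}

int main(void)
{
	struct obj o = { OBJ_OTHER, PTHREAD_MUTEX_INITIALIZER, 0 };

	scan_obj(&o);
	return 0;
}

The only point of the sketch is the ordering: the immutable field is tested before any lock is taken, so the frequent skip case stays lock-free, which is what reduces the scanner's CPU use.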
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--  fs/gfs2/glock.c | 24
1 file changed, 5 insertions(+), 19 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a5e16e539999..005788fb361f 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -150,12 +150,9 @@ static void kill_glock(struct kref *kref)
 
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
 	int rv = 0;
 
-	mutex_lock(&sdp->sd_invalidate_inodes_mutex);
-
 	write_lock(&bucket->hb_lock);
 	if (kref_put(&gl->gl_ref, kill_glock)) {
 		list_del_init(&gl->gl_list);
@@ -166,8 +163,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 		goto out;
 	}
 	write_unlock(&bucket->hb_lock);
- out:
-	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
+out:
 	return rv;
 }
 
@@ -1964,19 +1960,18 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 
 static void scan_glock(struct gfs2_glock *gl)
 {
+	if (gl->gl_ops == &gfs2_inode_glops)
+		goto out;
+
 	if (gfs2_glmutex_trylock(gl)) {
-		if (gl->gl_ops == &gfs2_inode_glops)
-			goto out;
 		if (queue_empty(gl, &gl->gl_holders) &&
 		    gl->gl_state != LM_ST_UNLOCKED &&
 		    demote_ok(gl))
 			goto out_schedule;
-out:
 		gfs2_glmutex_unlock(gl);
 	}
-
+out:
 	gfs2_glock_put(gl);
-
 	return;
 
 out_schedule:
@@ -2070,16 +2065,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
 			t = jiffies;
 		}
 
-		/* invalidate_inodes() requires that the sb inodes list
-		   not change, but an async completion callback for an
-		   unlock can occur which does glock_put() which
-		   can call iput() which will change the sb inodes list.
-		   invalidate_inodes_mutex prevents glock_put()'s during
-		   an invalidate_inodes() */
-
-		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
 		invalidate_inodes(sdp->sd_vfs);
-		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
 		msleep(10);
 	}
 }