author     Steven Whitehouse <swhiteho@redhat.com>    2006-08-24 17:03:05 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2006-08-24 17:03:05 -0400
commit     a2242db0906445491d9ac50bfa756b0de0a25d45 (patch)
tree       ed530c05eaae5293fd6b14cfb05bdee655adc586
parent     166afccd71fbb7bd758ab9fc770eef4924081077 (diff)
[GFS2] Speed up scanning of glocks
I noticed that gfs2_scand seemed to be taking a lot of CPU,
so here is a patch to cut that down a bit. Firstly, the type
of a glock is constant during its lifetime, so it is possible
to check it without needing any locking. I've moved the (common)
case of testing for an inode glock outside of the glmutex lock.
There was also a mutex left over from when the glock cache was
master of the inode cache. That isn't required any more, so I've
removed it too.
There is probably scope for further speed-ups in this area in
the future.
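As a rough sketch of the resulting control flow (condensed from
the scan_glock() hunk below; not a self-contained, compilable
excerpt):

static void scan_glock(struct gfs2_glock *gl)
{
        /* gl_ops never changes once the glock exists, so the common
         * inode-glock case can be rejected before trying the glmutex. */
        if (gl->gl_ops == &gfs2_inode_glops)
                goto out;

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;
                gfs2_glmutex_unlock(gl);
        }
out:
        gfs2_glock_put(gl);
        return;

out_schedule:
        /* unchanged reclaim path, not shown in this hunk */
}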
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
-rw-r--r--   fs/gfs2/glock.c      | 24
-rw-r--r--   fs/gfs2/incore.h     |  1
-rw-r--r--   fs/gfs2/ops_fstype.c |  1
3 files changed, 5 insertions, 21 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a5e16e53999..005788fb361 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -150,12 +150,9 @@ static void kill_glock(struct kref *kref)
 
 int gfs2_glock_put(struct gfs2_glock *gl)
 {
-        struct gfs2_sbd *sdp = gl->gl_sbd;
         struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
         int rv = 0;
 
-        mutex_lock(&sdp->sd_invalidate_inodes_mutex);
-
         write_lock(&bucket->hb_lock);
         if (kref_put(&gl->gl_ref, kill_glock)) {
                 list_del_init(&gl->gl_list);
@@ -166,8 +163,7 @@ int gfs2_glock_put(struct gfs2_glock *gl)
                 goto out;
         }
         write_unlock(&bucket->hb_lock);
 out:
-        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
         return rv;
 }
 
@@ -1964,19 +1960,18 @@ static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
 
 static void scan_glock(struct gfs2_glock *gl)
 {
+        if (gl->gl_ops == &gfs2_inode_glops)
+                goto out;
+
         if (gfs2_glmutex_trylock(gl)) {
-                if (gl->gl_ops == &gfs2_inode_glops)
-                        goto out;
                 if (queue_empty(gl, &gl->gl_holders) &&
                     gl->gl_state != LM_ST_UNLOCKED &&
                     demote_ok(gl))
                         goto out_schedule;
-out:
                 gfs2_glmutex_unlock(gl);
         }
-
+out:
         gfs2_glock_put(gl);
-
         return;
 
 out_schedule:
@@ -2070,16 +2065,7 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
                         t = jiffies;
                 }
 
-                /* invalidate_inodes() requires that the sb inodes list
-                   not change, but an async completion callback for an
-                   unlock can occur which does glock_put() which
-                   can call iput() which will change the sb inodes list.
-                   invalidate_inodes_mutex prevents glock_put()'s during
-                   an invalidate_inodes() */
-
-                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                 invalidate_inodes(sdp->sd_vfs);
-                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
                 msleep(10);
         }
 }
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index e98c14f30da..78d3cb511eb 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -507,7 +507,6 @@ struct gfs2_sbd {
         struct gfs2_holder sd_live_gh;
         struct gfs2_glock *sd_rename_gl;
         struct gfs2_glock *sd_trans_gl;
-        struct mutex sd_invalidate_inodes_mutex;
 
         /* Inode Stuff */
 
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 444000968cf..c66067c84bc 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -63,7 +63,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
         INIT_LIST_HEAD(&sdp->sd_reclaim_list);
         spin_lock_init(&sdp->sd_reclaim_lock);
         init_waitqueue_head(&sdp->sd_reclaim_wq);
-        mutex_init(&sdp->sd_invalidate_inodes_mutex);
 
         mutex_init(&sdp->sd_inum_mutex);
         spin_lock_init(&sdp->sd_statfs_spin);