author	Steven Whitehouse <swhiteho@redhat.com>	2011-03-30 11:33:25 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2011-04-20 03:59:48 -0400
commit	29687a2ac8dfcd5363e515ea715ec226aef8c26b (patch)
tree	a9bb35c110396c9c2a563c9442690cfb3368c8a8 /fs/gfs2/glock.c
parent	5ac048bb7ea6e87b06504b999017cfa1f38f4092 (diff)
GFS2: Alter point of entry to glock lru list for glocks with an address_space
Rather than allowing the glocks to be scheduled for possible reclaim as soon as they have exited the journal, this patch delays their entry to the list until the glocks in question are no longer in use. This means that we will rely on the vm for writeback of all dirty data and metadata from now on. When glocks are added to the lru list they should be freeable much faster since all the I/O required to free them should have already been completed. This should lead to much better I/O patterns under low memory conditions.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
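The list manipulation at the heart of the new gfs2_glock_add_to_lru() follows a common LRU discipline: if the glock is already queued it is unlinked first, so lru_count only counts distinct entries, and it is then re-added at the tail so the longest-idle entries stay at the head for reclaim. Below is a minimal userspace sketch of that discipline; it is illustrative only, not GFS2 code: the list helpers mimic <linux/list.h> and every name in it is hypothetical.

/*
 * Userspace sketch (not GFS2 code) of the LRU-entry discipline that
 * gfs2_glock_add_to_lru() centralises: unlink if already queued, count
 * only distinct entries, always re-add at the tail.
 */
#include <stdio.h>

struct node {
	struct node *prev, *next;	/* self-linked == not on any list */
	const char *name;
};

static struct node lru = { &lru, &lru, "head" };
static int lru_count;

static int list_empty(const struct node *n)
{
	return n->next == n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->next = n->prev = n;		/* back to the "not queued" state */
}

/* Mirrors the patch: unlink if already present, else count a new entry. */
static void lru_add_tail(struct node *n)
{
	if (!list_empty(n))
		list_del_init(n);
	else
		lru_count++;

	n->prev = lru.prev;
	n->next = &lru;
	lru.prev->next = n;
	lru.prev = n;
}

int main(void)
{
	struct node a = { &a, &a, "a" }, b = { &b, &b, "b" };

	lru_add_tail(&a);
	lru_add_tail(&b);
	lru_add_tail(&a);	/* re-add: moves a to the tail, count stays 2 */

	for (struct node *n = lru.next; n != &lru; n = n->next)
		printf("%s ", n->name);		/* prints: b a */
	printf("(count=%d)\n", lru_count);
	return 0;
}

Re-adding an entry moves it to the tail without changing the count, so a glock that falls idle again after a burst of use queues behind glocks that have been idle for longer.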
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c | 33 +++++++++++++++------------------
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f07643e21bfa..1019183232fe 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -160,6 +160,19 @@ static int demote_ok(const struct gfs2_glock *gl)
 }
 
 
+void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+{
+	spin_lock(&lru_lock);
+
+	if (!list_empty(&gl->gl_lru))
+		list_del_init(&gl->gl_lru);
+	else
+		atomic_inc(&lru_count);
+
+	list_add_tail(&gl->gl_lru, &lru_list);
+	spin_unlock(&lru_lock);
+}
+
 /**
  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
@@ -170,24 +183,8 @@ static int demote_ok(const struct gfs2_glock *gl)
 
 static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
 {
-	if (demote_ok(gl)) {
-		spin_lock(&lru_lock);
-
-		if (!list_empty(&gl->gl_lru))
-			list_del_init(&gl->gl_lru);
-		else
-			atomic_inc(&lru_count);
-
-		list_add_tail(&gl->gl_lru, &lru_list);
-		spin_unlock(&lru_lock);
-	}
-}
-
-void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
-{
-	spin_lock(&gl->gl_spin);
-	__gfs2_glock_schedule_for_reclaim(gl);
-	spin_unlock(&gl->gl_spin);
+	if (demote_ok(gl))
+		gfs2_glock_add_to_lru(gl);
 }
 
 /**