path: root/fs/gfs2/glock.c
author		Steven Whitehouse <swhiteho@redhat.com>	2011-04-14 11:50:31 -0400
committer	Steven Whitehouse <swhiteho@redhat.com>	2011-04-20 04:01:17 -0400
commit		f42ab0852946c1fb5103682c5897eb3da908e4b0 (patch)
tree		3847b23d2cac6bab422e6e001e0c6d6c66a81f1e /fs/gfs2/glock.c
parent		627c10b7e471b5dcfb7101d6cc74d219619c9bc4 (diff)
GFS2: Optimise glock lru and end of life inodes
The GLF_LRU flag introduced in the previous patch can be used to check
whether a glock is on the LRU list when a new holder is queued and, if
so, to remove it without first having to take the lru_lock.

The main purpose of this patch, however, is to optimise the glocks left
over when an inode at end of life is being evicted. Previously such
glocks were left with the GLF_LFLUSH flag set, so that when reclaimed,
each one required a log flush. This patch resets the GLF_LFLUSH flag
when there is nothing left to flush, thus preventing later log flushes
as glocks are reused or demoted.

In order to do this, we need to keep track of the number of revokes
which are outstanding, and also to clear the GLF_LFLUSH bit after a log
commit when only revokes have been processed.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--	fs/gfs2/glock.c	41
1 file changed, 21 insertions(+), 20 deletions(-)
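The change to gfs2_glock_nq() in the diff below tests the GLF_LRU bit
without holding lru_lock, and only takes the lock (inside the new
gfs2_glock_remove_from_lru()) when the glock actually appears to be on
the list, so the common case avoids the lock entirely. What follows is
a minimal userspace sketch of that pattern, using pthreads and C11
atomics as stand-ins for the kernel's spinlock and bit operations; all
names in it (struct obj, lru_flagged, queue_holder, and so on) are
hypothetical, not GFS2 code.

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

	struct obj {
		atomic_bool lru_flagged; /* models GLF_LRU */
		bool on_lru_list;        /* models !list_empty(&gl->gl_lru) */
	};

	static void remove_from_lru(struct obj *o)
	{
		pthread_mutex_lock(&lru_lock);
		/* Re-check membership under the lock, as the real
		 * gfs2_glock_remove_from_lru() re-checks list_empty(). */
		if (o->on_lru_list) {
			o->on_lru_list = false;
			atomic_store(&o->lru_flagged, false);
		}
		pthread_mutex_unlock(&lru_lock);
	}

	/* Models the gfs2_glock_nq() fast path: the flag is tested
	 * without the lock, so a glock that is not on the LRU never
	 * touches lru_lock at all. */
	static void queue_holder(struct obj *o)
	{
		if (atomic_load(&o->lru_flagged))
			remove_from_lru(o);
		/* ... proceed to queue the holder ... */
	}

	int main(void)
	{
		struct obj o = { .lru_flagged = true, .on_lru_list = true };
		queue_holder(&o);
		printf("on_lru_list=%d flagged=%d\n",
		       o.on_lru_list, (int)atomic_load(&o.lru_flagged));
		return 0;
	}

The race where another CPU clears the flag between the unlocked test
and the lock acquisition is benign: the removal path re-checks list
membership under the lock before touching anything.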
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 0c6c69090140..cb8776f0102e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -145,14 +145,9 @@ static int demote_ok(const struct gfs2_glock *gl)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 
-	/* assert_spin_locked(&gl->gl_spin); */
-
 	if (gl->gl_state == LM_ST_UNLOCKED)
 		return 0;
-	if (test_bit(GLF_LFLUSH, &gl->gl_flags))
-		return 0;
-	if ((gl->gl_name.ln_type != LM_TYPE_INODE) &&
-	    !list_empty(&gl->gl_holders))
+	if (!list_empty(&gl->gl_holders))
 		return 0;
 	if (glops->go_demote_ok)
 		return glops->go_demote_ok(gl);
@@ -174,6 +169,17 @@ void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 	spin_unlock(&lru_lock);
 }
 
+static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
+{
+	spin_lock(&lru_lock);
+	if (!list_empty(&gl->gl_lru)) {
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+	}
+	spin_unlock(&lru_lock);
+}
+
 /**
  * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
  * @gl: the glock
@@ -217,12 +223,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	spin_lock_bucket(gl->gl_hash);
 	hlist_bl_del_rcu(&gl->gl_list);
 	spin_unlock_bucket(gl->gl_hash);
-	spin_lock(&lru_lock);
-	if (!list_empty(&gl->gl_lru)) {
-		list_del_init(&gl->gl_lru);
-		atomic_dec(&lru_count);
-	}
-	spin_unlock(&lru_lock);
+	gfs2_glock_remove_from_lru(gl);
 	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 	GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
 	trace_gfs2_glock_put(gl);
@@ -1025,6 +1026,9 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 		return -EIO;
 
+	if (test_bit(GLF_LRU, &gl->gl_flags))
+		gfs2_glock_remove_from_lru(gl);
+
 	spin_lock(&gl->gl_spin);
 	add_to_queue(gh);
 	if ((LM_FLAG_NOEXP & gh->gh_flags) &&
@@ -1082,7 +1086,8 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
-	__gfs2_glock_schedule_for_reclaim(gl);
+	if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
+		__gfs2_glock_schedule_for_reclaim(gl);
 	trace_gfs2_glock_queue(gh, 0);
 	spin_unlock(&gl->gl_spin);
 	if (likely(fast_path))
@@ -1461,12 +1466,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 
 static void clear_glock(struct gfs2_glock *gl)
 {
-	spin_lock(&lru_lock);
-	if (!list_empty(&gl->gl_lru)) {
-		list_del_init(&gl->gl_lru);
-		atomic_dec(&lru_count);
-	}
-	spin_unlock(&lru_lock);
+	gfs2_glock_remove_from_lru(gl);
 
 	spin_lock(&gl->gl_spin);
 	if (gl->gl_state != LM_ST_UNLOCKED)
@@ -1666,7 +1666,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 	dtime *= 1000000/HZ; /* demote time in uSec */
 	if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
 		dtime = 0;
-	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d r:%d\n",
+	gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
 		  state2str(gl->gl_state),
 		  gl->gl_name.ln_type,
 		  (unsigned long long)gl->gl_name.ln_number,
@@ -1674,6 +1674,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
 		  state2str(gl->gl_target),
 		  state2str(gl->gl_demote_state), dtime,
 		  atomic_read(&gl->gl_ail_count),
+		  atomic_read(&gl->gl_revokes),
 		  atomic_read(&gl->gl_ref));
 
 	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
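On the second half of the commit message (tracking outstanding revokes
so that GLF_LFLUSH can be dropped once a log commit leaves nothing
further to flush), here is a hedged sketch of the counting idea only.
The names (struct fake_glock, queue_revoke, revoke_written) are
illustrative stand-ins, not the actual GFS2 log code, which lives
outside this file.

	#include <stdatomic.h>
	#include <stdbool.h>

	struct fake_glock {
		atomic_int revokes;  /* models gl->gl_revokes */
		atomic_bool lflush;  /* models GLF_LFLUSH */
	};

	/* Queuing a revoke makes the glock require a log flush. */
	static void queue_revoke(struct fake_glock *gl)
	{
		atomic_fetch_add(&gl->revokes, 1);
		atomic_store(&gl->lflush, true);
	}

	/* Called as each revoke is written out by a log commit; when
	 * the last one drains, nothing remains to flush for this
	 * glock, so the flag can be cleared and a later reclaim no
	 * longer forces a log flush. */
	static void revoke_written(struct fake_glock *gl)
	{
		if (atomic_fetch_sub(&gl->revokes, 1) == 1)
			atomic_store(&gl->lflush, false);
	}

This also explains the debugfs change above: the new v: field dumps
gl_revokes alongside the AIL count, so the outstanding-revoke state is
visible per glock.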