path: root/fs/gfs2
author     Ross Lagerwall <ross.lagerwall@citrix.com>    2019-03-27 13:09:17 -0400
committer  Andreas Gruenbacher <agruenba@redhat.com>     2019-05-07 16:33:53 -0400
commit     7881ef3f33bb80f459ea6020d1e021fc524a6348 (patch)
tree       cfab62f5bb0959a8800e6cb4082700c2dbf86ab0 /fs/gfs2
parent     71921ef85928e95e3d942c747c9d40443a5ff775 (diff)
gfs2: Fix lru_count going negative
Under certain conditions, lru_count may drop below zero, resulting in a
large amount of log spam like this:

    vmscan: shrink_slab: gfs2_dump_glock+0x3b0/0x630 [gfs2] \
        negative objects to delete nr=-1

This happens as follows:
1) A glock is moved from lru_list to the dispose list and lru_count is
   decremented.
2) The dispose function calls cond_resched() and drops the lru lock.
3) Another thread takes the lru lock and tries to add the same glock to
   lru_list, checking if the glock is on an lru list.
4) It is on a list (actually the dispose list) and so it avoids
   incrementing lru_count.
5) The glock is moved to lru_list.
6) The original thread doesn't dispose it because it has been re-added
   to the lru list, but lru_count has still been decremented by one.

Fix by checking whether the LRU flag is set on the glock rather than
checking whether the glock is on some list, and rearrange the code so
that the LRU flag is set/cleared precisely when the glock is
added/removed from lru_list.

Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
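The sequence above boils down to an accounting rule: lru_count must track exactly the glocks that sit on lru_list, but the pre-fix code inferred membership from !list_empty(&gl->gl_lru), which is also true while a glock sits on the scanner's private dispose list. Below is a minimal, stand-alone userspace sketch of that accounting (the struct, enum, and function names are simplified stand-ins for illustration, not the real fs/gfs2 code); it runs the steps above once with the old list-based check and once with the flag-based check used by this patch.

    #include <stdbool.h>
    #include <stdio.h>

    enum where { NOWHERE, ON_LRU, ON_DISPOSE };

    struct glock {
            enum where pos;         /* which list the glock is on, if any */
            bool lru_flag;          /* models the GLF_LRU bit */
    };

    static int lru_count;

    /* Old add path: bump the count only when the glock is on no list at all. */
    static void add_to_lru_old(struct glock *gl)
    {
            if (gl->pos == NOWHERE)         /* also skips ON_DISPOSE -> bug */
                    lru_count++;
            gl->pos = ON_LRU;
    }

    /* New add path: bump the count iff the LRU flag was not already set. */
    static void add_to_lru_new(struct glock *gl)
    {
            gl->pos = ON_LRU;
            if (!gl->lru_flag) {
                    gl->lru_flag = true;
                    lru_count++;
            }
    }

    /* Scanner moving a glock from lru_list to its private dispose list. */
    static void move_to_dispose(struct glock *gl, bool flag_based)
    {
            gl->pos = ON_DISPOSE;
            lru_count--;
            if (flag_based)
                    gl->lru_flag = false;   /* flag cleared exactly on removal */
    }

    int main(void)
    {
            /* Old scheme: steps 1-6 from the commit message. */
            struct glock gl = { ON_LRU, true };
            lru_count = 1;
            move_to_dispose(&gl, false);    /* 1-2: picked up for disposal */
            add_to_lru_old(&gl);            /* 3-5: re-added, increment skipped */
            /* 6: scanner skips disposal since the glock is back on lru_list */
            printf("old: on lru_list=%d lru_count=%d\n", gl.pos == ON_LRU, lru_count);
            move_to_dispose(&gl, false);    /* next scan pushes the count negative */
            printf("old: lru_count=%d\n", lru_count);

            /* Same sequence with the flag-based accounting from this patch. */
            struct glock gl2 = { ON_LRU, true };
            lru_count = 1;
            move_to_dispose(&gl2, true);
            add_to_lru_new(&gl2);
            printf("new: on lru_list=%d lru_count=%d\n", gl2.pos == ON_LRU, lru_count);
            return 0;
    }

Under the old scheme the run ends with one glock on lru_list while lru_count reads 0, and the next scan drives it to -1; the flag-based scheme keeps the counter at 1.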
Diffstat (limited to 'fs/gfs2')
-rw-r--r--    fs/gfs2/glock.c    22
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d32964cd1117..e4f6d39500bc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -183,15 +183,19 @@ static int demote_ok(const struct gfs2_glock *gl)
 
 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 {
+	if (!(gl->gl_ops->go_flags & GLOF_LRU))
+		return;
+
 	spin_lock(&lru_lock);
 
-	if (!list_empty(&gl->gl_lru))
-		list_del_init(&gl->gl_lru);
-	else
+	list_del(&gl->gl_lru);
+	list_add_tail(&gl->gl_lru, &lru_list);
+
+	if (!test_bit(GLF_LRU, &gl->gl_flags)) {
+		set_bit(GLF_LRU, &gl->gl_flags);
 		atomic_inc(&lru_count);
+	}
 
-	list_add_tail(&gl->gl_lru, &lru_list);
-	set_bit(GLF_LRU, &gl->gl_flags);
 	spin_unlock(&lru_lock);
 }
 
@@ -201,7 +205,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
 		return;
 
 	spin_lock(&lru_lock);
-	if (!list_empty(&gl->gl_lru)) {
+	if (test_bit(GLF_LRU, &gl->gl_flags)) {
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 		clear_bit(GLF_LRU, &gl->gl_flags);
@@ -1159,8 +1163,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags))
 			fast_path = 1;
 	}
-	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
-	    (glops->go_flags & GLOF_LRU))
+	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
 		gfs2_glock_add_to_lru(gl);
 
 	trace_gfs2_glock_queue(gh, 0);
@@ -1456,6 +1459,7 @@ __acquires(&lru_lock)
 		if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
 			list_add(&gl->gl_lru, &lru_list);
+			set_bit(GLF_LRU, &gl->gl_flags);
 			atomic_inc(&lru_count);
 			continue;
 		}
@@ -1463,7 +1467,6 @@ add_back_to_lru:
 			spin_unlock(&gl->gl_lockref.lock);
 			goto add_back_to_lru;
 		}
-		clear_bit(GLF_LRU, &gl->gl_flags);
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1498,6 +1501,7 @@ static long gfs2_scan_glock_lru(int nr)
 		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
+			clear_bit(GLF_LRU, &gl->gl_flags);
 			freed++;
 			continue;
 		}