author    Steven Whitehouse <swhiteho@redhat.com>  2014-06-23 09:43:32 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2014-07-18 06:12:51 -0400
commit    94a09a3999ee978e097b5aad74034ed43bae56db (patch)
tree      6b4331c36b3ccee82d22e62fc133f07ad060e11c /fs/gfs2
parent    79272b3562bb44ce7dc720cd13136f5a4a53c618 (diff)
GFS2: Fix race in glock lru glock disposal
We must not leave items on the LRU list with GLF_LOCK set, since they can be
removed if the glock is brought back into use, which may then potentially
result in a hang, waiting for GLF_LOCK to clear. It doesn't happen very often,
since it requires a glock that has not been used for a long time to be brought
back into use at the same moment that the shrinker is part way through
disposing of glocks.

The fix is to set GLF_LOCK at a later time, when we already know that the
other locks can be obtained. Also, we now only release the lru_lock in case a
resched is needed, rather than on every iteration.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
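For illustration only, here is a minimal userspace C sketch (not GFS2 code;
struct item, busy, lru_push and dispose_list are all invented names) of the
ordering the patch establishes: an item taken off the LRU is marked busy only
after its own lock has been obtained, and on any failure it goes back on the
LRU without this thread holding the flag, so a later user can never block on a
flag left behind by a half-finished disposal.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct item {
        struct item *next;
        pthread_spinlock_t lock;
        atomic_int busy;                /* plays the role of GLF_LOCK */
};

static struct item *lru_head;           /* protected by lru_lock */
static pthread_spinlock_t lru_lock;

static void lru_push(struct item *it)
{
        it->next = lru_head;
        lru_head = it;
}

/* Dispose of every item on 'list'; called with lru_lock held. */
static void dispose_list(struct item *list)
{
        struct item *it;

        while ((it = list) != NULL) {
                list = it->next;

                if (pthread_spin_trylock(&it->lock) != 0) {
add_back_to_lru:
                        lru_push(it);   /* back on the LRU, no flag held by us */
                        continue;
                }
                /* Claim the item only now that disposal is sure to go ahead. */
                if (atomic_exchange(&it->busy, 1)) {
                        pthread_spin_unlock(&it->lock);
                        goto add_back_to_lru;
                }

                printf("disposing item %p\n", (void *)it);

                atomic_store(&it->busy, 0);
                pthread_spin_unlock(&it->lock);
        }
}

int main(void)
{
        struct item a, b;

        pthread_spin_init(&lru_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&a.lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_init(&b.lock, PTHREAD_PROCESS_PRIVATE);
        atomic_init(&a.busy, 0);
        atomic_init(&b.busy, 0);

        a.next = &b;
        b.next = NULL;

        pthread_spin_lock(&lru_lock);
        dispose_list(&a);
        pthread_spin_unlock(&lru_lock);
        return 0;
}

This mirrors the diff below: the disposal loop now does test_and_set_bit(GLF_LOCK, ...)
only after spin_trylock(&gl->gl_spin) has succeeded, and gfs2_scan_glock_lru()
merely tests the bit when picking candidates.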
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c | 10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 278fae5b6982..c1e5b126d2ca 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1406,12 +1406,16 @@ __acquires(&lru_lock)
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        atomic_inc(&lru_count);
                        continue;
                }
+               if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                       spin_unlock(&gl->gl_spin);
+                       goto add_back_to_lru;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               spin_unlock(&lru_lock);
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1419,7 +1423,7 @@ __acquires(&lru_lock)
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
-               spin_lock(&lru_lock);
+               cond_resched_lock(&lru_lock);
        }
}

@@ -1444,7 +1448,7 @@ static long gfs2_scan_glock_lru(int nr)
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);

                /* Test for being demotable */
-               if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+               if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        freed++;
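The second hunk also stops cycling lru_lock on every pass: the unconditional
spin_unlock()/spin_lock() pair is replaced by cond_resched_lock(&lru_lock),
which only gives the lock up, reschedules and retakes it when that is actually
needed, rather than on every iteration. A rough userspace analogue of that
idea (pthread-based, with invented names; not the kernel implementation) might
look like:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic bool lru_lock_wanted;    /* set by a contending thread */

/*
 * Rough analogue of cond_resched_lock(): drop the lock and yield only
 * when somebody is actually waiting for it, instead of cycling the lock
 * on every loop iteration. Returns 1 if the lock was dropped.
 */
static int cond_resched_lru_lock(void)
{
        if (!lru_lock_wanted)
                return 0;
        pthread_mutex_unlock(&lru_lock);
        sched_yield();
        pthread_mutex_lock(&lru_lock);
        return 1;
}

int main(void)
{
        pthread_mutex_lock(&lru_lock);
        /* ... walk and dispose of LRU entries here ... */
        cond_resched_lru_lock();
        pthread_mutex_unlock(&lru_lock);
        return 0;
}

A contending thread would set lru_lock_wanted before blocking on the mutex;
the thread holding the lock then backs off only when that actually happens.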