Diffstat (limited to 'fs/gfs2/glock.c')
-rw-r--r--   fs/gfs2/glock.c   14
1 file changed, 9 insertions, 5 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		cachep = gfs2_glock_aspace_cachep;
 	else
 		cachep = gfs2_glock_cachep;
-	gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+	gl = kmem_cache_alloc(cachep, GFP_NOFS);
 	if (!gl)
 		return -ENOMEM;
 
 	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
 	if (glops->go_flags & GLOF_LVB) {
-		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
 		if (!gl->gl_lksb.sb_lvbptr) {
 			kmem_cache_free(cachep, gl);
 			return -ENOMEM;
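
The first hunk is a GFP-flag change only: both the glock object and its optional lock value block (LVB) are now allocated with GFP_NOFS, so that any reclaim triggered by these allocations cannot re-enter filesystem code and deadlock against locks GFS2 may already hold on this path. A minimal sketch of the resulting allocation path in gfs2_glock_get(), assembled from the hunk above (the closing braces and the surrounding declarations of gl, cachep and glops are assumed from context):

	/* Allocate the glock from the chosen slab cache; GFP_NOFS keeps
	 * reclaim from recursing back into filesystem code. */
	gl = kmem_cache_alloc(cachep, GFP_NOFS);
	if (!gl)
		return -ENOMEM;

	memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));

	if (glops->go_flags & GLOF_LVB) {
		/* Optional lock value block, zeroed, same GFP constraint. */
		gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
		if (!gl->gl_lksb.sb_lvbptr) {
			kmem_cache_free(cachep, gl);
			return -ENOMEM;
		}
	}
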
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
 		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
 		list_del_init(&gl->gl_lru);
 		if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
 			list_add(&gl->gl_lru, &lru_list);
 			atomic_inc(&lru_count);
 			continue;
 		}
+		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+			spin_unlock(&gl->gl_spin);
+			goto add_back_to_lru;
+		}
 		clear_bit(GLF_LRU, &gl->gl_flags);
-		spin_unlock(&lru_lock);
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gl->gl_lockref.count--;
 		spin_unlock(&gl->gl_spin);
-		spin_lock(&lru_lock);
+		cond_resched_lock(&lru_lock);
 	}
 }
 
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
 		/* Test for being demotable */
-		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
 			freed++;
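
The remaining hunks close a race in the shrinker path. In gfs2_scan_glock_lru() the scanner now only tests GLF_LOCK when deciding whether a glock is demotable; actually claiming GLF_LOCK moves into the disposal loop, where it is done under gl_spin via test_and_set_bit(), and a glock that turns out to be busy is put back on the LRU instead of being disposed of. The spin_unlock(&lru_lock)/spin_lock(&lru_lock) pair around each disposal is dropped in favour of holding lru_lock across the loop and yielding it only through cond_resched_lock(). A sketch of the disposal loop after the patch, assembled from the hunks above (the enclosing while loop is reconstructed and lines falling outside the hunks are elided):

	while (!list_empty(list)) {
		gl = list_entry(list->next, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		if (!spin_trylock(&gl->gl_spin)) {
add_back_to_lru:
			/* Glock is busy: return it to the global LRU list. */
			list_add(&gl->gl_lru, &lru_list);
			atomic_inc(&lru_count);
			continue;
		}
		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
			/* Someone else holds GLF_LOCK: don't dispose of it. */
			spin_unlock(&gl->gl_spin);
			goto add_back_to_lru;
		}
		clear_bit(GLF_LRU, &gl->gl_flags);
		gl->gl_lockref.count++;
		if (demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
		/* ... */
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gl->gl_lockref.count--;
		spin_unlock(&gl->gl_spin);
		/* lru_lock now stays held across each disposal; it is only
		 * dropped briefly when a reschedule is pending. */
		cond_resched_lock(&lru_lock);
	}
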