author     Steven Whitehouse <swhiteho@redhat.com>    2009-11-27 05:31:11 -0500
committer  Steven Whitehouse <swhiteho@redhat.com>    2009-12-03 07:00:12 -0500
commit     26bb7505cf7db3560286be9f6384b6d3911f78b5
tree       0fcdf84b70a9effe7ab9c661dca74b6a4b4afb31 /fs/gfs2
parent     c29cd9004e72acb5a6cb8caf08508f1c5edee686
GFS2: Fix glock refcount issues
This patch fixes some ref counting issues. Firstly, by moving
the point at which we drop the ref count after a dlm lock
operation has completed, we ensure that gfs2_glock_hold() is
never called on a glock with a zero ref count. Secondly, by
using atomic_dec_and_lock() in gfs2_glock_put(), we ensure
that a glock with a zero ref count can never appear on the
lru_list. That means the check for this in the shrinker, which
was racy, can be removed.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
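
The first fix follows a common refcounting pattern: the worker keeps its
reference across the whole work function and drops it exactly once at the
end, instead of having the completion handler drop it internally and forcing
callers to re-take a reference on an object whose count may already have hit
zero. Below is a minimal userspace sketch of that pattern; the names here
(lockobj, work_func, lockobj_put, reply_pending) are illustrative stand-ins,
not the kernel code.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct lockobj {
        atomic_int ref;         /* stands in for gl->gl_ref */
        bool reply_pending;     /* stands in for GLF_REPLY_PENDING */
    };

    static void lockobj_put(struct lockobj *lk)
    {
        /* fetch_sub returns the old value; 1 means we dropped the last ref */
        if (atomic_fetch_sub(&lk->ref, 1) == 1) {
            printf("last ref dropped, freeing\n");
            free(lk);
        }
    }

    /*
     * Before the patch, the completion handler dropped the reference
     * itself, so a caller that still needed the object had to re-take
     * a reference, racing with the count reaching zero.  After the
     * patch, the worker keeps its reference until all processing is
     * done and drops it once, at the end, via a drop_ref flag.
     */
    static void work_func(struct lockobj *lk)
    {
        int drop_ref = 0;

        if (lk->reply_pending) {
            lk->reply_pending = false;
            /* handle the reply; no ref manipulation in here any more */
            drop_ref = 1;
        }

        /* ... demote/promote processing would go here ... */

        if (drop_ref)
            lockobj_put(lk);    /* ref was held for the whole function */
    }

    int main(void)
    {
        struct lockobj *lk = malloc(sizeof(*lk));

        lk->ref = 1;            /* the ref taken when the reply was queued */
        lk->reply_pending = true;
        work_func(lk);          /* handles the reply, drops the final ref */
        return 0;
    }

This is the shape of the change in the diff below: the gfs2_glock_put()
calls inside finish_xmote() go away, the compensating gfs2_glock_hold()
next to the synchronous finish_xmote() call goes away, and
glock_work_func() gains the drop_ref logic.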
Diffstat (limited to 'fs/gfs2')
 fs/gfs2/glock.c | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)
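
The second fix relies on atomic_dec_and_lock(), which takes the given
spinlock and returns true only when the decrement brings the counter to
zero; otherwise it returns false without the lock held, typically via a
lock-free fast path. Because the final reference is dropped while lru_lock
is held, there is no window in which another CPU can observe a glock with
a zero ref count on the LRU list. Here is a rough userspace analogue of
the primitive, using a pthread mutex in place of the spinlock; dec_and_lock,
glockish, and put are hypothetical names for this sketch only.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

    /*
     * Decrement *ref; if it reaches zero, return true WITH the lock held.
     * The fast path avoids the lock entirely while the count stays above
     * one, mirroring the kernel's atomic_dec_and_lock().
     */
    static bool dec_and_lock(atomic_int *ref, pthread_mutex_t *lock)
    {
        int old = atomic_load(ref);

        while (old > 1) {
            if (atomic_compare_exchange_weak(ref, &old, old - 1))
                return false;   /* count stayed above zero, no lock taken */
        }
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(ref, 1) == 1)
            return true;        /* hit zero: caller cleans up, then unlocks */
        pthread_mutex_unlock(lock);
        return false;
    }

    struct glockish {
        atomic_int ref;
        bool on_lru;            /* stands in for !list_empty(&gl->gl_lru) */
    };

    static int put(struct glockish *g)
    {
        if (dec_and_lock(&g->ref, &lru_lock)) {
            if (g->on_lru)
                g->on_lru = false;  /* list_del_init() + lru_count-- */
            pthread_mutex_unlock(&lru_lock);
            printf("freed with no window where ref==0 on the LRU\n");
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct glockish g = { .ref = 2, .on_lru = true };

        put(&g);    /* 2 -> 1: fast path, lru_lock never taken */
        put(&g);    /* 1 -> 0: removed from LRU under lru_lock, then freed */
        return 0;
    }

With that guarantee, anything the shrinker finds on the LRU list must hold
at least one reference, so its racy atomic_read(&gl->gl_ref) == 0 check
(removed in the last hunk below) becomes unnecessary.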
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a3f90ad2af80..f455a03a09e2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	int rv = 0;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
-	if (atomic_dec_and_test(&gl->gl_ref)) {
+	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
 		hlist_del(&gl->gl_list);
-		write_unlock(gl_lock_addr(gl->gl_hash));
-		spin_lock(&lru_lock);
 		if (!list_empty(&gl->gl_lru)) {
 			list_del_init(&gl->gl_lru);
 			atomic_dec(&lru_count);
 		}
 		spin_unlock(&lru_lock);
+		write_unlock(gl_lock_addr(gl->gl_hash));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
@@ -513,7 +512,6 @@ retry:
 		GLOCK_BUG_ON(gl, 1);
 	}
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
 	return;
 }
 
@@ -524,8 +522,6 @@ retry:
 	if (glops->go_xmote_bh) {
 		spin_unlock(&gl->gl_spin);
 		rv = glops->go_xmote_bh(gl, gh);
-		if (rv == -EAGAIN)
-			return;
 		spin_lock(&gl->gl_spin);
 		if (rv) {
 			do_error(gl, rv);
@@ -540,7 +536,6 @@ out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
 }
 
 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
 
 	if (!(ret & LM_OUT_ASYNC)) {
 		finish_xmote(gl, ret);
-		gfs2_glock_hold(gl);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 	} else {
@@ -712,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+	int drop_ref = 0;
 
-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
+		drop_ref = 1;
+	}
 	down_read(&gfs2_umount_flush_sem);
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -732,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
+	if (drop_ref)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -1366,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
-		/* Check if glock is about to be freed */
-		if (atomic_read(&gl->gl_ref) == 0)
-			continue;
-
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);