author     Wendy Cheng <wcheng@redhat.com>           2007-10-05 00:27:58 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2008-01-25 03:07:09 -0500
commit     cc7e79b168a552152299bd8a8254dc099aacc993
tree       65871c8ec495401846bc9e7030a89d3c6335f5ee  /fs/gfs2/glock.c
parent     49914084e797530d9baaf51df9eda77babc98fa8
[GFS2] Handle multiple glock demote requests
Fix a race condition where multiple glock demote requests are sent to
a node back-to-back. This patch adds a check inside handle_callback()
to see whether a demote request is already in progress. If so, it sets
a flag to make sure run_queue() will loop again to handle the new
request, instead of erroneously setting gl_demote_state to a different
state.
Signed-off-by: S. Wendy Cheng <wcheng@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2/glock.c')
 fs/gfs2/glock.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index a37efe4aae6..104e83ff874 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -567,7 +567,10 @@ static int rq_demote(struct gfs2_glock *gl)
                 gfs2_demote_wake(gl);
                 return 0;
         }
+
         set_bit(GLF_LOCK, &gl->gl_flags);
+        set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+
         if (gl->gl_demote_state == LM_ST_UNLOCKED ||
             gl->gl_state != LM_ST_EXCLUSIVE) {
                 spin_unlock(&gl->gl_spin);
@@ -576,7 +579,9 @@ static int rq_demote(struct gfs2_glock *gl)
                 spin_unlock(&gl->gl_spin);
                 gfs2_glock_xmote_th(gl, NULL);
         }
+
         spin_lock(&gl->gl_spin);
+        clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 
         return 0;
 }
@@ -606,6 +611,11 @@ static void run_queue(struct gfs2_glock *gl)
 
                 } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
                         blocked = rq_demote(gl);
+                        if (gl->gl_waiters2 && !blocked) {
+                                set_bit(GLF_DEMOTE, &gl->gl_flags);
+                                gl->gl_demote_state = LM_ST_UNLOCKED;
+                        }
+                        gl->gl_waiters2 = 0;
                 } else if (!list_empty(&gl->gl_waiters3)) {
                         gh = list_entry(gl->gl_waiters3.next,
                                         struct gfs2_holder, gh_list);
@@ -722,7 +732,10 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
                 }
         } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
                    gl->gl_demote_state != state) {
-                gl->gl_demote_state = LM_ST_UNLOCKED;
+                if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+                        gl->gl_waiters2 = 1;
+                else
+                        gl->gl_demote_state = LM_ST_UNLOCKED;
         }
         spin_unlock(&gl->gl_spin);
 }
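
For readers unfamiliar with the glock code, below is a minimal, self-contained
sketch of the handshake this patch introduces between handle_callback() and the
demote path. It is not the kernel implementation: the struct glock here is a
stand-in, run_queue() and rq_demote() are collapsed into one step, the blocked
return-value check is omitted, and the real code holds gl->gl_spin around these
updates. The names are borrowed from the patch; the main() interleaving is an
illustrative assumption.

/* Sketch of the GLF_DEMOTE_IN_PROGRESS / gl_waiters2 handshake (assumed,
 * simplified stand-in for the GFS2 glock state machine). */
#include <stdbool.h>
#include <stdio.h>

#define LM_ST_UNLOCKED 0
#define LM_ST_SHARED   3

struct glock {
        bool demote;             /* GLF_DEMOTE: a demote is queued         */
        bool demote_in_progress; /* GLF_DEMOTE_IN_PROGRESS                 */
        bool waiters2;           /* gl_waiters2: a request raced with us   */
        int  demote_state;       /* state the other node asked us to take  */
};

/* handle_callback(): a (possibly second, back-to-back) demote request arrives. */
static void handle_callback(struct glock *gl, int state)
{
        if (!gl->demote) {
                gl->demote = true;
                gl->demote_state = state;
        } else if (gl->demote_state != LM_ST_UNLOCKED &&
                   gl->demote_state != state) {
                if (gl->demote_in_progress)
                        gl->waiters2 = true;  /* defer: redo after this demote */
                else
                        gl->demote_state = LM_ST_UNLOCKED;
        }
}

/* run_queue()/rq_demote() collapsed: perform the pending demote, then re-arm
 * GLF_DEMOTE if a conflicting request raced with it. */
static void run_queue(struct glock *gl)
{
        if (!gl->demote)
                return;
        gl->demote_in_progress = true;
        /* ... drop or convert the lock without gl_spin held ... */
        gl->demote = false;
        gl->demote_in_progress = false;

        if (gl->waiters2) {                   /* loop again for the new request */
                gl->demote = true;
                gl->demote_state = LM_ST_UNLOCKED;
        }
        gl->waiters2 = false;
}

int main(void)
{
        struct glock gl = {0};

        handle_callback(&gl, LM_ST_SHARED);   /* first demote request            */
        gl.demote_in_progress = true;         /* pretend rq_demote() is mid-flight */
        handle_callback(&gl, LM_ST_UNLOCKED); /* back-to-back request races in   */
        gl.demote_in_progress = false;
        run_queue(&gl);                       /* handles the deferred request    */

        printf("demote pending=%d demote_state=%d\n", gl.demote, gl.demote_state);
        return 0;
}

Running the sketch prints that a demote to LM_ST_UNLOCKED is still pending after
the first demote completes: the second request is remembered in gl_waiters2 and
replayed by run_queue(), rather than overwriting gl_demote_state while rq_demote()
is already acting on the first one.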