author     Steven Whitehouse <swhiteho@redhat.com>       2009-02-05 05:12:38 -0500
committer  Steven Whitehouse <steve@dolmen.chygwyn.com>  2009-03-24 07:21:18 -0400
commit     d8348de06f704fc34d24ec068546ecb1045fc11a (patch)
tree       e6b7eb01ec128e40b255b903e3c6629fbd8174e5 /fs/gfs2
parent     e7c8707ea2b9106f0f78c43348ff5d5e82ba7961 (diff)
GFS2: Fix deadlock on journal flush
This patch fixes a deadlock when the journal is flushed and there
are dirty inodes other than the one which caused the journal flush.
Originally the journal flushing code was trying to obtain the
transaction glock while running the flush code for an inode glock.
We no longer require the transaction glock at this point in time
since we know that any attempt to get the transaction glock from
another node will result in a journal flush. So if we are flushing
the journal, we can be sure that the transaction lock is still
cached from when the transaction was started.
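
For context, here is a condensed sketch of the pre-patch flush path that
could hit the deadlock. The body is reconstructed from the lines removed
in the fs/gfs2/glops.c hunk below; the comments describing where it
blocks are an interpretation of the commit message rather than code from
the tree.

/* Pre-patch gfs2_ail_empty_gl(), condensed: runs while an inode glock
 * is being flushed as part of a journal flush. */
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        unsigned int blocks = atomic_read(&gl->gl_ail_count);
        int error;

        if (!blocks)
                return;

        /* gfs2_trans_begin() enqueues a holder on the transaction glock.
         * With a demote request already queued on that glock, the enqueue
         * waits behind the demote, and servicing the demote needs the very
         * journal flush we are in the middle of: deadlock. */
        error = gfs2_trans_begin(sdp, 0, blocks);
        if (gfs2_assert_withdraw(sdp, !error))
                return;

        /* ... write revokes for the AIL buffers, then end the transaction ... */
}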
By inlining a version of gfs2_trans_begin() (minus the bit which
gets the transaction glock) we can avoid the deadlock problems
caused if there is a demote request queued up on the transaction
glock.
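
Concretely, the shortened, inline version dropped into
gfs2_ail_empty_gl() looks like this (copied from the fs/gfs2/glops.c
hunk below; the rest of the function is unchanged and omitted here):

        struct gfs2_trans tr;

        memset(&tr, 0, sizeof(tr));
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);

        if (!tr.tr_revokes)
                return;

        /* A shortened, inline version of gfs2_trans_begin(): reserve log
         * space and install the transaction, but never queue a holder on
         * the transaction glock, which is already cached locally. */
        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
        tr.tr_ip = (unsigned long)__builtin_return_address(0);
        INIT_LIST_HEAD(&tr.tr_list_buf);
        gfs2_log_reserve(sdp, tr.tr_reserved);
        BUG_ON(current->journal_info);
        current->journal_info = &tr;

Because tr is zeroed and lives on the stack, tr_t_gh.gh_gl stays NULL;
the gfs2_trans_end() changes in fs/gfs2/trans.c key off that to skip the
transaction-glock dequeue and the kfree() for this kind of transaction.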
In addition, I've moved the umount rwsem so that it covers the
glock workqueue, since all demotions are now done by this
workqueue. That fixes a bug on umount which I came across while
fixing the original problem.
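
A skeleton of the new semaphore placement, condensed from the glock.c
hunks below. The function involved is the glock state machine run from
the glock workqueue (identified only by its __acquires(&gl->gl_spin)
annotation in the hunks; run_queue() at this point in the tree):

        down_read(&gfs2_umount_flush_sem);
        /* ... handle any queued demote request, promote waiting holders,
         *     call do_xmote() to change the lock state ... */
out_sem:
        up_read(&gfs2_umount_flush_sem);
        return;

out_sched:
        gfs2_glock_hold(gl);
        if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                gfs2_glock_put(gl);
out_unlock:
        clear_bit(GLF_LOCK, &gl->gl_flags);
        goto out_sem;

The separate down_read()/up_read() pairs in gfs2_glock_complete() and
thaw_glock() are removed, since those paths now reach the semaphore
through the workqueue.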
Reported-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2')
-rw-r--r--  fs/gfs2/glock.c | 26
-rw-r--r--  fs/gfs2/glops.c | 19
-rw-r--r--  fs/gfs2/trans.c | 16
3 files changed, 34 insertions, 27 deletions
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 173e59ce9ad3..ad8e121427c0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -167,6 +167,7 @@ static void glock_free(struct gfs2_glock *gl)
 
 static void gfs2_glock_hold(struct gfs2_glock *gl)
 {
+        GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
         atomic_inc(&gl->gl_ref);
 }
 
@@ -206,16 +207,15 @@ int gfs2_glock_put(struct gfs2_glock *gl)
                         atomic_dec(&lru_count);
                 }
                 spin_unlock(&lru_lock);
-                GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
                 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
                 glock_free(gl);
                 rv = 1;
                 goto out;
         }
-        write_unlock(gl_lock_addr(gl->gl_hash));
         /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */
         if (atomic_read(&gl->gl_ref) == 2)
                 gfs2_glock_schedule_for_reclaim(gl);
+        write_unlock(gl_lock_addr(gl->gl_hash));
 out:
         return rv;
 }
@@ -597,10 +597,11 @@ __acquires(&gl->gl_spin)
 
         GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
 
+        down_read(&gfs2_umount_flush_sem);
         if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
             gl->gl_demote_state != gl->gl_state) {
                 if (find_first_holder(gl))
-                        goto out;
+                        goto out_unlock;
                 if (nonblock)
                         goto out_sched;
                 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
@@ -611,23 +612,26 @@ __acquires(&gl->gl_spin)
                         gfs2_demote_wake(gl);
                 ret = do_promote(gl);
                 if (ret == 0)
-                        goto out;
+                        goto out_unlock;
                 if (ret == 2)
-                        return;
+                        goto out_sem;
                 gh = find_first_waiter(gl);
                 gl->gl_target = gh->gh_state;
                 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
                         do_error(gl, 0); /* Fail queued try locks */
         }
         do_xmote(gl, gh, gl->gl_target);
+out_sem:
+        up_read(&gfs2_umount_flush_sem);
         return;
 
 out_sched:
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
-out:
+out_unlock:
         clear_bit(GLF_LOCK, &gl->gl_flags);
+        goto out_sem;
 }
 
 static void glock_work_func(struct work_struct *work)
@@ -1225,7 +1229,6 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
-        down_read(&gfs2_umount_flush_sem);
         gl->gl_reply = ret;
         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
                 struct gfs2_holder *gh;
@@ -1236,16 +1239,13 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
                     ((ret & ~LM_OUT_ST_MASK) != 0))
                         set_bit(GLF_FROZEN, &gl->gl_flags);
                 spin_unlock(&gl->gl_spin);
-                if (test_bit(GLF_FROZEN, &gl->gl_flags)) {
-                        up_read(&gfs2_umount_flush_sem);
+                if (test_bit(GLF_FROZEN, &gl->gl_flags))
                         return;
-                }
         }
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
-        up_read(&gfs2_umount_flush_sem);
 }
 
 /**
@@ -1389,12 +1389,10 @@ static void thaw_glock(struct gfs2_glock *gl)
 {
         if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
                 return;
-        down_read(&gfs2_umount_flush_sem);
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
         gfs2_glock_hold(gl);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
-        up_read(&gfs2_umount_flush_sem);
 }
 
 /**
@@ -1580,7 +1578,7 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
         if (test_bit(GLF_REPLY_PENDING, gflags))
                 *p++ = 'r';
         if (test_bit(GLF_INITIAL, gflags))
-                *p++ = 'i';
+                *p++ = 'I';
         if (test_bit(GLF_FROZEN, gflags))
                 *p++ = 'F';
         *p = 0;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index f07ede8cb9ba..a9b7d3a60081 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -37,20 +37,25 @@
 static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
 {
         struct gfs2_sbd *sdp = gl->gl_sbd;
-        unsigned int blocks;
         struct list_head *head = &gl->gl_ail_list;
         struct gfs2_bufdata *bd;
         struct buffer_head *bh;
-        int error;
+        struct gfs2_trans tr;
 
-        blocks = atomic_read(&gl->gl_ail_count);
-        if (!blocks)
-                return;
+        memset(&tr, 0, sizeof(tr));
+        tr.tr_revokes = atomic_read(&gl->gl_ail_count);
 
-        error = gfs2_trans_begin(sdp, 0, blocks);
-        if (gfs2_assert_withdraw(sdp, !error))
+        if (!tr.tr_revokes)
                 return;
 
+        /* A shortened, inline version of gfs2_trans_begin() */
+        tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
+        tr.tr_ip = (unsigned long)__builtin_return_address(0);
+        INIT_LIST_HEAD(&tr.tr_list_buf);
+        gfs2_log_reserve(sdp, tr.tr_reserved);
+        BUG_ON(current->journal_info);
+        current->journal_info = &tr;
+
         gfs2_log_lock(sdp);
         while (!list_empty(head)) {
                 bd = list_entry(head->next, struct gfs2_bufdata,
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index 33cd523ec97e..053752d4b27f 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -87,9 +87,11 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
 
         if (!tr->tr_touched) {
                 gfs2_log_release(sdp, tr->tr_reserved);
-                gfs2_glock_dq(&tr->tr_t_gh);
-                gfs2_holder_uninit(&tr->tr_t_gh);
-                kfree(tr);
+                if (tr->tr_t_gh.gh_gl) {
+                        gfs2_glock_dq(&tr->tr_t_gh);
+                        gfs2_holder_uninit(&tr->tr_t_gh);
+                        kfree(tr);
+                }
                 return;
         }
 
@@ -105,9 +107,11 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
         }
 
         gfs2_log_commit(sdp, tr);
-        gfs2_glock_dq(&tr->tr_t_gh);
-        gfs2_holder_uninit(&tr->tr_t_gh);
-        kfree(tr);
+        if (tr->tr_t_gh.gh_gl) {
+                gfs2_glock_dq(&tr->tr_t_gh);
+                gfs2_holder_uninit(&tr->tr_t_gh);
+                kfree(tr);
+        }
 
         if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
                 gfs2_log_flush(sdp, NULL);