author     Eric Sandeen <sandeen@sandeen.net>       2006-10-28 13:38:27 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-10-28 14:30:51 -0400
commit     f58a74dca88d48b0669609b4957f3dd757bdc898
tree       bfd9a7f078d3d017e92fbd75659f35b619ccf188
parent     1939e49a0cb9d73785857bf312f4f65661b4b513
[PATCH] jbd: journal_dirty_data re-check for unmapped buffers
When running several fsx's and other filesystem stress tests, we found
cases where an unmapped buffer was still being sent to submit_bh by the
ext3 dirty data journaling code.
I saw this happen in two ways, both related to another thread doing a
truncate which would unmap the buffer in question.
Either we would get into journal_dirty_data with a bh which was already
unmapped (although journal_dirty_data_fn had checked for this earlier, the
state was not locked at that point), or it would get unmapped in the middle
of journal_dirty_data when we dropped locks to call sync_dirty_buffer.
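The second window looks roughly like this (a condensed sketch of the relevant locking in journal_dirty_data, not the full code path): both locks are dropped around sync_dirty_buffer, so a racing truncate can reach journal_unmap_buffer and unmap the buffer before they are retaken.

	spin_unlock(&journal->j_list_lock);
	jbd_unlock_bh_state(bh);
	sync_dirty_buffer(bh);          /* a racing truncate can unmap bh here */
	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	/* without a re-check, bh may no longer be mapped at this point */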
By re-checking for mapped state after we've acquired the bh state lock, we
should avoid these races. If we find a buffer which is no longer mapped,
we essentially ignore it, because journal_unmap_buffer has already decided
that this buffer can go away.
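In pattern form, the check added at both places is simply (condensed from the diff below; buffer_mapped, jbd_lock_bh_state and JBUFFER_TRACE are the existing helpers):

	jbd_lock_bh_state(bh);
	spin_lock(&journal->j_list_lock);
	/* now that bh_state is locked, is the buffer really still mapped? */
	if (!buffer_mapped(bh)) {
		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
		goto no_journal;	/* journal_unmap_buffer has already decided its fate */
	}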
I've also added JBUFFER_TRACE buffer-trace points in these two cases, and made a
couple of other trace-point changes that I found useful while debugging this.
Signed-off-by: Eric Sandeen <esandeen@redhat.com>
Cc: <linux-ext4@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 fs/jbd/transaction.c | 15 +-
 1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index d5c63047a8b3..4f82bcd63e48 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -967,6 +967,13 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 	 */
 	jbd_lock_bh_state(bh);
 	spin_lock(&journal->j_list_lock);
+
+	/* Now that we have bh_state locked, are we really still mapped? */
+	if (!buffer_mapped(bh)) {
+		JBUFFER_TRACE(jh, "unmapped buffer, bailing out");
+		goto no_journal;
+	}
+
 	if (jh->b_transaction) {
 		JBUFFER_TRACE(jh, "has transaction");
 		if (jh->b_transaction != handle->h_transaction) {
@@ -1028,6 +1035,11 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 			sync_dirty_buffer(bh);
 			jbd_lock_bh_state(bh);
 			spin_lock(&journal->j_list_lock);
+			/* Since we dropped the lock... */
+			if (!buffer_mapped(bh)) {
+				JBUFFER_TRACE(jh, "buffer got unmapped");
+				goto no_journal;
+			}
 			/* The buffer may become locked again at any
 			   time if it is redirtied */
 		}
@@ -1824,6 +1836,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 			}
 		}
 	} else if (transaction == journal->j_committing_transaction) {
+		JBUFFER_TRACE(jh, "on committing transaction");
 		if (jh->b_jlist == BJ_Locked) {
 			/*
 			 * The buffer is on the committing transaction's locked
@@ -1838,7 +1851,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 		 * can remove it's next_transaction pointer from the
 		 * running transaction if that is set, but nothing
 		 * else. */
-		JBUFFER_TRACE(jh, "on committing transaction");
 		set_buffer_freed(bh);
 		if (jh->b_next_transaction) {
 			J_ASSERT(jh->b_next_transaction ==
@@ -1858,6 +1870,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 		 * i_size already for this truncate so recovery will not
 		 * expose the disk blocks we are discarding here.) */
 		J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
+		JBUFFER_TRACE(jh, "on running transaction");
 		may_free = __dispose_buffer(jh, transaction);
 	}
 