aboutsummaryrefslogtreecommitdiffstats
path: root/fs/jbd2
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2012-12-25 13:29:52 -0500
committerTheodore Ts'o <tytso@mit.edu>2012-12-25 13:29:52 -0500
commit53e872681fed6a43047e71bf927f77d06f467988 (patch)
tree8b5061acbaf222b3f25df54ddbcaa0b1123c471a /fs/jbd2
parent4520fb3c3690f2643006d85f09ecb74554c10e95 (diff)
ext4: fix deadlock in journal_unmap_buffer()
We cannot wait for transaction commit in journal_unmap_buffer() because we hold page lock which ranks below transaction start. We solve the issue by bailing out of journal_unmap_buffer() and jbd2_journal_invalidatepage() with -EBUSY. Caller is then responsible for waiting for transaction commit to finish and try invalidation again. Since the issue can happen only for a page straddling i_size, it is simple enough to manually call jbd2_journal_invalidatepage() for such page from ext4_setattr(), check the return value and wait if necessary. Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/jbd2')
-rw-r--r--fs/jbd2/transaction.c27
1 files changed, 14 insertions, 13 deletions
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index cd4485db42b3..ddc51a7f4508 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1840,7 +1840,6 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
1840 1840
1841 BUFFER_TRACE(bh, "entry"); 1841 BUFFER_TRACE(bh, "entry");
1842 1842
1843retry:
1844 /* 1843 /*
1845 * It is safe to proceed here without the j_list_lock because the 1844 * It is safe to proceed here without the j_list_lock because the
1846 * buffers cannot be stolen by try_to_free_buffers as long as we are 1845 * buffers cannot be stolen by try_to_free_buffers as long as we are
@@ -1935,14 +1934,11 @@ retry:
1935 * for commit and try again. 1934 * for commit and try again.
1936 */ 1935 */
1937 if (partial_page) { 1936 if (partial_page) {
1938 tid_t tid = journal->j_committing_transaction->t_tid;
1939
1940 jbd2_journal_put_journal_head(jh); 1937 jbd2_journal_put_journal_head(jh);
1941 spin_unlock(&journal->j_list_lock); 1938 spin_unlock(&journal->j_list_lock);
1942 jbd_unlock_bh_state(bh); 1939 jbd_unlock_bh_state(bh);
1943 write_unlock(&journal->j_state_lock); 1940 write_unlock(&journal->j_state_lock);
1944 jbd2_log_wait_commit(journal, tid); 1941 return -EBUSY;
1945 goto retry;
1946 } 1942 }
1947 /* 1943 /*
1948 * OK, buffer won't be reachable after truncate. We just set 1944 * OK, buffer won't be reachable after truncate. We just set
@@ -2003,21 +1999,23 @@ zap_buffer_unlocked:
2003 * @page: page to flush 1999 * @page: page to flush
2004 * @offset: length of page to invalidate. 2000 * @offset: length of page to invalidate.
2005 * 2001 *
2006 * Reap page buffers containing data after offset in page. 2002 * Reap page buffers containing data after offset in page. Can return -EBUSY
2007 * 2003 * if buffers are part of the committing transaction and the page is straddling
2004 * i_size. Caller then has to wait for current commit and try again.
2008 */ 2005 */
2009void jbd2_journal_invalidatepage(journal_t *journal, 2006int jbd2_journal_invalidatepage(journal_t *journal,
2010 struct page *page, 2007 struct page *page,
2011 unsigned long offset) 2008 unsigned long offset)
2012{ 2009{
2013 struct buffer_head *head, *bh, *next; 2010 struct buffer_head *head, *bh, *next;
2014 unsigned int curr_off = 0; 2011 unsigned int curr_off = 0;
2015 int may_free = 1; 2012 int may_free = 1;
2013 int ret = 0;
2016 2014
2017 if (!PageLocked(page)) 2015 if (!PageLocked(page))
2018 BUG(); 2016 BUG();
2019 if (!page_has_buffers(page)) 2017 if (!page_has_buffers(page))
2020 return; 2018 return 0;
2021 2019
2022 /* We will potentially be playing with lists other than just the 2020 /* We will potentially be playing with lists other than just the
2023 * data lists (especially for journaled data mode), so be 2021 * data lists (especially for journaled data mode), so be
@@ -2031,9 +2029,11 @@ void jbd2_journal_invalidatepage(journal_t *journal,
2031 if (offset <= curr_off) { 2029 if (offset <= curr_off) {
2032 /* This block is wholly outside the truncation point */ 2030 /* This block is wholly outside the truncation point */
2033 lock_buffer(bh); 2031 lock_buffer(bh);
2034 may_free &= journal_unmap_buffer(journal, bh, 2032 ret = journal_unmap_buffer(journal, bh, offset > 0);
2035 offset > 0);
2036 unlock_buffer(bh); 2033 unlock_buffer(bh);
2034 if (ret < 0)
2035 return ret;
2036 may_free &= ret;
2037 } 2037 }
2038 curr_off = next_off; 2038 curr_off = next_off;
2039 bh = next; 2039 bh = next;
@@ -2044,6 +2044,7 @@ void jbd2_journal_invalidatepage(journal_t *journal,
2044 if (may_free && try_to_free_buffers(page)) 2044 if (may_free && try_to_free_buffers(page))
2045 J_ASSERT(!page_has_buffers(page)); 2045 J_ASSERT(!page_has_buffers(page));
2046 } 2046 }
2047 return 0;
2047} 2048}
2048 2049
2049/* 2050/*