diff options
author | Nick Piggin <nickpiggin@yahoo.com.au> | 2005-05-05 19:15:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-05-05 19:36:40 -0400 |
commit | ad576e63e0c8b274a8558b8e05a6d0526e804dc0 (patch) | |
tree | a7c44175df405cf2851f45a486ff96037d1a43e6 /fs/buffer.c | |
parent | f3ddbdc6267c32223035ea9bb8456a2d86f65ba1 (diff) |
[PATCH] __block_write_full_page race fix
When running
fsstress -v -d $DIR/tmp -n 1000 -p 1000 -l 2
on an ext2 filesystem with 1024-byte block size, on SMP i386 with 4096-byte
page size over loopback to an image file on a tmpfs filesystem, I would
very quickly hit
BUG_ON(!buffer_async_write(bh));
in fs/buffer.c:end_buffer_async_write
It seems that more than one request would be submitted for a given bh
at a time.
What would happen is the following:
2 threads doing __mpage_writepages on the same page.
Thread 1 - lock the page first, and enter __block_write_full_page.
Thread 1 - (eg.) mark_buffer_async_write on the first 2 buffers.
Thread 1 - set page writeback, unlock page.
Thread 2 - lock page, wait on page writeback
Thread 1 - submit_bh on the first 2 buffers.
=> both requests complete, none of the page buffers are async_write,
end_page_writeback is called.
Thread 2 - wakes up. enters __block_write_full_page.
Thread 2 - mark_buffer_async_write on (eg.) the last buffer
Thread 1 - finds the last buffer has async_write set, submit_bh on that.
Thread 2 - submit_bh on the last buffer.
=> oops.
So change __block_write_full_page to explicitly keep track of the last bh
we need to issue, so we don't touch anything after issuing the last
request.
Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/buffer.c')
-rw-r--r-- | fs/buffer.c | 18 |
1 file changed, 13 insertions, 5 deletions
diff --git a/fs/buffer.c b/fs/buffer.c index af7c51ded2e1..bc75f2e7b274 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -1751,7 +1751,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1751 | int err; | 1751 | int err; |
1752 | sector_t block; | 1752 | sector_t block; |
1753 | sector_t last_block; | 1753 | sector_t last_block; |
1754 | struct buffer_head *bh, *head; | 1754 | struct buffer_head *bh, *head, *last_bh = NULL; |
1755 | int nr_underway = 0; | 1755 | int nr_underway = 0; |
1756 | 1756 | ||
1757 | BUG_ON(!PageLocked(page)); | 1757 | BUG_ON(!PageLocked(page)); |
@@ -1809,7 +1809,6 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1809 | } while (bh != head); | 1809 | } while (bh != head); |
1810 | 1810 | ||
1811 | do { | 1811 | do { |
1812 | get_bh(bh); | ||
1813 | if (!buffer_mapped(bh)) | 1812 | if (!buffer_mapped(bh)) |
1814 | continue; | 1813 | continue; |
1815 | /* | 1814 | /* |
@@ -1827,6 +1826,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1827 | } | 1826 | } |
1828 | if (test_clear_buffer_dirty(bh)) { | 1827 | if (test_clear_buffer_dirty(bh)) { |
1829 | mark_buffer_async_write(bh); | 1828 | mark_buffer_async_write(bh); |
1829 | get_bh(bh); | ||
1830 | last_bh = bh; | ||
1830 | } else { | 1831 | } else { |
1831 | unlock_buffer(bh); | 1832 | unlock_buffer(bh); |
1832 | } | 1833 | } |
@@ -1845,10 +1846,13 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1845 | if (buffer_async_write(bh)) { | 1846 | if (buffer_async_write(bh)) { |
1846 | submit_bh(WRITE, bh); | 1847 | submit_bh(WRITE, bh); |
1847 | nr_underway++; | 1848 | nr_underway++; |
1849 | put_bh(bh); | ||
1850 | if (bh == last_bh) | ||
1851 | break; | ||
1848 | } | 1852 | } |
1849 | put_bh(bh); | ||
1850 | bh = next; | 1853 | bh = next; |
1851 | } while (bh != head); | 1854 | } while (bh != head); |
1855 | bh = head; | ||
1852 | 1856 | ||
1853 | err = 0; | 1857 | err = 0; |
1854 | done: | 1858 | done: |
@@ -1887,10 +1891,11 @@ recover: | |||
1887 | bh = head; | 1891 | bh = head; |
1888 | /* Recovery: lock and submit the mapped buffers */ | 1892 | /* Recovery: lock and submit the mapped buffers */ |
1889 | do { | 1893 | do { |
1890 | get_bh(bh); | ||
1891 | if (buffer_mapped(bh) && buffer_dirty(bh)) { | 1894 | if (buffer_mapped(bh) && buffer_dirty(bh)) { |
1892 | lock_buffer(bh); | 1895 | lock_buffer(bh); |
1893 | mark_buffer_async_write(bh); | 1896 | mark_buffer_async_write(bh); |
1897 | get_bh(bh); | ||
1898 | last_bh = bh; | ||
1894 | } else { | 1899 | } else { |
1895 | /* | 1900 | /* |
1896 | * The buffer may have been set dirty during | 1901 | * The buffer may have been set dirty during |
@@ -1909,10 +1914,13 @@ recover: | |||
1909 | clear_buffer_dirty(bh); | 1914 | clear_buffer_dirty(bh); |
1910 | submit_bh(WRITE, bh); | 1915 | submit_bh(WRITE, bh); |
1911 | nr_underway++; | 1916 | nr_underway++; |
1917 | put_bh(bh); | ||
1918 | if (bh == last_bh) | ||
1919 | break; | ||
1912 | } | 1920 | } |
1913 | put_bh(bh); | ||
1914 | bh = next; | 1921 | bh = next; |
1915 | } while (bh != head); | 1922 | } while (bh != head); |
1923 | bh = head; | ||
1916 | goto done; | 1924 | goto done; |
1917 | } | 1925 | } |
1918 | 1926 | ||