commit a8e7d49aa7be728c4ae241a75a2a124cdcabc0c5
Author:    Linus Torvalds <torvalds@linux-foundation.org>  2009-03-19 14:32:05 -0400
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2009-03-19 14:32:05 -0400
Tree:      fec49351496b886d8aa0e7b55e58c2f1394b051a (fs/buffer.c)
Parent:    68df3755e383e6fecf2354a67b08f92f18536594

Fix race in create_empty_buffers() vs __set_page_dirty_buffers()
Nick Piggin noticed this (very unlikely) race between setting a page
dirty and creating the buffers for it: we need to hold the mapping's
private_lock until we've set the page dirty bit, so that
create_empty_buffers() cannot build up a set of buffers with the dirty
bits clear while the page itself is dirty.
I doubt anybody has ever hit this race (and it didn't solve the issue
Nick was looking at), but as Nick says: "Still, it does appear to solve
a real race, which we should close."
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
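
For reference, here is a sketch of how __set_page_dirty_buffers() reads once
this patch is applied. It is reconstructed from the hunks further down plus
the usual surrounding fs/buffer.c code (the page_has_buffers()/page_buffers()
walk over the page's buffer ring), so treat it as an illustration of the
locking order rather than a verbatim copy of the file:

	int __set_page_dirty_buffers(struct page *page)
	{
		int newly_dirty;
		struct address_space *mapping = page_mapping(page);

		if (unlikely(!mapping))
			return !TestSetPageDirty(page);

		spin_lock(&mapping->private_lock);
		if (page_has_buffers(page)) {
			struct buffer_head *bh, *head;

			bh = head = page_buffers(page);
			do {
				set_buffer_dirty(bh);
				bh = bh->b_this_page;
			} while (bh != head);
		}
		/*
		 * The fix: PG_dirty is now set while private_lock is still
		 * held.  create_empty_buffers() also takes private_lock and
		 * copies the page's dirty state into the buffers it attaches,
		 * so it can no longer attach clean buffers to a page that is
		 * already dirty.
		 */
		newly_dirty = !TestSetPageDirty(page);
		spin_unlock(&mapping->private_lock);

		if (newly_dirty)
			__set_page_dirty(page, mapping, 1);
		return newly_dirty;
	}

The matching change in mark_buffer_dirty() adds an explicit TestSetPageDirty()
check of its own, compensating for the "already dirty" early return that used
to live inside __set_page_dirty() itself.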
Diffstat (limited to 'fs/buffer.c')
 fs/buffer.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 9f697419ed8e..891e1c78e4f1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -760,15 +760,9 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  * If warn is true, then emit a warning if the page is not uptodate and has
  * not been truncated.
  */
-static int __set_page_dirty(struct page *page,
+static void __set_page_dirty(struct page *page,
 			struct address_space *mapping, int warn)
 {
-	if (unlikely(!mapping))
-		return !TestSetPageDirty(page);
-
-	if (TestSetPageDirty(page))
-		return 0;
-
 	spin_lock_irq(&mapping->tree_lock);
 	if (page->mapping) {	/* Race with truncate? */
 		WARN_ON_ONCE(warn && !PageUptodate(page));
@@ -785,8 +779,6 @@ static int __set_page_dirty(struct page *page,
 	}
 	spin_unlock_irq(&mapping->tree_lock);
 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-
-	return 1;
 }
 
 /*
@@ -816,6 +808,7 @@ static int __set_page_dirty(struct page *page,
  */
 int __set_page_dirty_buffers(struct page *page)
 {
+	int newly_dirty;
 	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
@@ -831,9 +824,12 @@ int __set_page_dirty_buffers(struct page *page)
 			bh = bh->b_this_page;
 		} while (bh != head);
 	}
+	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
-	return __set_page_dirty(page, mapping, 1);
+	if (newly_dirty)
+		__set_page_dirty(page, mapping, 1);
+	return newly_dirty;
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -1262,8 +1258,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
 		return;
 	}
 
-	if (!test_set_buffer_dirty(bh))
-		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
+	if (!test_set_buffer_dirty(bh)) {
+		struct page *page = bh->b_page;
+		if (!TestSetPageDirty(page))
+			__set_page_dirty(page, page_mapping(page), 0);
+	}
 }
 
 /*