author     Nick Piggin <npiggin@suse.de>  2007-07-17 07:03:34 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 13:23:02 -0400
commit     787d2214c19bcc9b6ac48af0ce098277a801eded (patch)
tree       a040604fdf9620a66dc83a0cde4f2140e2ec25b3 /fs/buffer.c
parent     a1ed3dda0ad181532f1e0f0d548067fb9fdddac4 (diff)
fs: introduce some page/buffer invariants
It is a bug to set a page dirty if it is not uptodate unless it has buffers. If the page has buffers, then the page may be dirty (some buffers dirty) but not uptodate (some buffers not uptodate). The exception to this rule is if the set_page_dirty caller is racing with truncate or invalidate.

A buffer cannot be set dirty if it is not uptodate.

If either of these situations occurs, it indicates there could be a data loss problem. Some of these warnings may be harmless cases where the page or buffer is set uptodate immediately after it is dirtied; however, we should fix those up and enforce this ordering.

Bring the order of operations for truncate into line with those of invalidate. This will prevent a page from being able to go !uptodate while we're holding the tree_lock, which is probably a good thing anyway.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
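To make the page-level rule concrete, here is a minimal sketch, not part of this patch: fill_and_dirty_page() and its caller are hypothetical, and the KM_USER0 kmap slot matches kernels of this era. It shows a write path that respects the ordering the new warnings enforce, bringing the page uptodate before dirtying it.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/*
 * Illustrative only: fill an entire page cache page while keeping the
 * "uptodate before dirty" invariant. Reversing the last two calls
 * would trip the new WARN_ON_ONCE checks (and could mask data loss).
 */
static void fill_and_dirty_page(struct page *page, const void *src)
{
	void *dst = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, PAGE_CACHE_SIZE);
	kunmap_atomic(dst, KM_USER0);

	SetPageUptodate(page);	/* contents are now fully valid ... */
	set_page_dirty(page);	/* ... so dirtying is legitimate */
}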
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c | 54
1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index d654a3b6209e..0f9006714230 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -676,6 +676,39 @@ void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 EXPORT_SYMBOL(mark_buffer_dirty_inode);
 
 /*
+ * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ */
+static int __set_page_dirty(struct page *page,
+		struct address_space *mapping, int warn)
+{
+	if (unlikely(!mapping))
+		return !TestSetPageDirty(page);
+
+	if (TestSetPageDirty(page))
+		return 0;
+
+	write_lock_irq(&mapping->tree_lock);
+	if (page->mapping) { /* Race with truncate? */
+		WARN_ON_ONCE(warn && !PageUptodate(page));
+
+		if (mapping_cap_account_dirty(mapping)) {
+			__inc_zone_page_state(page, NR_FILE_DIRTY);
+			task_io_account_write(PAGE_CACHE_SIZE);
+		}
+		radix_tree_tag_set(&mapping->page_tree,
+				page_index(page), PAGECACHE_TAG_DIRTY);
+	}
+	write_unlock_irq(&mapping->tree_lock);
+	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+	return 1;
+}
+
+/*
  * Add a page to the dirty page list.
  *
  * It is a sad fact of life that this function is called from several places
@@ -702,7 +735,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  */
 int __set_page_dirty_buffers(struct page *page)
 {
-	struct address_space * const mapping = page_mapping(page);
+	struct address_space *mapping = page_mapping(page);
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -719,21 +752,7 @@ int __set_page_dirty_buffers(struct page *page)
 	}
 	spin_unlock(&mapping->private_lock);
 
-	if (TestSetPageDirty(page))
-		return 0;
-
-	write_lock_irq(&mapping->tree_lock);
-	if (page->mapping) { /* Race with truncate? */
-		if (mapping_cap_account_dirty(mapping)) {
-			__inc_zone_page_state(page, NR_FILE_DIRTY);
-			task_io_account_write(PAGE_CACHE_SIZE);
-		}
-		radix_tree_tag_set(&mapping->page_tree,
-				page_index(page), PAGECACHE_TAG_DIRTY);
-	}
-	write_unlock_irq(&mapping->tree_lock);
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return 1;
+	return __set_page_dirty(page, mapping, 1);
 }
 EXPORT_SYMBOL(__set_page_dirty_buffers);
 
@@ -1132,8 +1151,9 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  */
 void fastcall mark_buffer_dirty(struct buffer_head *bh)
 {
+	WARN_ON_ONCE(!buffer_uptodate(bh));
 	if (!buffer_dirty(bh) && !test_set_buffer_dirty(bh))
-		__set_page_dirty_nobuffers(bh->b_page);
+		__set_page_dirty(bh->b_page, page_mapping(bh->b_page), 0);
 }
 
 /*
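Note that mark_buffer_dirty() passes warn=0 to __set_page_dirty(): a buffer-backed page may legitimately be dirty without being fully uptodate, so the check happens per buffer via the new WARN_ON_ONCE(!buffer_uptodate(bh)) instead. A minimal sketch of a caller that stays within that buffer-level invariant follows; modify_buffer() is hypothetical and not part of this patch.

#include <linux/buffer_head.h>
#include <linux/string.h>

/*
 * Illustrative only: the buffer-granularity counterpart of the rule.
 * The buffer is made uptodate before mark_buffer_dirty() is called.
 */
static void modify_buffer(struct buffer_head *bh, const void *src, size_t len)
{
	lock_buffer(bh);
	memcpy(bh->b_data, src, len);
	set_buffer_uptodate(bh);	/* must precede dirtying */
	unlock_buffer(bh);
	mark_buffer_dirty(bh);		/* no warning: buffer is uptodate */
}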