author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-01-26 15:47:06 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-01-26 15:47:06 -0500
commit    ecdfc9787fe527491baefc22dce8b2dbd5b2908d (patch)
tree      31e7ddac0339498095c40444f81c0b03751434ae  /fs/buffer.c
parent    5ad0d383ddbf0d2fce43b8aac267a6c299fd2dff (diff)
Resurrect 'try_to_free_buffers()' VM hackery
It's not pretty, but it appears that ext3 with data=journal will clean pages without ever actually telling the VM that they are clean. This, in turn, results in the VM (and balance_dirty_pages() in particular) never realizing that the pages got cleaned, and waiting forever for an event that already happened.

Technically, this seems to be a problem with ext3 itself, but it used to be hidden by 'try_to_free_buffers()' noticing this situation on its own, and just working around the filesystem problem.

This commit re-instates that hack, in order to avoid a regression for the 2.6.20 release.

This fixes bugzilla 7844:

	http://bugzilla.kernel.org/show_bug.cgi?id=7844

Peter Zijlstra points out that we should probably retain the debugging code that this removes from cancel_dirty_page(), and I agree, but for the imminent release we might as well just silence the warning too (since it's not a new bug: anything that triggers that warning has been around forever).

Acked-by: Randy Dunlap <rdunlap@xenotime.net>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
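To make the failure mode described above concrete, the following is a minimal userspace toy model, not kernel code: every type, function, and counter name here is invented for illustration. It models a "filesystem" that writes and cleans a page's buffers without clearing the page's dirty state or the global dirty count, and a try_to_free_buffers()-style pass that notices all buffers are clean and cancels the page's dirty state so a waiter watching the count can finally make progress.

/* toy_dirty.c - illustrative model only; names are hypothetical */
#include <stdio.h>

#define BUFFERS_PER_PAGE 4

struct toy_buffer { int dirty; };

struct toy_page {
	int dirty;
	struct toy_buffer bufs[BUFFERS_PER_PAGE];
};

/* stand-in for the VM's global dirty accounting that a
 * balance_dirty_pages()-like loop would poll */
static int dirty_page_count;

static void mark_page_dirty(struct toy_page *p)
{
	int i;

	p->dirty = 1;
	dirty_page_count++;
	for (i = 0; i < BUFFERS_PER_PAGE; i++)
		p->bufs[i].dirty = 1;
}

/* the ext3 data=journal behaviour described above: the buffers are
 * written and cleaned by hand, but neither the page's dirty bit nor
 * the global count is ever updated */
static void journal_writes_buffers(struct toy_page *p)
{
	int i;

	for (i = 0; i < BUFFERS_PER_PAGE; i++)
		p->bufs[i].dirty = 0;
}

/* the restored hack: if every buffer is clean, cancel the page's dirty
 * state (the role cancel_dirty_page() plays in the real patch) so the
 * accounting finally reflects the IO that already happened */
static void try_to_free_buffers_hack(struct toy_page *p)
{
	int i;

	for (i = 0; i < BUFFERS_PER_PAGE; i++)
		if (p->bufs[i].dirty)
			return;
	if (p->dirty) {
		p->dirty = 0;
		dirty_page_count--;
	}
}

int main(void)
{
	struct toy_page page = { .dirty = 0 };

	mark_page_dirty(&page);
	journal_writes_buffers(&page);
	printf("after journal IO:  dirty pages = %d (VM still waiting)\n",
	       dirty_page_count);

	try_to_free_buffers_hack(&page);
	printf("after the hack:    dirty pages = %d\n", dirty_page_count);
	return 0;
}

In the real kernel the counter's role is played by the dirty-page statistics that balance_dirty_pages() consults; the toy only shows why that waiter cannot make progress until something corrects the page-level dirty state, which is exactly what the re-added cancel_dirty_page() call in try_to_free_buffers() does.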
Diffstat (limited to 'fs/buffer.c')
-rw-r--r--	fs/buffer.c	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b116078b4c3..460f1c43238e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2834,7 +2834,7 @@ int try_to_free_buffers(struct page *page)
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
-	if (PageDirty(page) || PageWriteback(page))
+	if (PageWriteback(page))
 		return 0;
 
 	if (mapping == NULL) {		/* can this still happen? */
@@ -2845,6 +2845,19 @@ int try_to_free_buffers(struct page *page)
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
 	spin_unlock(&mapping->private_lock);
+
+	/*
+	 * If the filesystem writes its buffers by hand (eg ext3)
+	 * then we can have clean buffers against a dirty page. We
+	 * clean the page here; otherwise the VM will never notice
+	 * that the filesystem did any IO at all.
+	 *
+	 * Also, during truncate, discard_buffer will have marked all
+	 * the page's buffers clean. We discover that here and clean
+	 * the page also.
+	 */
+	if (ret)
+		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 out:
 	if (buffers_to_free) {
 		struct buffer_head *bh = buffers_to_free;