author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-01-26 15:47:06 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-01-26 15:47:06 -0500
commit		ecdfc9787fe527491baefc22dce8b2dbd5b2908d
tree		31e7ddac0339498095c40444f81c0b03751434ae
parent		5ad0d383ddbf0d2fce43b8aac267a6c299fd2dff
Resurrect 'try_to_free_buffers()' VM hackery
It's not pretty, but it appears that ext3 with data=journal will clean
pages without ever actually telling the VM that they are clean. This,
in turn, will result in the VM (and balance_dirty_pages() in particular)
never realizing that the pages got cleaned, and waiting forever for an
event that already happened.
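To make the failure mode concrete, here is a minimal sketch of a
balance_dirty_pages()-style throttle loop. It is a simplified illustration,
not the actual 2.6.20 mm/page-writeback.c code, and dirty_threshold() and
kick_writeback() are hypothetical stand-in helpers:

/*
 * Simplified sketch (not the real mm/page-writeback.c): a writer is
 * throttled until the global dirty-page count falls below a limit.
 * If a filesystem cleans a page "by hand" without clearing PG_dirty
 * or decrementing the dirty accounting, the count below never drops
 * and the writer waits forever for an event that already happened.
 */
static void balance_dirty_pages_sketch(void)
{
	for (;;) {
		long dirty = global_page_state(NR_FILE_DIRTY) +
			     global_page_state(NR_UNSTABLE_NFS);

		if (dirty <= dirty_threshold())	/* hypothetical helper */
			break;

		kick_writeback();		/* hypothetical helper */
		congestion_wait(WRITE, HZ/10);
	}
}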
Technically, this seems to be a problem with ext3 itself, but it used to
be hidden by 'try_to_free_buffers()' noticing this situation on its own,
and just working around the filesystem problem.
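The situation the old hack detected is "all buffers clean, page still marked
dirty". The re-added check in the fs/buffer.c hunk below keys off
drop_buffers() succeeding; as a rough illustration of what that implies, a
hypothetical helper walking the page's buffer ring might look like this
(a sketch, not the actual drop_buffers() code):

/*
 * Hypothetical sketch: drop_buffers() (not shown in the diff below)
 * only succeeds when every buffer_head on the page is clean and
 * unlocked.  When that holds for a page that is still PageDirty(),
 * the filesystem has already written the data itself and the VM's
 * dirty accounting is stale -- exactly the case the hack cleans up.
 */
static int page_buffers_all_clean(struct page *page)
{
	struct buffer_head *head = page_buffers(page);
	struct buffer_head *bh = head;

	do {
		if (buffer_dirty(bh) || buffer_locked(bh))
			return 0;	/* still in use, cannot free */
		bh = bh->b_this_page;
	} while (bh != head);

	return 1;	/* clean buffers, possibly against a dirty page */
}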
This commit reinstates that hack, in order to avoid a regression for
the 2.6.20 release. This fixes bugzilla 7844:
http://bugzilla.kernel.org/show_bug.cgi?id=7844
Peter Zijlstra points out that we should probably retain the debugging
code that this removes from cancel_dirty_page(), and I agree, but for
the imminent release we might as well just silence the warning too
(since it's not a new bug: anything that triggers that warning has been
around forever).
Acked-by: Randy Dunlap <rdunlap@xenotime.net>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 fs/buffer.c   | 15 ++++++++++++++-
 mm/truncate.c | 21 ++++++++++++++-------
 2 files changed, 28 insertions(+), 8 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b116078b4c3..460f1c43238e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2834,7 +2834,7 @@ int try_to_free_buffers(struct page *page)
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
-	if (PageDirty(page) || PageWriteback(page))
+	if (PageWriteback(page))
 		return 0;
 
 	if (mapping == NULL) {		/* can this still happen? */
@@ -2845,6 +2845,19 @@ int try_to_free_buffers(struct page *page)
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
 	spin_unlock(&mapping->private_lock);
+
+	/*
+	 * If the filesystem writes its buffers by hand (eg ext3)
+	 * then we can have clean buffers against a dirty page. We
+	 * clean the page here; otherwise the VM will never notice
+	 * that the filesystem did any IO at all.
+	 *
+	 * Also, during truncate, discard_buffer will have marked all
+	 * the page's buffers clean.  We discover that here and clean
+	 * the page also.
+	 */
+	if (ret)
+		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 out:
 	if (buffers_to_free) {
 		struct buffer_head *bh = buffers_to_free;
diff --git a/mm/truncate.c b/mm/truncate.c
index 6c79ca4a1ca7..3262740aa059 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,15 +51,22 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 		do_invalidatepage(page, partial);
 }
 
+/*
+ * This cancels just the dirty bit on the kernel page itself, it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Tought you could.
+ */
 void cancel_dirty_page(struct page *page, unsigned int account_size)
 {
-	/* If we're cancelling the page, it had better not be mapped any more */
-	if (page_mapped(page)) {
-		static unsigned int warncount;
-
-		WARN_ON(++warncount < 5);
-	}
-
 	if (TestClearPageDirty(page)) {
 		struct address_space *mapping = page->mapping;
 		if (mapping && mapping_cap_account_dirty(mapping)) {