Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c | 19 ++++++++++++++++++-
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 3b116078b4c3..1ad674fd348c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2834,7 +2834,7 @@ int try_to_free_buffers(struct page *page)
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
-	if (PageDirty(page) || PageWriteback(page))
+	if (PageWriteback(page))
 		return 0;
 
 	if (mapping == NULL) {		/* can this still happen? */
@@ -2844,6 +2844,23 @@ int try_to_free_buffers(struct page *page)
 
 	spin_lock(&mapping->private_lock);
 	ret = drop_buffers(page, &buffers_to_free);
+
+	/*
+	 * If the filesystem writes its buffers by hand (eg ext3)
+	 * then we can have clean buffers against a dirty page.  We
+	 * clean the page here; otherwise the VM will never notice
+	 * that the filesystem did any IO at all.
+	 *
+	 * Also, during truncate, discard_buffer will have marked all
+	 * the page's buffers clean.  We discover that here and clean
+	 * the page also.
+	 *
+	 * private_lock must be held over this entire operation in order
+	 * to synchronise against __set_page_dirty_buffers and prevent the
+	 * dirty bit from being lost.
+	 */
+	if (ret)
+		cancel_dirty_page(page, PAGE_CACHE_SIZE);
 	spin_unlock(&mapping->private_lock);
 out:
 	if (buffers_to_free) {
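A minimal userspace sketch of the locking rule stated in the new comment. This is illustrative C only, not kernel code: private_lock, buffers_dirty, page_dirty, dirty_path() and free_path() are invented stand-ins for mapping->private_lock, the buffer_head dirty bits, PG_dirty, the dirtying path (__set_page_dirty_buffers) and the patched tail of try_to_free_buffers(), and the real kernel paths are more involved than this model.

/*
 * Illustrative model only -- none of these names are kernel symbols.
 * A pthread mutex stands in for mapping->private_lock; two booleans
 * stand in for the buffer dirty bits and the page dirty bit.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t private_lock = PTHREAD_MUTEX_INITIALIZER;
static bool buffers_dirty;	/* stand-in for the buffer_head dirty bits */
static bool page_dirty;		/* stand-in for PG_dirty */

/* Rough analogue of a filesystem dirtying a page through its buffers. */
static void *dirty_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&private_lock);
	buffers_dirty = true;
	page_dirty = true;
	pthread_mutex_unlock(&private_lock);
	return NULL;
}

/*
 * Rough analogue of the patched tail of try_to_free_buffers(): the
 * buffer check and the page-dirty cancellation happen in one critical
 * section, so a concurrent dirty_path() cannot slip in between them
 * and have its freshly set dirty bit cancelled.
 */
static void *free_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&private_lock);
	if (!buffers_dirty)		/* "drop_buffers() succeeded" */
		page_dirty = false;	/* "cancel_dirty_page()" */
	pthread_mutex_unlock(&private_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, dirty_path, NULL);
	pthread_create(&b, NULL, free_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Whatever the interleaving, a completed dirtying is not lost:
	 * either free_path() ran first, or it saw dirty buffers and
	 * left the page alone. */
	printf("buffers_dirty=%d page_dirty=%d\n", buffers_dirty, page_dirty);
	return 0;
}

The point the model mirrors from the patch is that the check and the cancellation form one critical section: if free_path() released the lock between deciding the buffers were droppable and clearing the page dirty bit, a concurrent dirtying could land in that gap and be wiped out, which is the lost-dirty-bit scenario the comment warns about.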