Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c | 34
1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index ecdfdcc50522..5df947de7654 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,15 +51,22 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 	do_invalidatepage(page, partial);
 }
 
+/*
+ * This cancels just the dirty bit on the kernel page itself, it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * Doing this should *normally* only ever be done when a page
+ * is truncated, and is not actually mapped anywhere at all. However,
+ * fs/buffer.c does this when it notices that somebody has cleaned
+ * out all the buffers on a page without actually doing it through
+ * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
+ */
 void cancel_dirty_page(struct page *page, unsigned int account_size)
 {
-	/* If we're cancelling the page, it had better not be mapped any more */
-	if (page_mapped(page)) {
-		static unsigned int warncount;
-
-		WARN_ON(++warncount < 5);
-	}
-
 	if (TestClearPageDirty(page)) {
 		struct address_space *mapping = page->mapping;
 		if (mapping && mapping_cap_account_dirty(mapping)) {
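
For reference, the fs/buffer.c case the new comment alludes to is the
try_to_free_buffers() path. The sketch below is simplified from that caller
(the NULL-mapping check, locking details, and the buffer-freeing tail are
trimmed), so treat it as illustrative rather than the exact code in this tree:

/*
 * Sketch: once drop_buffers() has detached every buffer_head, the page's
 * dirty bit no longer reflects data that still needs writeback, so it is
 * cancelled outright rather than left for the writeback path to trip over.
 */
int try_to_free_buffers(struct page *page)
{
	struct address_space * const mapping = page->mapping;
	struct buffer_head *buffers_to_free = NULL;
	int ret;

	spin_lock(&mapping->private_lock);
	ret = drop_buffers(page, &buffers_to_free);
	spin_unlock(&mapping->private_lock);

	/* All buffers gone: the page-level dirty accounting is now stale. */
	if (ret)
		cancel_dirty_page(page, PAGE_CACHE_SIZE);

	return ret;
}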
@@ -341,6 +348,15 @@ failed:
 	return 0;
 }
 
+static int do_launder_page(struct address_space *mapping, struct page *page)
+{
+	if (!PageDirty(page))
+		return 0;
+	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+		return 0;
+	return mapping->a_ops->launder_page(page);
+}
+
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping: the address_space
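
The new do_launder_page() helper is a no-op unless the filesystem supplies
->launder_page in its address_space_operations. NFS is the motivating user
of the hook; its side is sketched below (not part of this patch, and
simplified): nfs_wb_page() writes the page back synchronously and waits for
it, so a laundered page can safely be dropped from the cache afterwards.

/* Flush a dirty page to the server before it is dropped from the cache. */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page->mapping->host;

	return nfs_wb_page(inode, page);
}

static const struct address_space_operations nfs_file_aops = {
	/* ... readpage, writepage, and friends elided ... */
	.launder_page	= nfs_launder_page,
};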
@@ -405,14 +421,14 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 					  PAGE_CACHE_SIZE, 0);
 			}
 		}
-		if (!invalidate_complete_page2(mapping, page))
+		ret = do_launder_page(mapping, page);
+		if (ret == 0 && !invalidate_complete_page2(mapping, page))
 			ret = -EIO;
 		unlock_page(page);
 	}
 	pagevec_release(&pvec);
 	cond_resched();
 }
-	WARN_ON_ONCE(ret);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
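
With laundering in place, a nonzero return from
invalidate_inode_pages2_range() indicates a genuine writeback or
invalidation failure rather than merely "a dirty page was in the way",
which is presumably why the WARN_ON_ONCE(ret) could go: callers are now
expected to handle the error instead of warning about it. A hypothetical
caller (illustration only, not code from this tree) might look like:

/*
 * Drop every cached page over a byte range, writing dirty pages back
 * through ->launder_page first. Returns 0, or a real error such as the
 * -EIO propagated from the per-page loop above.
 */
static int drop_cached_range(struct address_space *mapping,
			     loff_t start, loff_t end)
{
	return invalidate_inode_pages2_range(mapping,
			start >> PAGE_CACHE_SHIFT,
			end >> PAGE_CACHE_SHIFT);
}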