author	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-20 16:46:42 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-21 12:19:57 -0500
commit	fba2591bf4e418b6c3f9f8794c9dd8fe40ae7bd9 (patch)
tree	d6909973e402b3171ee409f660b33df2fad029ba	/mm/truncate.c
parent	46d2277c796f9f4937bfa668c40b2e3f43e93dd0 (diff)
VM: Remove "clear_page_dirty()" and "test_clear_page_dirty()" functions
They were horribly easy to mis-use because of their tempting naming, and they also did way more than any users of them generally wanted them to do.

A dirty page can become clean under two circumstances:

  (a) when we write it out.  We have "clear_page_dirty_for_io()" for this, and that function remains unchanged.  In the "for IO" case it is not sufficient to just clear the dirty bit, you also have to mark the page as being under writeback etc.

  (b) when we actually remove a page due to it becoming inaccessible to users, notably because it was truncate()'d away or the file (or metadata) no longer exists, and we thus want to cancel any outstanding dirty state.

For the (b) case, we now introduce "cancel_dirty_page()", which only touches the page state itself, and verifies that the page is not mapped (since cancelling writes on a mapped page would be actively wrong as it is still accessible to users).

Some filesystems need to be fixed up for this: CIFS, FUSE, JFS, ReiserFS, XFS all use the old confusing functions, and will be fixed separately in subsequent commits (with some of them just removing the offending logic, and others using clear_page_dirty_for_io()).

This was confirmed by Martin Michlmayr to fix the apt database corruption on ARM.

Cc: Martin Michlmayr <tbm@cyrius.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Andrei Popa <andrei.popa@i-neo.ro>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Cc: Gordon Farquharson <gordonfarquharson@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
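As a purely illustrative sketch of the (a)/(b) split described above (not part of this commit: write_out_page() and drop_truncated_page() are hypothetical helper names, while clear_page_dirty_for_io(), cancel_dirty_page(), remove_from_page_cache() and PAGE_CACHE_SIZE are the real kernel symbols involved):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

/*
 * (a) Write-out: the page remains visible to users, so the dirty bit may
 * only be cleared via the full "for IO" protocol, which also sets up the
 * writeback state.  The page is assumed locked by the caller.
 */
static int write_out_page(struct page *page, struct writeback_control *wbc)
{
	struct address_space *mapping = page->mapping;

	if (clear_page_dirty_for_io(page))
		return mapping->a_ops->writepage(page, wbc);
	return 0;
}

/*
 * (b) Removal: the page is becoming inaccessible (e.g. truncate), so any
 * pending dirty state is simply cancelled; cancel_dirty_page() also undoes
 * the per-task write accounting for the cancelled bytes.
 */
static void drop_truncated_page(struct page *page)
{
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
	remove_from_page_cache(page);
}

The diff below applies exactly the (b) pattern: truncate_complete_page() now calls cancel_dirty_page(page, PAGE_CACHE_SIZE) instead of open-coding test_clear_page_dirty() plus the accounting call.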
Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c	25
1 file changed, 17 insertions, 8 deletions
diff --git a/mm/truncate.c b/mm/truncate.c
index 9bfb8e853860..bf9e2965d666 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -51,6 +51,20 @@ static inline void truncate_partial_page(struct page *page, unsigned partial)
 		do_invalidatepage(page, partial);
 }
 
+void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+	/* If we're cancelling the page, it had better not be mapped any more */
+	if (page_mapped(page)) {
+		static unsigned int warncount;
+
+		WARN_ON(++warncount < 5);
+	}
+
+	if (TestClearPageDirty(page) && account_size)
+		task_io_account_cancelled_write(account_size);
+}
+
+
 /*
  * If truncate cannot remove the fs-private metadata from the page, the page
  * becomes anonymous. It will be left on the LRU and may even be mapped into
@@ -70,8 +84,8 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	if (PagePrivate(page))
 		do_invalidatepage(page, 0);
 
-	if (test_clear_page_dirty(page))
-		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
+	cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
 	remove_from_page_cache(page);
@@ -350,7 +364,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
 			pgoff_t page_index;
-			int was_dirty;
 
 			lock_page(page);
 			if (page->mapping != mapping) {
@@ -386,12 +399,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 						  PAGE_CACHE_SIZE, 0);
 				}
 			}
-			was_dirty = test_clear_page_dirty(page);
-			if (!invalidate_complete_page2(mapping, page)) {
-				if (was_dirty)
-					set_page_dirty(page);
-				ret = -EIO;
-			}
+			if (!invalidate_complete_page2(mapping, page))
+				ret = -EIO;
 			unlock_page(page);
 		}
 		pagevec_release(&pvec);