author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2006-09-26 02:30:57 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-09-26 11:48:44 -0400
commit    d08b3851da41d0ee60851f2c75b118e1f7a5fc89 (patch)
tree      a01f6930a1387e8f66607e2fe16c62bb7044353b /include/linux/rmap.h
parent    725d704ecaca4a43f067092c140d4f3271cf2856 (diff)
[PATCH] mm: tracking shared dirty pages
Tracking of dirty pages in shared writeable mmap()s.

The idea is simple: write protect clean shared writeable pages, catch the
write-fault, make writeable and set dirty.  On page write-back clean all the
PTE dirty bits and write protect them once again.

The implementation is a tad harder, mainly because the default
backing_dev_info capabilities were too loosely maintained.  Hence it is not
enough to test the backing_dev_info for cap_account_dirty.

The current heuristic is as follows; a VMA is eligible when:
 - it is shared writeable:
     (vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)
 - it is not a 'special' mapping:
     (vm_flags & (VM_PFNMAP|VM_INSERTPAGE)) == 0
 - the backing_dev_info is cap_account_dirty:
     mapping_cap_account_dirty(vma->vm_file->f_mapping)
 - f_op->mmap() didn't change the default page protection

Pages from remap_pfn_range() are explicitly excluded because their COW
semantics are already horrid enough (see vm_normal_page() in do_wp_page())
and because they don't have a backing store anyway.

mprotect() is taught about the new behaviour as well.  However, it overrides
the last condition.

Cleaning the pages on write-back is done with page_mkclean(), a new rmap
call.  It can be called on any page, but is currently only implemented for
mapped pages; if the page is found to be of a VMA that accounts dirty pages,
it will also write-protect the PTE.

Finally, in fs/buffer.c:try_to_free_buffers(), remove clear_page_dirty()
from under ->private_lock.  This seems to be safe, since ->private_lock is
used to serialize access to the buffers, not the page itself.  This is
needed because clear_page_dirty() will call into page_mkclean() and would
thereby violate locking order.

[dhowells@redhat.com: Provide a page_mkclean() implementation for NOMMU]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
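A minimal C sketch of the eligibility heuristic described in the message, for
orientation only: the helper name vma_is_dirty_trackable() is illustrative
(the patch itself open-codes the test in the mmap/mprotect paths), and the
fourth condition (f_op->mmap() left the default page protection alone) is
omitted for brevity.

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/backing-dev.h>

/* Illustrative only: a VMA is a candidate for dirty tracking when it is
 * shared writeable, is not a special PFN/inserted-page mapping, and its
 * backing address_space accounts dirty pages. */
static inline int vma_is_dirty_trackable(struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) != (VM_WRITE | VM_SHARED))
		return 0;
	if (vma->vm_flags & (VM_PFNMAP | VM_INSERTPAGE))
		return 0;
	if (!vma->vm_file)
		return 0;
	return mapping_cap_account_dirty(vma->vm_file->f_mapping);
}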
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--  include/linux/rmap.h | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bf97b0900014..db2c1df4fef9 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -103,6 +103,14 @@ pte_t *page_check_address(struct page *, struct mm_struct *,
  */
 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 
+/*
+ * Cleans the PTEs of shared mappings.
+ * (and since clean PTEs should also be readonly, write protects them too)
+ *
+ * returns the number of cleaned PTEs.
+ */
+int page_mkclean(struct page *);
+
 #else	/* !CONFIG_MMU */
 
 #define anon_vma_init()		do {} while (0)
@@ -112,6 +120,12 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
 #define page_referenced(page,l) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
+static inline int page_mkclean(struct page *page)
+{
+	return 0;
+}
+
+
 #endif	/* CONFIG_MMU */
 
 /*
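As a hedged illustration of how the write-back path can consume the
page_mkclean() call declared above, loosely modelled on the
clear_page_dirty_for_io() changes elsewhere in this patch: the function name
sketch_clear_page_dirty_for_io() and its exact flow are assumptions for the
sketch, not the patch's code.

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/backing-dev.h>

/* Illustrative only: before a page is written back, clean and
 * write-protect every PTE mapping it; if any PTE was dirty, transfer
 * that dirtiness to the struct page so the data is not lost, then
 * clear the page's dirty bit for the I/O. */
static int sketch_clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (mapping && mapping_cap_account_dirty(mapping)) {
		if (page_mkclean(page))
			set_page_dirty(page);
	}
	return TestClearPageDirty(page);
}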