about summary refs log tree commit diff stats
path: root/include/linux/rmap.h
diff options
context:
space:
mode:
authorNick Piggin <npiggin@suse.de>2007-05-17 01:11:21 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-05-17 08:23:06 -0400
commitc97a9e10eaee328e6eea9f76acf7bacd7d48ef56 (patch)
treef14bf796d087e130452a2e2457c75eb1eca27483 /include/linux/rmap.h
parentea125892a17f43919c726777ed1e4929d41e7984 (diff)
mm: more rmap checking
Re-introduce rmap verification patches that Hugh removed when he removed PG_map_lock. PG_map_lock actually isn't needed to synchronise access to anonymous pages, because PG_locked and PTL together already do. These checks were important in discovering and fixing a rare rmap corruption in SLES9. Signed-off-by: Nick Piggin <npiggin@suse.de> Cc: Hugh Dickins <hugh@veritas.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/rmap.h')
-rw-r--r--  include/linux/rmap.h  13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdd277223af0..97347f22fc20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out