author	Nick Piggin <npiggin@suse.de>	2007-05-17 01:11:21 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-17 08:23:06 -0400
commit	c97a9e10eaee328e6eea9f76acf7bacd7d48ef56
tree	f14bf796d087e130452a2e2457c75eb1eca27483
parent	ea125892a17f43919c726777ed1e4929d41e7984
mm: more rmap checking
Re-introduce rmap verification patches that Hugh removed when he removed
PG_map_lock. PG_map_lock actually isn't needed to synchronise access to
anonymous pages, because PG_locked and PTL together already do.

These checks were important in discovering and fixing a rare rmap corruption
in SLES9.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/rmap.h	13
-rw-r--r--	mm/memory.c	2
-rw-r--r--	mm/rmap.c	58
3 files changed, 62 insertions(+), 11 deletions(-)
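
The invariant being asserted throughout this patch is worth spelling out: an
anonymous page's mapping field holds its anon_vma pointer tagged with
PAGE_MAPPING_ANON in the low bit, and its index holds the linear page index of
the mapped address within the vma. A minimal userspace sketch of that check,
using simplified stand-in types and illustrative constants rather than the
kernel's real definitions:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define PAGE_MAPPING_ANON 1   /* low bit tags page->mapping as an anon_vma */

struct anon_vma { int unused; };

struct page {
	void          *mapping;   /* anon_vma pointer | PAGE_MAPPING_ANON */
	unsigned long  index;     /* linear page index within the mapping */
};

struct vm_area_struct {
	unsigned long    vm_start;
	unsigned long    vm_pgoff;
	struct anon_vma *anon_vma;
};

/* Same arithmetic as the kernel's linear_page_index() for ordinary VMAs. */
static unsigned long linear_page_index(struct vm_area_struct *vma,
				       unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

/* Mirrors __page_check_anon_rmap(): both fields must already be correct. */
static void check_anon_rmap(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	void *expected = (char *)vma->anon_vma + PAGE_MAPPING_ANON;

	assert(page->mapping == expected);
	assert(page->index == linear_page_index(vma, address));
}

int main(void)
{
	struct anon_vma av;
	struct vm_area_struct vma = {
		.vm_start = 0x400000, .vm_pgoff = 0, .anon_vma = &av,
	};
	struct page page = {
		.mapping = (char *)&av + PAGE_MAPPING_ANON,
		.index   = 3,
	};

	check_anon_rmap(&page, &vma, vma.vm_start + (3UL << PAGE_SHIFT));
	puts("anon rmap invariants hold");
	return 0;
}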
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bdd277223af0..97347f22fc20 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -74,17 +74,14 @@ void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned lon
 void page_add_file_rmap(struct page *);
 void page_remove_rmap(struct page *, struct vm_area_struct *);
 
-/**
- * page_dup_rmap - duplicate pte mapping to a page
- * @page:	the page to add the mapping to
- *
- * For copy_page_range only: minimal extract from page_add_rmap,
- * avoiding unnecessary tests (already checked) so it's quicker.
- */
-static inline void page_dup_rmap(struct page *page)
+#ifdef CONFIG_DEBUG_VM
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address);
+#else
+static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
 {
 	atomic_inc(&page->_mapcount);
 }
+#endif
 
 /*
  * Called from mm/vmscan.c to handle paging out
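
The header change keeps the production fast path intact while routing debug
builds through the checked version in mm/rmap.c; because both variants take
the same (page, vma, address) signature, call sites compile unchanged either
way. A standalone sketch of the same build-time switch, with DEBUG_VM standing
in for CONFIG_DEBUG_VM and a plain int for the mapcount:

#include <assert.h>

#ifdef DEBUG_VM
/* Debug builds: out-of-line version that can afford extra checks. */
void dup_rmap(int *mapcount)
{
	assert(*mapcount > 0);	/* duplicating an unmapped page is a bug */
	(*mapcount)++;
}
#else
/* Production builds: the original single-increment inline fast path. */
static inline void dup_rmap(int *mapcount)
{
	(*mapcount)++;
}
#endif

int main(void)
{
	int mapcount = 1;	/* page already mapped once */

	dup_rmap(&mapcount);	/* same call site in either configuration */
	assert(mapcount == 2);
	return 0;
}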
diff --git a/mm/memory.c b/mm/memory.c
index 1d647ab0ee72..cb94488ab96d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -481,7 +481,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	page = vm_normal_page(vma, addr, pte);
 	if (page) {
 		get_page(page);
-		page_dup_rmap(page);
+		page_dup_rmap(page, vma, addr);
 		rss[!!PageAnon(page)]++;
 	}
 
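
For context, the rss[!!PageAnon(page)] line adjacent to the changed call uses
double negation to fold any truth value to exactly 0 or 1, indexing a two-slot
counter array (file-backed vs anonymous). A hypothetical userspace rendering
of that idiom, with illustrative slot names:

#include <stdbool.h>
#include <stdio.h>

enum { RSS_FILE = 0, RSS_ANON = 1 };   /* illustrative slot names */

/* rss[!!anon]++: slot 0 counts file-backed pages, slot 1 anonymous ones. */
static void account(unsigned long rss[2], bool page_is_anon)
{
	rss[!!page_is_anon]++;
}

int main(void)
{
	unsigned long rss[2] = { 0, 0 };

	account(rss, true);	/* one anonymous page */
	account(rss, false);	/* one file-backed page */
	account(rss, true);

	printf("file=%lu anon=%lu\n", rss[RSS_FILE], rss[RSS_ANON]);
	return 0;
}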
diff --git a/mm/rmap.c b/mm/rmap.c
index 1c1af92732d5..850165d32b7a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -530,19 +530,51 @@ static void __page_set_anon_rmap(struct page *page,
 }
 
 /**
+ * __page_check_anon_rmap - sanity check anonymous rmap addition
+ * @page:	the page to add the mapping to
+ * @vma:	the vm area in which the mapping is added
+ * @address:	the user virtual address mapped
+ */
+static void __page_check_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	/*
+	 * The page's anon-rmap details (mapping and index) are guaranteed to
+	 * be set up correctly at this point.
+	 *
+	 * We have exclusion against page_add_anon_rmap because the caller
+	 * always holds the page locked, except if called from page_dup_rmap,
+	 * in which case the page is already known to be setup.
+	 *
+	 * We have exclusion against page_add_new_anon_rmap because those pages
+	 * are initially only visible via the pagetables, and the pte is locked
+	 * over the call to page_add_new_anon_rmap.
+	 */
+	struct anon_vma *anon_vma = vma->anon_vma;
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	BUG_ON(page->mapping != (struct address_space *)anon_vma);
+	BUG_ON(page->index != linear_page_index(vma, address));
+#endif
+}
+
+/**
  * page_add_anon_rmap - add pte mapping to an anonymous page
  * @page:	the page to add the mapping to
  * @vma:	the vm area in which the mapping is added
  * @address:	the user virtual address mapped
  *
- * The caller needs to hold the pte lock.
+ * The caller needs to hold the pte lock and the page must be locked.
  */
 void page_add_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	if (atomic_inc_and_test(&page->_mapcount))
 		__page_set_anon_rmap(page, vma, address);
-	/* else checking page index and mapping is racy */
+	else
+		__page_check_anon_rmap(page, vma, address);
 }
 
 /*
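
The rewritten page_add_anon_rmap() leans on the kernel's -1-based _mapcount
convention: atomic_inc_and_test() returns true only when the increment lands
on zero, i.e. on the very first mapping, so the first mapper initializes the
rmap and every later mapper now verifies it instead of taking the old
"checking is racy" bail-out. A non-atomic sketch of that dispatch:

#include <stdbool.h>
#include <stdio.h>

static int mapcount = -1;		/* page->_mapcount starts at -1 */

/* Stand-in for atomic_inc_and_test(): increment, true iff result is 0. */
static bool inc_and_test(int *v)
{
	return ++*v == 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		if (inc_and_test(&mapcount))
			puts("first mapping: set up anon rmap");
		else
			puts("later mapping: verify existing anon rmap");
	}
	return 0;
}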
@@ -553,10 +585,12 @@ void page_add_anon_rmap(struct page *page,
  *
  * Same as page_add_anon_rmap but must only be called on *new* pages.
  * This means the inc-and-test can be bypassed.
+ * Page does not have to be locked.
  */
 void page_add_new_anon_rmap(struct page *page,
 	struct vm_area_struct *vma, unsigned long address)
 {
+	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
 	__page_set_anon_rmap(page, vma, address);
 }
@@ -573,6 +607,26 @@ void page_add_file_rmap(struct page *page)
 	__inc_zone_page_state(page, NR_FILE_MAPPED);
 }
 
+#ifdef CONFIG_DEBUG_VM
+/**
+ * page_dup_rmap - duplicate pte mapping to a page
+ * @page:	the page to add the mapping to
+ *
+ * For copy_page_range only: minimal extract from page_add_file_rmap /
+ * page_add_anon_rmap, avoiding unnecessary tests (already checked) so it's
+ * quicker.
+ *
+ * The caller needs to hold the pte lock.
+ */
+void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address)
+{
+	BUG_ON(page_mapcount(page) == 0);
+	if (PageAnon(page))
+		__page_check_anon_rmap(page, vma, address);
+	atomic_inc(&page->_mapcount);
+}
+#endif
+
 /**
  * page_remove_rmap - take down pte mapping from a page
  * @page:	page to remove mapping from
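
The new BUG_ON in page_dup_rmap() relies on page_mapcount() reading as
_mapcount + 1, so a result of zero means the page has no pte mappings at all
and there is nothing to duplicate. A small sketch of that relationship (the
helper here is a simplified stand-in for the kernel's page_mapcount()):

#include <assert.h>

/* Simplified page_mapcount(): the kernel reads _mapcount atomically. */
static int page_mapcount(int _mapcount)
{
	return _mapcount + 1;
}

int main(void)
{
	int _mapcount = 0;			/* one existing mapping */

	assert(page_mapcount(_mapcount) != 0);	/* safe to duplicate */
	_mapcount++;				/* page_dup_rmap(): atomic_inc */
	assert(page_mapcount(_mapcount) == 2);
	return 0;
}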