author	Hugh Dickins <hugh.dickins@tiscali.co.uk>	2009-12-14 20:59:22 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:19 -0500
commit	73848b4684e84a84cfd1555af78d41158f31e16b
tree	b71ba30e2b20cbc45740a38e9b5aa51b8c2ea60e
parent	08beca44dfb0ab008e365163df70dbd302ae1508
ksm: fix mlockfreed to munlocked
When KSM merges an mlocked page, it has been forgetting to munlock it:
that's been left to free_page_mlock(), which reports it in /proc/vmstat as
unevictable_pgs_mlockfreed instead of unevictable_pgs_munlocked (and
whinges "Page flag mlocked set for process" in mmotm, whereas mainline is
silently forgiving). Call munlock_vma_page() to fix that.
Signed-off-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Izik Eidus <ieidus@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Chris Wright <chrisw@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
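For quick orientation, the substance of the fix is the two-line VM_LOCKED check added to try_to_merge_one_page() in mm/ksm.c below; a condensed, slightly simplified sketch of the tail of that function (kernel context assumed, the merge condition abbreviated relative to the real code) looks like:

	if (pages_identical(page, kpage))
		err = replace_page(vma, page, kpage, orig_pte);

	/* New with this patch: the merged-away page may still be mlocked;
	 * munlock it now so /proc/vmstat counts it as
	 * unevictable_pgs_munlocked instead of leaving it for
	 * free_page_mlock() to report as unevictable_pgs_mlockfreed.
	 */
	if ((vma->vm_flags & VM_LOCKED) && !err)
		munlock_vma_page(page);

	unlock_page(page);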
-rw-r--r--	mm/internal.h	3
-rw-r--r--	mm/ksm.c	4
-rw-r--r--	mm/mlock.c	4
3 files changed, 8 insertions, 3 deletions
diff --git a/mm/internal.h b/mm/internal.h
index cb7d92d0a46d..a4b927cdca09 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,9 +105,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 }
 
 /*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
+#include "internal.h"
 
 /*
  * A few notes about the KSM scanning process,
@@ -762,6 +763,9 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	    pages_identical(page, kpage))
 		err = replace_page(vma, page, kpage, orig_pte);
 
+	if ((vma->vm_flags & VM_LOCKED) && !err)
+		munlock_vma_page(page);
+
 	unlock_page(page);
 out:
 	return err;
diff --git a/mm/mlock.c b/mm/mlock.c
index 48691fb08514..adcbe9032b58 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
  * not get another chance to clear PageMlocked. If we successfully
  * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
  * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
+ * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
  * perhaps redundantly.
  * If we lose the isolation race, and the page is mapped by other VM_LOCKED
  * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
  * either of which will restore the PageMlocked state by calling
  * mlock_vma_page() above, if it can grab the vma's mmap sem.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 
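The mm/internal.h and mm/mlock.c hunks above are the supporting plumbing: the existing munlock_vma_page() helper is made visible inside mm/ so that ksm.c (which now includes "internal.h") can call it. Condensed, with the unchanged body elided:

/* mm/internal.h: declaration now available to other mm/ files */
extern void mlock_vma_page(struct page *page);
extern void munlock_vma_page(struct page *page);

/* mm/mlock.c: the definition drops 'static' to match */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));
	/* ... body unchanged: clear PageMlocked, isolate the page and
	 * let try_to_munlock() re-check any other VM_LOCKED vmas ... */
}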