Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h | 3
-rw-r--r--  mm/ksm.c      | 4
-rw-r--r--  mm/mlock.c    | 4
3 files changed, 8 insertions, 3 deletions
diff --git a/mm/internal.h b/mm/internal.h
index cb7d92d0a46d..a4b927cdca09 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,9 +105,10 @@ static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
 }
 
 /*
- * must be called with vma's mmap_sem held for read, and page locked.
+ * must be called with vma's mmap_sem held for read or write, and page locked.
  */
 extern void mlock_vma_page(struct page *page);
+extern void munlock_vma_page(struct page *page);
 
 /*
  * Clear the page's PageMlocked(). This can be useful in a situation where
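The widened comment matters because KSM calls in holding mmap_sem only for read. A hypothetical caller sketch of the documented contract (example_munlock() is illustrative only, not part of the patch):

/* Illustrative only: the locking contract documented above. */
static void example_munlock(struct mm_struct *mm, struct page *page)
{
	down_read(&mm->mmap_sem);	/* read side now suffices */
	lock_page(page);		/* page lock is also required */
	munlock_vma_page(page);
	unlock_page(page);
	up_read(&mm->mmap_sem);
}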
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -34,6 +34,7 @@
 #include <linux/ksm.h>
 
 #include <asm/tlbflush.h>
+#include "internal.h"
 
 /*
  * A few notes about the KSM scanning process,
@@ -762,6 +763,9 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
 	    pages_identical(page, kpage))
 		err = replace_page(vma, page, kpage, orig_pte);
 
+	if ((vma->vm_flags & VM_LOCKED) && !err)
+		munlock_vma_page(page);
+
 	unlock_page(page);
 out:
 	return err;
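Once replace_page() has pointed the pte at kpage, the old page no longer maps this VM_LOCKED vma, so its PageMlocked must be cleared while its page lock is still held or it can never be reclaimed. A hypothetical helper (merge_then_munlock() is not in the patch) condensing the ordering the hunk above relies on:

/*
 * Sketch only: munlock the old page after a successful merge,
 * before unlock_page() drops the page lock.
 */
static int merge_then_munlock(struct vm_area_struct *vma, struct page *page,
			      struct page *kpage, pte_t orig_pte)
{
	int err = replace_page(vma, page, kpage, orig_pte);

	/* page no longer maps this VM_LOCKED vma once replaced */
	if ((vma->vm_flags & VM_LOCKED) && !err)
		munlock_vma_page(page);

	return err;
}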
diff --git a/mm/mlock.c b/mm/mlock.c
index 48691fb08514..adcbe9032b58 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -99,14 +99,14 @@ void mlock_vma_page(struct page *page)
  * not get another chance to clear PageMlocked. If we successfully
  * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
  * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma. So, we go ahead and SetPageMlocked(),
+ * is mapped in a non-linear vma. So, we go ahead and ClearPageMlocked(),
  * perhaps redundantly.
  * If we lose the isolation race, and the page is mapped by other VM_LOCKED
  * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
  * either of which will restore the PageMlocked state by calling
  * mlock_vma_page() above, if it can grab the vma's mmap sem.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 
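For context, a condensed sketch of the flow that comment describes, assuming the mm-internal helpers isolate_lru_page(), try_to_munlock() and putback_lru_page() behave as used elsewhere in mm/ (error-path bookkeeping omitted):

void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	/* clear eagerly, perhaps redundantly, as the comment explains */
	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			/*
			 * try_to_munlock() restores PageMlocked (returning
			 * SWAP_MLOCK) if another VM_LOCKED vma still maps
			 * the page.
			 */
			try_to_munlock(page);
			putback_lru_page(page);
		}
		/* if isolation raced and failed, vmscan fixes it up later */
	}
}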