author		Lee Schermerhorn <Lee.Schermerhorn@hp.com>	2009-12-14 20:59:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-15 11:53:23 -0500
commit		6927c1dd93fc982140f3a3742ac4b224cd3e02b2
tree		c87db39ee74fcc825cf238ec289473f01f476897 /mm
parent		418b27ef50e7e9b0c2fbd88db804bf065e5eb1a6
mlock: replace stale comments in munlock_vma_page()
Cleanup stale comments on munlock_vma_page().

Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Acked-by: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/mlock.c	41
1 file changed, 19 insertions(+), 22 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index adcbe9032b58..2b8335a89400 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -88,23 +88,20 @@ void mlock_vma_page(struct page *page)
 	}
 }
 
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page - page to be unlocked
  *
- * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page.  We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock.  However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked.  If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and ClearPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it.  However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU.  If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked as we might not get another chance.  If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
  */
 void munlock_vma_page(struct page *page)
 {
@@ -123,12 +120,12 @@ void munlock_vma_page(struct page *page)
 		putback_lru_page(page);
 	} else {
 		/*
-		 * We lost the race. let try_to_unmap() deal
-		 * with it. At least we get the page state and
-		 * mlock stats right. However, page is still on
-		 * the noreclaim list. We'll fix that up when
-		 * the page is eventually freed or we scan the
-		 * noreclaim list.
+		 * Some other task has removed the page from the LRU.
+		 * putback_lru_page() will take care of removing the
+		 * page from the unevictable list, if necessary.
+		 * vmscan [page_referenced()] will move the page back
+		 * to the unevictable list if some other vma has it
+		 * mlocked.
 		 */
 		if (PageUnevictable(page))
 			count_vm_event(UNEVICTABLE_PGSTRANDED);
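
For orientation, the control flow that the new comments document can be read as the following sketch. This is an illustrative paraphrase of munlock_vma_page(), not the verbatim mm/mlock.c source: the helpers used (TestClearPageMlocked(), isolate_lru_page(), try_to_munlock(), putback_lru_page(), count_vm_event()) are the real mm primitives of this era, but the statistics accounting is abbreviated.

/*
 * Illustrative sketch only -- a simplified paraphrase of
 * munlock_vma_page() as described by the comments above, not the
 * verbatim kernel source.  Assumes the usual mm headers, as
 * included by mm/mlock.c.
 */
static void munlock_vma_page_sketch(struct page *page)
{
	/* Pre-clear PageMlocked: we may not get another chance. */
	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);

		if (!isolate_lru_page(page)) {
			/*
			 * Page is off the LRU, so nothing else can touch
			 * its mlocked state while we walk the rmap.  If
			 * another VM_LOCKED vma still maps the page,
			 * try_to_munlock() re-marks it PageMlocked, and
			 * putback_lru_page() routes it to the proper list.
			 */
			try_to_munlock(page);
			putback_lru_page(page);
		} else {
			/*
			 * Some other task removed the page from the LRU.
			 * Leave it for putback_lru_page()/vmscan to sort
			 * out; just count a stranded page if it is
			 * already marked unevictable.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
		}
	}
}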