path: root/mm/mlock.c
author	Nick Piggin <npiggin@suse.de>	2008-10-18 23:26:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:52:30 -0400
commit	b291f000393f5a0b679012b39d79fbc85c018233 (patch)
tree	28eb785d4d157d3396e4377294e6054635a4bd90 /mm/mlock.c
parent	89e004ea55abe201b29e2d6e35124101f1288ef7 (diff)
mlock: mlocked pages are unevictable
Make sure that mlocked pages also live on the unevictable LRU, so kswapd
will not scan them over and over again.  This is achieved through various
strategies:

1) add yet another page flag--PG_mlocked--to indicate that the page is
   locked for efficient testing in vmscan and, optionally, the fault path.
   This allows early culling of unevictable pages, preventing them from
   getting to page_referenced()/try_to_unmap().  It also allows separate
   accounting of mlock'd pages, as Nick's original patch did.

   Note:  Nick's original mlock patch used a PG_mlocked flag.  I had
   removed this in favor of the PG_unevictable flag plus an mlock_count
   [new page struct member].  I restored the PG_mlocked flag to eliminate
   the new count field.

2) add the mlock/unevictable infrastructure to mm/mlock.c, with internal
   APIs in mm/internal.h.  This is a rework of Nick's original patch to
   these files, taking into account that mlocked pages are now kept on
   the unevictable LRU list.

3) update vmscan.c:page_evictable() to check PageMlocked() and, if a vma
   is passed in, the vm_flags.  Note that the vma will only be passed in
   for new pages in the fault path, and then only if the "cull unevictable
   pages in fault path" patch is included.

4) add try_to_munlock() to rmap.c to walk a page's rmap and
   ClearPageMlocked() if no other vmas have it mlocked.  This reuses as
   much of try_to_unmap() as possible, and effectively replaces the use
   of one of the lru list links as an mlock count.

   If this mechanism lets pages in mlocked vmas leak through without
   PG_mlocked set [I don't know that it does], we should catch them later
   in try_to_unmap().  One hopes this will be rare, as it will be
   relatively expensive.

Original mm/internal.h, mm/rmap.c and mm/mlock.c changes:
Signed-off-by: Nick Piggin <npiggin@suse.de>

splitlru: introduce __get_user_pages():
The new munlock processing needs GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
because the current get_user_pages() can't grab PROT_NONE pages, and
therefore PROT_NONE pages couldn't be munlocked.

[akpm@linux-foundation.org: fix this for pagemap-pass-mm-into-pagewalkers.patch]
[akpm@linux-foundation.org: untangle patch interdependencies]
[akpm@linux-foundation.org: fix things after out-of-order merging]
[hugh@veritas.com: fix page-flags mess]
[lee.schermerhorn@hp.com: fix munlock page table walk - now requires 'mm']
[kosaki.motohiro@jp.fujitsu.com: build fix]
[kosaki.motohiro@jp.fujitsu.com: fix truncate race and several comments]
[kosaki.motohiro@jp.fujitsu.com: splitlru: introduce __get_user_pages()]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
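To make point 3 concrete, here is a simplified sketch of the evictability
test vmscan can apply.  This is an illustration only, not code from this
commit -- the real check lives in mm/vmscan.c, and the helper name and
exact signature below are assumptions:

static int page_evictable_sketch(struct page *page, struct vm_area_struct *vma)
{
	/* strategy 1: PG_mlocked allows early culling in vmscan */
	if (PageMlocked(page))
		return 0;
	/* fault path only: a vma is supplied for new pages (point 3) */
	if (vma && (vma->vm_flags & VM_LOCKED))
		return 0;
	return 1;	/* evictable: may live on the [in]active lists */
}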
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	394
1 file changed, 375 insertions(+), 19 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 01fbe93eff5c..8746fe3f9730 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -8,10 +8,18 @@
 #include <linux/capability.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/pagemap.h>
 #include <linux/mempolicy.h>
 #include <linux/syscalls.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/rmap.h>
+#include <linux/mmzone.h>
+#include <linux/hugetlb.h>
+
+#include "internal.h"
 
 int can_do_mlock(void)
 {
@@ -23,17 +31,360 @@ int can_do_mlock(void)
 }
 EXPORT_SYMBOL(can_do_mlock);
 
+#ifdef CONFIG_UNEVICTABLE_LRU
+/*
+ * Mlocked pages are marked with PageMlocked() flag for efficient testing
+ * in vmscan and, possibly, the fault path; and to support semi-accurate
+ * statistics.
+ *
+ * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
+ * be placed on the LRU "unevictable" list, rather than the [in]active lists.
+ * The unevictable list is an LRU sibling list to the [in]active lists.
+ * PageUnevictable is set to indicate the unevictable state.
+ *
+ * When lazy mlocking via vmscan, it is important to ensure that the
+ * vma's VM_LOCKED status is not concurrently being modified, otherwise we
+ * may have mlocked a page that is being munlocked.  So lazy mlock must take
+ * the mmap_sem for read, and verify that the vma really is locked
+ * (see mm/rmap.c).
+ */
+
+/*
+ * LRU accounting for clear_page_mlock()
+ */
+void __clear_page_mlock(struct page *page)
+{
+	VM_BUG_ON(!PageLocked(page));
+
+	if (!page->mapping) {	/* truncated ? */
+		return;
+	}
+
+	if (!isolate_lru_page(page)) {
+		putback_lru_page(page);
+	} else {
+		/*
+		 * Page not on the LRU yet.  Flush all pagevecs and retry.
+		 */
+		lru_add_drain_all();
+		if (!isolate_lru_page(page))
+			putback_lru_page(page);
+	}
+}
+
+/*
+ * Mark page as mlocked if not already.
+ * If page on LRU, isolate and putback to move to unevictable list.
+ */
+void mlock_vma_page(struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+
+	if (!TestSetPageMlocked(page) && !isolate_lru_page(page))
+		putback_lru_page(page);
+}
+
+/*
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ *
+ * Note:  unlike mlock_vma_page(), we can't just clear the PageMlocked
+ * [in try_to_munlock()] and then attempt to isolate the page.  We must
+ * isolate the page to keep others from messing with its unevictable
+ * and mlocked state while trying to munlock.  However, we pre-clear the
+ * mlocked state anyway as we might lose the isolation race and we might
+ * not get another chance to clear PageMlocked.  If we successfully
+ * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
+ * mapping the page, it will restore the PageMlocked state, unless the page
+ * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
+ * perhaps redundantly.
+ * If we lose the isolation race, and the page is mapped by other VM_LOCKED
+ * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
+ * either of which will restore the PageMlocked state by calling
+ * mlock_vma_page() above, if it can grab the vma's mmap sem.
+ */
+static void munlock_vma_page(struct page *page)
+{
+	BUG_ON(!PageLocked(page));
+
+	if (TestClearPageMlocked(page) && !isolate_lru_page(page)) {
+		try_to_munlock(page);
+		putback_lru_page(page);
+	}
+}
+
+/*
+ * mlock a range of pages in the vma.
+ *
+ * This takes care of making the pages present too.
+ *
+ * vma->vm_mm->mmap_sem must be held for write.
+ */
+static int __mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	unsigned long addr = start;
+	struct page *pages[16]; /* 16 gives a reasonable batch */
+	int write = !!(vma->vm_flags & VM_WRITE);
+	int nr_pages = (end - start) / PAGE_SIZE;
+	int ret;
+
+	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
+	VM_BUG_ON(start < vma->vm_start || end > vma->vm_end);
+	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+
+	lru_add_drain_all();	/* push cached pages to LRU */
+
+	while (nr_pages > 0) {
+		int i;
+
+		cond_resched();
+
+		/*
+		 * get_user_pages makes pages present if we are
+		 * setting mlock, and this extra reference count will
+		 * disable migration of this page.  However, page may
+		 * still be truncated out from under us.
+		 */
+		ret = get_user_pages(current, mm, addr,
+				min_t(int, nr_pages, ARRAY_SIZE(pages)),
+				write, 0, pages, NULL);
+		/*
+		 * This can happen for, e.g., VM_NONLINEAR regions before
+		 * a page has been allocated and mapped at a given offset,
+		 * or for addresses that map beyond end of a file.
+		 * We'll mlock the pages if/when they get faulted in.
+		 */
+		if (ret < 0)
+			break;
+		if (ret == 0) {
+			/*
+			 * We know the vma is there, so the only time
+			 * we cannot get a single page should be an
+			 * error (ret < 0) case.
+			 */
+			WARN_ON(1);
+			break;
+		}
+
+		lru_add_drain();	/* push cached pages to LRU */
+
+		for (i = 0; i < ret; i++) {
+			struct page *page = pages[i];
+
+			lock_page(page);
+			/*
+			 * Because we lock page here and migration is blocked
+			 * by the elevated reference, we need only check for
+			 * page truncation (file-cache only).
+			 */
+			if (page->mapping)
+				mlock_vma_page(page);
+			unlock_page(page);
+			put_page(page);	/* ref from get_user_pages() */
+
+			/*
+			 * here we assume that get_user_pages() has given us
+			 * a list of virtually contiguous pages.
+			 */
+			addr += PAGE_SIZE;	/* for next get_user_pages() */
+			nr_pages--;
+		}
+	}
+
+	lru_add_drain_all();	/* to update stats */
+
+	return 0;	/* count entire vma as locked_vm */
+}
+
+/*
+ * private structure for munlock page table walk
+ */
+struct munlock_page_walk {
+	struct vm_area_struct *vma;
+	pmd_t *pmd; /* for migration_entry_wait() */
+};
+
+/*
+ * munlock normal pages for present ptes
+ */
+static int __munlock_pte_handler(pte_t *ptep, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	struct munlock_page_walk *mpw = walk->private;
+	swp_entry_t entry;
+	struct page *page;
+	pte_t pte;
+
+retry:
+	pte = *ptep;
+	/*
+	 * If it's a swap pte, we might be racing with page migration.
+	 */
+	if (unlikely(!pte_present(pte))) {
+		if (!is_swap_pte(pte))
+			goto out;
+		entry = pte_to_swp_entry(pte);
+		if (is_migration_entry(entry)) {
+			migration_entry_wait(mpw->vma->vm_mm, mpw->pmd, addr);
+			goto retry;
+		}
+		goto out;
+	}
+
+	page = vm_normal_page(mpw->vma, addr, pte);
+	if (!page)
+		goto out;
+
+	lock_page(page);
+	if (!page->mapping) {
+		unlock_page(page);
+		goto retry;
+	}
+	munlock_vma_page(page);
+	unlock_page(page);
+
+out:
+	return 0;
+}
+
+/*
+ * Save pmd for pte handler for waiting on migration entries
+ */
+static int __munlock_pmd_handler(pmd_t *pmd, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	struct munlock_page_walk *mpw = walk->private;
+
+	mpw->pmd = pmd;
+	return 0;
+}
+
+
+/*
+ * munlock a range of pages in the vma using standard page table walk.
+ *
+ * vma->vm_mm->mmap_sem must be held for write.
+ */
+static void __munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct munlock_page_walk mpw = {
+		.vma = vma,
+	};
+	struct mm_walk munlock_page_walk = {
+		.pmd_entry = __munlock_pmd_handler,
+		.pte_entry = __munlock_pte_handler,
+		.private = &mpw,
+		.mm = mm,
+	};
+
+	VM_BUG_ON(start & ~PAGE_MASK || end & ~PAGE_MASK);
+	VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem));
+	VM_BUG_ON(start < vma->vm_start);
+	VM_BUG_ON(end > vma->vm_end);
+
+	lru_add_drain_all();	/* push cached pages to LRU */
+	walk_page_range(start, end, &munlock_page_walk);
+	lru_add_drain_all();	/* to update stats */
+}
+
+#else /* CONFIG_UNEVICTABLE_LRU */
+
+/*
+ * Just make pages present if VM_LOCKED.  No-op if unlocking.
+ */
+static int __mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	if (vma->vm_flags & VM_LOCKED)
+		make_pages_present(start, end);
+	return 0;
+}
+
+/*
+ * munlock a range of pages in the vma -- no-op.
+ */
+static void __munlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+}
+#endif /* CONFIG_UNEVICTABLE_LRU */
+
+/*
+ * mlock all pages in this vma range.  For mmap()/mremap()/...
+ */
+int mlock_vma_pages_range(struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	int nr_pages = (end - start) / PAGE_SIZE;
+	BUG_ON(!(vma->vm_flags & VM_LOCKED));
+
+	/*
+	 * filter unlockable vmas
+	 */
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+		goto no_mlock;
+
+	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+			is_vm_hugetlb_page(vma) ||
+			vma == get_gate_vma(current)))
+		return __mlock_vma_pages_range(vma, start, end);
+
+	/*
+	 * User mapped kernel pages or huge pages:
+	 * make these pages present to populate the ptes, but
+	 * fall thru' to reset VM_LOCKED--no need to unlock, and
+	 * return nr_pages so these don't get counted against task's
+	 * locked limit.  huge pages are already counted against
+	 * locked vm limit.
+	 */
+	make_pages_present(start, end);
+
+no_mlock:
+	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
+	return nr_pages;		/* pages NOT mlocked */
+}
+
+
+/*
+ * munlock all pages in vma.  For munmap() and exit().
+ */
+void munlock_vma_pages_all(struct vm_area_struct *vma)
+{
+	vma->vm_flags &= ~VM_LOCKED;
+	__munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
+}
+
+/*
+ * mlock_fixup  - handle mlock[all]/munlock[all] requests.
+ *
+ * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
+ * munlock is a no-op.  However, for some special vmas, we go ahead and
+ * populate the ptes via make_pages_present().
+ *
+ * For vmas that pass the filters, merge/split as appropriate.
+ */
 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	unsigned long start, unsigned long end, unsigned int newflags)
 {
-	struct mm_struct * mm = vma->vm_mm;
+	struct mm_struct *mm = vma->vm_mm;
 	pgoff_t pgoff;
-	int pages;
+	int nr_pages;
 	int ret = 0;
-
-	if (newflags == vma->vm_flags) {
-		*prev = vma;
-		goto out;
+	int lock = newflags & VM_LOCKED;
+
+	if (newflags == vma->vm_flags ||
+			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		goto out;	/* don't set VM_LOCKED, don't count */
+
+	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
+			is_vm_hugetlb_page(vma) ||
+			vma == get_gate_vma(current)) {
+		if (lock)
+			make_pages_present(start, end);
+		goto out;	/* don't set VM_LOCKED, don't count */
 	}
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
@@ -44,8 +395,6 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 		goto success;
 	}
 
-	*prev = vma;
-
 	if (start != vma->vm_start) {
 		ret = split_vma(mm, vma, start, 1);
 		if (ret)
@@ -60,24 +409,31 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
 success:
 	/*
+	 * Keep track of amount of locked VM.
+	 */
+	nr_pages = (end - start) >> PAGE_SHIFT;
+	if (!lock)
+		nr_pages = -nr_pages;
+	mm->locked_vm += nr_pages;
+
+	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 * It's okay if try_to_unmap_one unmaps a page just after we
-	 * set VM_LOCKED, make_pages_present below will bring it back.
+	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
 	 */
 	vma->vm_flags = newflags;
 
-	/*
-	 * Keep track of amount of locked VM.
-	 */
-	pages = (end - start) >> PAGE_SHIFT;
-	if (newflags & VM_LOCKED) {
-		pages = -pages;
-		if (!(newflags & VM_IO))
-			ret = make_pages_present(start, end);
-	}
+	if (lock) {
+		ret = __mlock_vma_pages_range(vma, start, end);
+		if (ret > 0) {
+			mm->locked_vm -= ret;
+			ret = 0;
+		}
+	} else
+		__munlock_vma_pages_range(vma, start, end);
 
-	mm->locked_vm -= pages;
 out:
+	*prev = vma;
 	return ret;
 }
 
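For completeness, here is a minimal userspace exercise of the path reworked
above.  It is illustrative only, not part of this commit, and assumes
RLIMIT_MEMLOCK permits locking 16 pages: mlock(2) faults the pages in and,
with this patch, marks them PG_mlocked on the unevictable LRU; munlock(2)
drives the munlock page-table walk added in __munlock_vma_pages_range().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	size_t len = 16 * page_size;

	/* Anonymous mapping; pages are not yet present. */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/*
	 * mlock() makes every page in the range present and pins it;
	 * kernel-side, each page is marked PG_mlocked and lands on the
	 * unevictable LRU so kswapd stops scanning it.
	 */
	if (mlock(buf, len) != 0) {
		perror("mlock");	/* may fail if RLIMIT_MEMLOCK is low */
		return EXIT_FAILURE;
	}
	memset(buf, 0xaa, len);		/* will not fault: pages are locked */

	/*
	 * munlock() triggers the munlock page-table walk: each page has
	 * PG_mlocked cleared unless another VM_LOCKED vma still maps it.
	 */
	if (munlock(buf, len) != 0) {
		perror("munlock");
		return EXIT_FAILURE;
	}

	munmap(buf, len);
	return EXIT_SUCCESS;
}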