author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>   2015-04-14 18:44:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>         2015-04-14 19:49:00 -0400
commit     fc05f566210fa57f8e68ead8762b8dbb3f1c61e3
tree       429463d285b012a54d213e7d7eb8810e55e16b36
parent     84d33df279e0380995b0e03fb8aad04cef2bc29f
mm: rename __mlock_vma_pages_range() to populate_vma_page_range()
__mlock_vma_pages_range() doesn't necessarily mlock pages: whether it does
depends on the vma flags, and the same codepath is used for MAP_POPULATE.
Let's rename __mlock_vma_pages_range() to populate_vma_page_range().

This patch also drops the mlock_vma_pages_range() references from the
documentation; that function was removed in cea10a19b797 ("mm: directly use
__mlock_vma_pages_range() in find_extend_vma()").

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  Documentation/vm/unevictable-lru.txt | 26
-rw-r--r--  mm/internal.h                        |  2
-rw-r--r--  mm/mlock.c                           | 12
-rw-r--r--  mm/mmap.c                            |  4
4 files changed, 17 insertions(+), 27 deletions(-)
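Not part of the patch, but to make the changelog's point concrete: from userspace, mmap() with MAP_POPULATE and mlock() both end up in the same populate path, yet only mlock() leaves the pages locked. A minimal sketch, assuming Linux with glibc; the helper resident_pages() is made up for the illustration:

/*
 * Illustration only, not part of the patch: both mmap(MAP_POPULATE) and
 * mlock() prefault the range, but only mlock() sets VM_LOCKED on the VMA
 * (visible as VmLck in /proc/self/status).
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static size_t resident_pages(void *addr, size_t len)	/* hypothetical helper */
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t npages = (len + page - 1) / page;
	unsigned char *vec = malloc(npages);
	size_t i, n = 0;

	if (!vec || mincore(addr, len, vec) != 0)
		return 0;
	for (i = 0; i < npages; i++)
		n += vec[i] & 1;	/* bit 0: page resident in memory */
	free(vec);
	return n;
}

int main(void)
{
	size_t len = 16 * 4096;

	/* Prefaulted via the populate path, but not locked. */
	void *populated = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);

	/* Prefaulted via the same path, and locked (VM_LOCKED). */
	void *locked = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	mlock(locked, len);

	printf("MAP_POPULATE: %zu resident pages\n", resident_pages(populated, len));
	printf("mlock():      %zu resident pages\n", resident_pages(locked, len));
	return 0;
}

On a typical system both ranges report as resident, but only the mlock()ed one is charged to VmLck in /proc/self/status, which is exactly why naming the shared kernel helper after mlock was misleading.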
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index 744f82f86c58..86cb4624fc5a 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -317,7 +317,7 @@ If the VMA passes some filtering as described in "Filtering Special Vmas"
 below, mlock_fixup() will attempt to merge the VMA with its neighbors or split
 off a subset of the VMA if the range does not cover the entire VMA. Once the
 VMA has been merged or split or neither, mlock_fixup() will call
-__mlock_vma_pages_range() to fault in the pages via get_user_pages() and to
+populate_vma_page_range() to fault in the pages via get_user_pages() and to
 mark the pages as mlocked via mlock_vma_page().
 
 Note that the VMA being mlocked might be mapped with PROT_NONE. In this case,
@@ -327,7 +327,7 @@ fault path or in vmscan.
 
 Also note that a page returned by get_user_pages() could be truncated or
 migrated out from under us, while we're trying to mlock it. To detect this,
-__mlock_vma_pages_range() checks page_mapping() after acquiring the page lock.
+populate_vma_page_range() checks page_mapping() after acquiring the page lock.
 If the page is still associated with its mapping, we'll go ahead and call
 mlock_vma_page(). If the mapping is gone, we just unlock the page and move on.
 In the worst case, this will result in a page mapped in a VM_LOCKED VMA
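The truncation race this hunk documents boils down to a short pattern. A sketch only, not code from the patch; the wrapper name is hypothetical and the real logic lives in mm/mlock.c:

/*
 * Sketch of the re-check described above.  The page lock stabilizes
 * page->mapping: truncation and migration both take the page lock, so if
 * page_mapping() is still non-NULL here the page is still attached to its
 * mapping and can safely be marked mlocked.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static void sketch_mlock_one_page(struct page *page)	/* hypothetical helper */
{
	lock_page(page);
	if (page_mapping(page))
		mlock_vma_page(page);	/* sets PG_mlocked, as described above */
	unlock_page(page);
}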
@@ -392,7 +392,7 @@ ignored for munlock.
 
 If the VMA is VM_LOCKED, mlock_fixup() again attempts to merge or split off the
 specified range. The range is then munlocked via the function
-__mlock_vma_pages_range() - the same function used to mlock a VMA range -
+populate_vma_page_range() - the same function used to mlock a VMA range -
 passing a flag to indicate that munlock() is being performed.
 
 Because the VMA access protections could have been changed to PROT_NONE after
@@ -402,7 +402,7 @@ get_user_pages() was enhanced to accept a flag to ignore the permissions when
 fetching the pages - all of which should be resident as a result of previous
 mlocking.
 
-For munlock(), __mlock_vma_pages_range() unlocks individual pages by calling
+For munlock(), populate_vma_page_range() unlocks individual pages by calling
 munlock_vma_page(). munlock_vma_page() unconditionally clears the PG_mlocked
 flag using TestClearPageMlocked(). As with mlock_vma_page(),
 munlock_vma_page() use the Test*PageMlocked() function to handle the case where
@@ -463,21 +463,11 @@ populate the page table.
 
 To mlock a range of memory under the unevictable/mlock infrastructure, the
 mmap() handler and task address space expansion functions call
-mlock_vma_pages_range() specifying the vma and the address range to mlock.
-mlock_vma_pages_range() filters VMAs like mlock_fixup(), as described above in
-"Filtering Special VMAs". It will clear the VM_LOCKED flag, which will have
-already been set by the caller, in filtered VMAs. Thus these VMA's need not be
-visited for munlock when the region is unmapped.
-
-For "normal" VMAs, mlock_vma_pages_range() calls __mlock_vma_pages_range() to
-fault/allocate the pages and mlock them. Again, like mlock_fixup(),
-mlock_vma_pages_range() downgrades the mmap semaphore to read mode before
-attempting to fault/allocate and mlock the pages and "upgrades" the semaphore
-back to write mode before returning.
-
-The callers of mlock_vma_pages_range() will have already added the memory range
+populate_vma_page_range() specifying the vma and the address range to mlock.
+
+The callers of populate_vma_page_range() will have already added the memory range
 to be mlocked to the task's "locked_vm". To account for filtered VMAs,
-mlock_vma_pages_range() returns the number of pages NOT mlocked. All of the
+populate_vma_page_range() returns the number of pages NOT mlocked. All of the
 callers then subtract a non-negative return value from the task's locked_vm. A
 negative return value represent an error - for example, from get_user_pages()
 attempting to fault in a VMA with PROT_NONE access. In this case, we leave the
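The accounting contract described in the last hunk can be summarized in a short sketch. Not code from the patch; sketch_account_mlock() is a hypothetical caller and details are simplified:

/*
 * Sketch of the locked_vm accounting contract documented above.  The caller
 * charges the whole range to locked_vm up front, then corrects it with the
 * non-negative return value (pages NOT mlocked); a negative return is an
 * error from the fault path, e.g. a PROT_NONE mapping.  The prototype of
 * populate_vma_page_range() comes from mm/internal.h (see below).
 */
#include <linux/mm.h>

static int sketch_account_mlock(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	long ret;

	mm->locked_vm += (end - start) >> PAGE_SHIFT;	/* charged up front */

	ret = populate_vma_page_range(vma, start, end, NULL);
	if (ret < 0)
		return ret;		/* leave the accounting, as documented */

	mm->locked_vm -= ret;		/* pages that were not mlocked */
	return 0;
}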
diff --git a/mm/internal.h b/mm/internal.h
index a96da5b0029d..7df78a5269f3 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -240,7 +240,7 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		struct vm_area_struct *prev, struct rb_node *rb_parent);
 
 #ifdef CONFIG_MMU
-extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
+extern long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *nonblocking);
 extern void munlock_vma_pages_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
diff --git a/mm/mlock.c b/mm/mlock.c
index f756e28b33fc..9d0f3cd716c5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -206,13 +206,13 @@ out:
 }
 
 /**
- * __mlock_vma_pages_range() - mlock a range of pages in the vma.
+ * populate_vma_page_range() - populate a range of pages in the vma.
  * @vma:   target vma
  * @start: start address
  * @end:   end address
  * @nonblocking:
  *
- * This takes care of making the pages present too.
+ * This takes care of mlocking the pages too if VM_LOCKED is set.
  *
  * return 0 on success, negative error code on error.
  *
@@ -224,7 +224,7 @@ out:
  * If @nonblocking is non-NULL, it must held for read only and may be
  * released.  If it's released, *@nonblocking will be set to 0.
  */
-long __mlock_vma_pages_range(struct vm_area_struct *vma,
+long populate_vma_page_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
@@ -596,7 +596,7 @@ success:
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 * It's okay if try_to_unmap_one unmaps a page just after we
-	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
+	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
 
 	if (lock)
@@ -702,11 +702,11 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
 		if (nstart < vma->vm_start)
 			nstart = vma->vm_start;
 		/*
-		 * Now fault in a range of pages. __mlock_vma_pages_range()
+		 * Now fault in a range of pages. populate_vma_page_range()
		 * double checks the vma flags, so that it won't mlock pages
		 * if the vma was already munlocked.
		 */
-		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
+		ret = populate_vma_page_range(vma, nstart, nend, &locked);
 		if (ret < 0) {
 			if (ignore_errors) {
 				ret = 0;
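For reference, the calling convention that __mm_populate() follows in the hunk above looks roughly like this. A condensed sketch, not code from the patch; the multi-VMA loop and error handling are omitted and the function name is hypothetical:

/*
 * Condensed sketch of the calling convention used by __mm_populate() above.
 * mmap_sem is held for read and &locked is passed; populate_vma_page_range()
 * may drop the semaphore while faulting, in which case it clears 'locked'
 * (per the kernel-doc above) and the caller must not unlock it again.
 */
#include <linux/mm.h>
#include <linux/rwsem.h>

static long sketch_populate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	int locked = 1;
	long ret = 0;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (vma && start >= vma->vm_start)
		ret = populate_vma_page_range(vma, start,
					      min(end, vma->vm_end), &locked);
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}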
diff --git a/mm/mmap.c b/mm/mmap.c
index 9ec50a368634..06a6076c92e5 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2316,7 +2316,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	if (!prev || expand_stack(prev, addr))
 		return NULL;
 	if (prev->vm_flags & VM_LOCKED)
-		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
+		populate_vma_page_range(prev, addr, prev->vm_end, NULL);
 	return prev;
 }
 #else
@@ -2351,7 +2351,7 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 	if (expand_stack(vma, addr))
 		return NULL;
 	if (vma->vm_flags & VM_LOCKED)
-		__mlock_vma_pages_range(vma, addr, start, NULL);
+		populate_vma_page_range(vma, addr, start, NULL);
 	return vma;
 }
 #endif