aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2015-04-14 18:44:37 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-04-14 19:48:59 -0400
commit84d33df279e0380995b0e03fb8aad04cef2bc29f (patch)
tree05472b7380d74e7ff9d386a52cee4d6c8b87fea8 /mm
parentc21a6daf466a7bfa7bc2ac594837a1ce793a7960 (diff)
mm: rename FOLL_MLOCK to FOLL_POPULATE
After commit a1fde08c74e9 ("VM: skip the stack guard page lookup in get_user_pages only for mlock") FOLL_MLOCK has lost its original meaning: we don't necessarily mlock the page if the flag is set -- we also take VM_LOCKED into consideration. Since we use the same codepath for __mm_populate(), let's rename FOLL_MLOCK to FOLL_POPULATE. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Acked-by: Linus Torvalds <torvalds@linux-foundation.org> Acked-by: David Rientjes <rientjes@google.com> Cc: Michel Lespinasse <walken@google.com> Cc: Rik van Riel <riel@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/gup.c6
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/mlock.c2
3 files changed, 5 insertions, 5 deletions
diff --git a/mm/gup.c b/mm/gup.c
index a6e24e246f86..1b114ba9aebf 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -92,7 +92,7 @@ retry:
 	 */
 	mark_page_accessed(page);
 }
-if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 	/*
 	 * The preliminary mapping check is mainly to avoid the
 	 * pointless overhead of lock_page on the ZERO_PAGE
@@ -265,8 +265,8 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 unsigned int fault_flags = 0;
 int ret;

-/* For mlock, just skip the stack guard page. */
-if ((*flags & FOLL_MLOCK) &&
+/* For mm_populate(), just skip the stack guard page. */
+if ((*flags & FOLL_POPULATE) &&
 		(stack_guard_page_start(vma, address) ||
 		 stack_guard_page_end(vma, address + PAGE_SIZE)))
 	return -ENOENT;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6817b0350c71..10a4b6cea0d1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,7 +1231,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 				    pmd, _pmd, 1))
 		update_mmu_cache_pmd(vma, addr, pmd);
 }
-if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
+if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
 	if (page->mapping && trylock_page(page)) {
 		lru_add_drain();
 		if (page->mapping)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8a54cd214925..f756e28b33fc 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma,
 VM_BUG_ON_VMA(end > vma->vm_end, vma);
 VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);

-gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+gup_flags = FOLL_TOUCH | FOLL_POPULATE;
 /*
  * We want to touch writable mappings with a write fault in order
  * to break COW, except for shared mappings because these don't COW