author	Michel Lespinasse <walken@google.com>	2011-01-13 18:46:12 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:36 -0500
commit	5fdb2002131cd4e210b9638a4fc932ec7be491d1 (patch)
tree	233dc1cab275397d211a7d5490f19192a59a47fd /mm/mlock.c
parent	110d74a921f4d272b47ef6104fcf937df808f4c8 (diff)
mm: move VM_LOCKED check to __mlock_vma_pages_range()
Use a single code path for faulting in pages during mlock.

The reason to have it in this patch series is that I did not want to
update both code paths in a later change that releases mmap_sem when
blocking on disk.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
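[Editor's note: the following userspace sketch is not part of the commit; it only illustrates the call path the patch unifies. After this change, mlock() reaches do_mlock_pages(), which now calls __mlock_vma_pages_range() unconditionally, and the VM_LOCKED check inside that function decides whether FOLL_MLOCK is set.]

/* Hypothetical userspace illustration (not from the commit): mlock()
 * faults in and pins the range via do_mlock_pages() ->
 * __mlock_vma_pages_range() on kernels with this patch applied.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Faults in and pins the whole range; on this kernel the
	 * fault-in happens in __mlock_vma_pages_range(), which sets
	 * FOLL_MLOCK itself because the VMA is VM_LOCKED. */
	if (mlock(buf, len)) {
		perror("mlock");
		return 1;
	}

	memset(buf, 0xaa, len);	/* pages are already resident */

	munlock(buf, len);
	munmap(buf, len);
	return 0;
}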
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 25cc9e88c54..84da66b7bbf 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -169,7 +169,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
+	gup_flags = FOLL_TOUCH;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,6 +178,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_MLOCK;
+
 	/* We don't try to access the guard page of a stack vma */
 	if (stack_guard_page(vma, start)) {
 		addr += PAGE_SIZE;
@@ -456,18 +459,15 @@ static int do_mlock_pages(unsigned long start, size_t len, int ignore_errors)
 		/*
 		 * Now fault in a range of pages within the first VMA.
 		 */
-		if (vma->vm_flags & VM_LOCKED) {
-			ret = __mlock_vma_pages_range(vma, nstart, nend);
-			if (ret < 0 && ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			if (ret) {
-				ret = __mlock_posix_error_return(ret);
-				break;
-			}
-		} else
-			make_pages_present(nstart, nend);
+		ret = __mlock_vma_pages_range(vma, nstart, nend);
+		if (ret < 0 && ignore_errors) {
+			ret = 0;
+			continue;	/* continue at next VMA */
+		}
+		if (ret) {
+			ret = __mlock_posix_error_return(ret);
+			break;
+		}
 	}
 	up_read(&mm->mmap_sem);
 	return ret;	/* 0 or negative error code */