Diffstat (limited to 'mm/mlock.c')
 mm/mlock.c | 46 +++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 43 insertions(+), 3 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8746fe3f9730..c83896a72504 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -318,6 +318,7 @@ static void __munlock_vma_pages_range(struct vm_area_struct *vma,
 int mlock_vma_pages_range(struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	int nr_pages = (end - start) / PAGE_SIZE;
 	BUG_ON(!(vma->vm_flags & VM_LOCKED));
 
@@ -329,8 +330,19 @@ int mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current)))
-		return __mlock_vma_pages_range(vma, start, end);
+			vma == get_gate_vma(current))) {
+		downgrade_write(&mm->mmap_sem);
+		nr_pages = __mlock_vma_pages_range(vma, start, end);
+
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		vma = find_vma(mm, start);
+		/* non-NULL vma must contain @start, but need to check @end */
+		if (!vma || end > vma->vm_end)
+			return -EAGAIN;
+		return nr_pages;
+	}
 
 	/*
 	 * User mapped kernel pages or huge pages:
@@ -424,13 +436,41 @@ success:
 	vma->vm_flags = newflags;
 
 	if (lock) {
+		/*
+		 * mmap_sem is currently held for write. Downgrade the write
+		 * lock to a read lock so that other faults, mmap scans, ...
+		 * can proceed while we fault in all pages.
+		 */
+		downgrade_write(&mm->mmap_sem);
+
 		ret = __mlock_vma_pages_range(vma, start, end);
 		if (ret > 0) {
 			mm->locked_vm -= ret;
 			ret = 0;
 		}
-	} else
+		/*
+		 * Need to reacquire mmap_sem in write mode, as our callers
+		 * expect this. We have no support for atomically upgrading
+		 * a sem to write, so we need to check for ranges while sem
+		 * is unlocked.
+		 */
+		up_read(&mm->mmap_sem);
+		/* vma can change or disappear */
+		down_write(&mm->mmap_sem);
+		*prev = find_vma(mm, start);
+		/* non-NULL *prev must contain @start, but need to check @end */
+		if (!(*prev) || end > (*prev)->vm_end)
+			ret = -EAGAIN;
+	} else {
+		/*
+		 * TODO: for unlocking, pages will already be resident, so
+		 * we don't need to wait for allocations/reclaim/pagein, ...
+		 * However, unlocking a very large region can still take a
+		 * while. Should we downgrade the semaphore for both lock
+		 * AND unlock?
+		 */
 		__munlock_vma_pages_range(vma, start, end);
+	}
 
 out:
 	*prev = vma;
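
Both hunks above follow the same locking pattern: hold mmap_sem for write, downgrade to read mode before the long page-fault loop, then drop the read lock, retake the write lock, and revalidate the vma (which may have changed or disappeared in between) before returning. The sketch below is an illustrative userspace analogue of that relock-and-revalidate step, not part of the patch: struct region, region_lookup() and populate_region() are hypothetical stand-ins for the kernel's vma tree, find_vma() and __mlock_vma_pages_range(), and since pthread rwlocks have no equivalent of downgrade_write(), the lookup has to be repeated after every reacquisition.

/* Illustrative only -- a hypothetical userspace stand-in, not kernel code. */
#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct region { unsigned long start, end; };

static pthread_rwlock_t region_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct region the_region = { 0x1000, 0x100000 };

/* stand-in for find_vma(): region covering @start, or NULL */
static struct region *region_lookup(unsigned long start)
{
	return (start >= the_region.start && start < the_region.end)
		? &the_region : NULL;
}

/* stand-in for __mlock_vma_pages_range(): slow work done under the read lock */
static long populate_region(struct region *r, unsigned long start,
			    unsigned long end)
{
	(void)r;
	return (long)((end - start) / 4096);
}

long lock_region_range(unsigned long start, unsigned long end)
{
	struct region *r;
	long ret = -EAGAIN;

	/* writer section: equivalent of the caller holding mmap_sem for write */
	pthread_rwlock_wrlock(&region_lock);
	r = region_lookup(start);
	if (!r || end > r->end) {
		pthread_rwlock_unlock(&region_lock);
		return -ENOMEM;
	}
	/*
	 * No atomic downgrade with pthread rwlocks, so drop the write lock
	 * and retake it for read; the region must then be looked up again
	 * because it may have changed while no lock was held.
	 */
	pthread_rwlock_unlock(&region_lock);

	pthread_rwlock_rdlock(&region_lock);
	r = region_lookup(start);
	if (r && end <= r->end)
		ret = populate_region(r, start, end);
	pthread_rwlock_unlock(&region_lock);

	/* reacquire for write and revalidate, mirroring the -EAGAIN path above */
	pthread_rwlock_wrlock(&region_lock);
	r = region_lookup(start);
	if (!r || end > r->end)
		ret = -EAGAIN;
	pthread_rwlock_unlock(&region_lock);
	return ret;
}

In the patch itself, downgrade_write() keeps the semaphore held continuously across the transition, which is why the vma only needs to be revalidated after the up_read()/down_write() pair rather than before the fault loop as well.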