Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	57
1 file changed, 27 insertions, 30 deletions
diff --git a/mm/mlock.c b/mm/mlock.c
index bd6f0e466f6c..8f4e2dfceec1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -25,7 +25,7 @@ int can_do_mlock(void)
 {
 	if (capable(CAP_IPC_LOCK))
 		return 1;
-	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
+	if (rlimit(RLIMIT_MEMLOCK) != 0)
 		return 1;
 	return 0;
 }
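For reference, rlimit() is the accessor this series introduces so callers stop open-coding current->signal->rlim[...].rlim_cur. A minimal sketch of the helpers, assuming they live in <linux/sched.h> as in the mainline series:

	/* Sketch of the rlimit helpers added alongside this patch. */
	static inline unsigned long task_rlimit(const struct task_struct *tsk,
			unsigned int limit)
	{
		/* Read the soft limit exactly once; setrlimit() may race. */
		return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
	}

	static inline unsigned long rlimit(unsigned int limit)
	{
		/* Soft limit of the current task. */
		return task_rlimit(current, limit);
	}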
@@ -88,25 +88,22 @@ void mlock_vma_page(struct page *page)
 	}
 }
 
-/*
- * called from munlock()/munmap() path with page supposedly on the LRU.
- *
- * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
- * [in try_to_munlock()] and then attempt to isolate the page.  We must
- * isolate the page to keep others from messing with its unevictable
- * and mlocked state while trying to munlock.  However, we pre-clear the
- * mlocked state anyway as we might lose the isolation race and we might
- * not get another chance to clear PageMlocked.  If we successfully
- * isolate the page and try_to_munlock() detects other VM_LOCKED vmas
- * mapping the page, it will restore the PageMlocked state, unless the page
- * is mapped in a non-linear vma.  So, we go ahead and SetPageMlocked(),
- * perhaps redundantly.
- * If we lose the isolation race, and the page is mapped by other VM_LOCKED
- * vmas, we'll detect this in vmscan--via try_to_munlock() or try_to_unmap()
- * either of which will restore the PageMlocked state by calling
- * mlock_vma_page() above, if it can grab the vma's mmap sem.
+/**
+ * munlock_vma_page - munlock a vma page
+ * @page - page to be unlocked
+ *
+ * called from munlock()/munmap() path with page supposedly on the LRU.
+ * When we munlock a page, because the vma where we found the page is being
+ * munlock()ed or munmap()ed, we want to check whether other vmas hold the
+ * page locked so that we can leave it on the unevictable lru list and not
+ * bother vmscan with it.  However, to walk the page's rmap list in
+ * try_to_munlock() we must isolate the page from the LRU.  If some other
+ * task has removed the page from the LRU, we won't be able to do that.
+ * So we clear the PageMlocked as we might not get another chance.  If we
+ * can't isolate the page, we leave it for putback_lru_page() and vmscan
+ * [page_referenced()/try_to_unmap()] to deal with.
  */
-static void munlock_vma_page(struct page *page)
+void munlock_vma_page(struct page *page)
 {
 	BUG_ON(!PageLocked(page));
 
@@ -117,18 +114,18 @@ static void munlock_vma_page(struct page *page)
 		/*
 		 * did try_to_unlock() succeed or punt?
 		 */
-		if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
+		if (ret != SWAP_MLOCK)
 			count_vm_event(UNEVICTABLE_PGMUNLOCKED);
 
 		putback_lru_page(page);
 	} else {
 		/*
-		 * We lost the race. let try_to_unmap() deal
-		 * with it.  At least we get the page state and
-		 * mlock stats right.  However, page is still on
-		 * the noreclaim list.  We'll fix that up when
-		 * the page is eventually freed or we scan the
-		 * noreclaim list.
+		 * Some other task has removed the page from the LRU.
+		 * putback_lru_page() will take care of removing the
+		 * page from the unevictable list, if necessary.
+		 * vmscan [page_referenced()] will move the page back
+		 * to the unevictable list if some other vma has it
+		 * mlocked.
 		 */
 		if (PageUnevictable(page))
 			count_vm_event(UNEVICTABLE_PGSTRANDED);
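To make the two hunks above easier to follow, here is the overall control flow of munlock_vma_page() after this change, reconstructed as a sketch; the lines not shown in the hunks are taken from the mainline function of this era and should be treated as approximate:

	void munlock_vma_page(struct page *page)
	{
		BUG_ON(!PageLocked(page));

		/* Pre-clear Mlocked: we might not get another chance. */
		if (TestClearPageMlocked(page)) {
			dec_zone_page_state(page, NR_MLOCK);
			if (!isolate_lru_page(page)) {
				int ret = try_to_munlock(page);
				/* did try_to_unlock() succeed or punt? */
				if (ret != SWAP_MLOCK)
					count_vm_event(UNEVICTABLE_PGMUNLOCKED);
				putback_lru_page(page);
			} else {
				/* Lost the isolation race; vmscan fixes it up. */
				if (PageUnevictable(page))
					count_vm_event(UNEVICTABLE_PGSTRANDED);
				else
					count_vm_event(UNEVICTABLE_PGMUNLOCKED);
			}
		}
	}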
@@ -490,7 +487,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 	locked = len >> PAGE_SHIFT;
 	locked += current->mm->locked_vm;
 
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
 
 	/* check against resource limits */
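The check that follows this hunk compares the two page counts and still lets CAP_IPC_LOCK bypass the limit. A sketch of those lines, which sit just past the hunk boundary and are quoted here approximately:

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);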
@@ -553,7 +550,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 
 	down_write(&current->mm->mmap_sem);
 
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
 
 	ret = -ENOMEM;
@@ -587,7 +584,7 @@ int user_shm_lock(size_t size, struct user_struct *user)
 	int allowed = 0;
 
 	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	if (lock_limit == RLIM_INFINITY)
 		allowed = 1;
 	lock_limit >>= PAGE_SHIFT;
@@ -621,12 +618,12 @@ int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
 
 	down_write(&mm->mmap_sem);
 
-	lim = rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
+	lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
 	vm = mm->total_vm + pgsz;
 	if (lim < vm)
 		goto out;
 
-	lim = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+	lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
 	vm = mm->locked_vm + pgsz;
 	if (lim < vm)
 		goto out;
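The ACCESS_ONCE() wrappers in this last hunk guard against concurrent updates to the rlim values (e.g. by setrlimit()): forcing a single read keeps the compiler from re-fetching rlim_cur between the shift and the comparison. Its classic definition, from <linux/compiler.h> of this era:

	/* Treat x as volatile for this access so the compiler emits exactly one load/store. */
	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))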