author	Michel Lespinasse <walken@google.com>	2013-02-22 19:32:44 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:11 -0500
commitcea10a19b7972a1954c4a2d05a7de8db48b444fb (patch)
tree694b3c906259cfbfc7b7cb1b0eb507ecf0d1d63c /mm/mlock.c
parentc22c0d6344c362b1dde5d8e160d3d07536aca120 (diff)
mm: directly use __mlock_vma_pages_range() in find_extend_vma()
In find_extend_vma(), we don't need mlock_vma_pages_range() to verify the vma type - we know we're working with a stack. So, we can call directly into __mlock_vma_pages_range(), and remove the last make_pages_present() call site.

Note that we don't use mm_populate() here, so we can't release the mmap_sem while allocating new stack pages. This is deemed acceptable, because the stack vmas grow by a bounded number of pages at a time, and these are anon pages so we don't have to read from disk to populate them.

Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
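The mm/mmap.c side of this commit is not shown below (the diffstat is limited to mm/mlock.c), but per the description above, the resulting find_extend_vma() call site would look roughly like the following sketch; the control flow around expand_stack() is inferred from the commit message, not quoted from the actual hunk:

/*
 * Sketch (grow-down case): after the stack vma has been expanded,
 * fault in the newly exposed pages directly if the vma is mlocked.
 * A stack vma is a plain anon vma, so mlock_vma_pages_range()'s
 * special-vma filtering is unnecessary here.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma, *prev;

	addr &= PAGE_MASK;
	vma = find_vma_prev(mm, addr, &prev);
	if (vma && (vma->vm_start <= addr))
		return vma;
	if (!prev || expand_stack(prev, addr))
		return NULL;
	if (prev->vm_flags & VM_LOCKED)
		__mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
	return prev;
}

Note the NULL nonblocking argument: mmap_sem is held across the whole population and is never dropped, which is exactly the trade-off the message above calls out.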
Diffstat (limited to 'mm/mlock.c')
-rw-r--r--	mm/mlock.c	57
1 file changed, 3 insertions(+), 54 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index a296a49865df..569400a5d079 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -155,9 +155,8 @@ void munlock_vma_page(struct page *page)
  *
  * vma->vm_mm->mmap_sem must be held for at least read.
  */
-static long __mlock_vma_pages_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end,
-		int *nonblocking)
+long __mlock_vma_pages_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long addr = start;
@@ -202,56 +201,6 @@ static int __mlock_posix_error_return(long retval)
 	return retval;
 }
 
-/**
- * mlock_vma_pages_range() - mlock pages in specified vma range.
- * @vma - the vma containing the specified address range
- * @start - starting address in @vma to mlock
- * @end - end address [+1] in @vma to mlock
- *
- * For mmap()/mremap()/expansion of mlocked vma.
- *
- * return 0 on success for "normal" vmas.
- *
- * return number of pages [> 0] to be removed from locked_vm on success
- * of "special" vmas.
- */
-long mlock_vma_pages_range(struct vm_area_struct *vma,
-	unsigned long start, unsigned long end)
-{
-	int nr_pages = (end - start) / PAGE_SIZE;
-	BUG_ON(!(vma->vm_flags & VM_LOCKED));
-
-	/*
-	 * filter unlockable vmas
-	 */
-	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-		goto no_mlock;
-
-	if (!((vma->vm_flags & VM_DONTEXPAND) ||
-			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current->mm))) {
-
-		__mlock_vma_pages_range(vma, start, end, NULL);
-
-		/* Hide errors from mmap() and other callers */
-		return 0;
-	}
-
-	/*
-	 * User mapped kernel pages or huge pages:
-	 * make these pages present to populate the ptes, but
-	 * fall thru' to reset VM_LOCKED--no need to unlock, and
-	 * return nr_pages so these don't get counted against task's
-	 * locked limit. huge pages are already counted against
-	 * locked vm limit.
-	 */
-	make_pages_present(start, end);
-
-no_mlock:
-	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
-	return nr_pages;	/* error or pages NOT mlocked */
-}
-
 /*
  * munlock_vma_pages_range() - munlock all pages in the vma range.
  * @vma - vma containing range to be munlock()ed.
@@ -303,7 +252,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
  *
  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
  * munlock is a no-op. However, for some special vmas, we go ahead and
- * populate the ptes via make_pages_present().
+ * populate the ptes.
  *
  * For vmas that pass the filters, merge/split as appropriate.
  */
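Since __mlock_vma_pages_range() loses its static qualifier in the first hunk, its new caller in mm/mmap.c needs a visible declaration. That header change is outside this diffstat; presumably it lands in mm/internal.h along these lines (a sketch, not a quote of the actual hunk):

/* mm/internal.h -- assumed export of the now non-static helper */
extern long __mlock_vma_pages_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, int *nonblocking);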