author     Michel Lespinasse <walken@google.com>             2013-02-22 19:32:44 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:11 -0500
commit     cea10a19b7972a1954c4a2d05a7de8db48b444fb (patch)
tree       694b3c906259cfbfc7b7cb1b0eb507ecf0d1d63c /mm/memory.c
parent     c22c0d6344c362b1dde5d8e160d3d07536aca120 (diff)
mm: directly use __mlock_vma_pages_range() in find_extend_vma()
In find_extend_vma(), we don't need mlock_vma_pages_range() to verify
the vma type - we know we're working with a stack. So, we can call
directly into __mlock_vma_pages_range(), and remove the last
make_pages_present() call site.
Note that we don't use mm_populate() here, so we can't release the
mmap_sem while allocating new stack pages. This is deemed acceptable,
because the stack vmas grow by a bounded number of pages at a time, and
these are anon pages so we don't have to read from disk to populate
them.
Signed-off-by: Michel Lespinasse <walken@google.com>
Acked-by: Rik van Riel <riel@redhat.com>
Tested-by: Andy Lutomirski <luto@amacapital.net>
Cc: Greg Ungerer <gregungerer@westnet.com.au>
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c | 24
1 file changed, 0 insertions, 24 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 0abd07097ec6..7837ceacf090 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3824,30 +3824,6 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-int make_pages_present(unsigned long addr, unsigned long end)
-{
-	int ret, len, write;
-	struct vm_area_struct * vma;
-
-	vma = find_vma(current->mm, addr);
-	if (!vma)
-		return -ENOMEM;
-	/*
-	 * We want to touch writable mappings with a write fault in order
-	 * to break COW, except for shared mappings because these don't COW
-	 * and we would not want to dirty them for nothing.
-	 */
-	write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
-	BUG_ON(addr >= end);
-	BUG_ON(end > vma->vm_end);
-	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
-	ret = get_user_pages(current, current->mm, addr,
-			len, write, 0, NULL, NULL);
-	if (ret < 0)
-		return ret;
-	return ret == len ? 0 : -EFAULT;
-}
-
 #if !defined(__HAVE_ARCH_GATE_AREA)
 
 #if defined(AT_SYSINFO_EHDR)
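
For context, the caller side of this change lives in mm/mmap.c, which is outside this diffstat. Below is a rough sketch of the find_extend_vma() pattern the commit message describes, assuming the __mlock_vma_pages_range(vma, start, end, nonblocking) signature of this era; it illustrates the approach and is not the actual mm/mmap.c hunk.

/*
 * Illustrative sketch only -- not the actual mm/mmap.c change.
 * find_extend_vma() already knows the vma it extends is a stack, so it
 * can skip the vma-type checks in mlock_vma_pages_range() and call the
 * low-level helper directly.  mmap_sem stays held for the whole call
 * (no mm_populate()-style drop/retake), which is acceptable because the
 * stack grows by a bounded number of anon pages at a time.
 */
struct vm_area_struct *
find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	unsigned long start;

	addr &= PAGE_MASK;
	vma = find_vma(mm, addr);
	if (!vma)
		return NULL;
	if (vma->vm_start <= addr)
		return vma;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		return NULL;
	start = vma->vm_start;
	if (expand_stack(vma, addr))
		return NULL;
	if (vma->vm_flags & VM_LOCKED)
		__mlock_vma_pages_range(vma, addr, start, NULL);
	return vma;
}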