author     Linus Torvalds <torvalds@linux-foundation.org>   2015-04-14 19:49:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-04-14 19:49:17 -0400
commit     1dcf58d6e6e6eb7ec10e9abc56887b040205b06f (patch)
tree       c03e7a25ef13eea62f1547914a76e5c68f3f4c28 /mm/mlock.c
parent     80dcc31fbe55932ac9204daee5f2ebc0c49b6da3 (diff)
parent     e4b0db72be2487bae0e3251c22f82c104f7c1cfd (diff)
Merge branch 'akpm' (patches from Andrew)
Merge first patchbomb from Andrew Morton:
- arch/sh updates
- ocfs2 updates
- kernel/watchdog feature
- about half of mm/
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (122 commits)
Documentation: update arch list in the 'memtest' entry
Kconfig: memtest: update number of test patterns up to 17
arm: add support for memtest
arm64: add support for memtest
memtest: use phys_addr_t for physical addresses
mm: move memtest under mm
mm, hugetlb: abort __get_user_pages if current has been oom killed
mm, mempool: do not allow atomic resizing
memcg: print cgroup information when system panics due to panic_on_oom
mm: numa: remove migrate_ratelimited
mm: fold arch_randomize_brk into ARCH_HAS_ELF_RANDOMIZE
mm: split ET_DYN ASLR from mmap ASLR
s390: redefine randomize_et_dyn for ELF_ET_DYN_BASE
mm: expose arch_mmap_rnd when available
s390: standardize mmap_rnd() usage
powerpc: standardize mmap_rnd() usage
mips: extract logic for mmap_rnd()
arm64: standardize mmap_rnd() usage
x86: standardize mmap_rnd() usage
arm: factor out mmap ASLR into mmap_rnd
...
Diffstat (limited to 'mm/mlock.c')
 -rw-r--r--  mm/mlock.c | 131
 1 file changed, 8 insertions(+), 123 deletions(-)
diff --git a/mm/mlock.c b/mm/mlock.c
index 8a54cd214925..6fd2cf15e868 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -205,62 +205,6 @@ out:
 	return nr_pages - 1;
 }
 
-/**
- * __mlock_vma_pages_range() - mlock a range of pages in the vma.
- * @vma:   target vma
- * @start: start address
- * @end:   end address
- * @nonblocking:
- *
- * This takes care of making the pages present too.
- *
- * return 0 on success, negative error code on error.
- *
- * vma->vm_mm->mmap_sem must be held.
- *
- * If @nonblocking is NULL, it may be held for read or write and will
- * be unperturbed.
- *
- * If @nonblocking is non-NULL, it must be held for read only and may be
- * released. If it's released, *@nonblocking will be set to 0.
- */
-long __mlock_vma_pages_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end, int *nonblocking)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long nr_pages = (end - start) / PAGE_SIZE;
-	int gup_flags;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(end & ~PAGE_MASK);
-	VM_BUG_ON_VMA(start < vma->vm_start, vma);
-	VM_BUG_ON_VMA(end > vma->vm_end, vma);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
-
-	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
-	/*
-	 * We want to touch writable mappings with a write fault in order
-	 * to break COW, except for shared mappings because these don't COW
-	 * and we would not want to dirty them for nothing.
-	 */
-	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
-		gup_flags |= FOLL_WRITE;
-
-	/*
-	 * We want mlock to succeed for regions that have any permissions
-	 * other than PROT_NONE.
-	 */
-	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
-		gup_flags |= FOLL_FORCE;
-
-	/*
-	 * We made sure addr is within a VMA, so the following will
-	 * not result in a stack expansion that recurses back here.
-	 */
-	return __get_user_pages(current, mm, start, nr_pages, gup_flags,
-				NULL, NULL, nonblocking);
-}
-
 /*
  * convert get_user_pages() return value to posix mlock() error
  */
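
The two conditionals deleted above carry the old function's policy: write-fault only private writable mappings (breaking COW up front without dirtying shared pages for nothing), and force the fault for anything that is not PROT_NONE. As a userspace-compilable sketch of that selection, with made-up flag values standing in for the kernel's real FOLL_*/VM_* definitions, the truth table is easy to poke at:

#include <stdio.h>

#define VM_READ		0x1
#define VM_WRITE	0x2
#define VM_EXEC		0x4
#define VM_SHARED	0x8

#define FOLL_TOUCH	0x10
#define FOLL_MLOCK	0x20
#define FOLL_WRITE	0x40
#define FOLL_FORCE	0x80

/* Mirror of the deleted gup_flags selection; flag values are invented. */
static unsigned int pick_gup_flags(unsigned long vm_flags)
{
	unsigned int gup_flags = FOLL_TOUCH | FOLL_MLOCK;

	/* Write-fault private writable mappings to break COW up front;
	 * shared mappings don't COW, so don't dirty them for nothing. */
	if ((vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
		gup_flags |= FOLL_WRITE;

	/* Anything with some permission (i.e. not PROT_NONE) is mlockable. */
	if (vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
		gup_flags |= FOLL_FORCE;

	return gup_flags;
}

int main(void)
{
	printf("private rw: %#x\n", pick_gup_flags(VM_READ | VM_WRITE));
	printf("shared rw:  %#x\n", pick_gup_flags(VM_READ | VM_WRITE | VM_SHARED));
	printf("PROT_NONE:  %#x\n", pick_gup_flags(0));
	return 0;
}

Running it shows FOLL_WRITE set only in the private-writable case and FOLL_FORCE dropped only for PROT_NONE.
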
@@ -596,7 +540,7 @@ success:
 	/*
 	 * vm_flags is protected by the mmap_sem held in write mode.
 	 * It's okay if try_to_unmap_one unmaps a page just after we
-	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
+	 * set VM_LOCKED, populate_vma_page_range will bring it back.
 	 */
 
 	if (lock)
@@ -660,69 +604,6 @@ static int do_mlock(unsigned long start, size_t len, int on)
 	return error;
 }
 
-/*
- * __mm_populate - populate and/or mlock pages within a range of address space.
- *
- * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
- * flags. VMAs must be already marked with the desired vm_flags, and
- * mmap_sem must not be held.
- */
-int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long end, nstart, nend;
-	struct vm_area_struct *vma = NULL;
-	int locked = 0;
-	long ret = 0;
-
-	VM_BUG_ON(start & ~PAGE_MASK);
-	VM_BUG_ON(len != PAGE_ALIGN(len));
-	end = start + len;
-
-	for (nstart = start; nstart < end; nstart = nend) {
-		/*
-		 * We want to fault in pages for [nstart; end) address range.
-		 * Find first corresponding VMA.
-		 */
-		if (!locked) {
-			locked = 1;
-			down_read(&mm->mmap_sem);
-			vma = find_vma(mm, nstart);
-		} else if (nstart >= vma->vm_end)
-			vma = vma->vm_next;
-		if (!vma || vma->vm_start >= end)
-			break;
-		/*
-		 * Set [nstart; nend) to intersection of desired address
-		 * range with the first VMA. Also, skip undesirable VMA types.
-		 */
-		nend = min(end, vma->vm_end);
-		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
-			continue;
-		if (nstart < vma->vm_start)
-			nstart = vma->vm_start;
-		/*
-		 * Now fault in a range of pages. __mlock_vma_pages_range()
-		 * double checks the vma flags, so that it won't mlock pages
-		 * if the vma was already munlocked.
-		 */
-		ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
-		if (ret < 0) {
-			if (ignore_errors) {
-				ret = 0;
-				continue;	/* continue at next VMA */
-			}
-			ret = __mlock_posix_error_return(ret);
-			break;
-		}
-		nend = nstart + ret * PAGE_SIZE;
-		ret = 0;
-	}
-	if (locked)
-		up_read(&mm->mmap_sem);
-	return ret;	/* 0 or negative error code */
-}
-
 SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 {
 	unsigned long locked;
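
The loop removed here is a classic interval walk: clamp the request window [start, end) against each VMA in turn, skip VM_IO/VM_PFNMAP mappings, and retake mmap_sem if gup dropped it. Note that sys_mlock() in the next hunk still calls __mm_populate(), so the function survives outside this file; only its body leaves mm/mlock.c. As a hedged illustration of just the nstart/nend bookkeeping, here is a self-contained userspace analog in which an invented struct range stands in for vm_area_struct:

#include <stdio.h>

/* Invented stand-in for vm_area_struct: [start, end), sorted, non-overlapping. */
struct range {
	unsigned long start, end;
	int skip;			/* analog of VM_IO | VM_PFNMAP */
};

/* Walk the window [start, end) across the ranges, mimicking the removed
 * loop's nstart/nend bookkeeping. Returns bytes "populated". */
static unsigned long populate_window(const struct range *r, int n,
				     unsigned long start, unsigned long end)
{
	unsigned long nstart, nend, total = 0;
	int i = 0;

	for (nstart = start; nstart < end; nstart = nend) {
		/* Find the first range that can still contain nstart
		 * (the kernel uses find_vma()/vma->vm_next here). */
		while (i < n && nstart >= r[i].end)
			i++;
		if (i == n || r[i].start >= end)
			break;
		/* Clamp [nstart, nend) to the intersection with r[i]. */
		nend = end < r[i].end ? end : r[i].end;
		if (r[i].skip)
			continue;	/* advance past it, as the kernel does */
		if (nstart < r[i].start)
			nstart = r[i].start;
		total += nend - nstart;	/* stand-in for faulting pages in */
	}
	return total;
}

int main(void)
{
	const struct range maps[] = {
		{ 0x1000, 0x3000, 0 },
		{ 0x3000, 0x4000, 1 },	/* skipped, like a VM_IO mapping */
		{ 0x6000, 0x8000, 0 },	/* note the hole before it */
	};

	/* Covers [0x2000,0x3000) and [0x6000,0x7000): prints 0x2000. */
	printf("populated %#lx bytes\n", populate_window(maps, 3, 0x2000, 0x7000));
	return 0;
}
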
@@ -750,9 +631,13 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
 	error = do_mlock(start, len, 1);
 
 	up_write(&current->mm->mmap_sem);
-	if (!error)
-		error = __mm_populate(start, len, 0);
-	return error;
+	if (error)
+		return error;
+
+	error = __mm_populate(start, len, 0);
+	if (error)
+		return __mlock_posix_error_return(error);
+	return 0;
 }
 
 SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
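
The new tail of sys_mlock() moves the POSIX error conversion to the caller: do_mlock() errors return as-is, while __mm_populate() failures are now filtered through __mlock_posix_error_return() before reaching userspace. From the caller's side, the contract is the errno set documented in mlock(2). A minimal user program (illustrative, not part of this commit) handling that contract:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 4 * (size_t)page;
	void *buf;

	if (posix_memalign(&buf, (size_t)page, len))
		return 1;

	if (mlock(buf, len) != 0) {
		/* The cases the kernel-side conversion produces: */
		switch (errno) {
		case EPERM:	/* no CAP_IPC_LOCK and RLIMIT_MEMLOCK is 0 */
		case ENOMEM:	/* over RLIMIT_MEMLOCK, or range not mapped */
		case EAGAIN:	/* some or all pages could not be locked */
			fprintf(stderr, "mlock: %s\n", strerror(errno));
			break;
		default:
			fprintf(stderr, "mlock (unexpected): %s\n",
				strerror(errno));
		}
		free(buf);
		return 1;
	}

	puts("locked: pages are resident and unevictable until munlock()");
	munlock(buf, len);
	free(buf);
	return 0;
}
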