author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2015-07-06 16:18:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2015-07-09 14:12:48 -0400
commit     6b7339f4c31ad69c8e9c0b2859276e22cf72176d
tree       ee208aa15f03b4c7336e56d6767dfecb87be9d7b
parent     883a2dfd6f13eca5aab30f0bcc9a6f1e2f983b1e
mm: avoid setting up anonymous pages into file mapping
Reading the page fault handler code I've noticed that under the right
circumstances the kernel would map anonymous pages into file mappings: if
the VMA doesn't have vm_ops->fault() and the VMA wasn't fully populated
on ->mmap(), the kernel would handle a page fault on a not-populated pte
with do_anonymous_page().

Let's change the page fault handler to use do_anonymous_page() only on
anonymous VMAs (->vm_ops == NULL) and make sure that the VMA is not
shared.

For file mappings without vm_ops->fault() or a shared VMA without vm_ops,
a page fault on a pte_none() entry would lead to SIGBUS.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Willy Tarreau <w@1wt.eu>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
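
As a rough userspace illustration of the scenario the commit message describes
(this sketch is not part of the patch, and the device path is hypothetical):
a MAP_SHARED mapping backed by a driver that neither pre-populates the range
in its ->mmap() nor installs vm_ops->fault. Before this change, writing to an
untouched page of such a mapping was quietly served by do_anonymous_page();
afterwards the access is answered with SIGBUS.

/*
 * Hypothetical reproduction sketch (not from the commit): map a device
 * whose ->mmap() neither populates the range nor installs a ->fault
 * handler, then touch an unpopulated page.  Before this patch the fault
 * was served by do_anonymous_page(); after it, the access gets SIGBUS.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/example", O_RDWR);	/* hypothetical device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p[0] = 1;	/* fault on a pte_none() entry in a shared file mapping */
	printf("no SIGBUS: page was populated or anonymous\n");

	munmap(p, 4096);
	close(fd);
	return 0;
}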
-rw-r--r--  mm/memory.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a84fbb772034..388dcf9aa283 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2670,6 +2670,10 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	pte_unmap(page_table);
 
+	/* File mapping without ->vm_ops ? */
+	if (vma->vm_flags & VM_SHARED)
+		return VM_FAULT_SIGBUS;
+
 	/* Check if we need to add a guard page to the stack */
 	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGSEGV;
@@ -3099,6 +3103,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
 	pte_unmap(page_table);
+	/* The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */
+	if (!vma->vm_ops->fault)
+		return VM_FAULT_SIGBUS;
 	if (!(flags & FAULT_FLAG_WRITE))
 		return do_read_fault(mm, vma, address, pmd, pgoff, flags,
 				orig_pte);
@@ -3244,13 +3251,12 @@ static int handle_pte_fault(struct mm_struct *mm,
 	barrier();
 	if (!pte_present(entry)) {
 		if (pte_none(entry)) {
-			if (vma->vm_ops) {
-				if (likely(vma->vm_ops->fault))
-					return do_fault(mm, vma, address, pte,
-							pmd, flags, entry);
-			}
-			return do_anonymous_page(mm, vma, address,
-						 pte, pmd, flags);
+			if (vma->vm_ops)
+				return do_fault(mm, vma, address, pte, pmd,
+						flags, entry);
+
+			return do_anonymous_page(mm, vma, address, pte, pmd,
+						 flags);
 		}
 		return do_swap_page(mm, vma, address,
 					pte, pmd, flags, entry);
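
Taken together, the hunks route a fault on a pte_none() entry as sketched
below. This is a condensed paraphrase of the patched code above, not verbatim
kernel source; the helper name is made up purely for illustration.

#include <linux/mm.h>

/*
 * Paraphrase of the post-patch behaviour: handle_pte_fault() now keys
 * purely on vma->vm_ops, and the new SIGBUS checks live inside
 * do_fault() and do_anonymous_page() respectively.
 */
static int pte_none_fault_disposition(struct vm_area_struct *vma)
{
	if (vma->vm_ops) {
		/* File-backed VMA: second hunk, inside do_fault() */
		if (!vma->vm_ops->fault)
			return VM_FAULT_SIGBUS;
		return 0;	/* do_fault() proceeds as before */
	}

	/* Anonymous VMA: first hunk, inside do_anonymous_page() */
	if (vma->vm_flags & VM_SHARED)
		return VM_FAULT_SIGBUS;
	return 0;		/* do_anonymous_page() proceeds as before */
}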