aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--mm/memory.c23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 858829d06a92..9606ceb3c165 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2760,6 +2760,26 @@ out_release:
2760} 2760}
2761 2761
2762/* 2762/*
2763 * This is like a special single-page "expand_downwards()",
2764 * except we must first make sure that 'address-PAGE_SIZE'
2765 * doesn't hit another vma.
2766 *
2767 * The "find_vma()" will do the right thing even if we wrap
2768 */
2769static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
2770{
2771 address &= PAGE_MASK;
2772 if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
2773 address -= PAGE_SIZE;
2774 if (find_vma(vma->vm_mm, address) != vma)
2775 return -ENOMEM;
2776
2777 expand_stack(vma, address);
2778 }
2779 return 0;
2780}
2781
2782/*
2763 * We enter with non-exclusive mmap_sem (to exclude vma changes, 2783 * We enter with non-exclusive mmap_sem (to exclude vma changes,
2764 * but allow concurrent faults), and pte mapped but not yet locked. 2784 * but allow concurrent faults), and pte mapped but not yet locked.
2765 * We return with mmap_sem still held, but pte unmapped and unlocked. 2785 * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2772,6 +2792,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2772 spinlock_t *ptl; 2792 spinlock_t *ptl;
2773 pte_t entry; 2793 pte_t entry;
2774 2794
2795 if (check_stack_guard_page(vma, address) < 0)
2796 return VM_FAULT_SIGBUS;
2797
2775 if (!(flags & FAULT_FLAG_WRITE)) { 2798 if (!(flags & FAULT_FLAG_WRITE)) {
2776 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), 2799 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
2777 vma->vm_page_prot)); 2800 vma->vm_page_prot));