diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-12 20:54:33 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-08-12 20:54:33 -0400 |
commit | 320b2b8de12698082609ebbc1a17165727f4c893 (patch) | |
tree | bb62fe1ba3bb8bf68ff1fd44e613ece9c9581c36 /mm/memory.c | |
parent | 2069601b3f0ea38170d4b509b89f3ca0a373bdc1 (diff) |
mm: keep a guard page below a grow-down stack segment
This is a rather minimally invasive patch to solve the problem of the
user stack growing into a memory mapped area below it. Whenever we fill
the first page of the stack segment, expand the segment down by one
page.
Now, admittedly some odd application might _want_ the stack to grow down
into the preceding memory mapping, and so we may at some point need to
make this a process tunable (some people might also want to have more
than a single page of guarding), but let's try the minimal approach
first.
Tested with a trivial application that maps a single page just below the
stack, and then starts recursing. Without this, we will get a SIGSEGV
_after_ the stack has smashed the mapping. With this patch, we'll get a
nice SIGBUS just as the stack touches the page just above the mapping.
Requested-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r-- | mm/memory.c | 23 |
1 files changed, 23 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index 858829d06a92..9606ceb3c165 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2760,6 +2760,26 @@ out_release: | |||
2760 | } | 2760 | } |
2761 | 2761 | ||
2762 | /* | 2762 | /* |
2763 | * This is like a special single-page "expand_downwards()", | ||
2764 | * except we must first make sure that 'address-PAGE_SIZE' | ||
2765 | * doesn't hit another vma. | ||
2766 | * | ||
2767 | * The "find_vma()" will do the right thing even if we wrap | ||
2768 | */ | ||
2769 | static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) | ||
2770 | { | ||
2771 | address &= PAGE_MASK; | ||
2772 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { | ||
2773 | address -= PAGE_SIZE; | ||
2774 | if (find_vma(vma->vm_mm, address) != vma) | ||
2775 | return -ENOMEM; | ||
2776 | |||
2777 | expand_stack(vma, address); | ||
2778 | } | ||
2779 | return 0; | ||
2780 | } | ||
2781 | |||
2782 | /* | ||
2763 | * We enter with non-exclusive mmap_sem (to exclude vma changes, | 2783 | * We enter with non-exclusive mmap_sem (to exclude vma changes, |
2764 | * but allow concurrent faults), and pte mapped but not yet locked. | 2784 | * but allow concurrent faults), and pte mapped but not yet locked. |
2765 | * We return with mmap_sem still held, but pte unmapped and unlocked. | 2785 | * We return with mmap_sem still held, but pte unmapped and unlocked. |
@@ -2772,6 +2792,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2772 | spinlock_t *ptl; | 2792 | spinlock_t *ptl; |
2773 | pte_t entry; | 2793 | pte_t entry; |
2774 | 2794 | ||
2795 | if (check_stack_guard_page(vma, address) < 0) | ||
2796 | return VM_FAULT_SIGBUS; | ||
2797 | |||
2775 | if (!(flags & FAULT_FLAG_WRITE)) { | 2798 | if (!(flags & FAULT_FLAG_WRITE)) { |
2776 | entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), | 2799 | entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), |
2777 | vma->vm_page_prot)); | 2800 | vma->vm_page_prot)); |