author     Linus Torvalds <torvalds@linux-foundation.org>    2010-08-14 14:44:56 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-08-14 14:44:56 -0400
commit     11ac552477e32835cb6970bf0a70c210807f5673
tree       959521ee3e217da81b08209df0f0db760e1efdb8 /mm/memory.c
parent     92fa5bd9a946b6e7aab6764e7312e4e3d9bed295
mm: fix page table unmap for stack guard page properly
We do in fact need to unmap the page table _before_ doing the whole
stack guard page logic, because if that mapping is needed at all (mainly
on 32-bit x86 with PAE and CONFIG_HIGHPTE, but other architectures may
use it too) it is done with a kmap_atomic/kunmap_atomic pair.
Those kmaps create an atomic region in which we cannot allocate memory.
However, the stack expand code needs to call anon_vma_prepare() and
vma_lock_anon_vma(), and neither can be called from an atomic region.
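To make the ordering concrete, here is a rough sketch of the broken versus
fixed sequence (illustrative only, simplified from the hunk below; on this
kernel check_stack_guard_page() can end up in expand_stack(), which is where
anon_vma_prepare() gets called):

	/*
	 * Sketch only, not the real do_anonymous_page(): with CONFIG_HIGHPTE
	 * the caller's pte_offset_map() was a kmap_atomic(), so until the
	 * matching pte_unmap() we are in an atomic region and must not sleep.
	 */

	/* Broken ordering: expand_stack() may allocate with GFP_KERNEL and sleep */
	if (check_stack_guard_page(vma, address) < 0) {		/* still atomic: bad */
		pte_unmap(page_table);
		return VM_FAULT_SIGBUS;
	}

	/* Fixed ordering: leave the atomic region first, then expand the stack */
	pte_unmap(page_table);					/* kunmap_atomic() */
	if (check_stack_guard_page(vma, address) < 0)		/* may now sleep safely */
		return VM_FAULT_SIGBUS;
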
Now, a better model might actually be to do the anon_vma_prepare() when
_creating_ a VM_GROWSDOWN segment, and not have to worry about any of
this at page fault time. But in the meantime, this is the
straightforward fix for the issue.
See https://bugzilla.kernel.org/show_bug.cgi?id=16588 for details.
Reported-by: Wylda <wylda@volny.cz>
Reported-by: Sedat Dilek <sedat.dilek@gmail.com>
Reported-by: Mike Pagano <mpagano@gentoo.org>
Reported-by: François Valenduc <francois.valenduc@tvcablenet.be>
Tested-by: Ed Tomlinson <edt@aei.ca>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Greg KH <gregkh@suse.de>
Cc: stable@kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memory.c')
-rw-r--r--   mm/memory.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 9b3b73f4ae9c..b6e5fd23cc5a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2792,24 +2792,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	pte_t entry;
 
-	if (check_stack_guard_page(vma, address) < 0) {
-		pte_unmap(page_table);
+	pte_unmap(page_table);
+
+	/* Check if we need to add a guard page to the stack */
+	if (check_stack_guard_page(vma, address) < 0)
 		return VM_FAULT_SIGBUS;
-	}
 
+	/* Use the zero-page for reads */
 	if (!(flags & FAULT_FLAG_WRITE)) {
 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
 						vma->vm_page_prot));
-		ptl = pte_lockptr(mm, pmd);
-		spin_lock(ptl);
+		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 		if (!pte_none(*page_table))
 			goto unlock;
 		goto setpte;
 	}
 
 	/* Allocate our own private page. */
-	pte_unmap(page_table);
-
 	if (unlikely(anon_vma_prepare(vma)))
 		goto oom;
 	page = alloc_zeroed_user_highpage_movable(vma, address);
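
Note the second part of the change: because page_table is now unmapped up
front, the zero-page read path can no longer just take the PTE lock with
pte_lockptr()/spin_lock(); it re-maps and locks in one step with
pte_offset_map_lock(), so *page_table is valid again when it is tested with
pte_none(). Likewise the old pte_unmap() before the private-page allocation
is gone, since the unmap already happened at the top of the function.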