author	Hugh Dickins <hughd@google.com>	2017-06-19 07:03:24 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-06-19 09:50:20 -0400
commit	1be7107fbe18eed3e319a6c3e83c78254b693acb (patch)
tree	6e5f778ac6673bfbd586d271c291807063c7a4bb
parent	1132d5e7b64445b3fa3cb982e6723e33318f7655 (diff)
mm: larger stack guard gap, between vmas
The stack guard page is a useful feature to reduce the risk of the stack smashing into a different mapping. We have been using a single-page gap, which is sufficient to prevent the stack from sitting adjacent to a different mapping. But this seems to be insufficient in the light of stack usage in userspace: e.g. glibc uses alloca() allocations as large as 64kB in many commonly used functions, and others use constructs like gid_t buffer[NGROUPS_MAX], which is 256kB, or stack strings sized by MAX_ARG_STRLEN.

This is especially dangerous for suid binaries when the stack size limit defaults to unlimited, because those applications can be tricked into consuming a large portion of the stack, and a single glibc call could jump over the guard page. These attacks are not theoretical, unfortunately.

Make those attacks less probable by increasing the stack guard gap to 1MB (on systems with 4k pages; but make it depend on the page size, because systems with larger base pages might cap stack allocations in PAGE_SIZE units), which should cover larger alloca() and VLA stack allocations. It is obviously not a full fix, because the problem is somewhat inherent, but it should reduce the attack space a lot.

One could argue that the gap size should be configurable from userspace, but that can be done later, when somebody finds that the new 1MB is wrong for some special-case applications. For now, add a kernel command line option (stack_guard_gap) to specify the stack gap size (in page units).

Implementation-wise, first delete all the old code for the stack guard page: because although we could get away with accounting one extra page in a stack vma, accounting a larger gap can break userspace - case in point, a program run with "ulimit -S -v 20000" failed when the 1MB gap was counted for RLIMIT_AS; similar problems could come with RLIMIT_MLOCK and strict non-overcommit mode.

Instead of keeping the gap inside the stack vma, maintain the stack guard gap as a gap between vmas: using vm_start_gap() in place of vm_start (or vm_end_gap() in place of vm_end if VM_GROWSUP) in just those few places which need to respect the gap - mainly arch_get_unmapped_area(), and the vma tree's subtree_gap support for that.

Original-patch-by: Oleg Nesterov <oleg@redhat.com>
Original-patch-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Tested-by: Helge Deller <deller@gmx.de> # parisc
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
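As a quick illustration of the new check (a standalone sketch in plain C, not kernel code: the page size, addresses and lengths below are made-up assumptions; only the gap arithmetic mirrors the vm_start_gap() helper added by this patch, under which a candidate mapping is accepted only if it ends at or below the stack's start minus the guard gap):

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;				/* assume 4kB pages */
	unsigned long stack_guard_gap = 256UL << page_shift;	/* default gap: 1MB */

	/* Hypothetical VM_GROWSDOWN stack vma starting here: */
	unsigned long stack_start = 0x7ffff0000000UL;
	/* vm_start_gap() analogue: start of the protected region below the stack */
	unsigned long start_gap = stack_start - stack_guard_gap;

	/* Candidate A ends exactly at the gap boundary: acceptable. */
	unsigned long a_addr = 0x7fffefe00000UL, a_len = 0x100000UL;
	/* Candidate B would end inside the guard gap: rejected. */
	unsigned long b_addr = 0x7fffeff80000UL, b_len = 0x10000UL;

	printf("guard gap = %lu bytes\n", stack_guard_gap);
	printf("candidate A %s\n", a_addr + a_len <= start_gap ? "fits" : "rejected");
	printf("candidate B %s\n", b_addr + b_len <= start_gap ? "fits" : "rejected");
	return 0;
}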
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt |   7
-rw-r--r--  arch/arc/mm/mmap.c                              |   2
-rw-r--r--  arch/arm/mm/mmap.c                              |   4
-rw-r--r--  arch/frv/mm/elf-fdpic.c                         |   2
-rw-r--r--  arch/mips/mm/mmap.c                             |   2
-rw-r--r--  arch/parisc/kernel/sys_parisc.c                 |  15
-rw-r--r--  arch/powerpc/mm/hugetlbpage-radix.c             |   2
-rw-r--r--  arch/powerpc/mm/mmap.c                          |   4
-rw-r--r--  arch/powerpc/mm/slice.c                         |   2
-rw-r--r--  arch/s390/mm/mmap.c                             |   4
-rw-r--r--  arch/sh/mm/mmap.c                               |   4
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c                |   4
-rw-r--r--  arch/sparc/mm/hugetlbpage.c                     |   2
-rw-r--r--  arch/tile/mm/hugetlbpage.c                      |   2
-rw-r--r--  arch/x86/kernel/sys_x86_64.c                    |   4
-rw-r--r--  arch/x86/mm/hugetlbpage.c                       |   2
-rw-r--r--  arch/xtensa/kernel/syscall.c                    |   2
-rw-r--r--  fs/hugetlbfs/inode.c                            |   2
-rw-r--r--  fs/proc/task_mmu.c                              |   4
-rw-r--r--  include/linux/mm.h                              |  53
-rw-r--r--  mm/gup.c                                        |   5
-rw-r--r--  mm/memory.c                                     |  38
-rw-r--r--  mm/mmap.c                                       | 149
23 files changed, 152 insertions, 163 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 0f5c3b4347c6..7737ab5d04b2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3811,6 +3811,13 @@
 			expediting. Set to zero to disable automatic
 			expediting.
 
+	stack_guard_gap=	[MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
+
 	stacktrace	[FTRACE]
 			Enabled the stack tracer on boot up.
 
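For reference, boot-time overrides of this parameter might look like the following (illustrative values only, given in page units as described above):

	stack_guard_gap=512	/* 2MB gap with 4kB pages */
	stack_guard_gap=1	/* roughly the old single-page behaviour */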
diff --git a/arch/arc/mm/mmap.c b/arch/arc/mm/mmap.c
index 3e25e8d6486b..2e13683dfb24 100644
--- a/arch/arc/mm/mmap.c
+++ b/arch/arc/mm/mmap.c
@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2239fde10b80..f0701d8d24df 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index da82c25301e7..46aa289c5102 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto success;
 	}
 
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 64dd8bdd92c3..28adeabe851f 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index e5288638a1d9..378a754ca186 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	unsigned long task_size = TASK_SIZE;
 	int do_color_align, last_mmap;
 	struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		else
 			addr = PAGE_ALIGN(addr);
 
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		const unsigned long len, const unsigned long pgoff,
 		const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			goto found_addr;
 	}
 
diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
index 6575b9aabef4..a12e86395025 100644
--- a/arch/powerpc/mm/hugetlbpage-radix.c
+++ b/arch/powerpc/mm/hugetlbpage-radix.c
@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	/*
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 9dbd2a733d6b..0ee6be4f1ba4 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 966b9fccfa66..45f6740dd407 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 	if ((mm->task_size - len) < addr)
 		return 0;
 	vma = find_vma(mm, addr);
-	return (!vma || (addr + len) <= vma->vm_start);
+	return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index b017daed6887..b854b1da281a 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			goto check_asce_limit;
 	}
 
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 08e7af0be4a7..6a1a1297baae 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ef4520efc813..043544d0cda3 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 7c29d38e6b99..88855e383b34 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (task_size - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index cb10153b5c9f..03e5cc4e76e4 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (current->mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 207b8f2582c7..213ddf3e937d 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (end - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 302f43fd9c28..adad702b39cd 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 	if (mm->get_unmapped_area == arch_get_unmapped_area)
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 06937928cb72..74afbf02d07e 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		/* At this point: (!vmm || addr < vmm->vm_end). */
 		if (TASK_SIZE - len < addr)
 			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
+		if (!vmm || addr + len <= vm_start_gap(vmm))
 			return addr;
 		addr = vmm->vm_end;
 		if (flags & MAP_SHARED)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index dde861387a40..d44f5456eb9b 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -200,7 +200,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)))
 			return addr;
 	}
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f0c8b33d99b1..520802da059c 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -300,11 +300,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
 	/* We don't show the stack guard page in /proc/maps */
 	start = vma->vm_start;
-	if (stack_guard_page_start(vma, start))
-		start += PAGE_SIZE;
 	end = vma->vm_end;
-	if (stack_guard_page_end(vma, end))
-		end -= PAGE_SIZE;
 
 	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
 	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b892e95d4929..6f543a47fc92 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1393,12 +1393,6 @@ int clear_page_dirty_for_io(struct page *page);
 
 int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
-/* Is the vma a continuation of the stack vma above it? */
-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
-}
-
 static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 {
 	return !vma->vm_ops;
@@ -1414,28 +1408,6 @@ bool vma_is_shmem(struct vm_area_struct *vma);
 static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
 #endif
 
-static inline int stack_guard_page_start(struct vm_area_struct *vma,
-					     unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSDOWN) &&
-		(vma->vm_start == addr) &&
-		!vma_growsdown(vma->vm_prev, addr);
-}
-
-/* Is the vma a continuation of the stack vma below it? */
-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
-{
-	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
-}
-
-static inline int stack_guard_page_end(struct vm_area_struct *vma,
-					   unsigned long addr)
-{
-	return (vma->vm_flags & VM_GROWSUP) &&
-		(vma->vm_end == addr) &&
-		!vma_growsup(vma->vm_next, addr);
-}
-
 int vma_is_stack_for_current(struct vm_area_struct *vma);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
@@ -2222,6 +2194,7 @@ void page_cache_async_readahead(struct address_space *mapping,
 			pgoff_t offset,
 			unsigned long size);
 
+extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
@@ -2250,6 +2223,30 @@ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * m
 	return vma;
 }
 
+static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_start = vma->vm_start;
+
+	if (vma->vm_flags & VM_GROWSDOWN) {
+		vm_start -= stack_guard_gap;
+		if (vm_start > vma->vm_start)
+			vm_start = 0;
+	}
+	return vm_start;
+}
+
+static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+{
+	unsigned long vm_end = vma->vm_end;
+
+	if (vma->vm_flags & VM_GROWSUP) {
+		vm_end += stack_guard_gap;
+		if (vm_end < vma->vm_end)
+			vm_end = -PAGE_SIZE;
+	}
+	return vm_end;
+}
+
 static inline unsigned long vma_pages(struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
diff --git a/mm/gup.c b/mm/gup.c
index b3c7214d710d..576c4df58882 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -387,11 +387,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 	/* mlock all present pages, but do not fault in new pages */
 	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
 		return -ENOENT;
-	/* For mm_populate(), just skip the stack guard page. */
-	if ((*flags & FOLL_POPULATE) &&
-			(stack_guard_page_start(vma, address) ||
-			 stack_guard_page_end(vma, address + PAGE_SIZE)))
-		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (*flags & FOLL_REMOTE)
diff --git a/mm/memory.c b/mm/memory.c
index 2e65df1831d9..bb11c474857e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2855,40 +2855,6 @@ out_release:
 }
 
 /*
- * This is like a special single-page "expand_{down|up}wards()",
- * except we must first make sure that 'address{-|+}PAGE_SIZE'
- * doesn't hit another vma.
- */
-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
-{
-	address &= PAGE_MASK;
-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
-		struct vm_area_struct *prev = vma->vm_prev;
-
-		/*
-		 * Is there a mapping abutting this one below?
-		 *
-		 * That's only ok if it's the same stack mapping
-		 * that has gotten split..
-		 */
-		if (prev && prev->vm_end == address)
-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
-
-		return expand_downwards(vma, address - PAGE_SIZE);
-	}
-	if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
-		struct vm_area_struct *next = vma->vm_next;
-
-		/* As VM_GROWSDOWN but s/below/above/ */
-		if (next && next->vm_start == address + PAGE_SIZE)
-			return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
-
-		return expand_upwards(vma, address + PAGE_SIZE);
-	}
-	return 0;
-}
-
-/*
  * We enter with non-exclusive mmap_sem (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
@@ -2904,10 +2870,6 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (vma->vm_flags & VM_SHARED)
 		return VM_FAULT_SIGBUS;
 
-	/* Check if we need to add a guard page to the stack */
-	if (check_stack_guard_page(vma, vmf->address) < 0)
-		return VM_FAULT_SIGSEGV;
-
 	/*
 	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
 	 * pte_offset_map() on pmds where a huge pmd might be created
diff --git a/mm/mmap.c b/mm/mmap.c
index f82741e199c0..8e07976d5e47 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -183,6 +183,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	unsigned long retval;
 	unsigned long newbrk, oldbrk;
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *next;
 	unsigned long min_brk;
 	bool populate;
 	LIST_HEAD(uf);
@@ -229,7 +230,8 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	}
 
 	/* Check against existing mmap mappings. */
-	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
+	next = find_vma(mm, oldbrk);
+	if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
 		goto out;
 
 	/* Ok, looks good - let it rip. */
@@ -253,10 +255,22 @@ out:
 
 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 {
-	unsigned long max, subtree_gap;
-	max = vma->vm_start;
-	if (vma->vm_prev)
-		max -= vma->vm_prev->vm_end;
+	unsigned long max, prev_end, subtree_gap;
+
+	/*
+	 * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
+	 * allow two stack_guard_gaps between them here, and when choosing
+	 * an unmapped area; whereas when expanding we only require one.
+	 * That's a little inconsistent, but keeps the code here simpler.
+	 */
+	max = vm_start_gap(vma);
+	if (vma->vm_prev) {
+		prev_end = vm_end_gap(vma->vm_prev);
+		if (max > prev_end)
+			max -= prev_end;
+		else
+			max = 0;
+	}
 	if (vma->vm_rb.rb_left) {
 		subtree_gap = rb_entry(vma->vm_rb.rb_left,
 				struct vm_area_struct, vm_rb)->rb_subtree_gap;
@@ -352,7 +366,7 @@ static void validate_mm(struct mm_struct *mm)
 			anon_vma_unlock_read(anon_vma);
 		}
 
-		highest_address = vma->vm_end;
+		highest_address = vm_end_gap(vma);
 		vma = vma->vm_next;
 		i++;
 	}
@@ -541,7 +555,7 @@ void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (vma->vm_next)
 		vma_gap_update(vma->vm_next);
 	else
-		mm->highest_vm_end = vma->vm_end;
+		mm->highest_vm_end = vm_end_gap(vma);
 
 	/*
 	 * vma->vm_prev wasn't known when we followed the rbtree to find the
@@ -856,7 +870,7 @@ again:
 			vma_gap_update(vma);
 		if (end_changed) {
 			if (!next)
-				mm->highest_vm_end = end;
+				mm->highest_vm_end = vm_end_gap(vma);
 			else if (!adjust_next)
 				vma_gap_update(next);
 		}
@@ -941,7 +955,7 @@ again:
 			 * mm->highest_vm_end doesn't need any update
 			 * in remove_next == 1 case.
 			 */
-			VM_WARN_ON(mm->highest_vm_end != end);
+			VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
 		}
 	}
 	if (insert && file)
@@ -1787,7 +1801,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit left subtree if it looks promising */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end >= low_limit && vma->vm_rb.rb_left) {
 			struct vm_area_struct *left =
 				rb_entry(vma->vm_rb.rb_left,
@@ -1798,7 +1812,7 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
 			}
 		}
 
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 check_current:
 		/* Check if current node has a suitable gap */
 		if (gap_start > high_limit)
@@ -1825,8 +1839,8 @@ check_current:
 			vma = rb_entry(rb_parent(prev),
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_left) {
-				gap_start = vma->vm_prev->vm_end;
-				gap_end = vma->vm_start;
+				gap_start = vm_end_gap(vma->vm_prev);
+				gap_end = vm_start_gap(vma);
 				goto check_current;
 			}
 		}
@@ -1890,7 +1904,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 	while (true) {
 		/* Visit right subtree if it looks promising */
-		gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
+		gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
 		if (gap_start <= high_limit && vma->vm_rb.rb_right) {
 			struct vm_area_struct *right =
 				rb_entry(vma->vm_rb.rb_right,
@@ -1903,7 +1917,7 @@ unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
 
 check_current:
 		/* Check if current node has a suitable gap */
-		gap_end = vma->vm_start;
+		gap_end = vm_start_gap(vma);
 		if (gap_end < low_limit)
 			return -ENOMEM;
 		if (gap_start <= high_limit && gap_end - gap_start >= length)
@@ -1929,7 +1943,7 @@ check_current:
 				       struct vm_area_struct, vm_rb);
 			if (prev == vma->vm_rb.rb_right) {
 				gap_start = vma->vm_prev ?
-					vma->vm_prev->vm_end : 0;
+					vm_end_gap(vma->vm_prev) : 0;
 				goto check_current;
 			}
 		}
@@ -1967,7 +1981,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct vm_unmapped_area_info info;
 
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -1978,9 +1992,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2003,7 +2018,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 			  const unsigned long len, const unsigned long pgoff,
 			  const unsigned long flags)
 {
-	struct vm_area_struct *vma;
+	struct vm_area_struct *vma, *prev;
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
@@ -2018,9 +2033,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
+		vma = find_vma_prev(mm, addr, &prev);
 		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		    (!vma || addr + len <= vm_start_gap(vma)) &&
+		    (!prev || addr >= vm_end_gap(prev)))
 			return addr;
 	}
 
@@ -2155,21 +2171,19 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
  * update accounting. This is shared with both the
  * grow-up and grow-down cases.
  */
-static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
+static int acct_stack_growth(struct vm_area_struct *vma,
+			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct rlimit *rlim = current->signal->rlim;
-	unsigned long new_start, actual_size;
+	unsigned long new_start;
 
 	/* address space limit tests */
 	if (!may_expand_vm(mm, vma->vm_flags, grow))
 		return -ENOMEM;
 
 	/* Stack limit test */
-	actual_size = size;
-	if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
-		actual_size -= PAGE_SIZE;
-	if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2207,17 +2221,30 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *next;
+	unsigned long gap_addr;
 	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
 	/* Guard against wrapping around to address 0. */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else
+	address &= PAGE_MASK;
+	address += PAGE_SIZE;
+	if (!address)
 		return -ENOMEM;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address + stack_guard_gap;
+	if (gap_addr < address)
+		return -ENOMEM;
+	next = vma->vm_next;
+	if (next && next->vm_start < gap_addr) {
+		if (!(next->vm_flags & VM_GROWSUP))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2261,7 +2288,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			if (vma->vm_next)
 				vma_gap_update(vma->vm_next);
 			else
-				mm->highest_vm_end = address;
+				mm->highest_vm_end = vm_end_gap(vma);
 			spin_unlock(&mm->page_table_lock);
 
 			perf_event_mmap(vma);
@@ -2282,6 +2309,8 @@ int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
 {
 	struct mm_struct *mm = vma->vm_mm;
+	struct vm_area_struct *prev;
+	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2289,6 +2318,17 @@ int expand_downwards(struct vm_area_struct *vma,
 	if (error)
 		return error;
 
+	/* Enforce stack_guard_gap */
+	gap_addr = address - stack_guard_gap;
+	if (gap_addr > address)
+		return -ENOMEM;
+	prev = vma->vm_prev;
+	if (prev && prev->vm_end > gap_addr) {
+		if (!(prev->vm_flags & VM_GROWSDOWN))
+			return -ENOMEM;
+		/* Check that both stack segments have the same anon_vma? */
+	}
+
 	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
@@ -2343,28 +2383,25 @@ int expand_downwards(struct vm_area_struct *vma,
 	return error;
 }
 
-/*
- * Note how expand_stack() refuses to expand the stack all the way to
- * abut the next virtual mapping, *unless* that mapping itself is also
- * a stack mapping. We want to leave room for a guard page, after all
- * (the guard page itself is not added here, that is done by the
- * actual page faulting logic)
- *
- * This matches the behavior of the guard page logic (see mm/memory.c:
- * check_stack_guard_page()), which only allows the guard page to be
- * removed under these circumstances.
- */
+/* enforced gap between the expanding stack and other mappings. */
+unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
+
+static int __init cmdline_parse_stack_guard_gap(char *p)
+{
+	unsigned long val;
+	char *endptr;
+
+	val = simple_strtoul(p, &endptr, 10);
+	if (!*endptr)
+		stack_guard_gap = val << PAGE_SHIFT;
+
+	return 0;
+}
+__setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
+
 #ifdef CONFIG_STACK_GROWSUP
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *next;
-
-	address &= PAGE_MASK;
-	next = vma->vm_next;
-	if (next && next->vm_start == address + PAGE_SIZE) {
-		if (!(next->vm_flags & VM_GROWSUP))
-			return -ENOMEM;
-	}
 	return expand_upwards(vma, address);
 }
 
@@ -2386,14 +2423,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 #else
 int expand_stack(struct vm_area_struct *vma, unsigned long address)
 {
-	struct vm_area_struct *prev;
-
-	address &= PAGE_MASK;
-	prev = vma->vm_prev;
-	if (prev && prev->vm_end == address) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
-			return -ENOMEM;
-	}
 	return expand_downwards(vma, address);
 }
 
@@ -2491,7 +2520,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
 		vma->vm_prev = prev;
 		vma_gap_update(vma);
 	} else
-		mm->highest_vm_end = prev ? prev->vm_end : 0;
+		mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
 	tail_vma->vm_next = NULL;
 
 	/* Kill the cache */