author	Kees Cook <keescook@chromium.org>	2016-09-07 12:54:34 -0400
committer	Kees Cook <keescook@chromium.org>	2016-09-07 14:33:26 -0400
commit	8e1f74ea02cf4562404c48c6882214821552c13f (patch)
tree	931135d5d5cf00255d596520bd59c4067d669c22
parent	a85d6b8242dc78ef3f4542a0f979aebcbe77fc4e (diff)
usercopy: remove page-spanning test for now
A custom allocator without __GFP_COMP that copies to userspace has been
found in vmw_execbuf_process[1], so this disables the page-span checker
by placing it behind a CONFIG for future work where such things can be
tracked down later.

[1] https://bugzilla.redhat.com/show_bug.cgi?id=1373326

Reported-by: Vinson Lee <vlee@freedesktop.org>
Fixes: f5509cc18daa ("mm: Hardened usercopy")
Signed-off-by: Kees Cook <keescook@chromium.org>
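For reference, a minimal sketch of the allocation pattern the page-span
checker trips over. This is illustrative only, not the vmwgfx code;
example_copy() and its arguments are hypothetical:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/* Hypothetical helper, for illustration only. */
static int example_copy(void __user *dst, unsigned int order)
{
	unsigned long buf;
	int ret = 0;

	/* 2^order contiguous pages, but without __GFP_COMP they are not
	 * marked as a single compound allocation. */
	buf = __get_free_pages(GFP_KERNEL, order);
	if (!buf)
		return -ENOMEM;

	/* The copy crosses several apparently independent pages, which the
	 * page-span check reports as "<spans multiple pages>". */
	if (copy_to_user(dst, (void *)buf, PAGE_SIZE << order))
		ret = -EFAULT;

	free_pages(buf, order);
	return ret;
}

Because the pages are not linked into a compound page, virt_to_head_page()
differs between the start and the end of the object, so the checker cannot
prove that the range belongs to one allocation.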
-rw-r--r--	mm/usercopy.c	61
-rw-r--r--	security/Kconfig	11
2 files changed, 46 insertions, 26 deletions
diff --git a/mm/usercopy.c b/mm/usercopy.c
index a3cc3052f830..089328f2b920 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -134,31 +134,16 @@ static inline const char *check_bogus_address(const void *ptr, unsigned long n)
 	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+					  struct page *page, bool to_user)
 {
-	struct page *page, *endpage;
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
+	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
-	 * Some architectures (arm64) return true for virt_addr_valid() on
-	 * vmalloced addresses. Work around this by checking for vmalloc
-	 * first.
-	 */
-	if (is_vmalloc_addr(ptr))
-		return NULL;
-
-	if (!virt_addr_valid(ptr))
-		return NULL;
-
-	page = virt_to_head_page(ptr);
-
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/*
 	 * Sometimes the kernel data regions are not marked Reserved (see
 	 * check below). And sometimes [_sdata,_edata) does not cover
 	 * rodata and/or bss, so check each range explicitly.
@@ -186,7 +171,7 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return NULL;
 
-	/* Allow if start and end are inside the same compound page. */
+	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
 		return NULL;
@@ -199,20 +184,44 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		goto reject;
+		return "<spans multiple pages>";
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			goto reject;
+			return "<spans Reserved and non-Reserved pages>";
 		if (is_cma && !is_migrate_cma_page(page))
-			goto reject;
+			return "<spans CMA and non-CMA pages>";
 	}
+#endif
 
 	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
 
-reject:
-	return "<spans multiple pages>";
+	/* Verify object does not incorrectly span multiple pages. */
+	return check_page_span(ptr, n, page, to_user);
 }
 
 /*
diff --git a/security/Kconfig b/security/Kconfig
index da10d9b573a4..2dfc0ce4083e 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -147,6 +147,17 @@ config HARDENED_USERCOPY
 	  or are part of the kernel text. This kills entire classes
 	  of heap overflow exploits and similar kernel memory exposures.
 
+config HARDENED_USERCOPY_PAGESPAN
+	bool "Refuse to copy allocations that span multiple pages"
+	depends on HARDENED_USERCOPY
+	depends on !COMPILE_TEST
+	help
+	  When a multi-page allocation is done without __GFP_COMP,
+	  hardened usercopy will reject attempts to copy it. There are,
+	  however, several cases of this in the kernel that have not all
+	  been removed. This config is intended to be used only while
+	  trying to find such users.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
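When hunting for remaining offenders with CONFIG_HARDENED_USERCOPY_PAGESPAN=y,
the usual remedy once a caller is found is to request a compound page so that
every byte of the object resolves to the same head page. Again a sketch,
continuing the hypothetical example_copy() above:

	/* __GFP_COMP links the 2^order pages into one compound page, so
	 * virt_to_head_page() matches across the whole object and
	 * check_page_span() accepts the copy even with the option enabled. */
	buf = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);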