author		Kees Cook <keescook@chromium.org>	2018-01-10 17:48:22 -0500
committer	Kees Cook <keescook@chromium.org>	2018-01-15 15:07:45 -0500
commit		f4e6e289cb9cf67885b6b18b9d56d2c3e1c714a1 (patch)
tree		ef4487691b7cc2581baeebedfa3ef4482ecad3fa
parent		b394d468e7d75637e682a9be4a1181b27186c593 (diff)
usercopy: Include offset in hardened usercopy report
This refactors the hardened usercopy code so that failure reporting can
happen within the checking functions instead of at the top level. This
simplifies the return value handling and allows more details and offsets
to be included in the report. Having the offset can be much more helpful
in understanding hardened usercopy bugs.

Signed-off-by: Kees Cook <keescook@chromium.org>
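As a rough sketch of the convention change (stand-alone illustrative C, not
the kernel sources; the usercopy_abort() below is a simplified stand-in that
only mimics the real helper's (name, detail, to_user, offset, len) signature):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdbool.h>

	/* Simplified stand-in for the kernel's __noreturn usercopy_abort(). */
	static void usercopy_abort(const char *name, const char *detail,
				   bool to_user, unsigned long offset,
				   unsigned long len)
	{
		fprintf(stderr,
			"usercopy: %s attempt on %s (%s), offset %lu, size %lu\n",
			to_user ? "exposure" : "overwrite",
			name, detail ? detail : "?", offset, len);
		exit(1);
	}

	/* Old convention: return a name on failure, NULL on success. */
	static const char *check_object_old(unsigned long offset, unsigned long n,
					    unsigned long object_size)
	{
		if (offset <= object_size && n <= object_size - offset)
			return NULL;		/* pass */
		return "example-cache";		/* fail: only the name survives */
	}

	/* New convention: report (and abort) in place, while offset is known. */
	static void check_object_new(unsigned long offset, unsigned long n,
				     unsigned long object_size, bool to_user)
	{
		if (offset <= object_size && n <= object_size - offset)
			return;			/* pass */
		usercopy_abort("example object", "example-cache", to_user,
			       offset, n);
	}

	int main(void)
	{
		const char *err = check_object_old(8, 64, 32);
		if (err)
			printf("old-style report: bad usercopy involving '%s'\n", err);

		check_object_new(8, 64, 32, true);	/* reports offset and size */
		return 0;
	}

Under the old convention the offset computed inside a checker was thrown away
before the report was printed; under the new one it travels straight into the
abort path.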
-rw-r--r--	include/linux/slab.h	12
-rw-r--r--	mm/slab.c		8
-rw-r--r--	mm/slub.c		14
-rw-r--r--	mm/usercopy.c		95
4 files changed, 57 insertions(+), 72 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 50697a1d6621..2dbeccdcb76b 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -167,15 +167,11 @@ void kzfree(const void *);
 size_t ksize(const void *);
 
 #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page);
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user);
 #else
-static inline const char *__check_heap_object(const void *ptr,
-					      unsigned long n,
-					      struct page *page)
-{
-	return NULL;
-}
+static inline void __check_heap_object(const void *ptr, unsigned long n,
+					struct page *page, bool to_user) { }
 #endif
 
 /*
diff --git a/mm/slab.c b/mm/slab.c
index 183e996dde5f..b2beb2cc15e2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4397,8 +4397,8 @@ module_init(slab_proc_init);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *cachep;
 	unsigned int objnr;
@@ -4414,9 +4414,9 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 
 	/* Allow address range falling entirely within object size. */
 	if (offset <= cachep->object_size && n <= cachep->object_size - offset)
-		return NULL;
+		return;
 
-	return cachep->name;
+	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
diff --git a/mm/slub.c b/mm/slub.c
index cfd56e5a35fb..bcd22332300a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3818,8 +3818,8 @@ EXPORT_SYMBOL(__kmalloc_node);
  * Returns NULL if check passes, otherwise const char * to name of cache
  * to indicate an error.
  */
-const char *__check_heap_object(const void *ptr, unsigned long n,
-				struct page *page)
+void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
+			 bool to_user)
 {
 	struct kmem_cache *s;
 	unsigned long offset;
@@ -3831,7 +3831,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 
 	/* Reject impossible pointers. */
 	if (ptr < page_address(page))
-		return s->name;
+		usercopy_abort("SLUB object not in SLUB page?!", NULL,
+			       to_user, 0, n);
 
 	/* Find offset within object. */
 	offset = (ptr - page_address(page)) % s->size;
@@ -3839,15 +3840,16 @@ const char *__check_heap_object(const void *ptr, unsigned long n,
 	/* Adjust for redzone and reject if within the redzone. */
 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
 		if (offset < s->red_left_pad)
-			return s->name;
+			usercopy_abort("SLUB object in left red zone",
+				       s->name, to_user, offset, n);
 		offset -= s->red_left_pad;
 	}
 
 	/* Allow address range falling entirely within object size. */
 	if (offset <= object_size && n <= object_size - offset)
-		return NULL;
+		return;
 
-	return s->name;
+	usercopy_abort("SLUB object", s->name, to_user, offset, n);
 }
 #endif /* CONFIG_HARDENED_USERCOPY */
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 8006baa4caac..a562dd094ace 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -86,10 +86,10 @@ void __noreturn usercopy_abort(const char *name, const char *detail,
 }
 
 /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */
-static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
-		unsigned long high)
+static bool overlaps(const unsigned long ptr, unsigned long n,
+			unsigned long low, unsigned long high)
 {
-	unsigned long check_low = (uintptr_t)ptr;
+	const unsigned long check_low = ptr;
 	unsigned long check_high = check_low + n;
 
 	/* Does not overlap if entirely above or entirely below. */
@@ -100,15 +100,15 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
 }
 
 /* Is this address range in the kernel text area? */
-static inline const char *check_kernel_text_object(const void *ptr,
-						unsigned long n)
+static inline void check_kernel_text_object(const unsigned long ptr,
+					    unsigned long n, bool to_user)
 {
 	unsigned long textlow = (unsigned long)_stext;
 	unsigned long texthigh = (unsigned long)_etext;
 	unsigned long textlow_linear, texthigh_linear;
 
 	if (overlaps(ptr, n, textlow, texthigh))
-		return "<kernel text>";
+		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);
 
 	/*
 	 * Some architectures have virtual memory mappings with a secondary
@@ -121,32 +121,30 @@ static inline const char *check_kernel_text_object(const void *ptr,
 	textlow_linear = (unsigned long)lm_alias(textlow);
 	/* No different mapping: we're done. */
 	if (textlow_linear == textlow)
-		return NULL;
+		return;
 
 	/* Check the secondary mapping... */
 	texthigh_linear = (unsigned long)lm_alias(texthigh);
 	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
-		return "<linear kernel text>";
-
-	return NULL;
+		usercopy_abort("linear kernel text", NULL, to_user,
+			       ptr - textlow_linear, n);
 }
 
-static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
+				       bool to_user)
 {
 	/* Reject if object wraps past end of memory. */
-	if ((unsigned long)ptr + n < (unsigned long)ptr)
-		return "<wrapped address>";
+	if (ptr + n < ptr)
+		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);
 
 	/* Reject if NULL or ZERO-allocation. */
 	if (ZERO_OR_NULL_PTR(ptr))
-		return "<null>";
-
-	return NULL;
+		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
 /* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline const char *check_page_span(const void *ptr, unsigned long n,
-				struct page *page, bool to_user)
+static inline void check_page_span(const void *ptr, unsigned long n,
+				   struct page *page, bool to_user)
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
@@ -163,28 +161,28 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	if (ptr >= (const void *)__start_rodata &&
 	    end <= (const void *)__end_rodata) {
 		if (!to_user)
-			return "<rodata>";
-		return NULL;
+			usercopy_abort("rodata", NULL, to_user, 0, n);
+		return;
 	}
 
 	/* Allow kernel data region (if not marked as Reserved). */
 	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return NULL;
+		return;
 
 	/* Allow kernel bss region (if not marked as Reserved). */
 	if (ptr >= (const void *)__bss_start &&
 	    end <= (const void *)__bss_stop)
-		return NULL;
+		return;
 
 	/* Is the object wholly within one base page? */
 	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
 		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return NULL;
+		return;
 
 	/* Allow if fully inside the same compound (__GFP_COMP) page. */
 	endpage = virt_to_head_page(end);
 	if (likely(endpage == page))
-		return NULL;
+		return;
 
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
@@ -194,36 +192,37 @@ static inline const char *check_page_span(const void *ptr, unsigned long n,
 	is_reserved = PageReserved(page);
 	is_cma = is_migrate_cma_page(page);
 	if (!is_reserved && !is_cma)
-		return "<spans multiple pages>";
+		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
 
 	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
 		page = virt_to_head_page(ptr);
 		if (is_reserved && !PageReserved(page))
-			return "<spans Reserved and non-Reserved pages>";
+			usercopy_abort("spans Reserved and non-Reserved pages",
+				       NULL, to_user, 0, n);
 		if (is_cma && !is_migrate_cma_page(page))
-			return "<spans CMA and non-CMA pages>";
+			usercopy_abort("spans CMA and non-CMA pages", NULL,
+				       to_user, 0, n);
 	}
 #endif
-
-	return NULL;
 }
 
-static inline const char *check_heap_object(const void *ptr, unsigned long n,
-					    bool to_user)
+static inline void check_heap_object(const void *ptr, unsigned long n,
+				     bool to_user)
 {
 	struct page *page;
 
 	if (!virt_addr_valid(ptr))
-		return NULL;
+		return;
 
 	page = virt_to_head_page(ptr);
 
-	/* Check slab allocator for flags and size. */
-	if (PageSlab(page))
-		return __check_heap_object(ptr, n, page);
-
-	/* Verify object does not incorrectly span multiple pages. */
-	return check_page_span(ptr, n, page, to_user);
+	if (PageSlab(page)) {
+		/* Check slab allocator for flags and size. */
+		__check_heap_object(ptr, n, page, to_user);
+	} else {
+		/* Verify object does not incorrectly span multiple pages. */
+		check_page_span(ptr, n, page, to_user);
+	}
 }
 
 /*
@@ -234,21 +233,15 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n,
  */
 void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 {
-	const char *err;
-
 	/* Skip all tests if size is zero. */
 	if (!n)
 		return;
 
 	/* Check for invalid addresses. */
-	err = check_bogus_address(ptr, n);
-	if (err)
-		goto report;
+	check_bogus_address((const unsigned long)ptr, n, to_user);
 
 	/* Check for bad heap object. */
-	err = check_heap_object(ptr, n, to_user);
-	if (err)
-		goto report;
+	check_heap_object(ptr, n, to_user);
 
 	/* Check for bad stack object. */
 	switch (check_stack_object(ptr, n)) {
@@ -264,16 +257,10 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user)
 	 */
 		return;
 	default:
-		err = "<process stack>";
-		goto report;
+		usercopy_abort("process stack", NULL, to_user, 0, n);
 	}
 
 	/* Check for object in kernel to avoid text exposure. */
-	err = check_kernel_text_object(ptr, n);
-	if (!err)
-		return;
-
-report:
-	usercopy_abort(err, NULL, to_user, 0, n);
+	check_kernel_text_object((const unsigned long)ptr, n, to_user);
 }
 EXPORT_SYMBOL(__check_object_size);