author     Kees Cook <keescook@chromium.org>    2016-06-07 14:05:33 -0400
committer  Kees Cook <keescook@chromium.org>    2016-07-26 17:41:47 -0400
commit     f5509cc18daa7f82bcc553be70df2117c8eedc16 (patch)
tree       648605cc96e4ac412a9f5201468795574997d9bb
parent     0f60a8efe4005ab5e65ce000724b04d4ca04a199 (diff)
mm: Hardened usercopy
This is the start of porting PAX_USERCOPY into the mainline kernel. This
is the first set of features, controlled by CONFIG_HARDENED_USERCOPY. The
work is based on code by PaX Team and Brad Spengler, and an earlier port
from Casey Schaufler. Additional non-slab page tests are from Rik van Riel.
This patch contains the logic for validating several conditions when
performing copy_to_user() and copy_from_user() on the kernel object
being copied to/from:
- address range doesn't wrap around
- address range isn't NULL or zero-allocated (with a non-zero copy size)
- if on the slab allocator:
  - copy size must not exceed the object size (when the check is
    implemented in the allocator, which appears in subsequent patches)
- otherwise, object must not span page allocations (excepting Reserved
and CMA ranges)
- if on the stack:
  - object must not extend before/after the current process stack
  - object must be contained by a valid stack frame (when there is
    arch/build support for identifying stack frames)
- object must not overlap with kernel text
Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
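
As a concrete illustration of the bug class the slab check targets (not part
of this patch; all names below are hypothetical):

	struct session {
		char name[16];
		u32 secret;
	};

	static long session_read(struct session *s, void __user *ubuf,
				 unsigned long len)
	{
		/*
		 * 'len' is user-controlled. Without HARDENED_USERCOPY, a
		 * len larger than the kmalloc'd object silently leaks
		 * adjacent heap memory to userspace. With this series
		 * (once the allocator implements __check_heap_object()),
		 * the oversized copy is rejected and the kernel BUG()s.
		 */
		if (copy_to_user(ubuf, s->name, len))
			return -EFAULT;
		return len;
	}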
-rw-r--r--  include/linux/slab.h        |  12 +
-rw-r--r--  include/linux/thread_info.h |  15 +
-rw-r--r--  mm/Makefile                 |   4 +
-rw-r--r--  mm/usercopy.c               | 268 ++++++++++++++++++++
-rw-r--r--  security/Kconfig            |  28 ++
5 files changed, 327 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index aeb3e6d00a66..96a16a3fb7cb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -155,6 +155,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
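
The allocator-side __check_heap_object() implementations land in subsequent
patches of this series. As a rough sketch of the contract only (assuming
fixed-size objects packed back to back; real allocators must account for
metadata, red zones, and layout):

	/* Hypothetical allocator hook: returning NULL allows the copy. */
	const char *__check_heap_object(const void *ptr, unsigned long n,
					struct page *page)
	{
		struct kmem_cache *cache = page->slab_cache;
		unsigned long offset = (ptr - page_address(page)) % cache->size;

		/* Allow only copies that stay inside a single object. */
		if (offset + n <= cache->size)
			return NULL;

		/* Report the offending cache by name. */
		return cache->name;
	}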
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 3d5c80b4391d..f24b99eac969 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -155,6 +155,21 @@ static inline int arch_within_stack_frames(const void * const stack,
 }
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+				bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{
+	__check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
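
An architecture that selects HAVE_ARCH_HARDENED_USERCOPY is expected to call
check_object_size() from its low-level copy routines. A minimal sketch of
that wiring (the helper names follow the common arch pattern of the time and
are illustrative, not part of this patch):

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		if (access_ok(VERIFY_READ, from, n)) {
			/* Validate the kernel destination before copying. */
			check_object_size(to, n, false);
			n = __copy_from_user(to, from, n);
		}
		return n;
	}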
diff --git a/mm/Makefile b/mm/Makefile
index 78c6f7dedb83..32d37247c7e5 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
 KCOV_INSTRUMENT_mmzone.o := n
 KCOV_INSTRUMENT_vmstat.o := n
 
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
 mmu-y			:= nommu.o
 mmu-$(CONFIG_MMU)	:= gup.o highmem.o memory.o mincore.o \
 			   mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -99,3 +102,4 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
 obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
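
For context: GCC's -Wframe-address triggers on __builtin_frame_address()
with a nonzero argument, which inline arch_within_stack_frames()
implementations may use to walk caller frames (an assumption about the arch
code, which arrives in later patches of the series):

	/* GCC warns that a nonzero argument here is unsafe; the usage
	 * in the stack-frame walker is known and accepted. */
	const void *parent = __builtin_frame_address(1);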
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 000000000000..8ebae91a6b55
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,268 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+	BAD_STACK = -1,
+	NOT_STACK = 0,
+	GOOD_FRAME,
+	GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ *	NOT_STACK: not at all on the stack
+ *	GOOD_FRAME: fully within a valid stack frame
+ *	GOOD_STACK: fully on the stack (when can't do frame-checking)
+ *	BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+	const void * const stack = task_stack_page(current);
+	const void * const stackend = stack + THREAD_SIZE;
+	int ret;
+
+	/* Object is not on the stack at all. */
+	if (obj + len <= stack || stackend <= obj)
+		return NOT_STACK;
+
+	/*
+	 * Reject: object partially overlaps the stack (passing the
+	 * check above means at least one end is within the stack,
+	 * so if this check fails, the other end is outside the stack).
+	 */
+	if (obj < stack || stackend < obj + len)
+		return BAD_STACK;
+
+	/* Check if object is safely within a valid frame. */
+	ret = arch_within_stack_frames(stack, stackend, obj, len);
+	if (ret)
+		return ret;
+
+	return GOOD_STACK;
+}
+
+static void report_usercopy(const void *ptr, unsigned long len,
+			    bool to_user, const char *type)
+{
+	pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+		to_user ? "exposure" : "overwrite",
+		to_user ? "from" : "to", ptr, type ? : "unknown", len);
+	/*
+	 * For greater effect, it would be nice to do do_group_exit(),
+	 * but BUG() actually hooks all the lock-breaking and per-arch
+	 * Oops code, so that is used here instead.
+	 */
+	BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+		unsigned long high)
+{
+	unsigned long check_low = (uintptr_t)ptr;
+	unsigned long check_high = check_low + n;
+
+	/* Does not overlap if entirely above or entirely below. */
+	if (check_low >= high || check_high < low)
+		return false;
+
+	return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+						   unsigned long n)
+{
+	unsigned long textlow = (unsigned long)_stext;
+	unsigned long texthigh = (unsigned long)_etext;
+	unsigned long textlow_linear, texthigh_linear;
+
+	if (overlaps(ptr, n, textlow, texthigh))
+		return "<kernel text>";
+
+	/*
+	 * Some architectures have virtual memory mappings with a secondary
+	 * mapping of the kernel text, i.e. there is more than one virtual
+	 * kernel address that points to the kernel image. This usually
+	 * happens when there is a separate linear physical memory mapping,
+	 * in which case __pa() is not just the reverse of __va(). This can
+	 * be detected and checked:
+	 */
+	textlow_linear = (unsigned long)__va(__pa(textlow));
+	/* No different mapping: we're done. */
+	if (textlow_linear == textlow)
+		return NULL;
+
+	/* Check the secondary mapping... */
+	texthigh_linear = (unsigned long)__va(__pa(texthigh));
+	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+		return "<linear kernel text>";
+
+	return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+	/* Reject if object wraps past end of memory. */
+	if (ptr + n < ptr)
+		return "<wrapped address>";
+
+	/* Reject if NULL or ZERO-allocation. */
+	if (ZERO_OR_NULL_PTR(ptr))
+		return "<null>";
+
+	return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+					    bool to_user)
+{
+	struct page *page, *endpage;
+	const void *end = ptr + n - 1;
+	bool is_reserved, is_cma;
+
+	/*
+	 * Some architectures (arm64) return true for virt_addr_valid() on
+	 * vmalloced addresses. Work around this by checking for vmalloc
+	 * first.
+	 */
+	if (is_vmalloc_addr(ptr))
+		return NULL;
+
+	if (!virt_addr_valid(ptr))
+		return NULL;
+
+	page = virt_to_head_page(ptr);
+
+	/* Check slab allocator for flags and size. */
+	if (PageSlab(page))
+		return __check_heap_object(ptr, n, page);
+
+	/*
+	 * Sometimes the kernel data regions are not marked Reserved (see
+	 * check below). And sometimes [_sdata,_edata) does not cover
+	 * rodata and/or bss, so check each range explicitly.
+	 */
+
+	/* Allow reads of kernel rodata region (if not marked as Reserved). */
+	if (ptr >= (const void *)__start_rodata &&
+	    end <= (const void *)__end_rodata) {
+		if (!to_user)
+			return "<rodata>";
+		return NULL;
+	}
+
+	/* Allow kernel data region (if not marked as Reserved). */
+	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+		return NULL;
+
+	/* Allow kernel bss region (if not marked as Reserved). */
+	if (ptr >= (const void *)__bss_start &&
+	    end <= (const void *)__bss_stop)
+		return NULL;
+
+	/* Is the object wholly within one base page? */
+	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
+		return NULL;
+
+	/* Allow if start and end are inside the same compound page. */
+	endpage = virt_to_head_page(end);
+	if (likely(endpage == page))
+		return NULL;
+
+	/*
+	 * Reject if range is entirely either Reserved (i.e. special or
+	 * device memory), or CMA. Otherwise, reject since the object spans
+	 * several independently allocated pages.
+	 */
+	is_reserved = PageReserved(page);
+	is_cma = is_migrate_cma_page(page);
+	if (!is_reserved && !is_cma)
+		goto reject;
+
+	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+		page = virt_to_head_page(ptr);
+		if (is_reserved && !PageReserved(page))
+			goto reject;
+		if (is_cma && !is_migrate_cma_page(page))
+			goto reject;
+	}
+
+	return NULL;
+
+reject:
+	return "<spans multiple pages>";
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+	const char *err;
+
+	/* Skip all tests if size is zero. */
+	if (!n)
+		return;
+
+	/* Check for invalid addresses. */
+	err = check_bogus_address(ptr, n);
+	if (err)
+		goto report;
+
+	/* Check for bad heap object. */
+	err = check_heap_object(ptr, n, to_user);
+	if (err)
+		goto report;
+
+	/* Check for bad stack object. */
+	switch (check_stack_object(ptr, n)) {
+	case NOT_STACK:
+		/* Object is not touching the current process stack. */
+		break;
+	case GOOD_FRAME:
+	case GOOD_STACK:
+		/*
+		 * Object is either in the correct frame (when it
+		 * is possible to check) or just generally on the
+		 * process stack (when frame checking not available).
+		 */
+		return;
+	default:
+		err = "<process stack>";
+		goto report;
+	}
+
+	/* Check for object in kernel to avoid text exposure. */
+	err = check_kernel_text_object(ptr, n);
+	if (!err)
+		return;
+
+report:
+	report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
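
To see the check fire once an allocator implements __check_heap_object(), a
test along these lines will do (illustrative; the LKDTM module later grew
official USERCOPY_* tests of this form):

	static noinline void try_bad_usercopy(char __user *ubuf)
	{
		char *buf = kmalloc(32, GFP_KERNEL);	/* 32-byte slab object */

		if (!buf)
			return;
		/*
		 * Copying 64 bytes out of a 32-byte allocation is rejected:
		 * report_usercopy() prints "kernel memory exposure attempt
		 * detected" and BUG()s the calling process.
		 */
		if (copy_to_user(ubuf, buf, 64))
			pr_info("copy failed\n");
		kfree(buf);
	}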
diff --git a/security/Kconfig b/security/Kconfig
index 176758cdfa57..df28f2b6f3e1 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -118,6 +118,34 @@ config LSM_MMAP_MIN_ADDR
 	  this low address space will need the permission specific to the
 	  systems running LSM.
 
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+	bool
+	help
+	  The heap allocator implements __check_heap_object() for
+	  validating memory ranges against heap object sizes in
+	  support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+	bool
+	help
+	  The architecture supports CONFIG_HARDENED_USERCOPY by
+	  calling check_object_size() just before performing the
+	  userspace copies in the low level implementation of
+	  copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+	bool "Harden memory copies between kernel and userspace"
+	depends on HAVE_ARCH_HARDENED_USERCOPY
+	select BUG
+	help
+	  This option checks for obviously wrong memory regions when
+	  copying memory to/from the kernel (via copy_to_user() and
+	  copy_from_user() functions) by rejecting memory ranges that
+	  are larger than the specified heap object, span multiple
+	  separately allocated pages, are not on the process stack,
+	  or are part of the kernel text. This kills entire classes
+	  of heap overflow exploits and similar kernel memory exposures.
+
 source security/selinux/Kconfig
 source security/smack/Kconfig
 source security/tomoyo/Kconfig
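
For reference, enabling the feature end to end requires opt-in selects from
an architecture and an allocator, which arrive in the follow-up patches of
this series; schematically (placement illustrative):

	# arch/$ARCH/Kconfig
	config ARM64
		select HAVE_ARCH_HARDENED_USERCOPY

	# init/Kconfig (allocator)
	config SLUB
		select HAVE_HARDENED_USERCOPY_ALLOCATOR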