 include/linux/bitops.h  |   7
 include/linux/uaccess.h |  70
 lib/strnlen_user.c      |   8
 lib/test_user_copy.c    | 136
 lib/usercopy.c          |  55
 5 files changed, 263 insertions(+), 13 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cf074bce3eb3..c94a9ff9f082 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -4,6 +4,13 @@
 #include <asm/types.h>
 #include <linux/bits.h>
 
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+# define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
 #define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
 
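
For reference, the little-endian form of aligned_byte_mask() simply sets the low 'n' bytes of an unsigned long. A minimal userspace sketch (illustration only, not part of the patch, assuming a 64-bit little-endian target) that spells out a few values:

/* Sketch: expected aligned_byte_mask() values on 64-bit little-endian. */
#include <assert.h>
#include <stdio.h>

#define aligned_byte_mask(n) ((1UL << 8*(n))-1)   /* little-endian form only */

int main(void)
{
        assert(aligned_byte_mask(1) == 0xffUL);
        assert(aligned_byte_mask(2) == 0xffffUL);
        assert(aligned_byte_mask(7) == 0x00ffffffffffffffUL);
        printf("aligned_byte_mask(3) = %#lx\n", aligned_byte_mask(3)); /* 0xffffff */
        return 0;
}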
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 70bbdc38dc37..e47d0522a1f4 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 
 #endif	/* ARCH_HAS_NOCACHE_UACCESS */
 
+extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+
+/**
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+                      size_t usize)
+{
+        size_t size = min(ksize, usize);
+        size_t rest = max(ksize, usize) - size;
+
+        /* Deal with trailing bytes. */
+        if (usize < ksize) {
+                memset(dst + size, 0, rest);
+        } else if (usize > ksize) {
+                int ret = check_zeroed_user(src + size, rest);
+                if (ret <= 0)
+                        return ret ?: -E2BIG;
+        }
+        /* Copy the interoperable parts of the struct. */
+        if (copy_from_user(dst, src, size))
+                return -EFAULT;
+        return 0;
+}
+
 /*
  * probe_kernel_read(): safely attempt to read from a location
  * @dst: pointer to the buffer that shall take the data
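
To show the other half of the extensibility contract described in the kerneldoc above, here is a hedged userspace-side sketch. The struct layout, FOO_SIZE_VER0, and the foobar() entry point are hypothetical, mirroring the example in the comment: the caller zeroes the whole struct and passes its own sizeof(), so an older kernel (which sees @usize > @ksize) finds only zeroed trailing bytes and does not fail with -E2BIG.

/*
 * Sketch only: hypothetical userspace caller for the foobar() example above.
 * The field names and FOO_SIZE_VER0 are illustrative, not a real ABI.
 */
#include <stdint.h>
#include <string.h>

struct foo {                    /* v1 layout: v0 plus one appended field */
        uint32_t flags;         /* v0 */
        uint32_t timeout;       /* v0 */
        uint64_t extra;         /* new in v1; zero keeps the old behaviour */
};
#define FOO_SIZE_VER0   8       /* size of the original v0 struct */

int call_foobar(int (*sys_foobar)(const struct foo *, size_t))
{
        struct foo arg;

        /* Zero everything so fields unknown to an older kernel stay zero. */
        memset(&arg, 0, sizeof(arg));
        arg.flags = 1;

        /*
         * Pass the caller's own struct size. A newer kernel copies it
         * verbatim (usize == ksize); an older kernel sees usize > ksize,
         * finds the trailing bytes zeroed, and accepts the call instead of
         * returning -E2BIG.
         */
        return sys_foobar(&arg, sizeof(arg));
}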
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 28ff554a1be8..6c0005d5dd5c 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -3,16 +3,10 @@
 #include <linux/export.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
+#include <linux/bitops.h>
 
 #include <asm/word-at-a-time.h>
 
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
-#else
-# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
 /*
  * Do a strnlen, return length of string *with* final '\0'.
  * 'count' is the user-supplied count, while 'max' is the
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 67bcd5dfd847..950ee88cd6ac 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,14 +31,133 @@
 # define TEST_U64
 #endif
 
-#define test(condition, msg)          \
-({                                    \
-        int cond = (condition);       \
-        if (cond)                     \
-                pr_warn("%s\n", msg); \
-        cond;                         \
+#define test(condition, msg, ...)                                      \
+({                                                                     \
+        int cond = (condition);                                        \
+        if (cond)                                                      \
+                pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);    \
+        cond;                                                          \
 })
 
+static bool is_zeroed(void *from, size_t size)
+{
+        return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+        int ret = 0;
+        size_t start, end, i;
+        size_t zero_start = size / 4;
+        size_t zero_end = size - zero_start;
+
+        /*
+         * We conduct a series of check_nonzero_user() tests on a block of memory
+         * with the following byte-pattern (trying every possible [start,end]
+         * pair):
+         *
+         *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+         *
+         * And we verify that check_nonzero_user() acts identically to memchr_inv().
+         */
+
+        memset(kmem, 0x0, size);
+        for (i = 1; i < zero_start; i += 2)
+                kmem[i] = 0xff;
+        for (i = zero_end; i < size; i += 2)
+                kmem[i] = 0xff;
+
+        ret |= test(copy_to_user(umem, kmem, size),
+                    "legitimate copy_to_user failed");
+
+        for (start = 0; start <= size; start++) {
+                for (end = start; end <= size; end++) {
+                        size_t len = end - start;
+                        int retval = check_zeroed_user(umem + start, len);
+                        int expected = is_zeroed(kmem + start, len);
+
+                        ret |= test(retval != expected,
+                                    "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+                                    retval, expected, start, end);
+                }
+        }
+
+        return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+                                      size_t size)
+{
+        int ret = 0;
+        char *umem_src = NULL, *expected = NULL;
+        size_t ksize, usize;
+
+        umem_src = kmalloc(size, GFP_KERNEL);
+        if (ret |= test(umem_src == NULL, "kmalloc failed"))
+                goto out_free;
+
+        expected = kmalloc(size, GFP_KERNEL);
+        if (ret |= test(expected == NULL, "kmalloc failed"))
+                goto out_free;
+
+        /* Fill umem with a fixed byte pattern. */
+        memset(umem_src, 0x3e, size);
+        ret |= test(copy_to_user(umem, umem_src, size),
+                    "legitimate copy_to_user failed");
+
+        /* Check basic case -- (usize == ksize). */
+        ksize = size;
+        usize = size;
+
+        memcpy(expected, umem_src, ksize);
+
+        memset(kmem, 0x0, size);
+        ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                    "copy_struct_from_user(usize == ksize) failed");
+        ret |= test(memcmp(kmem, expected, ksize),
+                    "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+        /* Old userspace case -- (usize < ksize). */
+        ksize = size;
+        usize = size / 2;
+
+        memcpy(expected, umem_src, usize);
+        memset(expected + usize, 0x0, ksize - usize);
+
+        memset(kmem, 0x0, size);
+        ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                    "copy_struct_from_user(usize < ksize) failed");
+        ret |= test(memcmp(kmem, expected, ksize),
+                    "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+        /* New userspace (-E2BIG) case -- (usize > ksize). */
+        ksize = size / 2;
+        usize = size;
+
+        memset(kmem, 0x0, size);
+        ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+                    "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+        /* New userspace (success) case -- (usize > ksize). */
+        ksize = size / 2;
+        usize = size;
+
+        memcpy(expected, umem_src, ksize);
+        ret |= test(clear_user(umem + ksize, usize - ksize),
+                    "legitimate clear_user failed");
+
+        memset(kmem, 0x0, size);
+        ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                    "copy_struct_from_user(usize > ksize) failed");
+        ret |= test(memcmp(kmem, expected, ksize),
+                    "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+        kfree(expected);
+        kfree(umem_src);
+        return ret;
+}
+
 static int __init test_user_copy_init(void)
 {
         int ret = 0;
@@ -106,6 +225,11 @@ static int __init test_user_copy_init(void)
 #endif
 #undef test_legit
 
+        /* Test usage of check_nonzero_user(). */
+        ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+        /* Test usage of copy_struct_from_user(). */
+        ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
        /*
         * Invalid usage: none of these copies should succeed.
         */
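
The comment in test_check_nonzero_user() above is the crux of that test: an asymmetric 00/ff pattern swept over every possible [start,end] window, so all alignments and lengths get exercised. A small userspace sketch of the same sweep (illustration only; a plain byte loop stands in for memchr_inv(), and there is of course no check_zeroed_user() to compare against here):

/* Sketch: the 00/ff pattern and exhaustive window sweep from the test above. */
#include <stdio.h>
#include <string.h>

#define SIZE 64

static int is_zeroed(const unsigned char *p, size_t len)
{
        for (size_t i = 0; i < len; i++)
                if (p[i])
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char buf[SIZE] = { 0 };
        size_t zero_start = SIZE / 4, zero_end = SIZE - SIZE / 4;
        unsigned long zero_windows = 0, total = 0;

        /* [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ] */
        for (size_t i = 1; i < zero_start; i += 2)
                buf[i] = 0xff;
        for (size_t i = zero_end; i < SIZE; i += 2)
                buf[i] = 0xff;

        /* Every possible [start, end] pair, exactly as the kernel test does. */
        for (size_t start = 0; start <= SIZE; start++)
                for (size_t end = start; end <= SIZE; end++, total++)
                        zero_windows += is_zeroed(buf + start, end - start);

        printf("%lu of %lu windows are all-zero\n", zero_windows, total);
        return 0;
}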
diff --git a/lib/usercopy.c b/lib/usercopy.c
index c2bfbcaeb3dc..cbb4d9ec00f2 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/uaccess.h>
+#include <linux/bitops.h>
 
 /* out-of-line parts */
 
@@ -31,3 +32,57 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 }
 EXPORT_SYMBOL(_copy_to_user);
 #endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ *  * 0: There were non-zero bytes present in the buffer.
+ *  * 1: The buffer was full of zero bytes.
+ *  * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+        unsigned long val;
+        uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+        if (unlikely(size == 0))
+                return 1;
+
+        from -= align;
+        size += align;
+
+        if (!user_access_begin(from, size))
+                return -EFAULT;
+
+        unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+        if (align)
+                val &= ~aligned_byte_mask(align);
+
+        while (size > sizeof(unsigned long)) {
+                if (unlikely(val))
+                        goto done;
+
+                from += sizeof(unsigned long);
+                size -= sizeof(unsigned long);
+
+                unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+        }
+
+        if (size < sizeof(unsigned long))
+                val &= aligned_byte_mask(size);
+
+done:
+        user_access_end();
+        return (val == 0);
+err_fault:
+        user_access_end();
+        return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
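
To make the word-at-a-time logic above easier to follow outside the kernel, here is a hedged userspace model of the same algorithm, assuming a little-endian host. The unsafe_get_user()/user_access_begin() machinery is replaced by plain loads, and aligned_byte_mask() is the little-endian definition from the bitops.h hunk; everything else mirrors check_zeroed_user() above.

/*
 * Sketch only: userspace model of check_zeroed_user()'s word-at-a-time scan.
 * The buffer is ordinary memory, so the uaccess parts are simply dropped.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define aligned_byte_mask(n) ((1UL << 8*(n)) - 1)       /* little-endian form */

static int check_zeroed(const void *from, size_t size)
{
        unsigned long val;
        uintptr_t align = (uintptr_t)from % sizeof(unsigned long);

        if (size == 0)
                return 1;

        /* Round down to an aligned word and mask off the bytes before 'from'. */
        from = (const char *)from - align;
        size += align;

        memcpy(&val, from, sizeof(val));
        if (align)
                val &= ~aligned_byte_mask(align);

        while (size > sizeof(unsigned long)) {
                if (val)
                        return 0;
                from = (const char *)from + sizeof(unsigned long);
                size -= sizeof(unsigned long);
                memcpy(&val, from, sizeof(val));
        }

        /* Mask off the bytes past the end of the range in the final word. */
        if (size < sizeof(unsigned long))
                val &= aligned_byte_mask(size);

        return val == 0;
}

int main(void)
{
        /* Word-aligned backing store so the word-granularity reads stay in bounds. */
        unsigned long words[4] = { 0 };
        unsigned char *buf = (unsigned char *)words;

        assert(check_zeroed(buf + 3, 20) == 1);
        buf[5] = 0xff;
        assert(check_zeroed(buf + 3, 20) == 0); /* non-zero byte inside the range */
        assert(check_zeroed(buf + 6, 10) == 1); /* byte 5 lies outside this range */
        return 0;
}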