author		Linus Torvalds <torvalds@linux-foundation.org>	2019-10-18 18:19:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-10-18 18:19:04 -0400
commit		8eb4b3b0dd9ae3e5399ff902da87d13740a2b70f
tree		b4660fe0fbd97aee24f1e9f7961815c34ce808b6 /lib
parent		7571438a4868e7cb09d698ab52e54f9722020eef
parent		f418dddffc8007945fd5962380ebde770a240cf5
Merge tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux
Pull usercopy test fixlets from Christian Brauner:
 "This contains two improvements for the copy_struct_from_user() tests:

  - a coding style change to get rid of the ugly "if ((ret |= test()))"
    pointed out when pulling the original patchset

  - avoid soft lockups when running the usercopy tests on machines with
    large page sizes by scanning only a 1024 byte region"

* tag 'copy-struct-from-user-v5.4-rc4' of gitolite.kernel.org:pub/scm/linux/kernel/git/brauner/linux:
  usercopy: Avoid soft lockups in test_check_nonzero_user()
  lib: test_user_copy: style cleanup
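The lockup fix works by centering a small scan window on a page boundary
instead of walking the whole buffer. A minimal userspace sketch of that
window arithmetic (PAGE_SIZE is hardcoded to 4096 here as a stand-in for
the kernel macro; the real test applies the same offsets to its
kmem/umem pair):

	#include <stdio.h>

	#define PAGE_SIZE 4096	/* stand-in for the kernel's PAGE_SIZE */

	int main(void)
	{
		size_t size = 1024;
		/* Center the scanned window on the first page boundary. */
		size_t start = PAGE_SIZE - (size / 2);

		printf("scan [%zu, %zu): %zu bytes before the boundary, %zu after\n",
		       start, start + size,
		       (size_t)PAGE_SIZE - start, start + size - PAGE_SIZE);
		return 0;
	}

With PAGE_SIZE = 4096 this prints a window of [3584, 4608), i.e. 512
bytes on each side of the boundary, which is why the fix below first
rejects buffers smaller than 2 * PAGE_SIZE.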
Diffstat (limited to 'lib')
-rw-r--r--	lib/test_user_copy.c	37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index e365ace06538..5ff04d8fe971 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -47,18 +47,35 @@ static bool is_zeroed(void *from, size_t size)
 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
 {
 	int ret = 0;
-	size_t start, end, i;
-	size_t zero_start = size / 4;
-	size_t zero_end = size - zero_start;
+	size_t start, end, i, zero_start, zero_end;
+
+	if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+		return -EINVAL;
+
+	/*
+	 * We want to cross a page boundary to exercise the code more
+	 * effectively. We also don't want to make the size we scan too large,
+	 * otherwise the test can take a long time and cause soft lockups. So
+	 * scan a 1024 byte region across the page boundary.
+	 */
+	size = 1024;
+	start = PAGE_SIZE - (size / 2);
+
+	kmem += start;
+	umem += start;
+
+	zero_start = size / 4;
+	zero_end = size - zero_start;
 
 	/*
-	 * We conduct a series of check_nonzero_user() tests on a block of memory
-	 * with the following byte-pattern (trying every possible [start,end]
-	 * pair):
+	 * We conduct a series of check_nonzero_user() tests on a block of
+	 * memory with the following byte-pattern (trying every possible
+	 * [start,end] pair):
 	 *
 	 *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
 	 *
-	 * And we verify that check_nonzero_user() acts identically to memchr_inv().
+	 * And we verify that check_nonzero_user() acts identically to
+	 * memchr_inv().
 	 */
 
 	memset(kmem, 0x0, size);
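As an aside, the byte pattern described in the comment above can be
built like this. The module's actual fill loops are not shown in this
hunk, so this is a hedged userspace reconstruction from the comment
alone: the middle half [zero_start, zero_end) stays zeroed and the
outer quarters alternate 0x00/0xff.

	#include <stdio.h>
	#include <string.h>

	static void fill_pattern(unsigned char *buf, size_t size)
	{
		size_t zero_start = size / 4;
		size_t zero_end = size - zero_start;
		size_t i;

		memset(buf, 0x0, size);
		for (i = 1; i < zero_start; i += 2)
			buf[i] = 0xff;	/* head: 00 ff 00 ff ... */
		for (i = zero_end; i < size; i += 2)
			buf[i] = 0xff;	/* tail: ... ff 00 ff 00 */
	}

	int main(void)
	{
		unsigned char buf[16];
		size_t i;

		fill_pattern(buf, sizeof(buf));
		for (i = 0; i < sizeof(buf); i++)
			printf("%02x ", buf[i]);
		printf("\n");
		return 0;
	}

For a 16-byte buffer this prints
"00 ff 00 ff 00 00 00 00 00 00 00 00 ff 00 ff 00", matching the
picture in the comment.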
@@ -93,11 +110,13 @@ static int test_copy_struct_from_user(char *kmem, char __user *umem,
 	size_t ksize, usize;
 
 	umem_src = kmalloc(size, GFP_KERNEL);
-	if ((ret |= test(umem_src == NULL, "kmalloc failed")))
+	ret = test(umem_src == NULL, "kmalloc failed");
+	if (ret)
 		goto out_free;
 
 	expected = kmalloc(size, GFP_KERNEL);
-	if ((ret |= test(expected == NULL, "kmalloc failed")))
+	ret = test(expected == NULL, "kmalloc failed");
+	if (ret)
 		goto out_free;
 
 	/* Fill umem with a fixed byte pattern. */
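Seen in isolation, the style cleanup in the hunk above trades a
compound assignment buried in an if condition for two plain statements.
A standalone userspace sketch (the test() helper here is a stand-in for
the module's, assuming only that it returns nonzero on failure; malloc
replaces kmalloc):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for the module's test() helper: nonzero means failure. */
	static int test(int condition, const char *msg)
	{
		if (condition)
			fprintf(stderr, "test_user_copy: %s\n", msg);
		return condition;
	}

	int main(void)
	{
		int ret = 0;
		void *umem_src = malloc(16);

		/* Old style: the |= assignment hides inside the condition,
		 * and the extra parentheses are needed to silence compiler
		 * warnings about assignment-as-truth-value. */
		if ((ret |= test(umem_src == NULL, "malloc failed")))
			goto out_free;

		/* New style: assign first, then branch; each line does
		 * exactly one thing. */
		ret = test(umem_src == NULL, "malloc failed");
		if (ret)
			goto out_free;

	out_free:
		free(umem_src);
		return ret;
	}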