Diffstat (limited to 'lib')

 lib/Kconfig.debug   |  37
 lib/Kconfig.kasan   |   4
 lib/Makefile        |   8
 lib/atomic64.c      |  32
 lib/atomic64_test.c |  34
 lib/bitmap.c        |   2
 lib/chacha20.c      |  79
 lib/digsig.c        |  16
 lib/dma-debug.c     |   2
 lib/earlycpio.c     |   5
 lib/hweight.c       |   4
 lib/iov_iter.c      |  53
 lib/mpi/mpicoder.c  | 247
 lib/radix-tree.c    |  84
 lib/random32.c      |   1
 lib/rbtree.c        |  26
 lib/stackdepot.c    |   1
 lib/test_hash.c     |   4
 lib/test_uuid.c     | 133
 lib/uuid.c          |   4

 20 files changed, 513 insertions(+), 263 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7936e5e4da9d..eb8917a71489 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -244,6 +244,7 @@ config PAGE_OWNER
 	depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
 	select DEBUG_FS
 	select STACKTRACE
+	select STACKDEPOT
 	select PAGE_EXTENSION
 	help
 	  This keeps track of what call chain is the owner of a page, may
@@ -1309,22 +1310,6 @@ config RCU_PERF_TEST
 	  Say M if you want the RCU performance tests to build as a module.
 	  Say N if you are unsure.
 
-config RCU_PERF_TEST_RUNNABLE
-	bool "performance tests for RCU runnable by default"
-	depends on RCU_PERF_TEST = y
-	default n
-	help
-	  This option provides a way to build the RCU performance tests
-	  directly into the kernel without them starting up at boot time.
-	  You can use /sys/module to manually override this setting.
-	  This /proc file is available only when the RCU performance
-	  tests have been built into the kernel.
-
-	  Say Y here if you want the RCU performance tests to start during
-	  boot (you probably don't).
-	  Say N here if you want the RCU performance tests to start only
-	  after being manually enabled via /sys/module.
-
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL
@@ -1342,23 +1327,6 @@ config RCU_TORTURE_TEST
 	  Say M if you want the RCU torture tests to build as a module.
 	  Say N if you are unsure.
 
-config RCU_TORTURE_TEST_RUNNABLE
-	bool "torture tests for RCU runnable by default"
-	depends on RCU_TORTURE_TEST = y
-	default n
-	help
-	  This option provides a way to build the RCU torture tests
-	  directly into the kernel without them starting up at boot
-	  time. You can use /proc/sys/kernel/rcutorture_runnable
-	  to manually override this setting. This /proc file is
-	  available only when the RCU torture tests have been built
-	  into the kernel.
-
-	  Say Y here if you want the RCU torture tests to start during
-	  boot (you probably don't).
-	  Say N here if you want the RCU torture tests to start only
-	  after being manually enabled via /proc.
-
 config RCU_TORTURE_TEST_SLOW_PREINIT
 	bool "Slow down RCU grace-period pre-initialization to expose races"
 	depends on RCU_TORTURE_TEST
@@ -1843,6 +1811,9 @@ config TEST_BITMAP
 
 	  If unsure, say N.
 
+config TEST_UUID
+	tristate "Test functions located in the uuid module at runtime"
+
 config TEST_RHASHTABLE
 	tristate "Perform selftest on resizable hash table"
 	default n
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 67d8c6838ba9..bd38aab05929 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,9 +5,9 @@ if HAVE_ARCH_KASAN
 
 config KASAN
 	bool "KASan: runtime memory debugger"
-	depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB)
+	depends on SLUB || (SLAB && !DEBUG_SLAB)
 	select CONSTRUCTORS
-	select STACKDEPOT if SLAB
+	select STACKDEPOT
 	help
 	  Enables kernel address sanitizer - runtime memory debugger,
 	  designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/Makefile b/lib/Makefile
index 499fb354d627..cfa68eb269e4 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -15,14 +15,11 @@ KCOV_INSTRUMENT_rbtree.o := n
 KCOV_INSTRUMENT_list_debug.o := n
 KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
-# Kernel does not boot if we instrument this file as it uses custom calling
-# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS).
-KCOV_INSTRUMENT_hweight.o := n
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
-	 sha1.o md5.o irq_regs.o argv_split.o \
+	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o
@@ -58,6 +55,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -73,8 +71,6 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
-GCOV_PROFILE_hweight.o := n
-CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 2886ebac6567..53c2d5edc826 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
 } \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
+#define ATOMIC64_FETCH_OP(op, c_op) \
+long long atomic64_fetch_##op(long long a, atomic64_t *v) \
+{ \
+	unsigned long flags; \
+	raw_spinlock_t *lock = lock_addr(v); \
+	long long val; \
+ \
+	raw_spin_lock_irqsave(lock, flags); \
+	val = v->counter; \
+	v->counter c_op a; \
+	raw_spin_unlock_irqrestore(lock, flags); \
+	return val; \
+} \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
 #define ATOMIC64_OPS(op, c_op) \
 	ATOMIC64_OP(op, c_op) \
-	ATOMIC64_OP_RETURN(op, c_op)
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op) \
+	ATOMIC64_OP(op, c_op) \
+	ATOMIC64_OP_RETURN(op, c_op) \
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
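Note on the new API: unlike the existing atomic64_<op>_return() helpers, which return the counter's value after the operation, the atomic64_fetch_<op>() helpers return the value it held before. A minimal illustration with hypothetical values (not part of the commit):

	atomic64_t v = ATOMIC64_INIT(1);

	atomic64_fetch_add(2, &v);	/* returns 1; v.counter is now 3 */
	atomic64_add_return(2, &v);	/* v.counter becomes 5; returns 5 */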
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 123481814320..dbb369145dda 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -53,11 +53,25 @@ do { \
 	BUG_ON(atomic##bit##_read(&v) != r); \
 } while (0)
 
+#define TEST_FETCH(bit, op, c_op, val) \
+do { \
+	atomic##bit##_set(&v, v0); \
+	r = v0; \
+	r c_op val; \
+	BUG_ON(atomic##bit##_##op(val, &v) != v0); \
+	BUG_ON(atomic##bit##_read(&v) != r); \
+} while (0)
+
 #define RETURN_FAMILY_TEST(bit, op, c_op, val) \
 do { \
 	FAMILY_TEST(TEST_RETURN, bit, op, c_op, val); \
 } while (0)
 
+#define FETCH_FAMILY_TEST(bit, op, c_op, val) \
+do { \
+	FAMILY_TEST(TEST_FETCH, bit, op, c_op, val); \
+} while (0)
+
 #define TEST_ARGS(bit, op, init, ret, expect, args...) \
 do { \
 	atomic##bit##_set(&v, init); \
@@ -114,6 +128,16 @@ static __init void test_atomic(void)
 	RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(, sub_return, -=, -one);
 
+	FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);
+
 	INC_RETURN_FAMILY_TEST(, v0);
 	DEC_RETURN_FAMILY_TEST(, v0);
 
@@ -154,6 +178,16 @@ static __init void test_atomic64(void)
 	RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
 	RETURN_FAMILY_TEST(64, sub_return, -=, -one);
 
+	FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
+	FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);
+
+	FETCH_FAMILY_TEST(64, fetch_or, |=, v1);
+	FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
+	FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
+	FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);
+
 	INIT(v0);
 	atomic64_inc(&v);
 	r += one;
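How TEST_FETCH verifies the semantics above: it seeds the counter with v0, computes the expected result by applying "r c_op val" to a shadow copy, then checks both that the fetch op returned the old value and that the counter now matches. Passing "&= ~" as c_op is what lets the same macro cover fetch_andnot; FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1) expands roughly to:

	atomic64_set(&v, v0);
	r = v0;
	r &= ~ v1;					/* "r c_op val" */
	BUG_ON(atomic64_fetch_andnot(v1, &v) != v0);	/* old value back */
	BUG_ON(atomic64_read(&v) != r);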
diff --git a/lib/bitmap.c b/lib/bitmap.c
index c66da508cbf7..eca88087fa8a 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -14,9 +14,9 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>
-#include <asm/uaccess.h>
 
 /*
  * bitmaps provide an array of bits, implemented using an an
diff --git a/lib/chacha20.c b/lib/chacha20.c
new file mode 100644
index 000000000000..250ceed9ec9a
--- /dev/null
+++ b/lib/chacha20.c
@@ -0,0 +1,79 @@
+/*
+ * ChaCha20 256-bit cipher algorithm, RFC7539
+ *
+ * Copyright (C) 2015 Martin Willi
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/bitops.h>
+#include <linux/cryptohash.h>
+#include <asm/unaligned.h>
+#include <crypto/chacha20.h>
+
+static inline u32 rotl32(u32 v, u8 n)
+{
+	return (v << n) | (v >> (sizeof(v) * 8 - n));
+}
+
+extern void chacha20_block(u32 *state, void *stream)
+{
+	u32 x[16], *out = stream;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(x); i++)
+		x[i] = state[i];
+
+	for (i = 0; i < 20; i += 2) {
+		x[0]  += x[4];    x[12] = rotl32(x[12] ^ x[0],  16);
+		x[1]  += x[5];    x[13] = rotl32(x[13] ^ x[1],  16);
+		x[2]  += x[6];    x[14] = rotl32(x[14] ^ x[2],  16);
+		x[3]  += x[7];    x[15] = rotl32(x[15] ^ x[3],  16);
+
+		x[8]  += x[12];   x[4]  = rotl32(x[4]  ^ x[8],  12);
+		x[9]  += x[13];   x[5]  = rotl32(x[5]  ^ x[9],  12);
+		x[10] += x[14];   x[6]  = rotl32(x[6]  ^ x[10], 12);
+		x[11] += x[15];   x[7]  = rotl32(x[7]  ^ x[11], 12);
+
+		x[0]  += x[4];    x[12] = rotl32(x[12] ^ x[0],   8);
+		x[1]  += x[5];    x[13] = rotl32(x[13] ^ x[1],   8);
+		x[2]  += x[6];    x[14] = rotl32(x[14] ^ x[2],   8);
+		x[3]  += x[7];    x[15] = rotl32(x[15] ^ x[3],   8);
+
+		x[8]  += x[12];   x[4]  = rotl32(x[4]  ^ x[8],   7);
+		x[9]  += x[13];   x[5]  = rotl32(x[5]  ^ x[9],   7);
+		x[10] += x[14];   x[6]  = rotl32(x[6]  ^ x[10],  7);
+		x[11] += x[15];   x[7]  = rotl32(x[7]  ^ x[11],  7);
+
+		x[0]  += x[5];    x[15] = rotl32(x[15] ^ x[0],  16);
+		x[1]  += x[6];    x[12] = rotl32(x[12] ^ x[1],  16);
+		x[2]  += x[7];    x[13] = rotl32(x[13] ^ x[2],  16);
+		x[3]  += x[4];    x[14] = rotl32(x[14] ^ x[3],  16);
+
+		x[10] += x[15];   x[5]  = rotl32(x[5]  ^ x[10], 12);
+		x[11] += x[12];   x[6]  = rotl32(x[6]  ^ x[11], 12);
+		x[8]  += x[13];   x[7]  = rotl32(x[7]  ^ x[8],  12);
+		x[9]  += x[14];   x[4]  = rotl32(x[4]  ^ x[9],  12);
+
+		x[0]  += x[5];    x[15] = rotl32(x[15] ^ x[0],   8);
+		x[1]  += x[6];    x[12] = rotl32(x[12] ^ x[1],   8);
+		x[2]  += x[7];    x[13] = rotl32(x[13] ^ x[2],   8);
+		x[3]  += x[4];    x[14] = rotl32(x[14] ^ x[3],   8);
+
+		x[10] += x[15];   x[5]  = rotl32(x[5]  ^ x[10],  7);
+		x[11] += x[12];   x[6]  = rotl32(x[6]  ^ x[11],  7);
+		x[8]  += x[13];   x[7]  = rotl32(x[7]  ^ x[8],   7);
+		x[9]  += x[14];   x[4]  = rotl32(x[4]  ^ x[9],   7);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(x); i++)
+		out[i] = cpu_to_le32(x[i] + state[i]);
+
+	state[12]++;
+}
+EXPORT_SYMBOL(chacha20_block);
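chacha20_block() runs the twenty ChaCha rounds as ten double-rounds (a column pass and a diagonal pass per loop iteration), adds the input state back into the permuted copy, stores the result little-endian, and advances the block counter kept in state[12]. A hedged caller sketch, assuming the RFC 7539 state layout (the constant words spell "expand 32-byte k"; nothing below is part of this commit):

	u32 state[16] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
		/* words 4..11: 256-bit key, word 12: block counter,
		 * words 13..15: 96-bit nonce */
	};
	u8 stream[64];

	chacha20_block(state, stream);	/* one 64-byte keystream block; state[12]++ */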
diff --git a/lib/digsig.c b/lib/digsig.c
index 07be6c1ef4e2..55b8b2f41a9e 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -104,21 +104,25 @@ static int digsig_verify_rsa(struct key *key,
 	datap = pkh->mpi;
 	endp = ukp->data + ukp->datalen;
 
-	err = -ENOMEM;
-
 	for (i = 0; i < pkh->nmpi; i++) {
 		unsigned int remaining = endp - datap;
 		pkey[i] = mpi_read_from_buffer(datap, &remaining);
-		if (!pkey[i])
+		if (IS_ERR(pkey[i])) {
+			err = PTR_ERR(pkey[i]);
 			goto err;
+		}
 		datap += remaining;
 	}
 
 	mblen = mpi_get_nbits(pkey[0]);
 	mlen = DIV_ROUND_UP(mblen, 8);
 
-	if (mlen == 0)
+	if (mlen == 0) {
+		err = -EINVAL;
 		goto err;
+	}
+
+	err = -ENOMEM;
 
 	out1 = kzalloc(mlen, GFP_KERNEL);
 	if (!out1)
@@ -126,8 +130,10 @@ static int digsig_verify_rsa(struct key *key,
 
 	nret = siglen;
 	in = mpi_read_from_buffer(sig, &nret);
-	if (!in)
+	if (IS_ERR(in)) {
+		err = PTR_ERR(in);
 		goto err;
+	}
 
 	res = mpi_alloc(mpi_get_nlimbs(in) * 2);
 	if (!res)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 51a76af25c66..fcfa1939ac41 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -253,6 +253,7 @@ static int hash_fn(struct dma_debug_entry *entry)
  */
 static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
 					   unsigned long *flags)
+	__acquires(&dma_entry_hash[idx].lock)
 {
 	int idx = hash_fn(entry);
 	unsigned long __flags;
@@ -267,6 +268,7 @@ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
  */
 static void put_hash_bucket(struct hash_bucket *bucket,
 			    unsigned long *flags)
+	__releases(&bucket->lock)
 {
 	unsigned long __flags = *flags;
 
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index 3eb3e4722b8e..db283ba4d2c1 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -125,7 +125,10 @@ struct cpio_data find_cpio_data(const char *path, void *data,
 		if ((ch[C_MODE] & 0170000) == 0100000 &&
 		    ch[C_NAMESIZE] >= mypathsize &&
 		    !memcmp(p, path, mypathsize)) {
-			*nextoff = (long)nptr - (long)data;
+
+			if (nextoff)
+				*nextoff = (long)nptr - (long)data;
+
 			if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
 				pr_warn(
 				"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
diff --git a/lib/hweight.c b/lib/hweight.c
index 9a5c1f221558..43273a7d83cf 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -9,6 +9,7 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -25,6 +26,7 @@ unsigned int __sw_hweight32(unsigned int w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight32);
+#endif
 
 unsigned int __sw_hweight16(unsigned int w)
 {
@@ -43,6 +45,7 @@ unsigned int __sw_hweight8(unsigned int w)
 }
 EXPORT_SYMBOL(__sw_hweight8);
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
@@ -65,3 +68,4 @@ unsigned long __sw_hweight64(__u64 w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight64);
+#endif
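Guarding only the 32- and 64-bit helpers with #ifndef __HAVE_ARCH_SW_HWEIGHT lets an architecture supply its own __sw_hweight32()/__sw_hweight64() (x86 switches to assembly implementations in the same series, which is also why lib/Makefile above drops the special CONFIG_ARCH_HWEIGHT_CFLAGS handling) while keeping the generic 8- and 16-bit versions. For reference, the fast-multiplier variant these functions use is the classic SWAR reduction; a standalone sketch, not taken verbatim from the file:

	unsigned int hweight32_sketch(unsigned int w)
	{
		w -= (w >> 1) & 0x55555555;			/* 2-bit sums */
		w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);/* 4-bit sums */
		w  = (w + (w >> 4)) & 0x0f0f0f0f;		/* byte sums */
		return (w * 0x01010101) >> 24;			/* add the bytes */
	}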
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 0cd522753ff5..9e8c7386b3a0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -56,37 +56,24 @@
 	n = wanted; \
 }
 
-#define iterate_bvec(i, n, __v, __p, skip, STEP) { \
-	size_t wanted = n; \
-	__p = i->bvec; \
-	__v.bv_len = min_t(size_t, n, __p->bv_len - skip); \
-	if (likely(__v.bv_len)) { \
-		__v.bv_page = __p->bv_page; \
-		__v.bv_offset = __p->bv_offset + skip; \
-		(void)(STEP); \
-		skip += __v.bv_len; \
-		n -= __v.bv_len; \
-	} \
-	while (unlikely(n)) { \
-		__p++; \
-		__v.bv_len = min_t(size_t, n, __p->bv_len); \
-		if (unlikely(!__v.bv_len)) \
+#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
+	struct bvec_iter __start; \
+	__start.bi_size = n; \
+	__start.bi_bvec_done = skip; \
+	__start.bi_idx = 0; \
+	for_each_bvec(__v, i->bvec, __bi, __start) { \
+		if (!__v.bv_len) \
 			continue; \
-		__v.bv_page = __p->bv_page; \
-		__v.bv_offset = __p->bv_offset; \
 		(void)(STEP); \
-		skip = __v.bv_len; \
-		n -= __v.bv_len; \
 	} \
-	n = wanted; \
 }
 
 #define iterate_all_kinds(i, n, v, I, B, K) { \
 	size_t skip = i->iov_offset; \
 	if (unlikely(i->type & ITER_BVEC)) { \
-		const struct bio_vec *bvec; \
 		struct bio_vec v; \
-		iterate_bvec(i, n, v, bvec, skip, (B)) \
+		struct bvec_iter __bi; \
+		iterate_bvec(i, n, v, __bi, skip, (B)) \
 	} else if (unlikely(i->type & ITER_KVEC)) { \
 		const struct kvec *kvec; \
 		struct kvec v; \
@@ -104,15 +91,13 @@
 	if (i->count) { \
 		size_t skip = i->iov_offset; \
 		if (unlikely(i->type & ITER_BVEC)) { \
-			const struct bio_vec *bvec; \
+			const struct bio_vec *bvec = i->bvec; \
 			struct bio_vec v; \
-			iterate_bvec(i, n, v, bvec, skip, (B)) \
-			if (skip == bvec->bv_len) { \
-				bvec++; \
-				skip = 0; \
-			} \
-			i->nr_segs -= bvec - i->bvec; \
-			i->bvec = bvec; \
+			struct bvec_iter __bi; \
+			iterate_bvec(i, n, v, __bi, skip, (B)) \
+			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
+			i->nr_segs -= i->bvec - bvec; \
+			skip = __bi.bi_bvec_done; \
 		} else if (unlikely(i->type & ITER_KVEC)) { \
 			const struct kvec *kvec; \
 			struct kvec v; \
@@ -159,7 +144,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_writeable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		from = kaddr + offset;
 
@@ -190,6 +175,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	from = kaddr + offset;
 	left = __copy_to_user(buf, from, copy);
@@ -208,6 +194,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
@@ -240,7 +227,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 	buf = iov->iov_base + skip;
 	copy = min(bytes, iov->iov_len - skip);
 
-	if (!fault_in_pages_readable(buf, copy)) {
+	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
 		kaddr = kmap_atomic(page);
 		to = kaddr + offset;
 
@@ -271,6 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		copy = min(bytes, iov->iov_len - skip);
 	}
 	/* Too bad - revert to non-atomic kmap */
+
 	kaddr = kmap(page);
 	to = kaddr + offset;
 	left = __copy_from_user(to, buf, copy);
@@ -289,6 +277,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
 		bytes -= copy;
 	}
 	kunmap(page);
+
 done:
 	if (skip == iov->iov_len) {
 		iov++;
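iterate_bvec() now rides the generic bvec_iter from <linux/bvec.h> rather than hand-rolled segment/offset bookkeeping: bi_bvec_done carries the bytes already consumed within the current bio_vec (the old "skip"), and after a short walk __bvec_iter_bvec() plus bi_bvec_done recover the iterator's new position. A hedged sketch of the same pattern in isolation (process() and the bvec array are hypothetical):

	struct bio_vec v;
	struct bvec_iter bi;
	struct bvec_iter start = {
		.bi_size      = n,	/* bytes to visit */
		.bi_idx       = 0,
		.bi_bvec_done = skip,	/* already consumed in bvec[0] */
	};

	for_each_bvec(v, bvec, bi, start)
		process(v.bv_page, v.bv_offset, v.bv_len);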
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index 747606f9e4a3..5a0f75a3bf01 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -21,6 +21,7 @@
 #include <linux/bitops.h>
 #include <linux/count_zeros.h>
 #include <linux/byteorder/generic.h>
+#include <linux/scatterlist.h>
 #include <linux/string.h>
 #include "mpi-internal.h"
 
@@ -50,9 +51,7 @@ MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes)
 		return NULL;
 	}
 	if (nbytes > 0)
-		nbits -= count_leading_zeros(buffer[0]);
-	else
-		nbits = 0;
+		nbits -= count_leading_zeros(buffer[0]) - (BITS_PER_LONG - 8);
 
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
@@ -82,50 +81,30 @@ EXPORT_SYMBOL_GPL(mpi_read_raw_data);
 MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
 {
 	const uint8_t *buffer = xbuffer;
-	int i, j;
-	unsigned nbits, nbytes, nlimbs, nread = 0;
-	mpi_limb_t a;
-	MPI val = NULL;
+	unsigned int nbits, nbytes;
+	MPI val;
 
 	if (*ret_nread < 2)
-		goto leave;
+		return ERR_PTR(-EINVAL);
 	nbits = buffer[0] << 8 | buffer[1];
 
 	if (nbits > MAX_EXTERN_MPI_BITS) {
 		pr_info("MPI: mpi too large (%u bits)\n", nbits);
-		goto leave;
+		return ERR_PTR(-EINVAL);
 	}
-	buffer += 2;
-	nread = 2;
 
 	nbytes = DIV_ROUND_UP(nbits, 8);
-	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
-	val = mpi_alloc(nlimbs);
-	if (!val)
-		return NULL;
-	i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
-	i %= BYTES_PER_MPI_LIMB;
-	val->nbits = nbits;
-	j = val->nlimbs = nlimbs;
-	val->sign = 0;
-	for (; j > 0; j--) {
-		a = 0;
-		for (; i < BYTES_PER_MPI_LIMB; i++) {
-			if (++nread > *ret_nread) {
-				printk
-				    ("MPI: mpi larger than buffer nread=%d ret_nread=%d\n",
-				     nread, *ret_nread);
-				goto leave;
-			}
-			a <<= 8;
-			a |= *buffer++;
-		}
-		i = 0;
-		val->d[j - 1] = a;
+	if (nbytes + 2 > *ret_nread) {
+		pr_info("MPI: mpi larger than buffer nbytes=%u ret_nread=%u\n",
+			nbytes, *ret_nread);
+		return ERR_PTR(-EINVAL);
 	}
 
-leave:
-	*ret_nread = nread;
+	val = mpi_read_raw_data(buffer + 2, nbytes);
+	if (!val)
+		return ERR_PTR(-ENOMEM);
+
+	*ret_nread = nbytes + 2;
 	return val;
 }
 EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
@@ -250,82 +229,6 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
 }
 EXPORT_SYMBOL_GPL(mpi_get_buffer);
 
-/****************
- * Use BUFFER to update MPI.
- */
-int mpi_set_buffer(MPI a, const void *xbuffer, unsigned nbytes, int sign)
-{
-	const uint8_t *buffer = xbuffer, *p;
-	mpi_limb_t alimb;
-	int nlimbs;
-	int i;
-
-	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
-	if (RESIZE_IF_NEEDED(a, nlimbs) < 0)
-		return -ENOMEM;
-	a->sign = sign;
-
-	for (i = 0, p = buffer + nbytes - 1; p >= buffer + BYTES_PER_MPI_LIMB;) {
-#if BYTES_PER_MPI_LIMB == 4
-		alimb = (mpi_limb_t) *p--;
-		alimb |= (mpi_limb_t) *p-- << 8;
-		alimb |= (mpi_limb_t) *p-- << 16;
-		alimb |= (mpi_limb_t) *p-- << 24;
-#elif BYTES_PER_MPI_LIMB == 8
-		alimb = (mpi_limb_t) *p--;
-		alimb |= (mpi_limb_t) *p-- << 8;
-		alimb |= (mpi_limb_t) *p-- << 16;
-		alimb |= (mpi_limb_t) *p-- << 24;
-		alimb |= (mpi_limb_t) *p-- << 32;
-		alimb |= (mpi_limb_t) *p-- << 40;
-		alimb |= (mpi_limb_t) *p-- << 48;
-		alimb |= (mpi_limb_t) *p-- << 56;
-#else
-#error please implement for this limb size.
-#endif
-		a->d[i++] = alimb;
-	}
-	if (p >= buffer) {
-#if BYTES_PER_MPI_LIMB == 4
-		alimb = *p--;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 8;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 16;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 24;
-#elif BYTES_PER_MPI_LIMB == 8
-		alimb = (mpi_limb_t) *p--;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 8;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 16;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 24;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 32;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 40;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 48;
-		if (p >= buffer)
-			alimb |= (mpi_limb_t) *p-- << 56;
-#else
-#error please implement for this limb size.
-#endif
-		a->d[i++] = alimb;
-	}
-	a->nlimbs = i;
-
-	if (i != nlimbs) {
-		pr_emerg("MPI: mpi_set_buffer: Assertion failed (%d != %d)", i,
-			 nlimbs);
-		BUG();
-	}
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mpi_set_buffer);
-
 /**
  * mpi_write_to_sgl() - Funnction exports MPI to an sgl (msb first)
  *
@@ -335,16 +238,13 @@ EXPORT_SYMBOL_GPL(mpi_set_buffer);
 * @a:		a multi precision integer
 * @sgl:	scatterlist to write to. Needs to be at least
 *		mpi_get_size(a) long.
- * @nbytes:	in/out param - it has the be set to the maximum number of
- *		bytes that can be written to sgl. This has to be at least
- *		the size of the integer a. On return it receives the actual
- *		length of the data written on success or the data that would
- *		be written if buffer was too small.
+ * @nbytes:	the number of bytes to write. Leading bytes will be
+ *		filled with zero.
 * @sign:	if not NULL, it will be set to the sign of a.
 *
 * Return:	0 on success or error code in case of error
 */
-int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
+int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned nbytes,
 		     int *sign)
 {
 	u8 *p, *p2;
@@ -356,55 +256,60 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
 #error please implement for this limb size.
 #endif
 	unsigned int n = mpi_get_size(a);
-	int i, x, y = 0, lzeros, buf_len;
-
-	if (!nbytes)
-		return -EINVAL;
+	struct sg_mapping_iter miter;
+	int i, x, buf_len;
+	int nents;
 
 	if (sign)
 		*sign = a->sign;
 
-	lzeros = count_lzeros(a);
-
-	if (*nbytes < n - lzeros) {
-		*nbytes = n - lzeros;
+	if (nbytes < n)
 		return -EOVERFLOW;
-	}
 
-	*nbytes = n - lzeros;
-	buf_len = sgl->length;
-	p2 = sg_virt(sgl);
+	nents = sg_nents_for_len(sgl, nbytes);
+	if (nents < 0)
+		return -EINVAL;
 
-	for (i = a->nlimbs - 1 - lzeros / BYTES_PER_MPI_LIMB,
-			lzeros %= BYTES_PER_MPI_LIMB;
-		i >= 0; i--) {
+	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
+	sg_miter_next(&miter);
+	buf_len = miter.length;
+	p2 = miter.addr;
+
+	while (nbytes > n) {
+		i = min_t(unsigned, nbytes - n, buf_len);
+		memset(p2, 0, i);
+		p2 += i;
+		nbytes -= i;
+
+		buf_len -= i;
+		if (!buf_len) {
+			sg_miter_next(&miter);
+			buf_len = miter.length;
+			p2 = miter.addr;
+		}
+	}
+
+	for (i = a->nlimbs - 1; i >= 0; i--) {
 #if BYTES_PER_MPI_LIMB == 4
-		alimb = cpu_to_be32(a->d[i]);
+		alimb = a->d[i] ? cpu_to_be32(a->d[i]) : 0;
 #elif BYTES_PER_MPI_LIMB == 8
-		alimb = cpu_to_be64(a->d[i]);
+		alimb = a->d[i] ? cpu_to_be64(a->d[i]) : 0;
 #else
 #error please implement for this limb size.
 #endif
-		if (lzeros) {
-			y = lzeros;
-			lzeros = 0;
-		}
+		p = (u8 *)&alimb;
 
-		p = (u8 *)&alimb + y;
-
-		for (x = 0; x < sizeof(alimb) - y; x++) {
-			if (!buf_len) {
-				sgl = sg_next(sgl);
-				if (!sgl)
-					return -EINVAL;
-				buf_len = sgl->length;
-				p2 = sg_virt(sgl);
-			}
+		for (x = 0; x < sizeof(alimb); x++) {
 			*p2++ = *p++;
-			buf_len--;
+			if (!--buf_len) {
+				sg_miter_next(&miter);
+				buf_len = miter.length;
+				p2 = miter.addr;
+			}
 		}
-		y = 0;
 	}
+
+	sg_miter_stop(&miter);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
@@ -424,19 +329,23 @@ EXPORT_SYMBOL_GPL(mpi_write_to_sgl);
 */
 MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 {
-	struct scatterlist *sg;
-	int x, i, j, z, lzeros, ents;
+	struct sg_mapping_iter miter;
 	unsigned int nbits, nlimbs;
+	int x, j, z, lzeros, ents;
+	unsigned int len;
+	const u8 *buff;
 	mpi_limb_t a;
 	MPI val = NULL;
 
-	lzeros = 0;
-	ents = sg_nents(sgl);
+	ents = sg_nents_for_len(sgl, nbytes);
+	if (ents < 0)
+		return NULL;
 
-	for_each_sg(sgl, sg, ents, i) {
-		const u8 *buff = sg_virt(sg);
-		int len = sg->length;
+	sg_miter_start(&miter, sgl, ents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 
+	lzeros = 0;
+	len = 0;
+	while (nbytes > 0) {
 		while (len && !*buff) {
 			lzeros++;
 			len--;
@@ -446,12 +355,17 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 		if (len && *buff)
 			break;
 
-		ents--;
+		sg_miter_next(&miter);
+		buff = miter.addr;
+		len = miter.length;
+
 		nbytes -= lzeros;
 		lzeros = 0;
 	}
 
-	sgl = sg;
+	miter.consumed = lzeros;
+	sg_miter_stop(&miter);
+
 	nbytes -= lzeros;
 	nbits = nbytes * 8;
 	if (nbits > MAX_EXTERN_MPI_BITS) {
@@ -460,8 +374,7 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	}
 
 	if (nbytes > 0)
-		nbits -= count_leading_zeros(*(u8 *)(sg_virt(sgl) + lzeros)) -
-			(BITS_PER_LONG - 8);
+		nbits -= count_leading_zeros(*buff) - (BITS_PER_LONG - 8);
 
 	nlimbs = DIV_ROUND_UP(nbytes, BYTES_PER_MPI_LIMB);
 	val = mpi_alloc(nlimbs);
@@ -480,21 +393,21 @@ MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int nbytes)
 	z = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
 	z %= BYTES_PER_MPI_LIMB;
 
-	for_each_sg(sgl, sg, ents, i) {
-		const u8 *buffer = sg_virt(sg) + lzeros;
-		int len = sg->length - lzeros;
+	while (sg_miter_next(&miter)) {
+		buff = miter.addr;
+		len = miter.length;
 
 		for (x = 0; x < len; x++) {
 			a <<= 8;
-			a |= *buffer++;
+			a |= *buff++;
 			if (((z + x + 1) % BYTES_PER_MPI_LIMB) == 0) {
 				val->d[j--] = a;
 				a = 0;
 			}
 		}
 		z += x;
-		lzeros = 0;
 	}
+
 	return val;
 }
 EXPORT_SYMBOL_GPL(mpi_read_raw_from_sgl);
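mpi_read_from_buffer() now validates the self-describing header up front and hands the limb assembly to mpi_read_raw_data(); failures come back as ERR_PTR() codes instead of NULL, which is what the lib/digsig.c hunk above adapts to. The format it parses is a 2-byte big-endian bit count followed by DIV_ROUND_UP(nbits, 8) payload bytes. A small worked example with a hypothetical buffer:

	const uint8_t wire[] = { 0x00, 0x09, 0x01, 0x23 };	/* 9-bit MPI, value 0x123 */
	unsigned int nread = sizeof(wire);
	MPI m = mpi_read_from_buffer(wire, &nread);

	if (IS_ERR(m))
		return PTR_ERR(m);
	/* nread is now 4: two length bytes plus DIV_ROUND_UP(9, 8) = 2 payload bytes */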
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8b7d8459bb9d..61b8fb529cef 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,6 +38,9 @@
 #include <linux/preempt.h>		/* in_interrupt() */
 
 
+/* Number of nodes in fully populated tree of given height */
+static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
+
 /*
  * Radix tree node cache.
  */
@@ -342,7 +345,7 @@ radix_tree_node_free(struct radix_tree_node *node)
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
-static int __radix_tree_preload(gfp_t gfp_mask)
+static int __radix_tree_preload(gfp_t gfp_mask, int nr)
 {
 	struct radix_tree_preload *rtp;
 	struct radix_tree_node *node;
@@ -350,14 +353,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 
 	preempt_disable();
 	rtp = this_cpu_ptr(&radix_tree_preloads);
-	while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
+	while (rtp->nr < nr) {
 		preempt_enable();
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
 		rtp = this_cpu_ptr(&radix_tree_preloads);
-		if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
+		if (rtp->nr < nr) {
 			node->private_data = rtp->nodes;
 			rtp->nodes = node;
 			rtp->nr++;
@@ -383,7 +386,7 @@ int radix_tree_preload(gfp_t gfp_mask)
 {
 	/* Warn on non-sensical use... */
 	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
-	return __radix_tree_preload(gfp_mask);
+	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 }
 EXPORT_SYMBOL(radix_tree_preload);
 
@@ -395,7 +398,7 @@ EXPORT_SYMBOL(radix_tree_preload);
 int radix_tree_maybe_preload(gfp_t gfp_mask)
 {
 	if (gfpflags_allow_blocking(gfp_mask))
-		return __radix_tree_preload(gfp_mask);
+		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
 	preempt_disable();
 	return 0;
@@ -403,6 +406,51 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
 EXPORT_SYMBOL(radix_tree_maybe_preload);
 
 /*
+ * The same as function above, but preload number of nodes required to insert
+ * (1 << order) continuous naturally-aligned elements.
+ */
+int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
+{
+	unsigned long nr_subtrees;
+	int nr_nodes, subtree_height;
+
+	/* Preloading doesn't help anything with this gfp mask, skip it */
+	if (!gfpflags_allow_blocking(gfp_mask)) {
+		preempt_disable();
+		return 0;
+	}
+
+	/*
+	 * Calculate number and height of fully populated subtrees it takes to
+	 * store (1 << order) elements.
+	 */
+	nr_subtrees = 1 << order;
+	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
+			subtree_height++)
+		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
+
+	/*
+	 * The worst case is zero height tree with a single item at index 0 and
+	 * then inserting items starting at ULONG_MAX - (1 << order).
+	 *
+	 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
+	 * 0-index item.
+	 */
+	nr_nodes = RADIX_TREE_MAX_PATH;
+
+	/* Plus branch to fully populated subtrees. */
+	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
+
+	/* Root node is shared. */
+	nr_nodes--;
+
+	/* Plus nodes required to build subtrees. */
+	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
+
+	return __radix_tree_preload(gfp_mask, nr_nodes);
+}
+
+/*
 * The maximum index which can be stored in a radix tree
 */
 static inline unsigned long shift_maxindex(unsigned int shift)
@@ -1571,6 +1619,31 @@ radix_tree_node_ctor(void *arg)
 	INIT_LIST_HEAD(&node->private_list);
 }
 
+static __init unsigned long __maxindex(unsigned int height)
+{
+	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
+	int shift = RADIX_TREE_INDEX_BITS - width;
+
+	if (shift < 0)
+		return ~0UL;
+	if (shift >= BITS_PER_LONG)
+		return 0UL;
+	return ~0UL >> shift;
+}
+
+static __init void radix_tree_init_maxnodes(void)
+{
+	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
+	unsigned int i, j;
+
+	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
+		height_to_maxindex[i] = __maxindex(i);
+	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
+		for (j = i; j > 0; j--)
+			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
+	}
+}
+
 static int radix_tree_callback(struct notifier_block *nfb,
 			       unsigned long action, void *hcpu)
 {
@@ -1597,5 +1670,6 @@ void __init radix_tree_init(void)
 			sizeof(struct radix_tree_node), 0,
 			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
 			radix_tree_node_ctor);
+	radix_tree_init_maxnodes();
 	hotcpu_notifier(radix_tree_callback, 0);
 }
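Worked example for the new preload sizing, assuming a common 64-bit configuration with RADIX_TREE_MAP_SHIFT = 6 (so RADIX_TREE_MAP_SIZE = 64 and RADIX_TREE_MAX_PATH = 11): for order = 9, nr_subtrees starts at 512 and one reduction step leaves 8 subtrees of height 1, so nr_nodes = 11 (worst-case branch to index 0) + (11 - 1) (branch down to the subtrees) - 1 (shared root) + 8 * height_to_maxnodes[1] = 8 * 1, giving 28 preloaded nodes.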
diff --git a/lib/random32.c b/lib/random32.c
index 510d1ce7d4d2..69ed593aab07 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare)
 
 static void __init __prandom_start_seed_timer(void)
 {
-	set_timer_slack(&seed_timer, HZ);
 	seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
 	add_timer(&seed_timer);
 }
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1356454e36de..eb8a19fee110 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -539,17 +539,39 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new,
 {
 	struct rb_node *parent = rb_parent(victim);
 
+	/* Copy the pointers/colour from the victim to the replacement */
+	*new = *victim;
+
 	/* Set the surrounding nodes to point to the replacement */
-	__rb_change_child(victim, new, parent, root);
 	if (victim->rb_left)
 		rb_set_parent(victim->rb_left, new);
 	if (victim->rb_right)
 		rb_set_parent(victim->rb_right, new);
+	__rb_change_child(victim, new, parent, root);
+}
+EXPORT_SYMBOL(rb_replace_node);
+
+void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
+			 struct rb_root *root)
+{
+	struct rb_node *parent = rb_parent(victim);
 
 	/* Copy the pointers/colour from the victim to the replacement */
 	*new = *victim;
+
+	/* Set the surrounding nodes to point to the replacement */
+	if (victim->rb_left)
+		rb_set_parent(victim->rb_left, new);
+	if (victim->rb_right)
+		rb_set_parent(victim->rb_right, new);
+
+	/* Set the parent's pointer to the new node last after an RCU barrier
+	 * so that the pointers onwards are seen to be set correctly when doing
+	 * an RCU walk over the tree.
+	 */
+	__rb_change_child_rcu(victim, new, parent, root);
 }
-EXPORT_SYMBOL(rb_replace_node);
+EXPORT_SYMBOL(rb_replace_node_rcu);
 
 static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
 {
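rb_replace_node() is reordered so the replacement is fully initialized (*new = *victim) before anything points at it, and the new rb_replace_node_rcu() additionally publishes the parent's child pointer last through __rb_change_child_rcu(), ordering the copies before publication for concurrent readers. A hedged sketch of an update side that could use it; the lock, the types and the RCU-deferred free are hypothetical:

	spin_lock(&tree_lock);			/* writers still serialize */
	rb_replace_node_rcu(&old->rb, &new->rb, &tree_root);
	spin_unlock(&tree_lock);
	synchronize_rcu();			/* or kfree_rcu(old, rcu_head) */
	kfree(old);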
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 53ad6c0831ae..60f77f1d470a 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -242,6 +242,7 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		 */
 		alloc_flags &= ~GFP_ZONEMASK;
 		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags |= __GFP_NOWARN;
 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
 		if (page)
 			prealloc = page_address(page);
diff --git a/lib/test_hash.c b/lib/test_hash.c
index c9549c8b4909..66c5fc8351e8 100644
--- a/lib/test_hash.c
+++ b/lib/test_hash.c
@@ -155,8 +155,8 @@ test_hash_init(void)
 		buf[j] = '\0';
 
 		for (i = 0; i <= j; i++) {
-			u64 hashlen = hashlen_string(buf+i);
-			u32 h0 = full_name_hash(buf+i, j-i);
+			u64 hashlen = hashlen_string(buf+i, buf+i);
+			u32 h0 = full_name_hash(buf+i, buf+i, j-i);
 
 			/* Check that hashlen_string gets the length right */
 			if (hashlen_len(hashlen) != j-i) {
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 000000000000..547d3127a3cf
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+	const char *uuid;
+	uuid_le le;
+	uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+	{
+		.uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+		.le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+		.be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+	},
+	{
+		.uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+		.le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+		.be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+	},
+	{
+		.uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+		.le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+		.be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+	},
+};
+
+static const char * const test_uuid_wrong_data[] = {
+	"c33f4995-3701-450e-9fbf206a2e98e576 ",	/* no hyphen(s) */
+	"64b4371c-77c1-48f9-8221-29f054XX023b",	/* invalid character(s) */
+	"0cb4ddff-a545-4401-9d06-688af53e",	/* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+				    const char *data, const char *actual)
+{
+	pr_err("%s test #%u %s %s data: '%s'\n",
+	       prefix,
+	       total_tests,
+	       wrong ? "passed on wrong" : "failed on",
+	       be ? "BE" : "LE",
+	       data);
+	if (actual && *actual)
+		pr_err("%s test #%u actual data: '%s'\n",
+		       prefix,
+		       total_tests,
+		       actual);
+	failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+	uuid_le le;
+	uuid_be be;
+	char buf[48];
+
+	/* LE */
+	total_tests++;
+	if (uuid_le_to_bin(data->uuid, &le))
+		test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_le_cmp(data->le, le)) {
+		sprintf(buf, "%pUl", &le);
+		test_uuid_failed("cmp", false, false, data->uuid, buf);
+	}
+
+	/* BE */
+	total_tests++;
+	if (uuid_be_to_bin(data->uuid, &be))
+		test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+	total_tests++;
+	if (uuid_be_cmp(data->be, be)) {
+		sprintf(buf, "%pUb", &be);
+		test_uuid_failed("cmp", false, true, data->uuid, buf);
+	}
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+	uuid_le le;
+	uuid_be be;
+
+	/* LE */
+	total_tests++;
+	if (!uuid_le_to_bin(data, &le))
+		test_uuid_failed("negative", true, false, data, NULL);
+
+	/* BE */
+	total_tests++;
+	if (!uuid_be_to_bin(data, &be))
+		test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+		test_uuid_test(&test_uuid_test_data[i]);
+
+	for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+		test_uuid_wrong(test_uuid_wrong_data[i]);
+
+	if (failed_tests == 0)
+		pr_info("all %u tests passed\n", total_tests);
+	else
+		pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+	return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+	/* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/uuid.c b/lib/uuid.c
index e116ae5fa00f..37687af77ff8 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
 		return -EINVAL;
 
 	for (i = 0; i < 16; i++) {
-		int hi = hex_to_bin(uuid[si[i]] + 0);
-		int lo = hex_to_bin(uuid[si[i]] + 1);
+		int hi = hex_to_bin(uuid[si[i] + 0]);
+		int lo = hex_to_bin(uuid[si[i] + 1]);
 
 		b[ei[i]] = (hi << 4) | lo;
 	}
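The lib/uuid.c fix is a misplaced bracket: the +0/+1 used to be added to the looked-up character rather than to the string index, so the high nibble happened to come from the right character while the low nibble was hex_to_bin() of that same character's value plus one. For "c33f...", with si[0] = 0:

	int lo = hex_to_bin(uuid[si[i]] + 1);	/* old: hex_to_bin('c' + 1), i.e. 'd' */
	int lo = hex_to_bin(uuid[si[i] + 1]);	/* new: hex_to_bin(uuid[1]), i.e. '3' */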