Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_debugfs.h | 5
-rw-r--r--  lib/Kconfig | 13
-rw-r--r--  lib/Kconfig.debug | 68
-rw-r--r--  lib/Makefile | 4
-rw-r--r--  lib/atomic64.c | 32
-rw-r--r--  lib/crypto/Makefile | 4
-rw-r--r--  lib/crypto/arc4.c | 74
-rw-r--r--  lib/debugobjects.c | 321
-rw-r--r--  lib/devres.c | 3
-rw-r--r--  lib/digsig.c | 2
-rw-r--r--  lib/dim/Makefile | 7
-rw-r--r--  lib/dim/dim.c | 83
-rw-r--r--  lib/dim/net_dim.c | 190
-rw-r--r--  lib/dim/rdma_dim.c | 108
-rw-r--r--  lib/dynamic_debug.c | 12
-rw-r--r--  lib/fault-inject.c | 73
-rw-r--r--  lib/fonts/fonts.c | 103
-rw-r--r--  lib/genalloc.c | 125
-rw-r--r--  lib/iov_iter.c | 15
-rw-r--r--  lib/kobject.c | 4
-rw-r--r--  lib/list_sort.c | 2
-rw-r--r--  lib/mpi/mpi-pow.c | 6
-rw-r--r--  lib/notifier-error-inject.c | 13
-rw-r--r--  lib/objagg.c | 6
-rw-r--r--  lib/percpu-refcount.c | 13
-rw-r--r--  lib/raid6/Makefile | 98
-rw-r--r--  lib/raid6/s390vx.uc | 2
-rw-r--r--  lib/reed_solomon/Makefile | 2
-rw-r--r--  lib/reed_solomon/decode_rs.c | 115
-rw-r--r--  lib/reed_solomon/reed_solomon.c | 12
-rw-r--r--  lib/reed_solomon/test_rslib.c | 518
-rw-r--r--  lib/sbitmap.c | 10
-rw-r--r--  lib/scatterlist.c | 45
-rw-r--r--  lib/sg_pool.c | 39
-rw-r--r--  lib/smp_processor_id.c | 2
-rw-r--r--  lib/string_helpers.c | 19
-rw-r--r--  lib/test_blackhole_dev.c | 100
-rw-r--r--  lib/test_kasan.c | 98
-rw-r--r--  lib/vdso/Kconfig | 36
-rw-r--r--  lib/vdso/Makefile | 22
-rw-r--r--  lib/vdso/gettimeofday.c | 239
-rw-r--r--  lib/vsprintf.c | 4
42 files changed, 2231 insertions(+), 416 deletions(-)
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
22 return -ENODEV; 22 return -ENODEV;
23 23
24 sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); 24 sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
25 if (IS_ERR(sw842_debugfs_root))
26 return PTR_ERR(sw842_debugfs_root);
27 25
28 for (i = 0; i < ARRAY_SIZE(template_count); i++) { 26 for (i = 0; i < ARRAY_SIZE(template_count); i++) {
29 char name[32]; 27 char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
46 44
47static void __exit sw842_debugfs_remove(void) 45static void __exit sw842_debugfs_remove(void)
48{ 46{
49 if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root)) 47 debugfs_remove_recursive(sw842_debugfs_root);
50 debugfs_remove_recursive(sw842_debugfs_root);
51} 48}
52 49
53#endif 50#endif
diff --git a/lib/Kconfig b/lib/Kconfig
index 90623a0e1942..52a7b2e6fb74 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -562,6 +562,14 @@ config SIGNATURE
562 Digital signature verification. Currently only RSA is supported. 562 Digital signature verification. Currently only RSA is supported.
563 Implementation is done using GnuPG MPI library 563 Implementation is done using GnuPG MPI library
564 564
565config DIMLIB
566 bool "DIM library"
567 default y
568 help
569 Dynamic Interrupt Moderation library.
 570	  Implements an algorithm for dynamically changing CQ moderation values
 571	  according to run-time performance.
572
565# 573#
566# libfdt files, only selected if needed. 574# libfdt files, only selected if needed.
567# 575#
@@ -576,6 +584,11 @@ config OID_REGISTRY
576config UCS2_STRING 584config UCS2_STRING
577 tristate 585 tristate
578 586
587#
588# generic vdso
589#
590source "lib/vdso/Kconfig"
591
579source "lib/fonts/Kconfig" 592source "lib/fonts/Kconfig"
580 593
581config SG_SPLIT 594config SG_SPLIT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cbdfae379896..a858b55e8ac7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,19 +305,26 @@ config DEBUG_FS
305 305
306 If unsure, say N. 306 If unsure, say N.
307 307
308config HEADERS_CHECK 308config HEADERS_INSTALL
309 bool "Run 'make headers_check' when building vmlinux" 309 bool "Install uapi headers to usr/include"
310 depends on !UML 310 depends on !UML
311 help 311 help
312 This option will extract the user-visible kernel headers whenever 312 This option will install uapi headers (headers exported to user-space)
313 building the kernel, and will run basic sanity checks on them to 313 into the usr/include directory for use during the kernel build.
314 ensure that exported files do not attempt to include files which 314 This is unneeded for building the kernel itself, but needed for some
315 were not exported, etc. 315 user-space program samples. It is also needed by some features such
316 as uapi header sanity checks.
317
318config HEADERS_CHECK
319 bool "Run sanity checks on uapi headers when building 'all'"
320 depends on HEADERS_INSTALL
321 help
322 This option will run basic sanity checks on uapi headers when
323 building the 'all' target, for example, ensure that they do not
324 attempt to include files which were not exported, etc.
316 325
317 If you're making modifications to header files which are 326 If you're making modifications to header files which are
318 relevant for userspace, say 'Y', and check the headers 327 relevant for userspace, say 'Y'.
319 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
320 your build tree), to make sure they're suitable.
321 328
322config OPTIMIZE_INLINING 329config OPTIMIZE_INLINING
323 bool "Allow compiler to uninline functions marked 'inline'" 330 bool "Allow compiler to uninline functions marked 'inline'"
@@ -1095,7 +1102,7 @@ config PROVE_LOCKING
1095 select DEBUG_SPINLOCK 1102 select DEBUG_SPINLOCK
1096 select DEBUG_MUTEXES 1103 select DEBUG_MUTEXES
1097 select DEBUG_RT_MUTEXES if RT_MUTEXES 1104 select DEBUG_RT_MUTEXES if RT_MUTEXES
1098 select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER 1105 select DEBUG_RWSEMS
1099 select DEBUG_WW_MUTEX_SLOWPATH 1106 select DEBUG_WW_MUTEX_SLOWPATH
1100 select DEBUG_LOCK_ALLOC 1107 select DEBUG_LOCK_ALLOC
1101 select TRACE_IRQFLAGS 1108 select TRACE_IRQFLAGS
@@ -1132,7 +1139,7 @@ config PROVE_LOCKING
1132 the proof of observed correctness is also maintained for an 1139 the proof of observed correctness is also maintained for an
1133 arbitrary combination of these separate locking variants. 1140 arbitrary combination of these separate locking variants.
1134 1141
1135 For more details, see Documentation/locking/lockdep-design.txt. 1142 For more details, see Documentation/locking/lockdep-design.rst.
1136 1143
1137config LOCK_STAT 1144config LOCK_STAT
1138 bool "Lock usage statistics" 1145 bool "Lock usage statistics"
@@ -1146,7 +1153,7 @@ config LOCK_STAT
1146 help 1153 help
1147 This feature enables tracking lock contention points 1154 This feature enables tracking lock contention points
1148 1155
1149 For more details, see Documentation/locking/lockstat.txt 1156 For more details, see Documentation/locking/lockstat.rst
1150 1157
1151 This also enables lock events required by "perf lock", 1158 This also enables lock events required by "perf lock",
1152 subcommand of perf. 1159 subcommand of perf.
@@ -1199,10 +1206,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
1199 1206
1200config DEBUG_RWSEMS 1207config DEBUG_RWSEMS
1201 bool "RW Semaphore debugging: basic checks" 1208 bool "RW Semaphore debugging: basic checks"
1202 depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER 1209 depends on DEBUG_KERNEL
1203 help 1210 help
1204 This debugging feature allows mismatched rw semaphore locks and unlocks 1211 This debugging feature allows mismatched rw semaphore locks
1205 to be detected and reported. 1212 and unlocks to be detected and reported.
1206 1213
1207config DEBUG_LOCK_ALLOC 1214config DEBUG_LOCK_ALLOC
1208 bool "Lock debugging: detect incorrect freeing of live locks" 1215 bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1701,7 +1708,7 @@ config LKDTM
1701 called lkdtm. 1708 called lkdtm.
1702 1709
1703 Documentation on how to use the module can be found in 1710 Documentation on how to use the module can be found in
1704 Documentation/fault-injection/provoke-crashes.txt 1711 Documentation/fault-injection/provoke-crashes.rst
1705 1712
1706config TEST_LIST_SORT 1713config TEST_LIST_SORT
1707 tristate "Linked list sorting test" 1714 tristate "Linked list sorting test"
@@ -1754,6 +1761,18 @@ config RBTREE_TEST
1754 A benchmark measuring the performance of the rbtree library. 1761 A benchmark measuring the performance of the rbtree library.
1755 Also includes rbtree invariant checks. 1762 Also includes rbtree invariant checks.
1756 1763
1764config REED_SOLOMON_TEST
1765 tristate "Reed-Solomon library test"
1766 depends on DEBUG_KERNEL || m
1767 select REED_SOLOMON
1768 select REED_SOLOMON_ENC16
1769 select REED_SOLOMON_DEC16
1770 help
1771 This option enables the self-test function of rslib at boot,
1772 or at module load time.
1773
1774 If unsure, say N.
1775
1757config INTERVAL_TREE_TEST 1776config INTERVAL_TREE_TEST
1758 tristate "Interval tree test" 1777 tristate "Interval tree test"
1759 depends on DEBUG_KERNEL 1778 depends on DEBUG_KERNEL
@@ -1858,6 +1877,14 @@ config TEST_PARMAN
1858 1877
1859 If unsure, say N. 1878 If unsure, say N.
1860 1879
1880config TEST_IRQ_TIMINGS
1881 bool "IRQ timings selftest"
1882 depends on IRQ_TIMINGS
1883 help
1884 Enable this option to test the irq timings code on boot.
1885
1886 If unsure, say N.
1887
1861config TEST_LKM 1888config TEST_LKM
1862 tristate "Test module loading with 'hello world' module" 1889 tristate "Test module loading with 'hello world' module"
1863 depends on m 1890 depends on m
@@ -1909,6 +1936,15 @@ config TEST_BPF
1909 1936
1910 If unsure, say N. 1937 If unsure, say N.
1911 1938
1939config TEST_BLACKHOLE_DEV
1940 tristate "Test blackhole netdev functionality"
1941 depends on m && NET
1942 help
1943 This builds the "test_blackhole_dev" module that validates the
1944 data path through this blackhole netdev.
1945
1946 If unsure, say N.
1947
1912config FIND_BIT_BENCHMARK 1948config FIND_BIT_BENCHMARK
1913 tristate "Test find_bit functions" 1949 tristate "Test find_bit functions"
1914 help 1950 help
diff --git a/lib/Makefile b/lib/Makefile
index fb7697031a79..fdd56bc219b8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
91obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o 91obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
92obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o 92obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
93obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o 93obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
94obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
94 95
95obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/ 96obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
96 97
@@ -102,7 +103,7 @@ endif
102obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o 103obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
103CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) 104CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
104 105
105obj-y += math/ 106obj-y += math/ crypto/
106 107
107obj-$(CONFIG_GENERIC_IOMAP) += iomap.o 108obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
108obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o 109obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
@@ -202,6 +203,7 @@ obj-$(CONFIG_GLOB) += glob.o
202obj-$(CONFIG_GLOB_SELFTEST) += globtest.o 203obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
203 204
204obj-$(CONFIG_MPILIB) += mpi/ 205obj-$(CONFIG_MPILIB) += mpi/
206obj-$(CONFIG_DIMLIB) += dim/
205obj-$(CONFIG_SIGNATURE) += digsig.o 207obj-$(CONFIG_SIGNATURE) += digsig.o
206 208
207lib-$(CONFIG_CLZ_TAB) += clz_tab.o 209lib-$(CONFIG_CLZ_TAB) += clz_tab.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 7e6905751522..e98c85a99787 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,11 +42,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
42 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock; 42 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
43} 43}
44 44
45long long atomic64_read(const atomic64_t *v) 45s64 atomic64_read(const atomic64_t *v)
46{ 46{
47 unsigned long flags; 47 unsigned long flags;
48 raw_spinlock_t *lock = lock_addr(v); 48 raw_spinlock_t *lock = lock_addr(v);
49 long long val; 49 s64 val;
50 50
51 raw_spin_lock_irqsave(lock, flags); 51 raw_spin_lock_irqsave(lock, flags);
52 val = v->counter; 52 val = v->counter;
@@ -55,7 +55,7 @@ long long atomic64_read(const atomic64_t *v)
55} 55}
56EXPORT_SYMBOL(atomic64_read); 56EXPORT_SYMBOL(atomic64_read);
57 57
58void atomic64_set(atomic64_t *v, long long i) 58void atomic64_set(atomic64_t *v, s64 i)
59{ 59{
60 unsigned long flags; 60 unsigned long flags;
61 raw_spinlock_t *lock = lock_addr(v); 61 raw_spinlock_t *lock = lock_addr(v);
@@ -67,7 +67,7 @@ void atomic64_set(atomic64_t *v, long long i)
67EXPORT_SYMBOL(atomic64_set); 67EXPORT_SYMBOL(atomic64_set);
68 68
69#define ATOMIC64_OP(op, c_op) \ 69#define ATOMIC64_OP(op, c_op) \
70void atomic64_##op(long long a, atomic64_t *v) \ 70void atomic64_##op(s64 a, atomic64_t *v) \
71{ \ 71{ \
72 unsigned long flags; \ 72 unsigned long flags; \
73 raw_spinlock_t *lock = lock_addr(v); \ 73 raw_spinlock_t *lock = lock_addr(v); \
@@ -79,11 +79,11 @@ void atomic64_##op(long long a, atomic64_t *v) \
79EXPORT_SYMBOL(atomic64_##op); 79EXPORT_SYMBOL(atomic64_##op);
80 80
81#define ATOMIC64_OP_RETURN(op, c_op) \ 81#define ATOMIC64_OP_RETURN(op, c_op) \
82long long atomic64_##op##_return(long long a, atomic64_t *v) \ 82s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
83{ \ 83{ \
84 unsigned long flags; \ 84 unsigned long flags; \
85 raw_spinlock_t *lock = lock_addr(v); \ 85 raw_spinlock_t *lock = lock_addr(v); \
86 long long val; \ 86 s64 val; \
87 \ 87 \
88 raw_spin_lock_irqsave(lock, flags); \ 88 raw_spin_lock_irqsave(lock, flags); \
89 val = (v->counter c_op a); \ 89 val = (v->counter c_op a); \
@@ -93,11 +93,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v) \
93EXPORT_SYMBOL(atomic64_##op##_return); 93EXPORT_SYMBOL(atomic64_##op##_return);
94 94
95#define ATOMIC64_FETCH_OP(op, c_op) \ 95#define ATOMIC64_FETCH_OP(op, c_op) \
96long long atomic64_fetch_##op(long long a, atomic64_t *v) \ 96s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
97{ \ 97{ \
98 unsigned long flags; \ 98 unsigned long flags; \
99 raw_spinlock_t *lock = lock_addr(v); \ 99 raw_spinlock_t *lock = lock_addr(v); \
100 long long val; \ 100 s64 val; \
101 \ 101 \
102 raw_spin_lock_irqsave(lock, flags); \ 102 raw_spin_lock_irqsave(lock, flags); \
103 val = v->counter; \ 103 val = v->counter; \
@@ -130,11 +130,11 @@ ATOMIC64_OPS(xor, ^=)
130#undef ATOMIC64_OP_RETURN 130#undef ATOMIC64_OP_RETURN
131#undef ATOMIC64_OP 131#undef ATOMIC64_OP
132 132
133long long atomic64_dec_if_positive(atomic64_t *v) 133s64 atomic64_dec_if_positive(atomic64_t *v)
134{ 134{
135 unsigned long flags; 135 unsigned long flags;
136 raw_spinlock_t *lock = lock_addr(v); 136 raw_spinlock_t *lock = lock_addr(v);
137 long long val; 137 s64 val;
138 138
139 raw_spin_lock_irqsave(lock, flags); 139 raw_spin_lock_irqsave(lock, flags);
140 val = v->counter - 1; 140 val = v->counter - 1;
@@ -145,11 +145,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
145} 145}
146EXPORT_SYMBOL(atomic64_dec_if_positive); 146EXPORT_SYMBOL(atomic64_dec_if_positive);
147 147
148long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) 148s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
149{ 149{
150 unsigned long flags; 150 unsigned long flags;
151 raw_spinlock_t *lock = lock_addr(v); 151 raw_spinlock_t *lock = lock_addr(v);
152 long long val; 152 s64 val;
153 153
154 raw_spin_lock_irqsave(lock, flags); 154 raw_spin_lock_irqsave(lock, flags);
155 val = v->counter; 155 val = v->counter;
@@ -160,11 +160,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
160} 160}
161EXPORT_SYMBOL(atomic64_cmpxchg); 161EXPORT_SYMBOL(atomic64_cmpxchg);
162 162
163long long atomic64_xchg(atomic64_t *v, long long new) 163s64 atomic64_xchg(atomic64_t *v, s64 new)
164{ 164{
165 unsigned long flags; 165 unsigned long flags;
166 raw_spinlock_t *lock = lock_addr(v); 166 raw_spinlock_t *lock = lock_addr(v);
167 long long val; 167 s64 val;
168 168
169 raw_spin_lock_irqsave(lock, flags); 169 raw_spin_lock_irqsave(lock, flags);
170 val = v->counter; 170 val = v->counter;
@@ -174,11 +174,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
174} 174}
175EXPORT_SYMBOL(atomic64_xchg); 175EXPORT_SYMBOL(atomic64_xchg);
176 176
177long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u) 177s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
178{ 178{
179 unsigned long flags; 179 unsigned long flags;
180 raw_spinlock_t *lock = lock_addr(v); 180 raw_spinlock_t *lock = lock_addr(v);
181 long long val; 181 s64 val;
182 182
183 raw_spin_lock_irqsave(lock, flags); 183 raw_spin_lock_irqsave(lock, flags);
184 val = v->counter; 184 val = v->counter;
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000000..88195c34932d
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,4 @@
1# SPDX-License-Identifier: GPL-2.0
2
3obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
4libarc4-y := arc4.o
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000000..c2020f19c652
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Cryptographic API
4 *
5 * ARC4 Cipher Algorithm
6 *
7 * Jon Oberheide <jon@oberheide.org>
8 */
9
10#include <crypto/arc4.h>
11#include <linux/module.h>
12
13int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
14{
15 int i, j = 0, k = 0;
16
17 ctx->x = 1;
18 ctx->y = 0;
19
20 for (i = 0; i < 256; i++)
21 ctx->S[i] = i;
22
23 for (i = 0; i < 256; i++) {
24 u32 a = ctx->S[i];
25
26 j = (j + in_key[k] + a) & 0xff;
27 ctx->S[i] = ctx->S[j];
28 ctx->S[j] = a;
29 if (++k >= key_len)
30 k = 0;
31 }
32
33 return 0;
34}
35EXPORT_SYMBOL(arc4_setkey);
36
37void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
38{
39 u32 *const S = ctx->S;
40 u32 x, y, a, b;
41 u32 ty, ta, tb;
42
43 if (len == 0)
44 return;
45
46 x = ctx->x;
47 y = ctx->y;
48
49 a = S[x];
50 y = (y + a) & 0xff;
51 b = S[y];
52
53 do {
54 S[y] = a;
55 a = (a + b) & 0xff;
56 S[x] = b;
57 x = (x + 1) & 0xff;
58 ta = S[x];
59 ty = (y + ta) & 0xff;
60 tb = S[ty];
61 *out++ = *in++ ^ S[a];
62 if (--len == 0)
63 break;
64 y = ty;
65 a = ta;
66 b = tb;
67 } while (true);
68
69 ctx->x = x;
70 ctx->y = y;
71}
72EXPORT_SYMBOL(arc4_crypt);
73
74MODULE_LICENSE("GPL");
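The file above moves ARC4 into a small standalone library so that callers can use it without going through the crypto_skcipher layer. As a rough illustration only (the wrapper function and buffer names below are invented; arc4_setkey(), arc4_crypt() and struct arc4_ctx are the symbols actually added here):

    #include <crypto/arc4.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Encrypt (or, since RC4 is symmetric, decrypt) a buffer in one pass. */
    static void example_arc4_crypt_buf(const u8 *key, unsigned int keylen,
                                       const u8 *src, u8 *dst, unsigned int len)
    {
            struct arc4_ctx ctx;                    /* 256-entry S box plus the x/y indices */

            arc4_setkey(&ctx, key, keylen);         /* key schedule; always returns 0 */
            arc4_crypt(&ctx, dst, src, len);
            memzero_explicit(&ctx, sizeof(ctx));    /* don't leave keystream state behind */
    }

Note that the context is advanced on every arc4_crypt() call, so a shared arc4_ctx needs the caller's own serialization.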
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..61261195f5b6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,16 +25,37 @@
25 25
26#define ODEBUG_POOL_SIZE 1024 26#define ODEBUG_POOL_SIZE 1024
27#define ODEBUG_POOL_MIN_LEVEL 256 27#define ODEBUG_POOL_MIN_LEVEL 256
28#define ODEBUG_POOL_PERCPU_SIZE 64
29#define ODEBUG_BATCH_SIZE 16
28 30
29#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT 31#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
30#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT) 32#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
31#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1)) 33#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
32 34
35/*
36 * We limit the freeing of debug objects via workqueue at a maximum
37 * frequency of 10Hz and about 1024 objects for each freeing operation.
38 * So it is freeing at most 10k debug objects per second.
39 */
40#define ODEBUG_FREE_WORK_MAX 1024
41#define ODEBUG_FREE_WORK_DELAY DIV_ROUND_UP(HZ, 10)
42
33struct debug_bucket { 43struct debug_bucket {
34 struct hlist_head list; 44 struct hlist_head list;
35 raw_spinlock_t lock; 45 raw_spinlock_t lock;
36}; 46};
37 47
48/*
49 * Debug object percpu free list
50 * Access is protected by disabling irq
51 */
52struct debug_percpu_free {
53 struct hlist_head free_objs;
54 int obj_free;
55};
56
57static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
58
38static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 59static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
39 60
40static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 61static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
@@ -44,13 +65,20 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
44static HLIST_HEAD(obj_pool); 65static HLIST_HEAD(obj_pool);
45static HLIST_HEAD(obj_to_free); 66static HLIST_HEAD(obj_to_free);
46 67
68/*
69 * Because of the presence of percpu free pools, obj_pool_free will
70 * under-count those in the percpu free pools. Similarly, obj_pool_used
71 * will over-count those in the percpu free pools. Adjustments will be
72 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
73 * can be off.
74 */
47static int obj_pool_min_free = ODEBUG_POOL_SIZE; 75static int obj_pool_min_free = ODEBUG_POOL_SIZE;
48static int obj_pool_free = ODEBUG_POOL_SIZE; 76static int obj_pool_free = ODEBUG_POOL_SIZE;
49static int obj_pool_used; 77static int obj_pool_used;
50static int obj_pool_max_used; 78static int obj_pool_max_used;
79static bool obj_freeing;
51/* The number of objs on the global free list */ 80/* The number of objs on the global free list */
52static int obj_nr_tofree; 81static int obj_nr_tofree;
53static struct kmem_cache *obj_cache;
54 82
55static int debug_objects_maxchain __read_mostly; 83static int debug_objects_maxchain __read_mostly;
56static int __maybe_unused debug_objects_maxchecked __read_mostly; 84static int __maybe_unused debug_objects_maxchecked __read_mostly;
@@ -63,6 +91,7 @@ static int debug_objects_pool_size __read_mostly
63static int debug_objects_pool_min_level __read_mostly 91static int debug_objects_pool_min_level __read_mostly
64 = ODEBUG_POOL_MIN_LEVEL; 92 = ODEBUG_POOL_MIN_LEVEL;
65static struct debug_obj_descr *descr_test __read_mostly; 93static struct debug_obj_descr *descr_test __read_mostly;
94static struct kmem_cache *obj_cache __read_mostly;
66 95
67/* 96/*
68 * Track numbers of kmem_cache_alloc()/free() calls done. 97 * Track numbers of kmem_cache_alloc()/free() calls done.
@@ -71,7 +100,7 @@ static int debug_objects_allocated;
71static int debug_objects_freed; 100static int debug_objects_freed;
72 101
73static void free_obj_work(struct work_struct *work); 102static void free_obj_work(struct work_struct *work);
74static DECLARE_WORK(debug_obj_work, free_obj_work); 103static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
75 104
76static int __init enable_object_debug(char *str) 105static int __init enable_object_debug(char *str)
77{ 106{
@@ -100,7 +129,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
100static void fill_pool(void) 129static void fill_pool(void)
101{ 130{
102 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 131 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
103 struct debug_obj *new, *obj; 132 struct debug_obj *obj;
104 unsigned long flags; 133 unsigned long flags;
105 134
106 if (likely(obj_pool_free >= debug_objects_pool_min_level)) 135 if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -116,7 +145,7 @@ static void fill_pool(void)
116 * Recheck with the lock held as the worker thread might have 145 * Recheck with the lock held as the worker thread might have
117 * won the race and freed the global free list already. 146 * won the race and freed the global free list already.
118 */ 147 */
119 if (obj_nr_tofree) { 148 while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
120 obj = hlist_entry(obj_to_free.first, typeof(*obj), node); 149 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
121 hlist_del(&obj->node); 150 hlist_del(&obj->node);
122 obj_nr_tofree--; 151 obj_nr_tofree--;
@@ -130,15 +159,23 @@ static void fill_pool(void)
130 return; 159 return;
131 160
132 while (obj_pool_free < debug_objects_pool_min_level) { 161 while (obj_pool_free < debug_objects_pool_min_level) {
162 struct debug_obj *new[ODEBUG_BATCH_SIZE];
163 int cnt;
133 164
134 new = kmem_cache_zalloc(obj_cache, gfp); 165 for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
135 if (!new) 166 new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
167 if (!new[cnt])
168 break;
169 }
170 if (!cnt)
136 return; 171 return;
137 172
138 raw_spin_lock_irqsave(&pool_lock, flags); 173 raw_spin_lock_irqsave(&pool_lock, flags);
139 hlist_add_head(&new->node, &obj_pool); 174 while (cnt) {
140 debug_objects_allocated++; 175 hlist_add_head(&new[--cnt]->node, &obj_pool);
141 obj_pool_free++; 176 debug_objects_allocated++;
177 obj_pool_free++;
178 }
142 raw_spin_unlock_irqrestore(&pool_lock, flags); 179 raw_spin_unlock_irqrestore(&pool_lock, flags);
143 } 180 }
144} 181}
@@ -163,36 +200,81 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
163} 200}
164 201
165/* 202/*
203 * Allocate a new object from the hlist
204 */
205static struct debug_obj *__alloc_object(struct hlist_head *list)
206{
207 struct debug_obj *obj = NULL;
208
209 if (list->first) {
210 obj = hlist_entry(list->first, typeof(*obj), node);
211 hlist_del(&obj->node);
212 }
213
214 return obj;
215}
216
217/*
166 * Allocate a new object. If the pool is empty, switch off the debugger. 218 * Allocate a new object. If the pool is empty, switch off the debugger.
167 * Must be called with interrupts disabled. 219 * Must be called with interrupts disabled.
168 */ 220 */
169static struct debug_obj * 221static struct debug_obj *
170alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr) 222alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
171{ 223{
172 struct debug_obj *obj = NULL; 224 struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
225 struct debug_obj *obj;
173 226
174 raw_spin_lock(&pool_lock); 227 if (likely(obj_cache)) {
175 if (obj_pool.first) { 228 obj = __alloc_object(&percpu_pool->free_objs);
176 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 229 if (obj) {
230 percpu_pool->obj_free--;
231 goto init_obj;
232 }
233 }
177 234
178 obj->object = addr; 235 raw_spin_lock(&pool_lock);
179 obj->descr = descr; 236 obj = __alloc_object(&obj_pool);
180 obj->state = ODEBUG_STATE_NONE; 237 if (obj) {
181 obj->astate = 0; 238 obj_pool_used++;
182 hlist_del(&obj->node); 239 obj_pool_free--;
183 240
184 hlist_add_head(&obj->node, &b->list); 241 /*
242 * Looking ahead, allocate one batch of debug objects and
243 * put them into the percpu free pool.
244 */
245 if (likely(obj_cache)) {
246 int i;
247
248 for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
249 struct debug_obj *obj2;
250
251 obj2 = __alloc_object(&obj_pool);
252 if (!obj2)
253 break;
254 hlist_add_head(&obj2->node,
255 &percpu_pool->free_objs);
256 percpu_pool->obj_free++;
257 obj_pool_used++;
258 obj_pool_free--;
259 }
260 }
185 261
186 obj_pool_used++;
187 if (obj_pool_used > obj_pool_max_used) 262 if (obj_pool_used > obj_pool_max_used)
188 obj_pool_max_used = obj_pool_used; 263 obj_pool_max_used = obj_pool_used;
189 264
190 obj_pool_free--;
191 if (obj_pool_free < obj_pool_min_free) 265 if (obj_pool_free < obj_pool_min_free)
192 obj_pool_min_free = obj_pool_free; 266 obj_pool_min_free = obj_pool_free;
193 } 267 }
194 raw_spin_unlock(&pool_lock); 268 raw_spin_unlock(&pool_lock);
195 269
270init_obj:
271 if (obj) {
272 obj->object = addr;
273 obj->descr = descr;
274 obj->state = ODEBUG_STATE_NONE;
275 obj->astate = 0;
276 hlist_add_head(&obj->node, &b->list);
277 }
196 return obj; 278 return obj;
197} 279}
198 280
@@ -209,13 +291,19 @@ static void free_obj_work(struct work_struct *work)
209 unsigned long flags; 291 unsigned long flags;
210 HLIST_HEAD(tofree); 292 HLIST_HEAD(tofree);
211 293
294 WRITE_ONCE(obj_freeing, false);
212 if (!raw_spin_trylock_irqsave(&pool_lock, flags)) 295 if (!raw_spin_trylock_irqsave(&pool_lock, flags))
213 return; 296 return;
214 297
298 if (obj_pool_free >= debug_objects_pool_size)
299 goto free_objs;
300
215 /* 301 /*
216 * The objs on the pool list might be allocated before the work is 302 * The objs on the pool list might be allocated before the work is
217 * run, so recheck if pool list it full or not, if not fill pool 303 * run, so recheck if pool list it full or not, if not fill pool
218 * list from the global free list 304 * list from the global free list. As it is likely that a workload
305 * may be gearing up to use more and more objects, don't free any
306 * of them until the next round.
219 */ 307 */
220 while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) { 308 while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
221 obj = hlist_entry(obj_to_free.first, typeof(*obj), node); 309 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
@@ -224,7 +312,10 @@ static void free_obj_work(struct work_struct *work)
224 obj_pool_free++; 312 obj_pool_free++;
225 obj_nr_tofree--; 313 obj_nr_tofree--;
226 } 314 }
315 raw_spin_unlock_irqrestore(&pool_lock, flags);
316 return;
227 317
318free_objs:
228 /* 319 /*
229 * Pool list is already full and there are still objs on the free 320 * Pool list is already full and there are still objs on the free
230 * list. Move remaining free objs to a temporary list to free the 321 * list. Move remaining free objs to a temporary list to free the
@@ -243,24 +334,86 @@ static void free_obj_work(struct work_struct *work)
243 } 334 }
244} 335}
245 336
246static bool __free_object(struct debug_obj *obj) 337static void __free_object(struct debug_obj *obj)
247{ 338{
339 struct debug_obj *objs[ODEBUG_BATCH_SIZE];
340 struct debug_percpu_free *percpu_pool;
341 int lookahead_count = 0;
248 unsigned long flags; 342 unsigned long flags;
249 bool work; 343 bool work;
250 344
251 raw_spin_lock_irqsave(&pool_lock, flags); 345 local_irq_save(flags);
252 work = (obj_pool_free > debug_objects_pool_size) && obj_cache; 346 if (!obj_cache)
347 goto free_to_obj_pool;
348
349 /*
350 * Try to free it into the percpu pool first.
351 */
352 percpu_pool = this_cpu_ptr(&percpu_obj_pool);
353 if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
354 hlist_add_head(&obj->node, &percpu_pool->free_objs);
355 percpu_pool->obj_free++;
356 local_irq_restore(flags);
357 return;
358 }
359
360 /*
361 * As the percpu pool is full, look ahead and pull out a batch
362 * of objects from the percpu pool and free them as well.
363 */
364 for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
365 objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
366 if (!objs[lookahead_count])
367 break;
368 percpu_pool->obj_free--;
369 }
370
371free_to_obj_pool:
372 raw_spin_lock(&pool_lock);
373 work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
374 (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
253 obj_pool_used--; 375 obj_pool_used--;
254 376
255 if (work) { 377 if (work) {
256 obj_nr_tofree++; 378 obj_nr_tofree++;
257 hlist_add_head(&obj->node, &obj_to_free); 379 hlist_add_head(&obj->node, &obj_to_free);
380 if (lookahead_count) {
381 obj_nr_tofree += lookahead_count;
382 obj_pool_used -= lookahead_count;
383 while (lookahead_count) {
384 hlist_add_head(&objs[--lookahead_count]->node,
385 &obj_to_free);
386 }
387 }
388
389 if ((obj_pool_free > debug_objects_pool_size) &&
390 (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
391 int i;
392
393 /*
394 * Free one more batch of objects from obj_pool.
395 */
396 for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
397 obj = __alloc_object(&obj_pool);
398 hlist_add_head(&obj->node, &obj_to_free);
399 obj_pool_free--;
400 obj_nr_tofree++;
401 }
402 }
258 } else { 403 } else {
259 obj_pool_free++; 404 obj_pool_free++;
260 hlist_add_head(&obj->node, &obj_pool); 405 hlist_add_head(&obj->node, &obj_pool);
406 if (lookahead_count) {
407 obj_pool_free += lookahead_count;
408 obj_pool_used -= lookahead_count;
409 while (lookahead_count) {
410 hlist_add_head(&objs[--lookahead_count]->node,
411 &obj_pool);
412 }
413 }
261 } 414 }
262 raw_spin_unlock_irqrestore(&pool_lock, flags); 415 raw_spin_unlock(&pool_lock);
263 return work; 416 local_irq_restore(flags);
264} 417}
265 418
266/* 419/*
@@ -269,8 +422,11 @@ static bool __free_object(struct debug_obj *obj)
269 */ 422 */
270static void free_object(struct debug_obj *obj) 423static void free_object(struct debug_obj *obj)
271{ 424{
272 if (__free_object(obj)) 425 __free_object(obj);
273 schedule_work(&debug_obj_work); 426 if (!obj_freeing && obj_nr_tofree) {
427 WRITE_ONCE(obj_freeing, true);
428 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
429 }
274} 430}
275 431
276/* 432/*
@@ -372,6 +528,7 @@ static void
372__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) 528__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
373{ 529{
374 enum debug_obj_state state; 530 enum debug_obj_state state;
531 bool check_stack = false;
375 struct debug_bucket *db; 532 struct debug_bucket *db;
376 struct debug_obj *obj; 533 struct debug_obj *obj;
377 unsigned long flags; 534 unsigned long flags;
@@ -391,7 +548,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
391 debug_objects_oom(); 548 debug_objects_oom();
392 return; 549 return;
393 } 550 }
394 debug_object_is_on_stack(addr, onstack); 551 check_stack = true;
395 } 552 }
396 553
397 switch (obj->state) { 554 switch (obj->state) {
@@ -402,20 +559,23 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
402 break; 559 break;
403 560
404 case ODEBUG_STATE_ACTIVE: 561 case ODEBUG_STATE_ACTIVE:
405 debug_print_object(obj, "init");
406 state = obj->state; 562 state = obj->state;
407 raw_spin_unlock_irqrestore(&db->lock, flags); 563 raw_spin_unlock_irqrestore(&db->lock, flags);
564 debug_print_object(obj, "init");
408 debug_object_fixup(descr->fixup_init, addr, state); 565 debug_object_fixup(descr->fixup_init, addr, state);
409 return; 566 return;
410 567
411 case ODEBUG_STATE_DESTROYED: 568 case ODEBUG_STATE_DESTROYED:
569 raw_spin_unlock_irqrestore(&db->lock, flags);
412 debug_print_object(obj, "init"); 570 debug_print_object(obj, "init");
413 break; 571 return;
414 default: 572 default:
415 break; 573 break;
416 } 574 }
417 575
418 raw_spin_unlock_irqrestore(&db->lock, flags); 576 raw_spin_unlock_irqrestore(&db->lock, flags);
577 if (check_stack)
578 debug_object_is_on_stack(addr, onstack);
419} 579}
420 580
421/** 581/**
@@ -473,6 +633,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
473 633
474 obj = lookup_object(addr, db); 634 obj = lookup_object(addr, db);
475 if (obj) { 635 if (obj) {
636 bool print_object = false;
637
476 switch (obj->state) { 638 switch (obj->state) {
477 case ODEBUG_STATE_INIT: 639 case ODEBUG_STATE_INIT:
478 case ODEBUG_STATE_INACTIVE: 640 case ODEBUG_STATE_INACTIVE:
@@ -481,14 +643,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
481 break; 643 break;
482 644
483 case ODEBUG_STATE_ACTIVE: 645 case ODEBUG_STATE_ACTIVE:
484 debug_print_object(obj, "activate");
485 state = obj->state; 646 state = obj->state;
486 raw_spin_unlock_irqrestore(&db->lock, flags); 647 raw_spin_unlock_irqrestore(&db->lock, flags);
648 debug_print_object(obj, "activate");
487 ret = debug_object_fixup(descr->fixup_activate, addr, state); 649 ret = debug_object_fixup(descr->fixup_activate, addr, state);
488 return ret ? 0 : -EINVAL; 650 return ret ? 0 : -EINVAL;
489 651
490 case ODEBUG_STATE_DESTROYED: 652 case ODEBUG_STATE_DESTROYED:
491 debug_print_object(obj, "activate"); 653 print_object = true;
492 ret = -EINVAL; 654 ret = -EINVAL;
493 break; 655 break;
494 default: 656 default:
@@ -496,10 +658,13 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
496 break; 658 break;
497 } 659 }
498 raw_spin_unlock_irqrestore(&db->lock, flags); 660 raw_spin_unlock_irqrestore(&db->lock, flags);
661 if (print_object)
662 debug_print_object(obj, "activate");
499 return ret; 663 return ret;
500 } 664 }
501 665
502 raw_spin_unlock_irqrestore(&db->lock, flags); 666 raw_spin_unlock_irqrestore(&db->lock, flags);
667
503 /* 668 /*
504 * We are here when a static object is activated. We 669 * We are here when a static object is activated. We
505 * let the type specific code confirm whether this is 670 * let the type specific code confirm whether this is
@@ -531,6 +696,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
531 struct debug_bucket *db; 696 struct debug_bucket *db;
532 struct debug_obj *obj; 697 struct debug_obj *obj;
533 unsigned long flags; 698 unsigned long flags;
699 bool print_object = false;
534 700
535 if (!debug_objects_enabled) 701 if (!debug_objects_enabled)
536 return; 702 return;
@@ -548,24 +714,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
548 if (!obj->astate) 714 if (!obj->astate)
549 obj->state = ODEBUG_STATE_INACTIVE; 715 obj->state = ODEBUG_STATE_INACTIVE;
550 else 716 else
551 debug_print_object(obj, "deactivate"); 717 print_object = true;
552 break; 718 break;
553 719
554 case ODEBUG_STATE_DESTROYED: 720 case ODEBUG_STATE_DESTROYED:
555 debug_print_object(obj, "deactivate"); 721 print_object = true;
556 break; 722 break;
557 default: 723 default:
558 break; 724 break;
559 } 725 }
560 } else { 726 }
727
728 raw_spin_unlock_irqrestore(&db->lock, flags);
729 if (!obj) {
561 struct debug_obj o = { .object = addr, 730 struct debug_obj o = { .object = addr,
562 .state = ODEBUG_STATE_NOTAVAILABLE, 731 .state = ODEBUG_STATE_NOTAVAILABLE,
563 .descr = descr }; 732 .descr = descr };
564 733
565 debug_print_object(&o, "deactivate"); 734 debug_print_object(&o, "deactivate");
735 } else if (print_object) {
736 debug_print_object(obj, "deactivate");
566 } 737 }
567
568 raw_spin_unlock_irqrestore(&db->lock, flags);
569} 738}
570EXPORT_SYMBOL_GPL(debug_object_deactivate); 739EXPORT_SYMBOL_GPL(debug_object_deactivate);
571 740
@@ -580,6 +749,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
580 struct debug_bucket *db; 749 struct debug_bucket *db;
581 struct debug_obj *obj; 750 struct debug_obj *obj;
582 unsigned long flags; 751 unsigned long flags;
752 bool print_object = false;
583 753
584 if (!debug_objects_enabled) 754 if (!debug_objects_enabled)
585 return; 755 return;
@@ -599,20 +769,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
599 obj->state = ODEBUG_STATE_DESTROYED; 769 obj->state = ODEBUG_STATE_DESTROYED;
600 break; 770 break;
601 case ODEBUG_STATE_ACTIVE: 771 case ODEBUG_STATE_ACTIVE:
602 debug_print_object(obj, "destroy");
603 state = obj->state; 772 state = obj->state;
604 raw_spin_unlock_irqrestore(&db->lock, flags); 773 raw_spin_unlock_irqrestore(&db->lock, flags);
774 debug_print_object(obj, "destroy");
605 debug_object_fixup(descr->fixup_destroy, addr, state); 775 debug_object_fixup(descr->fixup_destroy, addr, state);
606 return; 776 return;
607 777
608 case ODEBUG_STATE_DESTROYED: 778 case ODEBUG_STATE_DESTROYED:
609 debug_print_object(obj, "destroy"); 779 print_object = true;
610 break; 780 break;
611 default: 781 default:
612 break; 782 break;
613 } 783 }
614out_unlock: 784out_unlock:
615 raw_spin_unlock_irqrestore(&db->lock, flags); 785 raw_spin_unlock_irqrestore(&db->lock, flags);
786 if (print_object)
787 debug_print_object(obj, "destroy");
616} 788}
617EXPORT_SYMBOL_GPL(debug_object_destroy); 789EXPORT_SYMBOL_GPL(debug_object_destroy);
618 790
@@ -641,9 +813,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
641 813
642 switch (obj->state) { 814 switch (obj->state) {
643 case ODEBUG_STATE_ACTIVE: 815 case ODEBUG_STATE_ACTIVE:
644 debug_print_object(obj, "free");
645 state = obj->state; 816 state = obj->state;
646 raw_spin_unlock_irqrestore(&db->lock, flags); 817 raw_spin_unlock_irqrestore(&db->lock, flags);
818 debug_print_object(obj, "free");
647 debug_object_fixup(descr->fixup_free, addr, state); 819 debug_object_fixup(descr->fixup_free, addr, state);
648 return; 820 return;
649 default: 821 default:
@@ -716,6 +888,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
716 struct debug_bucket *db; 888 struct debug_bucket *db;
717 struct debug_obj *obj; 889 struct debug_obj *obj;
718 unsigned long flags; 890 unsigned long flags;
891 bool print_object = false;
719 892
720 if (!debug_objects_enabled) 893 if (!debug_objects_enabled)
721 return; 894 return;
@@ -731,22 +904,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
731 if (obj->astate == expect) 904 if (obj->astate == expect)
732 obj->astate = next; 905 obj->astate = next;
733 else 906 else
734 debug_print_object(obj, "active_state"); 907 print_object = true;
735 break; 908 break;
736 909
737 default: 910 default:
738 debug_print_object(obj, "active_state"); 911 print_object = true;
739 break; 912 break;
740 } 913 }
741 } else { 914 }
915
916 raw_spin_unlock_irqrestore(&db->lock, flags);
917 if (!obj) {
742 struct debug_obj o = { .object = addr, 918 struct debug_obj o = { .object = addr,
743 .state = ODEBUG_STATE_NOTAVAILABLE, 919 .state = ODEBUG_STATE_NOTAVAILABLE,
744 .descr = descr }; 920 .descr = descr };
745 921
746 debug_print_object(&o, "active_state"); 922 debug_print_object(&o, "active_state");
923 } else if (print_object) {
924 debug_print_object(obj, "active_state");
747 } 925 }
748
749 raw_spin_unlock_irqrestore(&db->lock, flags);
750} 926}
751EXPORT_SYMBOL_GPL(debug_object_active_state); 927EXPORT_SYMBOL_GPL(debug_object_active_state);
752 928
@@ -760,7 +936,6 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
760 struct hlist_node *tmp; 936 struct hlist_node *tmp;
761 struct debug_obj *obj; 937 struct debug_obj *obj;
762 int cnt, objs_checked = 0; 938 int cnt, objs_checked = 0;
763 bool work = false;
764 939
765 saddr = (unsigned long) address; 940 saddr = (unsigned long) address;
766 eaddr = saddr + size; 941 eaddr = saddr + size;
@@ -782,16 +957,16 @@ repeat:
782 957
783 switch (obj->state) { 958 switch (obj->state) {
784 case ODEBUG_STATE_ACTIVE: 959 case ODEBUG_STATE_ACTIVE:
785 debug_print_object(obj, "free");
786 descr = obj->descr; 960 descr = obj->descr;
787 state = obj->state; 961 state = obj->state;
788 raw_spin_unlock_irqrestore(&db->lock, flags); 962 raw_spin_unlock_irqrestore(&db->lock, flags);
963 debug_print_object(obj, "free");
789 debug_object_fixup(descr->fixup_free, 964 debug_object_fixup(descr->fixup_free,
790 (void *) oaddr, state); 965 (void *) oaddr, state);
791 goto repeat; 966 goto repeat;
792 default: 967 default:
793 hlist_del(&obj->node); 968 hlist_del(&obj->node);
794 work |= __free_object(obj); 969 __free_object(obj);
795 break; 970 break;
796 } 971 }
797 } 972 }
@@ -807,8 +982,10 @@ repeat:
807 debug_objects_maxchecked = objs_checked; 982 debug_objects_maxchecked = objs_checked;
808 983
809 /* Schedule work to actually kmem_cache_free() objects */ 984 /* Schedule work to actually kmem_cache_free() objects */
810 if (work) 985 if (!obj_freeing && obj_nr_tofree) {
811 schedule_work(&debug_obj_work); 986 WRITE_ONCE(obj_freeing, true);
987 schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
988 }
812} 989}
813 990
814void debug_check_no_obj_freed(const void *address, unsigned long size) 991void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -822,13 +999,19 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
822 999
823static int debug_stats_show(struct seq_file *m, void *v) 1000static int debug_stats_show(struct seq_file *m, void *v)
824{ 1001{
1002 int cpu, obj_percpu_free = 0;
1003
1004 for_each_possible_cpu(cpu)
1005 obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1006
825 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); 1007 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
826 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked); 1008 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
827 seq_printf(m, "warnings :%d\n", debug_objects_warnings); 1009 seq_printf(m, "warnings :%d\n", debug_objects_warnings);
828 seq_printf(m, "fixups :%d\n", debug_objects_fixups); 1010 seq_printf(m, "fixups :%d\n", debug_objects_fixups);
829 seq_printf(m, "pool_free :%d\n", obj_pool_free); 1011 seq_printf(m, "pool_free :%d\n", obj_pool_free + obj_percpu_free);
1012 seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
830 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); 1013 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
831 seq_printf(m, "pool_used :%d\n", obj_pool_used); 1014 seq_printf(m, "pool_used :%d\n", obj_pool_used - obj_percpu_free);
832 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); 1015 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
833 seq_printf(m, "on_free_list :%d\n", obj_nr_tofree); 1016 seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
834 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); 1017 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
@@ -850,26 +1033,16 @@ static const struct file_operations debug_stats_fops = {
850 1033
851static int __init debug_objects_init_debugfs(void) 1034static int __init debug_objects_init_debugfs(void)
852{ 1035{
853 struct dentry *dbgdir, *dbgstats; 1036 struct dentry *dbgdir;
854 1037
855 if (!debug_objects_enabled) 1038 if (!debug_objects_enabled)
856 return 0; 1039 return 0;
857 1040
858 dbgdir = debugfs_create_dir("debug_objects", NULL); 1041 dbgdir = debugfs_create_dir("debug_objects", NULL);
859 if (!dbgdir)
860 return -ENOMEM;
861 1042
862 dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL, 1043 debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
863 &debug_stats_fops);
864 if (!dbgstats)
865 goto err;
866 1044
867 return 0; 1045 return 0;
868
869err:
870 debugfs_remove(dbgdir);
871
872 return -ENOMEM;
873} 1046}
874__initcall(debug_objects_init_debugfs); 1047__initcall(debug_objects_init_debugfs);
875 1048
@@ -1175,9 +1348,20 @@ free:
1175 */ 1348 */
1176void __init debug_objects_mem_init(void) 1349void __init debug_objects_mem_init(void)
1177{ 1350{
1351 int cpu, extras;
1352
1178 if (!debug_objects_enabled) 1353 if (!debug_objects_enabled)
1179 return; 1354 return;
1180 1355
1356 /*
1357 * Initialize the percpu object pools
1358 *
1359 * Initialization is not strictly necessary, but was done for
1360 * completeness.
1361 */
1362 for_each_possible_cpu(cpu)
1363 INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1364
1181 obj_cache = kmem_cache_create("debug_objects_cache", 1365 obj_cache = kmem_cache_create("debug_objects_cache",
1182 sizeof (struct debug_obj), 0, 1366 sizeof (struct debug_obj), 0,
1183 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, 1367 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1194,6 +1378,7 @@ void __init debug_objects_mem_init(void)
1194 * Increase the thresholds for allocating and freeing objects 1378 * Increase the thresholds for allocating and freeing objects
1195 * according to the number of possible CPUs available in the system. 1379 * according to the number of possible CPUs available in the system.
1196 */ 1380 */
1197 debug_objects_pool_size += num_possible_cpus() * 32; 1381 extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1198 debug_objects_pool_min_level += num_possible_cpus() * 4; 1382 debug_objects_pool_size += extras;
1383 debug_objects_pool_min_level += extras;
1199} 1384}
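The new constants above bound the background freeing: ODEBUG_FREE_WORK_DELAY is HZ/10 and ODEBUG_FREE_WORK_MAX is 1024, so the delayed worker runs at most about ten times a second and frees roughly 10 * 1024 = ~10k objects per second, as the comment in the hunk states. None of this changes the external API; for orientation only, a minimal hypothetical user of the tracker looks roughly like the sketch below (the .name descriptor field is an assumption, since only the fixup callbacks appear in this diff):

    #include <linux/debugobjects.h>

    struct my_thing {
            int whatever;
    };

    static struct debug_obj_descr my_thing_debug_descr = {
            .name = "my_thing",             /* assumed field, used when printing warnings */
    };

    static void my_thing_setup(struct my_thing *t)
    {
            debug_object_init(t, &my_thing_debug_descr);      /* tracking objects now come from the per-CPU pool first */
            debug_object_activate(t, &my_thing_debug_descr);
    }

    static void my_thing_teardown(struct my_thing *t)
    {
            debug_object_deactivate(t, &my_thing_debug_descr);
            debug_object_free(t, &my_thing_debug_descr);      /* refills the per-CPU pool; batches of 16 spill to the global pool */
    }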
diff --git a/lib/devres.c b/lib/devres.c
index 69bed2f38306..6a0e9bd6524a 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
131 * if (IS_ERR(base)) 131 * if (IS_ERR(base))
132 * return PTR_ERR(base); 132 * return PTR_ERR(base);
133 */ 133 */
134void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) 134void __iomem *devm_ioremap_resource(struct device *dev,
135 const struct resource *res)
135{ 136{
136 resource_size_t size; 137 resource_size_t size;
137 void __iomem *dest_ptr; 138 void __iomem *dest_ptr;
diff --git a/lib/digsig.c b/lib/digsig.c
index 3cf89c775ab2..e0627c3e53b2 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -218,7 +218,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
218 /* search in specific keyring */ 218 /* search in specific keyring */
219 key_ref_t kref; 219 key_ref_t kref;
220 kref = keyring_search(make_key_ref(keyring, 1UL), 220 kref = keyring_search(make_key_ref(keyring, 1UL),
221 &key_type_user, name); 221 &key_type_user, name, true);
222 if (IS_ERR(kref)) 222 if (IS_ERR(kref))
223 key = ERR_CAST(kref); 223 key = ERR_CAST(kref);
224 else 224 else
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
new file mode 100644
index 000000000000..1d6858a108cb
--- /dev/null
+++ b/lib/dim/Makefile
@@ -0,0 +1,7 @@
1#
2# DIM Dynamic Interrupt Moderation library
3#
4
5obj-$(CONFIG_DIMLIB) += dim.o
6
7dim-y := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
new file mode 100644
index 000000000000..439d641ec796
--- /dev/null
+++ b/lib/dim/dim.c
@@ -0,0 +1,83 @@
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <linux/dim.h>
7
8bool dim_on_top(struct dim *dim)
9{
10 switch (dim->tune_state) {
11 case DIM_PARKING_ON_TOP:
12 case DIM_PARKING_TIRED:
13 return true;
14 case DIM_GOING_RIGHT:
15 return (dim->steps_left > 1) && (dim->steps_right == 1);
16 default: /* DIM_GOING_LEFT */
17 return (dim->steps_right > 1) && (dim->steps_left == 1);
18 }
19}
20EXPORT_SYMBOL(dim_on_top);
21
22void dim_turn(struct dim *dim)
23{
24 switch (dim->tune_state) {
25 case DIM_PARKING_ON_TOP:
26 case DIM_PARKING_TIRED:
27 break;
28 case DIM_GOING_RIGHT:
29 dim->tune_state = DIM_GOING_LEFT;
30 dim->steps_left = 0;
31 break;
32 case DIM_GOING_LEFT:
33 dim->tune_state = DIM_GOING_RIGHT;
34 dim->steps_right = 0;
35 break;
36 }
37}
38EXPORT_SYMBOL(dim_turn);
39
40void dim_park_on_top(struct dim *dim)
41{
42 dim->steps_right = 0;
43 dim->steps_left = 0;
44 dim->tired = 0;
45 dim->tune_state = DIM_PARKING_ON_TOP;
46}
47EXPORT_SYMBOL(dim_park_on_top);
48
49void dim_park_tired(struct dim *dim)
50{
51 dim->steps_right = 0;
52 dim->steps_left = 0;
53 dim->tune_state = DIM_PARKING_TIRED;
54}
55EXPORT_SYMBOL(dim_park_tired);
56
57void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
58 struct dim_stats *curr_stats)
59{
60 /* u32 holds up to 71 minutes, should be enough */
61 u32 delta_us = ktime_us_delta(end->time, start->time);
62 u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
63 u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
64 start->byte_ctr);
65 u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
66 start->comp_ctr);
67
68 if (!delta_us)
69 return;
70
71 curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
72 curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
73 curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
74 delta_us);
75 curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
76 if (curr_stats->epms != 0)
77 curr_stats->cpe_ratio =
78 (curr_stats->cpms * 100) / curr_stats->epms;
79 else
80 curr_stats->cpe_ratio = 0;
81
82}
83EXPORT_SYMBOL(dim_calc_stats);
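dim_calc_stats() above is the common core shared by the two policies that follow: it turns a pair of raw samples into per-millisecond rates (ppms, bpms, cpms, epms) and a completions-per-event ratio. A small, hypothetical illustration of feeding it — a real driver would snapshot its own counters, and ktime_get() for the timestamp is an assumption consistent with the ktime_us_delta() call above:

    #include <linux/dim.h>
    #include <linux/ktime.h>

    static void example_calc(struct dim_sample *start, u16 events,
                             u32 packets, u32 bytes, u32 completions)
    {
            struct dim_sample end = {
                    .time      = ktime_get(),
                    .pkt_ctr   = packets,
                    .byte_ctr  = bytes,
                    .event_ctr = events,
                    .comp_ctr  = completions,
            };
            struct dim_stats stats = {};

            dim_calc_stats(start, &end, &stats);
            /* stats stays all-zero if the two samples share a timestamp,
             * because dim_calc_stats() returns early when delta_us == 0. */
    }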
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000000..5bcc902c5388
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,190 @@
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <linux/dim.h>
7
8struct dim_cq_moder
9net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
10{
11 struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
12
13 cq_moder.cq_period_mode = cq_period_mode;
14 return cq_moder;
15}
16EXPORT_SYMBOL(net_dim_get_rx_moderation);
17
18struct dim_cq_moder
19net_dim_get_def_rx_moderation(u8 cq_period_mode)
20{
21 u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
22 NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
23
24 return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
25}
26EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
27
28struct dim_cq_moder
29net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
30{
31 struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
32
33 cq_moder.cq_period_mode = cq_period_mode;
34 return cq_moder;
35}
36EXPORT_SYMBOL(net_dim_get_tx_moderation);
37
38struct dim_cq_moder
39net_dim_get_def_tx_moderation(u8 cq_period_mode)
40{
41 u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
42 NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
43
44 return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
45}
46EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
47
48static int net_dim_step(struct dim *dim)
49{
50 if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
51 return DIM_TOO_TIRED;
52
53 switch (dim->tune_state) {
54 case DIM_PARKING_ON_TOP:
55 case DIM_PARKING_TIRED:
56 break;
57 case DIM_GOING_RIGHT:
58 if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
59 return DIM_ON_EDGE;
60 dim->profile_ix++;
61 dim->steps_right++;
62 break;
63 case DIM_GOING_LEFT:
64 if (dim->profile_ix == 0)
65 return DIM_ON_EDGE;
66 dim->profile_ix--;
67 dim->steps_left++;
68 break;
69 }
70
71 dim->tired++;
72 return DIM_STEPPED;
73}
74
75static void net_dim_exit_parking(struct dim *dim)
76{
77 dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
78 net_dim_step(dim);
79}
80
81static int net_dim_stats_compare(struct dim_stats *curr,
82 struct dim_stats *prev)
83{
84 if (!prev->bpms)
85 return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
86
87 if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
88 return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
89 DIM_STATS_WORSE;
90
91 if (!prev->ppms)
92 return curr->ppms ? DIM_STATS_BETTER :
93 DIM_STATS_SAME;
94
95 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
96 return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
97 DIM_STATS_WORSE;
98
99 if (!prev->epms)
100 return DIM_STATS_SAME;
101
102 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
103 return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
104 DIM_STATS_WORSE;
105
106 return DIM_STATS_SAME;
107}
108
109static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
110{
111 int prev_state = dim->tune_state;
112 int prev_ix = dim->profile_ix;
113 int stats_res;
114 int step_res;
115
116 switch (dim->tune_state) {
117 case DIM_PARKING_ON_TOP:
118 stats_res = net_dim_stats_compare(curr_stats,
119 &dim->prev_stats);
120 if (stats_res != DIM_STATS_SAME)
121 net_dim_exit_parking(dim);
122 break;
123
124 case DIM_PARKING_TIRED:
125 dim->tired--;
126 if (!dim->tired)
127 net_dim_exit_parking(dim);
128 break;
129
130 case DIM_GOING_RIGHT:
131 case DIM_GOING_LEFT:
132 stats_res = net_dim_stats_compare(curr_stats,
133 &dim->prev_stats);
134 if (stats_res != DIM_STATS_BETTER)
135 dim_turn(dim);
136
137 if (dim_on_top(dim)) {
138 dim_park_on_top(dim);
139 break;
140 }
141
142 step_res = net_dim_step(dim);
143 switch (step_res) {
144 case DIM_ON_EDGE:
145 dim_park_on_top(dim);
146 break;
147 case DIM_TOO_TIRED:
148 dim_park_tired(dim);
149 break;
150 }
151
152 break;
153 }
154
155 if (prev_state != DIM_PARKING_ON_TOP ||
156 dim->tune_state != DIM_PARKING_ON_TOP)
157 dim->prev_stats = *curr_stats;
158
159 return dim->profile_ix != prev_ix;
160}
161
162void net_dim(struct dim *dim, struct dim_sample end_sample)
163{
164 struct dim_stats curr_stats;
165 u16 nevents;
166
167 switch (dim->state) {
168 case DIM_MEASURE_IN_PROGRESS:
169 nevents = BIT_GAP(BITS_PER_TYPE(u16),
170 end_sample.event_ctr,
171 dim->start_sample.event_ctr);
172 if (nevents < DIM_NEVENTS)
173 break;
174 dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
175 if (net_dim_decision(&curr_stats, dim)) {
176 dim->state = DIM_APPLY_NEW_PROFILE;
177 schedule_work(&dim->work);
178 break;
179 }
180 /* fall through */
181 case DIM_START_MEASURE:
182 dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
183 end_sample.byte_ctr, &dim->start_sample);
184 dim->state = DIM_MEASURE_IN_PROGRESS;
185 break;
186 case DIM_APPLY_NEW_PROFILE:
187 break;
188 }
189}
190EXPORT_SYMBOL(net_dim);
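net_dim() is the Ethernet-side policy: a driver feeds it one sample per interrupt/NAPI cycle, and when a better profile is found the algorithm schedules dim->work so the driver can reprogram its completion queue outside the hot path. A hedged sketch of the expected call sites follows; struct example_ring and its counters are invented, and dim->mode holding the cq_period_mode is an assumption that is not part of this diff:

    #include <linux/dim.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct example_ring {
            u16 events;
            u32 packets;
            u32 bytes;
            struct dim dim;         /* dim.work initialized elsewhere with INIT_WORK() */
    };

    static void example_napi_done(struct example_ring *ring)
    {
            struct dim_sample sample;

            dim_update_sample(ring->events, ring->packets, ring->bytes, &sample);
            net_dim(&ring->dim, sample);    /* may set DIM_APPLY_NEW_PROFILE and schedule dim.work */
    }

    static void example_dim_work(struct work_struct *work)
    {
            struct dim *dim = container_of(work, struct dim, work);
            struct dim_cq_moder m =
                    net_dim_get_rx_moderation(dim->mode, dim->profile_ix); /* dim->mode: assumed cq_period_mode */

            /* ... program the returned moderation values into the NIC ... */
            dim->state = DIM_START_MEASURE;
    }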
diff --git a/lib/dim/rdma_dim.c b/lib/dim/rdma_dim.c
new file mode 100644
index 000000000000..f7e26c7b4749
--- /dev/null
+++ b/lib/dim/rdma_dim.c
@@ -0,0 +1,108 @@
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <linux/dim.h>
7
8static int rdma_dim_step(struct dim *dim)
9{
10 if (dim->tune_state == DIM_GOING_RIGHT) {
11 if (dim->profile_ix == (RDMA_DIM_PARAMS_NUM_PROFILES - 1))
12 return DIM_ON_EDGE;
13 dim->profile_ix++;
14 dim->steps_right++;
15 }
16 if (dim->tune_state == DIM_GOING_LEFT) {
17 if (dim->profile_ix == 0)
18 return DIM_ON_EDGE;
19 dim->profile_ix--;
20 dim->steps_left++;
21 }
22
23 return DIM_STEPPED;
24}
25
26static int rdma_dim_stats_compare(struct dim_stats *curr,
27 struct dim_stats *prev)
28{
29 /* first stat */
30 if (!prev->cpms)
31 return DIM_STATS_SAME;
32
33 if (IS_SIGNIFICANT_DIFF(curr->cpms, prev->cpms))
34 return (curr->cpms > prev->cpms) ? DIM_STATS_BETTER :
35 DIM_STATS_WORSE;
36
37 if (IS_SIGNIFICANT_DIFF(curr->cpe_ratio, prev->cpe_ratio))
38 return (curr->cpe_ratio > prev->cpe_ratio) ? DIM_STATS_BETTER :
39 DIM_STATS_WORSE;
40
41 return DIM_STATS_SAME;
42}
43
44static bool rdma_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
45{
46 int prev_ix = dim->profile_ix;
47 u8 state = dim->tune_state;
48 int stats_res;
49 int step_res;
50
51 if (state != DIM_PARKING_ON_TOP && state != DIM_PARKING_TIRED) {
52 stats_res = rdma_dim_stats_compare(curr_stats,
53 &dim->prev_stats);
54
55 switch (stats_res) {
56 case DIM_STATS_SAME:
57 if (curr_stats->cpe_ratio <= 50 * prev_ix)
58 dim->profile_ix = 0;
59 break;
60 case DIM_STATS_WORSE:
61 dim_turn(dim);
62 /* fall through */
63 case DIM_STATS_BETTER:
64 step_res = rdma_dim_step(dim);
65 if (step_res == DIM_ON_EDGE)
66 dim_turn(dim);
67 break;
68 }
69 }
70
71 dim->prev_stats = *curr_stats;
72
73 return dim->profile_ix != prev_ix;
74}
75
76void rdma_dim(struct dim *dim, u64 completions)
77{
78 struct dim_sample *curr_sample = &dim->measuring_sample;
79 struct dim_stats curr_stats;
80 u32 nevents;
81
82 dim_update_sample_with_comps(curr_sample->event_ctr + 1, 0, 0,
83 curr_sample->comp_ctr + completions,
84 &dim->measuring_sample);
85
86 switch (dim->state) {
87 case DIM_MEASURE_IN_PROGRESS:
88 nevents = curr_sample->event_ctr - dim->start_sample.event_ctr;
89 if (nevents < DIM_NEVENTS)
90 break;
91 dim_calc_stats(&dim->start_sample, curr_sample, &curr_stats);
92 if (rdma_dim_decision(&curr_stats, dim)) {
93 dim->state = DIM_APPLY_NEW_PROFILE;
94 schedule_work(&dim->work);
95 break;
96 }
97 /* fall through */
98 case DIM_START_MEASURE:
99 dim->state = DIM_MEASURE_IN_PROGRESS;
100 dim_update_sample_with_comps(curr_sample->event_ctr, 0, 0,
101 curr_sample->comp_ctr,
102 &dim->start_sample);
103 break;
104 case DIM_APPLY_NEW_PROFILE:
105 break;
106 }
107}
108EXPORT_SYMBOL(rdma_dim);
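
rdma_dim() is the RDMA-side counterpart and is fed completion counts rather than packet/byte counters. A minimal sketch of a CQ polling path calling it follows; struct my_cq and the budget handling are hypothetical, only rdma_dim() itself comes from this file.

#include <linux/dim.h>
#include <rdma/ib_verbs.h>

/* hypothetical CQ wrapper */
struct my_cq {
	struct ib_cq *ibcq;
	struct dim *dim;	/* NULL if this CQ opted out of DIM */
};

static void my_cq_poll(struct my_cq *cq, int budget)
{
	/* poll up to 'budget' completions in the caller's context */
	int completed = ib_process_cq_direct(cq->ibcq, budget);

	/* feed the number of completions seen in this event to DIM */
	if (cq->dim)
		rdma_dim(cq->dim, completed);
}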
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..c60409138e13 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -993,20 +993,14 @@ static __initdata int ddebug_init_success;
993 993
994static int __init dynamic_debug_init_debugfs(void) 994static int __init dynamic_debug_init_debugfs(void)
995{ 995{
996 struct dentry *dir, *file; 996 struct dentry *dir;
997 997
998 if (!ddebug_init_success) 998 if (!ddebug_init_success)
999 return -ENODEV; 999 return -ENODEV;
1000 1000
1001 dir = debugfs_create_dir("dynamic_debug", NULL); 1001 dir = debugfs_create_dir("dynamic_debug", NULL);
1002 if (!dir) 1002 debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
1003 return -ENOMEM; 1003
1004 file = debugfs_create_file("control", 0644, dir, NULL,
1005 &ddebug_proc_fops);
1006 if (!file) {
1007 debugfs_remove(dir);
1008 return -ENOMEM;
1009 }
1010 return 0; 1004 return 0;
1011} 1005}
1012 1006
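
The dynamic_debug hunk above is one instance of a pattern repeated throughout this series: the return values of debugfs_create_dir() and debugfs_create_file() are no longer checked, because debugfs creation failures are not fatal and the API copes with ERR_PTR parents. A minimal sketch of the resulting idiom (my_feature, my_dir and my_fops are placeholders, not from this patch):

#include <linux/debugfs.h>

static struct dentry *my_dir;			/* hypothetical */
static const struct file_operations my_fops;	/* hypothetical */

static int __init my_debugfs_init(void)
{
	/* no error checking: if debugfs is unavailable or creation fails,
	 * the files simply do not appear and the feature keeps working */
	my_dir = debugfs_create_dir("my_feature", NULL);
	debugfs_create_file("control", 0644, my_dir, NULL, &my_fops);
	return 0;
}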
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..8186ca84910b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -166,10 +166,10 @@ static int debugfs_ul_get(void *data, u64 *val)
166 166
167DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); 167DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
168 168
169static struct dentry *debugfs_create_ul(const char *name, umode_t mode, 169static void debugfs_create_ul(const char *name, umode_t mode,
170 struct dentry *parent, unsigned long *value) 170 struct dentry *parent, unsigned long *value)
171{ 171{
172 return debugfs_create_file(name, mode, parent, value, &fops_ul); 172 debugfs_create_file(name, mode, parent, value, &fops_ul);
173} 173}
174 174
175#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER 175#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +185,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
185DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get, 185DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
186 debugfs_stacktrace_depth_set, "%llu\n"); 186 debugfs_stacktrace_depth_set, "%llu\n");
187 187
188static struct dentry *debugfs_create_stacktrace_depth( 188static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
189 const char *name, umode_t mode, 189 struct dentry *parent,
190 struct dentry *parent, unsigned long *value) 190 unsigned long *value)
191{ 191{
192 return debugfs_create_file(name, mode, parent, value, 192 debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
193 &fops_stacktrace_depth);
194} 193}
195 194
196#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ 195#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,51 +201,31 @@ struct dentry *fault_create_debugfs_attr(const char *name,
202 struct dentry *dir; 201 struct dentry *dir;
203 202
204 dir = debugfs_create_dir(name, parent); 203 dir = debugfs_create_dir(name, parent);
205 if (!dir) 204 if (IS_ERR(dir))
206 return ERR_PTR(-ENOMEM); 205 return dir;
207 206
208 if (!debugfs_create_ul("probability", mode, dir, &attr->probability)) 207 debugfs_create_ul("probability", mode, dir, &attr->probability);
209 goto fail; 208 debugfs_create_ul("interval", mode, dir, &attr->interval);
210 if (!debugfs_create_ul("interval", mode, dir, &attr->interval)) 209 debugfs_create_atomic_t("times", mode, dir, &attr->times);
211 goto fail; 210 debugfs_create_atomic_t("space", mode, dir, &attr->space);
212 if (!debugfs_create_atomic_t("times", mode, dir, &attr->times)) 211 debugfs_create_ul("verbose", mode, dir, &attr->verbose);
213 goto fail; 212 debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
214 if (!debugfs_create_atomic_t("space", mode, dir, &attr->space)) 213 &attr->ratelimit_state.interval);
215 goto fail; 214 debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
216 if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose)) 215 &attr->ratelimit_state.burst);
217 goto fail; 216 debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
218 if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
219 &attr->ratelimit_state.interval))
220 goto fail;
221 if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
222 &attr->ratelimit_state.burst))
223 goto fail;
224 if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
225 goto fail;
226 217
227#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER 218#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
228 219 debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
229 if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir, 220 &attr->stacktrace_depth);
230 &attr->stacktrace_depth)) 221 debugfs_create_ul("require-start", mode, dir, &attr->require_start);
231 goto fail; 222 debugfs_create_ul("require-end", mode, dir, &attr->require_end);
232 if (!debugfs_create_ul("require-start", mode, dir, 223 debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
233 &attr->require_start)) 224 debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
234 goto fail;
235 if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
236 goto fail;
237 if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
238 goto fail;
239 if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
240 goto fail;
241
242#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ 225#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
243 226
244 attr->dname = dget(dir); 227 attr->dname = dget(dir);
245 return dir; 228 return dir;
246fail:
247 debugfs_remove_recursive(dir);
248
249 return ERR_PTR(-ENOMEM);
250} 229}
251EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); 230EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
252 231
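
Callers of fault_create_debugfs_attr() still receive a meaningful return value; only the per-file checks disappear. A hedged sketch of a caller, assuming the usual DECLARE_FAULT_ATTR() setup (fail_my_op is a placeholder name):

#include <linux/fault-inject.h>

static DECLARE_FAULT_ATTR(fail_my_op);	/* hypothetical fault attribute */

static int __init my_fault_init(void)
{
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_my_op", NULL, &fail_my_op);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	return 0;
}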
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..e7258d8c252b 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,42 @@
20#endif 20#endif
21#include <linux/font.h> 21#include <linux/font.h>
22 22
23#define NO_FONTS
24
25static const struct font_desc *fonts[] = { 23static const struct font_desc *fonts[] = {
26#ifdef CONFIG_FONT_8x8 24#ifdef CONFIG_FONT_8x8
27#undef NO_FONTS 25 &font_vga_8x8,
28 &font_vga_8x8,
29#endif 26#endif
30#ifdef CONFIG_FONT_8x16 27#ifdef CONFIG_FONT_8x16
31#undef NO_FONTS 28 &font_vga_8x16,
32 &font_vga_8x16,
33#endif 29#endif
34#ifdef CONFIG_FONT_6x11 30#ifdef CONFIG_FONT_6x11
35#undef NO_FONTS 31 &font_vga_6x11,
36 &font_vga_6x11,
37#endif 32#endif
38#ifdef CONFIG_FONT_7x14 33#ifdef CONFIG_FONT_7x14
39#undef NO_FONTS 34 &font_7x14,
40 &font_7x14,
41#endif 35#endif
42#ifdef CONFIG_FONT_SUN8x16 36#ifdef CONFIG_FONT_SUN8x16
43#undef NO_FONTS 37 &font_sun_8x16,
44 &font_sun_8x16,
45#endif 38#endif
46#ifdef CONFIG_FONT_SUN12x22 39#ifdef CONFIG_FONT_SUN12x22
47#undef NO_FONTS 40 &font_sun_12x22,
48 &font_sun_12x22,
49#endif 41#endif
50#ifdef CONFIG_FONT_10x18 42#ifdef CONFIG_FONT_10x18
51#undef NO_FONTS 43 &font_10x18,
52 &font_10x18,
53#endif 44#endif
54#ifdef CONFIG_FONT_ACORN_8x8 45#ifdef CONFIG_FONT_ACORN_8x8
55#undef NO_FONTS 46 &font_acorn_8x8,
56 &font_acorn_8x8,
57#endif 47#endif
58#ifdef CONFIG_FONT_PEARL_8x8 48#ifdef CONFIG_FONT_PEARL_8x8
59#undef NO_FONTS 49 &font_pearl_8x8,
60 &font_pearl_8x8,
61#endif 50#endif
62#ifdef CONFIG_FONT_MINI_4x6 51#ifdef CONFIG_FONT_MINI_4x6
63#undef NO_FONTS 52 &font_mini_4x6,
64 &font_mini_4x6,
65#endif 53#endif
66#ifdef CONFIG_FONT_6x10 54#ifdef CONFIG_FONT_6x10
67#undef NO_FONTS 55 &font_6x10,
68 &font_6x10,
69#endif 56#endif
70#ifdef CONFIG_FONT_TER16x32 57#ifdef CONFIG_FONT_TER16x32
71#undef NO_FONTS 58 &font_ter_16x32,
72 &font_ter_16x32,
73#endif 59#endif
74}; 60};
75 61
@@ -90,16 +76,17 @@ static const struct font_desc *fonts[] = {
90 * specified font. 76 * specified font.
91 * 77 *
92 */ 78 */
93
94const struct font_desc *find_font(const char *name) 79const struct font_desc *find_font(const char *name)
95{ 80{
96 unsigned int i; 81 unsigned int i;
97 82
98 for (i = 0; i < num_fonts; i++) 83 BUILD_BUG_ON(!num_fonts);
99 if (!strcmp(fonts[i]->name, name)) 84 for (i = 0; i < num_fonts; i++)
100 return fonts[i]; 85 if (!strcmp(fonts[i]->name, name))
101 return NULL; 86 return fonts[i];
87 return NULL;
102} 88}
89EXPORT_SYMBOL(find_font);
103 90
104 91
105/** 92/**
@@ -116,44 +103,46 @@ const struct font_desc *find_font(const char *name)
116 * chosen font. 103 * chosen font.
117 * 104 *
118 */ 105 */
119
120const struct font_desc *get_default_font(int xres, int yres, u32 font_w, 106const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
121 u32 font_h) 107 u32 font_h)
122{ 108{
123 int i, c, cc; 109 int i, c, cc, res;
124 const struct font_desc *f, *g; 110 const struct font_desc *f, *g;
125 111
126 g = NULL; 112 g = NULL;
127 cc = -10000; 113 cc = -10000;
128 for(i=0; i<num_fonts; i++) { 114 for (i = 0; i < num_fonts; i++) {
129 f = fonts[i]; 115 f = fonts[i];
130 c = f->pref; 116 c = f->pref;
131#if defined(__mc68000__) 117#if defined(__mc68000__)
132#ifdef CONFIG_FONT_PEARL_8x8 118#ifdef CONFIG_FONT_PEARL_8x8
133 if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX) 119 if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
134 c = 100; 120 c = 100;
135#endif 121#endif
136#ifdef CONFIG_FONT_6x11 122#ifdef CONFIG_FONT_6x11
137 if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX) 123 if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
138 c = 100; 124 c = 100;
139#endif 125#endif
140#endif 126#endif
141 if ((yres < 400) == (f->height <= 8)) 127 if ((yres < 400) == (f->height <= 8))
142 c += 1000; 128 c += 1000;
129
130 /* prefer a bigger font for high resolution */
131 res = (xres / f->width) * (yres / f->height) / 1000;
132 if (res > 20)
133 c += 20 - res;
143 134
144 if ((font_w & (1 << (f->width - 1))) && 135 if ((font_w & (1 << (f->width - 1))) &&
145 (font_h & (1 << (f->height - 1)))) 136 (font_h & (1 << (f->height - 1))))
146 c += 1000; 137 c += 1000;
147 138
148 if (c > cc) { 139 if (c > cc) {
149 cc = c; 140 cc = c;
150 g = f; 141 g = f;
142 }
151 } 143 }
152 } 144 return g;
153 return g;
154} 145}
155
156EXPORT_SYMBOL(find_font);
157EXPORT_SYMBOL(get_default_font); 146EXPORT_SYMBOL(get_default_font);
158 147
159MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); 148MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
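
The BUILD_BUG_ON(!num_fonts) above replaces the old NO_FONTS preprocessor dance: with at least one CONFIG_FONT_* selected the array can never be empty, and that is now enforced at compile time. For reference, a small sketch of how a console driver typically uses these two helpers; xres/yres are whatever the driver probed, and the font name is only an example.

#include <linux/font.h>

static const struct font_desc *my_pick_font(int xres, int yres)
{
	const struct font_desc *font;

	/* try an explicitly requested font first */
	font = find_font("VGA8x16");
	if (font)
		return font;

	/* otherwise let the library pick one that suits the resolution;
	 * -1 for font_w/font_h means any width/height is acceptable */
	return get_default_font(xres, yres, -1, -1);
}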
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 5257f74fccf3..9fc31292cfa1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -327,21 +327,45 @@ EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
327 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage 327 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
328 * @pool: pool to allocate from 328 * @pool: pool to allocate from
329 * @size: number of bytes to allocate from the pool 329 * @size: number of bytes to allocate from the pool
330 * @dma: dma-view physical address return value. Use NULL if unneeded. 330 * @dma: dma-view physical address return value. Use %NULL if unneeded.
331 * 331 *
332 * Allocate the requested number of bytes from the specified pool. 332 * Allocate the requested number of bytes from the specified pool.
333 * Uses the pool allocation function (with first-fit algorithm by default). 333 * Uses the pool allocation function (with first-fit algorithm by default).
334 * Can not be used in NMI handler on architectures without 334 * Can not be used in NMI handler on architectures without
335 * NMI-safe cmpxchg implementation. 335 * NMI-safe cmpxchg implementation.
336 *
337 * Return: virtual address of the allocated memory, or %NULL on failure
336 */ 338 */
337void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) 339void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
338{ 340{
341 return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
342}
343EXPORT_SYMBOL(gen_pool_dma_alloc);
344
345/**
346 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
347 * usage with the given pool algorithm
348 * @pool: pool to allocate from
349 * @size: number of bytes to allocate from the pool
350 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
351 * @algo: algorithm passed from caller
352 * @data: data passed to algorithm
353 *
354 * Allocate the requested number of bytes from the specified pool. Uses the
355 * given pool allocation function. Can not be used in NMI handler on
356 * architectures without NMI-safe cmpxchg implementation.
357 *
358 * Return: virtual address of the allocated memory, or %NULL on failure
359 */
360void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
361 dma_addr_t *dma, genpool_algo_t algo, void *data)
362{
339 unsigned long vaddr; 363 unsigned long vaddr;
340 364
341 if (!pool) 365 if (!pool)
342 return NULL; 366 return NULL;
343 367
344 vaddr = gen_pool_alloc(pool, size); 368 vaddr = gen_pool_alloc_algo(pool, size, algo, data);
345 if (!vaddr) 369 if (!vaddr)
346 return NULL; 370 return NULL;
347 371
@@ -350,7 +374,102 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
350 374
351 return (void *)vaddr; 375 return (void *)vaddr;
352} 376}
353EXPORT_SYMBOL(gen_pool_dma_alloc); 377EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
378
379/**
380 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
381 * usage with the given alignment
382 * @pool: pool to allocate from
383 * @size: number of bytes to allocate from the pool
384 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
385 * @align: alignment in bytes for starting address
386 *
387 * Allocate the requested number bytes from the specified pool, with the given
388 * alignment restriction. Can not be used in NMI handler on architectures
389 * without NMI-safe cmpxchg implementation.
390 *
391 * Return: virtual address of the allocated memory, or %NULL on failure
392 */
393void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
394 dma_addr_t *dma, int align)
395{
396 struct genpool_data_align data = { .align = align };
397
398 return gen_pool_dma_alloc_algo(pool, size, dma,
399 gen_pool_first_fit_align, &data);
400}
401EXPORT_SYMBOL(gen_pool_dma_alloc_align);
402
403/**
404 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
405 * DMA usage
406 * @pool: pool to allocate from
407 * @size: number of bytes to allocate from the pool
408 * @dma: dma-view physical address return value. Use %NULL if unneeded.
409 *
410 * Allocate the requested number of zeroed bytes from the specified pool.
411 * Uses the pool allocation function (with first-fit algorithm by default).
412 * Can not be used in NMI handler on architectures without
413 * NMI-safe cmpxchg implementation.
414 *
415 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
416 */
417void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
418{
419 return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
420}
421EXPORT_SYMBOL(gen_pool_dma_zalloc);
422
423/**
424 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
425 * DMA usage with the given pool algorithm
426 * @pool: pool to allocate from
427 * @size: number of bytes to allocate from the pool
428 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
429 * @algo: algorithm passed from caller
430 * @data: data passed to algorithm
431 *
432 * Allocate the requested number of zeroed bytes from the specified pool. Uses
433 * the given pool allocation function. Can not be used in NMI handler on
434 * architectures without NMI-safe cmpxchg implementation.
435 *
436 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
437 */
438void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
439 dma_addr_t *dma, genpool_algo_t algo, void *data)
440{
441 void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
442
443 if (vaddr)
444 memset(vaddr, 0, size);
445
446 return vaddr;
447}
448EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
449
450/**
451 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
452 * DMA usage with the given alignment
453 * @pool: pool to allocate from
454 * @size: number of bytes to allocate from the pool
455 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
456 * @align: alignment in bytes for starting address
457 *
458 * Allocate the requested number of zeroed bytes from the specified pool,
459 * with the given alignment restriction. Can not be used in NMI handler on
460 * architectures without NMI-safe cmpxchg implementation.
461 *
462 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
463 */
464void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
465 dma_addr_t *dma, int align)
466{
467 struct genpool_data_align data = { .align = align };
468
469 return gen_pool_dma_zalloc_algo(pool, size, dma,
470 gen_pool_first_fit_align, &data);
471}
472EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
354 473
355/** 474/**
356 * gen_pool_free - free allocated special memory back to the pool 475 * gen_pool_free - free allocated special memory back to the pool
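
A quick usage sketch for the aligned/zeroed variants added above. The pool is assumed to have been set up elsewhere with gen_pool_create()/gen_pool_add(); the size and alignment are arbitrary examples.

#include <linux/genalloc.h>

static void *my_alloc_desc(struct gen_pool *pool, dma_addr_t *dma)
{
	/* 256-byte descriptor, zeroed, starting on a 64-byte boundary */
	return gen_pool_dma_zalloc_align(pool, 256, dma, 64);
}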
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f99c41d4eb54..f1e0569b4539 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1634,9 +1634,9 @@ EXPORT_SYMBOL(dup_iter);
1634 * on-stack array was used or not (and regardless of whether this function 1634 * on-stack array was used or not (and regardless of whether this function
1635 * returns an error or not). 1635 * returns an error or not).
1636 * 1636 *
1637 * Return: 0 on success or negative error code on error. 1637 * Return: Negative error code on error, bytes imported on success
1638 */ 1638 */
1639int import_iovec(int type, const struct iovec __user * uvector, 1639ssize_t import_iovec(int type, const struct iovec __user * uvector,
1640 unsigned nr_segs, unsigned fast_segs, 1640 unsigned nr_segs, unsigned fast_segs,
1641 struct iovec **iov, struct iov_iter *i) 1641 struct iovec **iov, struct iov_iter *i)
1642{ 1642{
@@ -1652,16 +1652,17 @@ int import_iovec(int type, const struct iovec __user * uvector,
1652 } 1652 }
1653 iov_iter_init(i, type, p, nr_segs, n); 1653 iov_iter_init(i, type, p, nr_segs, n);
1654 *iov = p == *iov ? NULL : p; 1654 *iov = p == *iov ? NULL : p;
1655 return 0; 1655 return n;
1656} 1656}
1657EXPORT_SYMBOL(import_iovec); 1657EXPORT_SYMBOL(import_iovec);
1658 1658
1659#ifdef CONFIG_COMPAT 1659#ifdef CONFIG_COMPAT
1660#include <linux/compat.h> 1660#include <linux/compat.h>
1661 1661
1662int compat_import_iovec(int type, const struct compat_iovec __user * uvector, 1662ssize_t compat_import_iovec(int type,
1663 unsigned nr_segs, unsigned fast_segs, 1663 const struct compat_iovec __user * uvector,
1664 struct iovec **iov, struct iov_iter *i) 1664 unsigned nr_segs, unsigned fast_segs,
1665 struct iovec **iov, struct iov_iter *i)
1665{ 1666{
1666 ssize_t n; 1667 ssize_t n;
1667 struct iovec *p; 1668 struct iovec *p;
@@ -1675,7 +1676,7 @@ int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
1675 } 1676 }
1676 iov_iter_init(i, type, p, nr_segs, n); 1677 iov_iter_init(i, type, p, nr_segs, n);
1677 *iov = p == *iov ? NULL : p; 1678 *iov = p == *iov ? NULL : p;
1678 return 0; 1679 return n;
1679} 1680}
1680#endif 1681#endif
1681 1682
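
Callers of import_iovec()/compat_import_iovec() now get the total byte count back instead of 0, which lets them drop a separate iov_iter_count() call. A minimal sketch of the updated calling convention; uvec/nr_segs would come from the syscall, and the I/O path itself is elided.

#include <linux/uio.h>
#include <linux/slab.h>

static ssize_t my_read_iovec(const struct iovec __user *uvec, unsigned nr_segs)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
	if (ret < 0)
		return ret;
	/* ret is now the number of bytes described by the iovec */

	/* ... pass &iter to the actual I/O path here ... */

	kfree(iov);	/* NULL if the on-stack array was used; kfree(NULL) is fine */
	return ret;
}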
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..83198cb37d8d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -498,8 +498,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
498 kobj = kobject_get(kobj); 498 kobj = kobject_get(kobj);
499 if (!kobj) 499 if (!kobj)
500 return -EINVAL; 500 return -EINVAL;
501 if (!kobj->parent) 501 if (!kobj->parent) {
502 kobject_put(kobj);
502 return -EINVAL; 503 return -EINVAL;
504 }
503 505
504 devpath = kobject_get_path(kobj, GFP_KERNEL); 506 devpath = kobject_get_path(kobj, GFP_KERNEL);
505 if (!devpath) { 507 if (!devpath) {
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 712ed1f4eb64..52f0c258c895 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -157,9 +157,11 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
157 * 157 *
158 * The number of pending lists of size 2^k is determined by the 158 * The number of pending lists of size 2^k is determined by the
159 * state of bit k of "count" plus two extra pieces of information: 159 * state of bit k of "count" plus two extra pieces of information:
160 *
160 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and 161 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
161 * - Whether the higher-order bits are zero or non-zero (i.e. 162 * - Whether the higher-order bits are zero or non-zero (i.e.
162 * is count >= 2^(k+1)). 163 * is count >= 2^(k+1)).
164 *
163 * There are six states we distinguish. "x" represents some arbitrary 165 * There are six states we distinguish. "x" represents some arbitrary
164 * bits, and "y" represents some arbitrary non-zero bits: 166 * bits, and "y" represents some arbitrary non-zero bits:
165 * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k 167 * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 82b19e4f1189..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -24,6 +24,7 @@
24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) 24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
25{ 25{
26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; 26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
27 struct karatsuba_ctx karactx = {};
27 mpi_ptr_t xp_marker = NULL; 28 mpi_ptr_t xp_marker = NULL;
28 mpi_ptr_t tspace = NULL; 29 mpi_ptr_t tspace = NULL;
29 mpi_ptr_t rp, ep, mp, bp; 30 mpi_ptr_t rp, ep, mp, bp;
@@ -150,13 +151,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
150 int c; 151 int c;
151 mpi_limb_t e; 152 mpi_limb_t e;
152 mpi_limb_t carry_limb; 153 mpi_limb_t carry_limb;
153 struct karatsuba_ctx karactx;
154 154
155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); 155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
156 if (!xp) 156 if (!xp)
157 goto enomem; 157 goto enomem;
158 158
159 memset(&karactx, 0, sizeof karactx);
160 negative_result = (ep[0] & 1) && base->sign; 159 negative_result = (ep[0] & 1) && base->sign;
161 160
162 i = esize - 1; 161 i = esize - 1;
@@ -281,8 +280,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
281 if (mod_shift_cnt) 280 if (mod_shift_cnt)
282 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); 281 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
283 MPN_NORMALIZE(rp, rsize); 282 MPN_NORMALIZE(rp, rsize);
284
285 mpihelp_release_karatsuba_ctx(&karactx);
286 } 283 }
287 284
288 if (negative_result && rsize) { 285 if (negative_result && rsize) {
@@ -299,6 +296,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
299leave: 296leave:
300 rc = 0; 297 rc = 0;
301enomem: 298enomem:
299 mpihelp_release_karatsuba_ctx(&karactx);
302 if (assign_rp) 300 if (assign_rp)
303 mpi_assign_limb_space(res, rp, size); 301 mpi_assign_limb_space(res, rp, size);
304 if (mp_marker) 302 if (mp_marker)
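
The mpi-pow change is a leak fix with a familiar shape: the karatsuba context is zero-initialized at function scope and released once on the common exit path, so every error branch is covered. The general idiom, as a hedged standalone sketch (my_ctx_release() must tolerate a never-used, zeroed context, which mpihelp_release_karatsuba_ctx() does):

#include <linux/slab.h>

struct my_ctx { void *buf; };			/* hypothetical context */

static void my_ctx_release(struct my_ctx *ctx)	/* safe on a zeroed ctx */
{
	kfree(ctx->buf);
}

static int my_op(void)
{
	struct my_ctx ctx = {};			/* empty until really needed */
	int rc = -ENOMEM;

	ctx.buf = kmalloc(64, GFP_KERNEL);
	if (!ctx.buf)
		goto out;			/* release is still safe */

	/* ... work ... */
	rc = 0;
out:
	my_ctx_release(&ctx);			/* single cleanup point */
	return rc;
}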
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..21016b32d313 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
59 err_inject->nb.priority = priority; 59 err_inject->nb.priority = priority;
60 60
61 dir = debugfs_create_dir(name, parent); 61 dir = debugfs_create_dir(name, parent);
62 if (!dir)
63 return ERR_PTR(-ENOMEM);
64 62
65 actions_dir = debugfs_create_dir("actions", dir); 63 actions_dir = debugfs_create_dir("actions", dir);
66 if (!actions_dir)
67 goto fail;
68 64
69 for (action = err_inject->actions; action->name; action++) { 65 for (action = err_inject->actions; action->name; action++) {
70 struct dentry *action_dir; 66 struct dentry *action_dir;
71 67
72 action_dir = debugfs_create_dir(action->name, actions_dir); 68 action_dir = debugfs_create_dir(action->name, actions_dir);
73 if (!action_dir)
74 goto fail;
75 69
76 /* 70 /*
77 * Create debugfs r/w file containing action->error. If 71 * Create debugfs r/w file containing action->error. If
78 * notifier call chain is called with action->val, it will 72 * notifier call chain is called with action->val, it will
79 * fail with the error code 73 * fail with the error code
80 */ 74 */
81 if (!debugfs_create_errno("error", mode, action_dir, 75 debugfs_create_errno("error", mode, action_dir, &action->error);
82 &action->error))
83 goto fail;
84 } 76 }
85 return dir; 77 return dir;
86fail:
87 debugfs_remove_recursive(dir);
88 return ERR_PTR(-ENOMEM);
89} 78}
90EXPORT_SYMBOL_GPL(notifier_err_inject_init); 79EXPORT_SYMBOL_GPL(notifier_err_inject_init);
91 80
diff --git a/lib/objagg.c b/lib/objagg.c
index 576be22e86de..55621fb82e0a 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -605,12 +605,10 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
605{ 605{
606 struct objagg_stats *objagg_stats; 606 struct objagg_stats *objagg_stats;
607 struct objagg_obj *objagg_obj; 607 struct objagg_obj *objagg_obj;
608 size_t alloc_size;
609 int i; 608 int i;
610 609
611 alloc_size = sizeof(*objagg_stats) + 610 objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
612 sizeof(objagg_stats->stats_info[0]) * objagg->obj_count; 611 objagg->obj_count), GFP_KERNEL);
613 objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
614 if (!objagg_stats) 612 if (!objagg_stats)
615 return ERR_PTR(-ENOMEM); 613 return ERR_PTR(-ENOMEM);
616 614
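
The objagg hunk swaps an open-coded trailing-array size computation for struct_size(), which guards the multiplication and addition against overflow. The same idiom outside objagg, as a minimal sketch (struct my_stats is hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>

struct my_stats {
	unsigned int count;
	struct { u64 hits; } info[];	/* flexible array member */
};

static struct my_stats *my_stats_alloc(unsigned int n)
{
	/* struct_size() == sizeof(struct my_stats) + n * sizeof(info[0]),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly */
	struct my_stats *s = kzalloc(struct_size(s, info, n), GFP_KERNEL);

	if (s)
		s->count = n;
	return s;
}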
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 071a76c7bac0..4f6c6ebbbbde 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -70,11 +70,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
70 return -ENOMEM; 70 return -ENOMEM;
71 71
72 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC; 72 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
73 ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
73 74
74 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) 75 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
75 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC; 76 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
76 else 77 ref->allow_reinit = true;
78 } else {
77 start_count += PERCPU_COUNT_BIAS; 79 start_count += PERCPU_COUNT_BIAS;
80 }
78 81
79 if (flags & PERCPU_REF_INIT_DEAD) 82 if (flags & PERCPU_REF_INIT_DEAD)
80 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 83 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -120,6 +123,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
120 ref->confirm_switch = NULL; 123 ref->confirm_switch = NULL;
121 wake_up_all(&percpu_ref_switch_waitq); 124 wake_up_all(&percpu_ref_switch_waitq);
122 125
126 if (!ref->allow_reinit)
127 percpu_ref_exit(ref);
128
123 /* drop ref from percpu_ref_switch_to_atomic() */ 129 /* drop ref from percpu_ref_switch_to_atomic() */
124 percpu_ref_put(ref); 130 percpu_ref_put(ref);
125} 131}
@@ -195,6 +201,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
195 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) 201 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
196 return; 202 return;
197 203
204 if (WARN_ON_ONCE(!ref->allow_reinit))
205 return;
206
198 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count); 207 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
199 208
200 /* 209 /*
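
With the new allow_reinit flag, a percpu ref that will never be re-initialized frees its percpu counters as soon as it switches to atomic mode; callers that do want to reuse the ref after percpu_ref_kill() must now pass PERCPU_REF_ALLOW_REINIT. A hedged sketch (my_release is a placeholder release callback):

#include <linux/percpu-refcount.h>

static void my_release(struct percpu_ref *ref)
{
	/* last reference dropped */
}

static int my_init(struct percpu_ref *ref)
{
	/* keep the percpu counters around so percpu_ref_reinit() stays legal */
	return percpu_ref_init(ref, my_release, PERCPU_REF_ALLOW_REINIT,
			       GFP_KERNEL);
}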
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index e723eacf7868..42695bc8d451 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -12,9 +12,6 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
12 12
13hostprogs-y += mktables 13hostprogs-y += mktables
14 14
15quiet_cmd_unroll = UNROLL $@
16 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
17
18ifeq ($(CONFIG_ALTIVEC),y) 15ifeq ($(CONFIG_ALTIVEC),y)
19altivec_flags := -maltivec $(call cc-option,-mabi=altivec) 16altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
20 17
@@ -26,7 +23,6 @@ CFLAGS_REMOVE_altivec1.o += -msoft-float
26CFLAGS_REMOVE_altivec2.o += -msoft-float 23CFLAGS_REMOVE_altivec2.o += -msoft-float
27CFLAGS_REMOVE_altivec4.o += -msoft-float 24CFLAGS_REMOVE_altivec4.o += -msoft-float
28CFLAGS_REMOVE_altivec8.o += -msoft-float 25CFLAGS_REMOVE_altivec8.o += -msoft-float
29CFLAGS_REMOVE_altivec8.o += -msoft-float
30CFLAGS_REMOVE_vpermxor1.o += -msoft-float 26CFLAGS_REMOVE_vpermxor1.o += -msoft-float
31CFLAGS_REMOVE_vpermxor2.o += -msoft-float 27CFLAGS_REMOVE_vpermxor2.o += -msoft-float
32CFLAGS_REMOVE_vpermxor4.o += -msoft-float 28CFLAGS_REMOVE_vpermxor4.o += -msoft-float
@@ -51,111 +47,39 @@ CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
51endif 47endif
52endif 48endif
53 49
54targets += int1.c 50quiet_cmd_unroll = UNROLL $@
55$(obj)/int1.c: UNROLL := 1 51 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
56$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
57 $(call if_changed,unroll)
58
59targets += int2.c
60$(obj)/int2.c: UNROLL := 2
61$(obj)/int2.c: $(src)/int.uc $(src)/unroll.awk FORCE
62 $(call if_changed,unroll)
63
64targets += int4.c
65$(obj)/int4.c: UNROLL := 4
66$(obj)/int4.c: $(src)/int.uc $(src)/unroll.awk FORCE
67 $(call if_changed,unroll)
68
69targets += int8.c
70$(obj)/int8.c: UNROLL := 8
71$(obj)/int8.c: $(src)/int.uc $(src)/unroll.awk FORCE
72 $(call if_changed,unroll)
73
74targets += int16.c
75$(obj)/int16.c: UNROLL := 16
76$(obj)/int16.c: $(src)/int.uc $(src)/unroll.awk FORCE
77 $(call if_changed,unroll)
78 52
79targets += int32.c 53targets += int1.c int2.c int4.c int8.c int16.c int32.c
80$(obj)/int32.c: UNROLL := 32 54$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
81$(obj)/int32.c: $(src)/int.uc $(src)/unroll.awk FORCE
82 $(call if_changed,unroll) 55 $(call if_changed,unroll)
83 56
84CFLAGS_altivec1.o += $(altivec_flags) 57CFLAGS_altivec1.o += $(altivec_flags)
85targets += altivec1.c
86$(obj)/altivec1.c: UNROLL := 1
87$(obj)/altivec1.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
88 $(call if_changed,unroll)
89
90CFLAGS_altivec2.o += $(altivec_flags) 58CFLAGS_altivec2.o += $(altivec_flags)
91targets += altivec2.c
92$(obj)/altivec2.c: UNROLL := 2
93$(obj)/altivec2.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
94 $(call if_changed,unroll)
95
96CFLAGS_altivec4.o += $(altivec_flags) 59CFLAGS_altivec4.o += $(altivec_flags)
97targets += altivec4.c
98$(obj)/altivec4.c: UNROLL := 4
99$(obj)/altivec4.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
100 $(call if_changed,unroll)
101
102CFLAGS_altivec8.o += $(altivec_flags) 60CFLAGS_altivec8.o += $(altivec_flags)
103targets += altivec8.c 61targets += altivec1.c altivec2.c altivec4.c altivec8.c
104$(obj)/altivec8.c: UNROLL := 8 62$(obj)/altivec%.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
105$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
106 $(call if_changed,unroll) 63 $(call if_changed,unroll)
107 64
108CFLAGS_vpermxor1.o += $(altivec_flags) 65CFLAGS_vpermxor1.o += $(altivec_flags)
109targets += vpermxor1.c
110$(obj)/vpermxor1.c: UNROLL := 1
111$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
112 $(call if_changed,unroll)
113
114CFLAGS_vpermxor2.o += $(altivec_flags) 66CFLAGS_vpermxor2.o += $(altivec_flags)
115targets += vpermxor2.c
116$(obj)/vpermxor2.c: UNROLL := 2
117$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
118 $(call if_changed,unroll)
119
120CFLAGS_vpermxor4.o += $(altivec_flags) 67CFLAGS_vpermxor4.o += $(altivec_flags)
121targets += vpermxor4.c
122$(obj)/vpermxor4.c: UNROLL := 4
123$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
124 $(call if_changed,unroll)
125
126CFLAGS_vpermxor8.o += $(altivec_flags) 68CFLAGS_vpermxor8.o += $(altivec_flags)
127targets += vpermxor8.c 69targets += vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
128$(obj)/vpermxor8.c: UNROLL := 8 70$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
129$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
130 $(call if_changed,unroll) 71 $(call if_changed,unroll)
131 72
132CFLAGS_neon1.o += $(NEON_FLAGS) 73CFLAGS_neon1.o += $(NEON_FLAGS)
133targets += neon1.c
134$(obj)/neon1.c: UNROLL := 1
135$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
136 $(call if_changed,unroll)
137
138CFLAGS_neon2.o += $(NEON_FLAGS) 74CFLAGS_neon2.o += $(NEON_FLAGS)
139targets += neon2.c
140$(obj)/neon2.c: UNROLL := 2
141$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
142 $(call if_changed,unroll)
143
144CFLAGS_neon4.o += $(NEON_FLAGS) 75CFLAGS_neon4.o += $(NEON_FLAGS)
145targets += neon4.c
146$(obj)/neon4.c: UNROLL := 4
147$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
148 $(call if_changed,unroll)
149
150CFLAGS_neon8.o += $(NEON_FLAGS) 76CFLAGS_neon8.o += $(NEON_FLAGS)
151targets += neon8.c 77targets += neon1.c neon2.c neon4.c neon8.c
152$(obj)/neon8.c: UNROLL := 8 78$(obj)/neon%.c: $(src)/neon.uc $(src)/unroll.awk FORCE
153$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
154 $(call if_changed,unroll) 79 $(call if_changed,unroll)
155 80
156targets += s390vx8.c 81targets += s390vx8.c
157$(obj)/s390vx8.c: UNROLL := 8 82$(obj)/s390vx%.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
158$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
159 $(call if_changed,unroll) 83 $(call if_changed,unroll)
160 84
161quiet_cmd_mktable = TABLE $@ 85quiet_cmd_mktable = TABLE $@
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 914ebe98fc21..9e597e1f91a4 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -60,7 +60,7 @@ static inline void LOAD_DATA(int x, u8 *ptr)
60 typedef struct { u8 _[16 * $#]; } addrtype; 60 typedef struct { u8 _[16 * $#]; } addrtype;
61 register addrtype *__ptr asm("1") = (addrtype *) ptr; 61 register addrtype *__ptr asm("1") = (addrtype *) ptr;
62 62
63 asm volatile ("VLM %2,%3,0,%r1" 63 asm volatile ("VLM %2,%3,0,%1"
64 : : "m" (*__ptr), "a" (__ptr), "i" (x), 64 : : "m" (*__ptr), "a" (__ptr), "i" (x),
65 "i" (x + $# - 1)); 65 "i" (x + $# - 1));
66} 66}
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
index ba9d7a3329eb..5d4fa68f26cb 100644
--- a/lib/reed_solomon/Makefile
+++ b/lib/reed_solomon/Makefile
@@ -4,4 +4,4 @@
4# 4#
5 5
6obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o 6obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
7 7obj-$(CONFIG_REED_SOLOMON_TEST) += test_rslib.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 1db74eb098d0..805de84ae83d 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -22,6 +22,7 @@
22 uint16_t *index_of = rs->index_of; 22 uint16_t *index_of = rs->index_of;
23 uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; 23 uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
24 int count = 0; 24 int count = 0;
25 int num_corrected;
25 uint16_t msk = (uint16_t) rs->nn; 26 uint16_t msk = (uint16_t) rs->nn;
26 27
27 /* 28 /*
@@ -39,11 +40,21 @@
39 40
40 /* Check length parameter for validity */ 41 /* Check length parameter for validity */
41 pad = nn - nroots - len; 42 pad = nn - nroots - len;
42 BUG_ON(pad < 0 || pad >= nn); 43 BUG_ON(pad < 0 || pad >= nn - nroots);
43 44
44 /* Does the caller provide the syndrome ? */ 45 /* Does the caller provide the syndrome ? */
45 if (s != NULL) 46 if (s != NULL) {
46 goto decode; 47 for (i = 0; i < nroots; i++) {
48 /* The syndrome is in index form,
49 * so nn represents zero
50 */
51 if (s[i] != nn)
52 goto decode;
53 }
54
55 /* syndrome is zero, no errors to correct */
56 return 0;
57 }
47 58
48 /* form the syndromes; i.e., evaluate data(x) at roots of 59 /* form the syndromes; i.e., evaluate data(x) at roots of
49 * g(x) */ 60 * g(x) */
@@ -88,8 +99,7 @@
88 /* if syndrome is zero, data[] is a codeword and there are no 99 /* if syndrome is zero, data[] is a codeword and there are no
89 * errors to correct. So return data[] unmodified 100 * errors to correct. So return data[] unmodified
90 */ 101 */
91 count = 0; 102 return 0;
92 goto finish;
93 } 103 }
94 104
95 decode: 105 decode:
@@ -99,9 +109,9 @@
99 if (no_eras > 0) { 109 if (no_eras > 0) {
100 /* Init lambda to be the erasure locator polynomial */ 110 /* Init lambda to be the erasure locator polynomial */
101 lambda[1] = alpha_to[rs_modnn(rs, 111 lambda[1] = alpha_to[rs_modnn(rs,
102 prim * (nn - 1 - eras_pos[0]))]; 112 prim * (nn - 1 - (eras_pos[0] + pad)))];
103 for (i = 1; i < no_eras; i++) { 113 for (i = 1; i < no_eras; i++) {
104 u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); 114 u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
105 for (j = i + 1; j > 0; j--) { 115 for (j = i + 1; j > 0; j--) {
106 tmp = index_of[lambda[j - 1]]; 116 tmp = index_of[lambda[j - 1]];
107 if (tmp != nn) { 117 if (tmp != nn) {
@@ -175,6 +185,15 @@
175 if (lambda[i] != nn) 185 if (lambda[i] != nn)
176 deg_lambda = i; 186 deg_lambda = i;
177 } 187 }
188
189 if (deg_lambda == 0) {
190 /*
191 * deg(lambda) is zero even though the syndrome is non-zero
192 * => uncorrectable error detected
193 */
194 return -EBADMSG;
195 }
196
178 /* Find roots of error+erasure locator polynomial by Chien search */ 197 /* Find roots of error+erasure locator polynomial by Chien search */
179 memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0])); 198 memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
180 count = 0; /* Number of roots of lambda(x) */ 199 count = 0; /* Number of roots of lambda(x) */
@@ -188,6 +207,12 @@
188 } 207 }
189 if (q != 0) 208 if (q != 0)
190 continue; /* Not a root */ 209 continue; /* Not a root */
210
211 if (k < pad) {
212 /* Impossible error location. Uncorrectable error. */
213 return -EBADMSG;
214 }
215
191 /* store root (index-form) and error location number */ 216 /* store root (index-form) and error location number */
192 root[count] = i; 217 root[count] = i;
193 loc[count] = k; 218 loc[count] = k;
@@ -202,8 +227,7 @@
202 * deg(lambda) unequal to number of roots => uncorrectable 227 * deg(lambda) unequal to number of roots => uncorrectable
203 * error detected 228 * error detected
204 */ 229 */
205 count = -EBADMSG; 230 return -EBADMSG;
206 goto finish;
207 } 231 }
208 /* 232 /*
209 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo 233 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
@@ -223,7 +247,9 @@
223 /* 247 /*
224 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 = 248 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
225 * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form 249 * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
250 * Note: we reuse the buffer for b to store the correction pattern
226 */ 251 */
252 num_corrected = 0;
227 for (j = count - 1; j >= 0; j--) { 253 for (j = count - 1; j >= 0; j--) {
228 num1 = 0; 254 num1 = 0;
229 for (i = deg_omega; i >= 0; i--) { 255 for (i = deg_omega; i >= 0; i--) {
@@ -231,6 +257,13 @@
231 num1 ^= alpha_to[rs_modnn(rs, omega[i] + 257 num1 ^= alpha_to[rs_modnn(rs, omega[i] +
232 i * root[j])]; 258 i * root[j])];
233 } 259 }
260
261 if (num1 == 0) {
262 /* Nothing to correct at this position */
263 b[j] = 0;
264 continue;
265 }
266
234 num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; 267 num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
235 den = 0; 268 den = 0;
236 269
@@ -242,30 +275,52 @@
242 i * root[j])]; 275 i * root[j])];
243 } 276 }
244 } 277 }
245 /* Apply error to data */ 278
246 if (num1 != 0 && loc[j] >= pad) { 279 b[j] = alpha_to[rs_modnn(rs, index_of[num1] +
247 uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + 280 index_of[num2] +
248 index_of[num2] + 281 nn - index_of[den])];
249 nn - index_of[den])]; 282 num_corrected++;
250 /* Store the error correction pattern, if a 283 }
251 * correction buffer is available */ 284
252 if (corr) { 285 /*
253 corr[j] = cor; 286 * We compute the syndrome of the 'error' and check that it matches
254 } else { 287 * the syndrome of the received word
255 /* If a data buffer is given and the 288 */
256 * error is inside the message, 289 for (i = 0; i < nroots; i++) {
257 * correct it */ 290 tmp = 0;
258 if (data && (loc[j] < (nn - nroots))) 291 for (j = 0; j < count; j++) {
259 data[loc[j] - pad] ^= cor; 292 if (b[j] == 0)
260 } 293 continue;
294
295 k = (fcr + i) * prim * (nn-loc[j]-1);
296 tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)];
261 } 297 }
298
299 if (tmp != alpha_to[s[i]])
300 return -EBADMSG;
262 } 301 }
263 302
264finish: 303 /*
265 if (eras_pos != NULL) { 304 * Store the error correction pattern, if a
266 for (i = 0; i < count; i++) 305 * correction buffer is available
267 eras_pos[i] = loc[i] - pad; 306 */
307 if (corr && eras_pos) {
308 j = 0;
309 for (i = 0; i < count; i++) {
310 if (b[i]) {
311 corr[j] = b[i];
312 eras_pos[j++] = loc[i] - pad;
313 }
314 }
315 } else if (data && par) {
316 /* Apply error to data and parity */
317 for (i = 0; i < count; i++) {
318 if (loc[i] < (nn - nroots))
319 data[loc[i] - pad] ^= b[i];
320 else
321 par[loc[i] - pad - len] ^= b[i];
322 }
268 } 323 }
269 return count;
270 324
325 return num_corrected;
271} 326}
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index e5fdc8b9e856..bbc01bad3053 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -340,7 +340,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
340 * @data: data field of a given type 340 * @data: data field of a given type
341 * @par: received parity data field 341 * @par: received parity data field
342 * @len: data length 342 * @len: data length
343 * @s: syndrome data field (if NULL, syndrome is calculated) 343 * @s: syndrome data field, must be in index form
344 * (if NULL, syndrome is calculated)
344 * @no_eras: number of erasures 345 * @no_eras: number of erasures
345 * @eras_pos: position of erasures, can be NULL 346 * @eras_pos: position of erasures, can be NULL
346 * @invmsk: invert data mask (will be xored on data, not on parity!) 347 * @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -354,7 +355,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
354 * decoding, so the caller has to ensure that decoder invocations are 355 * decoding, so the caller has to ensure that decoder invocations are
355 * serialized. 356 * serialized.
356 * 357 *
357 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. 358 * Returns the number of corrected symbols or -EBADMSG for uncorrectable
359 * errors. The count includes errors in the parity.
358 */ 360 */
359int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len, 361int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
360 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 362 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
@@ -391,7 +393,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
391 * @data: data field of a given type 393 * @data: data field of a given type
392 * @par: received parity data field 394 * @par: received parity data field
393 * @len: data length 395 * @len: data length
394 * @s: syndrome data field (if NULL, syndrome is calculated) 396 * @s: syndrome data field, must be in index form
397 * (if NULL, syndrome is calculated)
395 * @no_eras: number of erasures 398 * @no_eras: number of erasures
396 * @eras_pos: position of erasures, can be NULL 399 * @eras_pos: position of erasures, can be NULL
397 * @invmsk: invert data mask (will be xored on data, not on parity!) 400 * @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -403,7 +406,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
403 * decoding, so the caller has to ensure that decoder invocations are 406 * decoding, so the caller has to ensure that decoder invocations are
404 * serialized. 407 * serialized.
405 * 408 *
406 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. 409 * Returns the number of corrected symbols or -EBADMSG for uncorrectable
410 * errors. The count includes errors in the parity.
407 */ 411 */
408int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len, 412int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
409 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 413 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
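
Given the reworked return semantics above (number of corrected symbols including parity, or -EBADMSG), a caller using the correction-buffer interface looks roughly like the sketch below. It loosely mirrors what the new test module does; the my_* names and the fixed 32-entry scratch arrays are assumptions for the example, not part of the library API.

#include <linux/rslib.h>

/* rsc: from init_rs(); data/par: received word; len: data length */
static int my_correct(struct rs_control *rsc, uint16_t *data, uint16_t *par,
		      int len)
{
	int nroots = rsc->codec->nroots;
	uint16_t corr[32];		/* must hold up to nroots corrections */
	int errlocs[32];
	int i, nerrs;

	if (nroots > 32)
		return -EINVAL;

	nerrs = decode_rs16(rsc, data, par, len, NULL, 0, errlocs, 0, corr);
	if (nerrs < 0)
		return nerrs;		/* -EBADMSG: uncorrectable */

	/* apply the reported correction pattern; positions >= len
	 * index into the parity symbols */
	for (i = 0; i < nerrs; i++) {
		if (errlocs[i] < len)
			data[errlocs[i]] ^= corr[i];
		else
			par[errlocs[i] - len] ^= corr[i];
	}

	return nerrs;
}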
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
new file mode 100644
index 000000000000..4eb29f365ece
--- /dev/null
+++ b/lib/reed_solomon/test_rslib.c
@@ -0,0 +1,518 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Tests for Generic Reed Solomon encoder / decoder library
4 *
5 * Written by Ferdinand Blomqvist
6 * Based on previous work by Phil Karn, KA9Q
7 */
8#include <linux/rslib.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <linux/slab.h>
14
15enum verbosity {
16 V_SILENT,
17 V_PROGRESS,
18 V_CSUMMARY
19};
20
21enum method {
22 CORR_BUFFER,
23 CALLER_SYNDROME,
24 IN_PLACE
25};
26
27#define __param(type, name, init, msg) \
28 static type name = init; \
29 module_param(name, type, 0444); \
30 MODULE_PARM_DESC(name, msg)
31
32__param(int, v, V_PROGRESS, "Verbosity level");
33__param(int, ewsc, 1, "Erasures without symbol corruption");
34__param(int, bc, 1, "Test for correct behaviour beyond error correction capacity");
35
36struct etab {
37 int symsize;
38 int genpoly;
39 int fcs;
40 int prim;
41 int nroots;
42 int ntrials;
43};
44
45/* List of codes to test */
46static struct etab Tab[] = {
47 {2, 0x7, 1, 1, 1, 100000 },
48 {3, 0xb, 1, 1, 2, 100000 },
49 {3, 0xb, 1, 1, 3, 100000 },
50 {3, 0xb, 2, 1, 4, 100000 },
51 {4, 0x13, 1, 1, 4, 10000 },
52 {5, 0x25, 1, 1, 6, 1000 },
53 {6, 0x43, 3, 1, 8, 1000 },
54 {7, 0x89, 1, 1, 14, 500 },
55 {8, 0x11d, 1, 1, 30, 100 },
56 {8, 0x187, 112, 11, 32, 100 },
57 {9, 0x211, 1, 1, 33, 80 },
58 {0, 0, 0, 0, 0, 0},
59};
60
61
62struct estat {
63 int dwrong;
64 int irv;
65 int wepos;
66 int nwords;
67};
68
69struct bcstat {
70 int rfail;
71 int rsuccess;
72 int noncw;
73 int nwords;
74};
75
76struct wspace {
77 uint16_t *c; /* sent codeword */
78 uint16_t *r; /* received word */
79 uint16_t *s; /* syndrome */
80 uint16_t *corr; /* correction buffer */
81 int *errlocs;
82 int *derrlocs;
83};
84
85struct pad {
86 int mult;
87 int shift;
88};
89
90static struct pad pad_coef[] = {
91 { 0, 0 },
92 { 1, 2 },
93 { 1, 1 },
94 { 3, 2 },
95 { 1, 0 },
96};
97
98static void free_ws(struct wspace *ws)
99{
100 if (!ws)
101 return;
102
103 kfree(ws->errlocs);
104 kfree(ws->c);
105 kfree(ws);
106}
107
108static struct wspace *alloc_ws(struct rs_codec *rs)
109{
110 int nroots = rs->nroots;
111 struct wspace *ws;
112 int nn = rs->nn;
113
114 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
115 if (!ws)
116 return NULL;
117
118 ws->c = kmalloc_array(2 * (nn + nroots),
119 sizeof(uint16_t), GFP_KERNEL);
120 if (!ws->c)
121 goto err;
122
123 ws->r = ws->c + nn;
124 ws->s = ws->r + nn;
125 ws->corr = ws->s + nroots;
126
127 ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
128 if (!ws->errlocs)
129 goto err;
130
131 ws->derrlocs = ws->errlocs + nn;
132 return ws;
133
134err:
135 free_ws(ws);
136 return NULL;
137}
138
139
140/*
141 * Generates a random codeword and stores it in c. Generates random errors and
142 * erasures, and stores the random word with errors in r. Erasure positions are
143 * stored in derrlocs, while errlocs has one of three values in every position:
144 *
145 * 0 if there is no error in this position;
146 * 1 if there is a symbol error in this position;
147 * 2 if there is an erasure without symbol corruption.
148 *
149 * Returns the number of corrupted symbols.
150 */
151static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
152 int len, int errs, int eras)
153{
154 int nroots = rs->codec->nroots;
155 int *derrlocs = ws->derrlocs;
156 int *errlocs = ws->errlocs;
157 int dlen = len - nroots;
158 int nn = rs->codec->nn;
159 uint16_t *c = ws->c;
160 uint16_t *r = ws->r;
161 int errval;
162 int errloc;
163 int i;
164
165 /* Load c with random data and encode */
166 for (i = 0; i < dlen; i++)
167 c[i] = prandom_u32() & nn;
168
169 memset(c + dlen, 0, nroots * sizeof(*c));
170 encode_rs16(rs, c, dlen, c + dlen, 0);
171
172	/* Make copy and add errors and erasures */
173 memcpy(r, c, len * sizeof(*r));
174 memset(errlocs, 0, len * sizeof(*errlocs));
175 memset(derrlocs, 0, nroots * sizeof(*derrlocs));
176
177 /* Generating random errors */
178 for (i = 0; i < errs; i++) {
179 do {
180 /* Error value must be nonzero */
181 errval = prandom_u32() & nn;
182 } while (errval == 0);
183
184 do {
185 /* Must not choose the same location twice */
186 errloc = prandom_u32() % len;
187 } while (errlocs[errloc] != 0);
188
189 errlocs[errloc] = 1;
190 r[errloc] ^= errval;
191 }
192
193 /* Generating random erasures */
194 for (i = 0; i < eras; i++) {
195 do {
196 /* Must not choose the same location twice */
197 errloc = prandom_u32() % len;
198 } while (errlocs[errloc] != 0);
199
200 derrlocs[i] = errloc;
201
202 if (ewsc && (prandom_u32() & 1)) {
203 /* Erasure with the symbol intact */
204 errlocs[errloc] = 2;
205 } else {
206 /* Erasure with corrupted symbol */
207 do {
208 /* Error value must be nonzero */
209 errval = prandom_u32() & nn;
210 } while (errval == 0);
211
212 errlocs[errloc] = 1;
213 r[errloc] ^= errval;
214 errs++;
215 }
216 }
217
218 return errs;
219}
220
221static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs)
222{
223 int i;
224
225 for (i = 0; i < nerrs; i++)
226 data[errlocs[i]] ^= corr[i];
227}
228
229static void compute_syndrome(struct rs_control *rsc, uint16_t *data,
230 int len, uint16_t *syn)
231{
232 struct rs_codec *rs = rsc->codec;
233 uint16_t *alpha_to = rs->alpha_to;
234 uint16_t *index_of = rs->index_of;
235 int nroots = rs->nroots;
236 int prim = rs->prim;
237 int fcr = rs->fcr;
238 int i, j;
239
240 /* Calculating syndrome */
241 for (i = 0; i < nroots; i++) {
242 syn[i] = data[0];
243 for (j = 1; j < len; j++) {
244 if (syn[i] == 0) {
245 syn[i] = data[j];
246 } else {
247 syn[i] = data[j] ^
248 alpha_to[rs_modnn(rs, index_of[syn[i]]
249 + (fcr + i) * prim)];
250 }
251 }
252 }
253
254 /* Convert to index form */
255 for (i = 0; i < nroots; i++)
256 syn[i] = rs->index_of[syn[i]];
257}
258
259/* Test up to error correction capacity */
260static void test_uc(struct rs_control *rs, int len, int errs,
261 int eras, int trials, struct estat *stat,
262 struct wspace *ws, int method)
263{
264 int dlen = len - rs->codec->nroots;
265 int *derrlocs = ws->derrlocs;
266 int *errlocs = ws->errlocs;
267 uint16_t *corr = ws->corr;
268 uint16_t *c = ws->c;
269 uint16_t *r = ws->r;
270 uint16_t *s = ws->s;
271 int derrs, nerrs;
272 int i, j;
273
274 for (j = 0; j < trials; j++) {
275 nerrs = get_rcw_we(rs, ws, len, errs, eras);
276
277 switch (method) {
278 case CORR_BUFFER:
279 derrs = decode_rs16(rs, r, r + dlen, dlen,
280 NULL, eras, derrlocs, 0, corr);
281 fix_err(r, derrs, corr, derrlocs);
282 break;
283 case CALLER_SYNDROME:
284 compute_syndrome(rs, r, len, s);
285 derrs = decode_rs16(rs, NULL, NULL, dlen,
286 s, eras, derrlocs, 0, corr);
287 fix_err(r, derrs, corr, derrlocs);
288 break;
289 case IN_PLACE:
290 derrs = decode_rs16(rs, r, r + dlen, dlen,
291 NULL, eras, derrlocs, 0, NULL);
292 break;
293 default:
294 continue;
295 }
296
297 if (derrs != nerrs)
298 stat->irv++;
299
300 if (method != IN_PLACE) {
301 for (i = 0; i < derrs; i++) {
302 if (errlocs[derrlocs[i]] != 1)
303 stat->wepos++;
304 }
305 }
306
307 if (memcmp(r, c, len * sizeof(*r)))
308 stat->dwrong++;
309 }
310 stat->nwords += trials;
311}
312
313static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
314 int len, int trials, int method)
315{
316 static const char * const desc[] = {
317 "Testing correction buffer interface...",
318 "Testing with caller provided syndrome...",
319 "Testing in-place interface..."
320 };
321
322 struct estat stat = {0, 0, 0, 0};
323 int nroots = rs->codec->nroots;
324 int errs, eras, retval;
325
326 if (v >= V_PROGRESS)
327 pr_info(" %s\n", desc[method]);
328
329 for (errs = 0; errs <= nroots / 2; errs++)
330 for (eras = 0; eras <= nroots - 2 * errs; eras++)
331 test_uc(rs, len, errs, eras, trials, &stat, ws, method);
332
333 if (v >= V_CSUMMARY) {
334 pr_info(" Decodes wrong: %d / %d\n",
335 stat.dwrong, stat.nwords);
336 pr_info(" Wrong return value: %d / %d\n",
337 stat.irv, stat.nwords);
338 if (method != IN_PLACE)
339 pr_info(" Wrong error position: %d\n", stat.wepos);
340 }
341
342 retval = stat.dwrong + stat.wepos + stat.irv;
343 if (retval && v >= V_PROGRESS)
344 pr_warn(" FAIL: %d decoding failures!\n", retval);
345
346 return retval;
347}
348
349static int exercise_rs(struct rs_control *rs, struct wspace *ws,
350 int len, int trials)
351{
352
353 int retval = 0;
354 int i;
355
356 if (v >= V_PROGRESS)
357 pr_info("Testing up to error correction capacity...\n");
358
359 for (i = 0; i <= IN_PLACE; i++)
360 retval |= ex_rs_helper(rs, ws, len, trials, i);
361
362 return retval;
363}
364
365/* Tests for correct behaviour beyond error correction capacity */
366static void test_bc(struct rs_control *rs, int len, int errs,
367 int eras, int trials, struct bcstat *stat,
368 struct wspace *ws)
369{
370 int nroots = rs->codec->nroots;
371 int dlen = len - nroots;
372 int *derrlocs = ws->derrlocs;
373 uint16_t *corr = ws->corr;
374 uint16_t *r = ws->r;
375 int derrs, j;
376
377 for (j = 0; j < trials; j++) {
378 get_rcw_we(rs, ws, len, errs, eras);
379 derrs = decode_rs16(rs, r, r + dlen, dlen,
380 NULL, eras, derrlocs, 0, corr);
381 fix_err(r, derrs, corr, derrlocs);
382
383 if (derrs >= 0) {
384 stat->rsuccess++;
385
386 /*
387 * We check that the returned word is actually a
388	 * codeword. The obvious way to do this would be to
389 * compute the syndrome, but we don't want to replicate
390 * that code here. However, all the codes are in
391 * systematic form, and therefore we can encode the
392 * returned word, and see whether the parity changes or
393 * not.
394 */
395 memset(corr, 0, nroots * sizeof(*corr));
396 encode_rs16(rs, r, dlen, corr, 0);
397
398 if (memcmp(r + dlen, corr, nroots * sizeof(*corr)))
399 stat->noncw++;
400 } else {
401 stat->rfail++;
402 }
403 }
404 stat->nwords += trials;
405}
406
407static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
408 int len, int trials)
409{
410 struct bcstat stat = {0, 0, 0, 0};
411 int nroots = rs->codec->nroots;
412 int errs, eras, cutoff;
413
414 if (v >= V_PROGRESS)
415 pr_info("Testing beyond error correction capacity...\n");
416
417 for (errs = 1; errs <= nroots; errs++) {
418 eras = nroots - 2 * errs + 1;
419 if (eras < 0)
420 eras = 0;
421
422 cutoff = nroots <= len - errs ? nroots : len - errs;
423 for (; eras <= cutoff; eras++)
424 test_bc(rs, len, errs, eras, trials, &stat, ws);
425 }
426
427 if (v >= V_CSUMMARY) {
428 pr_info(" decoder gives up: %d / %d\n",
429 stat.rfail, stat.nwords);
430 pr_info(" decoder returns success: %d / %d\n",
431 stat.rsuccess, stat.nwords);
432 pr_info(" not a codeword: %d / %d\n",
433 stat.noncw, stat.rsuccess);
434 }
435
436 if (stat.noncw && v >= V_PROGRESS)
437 pr_warn(" FAIL: %d silent failures!\n", stat.noncw);
438
439 return stat.noncw;
440}
441
442static int run_exercise(struct etab *e)
443{
444 int nn = (1 << e->symsize) - 1;
445 int kk = nn - e->nroots;
446 struct rs_control *rsc;
447 int retval = -ENOMEM;
448 int max_pad = kk - 1;
449 int prev_pad = -1;
450 struct wspace *ws;
451 int i;
452
453 rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
454 if (!rsc)
455 return retval;
456
457 ws = alloc_ws(rsc->codec);
458 if (!ws)
459 goto err;
460
461 retval = 0;
462 for (i = 0; i < ARRAY_SIZE(pad_coef); i++) {
463 int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;
464 int len = nn - pad;
465
466 if (pad == prev_pad)
467 continue;
468
469 prev_pad = pad;
470 if (v >= V_PROGRESS) {
471 pr_info("Testing (%d,%d)_%d code...\n",
472 len, kk - pad, nn + 1);
473 }
474
475 retval |= exercise_rs(rsc, ws, len, e->ntrials);
476 if (bc)
477 retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
478 }
479
480 free_ws(ws);
481
482err:
483 free_rs(rsc);
484 return retval;
485}
486
487static int __init test_rslib_init(void)
488{
489 int i, fail = 0;
490
491 for (i = 0; Tab[i].symsize != 0 ; i++) {
492 int retval;
493
494 retval = run_exercise(Tab + i);
495 if (retval < 0)
496 return -ENOMEM;
497
498 fail |= retval;
499 }
500
501 if (fail)
502 pr_warn("rslib: test failed\n");
503 else
504 pr_info("rslib: test ok\n");
505
506 return -EAGAIN; /* Failing the init call unloads the module immediately */
507}
508
509static void __exit test_rslib_exit(void)
510{
511}
512
513module_init(test_rslib_init)
514module_exit(test_rslib_exit)
515
516MODULE_LICENSE("GPL");
517MODULE_AUTHOR("Ferdinand Blomqvist");
518MODULE_DESCRIPTION("Reed-Solomon library test");
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 54f57cd117c6..969e5400a615 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,9 +26,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
26 /* 26 /*
27 * First get a stable cleared mask, setting the old mask to 0. 27 * First get a stable cleared mask, setting the old mask to 0.
28 */ 28 */
29 do { 29 mask = xchg(&sb->map[index].cleared, 0);
30 mask = sb->map[index].cleared;
31 } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
32 30
33 /* 31 /*
34 * Now clear the masked bits in our free word 32 * Now clear the masked bits in our free word
@@ -516,10 +514,8 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
516 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 514 struct sbq_wait_state *ws = &sbq->ws[wake_index];
517 515
518 if (waitqueue_active(&ws->wait)) { 516 if (waitqueue_active(&ws->wait)) {
519 int o = atomic_read(&sbq->wake_index); 517 if (wake_index != atomic_read(&sbq->wake_index))
520 518 atomic_set(&sbq->wake_index, wake_index);
521 if (wake_index != o)
522 atomic_cmpxchg(&sbq->wake_index, o, wake_index);
523 return ws; 519 return ws;
524 } 520 }
525 521
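In the first hunk above, the cmpxchg() retry loop is replaced by a single xchg(): the atomic exchange stores the new value and returns the previous one in a single step, so no retry is needed to obtain a stable snapshot of the cleared mask. A minimal sketch of the pattern, with an illustrative function name rather than the sbitmap code itself:

static inline unsigned long fetch_and_clear(unsigned long *cleared)
{
	/* Atomically read the old mask and reset it to 0 in one operation */
	return xchg(cleared, 0);
}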
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 2882d9ba6607..c2cf2c311b7d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -179,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
179 * __sg_free_table - Free a previously mapped sg table 179 * __sg_free_table - Free a previously mapped sg table
180 * @table: The sg table header to use 180 * @table: The sg table header to use
181 * @max_ents: The maximum number of entries per single scatterlist 181 * @max_ents: The maximum number of entries per single scatterlist
182 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk 182 * @nents_first_chunk: Number of entries in the (preallocated) first
183 * scatterlist chunk, 0 means no such preallocated first chunk
183 * @free_fn: Free function 184 * @free_fn: Free function
184 * 185 *
185 * Description: 186 * Description:
@@ -189,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
189 * 190 *
190 **/ 191 **/
191void __sg_free_table(struct sg_table *table, unsigned int max_ents, 192void __sg_free_table(struct sg_table *table, unsigned int max_ents,
192 bool skip_first_chunk, sg_free_fn *free_fn) 193 unsigned int nents_first_chunk, sg_free_fn *free_fn)
193{ 194{
194 struct scatterlist *sgl, *next; 195 struct scatterlist *sgl, *next;
196 unsigned curr_max_ents = nents_first_chunk ?: max_ents;
195 197
196 if (unlikely(!table->sgl)) 198 if (unlikely(!table->sgl))
197 return; 199 return;
@@ -207,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
207 * sg_size is then one less than alloc size, since the last 209 * sg_size is then one less than alloc size, since the last
208 * element is the chain pointer. 210 * element is the chain pointer.
209 */ 211 */
210 if (alloc_size > max_ents) { 212 if (alloc_size > curr_max_ents) {
211 next = sg_chain_ptr(&sgl[max_ents - 1]); 213 next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
212 alloc_size = max_ents; 214 alloc_size = curr_max_ents;
213 sg_size = alloc_size - 1; 215 sg_size = alloc_size - 1;
214 } else { 216 } else {
215 sg_size = alloc_size; 217 sg_size = alloc_size;
@@ -217,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
217 } 219 }
218 220
219 table->orig_nents -= sg_size; 221 table->orig_nents -= sg_size;
220 if (skip_first_chunk) 222 if (nents_first_chunk)
221 skip_first_chunk = false; 223 nents_first_chunk = 0;
222 else 224 else
223 free_fn(sgl, alloc_size); 225 free_fn(sgl, alloc_size);
224 sgl = next; 226 sgl = next;
227 curr_max_ents = max_ents;
225 } 228 }
226 229
227 table->sgl = NULL; 230 table->sgl = NULL;
@@ -244,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
244 * @table: The sg table header to use 247 * @table: The sg table header to use
245 * @nents: Number of entries in sg list 248 * @nents: Number of entries in sg list
246 * @max_ents: The maximum number of entries the allocator returns per call 249 * @max_ents: The maximum number of entries the allocator returns per call
250 * @nents_first_chunk: Number of entries in the (preallocated) first
251 * scatterlist chunk, 0 means no such preallocated chunk provided by the user
247 * @gfp_mask: GFP allocation mask 252 * @gfp_mask: GFP allocation mask
248 * @alloc_fn: Allocator to use 253 * @alloc_fn: Allocator to use
249 * 254 *
@@ -260,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
260 **/ 265 **/
261int __sg_alloc_table(struct sg_table *table, unsigned int nents, 266int __sg_alloc_table(struct sg_table *table, unsigned int nents,
262 unsigned int max_ents, struct scatterlist *first_chunk, 267 unsigned int max_ents, struct scatterlist *first_chunk,
263 gfp_t gfp_mask, sg_alloc_fn *alloc_fn) 268 unsigned int nents_first_chunk, gfp_t gfp_mask,
269 sg_alloc_fn *alloc_fn)
264{ 270{
265 struct scatterlist *sg, *prv; 271 struct scatterlist *sg, *prv;
266 unsigned int left; 272 unsigned int left;
273 unsigned curr_max_ents = nents_first_chunk ?: max_ents;
274 unsigned prv_max_ents;
267 275
268 memset(table, 0, sizeof(*table)); 276 memset(table, 0, sizeof(*table));
269 277
@@ -279,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
279 do { 287 do {
280 unsigned int sg_size, alloc_size = left; 288 unsigned int sg_size, alloc_size = left;
281 289
282 if (alloc_size > max_ents) { 290 if (alloc_size > curr_max_ents) {
283 alloc_size = max_ents; 291 alloc_size = curr_max_ents;
284 sg_size = alloc_size - 1; 292 sg_size = alloc_size - 1;
285 } else 293 } else
286 sg_size = alloc_size; 294 sg_size = alloc_size;
@@ -314,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
314 * If this is not the first mapping, chain previous part. 322 * If this is not the first mapping, chain previous part.
315 */ 323 */
316 if (prv) 324 if (prv)
317 sg_chain(prv, max_ents, sg); 325 sg_chain(prv, prv_max_ents, sg);
318 else 326 else
319 table->sgl = sg; 327 table->sgl = sg;
320 328
@@ -325,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
325 sg_mark_end(&sg[sg_size - 1]); 333 sg_mark_end(&sg[sg_size - 1]);
326 334
327 prv = sg; 335 prv = sg;
336 prv_max_ents = curr_max_ents;
337 curr_max_ents = max_ents;
328 } while (left); 338 } while (left);
329 339
330 return 0; 340 return 0;
@@ -347,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
347 int ret; 357 int ret;
348 358
349 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, 359 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
350 NULL, gfp_mask, sg_kmalloc); 360 NULL, 0, gfp_mask, sg_kmalloc);
351 if (unlikely(ret)) 361 if (unlikely(ret))
352 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); 362 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
353 363
354 return ret; 364 return ret;
355} 365}
@@ -676,17 +686,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
676{ 686{
677 if (!miter->__remaining) { 687 if (!miter->__remaining) {
678 struct scatterlist *sg; 688 struct scatterlist *sg;
679 unsigned long pgoffset;
680 689
681 if (!__sg_page_iter_next(&miter->piter)) 690 if (!__sg_page_iter_next(&miter->piter))
682 return false; 691 return false;
683 692
684 sg = miter->piter.sg; 693 sg = miter->piter.sg;
685 pgoffset = miter->piter.sg_pgoffset;
686 694
687 miter->__offset = pgoffset ? 0 : sg->offset; 695 miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
696 miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
697 miter->__offset &= PAGE_SIZE - 1;
688 miter->__remaining = sg->offset + sg->length - 698 miter->__remaining = sg->offset + sg->length -
689 (pgoffset << PAGE_SHIFT) - miter->__offset; 699 (miter->piter.sg_pgoffset << PAGE_SHIFT) -
700 miter->__offset;
690 miter->__remaining = min_t(unsigned long, miter->__remaining, 701 miter->__remaining = min_t(unsigned long, miter->__remaining,
691 PAGE_SIZE - miter->__offset); 702 PAGE_SIZE - miter->__offset);
692 } 703 }
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..db29e5c1f790 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
70/** 70/**
71 * sg_free_table_chained - Free a previously mapped sg table 71 * sg_free_table_chained - Free a previously mapped sg table
72 * @table: The sg table header to use 72 * @table: The sg table header to use
73 * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained? 73 * @nents_first_chunk: size of the first_chunk SGL passed to
74 * sg_alloc_table_chained
74 * 75 *
75 * Description: 76 * Description:
76 * Free an sg table previously allocated and setup with 77 * Free an sg table previously allocated and setup with
77 * sg_alloc_table_chained(). 78 * sg_alloc_table_chained().
78 * 79 *
80 * @nents_first_chunk has to be the same as the parameter passed
81 * to sg_alloc_table_chained().
82 *
79 **/ 83 **/
80void sg_free_table_chained(struct sg_table *table, bool first_chunk) 84void sg_free_table_chained(struct sg_table *table,
85 unsigned nents_first_chunk)
81{ 86{
82 if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE) 87 if (table->orig_nents <= nents_first_chunk)
83 return; 88 return;
84 __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free); 89
90 if (nents_first_chunk == 1)
91 nents_first_chunk = 0;
92
93 __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
85} 94}
86EXPORT_SYMBOL_GPL(sg_free_table_chained); 95EXPORT_SYMBOL_GPL(sg_free_table_chained);
87 96
@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
90 * @table: The sg table header to use 99 * @table: The sg table header to use
91 * @nents: Number of entries in sg list 100 * @nents: Number of entries in sg list
92 * @first_chunk: first SGL 101 * @first_chunk: first SGL
102 * @nents_first_chunk: number of entries in @first_chunk
93 * 103 *
94 * Description: 104 * Description:
95 * Allocate and chain SGLs in an sg table. If @nents@ is larger than 105 * Allocate and chain SGLs in an sg table. If @nents@ is larger than
 96 * SG_CHUNK_SIZE a chained sg table will be setup. 106 * @nents_first_chunk a chained sg table will be set up. @first_chunk is
107 * ignored if nents_first_chunk <= 1 because the user expects the SGL to
108 * point to a non-chained SGL.
97 * 109 *
98 **/ 110 **/
99int sg_alloc_table_chained(struct sg_table *table, int nents, 111int sg_alloc_table_chained(struct sg_table *table, int nents,
100 struct scatterlist *first_chunk) 112 struct scatterlist *first_chunk, unsigned nents_first_chunk)
101{ 113{
102 int ret; 114 int ret;
103 115
104 BUG_ON(!nents); 116 BUG_ON(!nents);
105 117
106 if (first_chunk) { 118 if (first_chunk && nents_first_chunk) {
107 if (nents <= SG_CHUNK_SIZE) { 119 if (nents <= nents_first_chunk) {
108 table->nents = table->orig_nents = nents; 120 table->nents = table->orig_nents = nents;
109 sg_init_table(table->sgl, nents); 121 sg_init_table(table->sgl, nents);
110 return 0; 122 return 0;
111 } 123 }
112 } 124 }
113 125
126 /* The caller expects the first SGL to contain a real entry */
127 if (nents_first_chunk <= 1) {
128 first_chunk = NULL;
129 nents_first_chunk = 0;
130 }
131
114 ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, 132 ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
115 first_chunk, GFP_ATOMIC, sg_pool_alloc); 133 first_chunk, nents_first_chunk,
134 GFP_ATOMIC, sg_pool_alloc);
116 if (unlikely(ret)) 135 if (unlikely(ret))
117 sg_free_table_chained(table, (bool)first_chunk); 136 sg_free_table_chained(table, nents_first_chunk);
118 return ret; 137 return ret;
119} 138}
120EXPORT_SYMBOL_GPL(sg_alloc_table_chained); 139EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
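For callers of the chained SG helpers, the visible change is the extra nents_first_chunk argument, which must match between sg_alloc_table_chained() and sg_free_table_chained(). A hedged usage sketch follows; the struct layout, function names and the choice of SG_CHUNK_SIZE for the inline chunk are illustrative assumptions, not taken from any particular in-tree caller:

struct my_request {
	struct sg_table sgt;
	/* preallocated inline SGL handed to sg_alloc_table_chained() */
	struct scatterlist inline_sgl[SG_CHUNK_SIZE];
};

static int my_map_request(struct my_request *req, int nents)
{
	/* Existing callers point table->sgl at the inline chunk up front */
	req->sgt.sgl = req->inline_sgl;

	/* Entries beyond the inline chunk are chained from the pool */
	return sg_alloc_table_chained(&req->sgt, nents,
				      req->inline_sgl, SG_CHUNK_SIZE);
}

static void my_unmap_request(struct my_request *req)
{
	/* Must pass the same nents_first_chunk used at allocation time */
	sg_free_table_chained(&req->sgt, SG_CHUNK_SIZE);
}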
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c2..60ba93fc42ce 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
23 * Kernel threads bound to a single CPU can safely use 23 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 24 * smp_processor_id():
25 */ 25 */
26 if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu))) 26 if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
27 goto out; 27 goto out;
28 28
29 /* 29 /*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4403e1924f73..3a90a9e2b94a 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -540,6 +540,25 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
540} 540}
541EXPORT_SYMBOL(string_escape_mem); 541EXPORT_SYMBOL(string_escape_mem);
542 542
543int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
544 size_t osz)
545{
546 char *p = dst;
547 char *end = p + osz;
548
549 while (isz--) {
550 unsigned char c = *src++;
551
552 if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
553 escape_hex(c, &p, end);
554 else
555 escape_passthrough(c, &p, end);
556 }
557
558 return p - dst;
559}
560EXPORT_SYMBOL(string_escape_mem_ascii);
561
543/* 562/*
544 * Return an allocated string that has been escaped of special characters 563 * Return an allocated string that has been escaped of special characters
545 * and double quotes, making it safe to log in quotes. 564 * and double quotes, making it safe to log in quotes.
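The string_escape_mem_ascii() helper added above hex-escapes every byte that is not printable ASCII, as well as '"' and '\', and returns the length the escaped output would have, which can exceed the destination size. A hedged caller sketch; the function name, buffer size and pr_info() usage are illustrative only:

static void log_escaped(const char *buf, size_t len)
{
	char dst[128];	/* worst case is 4 bytes ("\xHH") per input byte */
	int n;

	n = string_escape_mem_ascii(buf, len, dst, sizeof(dst));

	/* Clamp: the return value is the full would-be escaped length */
	n = min_t(int, n, sizeof(dst));
	pr_info("payload: %.*s\n", n, dst);
}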
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
new file mode 100644
index 000000000000..4c40580a99a3
--- /dev/null
+++ b/lib/test_blackhole_dev.c
@@ -0,0 +1,100 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This module tests the blackhole_dev that is created during the
 4 * net subsystem initialization. The test injects an skb into the
 5 * stack with skb->dev set to the blackhole_dev and expects the
 6 * kernel to behave in a sane manner
7 * (in other words, *not crash*)!
8 *
9 * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/printk.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/udp.h>
18#include <linux/ipv6.h>
19
20#include <net/dst.h>
21
22#define SKB_SIZE 256
23#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
24#define TAIL_SIZE 32 /* random tail-room */
25
26#define UDP_PORT 1234
27
28static int __init test_blackholedev_init(void)
29{
30 struct ipv6hdr *ip6h;
31 struct sk_buff *skb;
32 struct ethhdr *ethh;
33 struct udphdr *uh;
34 int data_len;
35 int ret;
36
37 skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
38 if (!skb)
39 return -ENOMEM;
40
41 /* Reserve head-room for the headers */
42 skb_reserve(skb, HEAD_SIZE);
43
44 /* Add data to the skb */
45 data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
46 memset(__skb_put(skb, data_len), 0xf, data_len);
47
48 /* Add protocol data */
49 /* (Transport) UDP */
50 uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
51 skb_set_transport_header(skb, 0);
52 uh->source = uh->dest = htons(UDP_PORT);
53 uh->len = htons(data_len);
54 uh->check = 0;
55 /* (Network) IPv6 */
56 ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
57 skb_set_network_header(skb, 0);
58 ip6h->hop_limit = 32;
 59 ip6h->payload_len = htons(data_len + sizeof(struct udphdr));
60 ip6h->nexthdr = IPPROTO_UDP;
61 ip6h->saddr = in6addr_loopback;
62 ip6h->daddr = in6addr_loopback;
63 /* Ether */
64 ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
65 skb_set_mac_header(skb, 0);
66
67 skb->protocol = htons(ETH_P_IPV6);
68 skb->pkt_type = PACKET_HOST;
69 skb->dev = blackhole_netdev;
70
71 /* Now attempt to send the packet */
72 ret = dev_queue_xmit(skb);
73
74 switch (ret) {
75 case NET_XMIT_SUCCESS:
76 pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
77 break;
78 case NET_XMIT_DROP:
79 pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
80 break;
81 case NET_XMIT_CN:
82 pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
83 break;
84 default:
85 pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
86 }
87
88 return 0;
89}
90
91static void __exit test_blackholedev_exit(void)
92{
93 pr_warn("test_blackholedev module terminating.\n");
94}
95
96module_init(test_blackholedev_init);
97module_exit(test_blackholedev_exit);
98
99MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
100MODULE_LICENSE("GPL");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index e3c593c38eff..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -7,16 +7,17 @@
7 7
8#define pr_fmt(fmt) "kasan test: %s " fmt, __func__ 8#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
9 9
10#include <linux/bitops.h>
10#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/kasan.h>
11#include <linux/kernel.h> 13#include <linux/kernel.h>
12#include <linux/mman.h>
13#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/module.h>
14#include <linux/printk.h> 17#include <linux/printk.h>
15#include <linux/slab.h> 18#include <linux/slab.h>
16#include <linux/string.h> 19#include <linux/string.h>
17#include <linux/uaccess.h> 20#include <linux/uaccess.h>
18#include <linux/module.h>
19#include <linux/kasan.h>
20 21
21/* 22/*
22 * Note: test functions are marked noinline so that their names appear in 23 * Note: test functions are marked noinline so that their names appear in
@@ -619,6 +620,95 @@ static noinline void __init kasan_strings(void)
619 strnlen(ptr, 1); 620 strnlen(ptr, 1);
620} 621}
621 622
623static noinline void __init kasan_bitops(void)
624{
625 /*
626 * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
627 * this way we do not actually corrupt other memory.
628 */
629 long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
630 if (!bits)
631 return;
632
633 /*
634 * Below calls try to access bit within allocated memory; however, the
635 * below accesses are still out-of-bounds, since bitops are defined to
636 * operate on the whole long the bit is in.
637 */
638 pr_info("out-of-bounds in set_bit\n");
639 set_bit(BITS_PER_LONG, bits);
640
641 pr_info("out-of-bounds in __set_bit\n");
642 __set_bit(BITS_PER_LONG, bits);
643
644 pr_info("out-of-bounds in clear_bit\n");
645 clear_bit(BITS_PER_LONG, bits);
646
647 pr_info("out-of-bounds in __clear_bit\n");
648 __clear_bit(BITS_PER_LONG, bits);
649
650 pr_info("out-of-bounds in clear_bit_unlock\n");
651 clear_bit_unlock(BITS_PER_LONG, bits);
652
653 pr_info("out-of-bounds in __clear_bit_unlock\n");
654 __clear_bit_unlock(BITS_PER_LONG, bits);
655
656 pr_info("out-of-bounds in change_bit\n");
657 change_bit(BITS_PER_LONG, bits);
658
659 pr_info("out-of-bounds in __change_bit\n");
660 __change_bit(BITS_PER_LONG, bits);
661
662 /*
663 * Below calls try to access bit beyond allocated memory.
664 */
665 pr_info("out-of-bounds in test_and_set_bit\n");
666 test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
667
668 pr_info("out-of-bounds in __test_and_set_bit\n");
669 __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
670
671 pr_info("out-of-bounds in test_and_set_bit_lock\n");
672 test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
673
674 pr_info("out-of-bounds in test_and_clear_bit\n");
675 test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
676
677 pr_info("out-of-bounds in __test_and_clear_bit\n");
678 __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
679
680 pr_info("out-of-bounds in test_and_change_bit\n");
681 test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
682
683 pr_info("out-of-bounds in __test_and_change_bit\n");
684 __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
685
686 pr_info("out-of-bounds in test_bit\n");
687 (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
688
689#if defined(clear_bit_unlock_is_negative_byte)
690 pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
691 clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
692#endif
693 kfree(bits);
694}
695
696static noinline void __init kmalloc_double_kzfree(void)
697{
698 char *ptr;
699 size_t size = 16;
700
701 pr_info("double-free (kzfree)\n");
702 ptr = kmalloc(size, GFP_KERNEL);
703 if (!ptr) {
704 pr_err("Allocation failed\n");
705 return;
706 }
707
708 kzfree(ptr);
709 kzfree(ptr);
710}
711
622static int __init kmalloc_tests_init(void) 712static int __init kmalloc_tests_init(void)
623{ 713{
624 /* 714 /*
@@ -660,6 +750,8 @@ static int __init kmalloc_tests_init(void)
660 kasan_memchr(); 750 kasan_memchr();
661 kasan_memcmp(); 751 kasan_memcmp();
662 kasan_strings(); 752 kasan_strings();
753 kasan_bitops();
754 kmalloc_double_kzfree();
663 755
664 kasan_restore_multi_shot(multishot); 756 kasan_restore_multi_shot(multishot);
665 757
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
new file mode 100644
index 000000000000..cc00364bd2c2
--- /dev/null
+++ b/lib/vdso/Kconfig
@@ -0,0 +1,36 @@
1# SPDX-License-Identifier: GPL-2.0
2
3config HAVE_GENERIC_VDSO
4 bool
5
6if HAVE_GENERIC_VDSO
7
8config GENERIC_GETTIMEOFDAY
9 bool
10 help
11 This is a generic implementation of gettimeofday vdso.
12 Each architecture that enables this feature has to
13 provide the fallback implementation.
14
15config GENERIC_VDSO_32
16 bool
17 depends on GENERIC_GETTIMEOFDAY && !64BIT
18 help
19 This config option helps to avoid possible performance issues
20 in 32 bit only architectures.
21
22config GENERIC_COMPAT_VDSO
23 bool
24 help
25 This config option enables the compat VDSO layer.
26
27config CROSS_COMPILE_COMPAT_VDSO
28 string "32 bit Toolchain prefix for compat vDSO"
29 default ""
30 depends on GENERIC_COMPAT_VDSO
31 help
32 Defines the cross-compiler prefix for compiling compat vDSO.
 33 If a 64 bit compiler (e.g. x86_64) can compile the vDSO for
 34 32 bit, there is no need to define this parameter.
35
36endif
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
new file mode 100644
index 000000000000..c415a685d61b
--- /dev/null
+++ b/lib/vdso/Makefile
@@ -0,0 +1,22 @@
1# SPDX-License-Identifier: GPL-2.0
2
3GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
4GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
5
6c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
7
 8# This cmd checks that the vdso library does not contain absolute relocations.
 9# It has to be called after linking the vdso library and requires it
10# as a parameter.
11#
12# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds
13# to the absolute relocation types printed by "objdump -R" and accepted by the
14# dynamic linker.
15ifndef ARCH_REL_TYPE_ABS
16$(error ARCH_REL_TYPE_ABS is not set)
17endif
18
19quiet_cmd_vdso_check = VDSOCHK $@
20 cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
21 then (echo >&2 "$@: dynamic relocations are not supported"; \
22 rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
new file mode 100644
index 000000000000..2d1c1f241fd9
--- /dev/null
+++ b/lib/vdso/gettimeofday.c
@@ -0,0 +1,239 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Generic userspace implementations of gettimeofday() and similar.
4 */
5#include <linux/compiler.h>
6#include <linux/math64.h>
7#include <linux/time.h>
8#include <linux/kernel.h>
9#include <linux/hrtimer_defs.h>
10#include <vdso/datapage.h>
11#include <vdso/helpers.h>
12
13/*
14 * The generic vDSO implementation requires that gettimeofday.h
15 * provides:
16 * - __arch_get_vdso_data(): to get the vdso datapage.
17 * - __arch_get_hw_counter(): to get the hw counter based on the
18 * clock_mode.
19 * - gettimeofday_fallback(): fallback for gettimeofday.
20 * - clock_gettime_fallback(): fallback for clock_gettime.
21 * - clock_getres_fallback(): fallback for clock_getres.
22 */
23#ifdef ENABLE_COMPAT_VDSO
24#include <asm/vdso/compat_gettimeofday.h>
25#else
26#include <asm/vdso/gettimeofday.h>
27#endif /* ENABLE_COMPAT_VDSO */
28
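To make the contract described in the comment above concrete, an architecture's asm/vdso/gettimeofday.h is expected to provide inline helpers roughly shaped like the sketch below. This is only an illustration of the expected signatures: the datapage symbol, the clock-mode check, the counter read and the syscall stub are placeholders, not any real architecture's implementation:

static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
	/* _vdso_data is a placeholder for the arch's mapped datapage */
	return _vdso_data;
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	/* Placeholder mode check and counter read */
	if (clock_mode == MY_ARCH_CLOCK_MODE)
		return my_arch_read_counter();

	/* A value with the sign bit set makes do_hres() take the fallback */
	return U64_MAX;
}

static __always_inline
int clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
{
	/* Typically a raw clock_gettime() syscall; my_arch_syscall2() is a
	 * placeholder for the arch's syscall stub.
	 */
	return my_arch_syscall2(__NR_clock_gettime, clkid, (long)ts);
}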
29#ifndef vdso_calc_delta
30/*
31 * Default implementation which works for all sane clocksources. That
32 * obviously excludes x86/TSC.
33 */
34static __always_inline
35u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
36{
37 return ((cycles - last) & mask) * mult;
38}
39#endif
40
41static int do_hres(const struct vdso_data *vd, clockid_t clk,
42 struct __kernel_timespec *ts)
43{
44 const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
45 u64 cycles, last, sec, ns;
46 u32 seq;
47
48 do {
49 seq = vdso_read_begin(vd);
50 cycles = __arch_get_hw_counter(vd->clock_mode);
51 ns = vdso_ts->nsec;
52 last = vd->cycle_last;
53 if (unlikely((s64)cycles < 0))
54 return clock_gettime_fallback(clk, ts);
55
56 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
57 ns >>= vd->shift;
58 sec = vdso_ts->sec;
59 } while (unlikely(vdso_read_retry(vd, seq)));
60
61 /*
62 * Do this outside the loop: a race inside the loop could result
63 * in __iter_div_u64_rem() being extremely slow.
64 */
65 ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
66 ts->tv_nsec = ns;
67
68 return 0;
69}
70
71static void do_coarse(const struct vdso_data *vd, clockid_t clk,
72 struct __kernel_timespec *ts)
73{
74 const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
75 u32 seq;
76
77 do {
78 seq = vdso_read_begin(vd);
79 ts->tv_sec = vdso_ts->sec;
80 ts->tv_nsec = vdso_ts->nsec;
81 } while (unlikely(vdso_read_retry(vd, seq)));
82}
83
84static __maybe_unused int
85__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
86{
87 const struct vdso_data *vd = __arch_get_vdso_data();
88 u32 msk;
89
90 /* Check for negative values or invalid clocks */
91 if (unlikely((u32) clock >= MAX_CLOCKS))
92 goto fallback;
93
94 /*
95 * Convert the clockid to a bitmask and use it to check which
96 * clocks are handled in the VDSO directly.
97 */
98 msk = 1U << clock;
99 if (likely(msk & VDSO_HRES)) {
100 return do_hres(&vd[CS_HRES_COARSE], clock, ts);
101 } else if (msk & VDSO_COARSE) {
102 do_coarse(&vd[CS_HRES_COARSE], clock, ts);
103 return 0;
104 } else if (msk & VDSO_RAW) {
105 return do_hres(&vd[CS_RAW], clock, ts);
106 }
107
108fallback:
109 return clock_gettime_fallback(clock, ts);
110}
111
112static __maybe_unused int
113__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
114{
115 struct __kernel_timespec ts;
116 int ret;
117
118 if (res == NULL)
119 goto fallback;
120
121 ret = __cvdso_clock_gettime(clock, &ts);
122
123 if (ret == 0) {
124 res->tv_sec = ts.tv_sec;
125 res->tv_nsec = ts.tv_nsec;
126 }
127
128 return ret;
129
130fallback:
131 return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
132}
133
134static __maybe_unused int
135__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
136{
137 const struct vdso_data *vd = __arch_get_vdso_data();
138
139 if (likely(tv != NULL)) {
140 struct __kernel_timespec ts;
141
142 if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
143 return gettimeofday_fallback(tv, tz);
144
145 tv->tv_sec = ts.tv_sec;
146 tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
147 }
148
149 if (unlikely(tz != NULL)) {
150 tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
151 tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
152 }
153
154 return 0;
155}
156
157#ifdef VDSO_HAS_TIME
158static __maybe_unused time_t __cvdso_time(time_t *time)
159{
160 const struct vdso_data *vd = __arch_get_vdso_data();
161 time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
162
163 if (time)
164 *time = t;
165
166 return t;
167}
168#endif /* VDSO_HAS_TIME */
169
170#ifdef VDSO_HAS_CLOCK_GETRES
171static __maybe_unused
172int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
173{
174 const struct vdso_data *vd = __arch_get_vdso_data();
175 u64 ns;
176 u32 msk;
177 u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
178
179 /* Check for negative values or invalid clocks */
180 if (unlikely((u32) clock >= MAX_CLOCKS))
181 goto fallback;
182
183 /*
184 * Convert the clockid to a bitmask and use it to check which
185 * clocks are handled in the VDSO directly.
186 */
187 msk = 1U << clock;
188 if (msk & VDSO_HRES) {
189 /*
190 * Preserves the behaviour of posix_get_hrtimer_res().
191 */
192 ns = hrtimer_res;
193 } else if (msk & VDSO_COARSE) {
194 /*
195 * Preserves the behaviour of posix_get_coarse_res().
196 */
197 ns = LOW_RES_NSEC;
198 } else if (msk & VDSO_RAW) {
199 /*
200 * Preserves the behaviour of posix_get_hrtimer_res().
201 */
202 ns = hrtimer_res;
203 } else {
204 goto fallback;
205 }
206
207 if (res) {
208 res->tv_sec = 0;
209 res->tv_nsec = ns;
210 }
211
212 return 0;
213
214fallback:
215 return clock_getres_fallback(clock, res);
216}
217
218static __maybe_unused int
219__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
220{
221 struct __kernel_timespec ts;
222 int ret;
223
224 if (res == NULL)
225 goto fallback;
226
227 ret = __cvdso_clock_getres(clock, &ts);
228
229 if (ret == 0) {
230 res->tv_sec = ts.tv_sec;
231 res->tv_nsec = ts.tv_nsec;
232 }
233
234 return ret;
235
236fallback:
237 return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
238}
239#endif /* VDSO_HAS_CLOCK_GETRES */
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 63937044c57d..b0967cf17137 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -599,7 +599,7 @@ static char *string_nocheck(char *buf, char *end, const char *s,
599 struct printf_spec spec) 599 struct printf_spec spec)
600{ 600{
601 int len = 0; 601 int len = 0;
602 size_t lim = spec.precision; 602 int lim = spec.precision;
603 603
604 while (lim--) { 604 while (lim--) {
605 char c = *s++; 605 char c = *s++;
@@ -1799,7 +1799,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1799#ifdef CONFIG_COMMON_CLK 1799#ifdef CONFIG_COMMON_CLK
1800 return string(buf, end, __clk_get_name(clk), spec); 1800 return string(buf, end, __clk_get_name(clk), spec);
1801#else 1801#else
1802 return error_string(buf, end, "(%pC?)", spec); 1802 return ptr_to_id(buf, end, clk, spec);
1803#endif 1803#endif
1804 } 1804 }
1805} 1805}