Diffstat (limited to 'lib')
-rw-r--r--  lib/842/842_debugfs.h            |   5
-rw-r--r--  lib/Kconfig                      |  13
-rw-r--r--  lib/Kconfig.debug                |  39
-rw-r--r--  lib/Makefile                     |   4
-rw-r--r--  lib/atomic64.c                   |  32
-rw-r--r--  lib/bitmap.c                     |   4
-rw-r--r--  lib/clz_ctz.c                    |   4
-rw-r--r--  lib/cmdline.c                    |   5
-rw-r--r--  lib/cpu_rmap.c                   |   5
-rw-r--r--  lib/crc-ccitt.c                  |   4
-rw-r--r--  lib/crc-itu-t.c                  |   4
-rw-r--r--  lib/crc-t10dif.c                 |   4
-rw-r--r--  lib/crc16.c                      |   4
-rw-r--r--  lib/crc4.c                       |   4
-rw-r--r--  lib/crc7.c                       |   4
-rw-r--r--  lib/crypto/Makefile              |   4
-rw-r--r--  lib/crypto/arc4.c                |  74
-rw-r--r--  lib/debugobjects.c               | 321
-rw-r--r--  lib/decompress_unlz4.c           |   5
-rw-r--r--  lib/devres.c                     |   3
-rw-r--r--  lib/digsig.c                     |   2
-rw-r--r--  lib/dim/Makefile                 |   9
-rw-r--r--  lib/dim/dim.c                    |  83
-rw-r--r--  lib/dim/net_dim.c                | 190
-rw-r--r--  lib/dynamic_debug.c              |  12
-rw-r--r--  lib/fault-inject.c               |  73
-rw-r--r--  lib/fonts/fonts.c                | 103
-rw-r--r--  lib/genalloc.c                   | 180
-rw-r--r--  lib/hexdump.c                    |   6
-rw-r--r--  lib/idr.c                        |  14
-rw-r--r--  lib/iomap_copy.c                 |  14
-rw-r--r--  lib/jedec_ddr_data.c             |   5
-rw-r--r--  lib/klist.c                      |   3
-rw-r--r--  lib/kobject.c                    |   4
-rw-r--r--  lib/list_sort.c                  |   2
-rw-r--r--  lib/mpi/mpi-pow.c                |   6
-rw-r--r--  lib/notifier-error-inject.c      |  13
-rw-r--r--  lib/objagg.c                     |   6
-rw-r--r--  lib/parser.c                     |   4
-rw-r--r--  lib/raid6/neon.c                 |   5
-rw-r--r--  lib/raid6/s390vx.uc              |   2
-rw-r--r--  lib/reed_solomon/Makefile        |   2
-rw-r--r--  lib/reed_solomon/decode_rs.c     | 115
-rw-r--r--  lib/reed_solomon/reed_solomon.c  |  12
-rw-r--r--  lib/reed_solomon/test_rslib.c    | 518
-rw-r--r--  lib/rhashtable.c                 |   5
-rw-r--r--  lib/sbitmap.c                    |  10
-rw-r--r--  lib/scatterlist.c                |  49
-rw-r--r--  lib/sg_pool.c                    |  39
-rw-r--r--  lib/sg_split.c                   |   4
-rw-r--r--  lib/smp_processor_id.c           |   2
-rw-r--r--  lib/string_helpers.c             |  19
-rw-r--r--  lib/test_blackhole_dev.c         | 100
-rw-r--r--  lib/test_kasan.c                 | 104
-rw-r--r--  lib/test_rhashtable.c            |   5
-rw-r--r--  lib/test_stackinit.c             |  21
-rw-r--r--  lib/test_xarray.c                |  38
-rw-r--r--  lib/ubsan.c                      |   6
-rw-r--r--  lib/vdso/Kconfig                 |  36
-rw-r--r--  lib/vdso/Makefile                |  22
-rw-r--r--  lib/vdso/gettimeofday.c          | 239
-rw-r--r--  lib/vsprintf.c                   |   4
-rw-r--r--  lib/xarray.c                     |  12
63 files changed, 2201 insertions(+), 439 deletions(-)
diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h
index 277e403e8701..4469407c3e0d 100644
--- a/lib/842/842_debugfs.h
+++ b/lib/842/842_debugfs.h
@@ -22,8 +22,6 @@ static int __init sw842_debugfs_create(void)
 		return -ENODEV;
 
 	sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL);
-	if (IS_ERR(sw842_debugfs_root))
-		return PTR_ERR(sw842_debugfs_root);
 
 	for (i = 0; i < ARRAY_SIZE(template_count); i++) {
 		char name[32];
@@ -46,8 +44,7 @@ static int __init sw842_debugfs_create(void)
 
 static void __exit sw842_debugfs_remove(void)
 {
-	if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root))
-		debugfs_remove_recursive(sw842_debugfs_root);
+	debugfs_remove_recursive(sw842_debugfs_root);
 }
 
 #endif
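Both hunks lean on the same debugfs convention: debugfs_create_dir() reports failure through an error pointer rather than NULL, and the other debugfs calls, including debugfs_remove_recursive(), are written to cope with NULL and error-pointer arguments, so callers can drop all return-value checking. A minimal sketch of the resulting idiom, with a hypothetical "example" directory and counter:

#include <linux/debugfs.h>

static u32 example_counter;
static struct dentry *example_root;

static int __init example_debugfs_create(void)
{
	/* No error check: a failed dir is tolerated by the calls below. */
	example_root = debugfs_create_dir("example", NULL);
	debugfs_create_u32("counter", 0444, example_root, &example_counter);
	return 0;
}

static void __exit example_debugfs_remove(void)
{
	/* Copes with NULL and error pointers alike. */
	debugfs_remove_recursive(example_root);
}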
diff --git a/lib/Kconfig b/lib/Kconfig
index 90623a0e1942..52a7b2e6fb74 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -562,6 +562,14 @@ config SIGNATURE
 	  Digital signature verification. Currently only RSA is supported.
 	  Implementation is done using GnuPG MPI library
 
+config DIMLIB
+	bool "DIM library"
+	default y
+	help
+	  Dynamic Interrupt Moderation library.
+	  Implements an algorithm for dynamically changing CQ moderation
+	  values according to run time performance.
+
 #
 # libfdt files, only selected if needed.
 #
@@ -576,6 +584,11 @@ config OID_REGISTRY
 config UCS2_STRING
 	tristate
 
+#
+# generic vdso
+#
+source "lib/vdso/Kconfig"
+
 source "lib/fonts/Kconfig"
 
 config SG_SPLIT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0031a31d98c2..4ac4ca21a30a 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1102,7 +1102,7 @@ config PROVE_LOCKING
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
-	select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+	select DEBUG_RWSEMS
 	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
@@ -1206,10 +1206,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
 
 config DEBUG_RWSEMS
 	bool "RW Semaphore debugging: basic checks"
-	depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+	depends on DEBUG_KERNEL
 	help
-	  This debugging feature allows mismatched rw semaphore locks and unlocks
-	  to be detected and reported.
+	  This debugging feature allows mismatched rw semaphore locks
+	  and unlocks to be detected and reported.
 
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
@@ -1708,7 +1708,7 @@ config LKDTM
 	  called lkdtm.
 
 	  Documentation on how to use the module can be found in
-	  Documentation/fault-injection/provoke-crashes.txt
+	  Documentation/fault-injection/provoke-crashes.rst
 
 config TEST_LIST_SORT
 	tristate "Linked list sorting test"
@@ -1761,6 +1761,18 @@ config RBTREE_TEST
 	  A benchmark measuring the performance of the rbtree library.
 	  Also includes rbtree invariant checks.
 
+config REED_SOLOMON_TEST
+	tristate "Reed-Solomon library test"
+	depends on DEBUG_KERNEL || m
+	select REED_SOLOMON
+	select REED_SOLOMON_ENC16
+	select REED_SOLOMON_DEC16
+	help
+	  This option enables the self-test function of rslib at boot,
+	  or at module load time.
+
+	  If unsure, say N.
+
 config INTERVAL_TREE_TEST
 	tristate "Interval tree test"
 	depends on DEBUG_KERNEL
@@ -1865,6 +1877,14 @@ config TEST_PARMAN
 
 	  If unsure, say N.
 
+config TEST_IRQ_TIMINGS
+	bool "IRQ timings selftest"
+	depends on IRQ_TIMINGS
+	help
+	  Enable this option to test the irq timings code on boot.
+
+	  If unsure, say N.
+
 config TEST_LKM
 	tristate "Test module loading with 'hello world' module"
 	depends on m
@@ -1916,6 +1936,15 @@ config TEST_BPF
 
 	  If unsure, say N.
 
+config TEST_BLACKHOLE_DEV
+	tristate "Test blackhole netdev functionality"
+	depends on m && NET
+	help
+	  This builds the "test_blackhole_dev" module that validates the
+	  data path through this blackhole netdev.
+
+	  If unsure, say N.
+
 config FIND_BIT_BENCHMARK
 	tristate "Test find_bit functions"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index fb7697031a79..fdd56bc219b8 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -91,6 +91,7 @@ obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
 obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
 obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
 obj-$(CONFIG_TEST_STACKINIT) += test_stackinit.o
+obj-$(CONFIG_TEST_BLACKHOLE_DEV) += test_blackhole_dev.o
 
 obj-$(CONFIG_TEST_LIVEPATCH) += livepatch/
 
@@ -102,7 +103,7 @@ endif
 obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
 CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
 
-obj-y += math/
+obj-y += math/ crypto/
 
 obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
@@ -202,6 +203,7 @@ obj-$(CONFIG_GLOB) += glob.o
 obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
 
 obj-$(CONFIG_MPILIB) += mpi/
+obj-$(CONFIG_DIMLIB) += dim/
 obj-$(CONFIG_SIGNATURE) += digsig.o
 
 lib-$(CONFIG_CLZ_TAB) += clz_tab.o
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 7e6905751522..e98c85a99787 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,11 +42,11 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-long long atomic64_read(const atomic64_t *v)
+s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -55,7 +55,7 @@ long long atomic64_read(const atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_read);
 
-void atomic64_set(atomic64_t *v, long long i)
+void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -67,7 +67,7 @@ void atomic64_set(atomic64_t *v, long long i)
 EXPORT_SYMBOL(atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(long long a, atomic64_t *v)				\
+void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -79,11 +79,11 @@ void atomic64_##op(long long a, atomic64_t *v)			\
 EXPORT_SYMBOL(atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-long long atomic64_##op##_return(long long a, atomic64_t *v)		\
+s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = (v->counter c_op a);					\
@@ -93,11 +93,11 @@ long long atomic64_##op##_return(long long a, atomic64_t *v)	\
 EXPORT_SYMBOL(atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-long long atomic64_fetch_##op(long long a, atomic64_t *v)		\
+s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
-	long long val;							\
+	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	val = v->counter;						\
@@ -130,11 +130,11 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-long long atomic64_dec_if_positive(atomic64_t *v)
+s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter - 1;
@@ -145,11 +145,11 @@ long long atomic64_dec_if_positive(atomic64_t *v)
 }
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
-long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
+s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -160,11 +160,11 @@ long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
 }
 EXPORT_SYMBOL(atomic64_cmpxchg);
 
-long long atomic64_xchg(atomic64_t *v, long long new)
+s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
@@ -174,11 +174,11 @@ long long atomic64_xchg(atomic64_t *v, long long new)
 }
 EXPORT_SYMBOL(atomic64_xchg);
 
-long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u)
+s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
-	long long val;
+	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
 	val = v->counter;
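The conversion is mechanical: the spinlock-protected generic implementation now uses the kernel's fixed-width s64 instead of long long, matching the prototypes in the atomic headers. A small usage sketch of the API as it stands after this patch (the counter itself is illustrative):

#include <linux/atomic.h>

static atomic64_t example_bytes = ATOMIC64_INIT(0);

static void example_account(s64 len)
{
	atomic64_add(len, &example_bytes);
}

static s64 example_drain(void)
{
	/* Atomically fetch the running total and reset it to zero. */
	return atomic64_xchg(&example_bytes, 0);
}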
diff --git a/lib/bitmap.c b/lib/bitmap.c
index f235434df87b..bbe2589e8497 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * lib/bitmap.c
  * Helper functions for bitmap.h.
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 #include <linux/export.h>
 #include <linux/thread_info.h>
diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c
index 2e11e48446ab..0d3a686b5ba2 100644
--- a/lib/clz_ctz.c
+++ b/lib/clz_ctz.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * lib/clz_ctz.c
  *
  * Copyright (C) 2013 Chanho Min <chanho.min@lge.com>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  * The functions in this file aren't called directly, but are required by
  * GCC builtins such as __builtin_ctz, and therefore they can't be removed
  * despite appearing unreferenced in kernel source.
diff --git a/lib/cmdline.c b/lib/cmdline.c
index dc59d6216318..fbb9981a04a4 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/lib/cmdline.c
  * Helper functions generally used for parsing kernel command line
@@ -5,11 +6,7 @@
  *
  * Code and copyrights come from init/main.c and arch/i386/kernel/setup.c.
  *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- *
  * GNU Indent formatting options for this file: -kr -i8 -npsl -pcs
- *
  */
 
 #include <linux/export.h>
diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c
index f610b2a10b3e..075f3788bbe4 100644
--- a/lib/cpu_rmap.c
+++ b/lib/cpu_rmap.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * cpu_rmap.c: CPU affinity reverse-map support
  * Copyright 2011 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
  */
 
 #include <linux/cpu_rmap.h>
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d873b34039ff..d1a7d29d2ac9 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/lib/crc-ccitt.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #include <linux/types.h>
diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c
index b3219d0abfb4..1974b355c148 100644
--- a/lib/crc-itu-t.c
+++ b/lib/crc-itu-t.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * crc-itu-t.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #include <linux/types.h>
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index e89ebfdbb0fc..8cc01a603416 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -1,11 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * T10 Data Integrity Field CRC16 calculation
  *
  * Copyright (c) 2007 Oracle Corporation. All rights reserved.
  * Written by Martin K. Petersen <martin.petersen@oracle.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #include <linux/types.h>
diff --git a/lib/crc16.c b/lib/crc16.c
index 8737b084d1f9..5c3a803c01e0 100644
--- a/lib/crc16.c
+++ b/lib/crc16.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * crc16.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #include <linux/types.h>
diff --git a/lib/crc4.c b/lib/crc4.c
index 164ed9444cd3..e7e1779c67d9 100644
--- a/lib/crc4.c
+++ b/lib/crc4.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * crc4.c - simple crc-4 calculations.
- *
- * This source code is licensed under the GNU General Public License, Version
- * 2. See the file COPYING for more details.
  */
 
 #include <linux/crc4.h>
diff --git a/lib/crc7.c b/lib/crc7.c
index bf6255e23919..6a848d73e804 100644
--- a/lib/crc7.c
+++ b/lib/crc7.c
@@ -1,8 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * crc7.c
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #include <linux/types.h>
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
new file mode 100644
index 000000000000..88195c34932d
--- /dev/null
+++ b/lib/crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o
+libarc4-y := arc4.o
diff --git a/lib/crypto/arc4.c b/lib/crypto/arc4.c
new file mode 100644
index 000000000000..c2020f19c652
--- /dev/null
+++ b/lib/crypto/arc4.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Cryptographic API
+ *
+ * ARC4 Cipher Algorithm
+ *
+ * Jon Oberheide <jon@oberheide.org>
+ */
+
+#include <crypto/arc4.h>
+#include <linux/module.h>
+
+int arc4_setkey(struct arc4_ctx *ctx, const u8 *in_key, unsigned int key_len)
+{
+	int i, j = 0, k = 0;
+
+	ctx->x = 1;
+	ctx->y = 0;
+
+	for (i = 0; i < 256; i++)
+		ctx->S[i] = i;
+
+	for (i = 0; i < 256; i++) {
+		u32 a = ctx->S[i];
+
+		j = (j + in_key[k] + a) & 0xff;
+		ctx->S[i] = ctx->S[j];
+		ctx->S[j] = a;
+		if (++k >= key_len)
+			k = 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(arc4_setkey);
+
+void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, unsigned int len)
+{
+	u32 *const S = ctx->S;
+	u32 x, y, a, b;
+	u32 ty, ta, tb;
+
+	if (len == 0)
+		return;
+
+	x = ctx->x;
+	y = ctx->y;
+
+	a = S[x];
+	y = (y + a) & 0xff;
+	b = S[y];
+
+	do {
+		S[y] = a;
+		a = (a + b) & 0xff;
+		S[x] = b;
+		x = (x + 1) & 0xff;
+		ta = S[x];
+		ty = (y + ta) & 0xff;
+		tb = S[ty];
+		*out++ = *in++ ^ S[a];
+		if (--len == 0)
+			break;
+		y = ty;
+		a = ta;
+		b = tb;
+	} while (true);
+
+	ctx->x = x;
+	ctx->y = y;
+}
+EXPORT_SYMBOL(arc4_crypt);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 55437fd5128b..61261195f5b6 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -25,16 +25,37 @@
 
 #define ODEBUG_POOL_SIZE	1024
 #define ODEBUG_POOL_MIN_LEVEL	256
+#define ODEBUG_POOL_PERCPU_SIZE	64
+#define ODEBUG_BATCH_SIZE	16
 
 #define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
 #define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
 #define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
 
+/*
+ * We limit the freeing of debug objects via workqueue at a maximum
+ * frequency of 10Hz and about 1024 objects for each freeing operation.
+ * So it is freeing at most 10k debug objects per second.
+ */
+#define ODEBUG_FREE_WORK_MAX	1024
+#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
+
 struct debug_bucket {
 	struct hlist_head	list;
 	raw_spinlock_t		lock;
 };
 
+/*
+ * Debug object percpu free list
+ * Access is protected by disabling irq
+ */
+struct debug_percpu_free {
+	struct hlist_head	free_objs;
+	int			obj_free;
+};
+
+static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
+
 static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
 
 static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
@@ -44,13 +65,20 @@ static DEFINE_RAW_SPINLOCK(pool_lock);
 static HLIST_HEAD(obj_pool);
 static HLIST_HEAD(obj_to_free);
 
+/*
+ * Because of the presence of percpu free pools, obj_pool_free will
+ * under-count those in the percpu free pools. Similarly, obj_pool_used
+ * will over-count those in the percpu free pools. Adjustments will be
+ * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
+ * can be off.
+ */
 static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_used;
 static int			obj_pool_max_used;
+static bool			obj_freeing;
 /* The number of objs on the global free list */
 static int			obj_nr_tofree;
-static struct kmem_cache	*obj_cache;
 
 static int			debug_objects_maxchain __read_mostly;
 static int __maybe_unused	debug_objects_maxchecked __read_mostly;
@@ -63,6 +91,7 @@ static int debug_objects_pool_size __read_mostly
 static int			debug_objects_pool_min_level __read_mostly
 				= ODEBUG_POOL_MIN_LEVEL;
 static struct debug_obj_descr	*descr_test  __read_mostly;
+static struct kmem_cache	*obj_cache __read_mostly;
 
 /*
  * Track numbers of kmem_cache_alloc()/free() calls done.
@@ -71,7 +100,7 @@ static int debug_objects_allocated;
 static int			debug_objects_freed;
 
 static void free_obj_work(struct work_struct *work);
-static DECLARE_WORK(debug_obj_work, free_obj_work);
+static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 
 static int __init enable_object_debug(char *str)
 {
@@ -100,7 +129,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new, *obj;
+	struct debug_obj *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
@@ -116,7 +145,7 @@ static void fill_pool(void)
 	 * Recheck with the lock held as the worker thread might have
 	 * won the race and freed the global free list already.
 	 */
-	if (obj_nr_tofree) {
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 		hlist_del(&obj->node);
 		obj_nr_tofree--;
@@ -130,15 +159,23 @@ static void fill_pool(void)
 		return;
 
 	while (obj_pool_free < debug_objects_pool_min_level) {
+		struct debug_obj *new[ODEBUG_BATCH_SIZE];
+		int cnt;
 
-		new = kmem_cache_zalloc(obj_cache, gfp);
-		if (!new)
+		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
+			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
+			if (!new[cnt])
+				break;
+		}
+		if (!cnt)
 			return;
 
 		raw_spin_lock_irqsave(&pool_lock, flags);
-		hlist_add_head(&new->node, &obj_pool);
-		debug_objects_allocated++;
-		obj_pool_free++;
+		while (cnt) {
+			hlist_add_head(&new[--cnt]->node, &obj_pool);
+			debug_objects_allocated++;
+			obj_pool_free++;
+		}
 		raw_spin_unlock_irqrestore(&pool_lock, flags);
 	}
 }
@@ -163,36 +200,81 @@ static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 }
 
 /*
+ * Allocate a new object from the hlist
+ */
+static struct debug_obj *__alloc_object(struct hlist_head *list)
+{
+	struct debug_obj *obj = NULL;
+
+	if (list->first) {
+		obj = hlist_entry(list->first, typeof(*obj), node);
+		hlist_del(&obj->node);
+	}
+
+	return obj;
+}
+
+/*
  * Allocate a new object. If the pool is empty, switch off the debugger.
  * Must be called with interrupts disabled.
  */
 static struct debug_obj *
 alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
 {
-	struct debug_obj *obj = NULL;
+	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+	struct debug_obj *obj;
 
-	raw_spin_lock(&pool_lock);
-	if (obj_pool.first) {
-		obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+	if (likely(obj_cache)) {
+		obj = __alloc_object(&percpu_pool->free_objs);
+		if (obj) {
+			percpu_pool->obj_free--;
+			goto init_obj;
+		}
+	}
 
-		obj->object = addr;
-		obj->descr  = descr;
-		obj->state  = ODEBUG_STATE_NONE;
-		obj->astate = 0;
-		hlist_del(&obj->node);
+	raw_spin_lock(&pool_lock);
+	obj = __alloc_object(&obj_pool);
+	if (obj) {
+		obj_pool_used++;
+		obj_pool_free--;
 
-		hlist_add_head(&obj->node, &b->list);
+		/*
+		 * Looking ahead, allocate one batch of debug objects and
+		 * put them into the percpu free pool.
+		 */
+		if (likely(obj_cache)) {
+			int i;
+
+			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+				struct debug_obj *obj2;
+
+				obj2 = __alloc_object(&obj_pool);
+				if (!obj2)
+					break;
+				hlist_add_head(&obj2->node,
+					       &percpu_pool->free_objs);
+				percpu_pool->obj_free++;
+				obj_pool_used++;
+				obj_pool_free--;
+			}
+		}
 
-		obj_pool_used++;
 		if (obj_pool_used > obj_pool_max_used)
 			obj_pool_max_used = obj_pool_used;
 
-		obj_pool_free--;
 		if (obj_pool_free < obj_pool_min_free)
 			obj_pool_min_free = obj_pool_free;
 	}
 	raw_spin_unlock(&pool_lock);
 
+init_obj:
+	if (obj) {
+		obj->object = addr;
+		obj->descr  = descr;
+		obj->state  = ODEBUG_STATE_NONE;
+		obj->astate = 0;
+		hlist_add_head(&obj->node, &b->list);
+	}
 	return obj;
 }
 
@@ -209,13 +291,19 @@ static void free_obj_work(struct work_struct *work)
 	unsigned long flags;
 	HLIST_HEAD(tofree);
 
+	WRITE_ONCE(obj_freeing, false);
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
 
+	if (obj_pool_free >= debug_objects_pool_size)
+		goto free_objs;
+
 	/*
 	 * The objs on the pool list might be allocated before the work is
 	 * run, so recheck if the pool list is full or not, if not fill pool
-	 * list from the global free list
+	 * list from the global free list. As it is likely that a workload
+	 * may be gearing up to use more and more objects, don't free any
+	 * of them until the next round.
 	 */
 	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
@@ -224,7 +312,10 @@ static void free_obj_work(struct work_struct *work)
 		obj_pool_free++;
 		obj_nr_tofree--;
 	}
+	raw_spin_unlock_irqrestore(&pool_lock, flags);
+	return;
 
+free_objs:
 	/*
 	 * Pool list is already full and there are still objs on the free
 	 * list. Move remaining free objs to a temporary list to free the
@@ -243,24 +334,86 @@ static void free_obj_work(struct work_struct *work)
 	}
 }
 
-static bool __free_object(struct debug_obj *obj)
+static void __free_object(struct debug_obj *obj)
 {
+	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
+	struct debug_percpu_free *percpu_pool;
+	int lookahead_count = 0;
 	unsigned long flags;
 	bool work;
 
-	raw_spin_lock_irqsave(&pool_lock, flags);
-	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
+	local_irq_save(flags);
+	if (!obj_cache)
+		goto free_to_obj_pool;
+
+	/*
+	 * Try to free it into the percpu pool first.
+	 */
+	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
+	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
+		hlist_add_head(&obj->node, &percpu_pool->free_objs);
+		percpu_pool->obj_free++;
+		local_irq_restore(flags);
+		return;
+	}
+
+	/*
+	 * As the percpu pool is full, look ahead and pull out a batch
+	 * of objects from the percpu pool and free them as well.
+	 */
+	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
+		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
+		if (!objs[lookahead_count])
+			break;
+		percpu_pool->obj_free--;
+	}
+
+free_to_obj_pool:
+	raw_spin_lock(&pool_lock);
+	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
+	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
 	obj_pool_used--;
 
 	if (work) {
 		obj_nr_tofree++;
 		hlist_add_head(&obj->node, &obj_to_free);
+		if (lookahead_count) {
+			obj_nr_tofree += lookahead_count;
+			obj_pool_used -= lookahead_count;
+			while (lookahead_count) {
+				hlist_add_head(&objs[--lookahead_count]->node,
+					       &obj_to_free);
+			}
+		}
+
+		if ((obj_pool_free > debug_objects_pool_size) &&
+		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
+			int i;
+
+			/*
+			 * Free one more batch of objects from obj_pool.
+			 */
+			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
+				obj = __alloc_object(&obj_pool);
+				hlist_add_head(&obj->node, &obj_to_free);
+				obj_pool_free--;
+				obj_nr_tofree++;
+			}
+		}
 	} else {
 		obj_pool_free++;
 		hlist_add_head(&obj->node, &obj_pool);
+		if (lookahead_count) {
+			obj_pool_free += lookahead_count;
+			obj_pool_used -= lookahead_count;
+			while (lookahead_count) {
+				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
+			}
+		}
 	}
-	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	return work;
+	raw_spin_unlock(&pool_lock);
+	local_irq_restore(flags);
 }
 
 /*
@@ -269,8 +422,11 @@ static bool __free_object(struct debug_obj *obj)
  */
 static void free_object(struct debug_obj *obj)
 {
-	if (__free_object(obj))
-		schedule_work(&debug_obj_work);
+	__free_object(obj);
+	if (!obj_freeing && obj_nr_tofree) {
+		WRITE_ONCE(obj_freeing, true);
+		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+	}
 }
 
 /*
@@ -372,6 +528,7 @@ static void
 __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 {
 	enum debug_obj_state state;
+	bool check_stack = false;
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
@@ -391,7 +548,7 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 			debug_objects_oom();
 			return;
 		}
-		debug_object_is_on_stack(addr, onstack);
+		check_stack = true;
 	}
 
 	switch (obj->state) {
@@ -402,20 +559,23 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
 		break;
 
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "init");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "init");
 		debug_object_fixup(descr->fixup_init, addr, state);
 		return;
 
 	case ODEBUG_STATE_DESTROYED:
+		raw_spin_unlock_irqrestore(&db->lock, flags);
 		debug_print_object(obj, "init");
-		break;
+		return;
 	default:
 		break;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (check_stack)
+		debug_object_is_on_stack(addr, onstack);
 }
 
 /**
@@ -473,6 +633,8 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 
 	obj = lookup_object(addr, db);
 	if (obj) {
+		bool print_object = false;
+
 		switch (obj->state) {
 		case ODEBUG_STATE_INIT:
 		case ODEBUG_STATE_INACTIVE:
@@ -481,14 +643,14 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 			break;
 
 		case ODEBUG_STATE_ACTIVE:
-			debug_print_object(obj, "activate");
 			state = obj->state;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
+			debug_print_object(obj, "activate");
 			ret = debug_object_fixup(descr->fixup_activate, addr, state);
 			return ret ? 0 : -EINVAL;
 
 		case ODEBUG_STATE_DESTROYED:
-			debug_print_object(obj, "activate");
+			print_object = true;
 			ret = -EINVAL;
 			break;
 		default:
@@ -496,10 +658,13 @@ int debug_object_activate(void *addr, struct debug_obj_descr *descr)
 			break;
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		if (print_object)
+			debug_print_object(obj, "activate");
 		return ret;
 	}
 
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+
 	/*
 	 * We are here when a static object is activated. We
 	 * let the type specific code confirm whether this is
@@ -531,6 +696,7 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -548,24 +714,27 @@ void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
 			if (!obj->astate)
 				obj->state = ODEBUG_STATE_INACTIVE;
 			else
-				debug_print_object(obj, "deactivate");
+				print_object = true;
 			break;
 
 		case ODEBUG_STATE_DESTROYED:
-			debug_print_object(obj, "deactivate");
+			print_object = true;
 			break;
 		default:
 			break;
 		}
-	} else {
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (!obj) {
 		struct debug_obj o = { .object = addr,
 				       .state = ODEBUG_STATE_NOTAVAILABLE,
 				       .descr = descr };
 
 		debug_print_object(&o, "deactivate");
+	} else if (print_object) {
+		debug_print_object(obj, "deactivate");
 	}
-
-	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_deactivate);
 
@@ -580,6 +749,7 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -599,20 +769,22 @@ void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
 		obj->state = ODEBUG_STATE_DESTROYED;
 		break;
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "destroy");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "destroy");
 		debug_object_fixup(descr->fixup_destroy, addr, state);
 		return;
 
 	case ODEBUG_STATE_DESTROYED:
-		debug_print_object(obj, "destroy");
+		print_object = true;
 		break;
 	default:
 		break;
 	}
 out_unlock:
 	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (print_object)
+		debug_print_object(obj, "destroy");
 }
 EXPORT_SYMBOL_GPL(debug_object_destroy);
 
@@ -641,9 +813,9 @@ void debug_object_free(void *addr, struct debug_obj_descr *descr)
 
 	switch (obj->state) {
 	case ODEBUG_STATE_ACTIVE:
-		debug_print_object(obj, "free");
 		state = obj->state;
 		raw_spin_unlock_irqrestore(&db->lock, flags);
+		debug_print_object(obj, "free");
 		debug_object_fixup(descr->fixup_free, addr, state);
 		return;
 	default:
@@ -716,6 +888,7 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 	struct debug_bucket *db;
 	struct debug_obj *obj;
 	unsigned long flags;
+	bool print_object = false;
 
 	if (!debug_objects_enabled)
 		return;
@@ -731,22 +904,25 @@ debug_object_active_state(void *addr, struct debug_obj_descr *descr,
 			if (obj->astate == expect)
 				obj->astate = next;
 			else
-				debug_print_object(obj, "active_state");
+				print_object = true;
 			break;
 
 		default:
-			debug_print_object(obj, "active_state");
+			print_object = true;
 			break;
 		}
-	} else {
+	}
+
+	raw_spin_unlock_irqrestore(&db->lock, flags);
+	if (!obj) {
 		struct debug_obj o = { .object = addr,
 				       .state = ODEBUG_STATE_NOTAVAILABLE,
 				       .descr = descr };
 
 		debug_print_object(&o, "active_state");
+	} else if (print_object) {
+		debug_print_object(obj, "active_state");
 	}
-
-	raw_spin_unlock_irqrestore(&db->lock, flags);
 }
 EXPORT_SYMBOL_GPL(debug_object_active_state);
 
@@ -760,7 +936,6 @@ static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 	struct hlist_node *tmp;
 	struct debug_obj *obj;
 	int cnt, objs_checked = 0;
-	bool work = false;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -782,16 +957,16 @@ repeat:
 
 		switch (obj->state) {
 		case ODEBUG_STATE_ACTIVE:
-			debug_print_object(obj, "free");
 			descr = obj->descr;
 			state = obj->state;
 			raw_spin_unlock_irqrestore(&db->lock, flags);
+			debug_print_object(obj, "free");
 			debug_object_fixup(descr->fixup_free,
 					   (void *) oaddr, state);
 			goto repeat;
 		default:
 			hlist_del(&obj->node);
-			work |= __free_object(obj);
+			__free_object(obj);
 			break;
 		}
 	}
@@ -807,8 +982,10 @@ repeat:
 		debug_objects_maxchecked = objs_checked;
 
 	/* Schedule work to actually kmem_cache_free() objects */
-	if (work)
-		schedule_work(&debug_obj_work);
+	if (!obj_freeing && obj_nr_tofree) {
+		WRITE_ONCE(obj_freeing, true);
+		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
+	}
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -822,13 +999,19 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 
 static int debug_stats_show(struct seq_file *m, void *v)
 {
+	int cpu, obj_percpu_free = 0;
+
+	for_each_possible_cpu(cpu)
+		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
+
 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
 	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
-	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
+	seq_printf(m, "pool_free     :%d\n", obj_pool_free + obj_percpu_free);
+	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
-	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
+	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
 	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
@@ -850,26 +1033,16 @@ static const struct file_operations debug_stats_fops = {
 
 static int __init debug_objects_init_debugfs(void)
 {
-	struct dentry *dbgdir, *dbgstats;
+	struct dentry *dbgdir;
 
 	if (!debug_objects_enabled)
 		return 0;
 
 	dbgdir = debugfs_create_dir("debug_objects", NULL);
-	if (!dbgdir)
-		return -ENOMEM;
 
-	dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
-				       &debug_stats_fops);
-	if (!dbgstats)
-		goto err;
+	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
 
 	return 0;
-
-err:
-	debugfs_remove(dbgdir);
-
-	return -ENOMEM;
 }
 __initcall(debug_objects_init_debugfs);
 
@@ -1175,9 +1348,20 @@ free:
  */
 void __init debug_objects_mem_init(void)
 {
+	int cpu, extras;
+
 	if (!debug_objects_enabled)
 		return;
 
+	/*
+	 * Initialize the percpu object pools
+	 *
+	 * Initialization is not strictly necessary, but was done for
+	 * completeness.
+	 */
+	for_each_possible_cpu(cpu)
+		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
+
 	obj_cache = kmem_cache_create("debug_objects_cache",
 				      sizeof (struct debug_obj), 0,
 				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
@@ -1194,6 +1378,7 @@ void __init debug_objects_mem_init(void)
 	 * Increase the thresholds for allocating and freeing objects
 	 * according to the number of possible CPUs available in the system.
 	 */
-	debug_objects_pool_size += num_possible_cpus() * 32;
-	debug_objects_pool_min_level += num_possible_cpus() * 4;
+	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
+	debug_objects_pool_size += extras;
+	debug_objects_pool_min_level += extras;
 }
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 1b0baf3008ea..c0cfcfd486be 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Wrapper for decompressing LZ4-compressed kernel, initramfs, and initrd
  *
  * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #ifdef STATIC
diff --git a/lib/devres.c b/lib/devres.c
index 69bed2f38306..6a0e9bd6524a 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
  *	if (IS_ERR(base))
  *		return PTR_ERR(base);
  */
-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
+void __iomem *devm_ioremap_resource(struct device *dev,
+				    const struct resource *res)
 {
 	resource_size_t size;
 	void __iomem *dest_ptr;
diff --git a/lib/digsig.c b/lib/digsig.c
index 3cf89c775ab2..e0627c3e53b2 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -218,7 +218,7 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
 		/* search in specific keyring */
 		key_ref_t kref;
 		kref = keyring_search(make_key_ref(keyring, 1UL),
-				      &key_type_user, name);
+				      &key_type_user, name, true);
 		if (IS_ERR(kref))
 			key = ERR_CAST(kref);
 		else
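The extra "true" matches keyring_search()'s new fourth parameter, a recurse flag; passing true preserves digsig's old behaviour of descending into keyrings linked below the one given. A hedged sketch of a caller that opts out of recursion (the helper and key name are hypothetical):

#include <linux/key.h>
#include <keys/user-type.h>

static struct key *example_find_key(struct key *keyring, const char *name)
{
	/* false: search this keyring only, not keyrings linked into it */
	key_ref_t kref = keyring_search(make_key_ref(keyring, 1UL),
					&key_type_user, name, false);

	if (IS_ERR(kref))
		return ERR_CAST(kref);
	return key_ref_to_ptr(kref);
}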
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
new file mode 100644
index 000000000000..160afe288df0
--- /dev/null
+++ b/lib/dim/Makefile
@@ -0,0 +1,9 @@
+#
+# DIM Dynamic Interrupt Moderation library
+#
+
+obj-$(CONFIG_DIMLIB) = net_dim.o
+
+net_dim-y = \
+	dim.o \
+	net_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
new file mode 100644
index 000000000000..439d641ec796
--- /dev/null
+++ b/lib/dim/dim.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/*
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
+ */
+
+#include <linux/dim.h>
+
+bool dim_on_top(struct dim *dim)
+{
+	switch (dim->tune_state) {
+	case DIM_PARKING_ON_TOP:
+	case DIM_PARKING_TIRED:
+		return true;
+	case DIM_GOING_RIGHT:
+		return (dim->steps_left > 1) && (dim->steps_right == 1);
+	default: /* DIM_GOING_LEFT */
+		return (dim->steps_right > 1) && (dim->steps_left == 1);
+	}
+}
+EXPORT_SYMBOL(dim_on_top);
+
+void dim_turn(struct dim *dim)
+{
+	switch (dim->tune_state) {
+	case DIM_PARKING_ON_TOP:
+	case DIM_PARKING_TIRED:
+		break;
+	case DIM_GOING_RIGHT:
+		dim->tune_state = DIM_GOING_LEFT;
+		dim->steps_left = 0;
+		break;
+	case DIM_GOING_LEFT:
+		dim->tune_state = DIM_GOING_RIGHT;
+		dim->steps_right = 0;
+		break;
+	}
+}
+EXPORT_SYMBOL(dim_turn);
+
+void dim_park_on_top(struct dim *dim)
+{
+	dim->steps_right = 0;
+	dim->steps_left = 0;
+	dim->tired = 0;
+	dim->tune_state = DIM_PARKING_ON_TOP;
+}
+EXPORT_SYMBOL(dim_park_on_top);
+
+void dim_park_tired(struct dim *dim)
+{
+	dim->steps_right = 0;
+	dim->steps_left = 0;
+	dim->tune_state = DIM_PARKING_TIRED;
+}
+EXPORT_SYMBOL(dim_park_tired);
+
+void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
+		    struct dim_stats *curr_stats)
+{
+	/* u32 holds up to 71 minutes, should be enough */
+	u32 delta_us = ktime_us_delta(end->time, start->time);
+	u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
+	u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
+			     start->byte_ctr);
+	u32 ncomps = BIT_GAP(BITS_PER_TYPE(u32), end->comp_ctr,
+			     start->comp_ctr);
+
+	if (!delta_us)
+		return;
+
+	curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
+	curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
+	curr_stats->epms = DIV_ROUND_UP(DIM_NEVENTS * USEC_PER_MSEC,
+					delta_us);
+	curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
+	if (curr_stats->epms != 0)
+		curr_stats->cpe_ratio =
+			(curr_stats->cpms * 100) / curr_stats->epms;
+	else
+		curr_stats->cpe_ratio = 0;
+
+}
+EXPORT_SYMBOL(dim_calc_stats);
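dim_calc_stats() is the measurement half of the library: given two dim_sample snapshots it fills a dim_stats with per-millisecond packet, byte, event, and completion rates. A hedged sketch of feeding it (dim_update_sample() is the snapshot helper that net_dim.c below also uses; the surrounding function is hypothetical):

#include <linux/dim.h>

static void example_measure(struct dim *dim, u16 events, u64 pkts, u64 bytes)
{
	struct dim_sample end_sample;
	struct dim_stats stats;

	/* Snapshot the current counters and timestamp... */
	dim_update_sample(events, pkts, bytes, &end_sample);

	/* ...and turn the start/end delta into rates (ppms, bpms, epms). */
	dim_calc_stats(&dim->start_sample, &end_sample, &stats);
}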
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
new file mode 100644
index 000000000000..5bcc902c5388
--- /dev/null
+++ b/lib/dim/net_dim.c
@@ -0,0 +1,190 @@
1// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2/*
3 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
4 */
5
6#include <linux/dim.h>
7
8struct dim_cq_moder
9net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
10{
11 struct dim_cq_moder cq_moder = rx_profile[cq_period_mode][ix];
12
13 cq_moder.cq_period_mode = cq_period_mode;
14 return cq_moder;
15}
16EXPORT_SYMBOL(net_dim_get_rx_moderation);
17
18struct dim_cq_moder
19net_dim_get_def_rx_moderation(u8 cq_period_mode)
20{
21 u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
22 NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
23
24 return net_dim_get_rx_moderation(cq_period_mode, profile_ix);
25}
26EXPORT_SYMBOL(net_dim_get_def_rx_moderation);
27
28struct dim_cq_moder
29net_dim_get_tx_moderation(u8 cq_period_mode, int ix)
30{
31 struct dim_cq_moder cq_moder = tx_profile[cq_period_mode][ix];
32
33 cq_moder.cq_period_mode = cq_period_mode;
34 return cq_moder;
35}
36EXPORT_SYMBOL(net_dim_get_tx_moderation);
37
38struct dim_cq_moder
39net_dim_get_def_tx_moderation(u8 cq_period_mode)
40{
41 u8 profile_ix = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
42 NET_DIM_DEF_PROFILE_CQE : NET_DIM_DEF_PROFILE_EQE;
43
44 return net_dim_get_tx_moderation(cq_period_mode, profile_ix);
45}
46EXPORT_SYMBOL(net_dim_get_def_tx_moderation);
47
48static int net_dim_step(struct dim *dim)
49{
50 if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
51 return DIM_TOO_TIRED;
52
53 switch (dim->tune_state) {
54 case DIM_PARKING_ON_TOP:
55 case DIM_PARKING_TIRED:
56 break;
57 case DIM_GOING_RIGHT:
58 if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
59 return DIM_ON_EDGE;
60 dim->profile_ix++;
61 dim->steps_right++;
62 break;
63 case DIM_GOING_LEFT:
64 if (dim->profile_ix == 0)
65 return DIM_ON_EDGE;
66 dim->profile_ix--;
67 dim->steps_left++;
68 break;
69 }
70
71 dim->tired++;
72 return DIM_STEPPED;
73}
74
75static void net_dim_exit_parking(struct dim *dim)
76{
77 dim->tune_state = dim->profile_ix ? DIM_GOING_LEFT : DIM_GOING_RIGHT;
78 net_dim_step(dim);
79}
80
81static int net_dim_stats_compare(struct dim_stats *curr,
82 struct dim_stats *prev)
83{
84 if (!prev->bpms)
85 return curr->bpms ? DIM_STATS_BETTER : DIM_STATS_SAME;
86
87 if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
88 return (curr->bpms > prev->bpms) ? DIM_STATS_BETTER :
89 DIM_STATS_WORSE;
90
91 if (!prev->ppms)
92 return curr->ppms ? DIM_STATS_BETTER :
93 DIM_STATS_SAME;
94
95 if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
96 return (curr->ppms > prev->ppms) ? DIM_STATS_BETTER :
97 DIM_STATS_WORSE;
98
99 if (!prev->epms)
100 return DIM_STATS_SAME;
101
102 if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
103 return (curr->epms < prev->epms) ? DIM_STATS_BETTER :
104 DIM_STATS_WORSE;
105
106 return DIM_STATS_SAME;
107}
108
109static bool net_dim_decision(struct dim_stats *curr_stats, struct dim *dim)
110{
111 int prev_state = dim->tune_state;
112 int prev_ix = dim->profile_ix;
113 int stats_res;
114 int step_res;
115
116 switch (dim->tune_state) {
117 case DIM_PARKING_ON_TOP:
118 stats_res = net_dim_stats_compare(curr_stats,
119 &dim->prev_stats);
120 if (stats_res != DIM_STATS_SAME)
121 net_dim_exit_parking(dim);
122 break;
123
124 case DIM_PARKING_TIRED:
125 dim->tired--;
126 if (!dim->tired)
127 net_dim_exit_parking(dim);
128 break;
129
130 case DIM_GOING_RIGHT:
131 case DIM_GOING_LEFT:
132 stats_res = net_dim_stats_compare(curr_stats,
133 &dim->prev_stats);
134 if (stats_res != DIM_STATS_BETTER)
135 dim_turn(dim);
136
137 if (dim_on_top(dim)) {
138 dim_park_on_top(dim);
139 break;
140 }
141
142 step_res = net_dim_step(dim);
143 switch (step_res) {
144 case DIM_ON_EDGE:
145 dim_park_on_top(dim);
146 break;
147 case DIM_TOO_TIRED:
148 dim_park_tired(dim);
149 break;
150 }
151
152 break;
153 }
154
155 if (prev_state != DIM_PARKING_ON_TOP ||
156 dim->tune_state != DIM_PARKING_ON_TOP)
157 dim->prev_stats = *curr_stats;
158
159 return dim->profile_ix != prev_ix;
160}
161
162void net_dim(struct dim *dim, struct dim_sample end_sample)
163{
164 struct dim_stats curr_stats;
165 u16 nevents;
166
167 switch (dim->state) {
168 case DIM_MEASURE_IN_PROGRESS:
169 nevents = BIT_GAP(BITS_PER_TYPE(u16),
170 end_sample.event_ctr,
171 dim->start_sample.event_ctr);
172 if (nevents < DIM_NEVENTS)
173 break;
174 dim_calc_stats(&dim->start_sample, &end_sample, &curr_stats);
175 if (net_dim_decision(&curr_stats, dim)) {
176 dim->state = DIM_APPLY_NEW_PROFILE;
177 schedule_work(&dim->work);
178 break;
179 }
180 /* fall through */
181 case DIM_START_MEASURE:
182 dim_update_sample(end_sample.event_ctr, end_sample.pkt_ctr,
183 end_sample.byte_ctr, &dim->start_sample);
184 dim->state = DIM_MEASURE_IN_PROGRESS;
185 break;
186 case DIM_APPLY_NEW_PROFILE:
187 break;
188 }
189}
190EXPORT_SYMBOL(net_dim);
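
net_dim() is the driver-facing entry point: the completion path feeds it a
fresh sample, and when the decision logic picks a new profile it schedules
dim->work so the driver can reprogram its hardware outside the hot path. A
hypothetical sketch of that glue (my_ring and my_apply_moderation() are
illustrative names, not part of this patch):

struct my_ring {			/* illustrative container */
	struct dim dim;
	u16 events;
	u64 packets;
	u64 bytes;
};

static void my_rx_irq_done(struct my_ring *ring)
{
	struct dim_sample sample;

	dim_update_sample(ring->events, ring->packets, ring->bytes, &sample);
	net_dim(&ring->dim, sample);
}

static void my_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder m =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	my_apply_moderation(m.usec, m.pkts);	/* program the NIC */
	dim->state = DIM_START_MEASURE;
}
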
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 8a16c2d498e9..c60409138e13 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -993,20 +993,14 @@ static __initdata int ddebug_init_success;
993 993
994static int __init dynamic_debug_init_debugfs(void) 994static int __init dynamic_debug_init_debugfs(void)
995{ 995{
996 struct dentry *dir, *file; 996 struct dentry *dir;
997 997
998 if (!ddebug_init_success) 998 if (!ddebug_init_success)
999 return -ENODEV; 999 return -ENODEV;
1000 1000
1001 dir = debugfs_create_dir("dynamic_debug", NULL); 1001 dir = debugfs_create_dir("dynamic_debug", NULL);
1002 if (!dir) 1002 debugfs_create_file("control", 0644, dir, NULL, &ddebug_proc_fops);
1003 return -ENOMEM; 1003
1004 file = debugfs_create_file("control", 0644, dir, NULL,
1005 &ddebug_proc_fops);
1006 if (!file) {
1007 debugfs_remove(dir);
1008 return -ENOMEM;
1009 }
1010 return 0; 1004 return 0;
1011} 1005}
1012 1006
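
This conversion follows the debugfs rule applied throughout this series:
callers are not supposed to check debugfs return values. On failure
debugfs_create_dir() returns an error pointer, and passing that as the
parent to further debugfs_create_*() calls is handled gracefully, so the
-ENOMEM unwinding was dead weight. The resulting idiom, as a sketch
(names illustrative):

static const struct file_operations my_fops;	/* stand-in fops */

static int __init my_debugfs_init(void)
{
	struct dentry *dir = debugfs_create_dir("my_feature", NULL);

	debugfs_create_file("control", 0644, dir, NULL, &my_fops);
	return 0;	/* fine even when debugfs is disabled */
}
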
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 3cb21b2bf088..8186ca84910b 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -166,10 +166,10 @@ static int debugfs_ul_get(void *data, u64 *val)
166 166
167DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n"); 167DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
168 168
169static struct dentry *debugfs_create_ul(const char *name, umode_t mode, 169static void debugfs_create_ul(const char *name, umode_t mode,
170 struct dentry *parent, unsigned long *value) 170 struct dentry *parent, unsigned long *value)
171{ 171{
172 return debugfs_create_file(name, mode, parent, value, &fops_ul); 172 debugfs_create_file(name, mode, parent, value, &fops_ul);
173} 173}
174 174
175#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER 175#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
@@ -185,12 +185,11 @@ static int debugfs_stacktrace_depth_set(void *data, u64 val)
185DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get, 185DEFINE_SIMPLE_ATTRIBUTE(fops_stacktrace_depth, debugfs_ul_get,
186 debugfs_stacktrace_depth_set, "%llu\n"); 186 debugfs_stacktrace_depth_set, "%llu\n");
187 187
188static struct dentry *debugfs_create_stacktrace_depth( 188static void debugfs_create_stacktrace_depth(const char *name, umode_t mode,
189 const char *name, umode_t mode, 189 struct dentry *parent,
190 struct dentry *parent, unsigned long *value) 190 unsigned long *value)
191{ 191{
192 return debugfs_create_file(name, mode, parent, value, 192 debugfs_create_file(name, mode, parent, value, &fops_stacktrace_depth);
193 &fops_stacktrace_depth);
194} 193}
195 194
196#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ 195#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
@@ -202,51 +201,31 @@ struct dentry *fault_create_debugfs_attr(const char *name,
202 struct dentry *dir; 201 struct dentry *dir;
203 202
204 dir = debugfs_create_dir(name, parent); 203 dir = debugfs_create_dir(name, parent);
205 if (!dir) 204 if (IS_ERR(dir))
206 return ERR_PTR(-ENOMEM); 205 return dir;
207 206
208 if (!debugfs_create_ul("probability", mode, dir, &attr->probability)) 207 debugfs_create_ul("probability", mode, dir, &attr->probability);
209 goto fail; 208 debugfs_create_ul("interval", mode, dir, &attr->interval);
210 if (!debugfs_create_ul("interval", mode, dir, &attr->interval)) 209 debugfs_create_atomic_t("times", mode, dir, &attr->times);
211 goto fail; 210 debugfs_create_atomic_t("space", mode, dir, &attr->space);
212 if (!debugfs_create_atomic_t("times", mode, dir, &attr->times)) 211 debugfs_create_ul("verbose", mode, dir, &attr->verbose);
213 goto fail; 212 debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
214 if (!debugfs_create_atomic_t("space", mode, dir, &attr->space)) 213 &attr->ratelimit_state.interval);
215 goto fail; 214 debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
216 if (!debugfs_create_ul("verbose", mode, dir, &attr->verbose)) 215 &attr->ratelimit_state.burst);
217 goto fail; 216 debugfs_create_bool("task-filter", mode, dir, &attr->task_filter);
218 if (!debugfs_create_u32("verbose_ratelimit_interval_ms", mode, dir,
219 &attr->ratelimit_state.interval))
220 goto fail;
221 if (!debugfs_create_u32("verbose_ratelimit_burst", mode, dir,
222 &attr->ratelimit_state.burst))
223 goto fail;
224 if (!debugfs_create_bool("task-filter", mode, dir, &attr->task_filter))
225 goto fail;
226 217
227#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER 218#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER
228 219 debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir,
229 if (!debugfs_create_stacktrace_depth("stacktrace-depth", mode, dir, 220 &attr->stacktrace_depth);
230 &attr->stacktrace_depth)) 221 debugfs_create_ul("require-start", mode, dir, &attr->require_start);
231 goto fail; 222 debugfs_create_ul("require-end", mode, dir, &attr->require_end);
232 if (!debugfs_create_ul("require-start", mode, dir, 223 debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
233 &attr->require_start)) 224 debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
234 goto fail;
235 if (!debugfs_create_ul("require-end", mode, dir, &attr->require_end))
236 goto fail;
237 if (!debugfs_create_ul("reject-start", mode, dir, &attr->reject_start))
238 goto fail;
239 if (!debugfs_create_ul("reject-end", mode, dir, &attr->reject_end))
240 goto fail;
241
242#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ 225#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */
243 226
244 attr->dname = dget(dir); 227 attr->dname = dget(dir);
245 return dir; 228 return dir;
246fail:
247 debugfs_remove_recursive(dir);
248
249 return ERR_PTR(-ENOMEM);
250} 229}
251EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); 230EXPORT_SYMBOL_GPL(fault_create_debugfs_attr);
252 231
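
After this change the only failure fault_create_debugfs_attr() can report
is from debugfs_create_dir() itself, passed straight through as an error
pointer. A sketch of a consumer (names illustrative; DECLARE_FAULT_ATTR()
and should_fail() are the existing fault-injection API):

static DECLARE_FAULT_ATTR(fail_my_op);

static int __init my_fault_init(void)
{
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_my_op", NULL, &fail_my_op);
	return PTR_ERR_OR_ZERO(dir);
}

static int my_op(size_t size)
{
	if (should_fail(&fail_my_op, size))
		return -ENOMEM;		/* injected failure */
	return 0;
}
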
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 9969358a7af5..e7258d8c252b 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -20,56 +20,42 @@
20#endif 20#endif
21#include <linux/font.h> 21#include <linux/font.h>
22 22
23#define NO_FONTS
24
25static const struct font_desc *fonts[] = { 23static const struct font_desc *fonts[] = {
26#ifdef CONFIG_FONT_8x8 24#ifdef CONFIG_FONT_8x8
27#undef NO_FONTS 25 &font_vga_8x8,
28 &font_vga_8x8,
29#endif 26#endif
30#ifdef CONFIG_FONT_8x16 27#ifdef CONFIG_FONT_8x16
31#undef NO_FONTS 28 &font_vga_8x16,
32 &font_vga_8x16,
33#endif 29#endif
34#ifdef CONFIG_FONT_6x11 30#ifdef CONFIG_FONT_6x11
35#undef NO_FONTS 31 &font_vga_6x11,
36 &font_vga_6x11,
37#endif 32#endif
38#ifdef CONFIG_FONT_7x14 33#ifdef CONFIG_FONT_7x14
39#undef NO_FONTS 34 &font_7x14,
40 &font_7x14,
41#endif 35#endif
42#ifdef CONFIG_FONT_SUN8x16 36#ifdef CONFIG_FONT_SUN8x16
43#undef NO_FONTS 37 &font_sun_8x16,
44 &font_sun_8x16,
45#endif 38#endif
46#ifdef CONFIG_FONT_SUN12x22 39#ifdef CONFIG_FONT_SUN12x22
47#undef NO_FONTS 40 &font_sun_12x22,
48 &font_sun_12x22,
49#endif 41#endif
50#ifdef CONFIG_FONT_10x18 42#ifdef CONFIG_FONT_10x18
51#undef NO_FONTS 43 &font_10x18,
52 &font_10x18,
53#endif 44#endif
54#ifdef CONFIG_FONT_ACORN_8x8 45#ifdef CONFIG_FONT_ACORN_8x8
55#undef NO_FONTS 46 &font_acorn_8x8,
56 &font_acorn_8x8,
57#endif 47#endif
58#ifdef CONFIG_FONT_PEARL_8x8 48#ifdef CONFIG_FONT_PEARL_8x8
59#undef NO_FONTS 49 &font_pearl_8x8,
60 &font_pearl_8x8,
61#endif 50#endif
62#ifdef CONFIG_FONT_MINI_4x6 51#ifdef CONFIG_FONT_MINI_4x6
63#undef NO_FONTS 52 &font_mini_4x6,
64 &font_mini_4x6,
65#endif 53#endif
66#ifdef CONFIG_FONT_6x10 54#ifdef CONFIG_FONT_6x10
67#undef NO_FONTS 55 &font_6x10,
68 &font_6x10,
69#endif 56#endif
70#ifdef CONFIG_FONT_TER16x32 57#ifdef CONFIG_FONT_TER16x32
71#undef NO_FONTS 58 &font_ter_16x32,
72 &font_ter_16x32,
73#endif 59#endif
74}; 60};
75 61
@@ -90,16 +76,17 @@ static const struct font_desc *fonts[] = {
90 * specified font. 76 * specified font.
91 * 77 *
92 */ 78 */
93
94const struct font_desc *find_font(const char *name) 79const struct font_desc *find_font(const char *name)
95{ 80{
96 unsigned int i; 81 unsigned int i;
97 82
98 for (i = 0; i < num_fonts; i++) 83 BUILD_BUG_ON(!num_fonts);
99 if (!strcmp(fonts[i]->name, name)) 84 for (i = 0; i < num_fonts; i++)
100 return fonts[i]; 85 if (!strcmp(fonts[i]->name, name))
101 return NULL; 86 return fonts[i];
87 return NULL;
102} 88}
89EXPORT_SYMBOL(find_font);
103 90
104 91
105/** 92/**
@@ -116,44 +103,46 @@ const struct font_desc *find_font(const char *name)
116 * chosen font. 103 * chosen font.
117 * 104 *
118 */ 105 */
119
120const struct font_desc *get_default_font(int xres, int yres, u32 font_w, 106const struct font_desc *get_default_font(int xres, int yres, u32 font_w,
121 u32 font_h) 107 u32 font_h)
122{ 108{
123 int i, c, cc; 109 int i, c, cc, res;
124 const struct font_desc *f, *g; 110 const struct font_desc *f, *g;
125 111
126 g = NULL; 112 g = NULL;
127 cc = -10000; 113 cc = -10000;
128 for(i=0; i<num_fonts; i++) { 114 for (i = 0; i < num_fonts; i++) {
129 f = fonts[i]; 115 f = fonts[i];
130 c = f->pref; 116 c = f->pref;
131#if defined(__mc68000__) 117#if defined(__mc68000__)
132#ifdef CONFIG_FONT_PEARL_8x8 118#ifdef CONFIG_FONT_PEARL_8x8
133 if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX) 119 if (MACH_IS_AMIGA && f->idx == PEARL8x8_IDX)
134 c = 100; 120 c = 100;
135#endif 121#endif
136#ifdef CONFIG_FONT_6x11 122#ifdef CONFIG_FONT_6x11
137 if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX) 123 if (MACH_IS_MAC && xres < 640 && f->idx == VGA6x11_IDX)
138 c = 100; 124 c = 100;
139#endif 125#endif
140#endif 126#endif
141 if ((yres < 400) == (f->height <= 8)) 127 if ((yres < 400) == (f->height <= 8))
142 c += 1000; 128 c += 1000;
129
130 /* prefer a bigger font for high resolution */
131 res = (xres / f->width) * (yres / f->height) / 1000;
132 if (res > 20)
133 c += 20 - res;
143 134
144 if ((font_w & (1 << (f->width - 1))) && 135 if ((font_w & (1 << (f->width - 1))) &&
145 (font_h & (1 << (f->height - 1)))) 136 (font_h & (1 << (f->height - 1))))
146 c += 1000; 137 c += 1000;
147 138
148 if (c > cc) { 139 if (c > cc) {
149 cc = c; 140 cc = c;
150 g = f; 141 g = f;
142 }
151 } 143 }
152 } 144 return g;
153 return g;
154} 145}
155
156EXPORT_SYMBOL(find_font);
157EXPORT_SYMBOL(get_default_font); 146EXPORT_SYMBOL(get_default_font);
158 147
159MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>"); 148MODULE_AUTHOR("James Simmons <jsimmons@users.sf.net>");
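
Worked example of the new "prefer a bigger font" term (resolution and fonts
hypothetical): on a 1920x1080 framebuffer an 8x16 font gives
res = (1920/8) * (1080/16) / 1000 = (240 * 67) / 1000 = 16, which is not
above 20, so its score is untouched; a 4x6 font gives
res = (480 * 180) / 1000 = 86 and loses 86 - 20 = 66 points. Tiny fonts are
therefore only picked on large displays when nothing bigger is available.
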
diff --git a/lib/genalloc.c b/lib/genalloc.c
index 7e85d1e37a6e..9fc31292cfa1 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Basic general purpose allocator for managing special purpose 3 * Basic general purpose allocator for managing special purpose
3 * memory, for example, memory that is not managed by the regular 4 * memory, for example, memory that is not managed by the regular
@@ -23,9 +24,6 @@
23 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. 24 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
24 * 25 *
25 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org> 26 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
26 *
27 * This source code is licensed under the GNU General Public License,
28 * Version 2. See the file COPYING for more details.
29 */ 27 */
30 28
31#include <linux/slab.h> 29#include <linux/slab.h>
@@ -168,20 +166,21 @@ struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
168EXPORT_SYMBOL(gen_pool_create); 166EXPORT_SYMBOL(gen_pool_create);
169 167
170/** 168/**
171 * gen_pool_add_virt - add a new chunk of special memory to the pool 169 * gen_pool_add_owner- add a new chunk of special memory to the pool
172 * @pool: pool to add new memory chunk to 170 * @pool: pool to add new memory chunk to
173 * @virt: virtual starting address of memory chunk to add to pool 171 * @virt: virtual starting address of memory chunk to add to pool
174 * @phys: physical starting address of memory chunk to add to pool 172 * @phys: physical starting address of memory chunk to add to pool
175 * @size: size in bytes of the memory chunk to add to pool 173 * @size: size in bytes of the memory chunk to add to pool
176 * @nid: node id of the node the chunk structure and bitmap should be 174 * @nid: node id of the node the chunk structure and bitmap should be
177 * allocated on, or -1 175 * allocated on, or -1
176 * @owner: private data the publisher would like to recall at alloc time
178 * 177 *
179 * Add a new chunk of special memory to the specified pool. 178 * Add a new chunk of special memory to the specified pool.
180 * 179 *
181 * Returns 0 on success or a -ve errno on failure. 180 * Returns 0 on success or a -ve errno on failure.
182 */ 181 */
183int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys, 182int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
184 size_t size, int nid) 183 size_t size, int nid, void *owner)
185{ 184{
186 struct gen_pool_chunk *chunk; 185 struct gen_pool_chunk *chunk;
187 int nbits = size >> pool->min_alloc_order; 186 int nbits = size >> pool->min_alloc_order;
@@ -195,6 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
195 chunk->phys_addr = phys; 194 chunk->phys_addr = phys;
196 chunk->start_addr = virt; 195 chunk->start_addr = virt;
197 chunk->end_addr = virt + size - 1; 196 chunk->end_addr = virt + size - 1;
197 chunk->owner = owner;
198 atomic_long_set(&chunk->avail, size); 198 atomic_long_set(&chunk->avail, size);
199 199
200 spin_lock(&pool->lock); 200 spin_lock(&pool->lock);
@@ -203,7 +203,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
203 203
204 return 0; 204 return 0;
205} 205}
206EXPORT_SYMBOL(gen_pool_add_virt); 206EXPORT_SYMBOL(gen_pool_add_owner);
207 207
208/** 208/**
209 * gen_pool_virt_to_phys - return the physical address of memory 209 * gen_pool_virt_to_phys - return the physical address of memory
@@ -260,35 +260,20 @@ void gen_pool_destroy(struct gen_pool *pool)
260EXPORT_SYMBOL(gen_pool_destroy); 260EXPORT_SYMBOL(gen_pool_destroy);
261 261
262/** 262/**
263 * gen_pool_alloc - allocate special memory from the pool 263 * gen_pool_alloc_algo_owner - allocate special memory from the pool
264 * @pool: pool to allocate from
265 * @size: number of bytes to allocate from the pool
266 *
267 * Allocate the requested number of bytes from the specified pool.
268 * Uses the pool allocation function (with first-fit algorithm by default).
269 * Can not be used in NMI handler on architectures without
270 * NMI-safe cmpxchg implementation.
271 */
272unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
273{
274 return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
275}
276EXPORT_SYMBOL(gen_pool_alloc);
277
278/**
279 * gen_pool_alloc_algo - allocate special memory from the pool
280 * @pool: pool to allocate from 264 * @pool: pool to allocate from
281 * @size: number of bytes to allocate from the pool 265 * @size: number of bytes to allocate from the pool
282 * @algo: algorithm passed from caller 266 * @algo: algorithm passed from caller
283 * @data: data passed to algorithm 267 * @data: data passed to algorithm
268 * @owner: optionally retrieve the chunk owner
284 * 269 *
285 * Allocate the requested number of bytes from the specified pool. 270 * Allocate the requested number of bytes from the specified pool.
286 * Uses the pool allocation function (with first-fit algorithm by default). 271 * Uses the pool allocation function (with first-fit algorithm by default).
287 * Can not be used in NMI handler on architectures without 272 * Can not be used in NMI handler on architectures without
288 * NMI-safe cmpxchg implementation. 273 * NMI-safe cmpxchg implementation.
289 */ 274 */
290unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, 275unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
291 genpool_algo_t algo, void *data) 276 genpool_algo_t algo, void *data, void **owner)
292{ 277{
293 struct gen_pool_chunk *chunk; 278 struct gen_pool_chunk *chunk;
294 unsigned long addr = 0; 279 unsigned long addr = 0;
@@ -299,6 +284,9 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
299 BUG_ON(in_nmi()); 284 BUG_ON(in_nmi());
300#endif 285#endif
301 286
287 if (owner)
288 *owner = NULL;
289
302 if (size == 0) 290 if (size == 0)
303 return 0; 291 return 0;
304 292
@@ -326,32 +314,58 @@ retry:
326 addr = chunk->start_addr + ((unsigned long)start_bit << order); 314 addr = chunk->start_addr + ((unsigned long)start_bit << order);
327 size = nbits << order; 315 size = nbits << order;
328 atomic_long_sub(size, &chunk->avail); 316 atomic_long_sub(size, &chunk->avail);
317 if (owner)
318 *owner = chunk->owner;
329 break; 319 break;
330 } 320 }
331 rcu_read_unlock(); 321 rcu_read_unlock();
332 return addr; 322 return addr;
333} 323}
334EXPORT_SYMBOL(gen_pool_alloc_algo); 324EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
335 325
336/** 326/**
337 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage 327 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
338 * @pool: pool to allocate from 328 * @pool: pool to allocate from
339 * @size: number of bytes to allocate from the pool 329 * @size: number of bytes to allocate from the pool
340 * @dma: dma-view physical address return value. Use NULL if unneeded. 330 * @dma: dma-view physical address return value. Use %NULL if unneeded.
341 * 331 *
342 * Allocate the requested number of bytes from the specified pool. 332 * Allocate the requested number of bytes from the specified pool.
343 * Uses the pool allocation function (with first-fit algorithm by default). 333 * Uses the pool allocation function (with first-fit algorithm by default).
344 * Can not be used in NMI handler on architectures without 334 * Can not be used in NMI handler on architectures without
345 * NMI-safe cmpxchg implementation. 335 * NMI-safe cmpxchg implementation.
336 *
337 * Return: virtual address of the allocated memory, or %NULL on failure
346 */ 338 */
347void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma) 339void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
348{ 340{
341 return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
342}
343EXPORT_SYMBOL(gen_pool_dma_alloc);
344
345/**
346 * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
347 * usage with the given pool algorithm
348 * @pool: pool to allocate from
349 * @size: number of bytes to allocate from the pool
350 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
351 * @algo: algorithm passed from caller
352 * @data: data passed to algorithm
353 *
354 * Allocate the requested number of bytes from the specified pool. Uses the
355 * given pool allocation function. Can not be used in NMI handler on
356 * architectures without NMI-safe cmpxchg implementation.
357 *
358 * Return: virtual address of the allocated memory, or %NULL on failure
359 */
360void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
361 dma_addr_t *dma, genpool_algo_t algo, void *data)
362{
349 unsigned long vaddr; 363 unsigned long vaddr;
350 364
351 if (!pool) 365 if (!pool)
352 return NULL; 366 return NULL;
353 367
354 vaddr = gen_pool_alloc(pool, size); 368 vaddr = gen_pool_alloc_algo(pool, size, algo, data);
355 if (!vaddr) 369 if (!vaddr)
356 return NULL; 370 return NULL;
357 371
@@ -360,19 +374,116 @@ void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
360 374
361 return (void *)vaddr; 375 return (void *)vaddr;
362} 376}
363EXPORT_SYMBOL(gen_pool_dma_alloc); 377EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
378
379/**
380 * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
381 * usage with the given alignment
382 * @pool: pool to allocate from
383 * @size: number of bytes to allocate from the pool
384 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
385 * @align: alignment in bytes for starting address
386 *
387 * Allocate the requested number bytes from the specified pool, with the given
388 * alignment restriction. Can not be used in NMI handler on architectures
389 * without NMI-safe cmpxchg implementation.
390 *
391 * Return: virtual address of the allocated memory, or %NULL on failure
392 */
393void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
394 dma_addr_t *dma, int align)
395{
396 struct genpool_data_align data = { .align = align };
397
398 return gen_pool_dma_alloc_algo(pool, size, dma,
399 gen_pool_first_fit_align, &data);
400}
401EXPORT_SYMBOL(gen_pool_dma_alloc_align);
402
403/**
404 * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
405 * DMA usage
406 * @pool: pool to allocate from
407 * @size: number of bytes to allocate from the pool
408 * @dma: dma-view physical address return value. Use %NULL if unneeded.
409 *
410 * Allocate the requested number of zeroed bytes from the specified pool.
411 * Uses the pool allocation function (with first-fit algorithm by default).
412 * Can not be used in NMI handler on architectures without
413 * NMI-safe cmpxchg implementation.
414 *
415 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
416 */
417void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
418{
419 return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
420}
421EXPORT_SYMBOL(gen_pool_dma_zalloc);
422
423/**
424 * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
425 * DMA usage with the given pool algorithm
426 * @pool: pool to allocate from
427 * @size: number of bytes to allocate from the pool
428 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
429 * @algo: algorithm passed from caller
430 * @data: data passed to algorithm
431 *
432 * Allocate the requested number of zeroed bytes from the specified pool. Uses
433 * the given pool allocation function. Can not be used in NMI handler on
434 * architectures without NMI-safe cmpxchg implementation.
435 *
436 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
437 */
438void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
439 dma_addr_t *dma, genpool_algo_t algo, void *data)
440{
441 void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
442
443 if (vaddr)
444 memset(vaddr, 0, size);
445
446 return vaddr;
447}
448EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
449
450/**
451 * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
452 * DMA usage with the given alignment
453 * @pool: pool to allocate from
454 * @size: number of bytes to allocate from the pool
455 * @dma: DMA-view physical address return value. Use %NULL if unneeded.
456 * @align: alignment in bytes for starting address
457 *
458 * Allocate the requested number of zeroed bytes from the specified pool,
459 * with the given alignment restriction. Can not be used in NMI handler on
460 * architectures without NMI-safe cmpxchg implementation.
461 *
462 * Return: virtual address of the allocated zeroed memory, or %NULL on failure
463 */
464void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
465 dma_addr_t *dma, int align)
466{
467 struct genpool_data_align data = { .align = align };
468
469 return gen_pool_dma_zalloc_algo(pool, size, dma,
470 gen_pool_first_fit_align, &data);
471}
472EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
364 473
365/** 474/**
366 * gen_pool_free - free allocated special memory back to the pool 475 * gen_pool_free - free allocated special memory back to the pool
367 * @pool: pool to free to 476 * @pool: pool to free to
368 * @addr: starting address of memory to free back to pool 477 * @addr: starting address of memory to free back to pool
369 * @size: size in bytes of memory to free 478 * @size: size in bytes of memory to free
479 * @owner: private data stashed at gen_pool_add() time
370 * 480 *
371 * Free previously allocated special memory back to the specified 481 * Free previously allocated special memory back to the specified
372 * pool. Can not be used in NMI handler on architectures without 482 * pool. Can not be used in NMI handler on architectures without
373 * NMI-safe cmpxchg implementation. 483 * NMI-safe cmpxchg implementation.
374 */ 484 */
375void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) 485void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
486 void **owner)
376{ 487{
377 struct gen_pool_chunk *chunk; 488 struct gen_pool_chunk *chunk;
378 int order = pool->min_alloc_order; 489 int order = pool->min_alloc_order;
@@ -382,6 +493,9 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
382 BUG_ON(in_nmi()); 493 BUG_ON(in_nmi());
383#endif 494#endif
384 495
496 if (owner)
497 *owner = NULL;
498
385 nbits = (size + (1UL << order) - 1) >> order; 499 nbits = (size + (1UL << order) - 1) >> order;
386 rcu_read_lock(); 500 rcu_read_lock();
387 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { 501 list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
@@ -392,6 +506,8 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
392 BUG_ON(remain); 506 BUG_ON(remain);
393 size = nbits << order; 507 size = nbits << order;
394 atomic_long_add(size, &chunk->avail); 508 atomic_long_add(size, &chunk->avail);
509 if (owner)
510 *owner = chunk->owner;
395 rcu_read_unlock(); 511 rcu_read_unlock();
396 return; 512 return;
397 } 513 }
@@ -399,7 +515,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
399 rcu_read_unlock(); 515 rcu_read_unlock();
400 BUG(); 516 BUG();
401} 517}
402EXPORT_SYMBOL(gen_pool_free); 518EXPORT_SYMBOL(gen_pool_free_owner);
403 519
404/** 520/**
405 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool 521 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
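
The new _owner entry points thread a caller-supplied cookie from chunk
registration through to allocation and free time. A minimal sketch of the
round trip (assumes the matching declarations in include/linux/genalloc.h;
the cookie struct is illustrative):

struct my_bank { int id; };			/* illustrative cookie */
static struct my_bank bank0 = { .id = 0 };

static struct gen_pool *my_pool_setup(unsigned long virt, phys_addr_t phys,
				      size_t size)
{
	struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);

	if (pool && gen_pool_add_owner(pool, virt, phys, size,
				       NUMA_NO_NODE, &bank0)) {
		gen_pool_destroy(pool);		/* registration failed */
		return NULL;
	}
	return pool;
}

static unsigned long my_alloc(struct gen_pool *pool, size_t size)
{
	void *owner;
	unsigned long va;

	va = gen_pool_alloc_algo_owner(pool, size, pool->algo, pool->data,
				       &owner);
	if (va && owner)
		pr_debug("served from bank %d\n",
			 ((struct my_bank *)owner)->id);
	return va;
}
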
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 81b70ed37209..b1d55b669ae2 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -1,10 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * lib/hexdump.c 3 * lib/hexdump.c
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. See README and COPYING for
7 * more details.
8 */ 4 */
9 5
10#include <linux/types.h> 6#include <linux/types.h>
diff --git a/lib/idr.c b/lib/idr.c
index c34e256d2f01..66a374892482 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -228,11 +228,21 @@ void *idr_get_next(struct idr *idr, int *nextid)
228{ 228{
229 struct radix_tree_iter iter; 229 struct radix_tree_iter iter;
230 void __rcu **slot; 230 void __rcu **slot;
231 void *entry = NULL;
231 unsigned long base = idr->idr_base; 232 unsigned long base = idr->idr_base;
232 unsigned long id = *nextid; 233 unsigned long id = *nextid;
233 234
234 id = (id < base) ? 0 : id - base; 235 id = (id < base) ? 0 : id - base;
235 slot = radix_tree_iter_find(&idr->idr_rt, &iter, id); 236 radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
237 entry = rcu_dereference_raw(*slot);
238 if (!entry)
239 continue;
240 if (!xa_is_internal(entry))
241 break;
242 if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
243 break;
244 slot = radix_tree_iter_retry(&iter);
245 }
236 if (!slot) 246 if (!slot)
237 return NULL; 247 return NULL;
238 id = iter.index + base; 248 id = iter.index + base;
@@ -241,7 +251,7 @@ void *idr_get_next(struct idr *idr, int *nextid)
241 return NULL; 251 return NULL;
242 252
243 *nextid = id; 253 *nextid = id;
244 return rcu_dereference_raw(*slot); 254 return entry;
245} 255}
246EXPORT_SYMBOL(idr_get_next); 256EXPORT_SYMBOL(idr_get_next);
247 257
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index b8f1d6cbb200..5de7c04e05ef 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -1,18 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright 2006 PathScale, Inc. All Rights Reserved. 3 * Copyright 2006 PathScale, Inc. All Rights Reserved.
3 *
4 * This file is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
16 */ 4 */
17 5
18#include <linux/export.h> 6#include <linux/export.h>
diff --git a/lib/jedec_ddr_data.c b/lib/jedec_ddr_data.c
index 6d2cbf1d567f..d0b312e28d36 100644
--- a/lib/jedec_ddr_data.c
+++ b/lib/jedec_ddr_data.c
@@ -1,13 +1,10 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * DDR addressing details and AC timing parameters from JEDEC specs 3 * DDR addressing details and AC timing parameters from JEDEC specs
3 * 4 *
4 * Copyright (C) 2012 Texas Instruments, Inc. 5 * Copyright (C) 2012 Texas Instruments, Inc.
5 * 6 *
6 * Aneesh V <aneesh@ti.com> 7 * Aneesh V <aneesh@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */ 8 */
12 9
13#include <memory/jedec_ddr.h> 10#include <memory/jedec_ddr.h>
diff --git a/lib/klist.c b/lib/klist.c
index f6b547812fe3..332a4fbf18ff 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -1,10 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * klist.c - Routines for manipulating klists. 3 * klist.c - Routines for manipulating klists.
3 * 4 *
4 * Copyright (C) 2005 Patrick Mochel 5 * Copyright (C) 2005 Patrick Mochel
5 * 6 *
6 * This file is released under the GPL v2.
7 *
8 * This klist interface provides a couple of structures that wrap around 7 * This klist interface provides a couple of structures that wrap around
9 * struct list_head to provide explicit list "head" (struct klist) and list 8 * struct list_head to provide explicit list "head" (struct klist) and list
10 * "node" (struct klist_node) objects. For struct klist, a spinlock is 9 * "node" (struct klist_node) objects. For struct klist, a spinlock is
diff --git a/lib/kobject.c b/lib/kobject.c
index f2ccdbac8ed9..83198cb37d8d 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -498,8 +498,10 @@ int kobject_rename(struct kobject *kobj, const char *new_name)
498 kobj = kobject_get(kobj); 498 kobj = kobject_get(kobj);
499 if (!kobj) 499 if (!kobj)
500 return -EINVAL; 500 return -EINVAL;
501 if (!kobj->parent) 501 if (!kobj->parent) {
502 kobject_put(kobj);
502 return -EINVAL; 503 return -EINVAL;
504 }
503 505
504 devpath = kobject_get_path(kobj, GFP_KERNEL); 506 devpath = kobject_get_path(kobj, GFP_KERNEL);
505 if (!devpath) { 507 if (!devpath) {
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 712ed1f4eb64..52f0c258c895 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -157,9 +157,11 @@ static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
157 * 157 *
158 * The number of pending lists of size 2^k is determined by the 158 * The number of pending lists of size 2^k is determined by the
159 * state of bit k of "count" plus two extra pieces of information: 159 * state of bit k of "count" plus two extra pieces of information:
160 *
160 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and 161 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
161 * - Whether the higher-order bits are zero or non-zero (i.e. 162 * - Whether the higher-order bits are zero or non-zero (i.e.
162 * is count >= 2^(k+1)). 163 * is count >= 2^(k+1)).
164 *
163 * There are six states we distinguish. "x" represents some arbitrary 165 * There are six states we distinguish. "x" represents some arbitrary
164 * bits, and "y" represents some arbitrary non-zero bits: 166 * bits, and "y" represents some arbitrary non-zero bits:
165 * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k 167 * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
index 82b19e4f1189..2fd7a46d55ec 100644
--- a/lib/mpi/mpi-pow.c
+++ b/lib/mpi/mpi-pow.c
@@ -24,6 +24,7 @@
24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) 24int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
25{ 25{
26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL; 26 mpi_ptr_t mp_marker = NULL, bp_marker = NULL, ep_marker = NULL;
27 struct karatsuba_ctx karactx = {};
27 mpi_ptr_t xp_marker = NULL; 28 mpi_ptr_t xp_marker = NULL;
28 mpi_ptr_t tspace = NULL; 29 mpi_ptr_t tspace = NULL;
29 mpi_ptr_t rp, ep, mp, bp; 30 mpi_ptr_t rp, ep, mp, bp;
@@ -150,13 +151,11 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
150 int c; 151 int c;
151 mpi_limb_t e; 152 mpi_limb_t e;
152 mpi_limb_t carry_limb; 153 mpi_limb_t carry_limb;
153 struct karatsuba_ctx karactx;
154 154
155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1)); 155 xp = xp_marker = mpi_alloc_limb_space(2 * (msize + 1));
156 if (!xp) 156 if (!xp)
157 goto enomem; 157 goto enomem;
158 158
159 memset(&karactx, 0, sizeof karactx);
160 negative_result = (ep[0] & 1) && base->sign; 159 negative_result = (ep[0] & 1) && base->sign;
161 160
162 i = esize - 1; 161 i = esize - 1;
@@ -281,8 +280,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
281 if (mod_shift_cnt) 280 if (mod_shift_cnt)
282 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt); 281 mpihelp_rshift(rp, rp, rsize, mod_shift_cnt);
283 MPN_NORMALIZE(rp, rsize); 282 MPN_NORMALIZE(rp, rsize);
284
285 mpihelp_release_karatsuba_ctx(&karactx);
286 } 283 }
287 284
288 if (negative_result && rsize) { 285 if (negative_result && rsize) {
@@ -299,6 +296,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
299leave: 296leave:
300 rc = 0; 297 rc = 0;
301enomem: 298enomem:
299 mpihelp_release_karatsuba_ctx(&karactx);
302 if (assign_rp) 300 if (assign_rp)
303 mpi_assign_limb_space(res, rp, size); 301 mpi_assign_limb_space(res, rp, size);
304 if (mp_marker) 302 if (mp_marker)
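
This hunk is a leak fix: the Karatsuba context used to be declared, zeroed
and released inside the inner block, so error paths jumping to enomem
skipped mpihelp_release_karatsuba_ctx(). Hoisting a zero-initialized
declaration to the top and releasing at the shared exit makes the cleanup
unconditional. The shape of the fix as a generic sketch (all helpers are
stand-ins, not mpi API):

int my_op(void)
{
	struct my_ctx ctx = {};	/* zeroed, so releasing is safe even if unused */
	int rc = -ENOMEM;

	if (!my_setup())
		goto out;	/* early error still reaches the cleanup */
	my_use(&ctx);
	rc = 0;
out:
	my_release(&ctx);	/* single cleanup point on every path */
	return rc;
}
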
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 3d2ba7cf83f4..21016b32d313 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -59,33 +59,22 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
59 err_inject->nb.priority = priority; 59 err_inject->nb.priority = priority;
60 60
61 dir = debugfs_create_dir(name, parent); 61 dir = debugfs_create_dir(name, parent);
62 if (!dir)
63 return ERR_PTR(-ENOMEM);
64 62
65 actions_dir = debugfs_create_dir("actions", dir); 63 actions_dir = debugfs_create_dir("actions", dir);
66 if (!actions_dir)
67 goto fail;
68 64
69 for (action = err_inject->actions; action->name; action++) { 65 for (action = err_inject->actions; action->name; action++) {
70 struct dentry *action_dir; 66 struct dentry *action_dir;
71 67
72 action_dir = debugfs_create_dir(action->name, actions_dir); 68 action_dir = debugfs_create_dir(action->name, actions_dir);
73 if (!action_dir)
74 goto fail;
75 69
76 /* 70 /*
77 * Create debugfs r/w file containing action->error. If 71 * Create debugfs r/w file containing action->error. If
78 * notifier call chain is called with action->val, it will 72 * notifier call chain is called with action->val, it will
79 * fail with the error code 73 * fail with the error code
80 */ 74 */
81 if (!debugfs_create_errno("error", mode, action_dir, 75 debugfs_create_errno("error", mode, action_dir, &action->error);
82 &action->error))
83 goto fail;
84 } 76 }
85 return dir; 77 return dir;
86fail:
87 debugfs_remove_recursive(dir);
88 return ERR_PTR(-ENOMEM);
89} 78}
90EXPORT_SYMBOL_GPL(notifier_err_inject_init); 79EXPORT_SYMBOL_GPL(notifier_err_inject_init);
91 80
diff --git a/lib/objagg.c b/lib/objagg.c
index 576be22e86de..55621fb82e0a 100644
--- a/lib/objagg.c
+++ b/lib/objagg.c
@@ -605,12 +605,10 @@ const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
605{ 605{
606 struct objagg_stats *objagg_stats; 606 struct objagg_stats *objagg_stats;
607 struct objagg_obj *objagg_obj; 607 struct objagg_obj *objagg_obj;
608 size_t alloc_size;
609 int i; 608 int i;
610 609
611 alloc_size = sizeof(*objagg_stats) + 610 objagg_stats = kzalloc(struct_size(objagg_stats, stats_info,
612 sizeof(objagg_stats->stats_info[0]) * objagg->obj_count; 611 objagg->obj_count), GFP_KERNEL);
613 objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
614 if (!objagg_stats) 612 if (!objagg_stats)
615 return ERR_PTR(-ENOMEM); 613 return ERR_PTR(-ENOMEM);
616 614
diff --git a/lib/parser.c b/lib/parser.c
index dd70e5e6c9e2..f5b3e5d7a7f9 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -1,8 +1,6 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * lib/parser.c - simple parser for mount, etc. options. 3 * lib/parser.c - simple parser for mount, etc. options.
3 *
4 * This source code is licensed under the GNU General Public License,
5 * Version 2. See the file COPYING for more details.
6 */ 4 */
7 5
8#include <linux/ctype.h> 6#include <linux/ctype.h>
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
index 7076ef1ba3dd..0a2e76035ea9 100644
--- a/lib/raid6/neon.c
+++ b/lib/raid6/neon.c
@@ -1,11 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics 3 * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
3 * 4 *
4 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org> 5 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */ 6 */
10 7
11#include <linux/raid/pq.h> 8#include <linux/raid/pq.h>
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index 914ebe98fc21..9e597e1f91a4 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -60,7 +60,7 @@ static inline void LOAD_DATA(int x, u8 *ptr)
60 typedef struct { u8 _[16 * $#]; } addrtype; 60 typedef struct { u8 _[16 * $#]; } addrtype;
61 register addrtype *__ptr asm("1") = (addrtype *) ptr; 61 register addrtype *__ptr asm("1") = (addrtype *) ptr;
62 62
63 asm volatile ("VLM %2,%3,0,%r1" 63 asm volatile ("VLM %2,%3,0,%1"
64 : : "m" (*__ptr), "a" (__ptr), "i" (x), 64 : : "m" (*__ptr), "a" (__ptr), "i" (x),
65 "i" (x + $# - 1)); 65 "i" (x + $# - 1));
66} 66}
diff --git a/lib/reed_solomon/Makefile b/lib/reed_solomon/Makefile
index ba9d7a3329eb..5d4fa68f26cb 100644
--- a/lib/reed_solomon/Makefile
+++ b/lib/reed_solomon/Makefile
@@ -4,4 +4,4 @@
4# 4#
5 5
6obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o 6obj-$(CONFIG_REED_SOLOMON) += reed_solomon.o
7 7obj-$(CONFIG_REED_SOLOMON_TEST) += test_rslib.o
diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
index 1db74eb098d0..805de84ae83d 100644
--- a/lib/reed_solomon/decode_rs.c
+++ b/lib/reed_solomon/decode_rs.c
@@ -22,6 +22,7 @@
22 uint16_t *index_of = rs->index_of; 22 uint16_t *index_of = rs->index_of;
23 uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; 23 uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error;
24 int count = 0; 24 int count = 0;
25 int num_corrected;
25 uint16_t msk = (uint16_t) rs->nn; 26 uint16_t msk = (uint16_t) rs->nn;
26 27
27 /* 28 /*
@@ -39,11 +40,21 @@
39 40
40 /* Check length parameter for validity */ 41 /* Check length parameter for validity */
41 pad = nn - nroots - len; 42 pad = nn - nroots - len;
42 BUG_ON(pad < 0 || pad >= nn); 43 BUG_ON(pad < 0 || pad >= nn - nroots);
43 44
44 /* Does the caller provide the syndrome ? */ 45 /* Does the caller provide the syndrome ? */
45 if (s != NULL) 46 if (s != NULL) {
46 goto decode; 47 for (i = 0; i < nroots; i++) {
48 /* The syndrome is in index form,
49 * so nn represents zero
50 */
51 if (s[i] != nn)
52 goto decode;
53 }
54
55 /* syndrome is zero, no errors to correct */
56 return 0;
57 }
47 58
48 /* form the syndromes; i.e., evaluate data(x) at roots of 59 /* form the syndromes; i.e., evaluate data(x) at roots of
49 * g(x) */ 60 * g(x) */
@@ -88,8 +99,7 @@
88 /* if syndrome is zero, data[] is a codeword and there are no 99 /* if syndrome is zero, data[] is a codeword and there are no
89 * errors to correct. So return data[] unmodified 100 * errors to correct. So return data[] unmodified
90 */ 101 */
91 count = 0; 102 return 0;
92 goto finish;
93 } 103 }
94 104
95 decode: 105 decode:
@@ -99,9 +109,9 @@
99 if (no_eras > 0) { 109 if (no_eras > 0) {
100 /* Init lambda to be the erasure locator polynomial */ 110 /* Init lambda to be the erasure locator polynomial */
101 lambda[1] = alpha_to[rs_modnn(rs, 111 lambda[1] = alpha_to[rs_modnn(rs,
102 prim * (nn - 1 - eras_pos[0]))]; 112 prim * (nn - 1 - (eras_pos[0] + pad)))];
103 for (i = 1; i < no_eras; i++) { 113 for (i = 1; i < no_eras; i++) {
104 u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i])); 114 u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
105 for (j = i + 1; j > 0; j--) { 115 for (j = i + 1; j > 0; j--) {
106 tmp = index_of[lambda[j - 1]]; 116 tmp = index_of[lambda[j - 1]];
107 if (tmp != nn) { 117 if (tmp != nn) {
@@ -175,6 +185,15 @@
175 if (lambda[i] != nn) 185 if (lambda[i] != nn)
176 deg_lambda = i; 186 deg_lambda = i;
177 } 187 }
188
189 if (deg_lambda == 0) {
190 /*
191 * deg(lambda) is zero even though the syndrome is non-zero
192 * => uncorrectable error detected
193 */
194 return -EBADMSG;
195 }
196
178 /* Find roots of error+erasure locator polynomial by Chien search */ 197 /* Find roots of error+erasure locator polynomial by Chien search */
179 memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0])); 198 memcpy(&reg[1], &lambda[1], nroots * sizeof(reg[0]));
180 count = 0; /* Number of roots of lambda(x) */ 199 count = 0; /* Number of roots of lambda(x) */
@@ -188,6 +207,12 @@
188 } 207 }
189 if (q != 0) 208 if (q != 0)
190 continue; /* Not a root */ 209 continue; /* Not a root */
210
211 if (k < pad) {
212 /* Impossible error location. Uncorrectable error. */
213 return -EBADMSG;
214 }
215
191 /* store root (index-form) and error location number */ 216 /* store root (index-form) and error location number */
192 root[count] = i; 217 root[count] = i;
193 loc[count] = k; 218 loc[count] = k;
@@ -202,8 +227,7 @@
202 * deg(lambda) unequal to number of roots => uncorrectable 227 * deg(lambda) unequal to number of roots => uncorrectable
203 * error detected 228 * error detected
204 */ 229 */
205 count = -EBADMSG; 230 return -EBADMSG;
206 goto finish;
207 } 231 }
208 /* 232 /*
209 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo 233 * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo
@@ -223,7 +247,9 @@
223 /* 247 /*
224 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 = 248 * Compute error values in poly-form. num1 = omega(inv(X(l))), num2 =
225 * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form 249 * inv(X(l))**(fcr-1) and den = lambda_pr(inv(X(l))) all in poly-form
250 * Note: we reuse the buffer for b to store the correction pattern
226 */ 251 */
252 num_corrected = 0;
227 for (j = count - 1; j >= 0; j--) { 253 for (j = count - 1; j >= 0; j--) {
228 num1 = 0; 254 num1 = 0;
229 for (i = deg_omega; i >= 0; i--) { 255 for (i = deg_omega; i >= 0; i--) {
@@ -231,6 +257,13 @@
231 num1 ^= alpha_to[rs_modnn(rs, omega[i] + 257 num1 ^= alpha_to[rs_modnn(rs, omega[i] +
232 i * root[j])]; 258 i * root[j])];
233 } 259 }
260
261 if (num1 == 0) {
262 /* Nothing to correct at this position */
263 b[j] = 0;
264 continue;
265 }
266
234 num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)]; 267 num2 = alpha_to[rs_modnn(rs, root[j] * (fcr - 1) + nn)];
235 den = 0; 268 den = 0;
236 269
@@ -242,30 +275,52 @@
242 i * root[j])]; 275 i * root[j])];
243 } 276 }
244 } 277 }
245 /* Apply error to data */ 278
246 if (num1 != 0 && loc[j] >= pad) { 279 b[j] = alpha_to[rs_modnn(rs, index_of[num1] +
247 uint16_t cor = alpha_to[rs_modnn(rs,index_of[num1] + 280 index_of[num2] +
248 index_of[num2] + 281 nn - index_of[den])];
249 nn - index_of[den])]; 282 num_corrected++;
250 /* Store the error correction pattern, if a 283 }
251 * correction buffer is available */ 284
252 if (corr) { 285 /*
253 corr[j] = cor; 286 * We compute the syndrome of the 'error' and check that it matches
254 } else { 287 * the syndrome of the received word
255 /* If a data buffer is given and the 288 */
256 * error is inside the message, 289 for (i = 0; i < nroots; i++) {
257 * correct it */ 290 tmp = 0;
258 if (data && (loc[j] < (nn - nroots))) 291 for (j = 0; j < count; j++) {
259 data[loc[j] - pad] ^= cor; 292 if (b[j] == 0)
260 } 293 continue;
294
295 k = (fcr + i) * prim * (nn - loc[j] - 1);
296 tmp ^= alpha_to[rs_modnn(rs, index_of[b[j]] + k)];
261 } 297 }
298
299 if (tmp != alpha_to[s[i]])
300 return -EBADMSG;
262 } 301 }
263 302
264finish: 303 /*
265 if (eras_pos != NULL) { 304 * Store the error correction pattern, if a
266 for (i = 0; i < count; i++) 305 * correction buffer is available
267 eras_pos[i] = loc[i] - pad; 306 */
307 if (corr && eras_pos) {
308 j = 0;
309 for (i = 0; i < count; i++) {
310 if (b[i]) {
311 corr[j] = b[i];
312 eras_pos[j++] = loc[i] - pad;
313 }
314 }
315 } else if (data && par) {
316 /* Apply error to data and parity */
317 for (i = 0; i < count; i++) {
318 if (loc[i] < (nn - nroots))
319 data[loc[i] - pad] ^= b[i];
320 else
321 par[loc[i] - pad - len] ^= b[i];
322 }
268 } 323 }
269 return count;
270 324
325 return num_corrected;
271} 326}
diff --git a/lib/reed_solomon/reed_solomon.c b/lib/reed_solomon/reed_solomon.c
index e5fdc8b9e856..bbc01bad3053 100644
--- a/lib/reed_solomon/reed_solomon.c
+++ b/lib/reed_solomon/reed_solomon.c
@@ -340,7 +340,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
340 * @data: data field of a given type 340 * @data: data field of a given type
341 * @par: received parity data field 341 * @par: received parity data field
342 * @len: data length 342 * @len: data length
343 * @s: syndrome data field (if NULL, syndrome is calculated) 343 * @s: syndrome data field, must be in index form
344 * (if NULL, syndrome is calculated)
344 * @no_eras: number of erasures 345 * @no_eras: number of erasures
345 * @eras_pos: position of erasures, can be NULL 346 * @eras_pos: position of erasures, can be NULL
346 * @invmsk: invert data mask (will be xored on data, not on parity!) 347 * @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -354,7 +355,8 @@ EXPORT_SYMBOL_GPL(encode_rs8);
354 * decoding, so the caller has to ensure that decoder invocations are 355 * decoding, so the caller has to ensure that decoder invocations are
355 * serialized. 356 * serialized.
356 * 357 *
357 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. 358 * Returns the number of corrected symbols or -EBADMSG for uncorrectable
359 * errors. The count includes errors in the parity.
358 */ 360 */
359int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len, 361int decode_rs8(struct rs_control *rsc, uint8_t *data, uint16_t *par, int len,
360 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 362 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
@@ -391,7 +393,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
391 * @data: data field of a given type 393 * @data: data field of a given type
392 * @par: received parity data field 394 * @par: received parity data field
393 * @len: data length 395 * @len: data length
394 * @s: syndrome data field (if NULL, syndrome is calculated) 396 * @s: syndrome data field, must be in index form
397 * (if NULL, syndrome is calculated)
395 * @no_eras: number of erasures 398 * @no_eras: number of erasures
396 * @eras_pos: position of erasures, can be NULL 399 * @eras_pos: position of erasures, can be NULL
397 * @invmsk: invert data mask (will be xored on data, not on parity!) 400 * @invmsk: invert data mask (will be xored on data, not on parity!)
@@ -403,7 +406,8 @@ EXPORT_SYMBOL_GPL(encode_rs16);
403 * decoding, so the caller has to ensure that decoder invocations are 406 * decoding, so the caller has to ensure that decoder invocations are
404 * serialized. 407 * serialized.
405 * 408 *
406 * Returns the number of corrected bits or -EBADMSG for uncorrectable errors. 409 * Returns the number of corrected symbols or -EBADMSG for uncorrectable
410 * errors. The count includes errors in the parity.
407 */ 411 */
408int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len, 412int decode_rs16(struct rs_control *rsc, uint16_t *data, uint16_t *par, int len,
409 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk, 413 uint16_t *s, int no_eras, int *eras_pos, uint16_t invmsk,
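
With the reworked decoder the correction-buffer interface reports XOR
patterns and positions without touching the buffers, and positions at or
past len refer to parity symbols. A sketch of a caller (array sizes
illustrative; this mirrors what test_rslib.c below does):

static int my_decode(struct rs_control *rsc, uint8_t *data, uint16_t *par,
		     int len)
{
	uint16_t corr[32];	/* must hold at least nroots entries */
	int eras_pos[32];
	int i, n;

	n = decode_rs8(rsc, data, par, len, NULL, 0, eras_pos, 0, corr);
	if (n < 0)
		return n;	/* -EBADMSG: uncorrectable word */
	for (i = 0; i < n; i++) {
		if (eras_pos[i] < len)
			data[eras_pos[i]] ^= corr[i];
		else
			par[eras_pos[i] - len] ^= corr[i];
	}
	return n;		/* corrected symbols, parity included */
}
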
diff --git a/lib/reed_solomon/test_rslib.c b/lib/reed_solomon/test_rslib.c
new file mode 100644
index 000000000000..4eb29f365ece
--- /dev/null
+++ b/lib/reed_solomon/test_rslib.c
@@ -0,0 +1,518 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Tests for Generic Reed Solomon encoder / decoder library
4 *
5 * Written by Ferdinand Blomqvist
6 * Based on previous work by Phil Karn, KA9Q
7 */
8#include <linux/rslib.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/moduleparam.h>
12#include <linux/random.h>
13#include <linux/slab.h>
14
15enum verbosity {
16 V_SILENT,
17 V_PROGRESS,
18 V_CSUMMARY
19};
20
21enum method {
22 CORR_BUFFER,
23 CALLER_SYNDROME,
24 IN_PLACE
25};
26
27#define __param(type, name, init, msg) \
28 static type name = init; \
29 module_param(name, type, 0444); \
30 MODULE_PARM_DESC(name, msg)
31
32__param(int, v, V_PROGRESS, "Verbosity level");
33__param(int, ewsc, 1, "Erasures without symbol corruption");
34__param(int, bc, 1, "Test for correct behaviour beyond error correction capacity");
35
36struct etab {
37 int symsize;
38 int genpoly;
39 int fcs;
40 int prim;
41 int nroots;
42 int ntrials;
43};
44
45/* List of codes to test */
46static struct etab Tab[] = {
47 {2, 0x7, 1, 1, 1, 100000 },
48 {3, 0xb, 1, 1, 2, 100000 },
49 {3, 0xb, 1, 1, 3, 100000 },
50 {3, 0xb, 2, 1, 4, 100000 },
51 {4, 0x13, 1, 1, 4, 10000 },
52 {5, 0x25, 1, 1, 6, 1000 },
53 {6, 0x43, 3, 1, 8, 1000 },
54 {7, 0x89, 1, 1, 14, 500 },
55 {8, 0x11d, 1, 1, 30, 100 },
56 {8, 0x187, 112, 11, 32, 100 },
57 {9, 0x211, 1, 1, 33, 80 },
58 {0, 0, 0, 0, 0, 0},
59};
60
61
62struct estat {
63 int dwrong;
64 int irv;
65 int wepos;
66 int nwords;
67};
68
69struct bcstat {
70 int rfail;
71 int rsuccess;
72 int noncw;
73 int nwords;
74};
75
76struct wspace {
77 uint16_t *c; /* sent codeword */
78 uint16_t *r; /* received word */
79 uint16_t *s; /* syndrome */
80 uint16_t *corr; /* correction buffer */
81 int *errlocs;
82 int *derrlocs;
83};
84
85struct pad {
86 int mult;
87 int shift;
88};
89
90static struct pad pad_coef[] = {
91 { 0, 0 },
92 { 1, 2 },
93 { 1, 1 },
94 { 3, 2 },
95 { 1, 0 },
96};
97
98static void free_ws(struct wspace *ws)
99{
100 if (!ws)
101 return;
102
103 kfree(ws->errlocs);
104 kfree(ws->c);
105 kfree(ws);
106}
107
108static struct wspace *alloc_ws(struct rs_codec *rs)
109{
110 int nroots = rs->nroots;
111 struct wspace *ws;
112 int nn = rs->nn;
113
114 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
115 if (!ws)
116 return NULL;
117
118 ws->c = kmalloc_array(2 * (nn + nroots),
119 sizeof(uint16_t), GFP_KERNEL);
120 if (!ws->c)
121 goto err;
122
123 ws->r = ws->c + nn;
124 ws->s = ws->r + nn;
125 ws->corr = ws->s + nroots;
126
127 ws->errlocs = kmalloc_array(nn + nroots, sizeof(int), GFP_KERNEL);
128 if (!ws->errlocs)
129 goto err;
130
131 ws->derrlocs = ws->errlocs + nn;
132 return ws;
133
134err:
135 free_ws(ws);
136 return NULL;
137}
138
139
140/*
141 * Generates a random codeword and stores it in c. Generates random errors and
142 * erasures, and stores the random word with errors in r. Erasure positions are
143 * stored in derrlocs, while errlocs has one of three values in every position:
144 *
145 * 0 if there is no error in this position;
146 * 1 if there is a symbol error in this position;
147 * 2 if there is an erasure without symbol corruption.
148 *
149 * Returns the number of corrupted symbols.
150 */
151static int get_rcw_we(struct rs_control *rs, struct wspace *ws,
152 int len, int errs, int eras)
153{
154 int nroots = rs->codec->nroots;
155 int *derrlocs = ws->derrlocs;
156 int *errlocs = ws->errlocs;
157 int dlen = len - nroots;
158 int nn = rs->codec->nn;
159 uint16_t *c = ws->c;
160 uint16_t *r = ws->r;
161 int errval;
162 int errloc;
163 int i;
164
165 /* Load c with random data and encode */
166 for (i = 0; i < dlen; i++)
167 c[i] = prandom_u32() & nn;
168
169 memset(c + dlen, 0, nroots * sizeof(*c));
170 encode_rs16(rs, c, dlen, c + dlen, 0);
171
172 /* Make copy and add errors and erasures */
173 memcpy(r, c, len * sizeof(*r));
174 memset(errlocs, 0, len * sizeof(*errlocs));
175 memset(derrlocs, 0, nroots * sizeof(*derrlocs));
176
177 /* Generating random errors */
178 for (i = 0; i < errs; i++) {
179 do {
180 /* Error value must be nonzero */
181 errval = prandom_u32() & nn;
182 } while (errval == 0);
183
184 do {
185 /* Must not choose the same location twice */
186 errloc = prandom_u32() % len;
187 } while (errlocs[errloc] != 0);
188
189 errlocs[errloc] = 1;
190 r[errloc] ^= errval;
191 }
192
193 /* Generating random erasures */
194 for (i = 0; i < eras; i++) {
195 do {
196 /* Must not choose the same location twice */
197 errloc = prandom_u32() % len;
198 } while (errlocs[errloc] != 0);
199
200 derrlocs[i] = errloc;
201
202 if (ewsc && (prandom_u32() & 1)) {
203 /* Erasure with the symbol intact */
204 errlocs[errloc] = 2;
205 } else {
206 /* Erasure with corrupted symbol */
207 do {
208 /* Error value must be nonzero */
209 errval = prandom_u32() & nn;
210 } while (errval == 0);
211
212 errlocs[errloc] = 1;
213 r[errloc] ^= errval;
214 errs++;
215 }
216 }
217
218 return errs;
219}
220
221static void fix_err(uint16_t *data, int nerrs, uint16_t *corr, int *errlocs)
222{
223 int i;
224
225 for (i = 0; i < nerrs; i++)
226 data[errlocs[i]] ^= corr[i];
227}
228
229static void compute_syndrome(struct rs_control *rsc, uint16_t *data,
230 int len, uint16_t *syn)
231{
232 struct rs_codec *rs = rsc->codec;
233 uint16_t *alpha_to = rs->alpha_to;
234 uint16_t *index_of = rs->index_of;
235 int nroots = rs->nroots;
236 int prim = rs->prim;
237 int fcr = rs->fcr;
238 int i, j;
239
240 /* Calculating syndrome */
241 for (i = 0; i < nroots; i++) {
242 syn[i] = data[0];
243 for (j = 1; j < len; j++) {
244 if (syn[i] == 0) {
245 syn[i] = data[j];
246 } else {
247 syn[i] = data[j] ^
248 alpha_to[rs_modnn(rs, index_of[syn[i]]
249 + (fcr + i) * prim)];
250 }
251 }
252 }
253
254 /* Convert to index form */
255 for (i = 0; i < nroots; i++)
256 syn[i] = rs->index_of[syn[i]];
257}
258
259/* Test up to error correction capacity */
260static void test_uc(struct rs_control *rs, int len, int errs,
261 int eras, int trials, struct estat *stat,
262 struct wspace *ws, int method)
263{
264 int dlen = len - rs->codec->nroots;
265 int *derrlocs = ws->derrlocs;
266 int *errlocs = ws->errlocs;
267 uint16_t *corr = ws->corr;
268 uint16_t *c = ws->c;
269 uint16_t *r = ws->r;
270 uint16_t *s = ws->s;
271 int derrs, nerrs;
272 int i, j;
273
274 for (j = 0; j < trials; j++) {
275 nerrs = get_rcw_we(rs, ws, len, errs, eras);
276
277 switch (method) {
278 case CORR_BUFFER:
279 derrs = decode_rs16(rs, r, r + dlen, dlen,
280 NULL, eras, derrlocs, 0, corr);
281 fix_err(r, derrs, corr, derrlocs);
282 break;
283 case CALLER_SYNDROME:
284 compute_syndrome(rs, r, len, s);
285 derrs = decode_rs16(rs, NULL, NULL, dlen,
286 s, eras, derrlocs, 0, corr);
287 fix_err(r, derrs, corr, derrlocs);
288 break;
289 case IN_PLACE:
290 derrs = decode_rs16(rs, r, r + dlen, dlen,
291 NULL, eras, derrlocs, 0, NULL);
292 break;
293 default:
294 continue;
295 }
296
297 if (derrs != nerrs)
298 stat->irv++;
299
300 if (method != IN_PLACE) {
301 for (i = 0; i < derrs; i++) {
302 if (errlocs[derrlocs[i]] != 1)
303 stat->wepos++;
304 }
305 }
306
307 if (memcmp(r, c, len * sizeof(*r)))
308 stat->dwrong++;
309 }
310 stat->nwords += trials;
311}
312
313static int ex_rs_helper(struct rs_control *rs, struct wspace *ws,
314 int len, int trials, int method)
315{
316 static const char * const desc[] = {
317 "Testing correction buffer interface...",
318 "Testing with caller provided syndrome...",
319 "Testing in-place interface..."
320 };
321
322 struct estat stat = {0, 0, 0, 0};
323 int nroots = rs->codec->nroots;
324 int errs, eras, retval;
325
326 if (v >= V_PROGRESS)
327 pr_info(" %s\n", desc[method]);
328
329 for (errs = 0; errs <= nroots / 2; errs++)
330 for (eras = 0; eras <= nroots - 2 * errs; eras++)
331 test_uc(rs, len, errs, eras, trials, &stat, ws, method);
332
333 if (v >= V_CSUMMARY) {
334 pr_info(" Decodes wrong: %d / %d\n",
335 stat.dwrong, stat.nwords);
336 pr_info(" Wrong return value: %d / %d\n",
337 stat.irv, stat.nwords);
338 if (method != IN_PLACE)
339 pr_info(" Wrong error position: %d\n", stat.wepos);
340 }
341
342 retval = stat.dwrong + stat.wepos + stat.irv;
343 if (retval && v >= V_PROGRESS)
344 pr_warn(" FAIL: %d decoding failures!\n", retval);
345
346 return retval;
347}
348
349static int exercise_rs(struct rs_control *rs, struct wspace *ws,
350 int len, int trials)
351{
352
353 int retval = 0;
354 int i;
355
356 if (v >= V_PROGRESS)
357 pr_info("Testing up to error correction capacity...\n");
358
359 for (i = 0; i <= IN_PLACE; i++)
360 retval |= ex_rs_helper(rs, ws, len, trials, i);
361
362 return retval;
363}
364
365/* Tests for correct behaviour beyond error correction capacity */
366static void test_bc(struct rs_control *rs, int len, int errs,
367 int eras, int trials, struct bcstat *stat,
368 struct wspace *ws)
369{
370 int nroots = rs->codec->nroots;
371 int dlen = len - nroots;
372 int *derrlocs = ws->derrlocs;
373 uint16_t *corr = ws->corr;
374 uint16_t *r = ws->r;
375 int derrs, j;
376
377 for (j = 0; j < trials; j++) {
378 get_rcw_we(rs, ws, len, errs, eras);
379 derrs = decode_rs16(rs, r, r + dlen, dlen,
380 NULL, eras, derrlocs, 0, corr);
381 fix_err(r, derrs, corr, derrlocs);
382
383 if (derrs >= 0) {
384 stat->rsuccess++;
385
386 /*
387 * We check that the returned word is actually a
388 * codeword. The obvious way to do this would be to
389 * compute the syndrome, but we don't want to replicate
390 * that code here. However, all the codes are in
391 * systematic form, and therefore we can encode the
392 * returned word, and see whether the parity changes or
393 * not.
394 */
395 memset(corr, 0, nroots * sizeof(*corr));
396 encode_rs16(rs, r, dlen, corr, 0);
397
398 if (memcmp(r + dlen, corr, nroots * sizeof(*corr)))
399 stat->noncw++;
400 } else {
401 stat->rfail++;
402 }
403 }
404 stat->nwords += trials;
405}
406
407static int exercise_rs_bc(struct rs_control *rs, struct wspace *ws,
408 int len, int trials)
409{
410 struct bcstat stat = {0, 0, 0, 0};
411 int nroots = rs->codec->nroots;
412 int errs, eras, cutoff;
413
414 if (v >= V_PROGRESS)
415 pr_info("Testing beyond error correction capacity...\n");
416
417 for (errs = 1; errs <= nroots; errs++) {
418 eras = nroots - 2 * errs + 1;
419 if (eras < 0)
420 eras = 0;
421
422 cutoff = nroots <= len - errs ? nroots : len - errs;
423 for (; eras <= cutoff; eras++)
424 test_bc(rs, len, errs, eras, trials, &stat, ws);
425 }
426
427 if (v >= V_CSUMMARY) {
428 pr_info(" decoder gives up: %d / %d\n",
429 stat.rfail, stat.nwords);
430 pr_info(" decoder returns success: %d / %d\n",
431 stat.rsuccess, stat.nwords);
432 pr_info(" not a codeword: %d / %d\n",
433 stat.noncw, stat.rsuccess);
434 }
435
436 if (stat.noncw && v >= V_PROGRESS)
437 pr_warn(" FAIL: %d silent failures!\n", stat.noncw);
438
439 return stat.noncw;
440}
441
442static int run_exercise(struct etab *e)
443{
444 int nn = (1 << e->symsize) - 1;
445 int kk = nn - e->nroots;
446 struct rs_control *rsc;
447 int retval = -ENOMEM;
448 int max_pad = kk - 1;
449 int prev_pad = -1;
450 struct wspace *ws;
451 int i;
452
453 rsc = init_rs(e->symsize, e->genpoly, e->fcs, e->prim, e->nroots);
454 if (!rsc)
455 return retval;
456
457 ws = alloc_ws(rsc->codec);
458 if (!ws)
459 goto err;
460
461 retval = 0;
462 for (i = 0; i < ARRAY_SIZE(pad_coef); i++) {
463 int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;
464 int len = nn - pad;
465
466 if (pad == prev_pad)
467 continue;
468
469 prev_pad = pad;
470 if (v >= V_PROGRESS) {
471 pr_info("Testing (%d,%d)_%d code...\n",
472 len, kk - pad, nn + 1);
473 }
474
475 retval |= exercise_rs(rsc, ws, len, e->ntrials);
476 if (bc)
477 retval |= exercise_rs_bc(rsc, ws, len, e->ntrials);
478 }
479
480 free_ws(ws);
481
482err:
483 free_rs(rsc);
484 return retval;
485}
486
487static int __init test_rslib_init(void)
488{
489 int i, fail = 0;
490
491 for (i = 0; Tab[i].symsize != 0 ; i++) {
492 int retval;
493
494 retval = run_exercise(Tab + i);
495 if (retval < 0)
496 return -ENOMEM;
497
498 fail |= retval;
499 }
500
501 if (fail)
502 pr_warn("rslib: test failed\n");
503 else
504 pr_info("rslib: test ok\n");
505
506 return -EAGAIN; /* Returning failure unloads the module immediately */
507}
508
509static void __exit test_rslib_exit(void)
510{
511}
512
513module_init(test_rslib_init)
514module_exit(test_rslib_exit)
515
516MODULE_LICENSE("GPL");
517MODULE_AUTHOR("Ferdinand Blomqvist");
518MODULE_DESCRIPTION("Reed-Solomon library test");
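A note on the pad_coef[] table in the test above: each {mult, shift} pair selects the fraction (mult / 2^shift) of max_pad, so the five entries exercise shortened codes padded by 0, 1/4, 1/2, 3/4 and all of max_pad. A minimal user-space sketch of that arithmetic, reusing the {8, 0x11d, 1, 1, 30, 100} entry from Tab[] (the demo program itself is not part of the module):

    #include <stdio.h>

    struct pad { int mult; int shift; };

    static const struct pad pad_coef[] = {
            { 0, 0 }, { 1, 2 }, { 1, 1 }, { 3, 2 }, { 1, 0 },
    };

    int main(void)
    {
            int symsize = 8, nroots = 30;   /* the {8, 0x11d, 1, 1, 30, 100} code */
            int nn = (1 << symsize) - 1;    /* codeword length: 255 */
            int kk = nn - nroots;           /* data symbols: 225 */
            int max_pad = kk - 1;           /* 224 */
            unsigned int i;

            for (i = 0; i < sizeof(pad_coef) / sizeof(pad_coef[0]); i++) {
                    int pad = (pad_coef[i].mult * max_pad) >> pad_coef[i].shift;

                    /* run_exercise() tests the shortened (nn - pad, kk - pad) code */
                    printf("pad=%3d -> (%d,%d) code\n", pad, nn - pad, kk - pad);
            }
            return 0;
    }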
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 935ec80f213f..bdb7e4cadf05 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Resizable, Scalable, Concurrent Hash Table 3 * Resizable, Scalable, Concurrent Hash Table
3 * 4 *
@@ -8,10 +9,6 @@
8 * Code partially derived from nft_hash 9 * Code partially derived from nft_hash
9 * Rewritten with rehash code from br_multicast plus single list 10 * Rewritten with rehash code from br_multicast plus single list
10 * pointer as suggested by Josh Triplett 11 * pointer as suggested by Josh Triplett
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */ 12 */
16 13
17#include <linux/atomic.h> 14#include <linux/atomic.h>
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 54f57cd117c6..969e5400a615 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -26,9 +26,7 @@ static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
26 /* 26 /*
27 * First get a stable cleared mask, setting the old mask to 0. 27 * First get a stable cleared mask, setting the old mask to 0.
28 */ 28 */
29 do { 29 mask = xchg(&sb->map[index].cleared, 0);
30 mask = sb->map[index].cleared;
31 } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
32 30
33 /* 31 /*
34 * Now clear the masked bits in our free word 32 * Now clear the masked bits in our free word
@@ -516,10 +514,8 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
516 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 514 struct sbq_wait_state *ws = &sbq->ws[wake_index];
517 515
518 if (waitqueue_active(&ws->wait)) { 516 if (waitqueue_active(&ws->wait)) {
519 int o = atomic_read(&sbq->wake_index); 517 if (wake_index != atomic_read(&sbq->wake_index))
520 518 atomic_set(&sbq->wake_index, wake_index);
521 if (wake_index != o)
522 atomic_cmpxchg(&sbq->wake_index, o, wake_index);
523 return ws; 519 return ws;
524 } 520 }
525 521
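Two simplifications land in sbitmap.c above: the deferred-clear path swaps a cmpxchg() retry loop for a single xchg(), and sbq_wake_ptr() downgrades atomic_cmpxchg() to a plain atomic_set(), since losing a race on wake_index is harmless. The xchg() change works because atomically swapping in zero both snapshots the cleared bits and resets the word in one step, which is exactly what the loop was emulating. A user-space sketch with the GCC/Clang atomic builtins (kernel xchg()/cmpxchg() differ in spelling, not semantics):

    #include <stdio.h>

    static unsigned long cleared = 0xf0f0;

    /* Old pattern: re-read the word, try to swap in 0, retry if it moved. */
    static unsigned long grab_cmpxchg(void)
    {
            unsigned long mask;

            do {
                    mask = __atomic_load_n(&cleared, __ATOMIC_RELAXED);
            } while (!__atomic_compare_exchange_n(&cleared, &mask, 0, 0,
                                                  __ATOMIC_SEQ_CST,
                                                  __ATOMIC_RELAXED));
            return mask;
    }

    /* New pattern: one atomic exchange does the same job. */
    static unsigned long grab_xchg(void)
    {
            return __atomic_exchange_n(&cleared, 0, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
            printf("cmpxchg loop grabbed %#lx\n", grab_cmpxchg());
            cleared = 0x0f0f;
            printf("xchg grabbed %#lx\n", grab_xchg());
            return 0;
    }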
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 739dc9fe2c55..c2cf2c311b7d 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1,10 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com> 3 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
3 * 4 *
4 * Scatterlist handling helpers. 5 * Scatterlist handling helpers.
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */ 6 */
9#include <linux/export.h> 7#include <linux/export.h>
10#include <linux/slab.h> 8#include <linux/slab.h>
@@ -181,7 +179,8 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
181 * __sg_free_table - Free a previously mapped sg table 179 * __sg_free_table - Free a previously mapped sg table
182 * @table: The sg table header to use 180 * @table: The sg table header to use
183 * @max_ents: The maximum number of entries per single scatterlist 181 * @max_ents: The maximum number of entries per single scatterlist
184 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk 182 * @nents_first_chunk: Number of entries in the (preallocated) first
183 * scatterlist chunk, 0 means no such preallocated first chunk
185 * @free_fn: Free function 184 * @free_fn: Free function
186 * 185 *
187 * Description: 186 * Description:
@@ -191,9 +190,10 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents)
191 * 190 *
192 **/ 191 **/
193void __sg_free_table(struct sg_table *table, unsigned int max_ents, 192void __sg_free_table(struct sg_table *table, unsigned int max_ents,
194 bool skip_first_chunk, sg_free_fn *free_fn) 193 unsigned int nents_first_chunk, sg_free_fn *free_fn)
195{ 194{
196 struct scatterlist *sgl, *next; 195 struct scatterlist *sgl, *next;
196 unsigned curr_max_ents = nents_first_chunk ?: max_ents;
197 197
198 if (unlikely(!table->sgl)) 198 if (unlikely(!table->sgl))
199 return; 199 return;
@@ -209,9 +209,9 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
209 * sg_size is then one less than alloc size, since the last 209 * sg_size is then one less than alloc size, since the last
210 * element is the chain pointer. 210 * element is the chain pointer.
211 */ 211 */
212 if (alloc_size > max_ents) { 212 if (alloc_size > curr_max_ents) {
213 next = sg_chain_ptr(&sgl[max_ents - 1]); 213 next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
214 alloc_size = max_ents; 214 alloc_size = curr_max_ents;
215 sg_size = alloc_size - 1; 215 sg_size = alloc_size - 1;
216 } else { 216 } else {
217 sg_size = alloc_size; 217 sg_size = alloc_size;
@@ -219,11 +219,12 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
219 } 219 }
220 220
221 table->orig_nents -= sg_size; 221 table->orig_nents -= sg_size;
222 if (skip_first_chunk) 222 if (nents_first_chunk)
223 skip_first_chunk = false; 223 nents_first_chunk = 0;
224 else 224 else
225 free_fn(sgl, alloc_size); 225 free_fn(sgl, alloc_size);
226 sgl = next; 226 sgl = next;
227 curr_max_ents = max_ents;
227 } 228 }
228 229
229 table->sgl = NULL; 230 table->sgl = NULL;
@@ -246,6 +247,8 @@ EXPORT_SYMBOL(sg_free_table);
246 * @table: The sg table header to use 247 * @table: The sg table header to use
247 * @nents: Number of entries in sg list 248 * @nents: Number of entries in sg list
248 * @max_ents: The maximum number of entries the allocator returns per call 249 * @max_ents: The maximum number of entries the allocator returns per call
250 * @nents_first_chunk: Number of entries in the (preallocated) first
251 * scatterlist chunk, 0 means no such preallocated chunk provided by the user
249 * @gfp_mask: GFP allocation mask 252 * @gfp_mask: GFP allocation mask
250 * @alloc_fn: Allocator to use 253 * @alloc_fn: Allocator to use
251 * 254 *
@@ -262,10 +265,13 @@ EXPORT_SYMBOL(sg_free_table);
262 **/ 265 **/
263int __sg_alloc_table(struct sg_table *table, unsigned int nents, 266int __sg_alloc_table(struct sg_table *table, unsigned int nents,
264 unsigned int max_ents, struct scatterlist *first_chunk, 267 unsigned int max_ents, struct scatterlist *first_chunk,
265 gfp_t gfp_mask, sg_alloc_fn *alloc_fn) 268 unsigned int nents_first_chunk, gfp_t gfp_mask,
269 sg_alloc_fn *alloc_fn)
266{ 270{
267 struct scatterlist *sg, *prv; 271 struct scatterlist *sg, *prv;
268 unsigned int left; 272 unsigned int left;
273 unsigned curr_max_ents = nents_first_chunk ?: max_ents;
274 unsigned prv_max_ents;
269 275
270 memset(table, 0, sizeof(*table)); 276 memset(table, 0, sizeof(*table));
271 277
@@ -281,8 +287,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
281 do { 287 do {
282 unsigned int sg_size, alloc_size = left; 288 unsigned int sg_size, alloc_size = left;
283 289
284 if (alloc_size > max_ents) { 290 if (alloc_size > curr_max_ents) {
285 alloc_size = max_ents; 291 alloc_size = curr_max_ents;
286 sg_size = alloc_size - 1; 292 sg_size = alloc_size - 1;
287 } else 293 } else
288 sg_size = alloc_size; 294 sg_size = alloc_size;
@@ -316,7 +322,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
316 * If this is not the first mapping, chain previous part. 322 * If this is not the first mapping, chain previous part.
317 */ 323 */
318 if (prv) 324 if (prv)
319 sg_chain(prv, max_ents, sg); 325 sg_chain(prv, prv_max_ents, sg);
320 else 326 else
321 table->sgl = sg; 327 table->sgl = sg;
322 328
@@ -327,6 +333,8 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
327 sg_mark_end(&sg[sg_size - 1]); 333 sg_mark_end(&sg[sg_size - 1]);
328 334
329 prv = sg; 335 prv = sg;
336 prv_max_ents = curr_max_ents;
337 curr_max_ents = max_ents;
330 } while (left); 338 } while (left);
331 339
332 return 0; 340 return 0;
@@ -349,9 +357,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
349 int ret; 357 int ret;
350 358
351 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, 359 ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
352 NULL, gfp_mask, sg_kmalloc); 360 NULL, 0, gfp_mask, sg_kmalloc);
353 if (unlikely(ret)) 361 if (unlikely(ret))
354 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); 362 __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
355 363
356 return ret; 364 return ret;
357} 365}
@@ -678,17 +686,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
678{ 686{
679 if (!miter->__remaining) { 687 if (!miter->__remaining) {
680 struct scatterlist *sg; 688 struct scatterlist *sg;
681 unsigned long pgoffset;
682 689
683 if (!__sg_page_iter_next(&miter->piter)) 690 if (!__sg_page_iter_next(&miter->piter))
684 return false; 691 return false;
685 692
686 sg = miter->piter.sg; 693 sg = miter->piter.sg;
687 pgoffset = miter->piter.sg_pgoffset;
688 694
689 miter->__offset = pgoffset ? 0 : sg->offset; 695 miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
696 miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
697 miter->__offset &= PAGE_SIZE - 1;
690 miter->__remaining = sg->offset + sg->length - 698 miter->__remaining = sg->offset + sg->length -
691 (pgoffset << PAGE_SHIFT) - miter->__offset; 699 (miter->piter.sg_pgoffset << PAGE_SHIFT) -
700 miter->__offset;
692 miter->__remaining = min_t(unsigned long, miter->__remaining, 701 miter->__remaining = min_t(unsigned long, miter->__remaining,
693 PAGE_SIZE - miter->__offset); 702 PAGE_SIZE - miter->__offset);
694 } 703 }
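To see how the new nents_first_chunk plumbing in __sg_alloc_table() above changes chunk sizing: only the first chunk may have a non-default capacity, every non-final chunk sacrifices its last entry as a chain pointer, and after the first pass curr_max_ents falls back to max_ents. A user-space sketch of just the sizing loop (the numbers in main() are made up for illustration):

    #include <stdio.h>

    /*
     * Mirrors the sizing loop in __sg_alloc_table() above: each chunk holds
     * up to curr_max_ents entries, the last entry of every non-final chunk
     * is burned as a chain pointer, and only the first chunk may differ.
     */
    static void plan_chunks(unsigned int nents, unsigned int max_ents,
                            unsigned int nents_first_chunk)
    {
            unsigned int curr_max_ents =
                    nents_first_chunk ? nents_first_chunk : max_ents;
            unsigned int left = nents;

            do {
                    unsigned int sg_size, alloc_size = left;

                    if (alloc_size > curr_max_ents) {
                            alloc_size = curr_max_ents;
                            sg_size = alloc_size - 1;  /* room for chain entry */
                    } else {
                            sg_size = alloc_size;
                    }
                    left -= sg_size;
                    printf("chunk: alloc %u entries, %u usable\n",
                           alloc_size, sg_size);
                    curr_max_ents = max_ents;
            } while (left);
    }

    int main(void)
    {
            plan_chunks(300, 128, 16);  /* e.g. a 16-entry preallocated first chunk */
            return 0;
    }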
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index cff20df2695e..db29e5c1f790 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -70,18 +70,27 @@ static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
70/** 70/**
71 * sg_free_table_chained - Free a previously mapped sg table 71 * sg_free_table_chained - Free a previously mapped sg table
72 * @table: The sg table header to use 72 * @table: The sg table header to use
73 * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained? 73 * @nents_first_chunk: size of the first_chunk SGL passed to
74 * sg_alloc_table_chained
74 * 75 *
75 * Description: 76 * Description:
76 * Free an sg table previously allocated and setup with 77 * Free an sg table previously allocated and setup with
77 * sg_alloc_table_chained(). 78 * sg_alloc_table_chained().
78 * 79 *
80 * @nents_first_chunk must match the value passed
81 * to sg_alloc_table_chained().
82 *
79 **/ 83 **/
80void sg_free_table_chained(struct sg_table *table, bool first_chunk) 84void sg_free_table_chained(struct sg_table *table,
85 unsigned nents_first_chunk)
81{ 86{
82 if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE) 87 if (table->orig_nents <= nents_first_chunk)
83 return; 88 return;
84 __sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free); 89
90 if (nents_first_chunk == 1)
91 nents_first_chunk = 0;
92
93 __sg_free_table(table, SG_CHUNK_SIZE, nents_first_chunk, sg_pool_free);
85} 94}
86EXPORT_SYMBOL_GPL(sg_free_table_chained); 95EXPORT_SYMBOL_GPL(sg_free_table_chained);
87 96
@@ -90,31 +99,41 @@ EXPORT_SYMBOL_GPL(sg_free_table_chained);
90 * @table: The sg table header to use 99 * @table: The sg table header to use
91 * @nents: Number of entries in sg list 100 * @nents: Number of entries in sg list
92 * @first_chunk: first SGL 101 * @first_chunk: first SGL
102 * @nents_first_chunk: number of entries in the @first_chunk SGL
93 * 103 *
94 * Description: 104 * Description:
95 * Allocate and chain SGLs in an sg table. If @nents@ is larger than 105 * Allocate and chain SGLs in an sg table. If @nents@ is larger than
 96 * SG_CHUNK_SIZE a chained sg table will be set up. 106 * @nents_first_chunk a chained sg table will be set up. @first_chunk is
107 * ignored if nents_first_chunk <= 1 because the user expects a
108 * non-chain SGL.
97 * 109 *
98 **/ 110 **/
99int sg_alloc_table_chained(struct sg_table *table, int nents, 111int sg_alloc_table_chained(struct sg_table *table, int nents,
100 struct scatterlist *first_chunk) 112 struct scatterlist *first_chunk, unsigned nents_first_chunk)
101{ 113{
102 int ret; 114 int ret;
103 115
104 BUG_ON(!nents); 116 BUG_ON(!nents);
105 117
106 if (first_chunk) { 118 if (first_chunk && nents_first_chunk) {
107 if (nents <= SG_CHUNK_SIZE) { 119 if (nents <= nents_first_chunk) {
108 table->nents = table->orig_nents = nents; 120 table->nents = table->orig_nents = nents;
109 sg_init_table(table->sgl, nents); 121 sg_init_table(table->sgl, nents);
110 return 0; 122 return 0;
111 } 123 }
112 } 124 }
113 125
126 /* The user expects the first SGL to contain real entries */
127 if (nents_first_chunk <= 1) {
128 first_chunk = NULL;
129 nents_first_chunk = 0;
130 }
131
114 ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE, 132 ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
115 first_chunk, GFP_ATOMIC, sg_pool_alloc); 133 first_chunk, nents_first_chunk,
134 GFP_ATOMIC, sg_pool_alloc);
116 if (unlikely(ret)) 135 if (unlikely(ret))
117 sg_free_table_chained(table, (bool)first_chunk); 136 sg_free_table_chained(table, nents_first_chunk);
118 return ret; 137 return ret;
119} 138}
120EXPORT_SYMBOL_GPL(sg_alloc_table_chained); 139EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
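With the signature change above, callers now tell sg_pool how big their preallocated first SGL really is, and must pass the same value when freeing. A hedged sketch of a hypothetical caller (the struct, function names, and the SG_CHUNK_SIZE-sized inline buffer are assumptions for illustration, not code from this patch):

    #include <linux/scatterlist.h>

    /* Hypothetical request structure with an inline first chunk. */
    struct my_request {
            struct sg_table table;
            struct scatterlist first_chunk[SG_CHUNK_SIZE];
    };

    static int my_map(struct my_request *req, int nents)
    {
            /*
             * The early-return path in sg_alloc_table_chained() above uses
             * table->sgl, so point it at the inline buffer up front.
             */
            req->table.sgl = req->first_chunk;

            return sg_alloc_table_chained(&req->table, nents,
                                          req->first_chunk, SG_CHUNK_SIZE);
    }

    static void my_unmap(struct my_request *req)
    {
            /* Must match the nents_first_chunk used at allocation time. */
            sg_free_table_chained(&req->table, SG_CHUNK_SIZE);
    }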
diff --git a/lib/sg_split.c b/lib/sg_split.c
index b063410c3593..9982c63d1063 100644
--- a/lib/sg_split.c
+++ b/lib/sg_split.c
@@ -1,10 +1,8 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr> 3 * Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
3 * 4 *
4 * Scatterlist splitting helpers. 5 * Scatterlist splitting helpers.
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2. See the file COPYING for more details.
8 */ 6 */
9 7
10#include <linux/scatterlist.h> 8#include <linux/scatterlist.h>
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 157d9e31f6c2..60ba93fc42ce 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
23 * Kernel threads bound to a single CPU can safely use 23 * Kernel threads bound to a single CPU can safely use
24 * smp_processor_id(): 24 * smp_processor_id():
25 */ 25 */
26 if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu))) 26 if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
27 goto out; 27 goto out;
28 28
29 /* 29 /*
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 4403e1924f73..3a90a9e2b94a 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -540,6 +540,25 @@ int string_escape_mem(const char *src, size_t isz, char *dst, size_t osz,
540} 540}
541EXPORT_SYMBOL(string_escape_mem); 541EXPORT_SYMBOL(string_escape_mem);
542 542
543int string_escape_mem_ascii(const char *src, size_t isz, char *dst,
544 size_t osz)
545{
546 char *p = dst;
547 char *end = p + osz;
548
549 while (isz--) {
550 unsigned char c = *src++;
551
552 if (!isprint(c) || !isascii(c) || c == '"' || c == '\\')
553 escape_hex(c, &p, end);
554 else
555 escape_passthrough(c, &p, end);
556 }
557
558 return p - dst;
559}
560EXPORT_SYMBOL(string_escape_mem_ascii);
561
543/* 562/*
544 * Return an allocated string that has been escaped of special characters 563 * Return an allocated string that has been escaped of special characters
545 * and double quotes, making it safe to log in quotes. 564 * and double quotes, making it safe to log in quotes.
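Since the new string_escape_mem_ascii() above is easiest to understand on concrete bytes, here is a hedged kernel-context sketch (buffer contents and sizes are illustrative). Like string_escape_mem(), it returns the full escaped length, which may exceed @osz when the output is truncated:

    #include <linux/printk.h>
    #include <linux/string_helpers.h>

    static void demo_escape_ascii(void)
    {
            const char src[] = "caf\xc3\xa9 \"x\"";  /* UTF-8 bytes plus quotes */
            char dst[64];
            int n;

            /* Non-ASCII, non-printable, '"' and '\\' bytes become \xAB escapes. */
            n = string_escape_mem_ascii(src, sizeof(src) - 1, dst, sizeof(dst));
            pr_info("escaped %d bytes: %.*s\n", n, n, dst);
    }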
diff --git a/lib/test_blackhole_dev.c b/lib/test_blackhole_dev.c
new file mode 100644
index 000000000000..4c40580a99a3
--- /dev/null
+++ b/lib/test_blackhole_dev.c
@@ -0,0 +1,100 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * This module tests the blackhole_dev that is created during the
4 * net subsystem initialization. The test injects an skb into the
5 * stack with skb->dev set to the blackhole_dev and expects the
6 * kernel to behave in a sane manner (in other words,
7 * *not crash*)!
8 *
9 * Copyright (c) 2018, Mahesh Bandewar <maheshb@google.com>
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/printk.h>
15#include <linux/skbuff.h>
16#include <linux/netdevice.h>
17#include <linux/udp.h>
18#include <linux/ipv6.h>
19
20#include <net/dst.h>
21
22#define SKB_SIZE 256
23#define HEAD_SIZE (14+40+8) /* Ether + IPv6 + UDP */
24#define TAIL_SIZE 32 /* random tail-room */
25
26#define UDP_PORT 1234
27
28static int __init test_blackholedev_init(void)
29{
30 struct ipv6hdr *ip6h;
31 struct sk_buff *skb;
32 struct ethhdr *ethh;
33 struct udphdr *uh;
34 int data_len;
35 int ret;
36
37 skb = alloc_skb(SKB_SIZE, GFP_KERNEL);
38 if (!skb)
39 return -ENOMEM;
40
41 /* Reserve head-room for the headers */
42 skb_reserve(skb, HEAD_SIZE);
43
44 /* Add data to the skb */
45 data_len = SKB_SIZE - (HEAD_SIZE + TAIL_SIZE);
46 memset(__skb_put(skb, data_len), 0xf, data_len);
47
48 /* Add protocol data */
49 /* (Transport) UDP */
50 uh = (struct udphdr *)skb_push(skb, sizeof(struct udphdr));
51 skb_set_transport_header(skb, 0);
52 uh->source = uh->dest = htons(UDP_PORT);
53 uh->len = htons(data_len);
54 uh->check = 0;
55 /* (Network) IPv6 */
56 ip6h = (struct ipv6hdr *)skb_push(skb, sizeof(struct ipv6hdr));
57 skb_set_network_header(skb, 0);
58 ip6h->hop_limit = 32;
59 ip6h->payload_len = htons(data_len + sizeof(struct udphdr)); /* __be16 */
60 ip6h->nexthdr = IPPROTO_UDP;
61 ip6h->saddr = in6addr_loopback;
62 ip6h->daddr = in6addr_loopback;
63 /* Ether */
64 ethh = (struct ethhdr *)skb_push(skb, sizeof(struct ethhdr));
65 skb_set_mac_header(skb, 0);
66
67 skb->protocol = htons(ETH_P_IPV6);
68 skb->pkt_type = PACKET_HOST;
69 skb->dev = blackhole_netdev;
70
71 /* Now attempt to send the packet */
72 ret = dev_queue_xmit(skb);
73
74 switch (ret) {
75 case NET_XMIT_SUCCESS:
76 pr_warn("dev_queue_xmit() returned NET_XMIT_SUCCESS\n");
77 break;
78 case NET_XMIT_DROP:
79 pr_warn("dev_queue_xmit() returned NET_XMIT_DROP\n");
80 break;
81 case NET_XMIT_CN:
82 pr_warn("dev_queue_xmit() returned NET_XMIT_CN\n");
83 break;
84 default:
85 pr_err("dev_queue_xmit() returned UNKNOWN(%d)\n", ret);
86 }
87
88 return 0;
89}
90
91static void __exit test_blackholedev_exit(void)
92{
93 pr_warn("test_blackholedev module terminating.\n");
94}
95
96module_init(test_blackholedev_init);
97module_exit(test_blackholedev_exit);
98
99MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
100MODULE_LICENSE("GPL");
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 7de2702621dc..b63b367a94e8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -1,26 +1,23 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * 3 *
3 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
4 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */ 6 */
11 7
12#define pr_fmt(fmt) "kasan test: %s " fmt, __func__ 8#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
13 9
10#include <linux/bitops.h>
14#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/kasan.h>
15#include <linux/kernel.h> 13#include <linux/kernel.h>
16#include <linux/mman.h>
17#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/module.h>
18#include <linux/printk.h> 17#include <linux/printk.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
20#include <linux/string.h> 19#include <linux/string.h>
21#include <linux/uaccess.h> 20#include <linux/uaccess.h>
22#include <linux/module.h>
23#include <linux/kasan.h>
24 21
25/* 22/*
26 * Note: test functions are marked noinline so that their names appear in 23 * Note: test functions are marked noinline so that their names appear in
@@ -623,6 +620,95 @@ static noinline void __init kasan_strings(void)
623 strnlen(ptr, 1); 620 strnlen(ptr, 1);
624} 621}
625 622
623static noinline void __init kasan_bitops(void)
624{
625 /*
626 * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
627 * this way we do not actually corrupt other memory.
628 */
629 long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
630 if (!bits)
631 return;
632
633 /*
634 * The calls below try to access a bit within the allocated memory, but
635 * the accesses are still out-of-bounds, since bitops are defined to
636 * operate on the whole long the bit is in.
637 */
638 pr_info("out-of-bounds in set_bit\n");
639 set_bit(BITS_PER_LONG, bits);
640
641 pr_info("out-of-bounds in __set_bit\n");
642 __set_bit(BITS_PER_LONG, bits);
643
644 pr_info("out-of-bounds in clear_bit\n");
645 clear_bit(BITS_PER_LONG, bits);
646
647 pr_info("out-of-bounds in __clear_bit\n");
648 __clear_bit(BITS_PER_LONG, bits);
649
650 pr_info("out-of-bounds in clear_bit_unlock\n");
651 clear_bit_unlock(BITS_PER_LONG, bits);
652
653 pr_info("out-of-bounds in __clear_bit_unlock\n");
654 __clear_bit_unlock(BITS_PER_LONG, bits);
655
656 pr_info("out-of-bounds in change_bit\n");
657 change_bit(BITS_PER_LONG, bits);
658
659 pr_info("out-of-bounds in __change_bit\n");
660 __change_bit(BITS_PER_LONG, bits);
661
662 /*
663 * The calls below try to access a bit beyond the allocated memory.
664 */
665 pr_info("out-of-bounds in test_and_set_bit\n");
666 test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
667
668 pr_info("out-of-bounds in __test_and_set_bit\n");
669 __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
670
671 pr_info("out-of-bounds in test_and_set_bit_lock\n");
672 test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits);
673
674 pr_info("out-of-bounds in test_and_clear_bit\n");
675 test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
676
677 pr_info("out-of-bounds in __test_and_clear_bit\n");
678 __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
679
680 pr_info("out-of-bounds in test_and_change_bit\n");
681 test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
682
683 pr_info("out-of-bounds in __test_and_change_bit\n");
684 __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
685
686 pr_info("out-of-bounds in test_bit\n");
687 (void)test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits);
688
689#if defined(clear_bit_unlock_is_negative_byte)
690 pr_info("out-of-bounds in clear_bit_unlock_is_negative_byte\n");
691 clear_bit_unlock_is_negative_byte(BITS_PER_LONG + BITS_PER_BYTE, bits);
692#endif
693 kfree(bits);
694}
695
696static noinline void __init kmalloc_double_kzfree(void)
697{
698 char *ptr;
699 size_t size = 16;
700
701 pr_info("double-free (kzfree)\n");
702 ptr = kmalloc(size, GFP_KERNEL);
703 if (!ptr) {
704 pr_err("Allocation failed\n");
705 return;
706 }
707
708 kzfree(ptr);
709 kzfree(ptr);
710}
711
626static int __init kmalloc_tests_init(void) 712static int __init kmalloc_tests_init(void)
627{ 713{
628 /* 714 /*
@@ -664,6 +750,8 @@ static int __init kmalloc_tests_init(void)
664 kasan_memchr(); 750 kasan_memchr();
665 kasan_memcmp(); 751 kasan_memcmp();
666 kasan_strings(); 752 kasan_strings();
753 kasan_bitops();
754 kmalloc_double_kzfree();
667 755
668 kasan_restore_multi_shot(multishot); 756 kasan_restore_multi_shot(multishot);
669 757
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 084fe5a6ac57..c5a6fef7b45d 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -1,12 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * Resizable, Scalable, Concurrent Hash Table 3 * Resizable, Scalable, Concurrent Hash Table
3 * 4 *
4 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> 5 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
5 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> 6 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */ 7 */
11 8
12/************************************************************************** 9/**************************************************************************
diff --git a/lib/test_stackinit.c b/lib/test_stackinit.c
index e97dc54b4fdf..2d7d257a430e 100644
--- a/lib/test_stackinit.c
+++ b/lib/test_stackinit.c
@@ -12,7 +12,7 @@
12 12
13/* Exfiltration buffer. */ 13/* Exfiltration buffer. */
14#define MAX_VAR_SIZE 128 14#define MAX_VAR_SIZE 128
15static char check_buf[MAX_VAR_SIZE]; 15static u8 check_buf[MAX_VAR_SIZE];
16 16
17/* Character array to trigger stack protector in all functions. */ 17/* Character array to trigger stack protector in all functions. */
18#define VAR_BUFFER 32 18#define VAR_BUFFER 32
@@ -106,9 +106,18 @@ static noinline __init int test_ ## name (void) \
106 \ 106 \
107 /* Fill clone type with zero for per-field init. */ \ 107 /* Fill clone type with zero for per-field init. */ \
108 memset(&zero, 0x00, sizeof(zero)); \ 108 memset(&zero, 0x00, sizeof(zero)); \
109 /* Clear entire check buffer for 0xFF overlap test. */ \
110 memset(check_buf, 0x00, sizeof(check_buf)); \
109 /* Fill stack with 0xFF. */ \ 111 /* Fill stack with 0xFF. */ \
110 ignored = leaf_ ##name((unsigned long)&ignored, 1, \ 112 ignored = leaf_ ##name((unsigned long)&ignored, 1, \
111 FETCH_ARG_ ## which(zero)); \ 113 FETCH_ARG_ ## which(zero)); \
114 /* Verify all bytes overwritten with 0xFF. */ \
115 for (sum = 0, i = 0; i < target_size; i++) \
116 sum += (check_buf[i] != 0xFF); \
117 if (sum) { \
118 pr_err(#name ": leaf fill was not 0xFF!?\n"); \
119 return 1; \
120 } \
112 /* Clear entire check buffer for later bit tests. */ \ 121 /* Clear entire check buffer for later bit tests. */ \
113 memset(check_buf, 0x00, sizeof(check_buf)); \ 122 memset(check_buf, 0x00, sizeof(check_buf)); \
114 /* Extract stack-defined variable contents. */ \ 123 /* Extract stack-defined variable contents. */ \
@@ -126,9 +135,9 @@ static noinline __init int test_ ## name (void) \
126 return 1; \ 135 return 1; \
127 } \ 136 } \
128 \ 137 \
129 /* Look for any set bits in the check region. */ \ 138 /* Look for any bytes still 0xFF in check region. */ \
130 for (i = 0; i < sizeof(check_buf); i++) \ 139 for (sum = 0, i = 0; i < target_size; i++) \
131 sum += (check_buf[i] != 0); \ 140 sum += (check_buf[i] == 0xFF); \
132 \ 141 \
133 if (sum == 0) \ 142 if (sum == 0) \
134 pr_info(#name " ok\n"); \ 143 pr_info(#name " ok\n"); \
@@ -162,13 +171,13 @@ static noinline __init int leaf_ ## name(unsigned long sp, \
162 * Keep this buffer around to make sure we've got a \ 171 * Keep this buffer around to make sure we've got a \
163 * stack frame of SOME kind... \ 172 * stack frame of SOME kind... \
164 */ \ 173 */ \
165 memset(buf, (char)(sp && 0xff), sizeof(buf)); \ 174 memset(buf, (char)(sp & 0xff), sizeof(buf)); \
166 /* Fill variable with 0xFF. */ \ 175 /* Fill variable with 0xFF. */ \
167 if (fill) { \ 176 if (fill) { \
168 fill_start = &var; \ 177 fill_start = &var; \
169 fill_size = sizeof(var); \ 178 fill_size = sizeof(var); \
170 memset(fill_start, \ 179 memset(fill_start, \
171 (char)((sp && 0xff) | forced_mask), \ 180 (char)((sp & 0xff) | forced_mask), \
172 fill_size); \ 181 fill_size); \
173 } \ 182 } \
174 \ 183 \
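The test_stackinit fix above swaps a logical && for a bitwise &: `sp && 0xff` collapses to 0 or 1, so the fill byte was almost always 0x01 instead of the low byte of the stack pointer. A two-line user-space demonstration (the address is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long sp = 0xffffc90000a3f7c8UL;  /* made-up stack address */

            /* Logical AND collapses the result to 0 or 1... */
            printf("sp && 0xff = %#lx\n", (unsigned long)(sp && 0xff));
            /* ...while bitwise AND extracts the low byte. */
            printf("sp &  0xff = %#lx\n", sp & 0xff);
            return 0;
    }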
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 5d4bad8bd96a..9d631a7b6a70 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -38,6 +38,12 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
38 return xa_store(xa, index, xa_mk_index(index), gfp); 38 return xa_store(xa, index, xa_mk_index(index), gfp);
39} 39}
40 40
41static void xa_insert_index(struct xarray *xa, unsigned long index)
42{
43 XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index),
44 GFP_KERNEL) != 0);
45}
46
41static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 47static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
42{ 48{
43 u32 id; 49 u32 id;
@@ -338,6 +344,37 @@ static noinline void check_xa_shrink(struct xarray *xa)
338 } 344 }
339} 345}
340 346
347static noinline void check_insert(struct xarray *xa)
348{
349 unsigned long i;
350
351 for (i = 0; i < 1024; i++) {
352 xa_insert_index(xa, i);
353 XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL);
354 XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL);
355 xa_erase_index(xa, i);
356 }
357
358 for (i = 10; i < BITS_PER_LONG; i++) {
359 xa_insert_index(xa, 1UL << i);
360 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL);
361 XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL);
362 xa_erase_index(xa, 1UL << i);
363
364 xa_insert_index(xa, (1UL << i) - 1);
365 XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL);
366 XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL);
367 xa_erase_index(xa, (1UL << i) - 1);
368 }
369
370 xa_insert_index(xa, ~0UL);
371 XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL);
372 XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL);
373 xa_erase_index(xa, ~0UL);
374
375 XA_BUG_ON(xa, !xa_empty(xa));
376}
377
341static noinline void check_cmpxchg(struct xarray *xa) 378static noinline void check_cmpxchg(struct xarray *xa)
342{ 379{
343 void *FIVE = xa_mk_value(5); 380 void *FIVE = xa_mk_value(5);
@@ -1527,6 +1564,7 @@ static int xarray_checks(void)
1527 check_xa_mark(&array); 1564 check_xa_mark(&array);
1528 check_xa_shrink(&array); 1565 check_xa_shrink(&array);
1529 check_xas_erase(&array); 1566 check_xas_erase(&array);
1567 check_insert(&array);
1530 check_cmpxchg(&array); 1568 check_cmpxchg(&array);
1531 check_reserve(&array); 1569 check_reserve(&array);
1532 check_reserve(&xa0); 1570 check_reserve(&xa0);
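The new check_insert() above leans on the xa_insert() contract that storing to an occupied index fails rather than overwriting. A minimal kernel-context sketch of that behaviour (the array name and demo function are illustrative):

    #include <linux/xarray.h>

    static DEFINE_XARRAY(demo_xa);

    static void demo_insert(void)
    {
            int err;

            err = xa_insert(&demo_xa, 5, xa_mk_value(5), GFP_KERNEL);
            /* err == 0: the slot was empty and is now populated */

            err = xa_insert(&demo_xa, 5, xa_mk_value(99), GFP_KERNEL);
            /* err == -EBUSY: unlike xa_store(), xa_insert() refuses to overwrite */

            xa_erase(&demo_xa, 5);
    }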
diff --git a/lib/ubsan.c b/lib/ubsan.c
index ecc179338094..e7d31735950d 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -1,13 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0-only
1/* 2/*
2 * UBSAN error reporting functions 3 * UBSAN error reporting functions
3 * 4 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 5 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> 6 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */ 7 */
12 8
13#include <linux/bitops.h> 9#include <linux/bitops.h>
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
new file mode 100644
index 000000000000..cc00364bd2c2
--- /dev/null
+++ b/lib/vdso/Kconfig
@@ -0,0 +1,36 @@
1# SPDX-License-Identifier: GPL-2.0
2
3config HAVE_GENERIC_VDSO
4 bool
5
6if HAVE_GENERIC_VDSO
7
8config GENERIC_GETTIMEOFDAY
9 bool
10 help
11 This is a generic implementation of gettimeofday vdso.
12 Each architecture that enables this feature has to
13 provide the fallback implementation.
14
15config GENERIC_VDSO_32
16 bool
17 depends on GENERIC_GETTIMEOFDAY && !64BIT
18 help
19 This config option helps to avoid possible performance issues
20 in 32 bit only architectures.
21
22config GENERIC_COMPAT_VDSO
23 bool
24 help
25 This config option enables the compat VDSO layer.
26
27config CROSS_COMPILE_COMPAT_VDSO
28 string "32 bit Toolchain prefix for compat vDSO"
29 default ""
30 depends on GENERIC_COMPAT_VDSO
31 help
32 Defines the cross-compiler prefix for compiling the compat vDSO.
33 If a 64 bit compiler (e.g. on x86_64) can also compile the vDSO
34 for 32 bit, this parameter does not need to be defined.
35
36endif
diff --git a/lib/vdso/Makefile b/lib/vdso/Makefile
new file mode 100644
index 000000000000..c415a685d61b
--- /dev/null
+++ b/lib/vdso/Makefile
@@ -0,0 +1,22 @@
1# SPDX-License-Identifier: GPL-2.0
2
3GENERIC_VDSO_MK_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
4GENERIC_VDSO_DIR := $(dir $(GENERIC_VDSO_MK_PATH))
5
6c-gettimeofday-$(CONFIG_GENERIC_GETTIMEOFDAY) := $(addprefix $(GENERIC_VDSO_DIR), gettimeofday.c)
7
8# This cmd checks that the vdso library does not contain absolute relocations.
9# It has to be called after the linking of the vdso library and requires it
10# as a parameter.
11#
12# $(ARCH_REL_TYPE_ABS) is defined in the arch specific makefile and corresponds
13# to the absolute relocation types printed by "objdump -R" and accepted by the
14# dynamic linker.
15ifndef ARCH_REL_TYPE_ABS
16$(error ARCH_REL_TYPE_ABS is not set)
17endif
18
19quiet_cmd_vdso_check = VDSOCHK $@
20 cmd_vdso_check = if $(OBJDUMP) -R $@ | egrep -h "$(ARCH_REL_TYPE_ABS)"; \
21 then (echo >&2 "$@: dynamic relocations are not supported"; \
22 rm -f $@; /bin/false); fi
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
new file mode 100644
index 000000000000..2d1c1f241fd9
--- /dev/null
+++ b/lib/vdso/gettimeofday.c
@@ -0,0 +1,239 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Generic userspace implementations of gettimeofday() and similar.
4 */
5#include <linux/compiler.h>
6#include <linux/math64.h>
7#include <linux/time.h>
8#include <linux/kernel.h>
9#include <linux/hrtimer_defs.h>
10#include <vdso/datapage.h>
11#include <vdso/helpers.h>
12
13/*
14 * The generic vDSO implementation requires that gettimeofday.h
15 * provides:
16 * - __arch_get_vdso_data(): to get the vdso datapage.
17 * - __arch_get_hw_counter(): to get the hw counter based on the
18 * clock_mode.
19 * - gettimeofday_fallback(): fallback for gettimeofday.
20 * - clock_gettime_fallback(): fallback for clock_gettime.
21 * - clock_getres_fallback(): fallback for clock_getres.
22 */
23#ifdef ENABLE_COMPAT_VDSO
24#include <asm/vdso/compat_gettimeofday.h>
25#else
26#include <asm/vdso/gettimeofday.h>
27#endif /* ENABLE_COMPAT_VDSO */
28
29#ifndef vdso_calc_delta
30/*
31 * Default implementation which works for all sane clocksources. That
32 * obviously excludes x86/TSC.
33 */
34static __always_inline
35u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
36{
37 return ((cycles - last) & mask) * mult;
38}
39#endif
40
41static int do_hres(const struct vdso_data *vd, clockid_t clk,
42 struct __kernel_timespec *ts)
43{
44 const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
45 u64 cycles, last, sec, ns;
46 u32 seq;
47
48 do {
49 seq = vdso_read_begin(vd);
50 cycles = __arch_get_hw_counter(vd->clock_mode);
51 ns = vdso_ts->nsec;
52 last = vd->cycle_last;
53 if (unlikely((s64)cycles < 0))
54 return clock_gettime_fallback(clk, ts);
55
56 ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
57 ns >>= vd->shift;
58 sec = vdso_ts->sec;
59 } while (unlikely(vdso_read_retry(vd, seq)));
60
61 /*
62 * Do this outside the loop: a race inside the loop could result
63 * in __iter_div_u64_rem() being extremely slow.
64 */
65 ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
66 ts->tv_nsec = ns;
67
68 return 0;
69}
70
71static void do_coarse(const struct vdso_data *vd, clockid_t clk,
72 struct __kernel_timespec *ts)
73{
74 const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
75 u32 seq;
76
77 do {
78 seq = vdso_read_begin(vd);
79 ts->tv_sec = vdso_ts->sec;
80 ts->tv_nsec = vdso_ts->nsec;
81 } while (unlikely(vdso_read_retry(vd, seq)));
82}
83
84static __maybe_unused int
85__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
86{
87 const struct vdso_data *vd = __arch_get_vdso_data();
88 u32 msk;
89
90 /* Check for negative values or invalid clocks */
91 if (unlikely((u32) clock >= MAX_CLOCKS))
92 goto fallback;
93
94 /*
95 * Convert the clockid to a bitmask and use it to check which
96 * clocks are handled in the VDSO directly.
97 */
98 msk = 1U << clock;
99 if (likely(msk & VDSO_HRES)) {
100 return do_hres(&vd[CS_HRES_COARSE], clock, ts);
101 } else if (msk & VDSO_COARSE) {
102 do_coarse(&vd[CS_HRES_COARSE], clock, ts);
103 return 0;
104 } else if (msk & VDSO_RAW) {
105 return do_hres(&vd[CS_RAW], clock, ts);
106 }
107
108fallback:
109 return clock_gettime_fallback(clock, ts);
110}
111
112static __maybe_unused int
113__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
114{
115 struct __kernel_timespec ts;
116 int ret;
117
118 if (res == NULL)
119 goto fallback;
120
121 ret = __cvdso_clock_gettime(clock, &ts);
122
123 if (ret == 0) {
124 res->tv_sec = ts.tv_sec;
125 res->tv_nsec = ts.tv_nsec;
126 }
127
128 return ret;
129
130fallback:
131 return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
132}
133
134static __maybe_unused int
135__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
136{
137 const struct vdso_data *vd = __arch_get_vdso_data();
138
139 if (likely(tv != NULL)) {
140 struct __kernel_timespec ts;
141
142 if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
143 return gettimeofday_fallback(tv, tz);
144
145 tv->tv_sec = ts.tv_sec;
146 tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
147 }
148
149 if (unlikely(tz != NULL)) {
150 tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
151 tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
152 }
153
154 return 0;
155}
156
157#ifdef VDSO_HAS_TIME
158static __maybe_unused time_t __cvdso_time(time_t *time)
159{
160 const struct vdso_data *vd = __arch_get_vdso_data();
161 time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);
162
163 if (time)
164 *time = t;
165
166 return t;
167}
168#endif /* VDSO_HAS_TIME */
169
170#ifdef VDSO_HAS_CLOCK_GETRES
171static __maybe_unused
172int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
173{
174 const struct vdso_data *vd = __arch_get_vdso_data();
175 u64 ns;
176 u32 msk;
177 u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
178
179 /* Check for negative values or invalid clocks */
180 if (unlikely((u32) clock >= MAX_CLOCKS))
181 goto fallback;
182
183 /*
184 * Convert the clockid to a bitmask and use it to check which
185 * clocks are handled in the VDSO directly.
186 */
187 msk = 1U << clock;
188 if (msk & VDSO_HRES) {
189 /*
190 * Preserves the behaviour of posix_get_hrtimer_res().
191 */
192 ns = hrtimer_res;
193 } else if (msk & VDSO_COARSE) {
194 /*
195 * Preserves the behaviour of posix_get_coarse_res().
196 */
197 ns = LOW_RES_NSEC;
198 } else if (msk & VDSO_RAW) {
199 /*
200 * Preserves the behaviour of posix_get_hrtimer_res().
201 */
202 ns = hrtimer_res;
203 } else {
204 goto fallback;
205 }
206
207 if (res) {
208 res->tv_sec = 0;
209 res->tv_nsec = ns;
210 }
211
212 return 0;
213
214fallback:
215 return clock_getres_fallback(clock, res);
216}
217
218static __maybe_unused int
219__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
220{
221 struct __kernel_timespec ts;
222 int ret;
223
224 if (res == NULL)
225 goto fallback;
226
227 ret = __cvdso_clock_getres(clock, &ts);
228
229 if (ret == 0) {
230 res->tv_sec = ts.tv_sec;
231 res->tv_nsec = ts.tv_nsec;
232 }
233
234 return ret;
235
236fallback:
237 return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
238}
239#endif /* VDSO_HAS_CLOCK_GETRES */
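The block comment at the top of gettimeofday.c is the whole porting contract; a heavily hedged sketch of what an architecture's asm/vdso/gettimeofday.h might provide (everything below except the hook names taken from that comment is a placeholder, not a real interface):

    /* Sketch only -- not a real port. */
    extern struct vdso_data _vdso_data[CS_BASES];  /* placed in the vDSO data page */

    static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
    {
            return _vdso_data;
    }

    static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
    {
            if (clock_mode == MY_ARCH_CLOCK_MODE)   /* placeholder mode check */
                    return my_arch_read_counter();  /* placeholder counter read */

            /* A negative value makes do_hres() above take the syscall fallback. */
            return -1ULL;
    }

    static __always_inline
    int clock_gettime_fallback(clockid_t clkid, struct __kernel_timespec *ts)
    {
            /* placeholder syscall stub */
            return my_arch_syscall2(__NR_clock_gettime, clkid, (long)ts);
    }

    /* gettimeofday_fallback() and clock_getres_fallback() follow the same shape. */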
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 63937044c57d..b0967cf17137 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -599,7 +599,7 @@ static char *string_nocheck(char *buf, char *end, const char *s,
599 struct printf_spec spec) 599 struct printf_spec spec)
600{ 600{
601 int len = 0; 601 int len = 0;
602 size_t lim = spec.precision; 602 int lim = spec.precision;
603 603
604 while (lim--) { 604 while (lim--) {
605 char c = *s++; 605 char c = *s++;
@@ -1799,7 +1799,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1799#ifdef CONFIG_COMMON_CLK 1799#ifdef CONFIG_COMMON_CLK
1800 return string(buf, end, __clk_get_name(clk), spec); 1800 return string(buf, end, __clk_get_name(clk), spec);
1801#else 1801#else
1802 return error_string(buf, end, "(%pC?)", spec); 1802 return ptr_to_id(buf, end, clk, spec);
1803#endif 1803#endif
1804 } 1804 }
1805} 1805}
diff --git a/lib/xarray.c b/lib/xarray.c
index 6be3acbb861f..446b956c9188 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -298,6 +298,8 @@ bool xas_nomem(struct xa_state *xas, gfp_t gfp)
298 xas_destroy(xas); 298 xas_destroy(xas);
299 return false; 299 return false;
300 } 300 }
301 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
302 gfp |= __GFP_ACCOUNT;
301 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 303 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
302 if (!xas->xa_alloc) 304 if (!xas->xa_alloc)
303 return false; 305 return false;
@@ -325,6 +327,8 @@ static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
325 xas_destroy(xas); 327 xas_destroy(xas);
326 return false; 328 return false;
327 } 329 }
330 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
331 gfp |= __GFP_ACCOUNT;
328 if (gfpflags_allow_blocking(gfp)) { 332 if (gfpflags_allow_blocking(gfp)) {
329 xas_unlock_type(xas, lock_type); 333 xas_unlock_type(xas, lock_type);
330 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 334 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -358,8 +362,12 @@ static void *xas_alloc(struct xa_state *xas, unsigned int shift)
358 if (node) { 362 if (node) {
359 xas->xa_alloc = NULL; 363 xas->xa_alloc = NULL;
360 } else { 364 } else {
361 node = kmem_cache_alloc(radix_tree_node_cachep, 365 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;
362 GFP_NOWAIT | __GFP_NOWARN); 366
367 if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
368 gfp |= __GFP_ACCOUNT;
369
370 node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
363 if (!node) { 371 if (!node) {
364 xas_set_err(xas, -ENOMEM); 372 xas_set_err(xas, -ENOMEM);
365 return NULL; 373 return NULL;
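The three xarray hunks above propagate __GFP_ACCOUNT into internal node allocations whenever the array is marked XA_FLAGS_ACCOUNT, so tree nodes get charged to the caller's memory cgroup. A short sketch of opting in (the array name and demo function are illustrative):

    #include <linux/xarray.h>

    /* Nodes allocated on behalf of this array are charged to the memcg. */
    static DEFINE_XARRAY_FLAGS(accounted_xa, XA_FLAGS_ACCOUNT);

    static int demo_store(unsigned long index, void *entry)
    {
            /* Internal node allocations inherit __GFP_ACCOUNT automatically. */
            return xa_err(xa_store(&accounted_xa, index, entry, GFP_KERNEL));
    }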