Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |  16
-rw-r--r--  lib/Kconfig.debug       | 165
-rw-r--r--  lib/Makefile            |   5
-rw-r--r--  lib/bitmap.c            |   2
-rw-r--r--  lib/btree.c             |  10
-rw-r--r--  lib/bug.c               |   4
-rw-r--r--  lib/debugobjects.c      | 141
-rw-r--r--  lib/devres.c            |  78
-rw-r--r--  lib/dma-direct.c        |  35
-rw-r--r--  lib/dump_stack.c        |  60
-rw-r--r--  lib/int_sqrt.c          |  30
-rw-r--r--  lib/ioremap.c           |   6
-rw-r--r--  lib/kfifo.c             |   2
-rw-r--r--  lib/kobject.c           |  39
-rw-r--r--  lib/kobject_uevent.c    |  96
-rw-r--r--  lib/libcrc32c.c         |   6
-rw-r--r--  lib/logic_pio.c         | 280
-rw-r--r--  lib/percpu-refcount.c   |   2
-rw-r--r--  lib/raid6/.gitignore    |   1
-rw-r--r--  lib/raid6/Makefile      |  33
-rw-r--r--  lib/raid6/algos.c       |   7
-rw-r--r--  lib/raid6/altivec.uc    |   3
-rw-r--r--  lib/raid6/sse2.c        |  14
-rw-r--r--  lib/raid6/test/Makefile |  29
-rw-r--r--  lib/raid6/tilegx.uc     |  87
-rw-r--r--  lib/raid6/vpermxor.uc   | 105
-rw-r--r--  lib/rhashtable.c        |   6
-rw-r--r--  lib/sbitmap.c           |  10
-rw-r--r--  lib/scatterlist.c       |   9
-rw-r--r--  lib/swiotlb.c           |  77
-rw-r--r--  lib/test_bitmap.c       |   4
-rw-r--r--  lib/test_bpf.c          |  99
-rw-r--r--  lib/test_firmware.c     |   1
-rw-r--r--  lib/test_kmod.c         |   2
-rw-r--r--  lib/test_rhashtable.c   | 134
-rw-r--r--  lib/test_user_copy.c    |   3
-rw-r--r--  lib/zstd/Makefile       |  17
37 files changed, 1194 insertions, 424 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index e96089499371..5fe577673b98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -55,6 +55,22 @@ config ARCH_USE_CMPXCHG_LOCKREF
55config ARCH_HAS_FAST_MULTIPLIER 55config ARCH_HAS_FAST_MULTIPLIER
56 bool 56 bool
57 57
58config INDIRECT_PIO
59 bool "Access I/O in non-MMIO mode"
60 depends on ARM64
61 help
62 On some platforms with no separate I/O space, there are I/O
63 hosts that cannot be accessed in MMIO mode. Using the logical PIO
64 mechanism, the host-local I/O resource can be mapped into the
65 system's logical PIO space, shared with MMIO hosts such as PCI/PCIe,
66 so that the system can reach these I/O devices through the standard
67 I/O accessors.
68
69 This adds relatively little I/O performance cost. Please make
70 sure your devices really need this option enabled.
71
72 When in doubt, say N.
73
58config CRC_CCITT 74config CRC_CCITT
59 tristate "CRC-CCITT functions" 75 tristate "CRC-CCITT functions"
60 help 76 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64155e310a9f..51c6bf0d93c6 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -165,7 +165,7 @@ config DEBUG_INFO_REDUCED
165 165
166config DEBUG_INFO_SPLIT 166config DEBUG_INFO_SPLIT
167 bool "Produce split debuginfo in .dwo files" 167 bool "Produce split debuginfo in .dwo files"
168 depends on DEBUG_INFO && !FRV 168 depends on DEBUG_INFO
169 help 169 help
170 Generate debug info into separate .dwo files. This significantly 170 Generate debug info into separate .dwo files. This significantly
171 reduces the build directory size for builds with DEBUG_INFO, 171 reduces the build directory size for builds with DEBUG_INFO,
@@ -324,11 +324,11 @@ config DEBUG_SECTION_MISMATCH
324 the analysis would not catch the illegal reference. 324 the analysis would not catch the illegal reference.
325 This option tells gcc to inline less (but it does result in 325 This option tells gcc to inline less (but it does result in
326 a larger kernel). 326 a larger kernel).
327 - Run the section mismatch analysis for each module/built-in.o file. 327 - Run the section mismatch analysis for each module/built-in.a file.
328 When we run the section mismatch analysis on vmlinux.o, we 328 When we run the section mismatch analysis on vmlinux.o, we
329 lose valuable information about where the mismatch was 329 lose valuable information about where the mismatch was
330 introduced. 330 introduced.
331 Running the analysis for each module/built-in.o file 331 Running the analysis for each module/built-in.a file
332 tells where the mismatch happens much closer to the 332 tells where the mismatch happens much closer to the
333 source. The drawback is that the same mismatch is 333 source. The drawback is that the same mismatch is
334 reported at least twice. 334 reported at least twice.
@@ -354,10 +354,7 @@ config ARCH_WANT_FRAME_POINTERS
354 354
355config FRAME_POINTER 355config FRAME_POINTER
356 bool "Compile the kernel with frame pointers" 356 bool "Compile the kernel with frame pointers"
357 depends on DEBUG_KERNEL && \ 357 depends on DEBUG_KERNEL && (M68K || UML || SUPERH) || ARCH_WANT_FRAME_POINTERS
358 (CRIS || M68K || FRV || UML || \
359 SUPERH || BLACKFIN || MN10300 || METAG) || \
360 ARCH_WANT_FRAME_POINTERS
361 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS 358 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
362 help 359 help
363 If you say Y here the resulting kernel image will be slightly 360 If you say Y here the resulting kernel image will be slightly
@@ -1034,69 +1031,20 @@ config DEBUG_PREEMPT
1034 1031
1035menu "Lock Debugging (spinlocks, mutexes, etc...)" 1032menu "Lock Debugging (spinlocks, mutexes, etc...)"
1036 1033
1037config DEBUG_RT_MUTEXES 1034config LOCK_DEBUGGING_SUPPORT
1038 bool "RT Mutex debugging, deadlock detection" 1035 bool
1039 depends on DEBUG_KERNEL && RT_MUTEXES 1036 depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
1040 help 1037 default y
1041 This allows rt mutex semantics violations and rt mutex related
1042 deadlocks (lockups) to be detected and reported automatically.
1043
1044config DEBUG_SPINLOCK
1045 bool "Spinlock and rw-lock debugging: basic checks"
1046 depends on DEBUG_KERNEL
1047 select UNINLINE_SPIN_UNLOCK
1048 help
1049 Say Y here and build SMP to catch missing spinlock initialization
1050 and certain other kinds of spinlock errors commonly made. This is
1051 best used in conjunction with the NMI watchdog so that spinlock
1052 deadlocks are also debuggable.
1053
1054config DEBUG_MUTEXES
1055 bool "Mutex debugging: basic checks"
1056 depends on DEBUG_KERNEL
1057 help
1058 This feature allows mutex semantics violations to be detected and
1059 reported.
1060
1061config DEBUG_WW_MUTEX_SLOWPATH
1062 bool "Wait/wound mutex debugging: Slowpath testing"
1063 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
1064 select DEBUG_LOCK_ALLOC
1065 select DEBUG_SPINLOCK
1066 select DEBUG_MUTEXES
1067 help
1068 This feature enables slowpath testing for w/w mutex users by
1069 injecting additional -EDEADLK wound/backoff cases. Together with
1070 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
1071 will test all possible w/w mutex interface abuse with the
1072 exception of simply not acquiring all the required locks.
1073 Note that this feature can introduce significant overhead, so
1074 it really should not be enabled in a production or distro kernel,
1075 even a debug kernel. If you are a driver writer, enable it. If
1076 you are a distro, do not.
1077
1078config DEBUG_LOCK_ALLOC
1079 bool "Lock debugging: detect incorrect freeing of live locks"
1080 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
1081 select DEBUG_SPINLOCK
1082 select DEBUG_MUTEXES
1083 select DEBUG_RT_MUTEXES if RT_MUTEXES
1084 select LOCKDEP
1085 help
1086 This feature will check whether any held lock (spinlock, rwlock,
1087 mutex or rwsem) is incorrectly freed by the kernel, via any of the
1088 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
1089 vfree(), etc.), whether a live lock is incorrectly reinitialized via
1090 spin_lock_init()/mutex_init()/etc., or whether there is any lock
1091 held during task exit.
1092 1038
1093config PROVE_LOCKING 1039config PROVE_LOCKING
1094 bool "Lock debugging: prove locking correctness" 1040 bool "Lock debugging: prove locking correctness"
1095 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1041 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1096 select LOCKDEP 1042 select LOCKDEP
1097 select DEBUG_SPINLOCK 1043 select DEBUG_SPINLOCK
1098 select DEBUG_MUTEXES 1044 select DEBUG_MUTEXES
1099 select DEBUG_RT_MUTEXES if RT_MUTEXES 1045 select DEBUG_RT_MUTEXES if RT_MUTEXES
1046 select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
1047 select DEBUG_WW_MUTEX_SLOWPATH
1100 select DEBUG_LOCK_ALLOC 1048 select DEBUG_LOCK_ALLOC
1101 select TRACE_IRQFLAGS 1049 select TRACE_IRQFLAGS
1102 default n 1050 default n
@@ -1134,20 +1082,9 @@ config PROVE_LOCKING
1134 1082
1135 For more details, see Documentation/locking/lockdep-design.txt. 1083 For more details, see Documentation/locking/lockdep-design.txt.
1136 1084
1137config LOCKDEP
1138 bool
1139 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
1140 select STACKTRACE
1141 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
1142 select KALLSYMS
1143 select KALLSYMS_ALL
1144
1145config LOCKDEP_SMALL
1146 bool
1147
1148config LOCK_STAT 1085config LOCK_STAT
1149 bool "Lock usage statistics" 1086 bool "Lock usage statistics"
1150 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 1087 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1151 select LOCKDEP 1088 select LOCKDEP
1152 select DEBUG_SPINLOCK 1089 select DEBUG_SPINLOCK
1153 select DEBUG_MUTEXES 1090 select DEBUG_MUTEXES
@@ -1167,6 +1104,80 @@ config LOCK_STAT
1167 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. 1104 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
1168 (CONFIG_LOCKDEP defines "acquire" and "release" events.) 1105 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
1169 1106
1107config DEBUG_RT_MUTEXES
1108 bool "RT Mutex debugging, deadlock detection"
1109 depends on DEBUG_KERNEL && RT_MUTEXES
1110 help
1111 This allows rt mutex semantics violations and rt mutex related
1112 deadlocks (lockups) to be detected and reported automatically.
1113
1114config DEBUG_SPINLOCK
1115 bool "Spinlock and rw-lock debugging: basic checks"
1116 depends on DEBUG_KERNEL
1117 select UNINLINE_SPIN_UNLOCK
1118 help
1119 Say Y here and build SMP to catch missing spinlock initialization
1120 and certain other kinds of spinlock errors commonly made. This is
1121 best used in conjunction with the NMI watchdog so that spinlock
1122 deadlocks are also debuggable.
1123
1124config DEBUG_MUTEXES
1125 bool "Mutex debugging: basic checks"
1126 depends on DEBUG_KERNEL
1127 help
1128 This feature allows mutex semantics violations to be detected and
1129 reported.
1130
1131config DEBUG_WW_MUTEX_SLOWPATH
1132 bool "Wait/wound mutex debugging: Slowpath testing"
1133 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1134 select DEBUG_LOCK_ALLOC
1135 select DEBUG_SPINLOCK
1136 select DEBUG_MUTEXES
1137 help
1138 This feature enables slowpath testing for w/w mutex users by
1139 injecting additional -EDEADLK wound/backoff cases. Together with
1140 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
1141 will test all possible w/w mutex interface abuse with the
1142 exception of simply not acquiring all the required locks.
1143 Note that this feature can introduce significant overhead, so
1144 it really should not be enabled in a production or distro kernel,
1145 even a debug kernel. If you are a driver writer, enable it. If
1146 you are a distro, do not.
1147
1148config DEBUG_RWSEMS
1149 bool "RW Semaphore debugging: basic checks"
1150 depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
1151 help
1152 This debugging feature allows mismatched rw semaphore locks and unlocks
1153 to be detected and reported.
1154
1155config DEBUG_LOCK_ALLOC
1156 bool "Lock debugging: detect incorrect freeing of live locks"
1157 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1158 select DEBUG_SPINLOCK
1159 select DEBUG_MUTEXES
1160 select DEBUG_RT_MUTEXES if RT_MUTEXES
1161 select LOCKDEP
1162 help
1163 This feature will check whether any held lock (spinlock, rwlock,
1164 mutex or rwsem) is incorrectly freed by the kernel, via any of the
1165 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
1166 vfree(), etc.), whether a live lock is incorrectly reinitialized via
1167 spin_lock_init()/mutex_init()/etc., or whether there is any lock
1168 held during task exit.
1169
1170config LOCKDEP
1171 bool
1172 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1173 select STACKTRACE
1174 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
1175 select KALLSYMS
1176 select KALLSYMS_ALL
1177
1178config LOCKDEP_SMALL
1179 bool
1180
1170config DEBUG_LOCKDEP 1181config DEBUG_LOCKDEP
1171 bool "Lock dependency engine debugging" 1182 bool "Lock dependency engine debugging"
1172 depends on DEBUG_KERNEL && LOCKDEP 1183 depends on DEBUG_KERNEL && LOCKDEP
@@ -1571,7 +1582,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1571 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1582 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1572 depends on !X86_64 1583 depends on !X86_64
1573 select STACKTRACE 1584 select STACKTRACE
1574 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86 1585 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
1575 help 1586 help
1576 Provide stacktrace filter for fault-injection capabilities 1587 Provide stacktrace filter for fault-injection capabilities
1577 1588
@@ -1969,7 +1980,7 @@ config STRICT_DEVMEM
1969 bool "Filter access to /dev/mem" 1980 bool "Filter access to /dev/mem"
1970 depends on MMU && DEVMEM 1981 depends on MMU && DEVMEM
1971 depends on ARCH_HAS_DEVMEM_IS_ALLOWED 1982 depends on ARCH_HAS_DEVMEM_IS_ALLOWED
1972 default y if TILE || PPC || X86 || ARM64 1983 default y if PPC || X86 || ARM64
1973 ---help--- 1984 ---help---
1974 If this option is disabled, you allow userspace (root) access to all 1985 If this option is disabled, you allow userspace (root) access to all
1975 of memory, including kernel and userspace memory. Accidental 1986 of memory, including kernel and userspace memory. Accidental
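The newly added DEBUG_RWSEMS option above enables owner sanity checks on rw-semaphores, so mismatched lock/unlock pairs are reported at unlock time. A minimal sketch of the kind of bug it is meant to flag (hypothetical driver code, not part of this commit):

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
	down_read(&example_sem);
	/* ... read shared state ... */
	up_write(&example_sem);	/* bug: releases a write lock that was never taken */
}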
diff --git a/lib/Makefile b/lib/Makefile
index a90d4fcd748f..8fc0d3a9b34f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ KCOV_INSTRUMENT_debugobjects.o := n
18KCOV_INSTRUMENT_dynamic_debug.o := n 18KCOV_INSTRUMENT_dynamic_debug.o := n
19 19
20lib-y := ctype.o string.o vsprintf.o cmdline.o \ 20lib-y := ctype.o string.o vsprintf.o cmdline.o \
21 rbtree.o radix-tree.o dump_stack.o timerqueue.o\ 21 rbtree.o radix-tree.o timerqueue.o\
22 idr.o int_sqrt.o extable.o \ 22 idr.o int_sqrt.o extable.o \
23 sha1.o chacha20.o irq_regs.o argv_split.o \ 23 sha1.o chacha20.o irq_regs.o argv_split.o \
24 flex_proportions.o ratelimit.o show_mem.o \ 24 flex_proportions.o ratelimit.o show_mem.o \
@@ -26,6 +26,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
26 earlycpio.o seq_buf.o siphash.o \ 26 earlycpio.o seq_buf.o siphash.o \
27 nmi_backtrace.o nodemask.o win_minmax.o 27 nmi_backtrace.o nodemask.o win_minmax.o
28 28
29lib-$(CONFIG_PRINTK) += dump_stack.o
29lib-$(CONFIG_MMU) += ioremap.o 30lib-$(CONFIG_MMU) += ioremap.o
30lib-$(CONFIG_SMP) += cpumask.o 31lib-$(CONFIG_SMP) += cpumask.o
31lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o 32lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
@@ -81,6 +82,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
81obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o 82obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
82obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o 83obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
83 84
85obj-y += logic_pio.o
86
84obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 87obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
85 88
86obj-$(CONFIG_BTREE) += btree.o 89obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9e498c77ed0e..a42eff7e8c48 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -607,7 +607,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
607 /* if no digit is after '-', it's wrong*/ 607 /* if no digit is after '-', it's wrong*/
608 if (at_start && in_range) 608 if (at_start && in_range)
609 return -EINVAL; 609 return -EINVAL;
610 if (!(a <= b) || !(used_size <= group_size)) 610 if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
611 return -EINVAL; 611 return -EINVAL;
612 if (b >= nmaskbits) 612 if (b >= nmaskbits)
613 return -ERANGE; 613 return -ERANGE;
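The extra group_size check applies to the region syntax <start>-<end>:<used>/<group> accepted by bitmap_parselist(). A small usage sketch (assuming the usual kernel API; not code from this commit):

#include <linux/bitmap.h>

static int example_parse(void)
{
	DECLARE_BITMAP(mask, 32);

	/* "0-31:2/8" sets the first 2 bits of every 8-bit group in 0..31,
	 * i.e. bits 0, 1, 8, 9, 16, 17, 24 and 25. */
	if (bitmap_parselist("0-31:2/8", mask, 32))
		return -EINVAL;

	/* With the added check, a zero group size such as "0-31:2/0" is
	 * rejected with -EINVAL instead of being accepted. */
	return 0;
}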
diff --git a/lib/btree.c b/lib/btree.c
index f93a945274af..590facba2c50 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * As should be obvious for Linux kernel code, license is GPLv2 4 * As should be obvious for Linux kernel code, license is GPLv2
5 * 5 *
6 * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org> 6 * Copyright (c) 2007-2008 Joern Engel <joern@purestorage.com>
7 * Bits and pieces stolen from Peter Zijlstra's code, which is 7 * Bits and pieces stolen from Peter Zijlstra's code, which is
8 * Copyright 2007, Red Hat Inc. Peter Zijlstra 8 * Copyright 2007, Red Hat Inc. Peter Zijlstra
9 * GPLv2 9 * GPLv2
@@ -76,6 +76,8 @@ struct btree_geo btree_geo128 = {
76}; 76};
77EXPORT_SYMBOL_GPL(btree_geo128); 77EXPORT_SYMBOL_GPL(btree_geo128);
78 78
79#define MAX_KEYLEN (2 * LONG_PER_U64)
80
79static struct kmem_cache *btree_cachep; 81static struct kmem_cache *btree_cachep;
80 82
81void *btree_alloc(gfp_t gfp_mask, void *pool_data) 83void *btree_alloc(gfp_t gfp_mask, void *pool_data)
@@ -313,7 +315,7 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
313{ 315{
314 int i, height; 316 int i, height;
315 unsigned long *node, *oldnode; 317 unsigned long *node, *oldnode;
316 unsigned long *retry_key = NULL, key[geo->keylen]; 318 unsigned long *retry_key = NULL, key[MAX_KEYLEN];
317 319
318 if (keyzero(geo, __key)) 320 if (keyzero(geo, __key))
319 return NULL; 321 return NULL;
@@ -639,8 +641,8 @@ EXPORT_SYMBOL_GPL(btree_remove);
639int btree_merge(struct btree_head *target, struct btree_head *victim, 641int btree_merge(struct btree_head *target, struct btree_head *victim,
640 struct btree_geo *geo, gfp_t gfp) 642 struct btree_geo *geo, gfp_t gfp)
641{ 643{
642 unsigned long key[geo->keylen]; 644 unsigned long key[MAX_KEYLEN];
643 unsigned long dup[geo->keylen]; 645 unsigned long dup[MAX_KEYLEN];
644 void *val; 646 void *val;
645 int err; 647 int err;
646 648
diff --git a/lib/bug.c b/lib/bug.c
index c1b0fad31b10..1077366f496b 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -150,6 +150,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
150 return BUG_TRAP_TYPE_NONE; 150 return BUG_TRAP_TYPE_NONE;
151 151
152 bug = find_bug(bugaddr); 152 bug = find_bug(bugaddr);
153 if (!bug)
154 return BUG_TRAP_TYPE_NONE;
153 155
154 file = NULL; 156 file = NULL;
155 line = 0; 157 line = 0;
@@ -191,7 +193,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
191 if (file) 193 if (file)
192 pr_crit("kernel BUG at %s:%u!\n", file, line); 194 pr_crit("kernel BUG at %s:%u!\n", file, line);
193 else 195 else
194 pr_crit("Kernel BUG at %p [verbose debug info unavailable]\n", 196 pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
195 (void *)bugaddr); 197 (void *)bugaddr);
196 198
197 return BUG_TRAP_TYPE_BUG; 199 return BUG_TRAP_TYPE_BUG;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2f5349c6e81a..994be4805cec 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,14 +42,18 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
42static DEFINE_RAW_SPINLOCK(pool_lock); 42static DEFINE_RAW_SPINLOCK(pool_lock);
43 43
44static HLIST_HEAD(obj_pool); 44static HLIST_HEAD(obj_pool);
45static HLIST_HEAD(obj_to_free);
45 46
46static int obj_pool_min_free = ODEBUG_POOL_SIZE; 47static int obj_pool_min_free = ODEBUG_POOL_SIZE;
47static int obj_pool_free = ODEBUG_POOL_SIZE; 48static int obj_pool_free = ODEBUG_POOL_SIZE;
48static int obj_pool_used; 49static int obj_pool_used;
49static int obj_pool_max_used; 50static int obj_pool_max_used;
51/* The number of objs on the global free list */
52static int obj_nr_tofree;
50static struct kmem_cache *obj_cache; 53static struct kmem_cache *obj_cache;
51 54
52static int debug_objects_maxchain __read_mostly; 55static int debug_objects_maxchain __read_mostly;
56static int __maybe_unused debug_objects_maxchecked __read_mostly;
53static int debug_objects_fixups __read_mostly; 57static int debug_objects_fixups __read_mostly;
54static int debug_objects_warnings __read_mostly; 58static int debug_objects_warnings __read_mostly;
55static int debug_objects_enabled __read_mostly 59static int debug_objects_enabled __read_mostly
@@ -96,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
96static void fill_pool(void) 100static void fill_pool(void)
97{ 101{
98 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN; 102 gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
99 struct debug_obj *new; 103 struct debug_obj *new, *obj;
100 unsigned long flags; 104 unsigned long flags;
101 105
102 if (likely(obj_pool_free >= debug_objects_pool_min_level)) 106 if (likely(obj_pool_free >= debug_objects_pool_min_level))
103 return; 107 return;
104 108
109 /*
110 * Reuse objs from the global free list; they will be reinitialized
111 * when allocating.
112 */
113 while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
114 raw_spin_lock_irqsave(&pool_lock, flags);
115 /*
116 * Recheck with the lock held as the worker thread might have
117 * won the race and freed the global free list already.
118 */
119 if (obj_nr_tofree) {
120 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
121 hlist_del(&obj->node);
122 obj_nr_tofree--;
123 hlist_add_head(&obj->node, &obj_pool);
124 obj_pool_free++;
125 }
126 raw_spin_unlock_irqrestore(&pool_lock, flags);
127 }
128
105 if (unlikely(!obj_cache)) 129 if (unlikely(!obj_cache))
106 return; 130 return;
107 131
@@ -177,62 +201,76 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
177 * workqueue function to free objects. 201 * workqueue function to free objects.
178 * 202 *
179 * To reduce contention on the global pool_lock, the actual freeing of 203 * To reduce contention on the global pool_lock, the actual freeing of
180 * debug objects will be delayed if the pool_lock is busy. We also free 204 * debug objects will be delayed if the pool_lock is busy.
181 * the objects in a batch of 4 for each lock/unlock cycle.
182 */ 205 */
183#define ODEBUG_FREE_BATCH 4
184
185static void free_obj_work(struct work_struct *work) 206static void free_obj_work(struct work_struct *work)
186{ 207{
187 struct debug_obj *objs[ODEBUG_FREE_BATCH]; 208 struct hlist_node *tmp;
209 struct debug_obj *obj;
188 unsigned long flags; 210 unsigned long flags;
189 int i; 211 HLIST_HEAD(tofree);
190 212
191 if (!raw_spin_trylock_irqsave(&pool_lock, flags)) 213 if (!raw_spin_trylock_irqsave(&pool_lock, flags))
192 return; 214 return;
193 while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
194 for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
195 objs[i] = hlist_entry(obj_pool.first,
196 typeof(*objs[0]), node);
197 hlist_del(&objs[i]->node);
198 }
199 215
200 obj_pool_free -= ODEBUG_FREE_BATCH; 216 /*
201 debug_objects_freed += ODEBUG_FREE_BATCH; 217 * The objs on the pool list might be allocated before the work is
202 /* 218 * run, so recheck if pool list it full or not, if not fill pool
203 * We release pool_lock across kmem_cache_free() to 219 * list from the global free list
204 * avoid contention on pool_lock. 220 */
205 */ 221 while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
206 raw_spin_unlock_irqrestore(&pool_lock, flags); 222 obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
207 for (i = 0; i < ODEBUG_FREE_BATCH; i++) 223 hlist_del(&obj->node);
208 kmem_cache_free(obj_cache, objs[i]); 224 hlist_add_head(&obj->node, &obj_pool);
209 if (!raw_spin_trylock_irqsave(&pool_lock, flags)) 225 obj_pool_free++;
210 return; 226 obj_nr_tofree--;
227 }
228
229 /*
230 * Pool list is already full and there are still objs on the free
231 * list. Move remaining free objs to a temporary list to free the
232 * memory outside the pool_lock held region.
233 */
234 if (obj_nr_tofree) {
235 hlist_move_list(&obj_to_free, &tofree);
236 debug_objects_freed += obj_nr_tofree;
237 obj_nr_tofree = 0;
211 } 238 }
212 raw_spin_unlock_irqrestore(&pool_lock, flags); 239 raw_spin_unlock_irqrestore(&pool_lock, flags);
240
241 hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
242 hlist_del(&obj->node);
243 kmem_cache_free(obj_cache, obj);
244 }
213} 245}
214 246
215/* 247static bool __free_object(struct debug_obj *obj)
216 * Put the object back into the pool and schedule work to free objects
217 * if necessary.
218 */
219static void free_object(struct debug_obj *obj)
220{ 248{
221 unsigned long flags; 249 unsigned long flags;
222 int sched = 0; 250 bool work;
223 251
224 raw_spin_lock_irqsave(&pool_lock, flags); 252 raw_spin_lock_irqsave(&pool_lock, flags);
225 /* 253 work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
226 * schedule work when the pool is filled and the cache is
227 * initialized:
228 */
229 if (obj_pool_free > debug_objects_pool_size && obj_cache)
230 sched = 1;
231 hlist_add_head(&obj->node, &obj_pool);
232 obj_pool_free++;
233 obj_pool_used--; 254 obj_pool_used--;
255
256 if (work) {
257 obj_nr_tofree++;
258 hlist_add_head(&obj->node, &obj_to_free);
259 } else {
260 obj_pool_free++;
261 hlist_add_head(&obj->node, &obj_pool);
262 }
234 raw_spin_unlock_irqrestore(&pool_lock, flags); 263 raw_spin_unlock_irqrestore(&pool_lock, flags);
235 if (sched) 264 return work;
265}
266
267/*
268 * Put the object back into the pool and schedule work to free objects
269 * if necessary.
270 */
271static void free_object(struct debug_obj *obj)
272{
273 if (__free_object(obj))
236 schedule_work(&debug_obj_work); 274 schedule_work(&debug_obj_work);
237} 275}
238 276
@@ -714,13 +752,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
714static void __debug_check_no_obj_freed(const void *address, unsigned long size) 752static void __debug_check_no_obj_freed(const void *address, unsigned long size)
715{ 753{
716 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks; 754 unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
717 struct hlist_node *tmp;
718 HLIST_HEAD(freelist);
719 struct debug_obj_descr *descr; 755 struct debug_obj_descr *descr;
720 enum debug_obj_state state; 756 enum debug_obj_state state;
721 struct debug_bucket *db; 757 struct debug_bucket *db;
758 struct hlist_node *tmp;
722 struct debug_obj *obj; 759 struct debug_obj *obj;
723 int cnt; 760 int cnt, objs_checked = 0;
761 bool work = false;
724 762
725 saddr = (unsigned long) address; 763 saddr = (unsigned long) address;
726 eaddr = saddr + size; 764 eaddr = saddr + size;
@@ -751,21 +789,24 @@ repeat:
751 goto repeat; 789 goto repeat;
752 default: 790 default:
753 hlist_del(&obj->node); 791 hlist_del(&obj->node);
754 hlist_add_head(&obj->node, &freelist); 792 work |= __free_object(obj);
755 break; 793 break;
756 } 794 }
757 } 795 }
758 raw_spin_unlock_irqrestore(&db->lock, flags); 796 raw_spin_unlock_irqrestore(&db->lock, flags);
759 797
760 /* Now free them */
761 hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
762 hlist_del(&obj->node);
763 free_object(obj);
764 }
765
766 if (cnt > debug_objects_maxchain) 798 if (cnt > debug_objects_maxchain)
767 debug_objects_maxchain = cnt; 799 debug_objects_maxchain = cnt;
800
801 objs_checked += cnt;
768 } 802 }
803
804 if (objs_checked > debug_objects_maxchecked)
805 debug_objects_maxchecked = objs_checked;
806
807 /* Schedule work to actually kmem_cache_free() objects */
808 if (work)
809 schedule_work(&debug_obj_work);
769} 810}
770 811
771void debug_check_no_obj_freed(const void *address, unsigned long size) 812void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -780,12 +821,14 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
780static int debug_stats_show(struct seq_file *m, void *v) 821static int debug_stats_show(struct seq_file *m, void *v)
781{ 822{
782 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain); 823 seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
824 seq_printf(m, "max_checked :%d\n", debug_objects_maxchecked);
783 seq_printf(m, "warnings :%d\n", debug_objects_warnings); 825 seq_printf(m, "warnings :%d\n", debug_objects_warnings);
784 seq_printf(m, "fixups :%d\n", debug_objects_fixups); 826 seq_printf(m, "fixups :%d\n", debug_objects_fixups);
785 seq_printf(m, "pool_free :%d\n", obj_pool_free); 827 seq_printf(m, "pool_free :%d\n", obj_pool_free);
786 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free); 828 seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
787 seq_printf(m, "pool_used :%d\n", obj_pool_used); 829 seq_printf(m, "pool_used :%d\n", obj_pool_used);
788 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used); 830 seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
831 seq_printf(m, "on_free_list :%d\n", obj_nr_tofree);
789 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated); 832 seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
790 seq_printf(m, "objs_freed :%d\n", debug_objects_freed); 833 seq_printf(m, "objs_freed :%d\n", debug_objects_freed);
791 return 0; 834 return 0;
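The rework above keeps surplus debug objects on a global obj_to_free list and leaves the actual kmem_cache_free() calls to the workqueue, so the free path never releases memory while walking hash buckets. A stripped-down sketch of that general pattern, with hypothetical names (not the debugobjects code itself):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_obj {
	struct hlist_node node;
};

static HLIST_HEAD(to_free);		/* objects queued for freeing */
static DEFINE_SPINLOCK(to_free_lock);
static struct kmem_cache *example_cache;

static void example_free_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct example_obj *obj;
	HLIST_HEAD(batch);

	/* Detach the whole list under the lock, free outside of it. */
	spin_lock(&to_free_lock);
	hlist_move_list(&to_free, &batch);
	spin_unlock(&to_free_lock);

	hlist_for_each_entry_safe(obj, tmp, &batch, node) {
		hlist_del(&obj->node);
		kmem_cache_free(example_cache, obj);
	}
}

static DECLARE_WORK(example_work, example_free_work);

/* Called from contexts that must not free memory themselves. */
static void example_queue_free(struct example_obj *obj)
{
	spin_lock(&to_free_lock);
	hlist_add_head(&obj->node, &to_free);
	spin_unlock(&to_free_lock);
	schedule_work(&example_work);
}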
diff --git a/lib/devres.c b/lib/devres.c
index 5f2aedd58bc5..5bec1120b392 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -5,6 +5,12 @@
5#include <linux/gfp.h> 5#include <linux/gfp.h>
6#include <linux/export.h> 6#include <linux/export.h>
7 7
8enum devm_ioremap_type {
9 DEVM_IOREMAP = 0,
10 DEVM_IOREMAP_NC,
11 DEVM_IOREMAP_WC,
12};
13
8void devm_ioremap_release(struct device *dev, void *res) 14void devm_ioremap_release(struct device *dev, void *res)
9{ 15{
10 iounmap(*(void __iomem **)res); 16 iounmap(*(void __iomem **)res);
@@ -15,24 +21,28 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
15 return *(void **)res == match_data; 21 return *(void **)res == match_data;
16} 22}
17 23
18/** 24static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
19 * devm_ioremap - Managed ioremap() 25 resource_size_t size,
20 * @dev: Generic device to remap IO address for 26 enum devm_ioremap_type type)
21 * @offset: Resource address to map
22 * @size: Size of map
23 *
24 * Managed ioremap(). Map is automatically unmapped on driver detach.
25 */
26void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
27 resource_size_t size)
28{ 27{
29 void __iomem **ptr, *addr; 28 void __iomem **ptr, *addr = NULL;
30 29
31 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); 30 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
32 if (!ptr) 31 if (!ptr)
33 return NULL; 32 return NULL;
34 33
35 addr = ioremap(offset, size); 34 switch (type) {
35 case DEVM_IOREMAP:
36 addr = ioremap(offset, size);
37 break;
38 case DEVM_IOREMAP_NC:
39 addr = ioremap_nocache(offset, size);
40 break;
41 case DEVM_IOREMAP_WC:
42 addr = ioremap_wc(offset, size);
43 break;
44 }
45
36 if (addr) { 46 if (addr) {
37 *ptr = addr; 47 *ptr = addr;
38 devres_add(dev, ptr); 48 devres_add(dev, ptr);
@@ -41,6 +51,20 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
41 51
42 return addr; 52 return addr;
43} 53}
54
55/**
56 * devm_ioremap - Managed ioremap()
57 * @dev: Generic device to remap IO address for
58 * @offset: Resource address to map
59 * @size: Size of map
60 *
61 * Managed ioremap(). Map is automatically unmapped on driver detach.
62 */
63void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
64 resource_size_t size)
65{
66 return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
67}
44EXPORT_SYMBOL(devm_ioremap); 68EXPORT_SYMBOL(devm_ioremap);
45 69
46/** 70/**
@@ -55,20 +79,7 @@ EXPORT_SYMBOL(devm_ioremap);
55void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, 79void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
56 resource_size_t size) 80 resource_size_t size)
57{ 81{
58 void __iomem **ptr, *addr; 82 return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
59
60 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
61 if (!ptr)
62 return NULL;
63
64 addr = ioremap_nocache(offset, size);
65 if (addr) {
66 *ptr = addr;
67 devres_add(dev, ptr);
68 } else
69 devres_free(ptr);
70
71 return addr;
72} 83}
73EXPORT_SYMBOL(devm_ioremap_nocache); 84EXPORT_SYMBOL(devm_ioremap_nocache);
74 85
@@ -83,20 +94,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
83void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset, 94void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
84 resource_size_t size) 95 resource_size_t size)
85{ 96{
86 void __iomem **ptr, *addr; 97 return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
87
88 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
89 if (!ptr)
90 return NULL;
91
92 addr = ioremap_wc(offset, size);
93 if (addr) {
94 *ptr = addr;
95 devres_add(dev, ptr);
96 } else
97 devres_free(ptr);
98
99 return addr;
100} 98}
101EXPORT_SYMBOL(devm_ioremap_wc); 99EXPORT_SYMBOL(devm_ioremap_wc);
102 100
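All three managed mapping helpers now funnel into __devm_ioremap(); from a driver's point of view the calls are unchanged. A typical probe-time use (hypothetical driver):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Unmapped automatically when the driver detaches. */
	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* example register access */
	return 0;
}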
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..c0bba30fef0a 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
9#include <linux/scatterlist.h> 9#include <linux/scatterlist.h>
10#include <linux/dma-contiguous.h> 10#include <linux/dma-contiguous.h>
11#include <linux/pfn.h> 11#include <linux/pfn.h>
12#include <linux/set_memory.h>
12 13
13#define DIRECT_MAPPING_ERROR 0 14#define DIRECT_MAPPING_ERROR 0
14 15
@@ -20,6 +21,14 @@
20#define ARCH_ZONE_DMA_BITS 24 21#define ARCH_ZONE_DMA_BITS 24
21#endif 22#endif
22 23
24/*
25 * For AMD SEV all DMA must be to unencrypted addresses.
26 */
27static inline bool force_dma_unencrypted(void)
28{
29 return sev_active();
30}
31
23static bool 32static bool
24check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, 33check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
25 const char *caller) 34 const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
37 46
38static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size) 47static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
39{ 48{
40 return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask; 49 dma_addr_t addr = force_dma_unencrypted() ?
50 __phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
51 return addr + size - 1 <= dev->coherent_dma_mask;
41} 52}
42 53
43void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 54void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
46 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 57 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
47 int page_order = get_order(size); 58 int page_order = get_order(size);
48 struct page *page = NULL; 59 struct page *page = NULL;
60 void *ret;
61
62 /* we always manually zero the memory once we are done: */
63 gfp &= ~__GFP_ZERO;
49 64
50 /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */ 65 /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
51 if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS)) 66 if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -78,10 +93,15 @@ again:
78 93
79 if (!page) 94 if (!page)
80 return NULL; 95 return NULL;
81 96 ret = page_address(page);
82 *dma_handle = phys_to_dma(dev, page_to_phys(page)); 97 if (force_dma_unencrypted()) {
83 memset(page_address(page), 0, size); 98 set_memory_decrypted((unsigned long)ret, 1 << page_order);
84 return page_address(page); 99 *dma_handle = __phys_to_dma(dev, page_to_phys(page));
100 } else {
101 *dma_handle = phys_to_dma(dev, page_to_phys(page));
102 }
103 memset(ret, 0, size);
104 return ret;
85} 105}
86 106
87/* 107/*
@@ -92,9 +112,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
92 dma_addr_t dma_addr, unsigned long attrs) 112 dma_addr_t dma_addr, unsigned long attrs)
93{ 113{
94 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 114 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
115 unsigned int page_order = get_order(size);
95 116
117 if (force_dma_unencrypted())
118 set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
96 if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count)) 119 if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
97 free_pages((unsigned long)cpu_addr, get_order(size)); 120 free_pages((unsigned long)cpu_addr, page_order);
98} 121}
99 122
100static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, 123static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
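With SEV active, dma_direct_alloc() now returns memory that has been marked decrypted and reports the matching unencrypted bus address, so callers keep using the ordinary coherent DMA API unchanged. A hypothetical driver-side sketch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* The SEV handling in dma_direct_alloc() is transparent here. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, use the ring ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}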
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c5edbedd364d..5cff72f18c4a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -10,6 +10,66 @@
10#include <linux/sched/debug.h> 10#include <linux/sched/debug.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/atomic.h> 12#include <linux/atomic.h>
13#include <linux/kexec.h>
14#include <linux/utsname.h>
15
16static char dump_stack_arch_desc_str[128];
17
18/**
19 * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
20 * @fmt: printf-style format string
21 * @...: arguments for the format string
22 *
23 * The configured string will be printed right after utsname during task
24 * dumps. Usually used to add arch-specific system identifiers. If an
25 * arch wants to make use of such an ID string, it should initialize this
26 * as soon as possible during boot.
27 */
28void __init dump_stack_set_arch_desc(const char *fmt, ...)
29{
30 va_list args;
31
32 va_start(args, fmt);
33 vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
34 fmt, args);
35 va_end(args);
36}
37
38/**
39 * dump_stack_print_info - print generic debug info for dump_stack()
40 * @log_lvl: log level
41 *
42 * Arch-specific dump_stack() implementations can use this function to
43 * print out the same debug information as the generic dump_stack().
44 */
45void dump_stack_print_info(const char *log_lvl)
46{
47 printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
48 log_lvl, raw_smp_processor_id(), current->pid, current->comm,
49 kexec_crash_loaded() ? "Kdump: loaded " : "",
50 print_tainted(),
51 init_utsname()->release,
52 (int)strcspn(init_utsname()->version, " "),
53 init_utsname()->version);
54
55 if (dump_stack_arch_desc_str[0] != '\0')
56 printk("%sHardware name: %s\n",
57 log_lvl, dump_stack_arch_desc_str);
58
59 print_worker_info(log_lvl, current);
60}
61
62/**
63 * show_regs_print_info - print generic debug info for show_regs()
64 * @log_lvl: log level
65 *
66 * show_regs() implementations can use this function to print out generic
67 * debug information.
68 */
69void show_regs_print_info(const char *log_lvl)
70{
71 dump_stack_print_info(log_lvl);
72}
13 73
14static void __dump_stack(void) 74static void __dump_stack(void)
15{ 75{
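dump_stack_set_arch_desc() and the two print helpers now live here and are built only when CONFIG_PRINTK is set (see the lib/Makefile change earlier in this diff). An architecture or platform would typically set the descriptor once during early boot; a hypothetical example:

#include <linux/init.h>
#include <linux/printk.h>

static void __init example_setup_arch(void)
{
	/* Shown as the "Hardware name:" line in every task dump. */
	dump_stack_set_arch_desc("ExampleBoard rev %u, firmware %s", 3, "v1.2");
}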
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index e2d329099bf7..14436f4ca6bd 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -38,3 +38,33 @@ unsigned long int_sqrt(unsigned long x)
38 return y; 38 return y;
39} 39}
40EXPORT_SYMBOL(int_sqrt); 40EXPORT_SYMBOL(int_sqrt);
41
42#if BITS_PER_LONG < 64
43/**
44 * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
45 * is expected.
46 * @x: 64bit integer of which to calculate the sqrt
47 */
48u32 int_sqrt64(u64 x)
49{
50 u64 b, m, y = 0;
51
52 if (x <= ULONG_MAX)
53 return int_sqrt((unsigned long) x);
54
55 m = 1ULL << (fls64(x) & ~1ULL);
56 while (m != 0) {
57 b = y + m;
58 y >>= 1;
59
60 if (x >= b) {
61 x -= b;
62 y += m;
63 }
64 m >>= 2;
65 }
66
67 return y;
68}
69EXPORT_SYMBOL(int_sqrt64);
70#endif
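int_sqrt64() is only compiled where BITS_PER_LONG < 64; on 64-bit kernels int_sqrt() already handles the full range, and the matching header is expected to fall back to it there. A small usage sketch (hypothetical caller):

#include <linux/kernel.h>

/* Magnitude of a vector whose squared components are summed in 64 bits. */
static u32 example_magnitude(u64 sum_of_squares)
{
	/* Safe on 32-bit kernels, where the 64-bit input would not fit
	 * int_sqrt()'s unsigned long argument. */
	return int_sqrt64(sum_of_squares);
}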
diff --git a/lib/ioremap.c b/lib/ioremap.c
index b808a390e4c3..54e5bbaa3200 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -91,7 +91,8 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
91 91
92 if (ioremap_pmd_enabled() && 92 if (ioremap_pmd_enabled() &&
93 ((next - addr) == PMD_SIZE) && 93 ((next - addr) == PMD_SIZE) &&
94 IS_ALIGNED(phys_addr + addr, PMD_SIZE)) { 94 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
95 pmd_free_pte_page(pmd)) {
95 if (pmd_set_huge(pmd, phys_addr + addr, prot)) 96 if (pmd_set_huge(pmd, phys_addr + addr, prot))
96 continue; 97 continue;
97 } 98 }
@@ -117,7 +118,8 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
117 118
118 if (ioremap_pud_enabled() && 119 if (ioremap_pud_enabled() &&
119 ((next - addr) == PUD_SIZE) && 120 ((next - addr) == PUD_SIZE) &&
120 IS_ALIGNED(phys_addr + addr, PUD_SIZE)) { 121 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
122 pud_free_pmd_page(pud)) {
121 if (pud_set_huge(pud, phys_addr + addr, prot)) 123 if (pud_set_huge(pud, phys_addr + addr, prot))
122 continue; 124 continue;
123 } 125 }
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 90ba1eb1df06..b0f757bf7213 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -39,7 +39,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
39 size_t esize, gfp_t gfp_mask) 39 size_t esize, gfp_t gfp_mask)
40{ 40{
41 /* 41 /*
42 * round down to the next power of 2, since our 'let the indices 42 * round up to the next power of 2, since our 'let the indices
43 * wrap' technique works only in this case. 43 * wrap' technique works only in this case.
44 */ 44 */
45 size = roundup_pow_of_two(size); 45 size = roundup_pow_of_two(size);
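The corrected comment matches the code: __kfifo_alloc() rounds the requested size up to the next power of two, so callers always get at least as much space as they asked for. For example (hypothetical):

#include <linux/gfp.h>
#include <linux/kfifo.h>

static struct kfifo example_fifo;

static int example_fifo_init(void)
{
	/* Asking for 100 bytes yields a 128-byte FIFO. */
	return kfifo_alloc(&example_fifo, 100, GFP_KERNEL);
}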
diff --git a/lib/kobject.c b/lib/kobject.c
index afd5a3fc6123..e1d1f290bf35 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -204,8 +204,9 @@ static int kobject_add_internal(struct kobject *kobj)
204 return -ENOENT; 204 return -ENOENT;
205 205
206 if (!kobj->name || !kobj->name[0]) { 206 if (!kobj->name || !kobj->name[0]) {
207 WARN(1, "kobject: (%p): attempted to be registered with empty " 207 WARN(1,
208 "name!\n", kobj); 208 "kobject: (%p): attempted to be registered with empty name!\n",
209 kobj);
209 return -EINVAL; 210 return -EINVAL;
210 } 211 }
211 212
@@ -232,9 +233,8 @@ static int kobject_add_internal(struct kobject *kobj)
232 233
233 /* be noisy on error issues */ 234 /* be noisy on error issues */
234 if (error == -EEXIST) 235 if (error == -EEXIST)
235 WARN(1, "%s failed for %s with " 236 WARN(1,
236 "-EEXIST, don't try to register things with " 237 "%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
237 "the same name in the same directory.\n",
238 __func__, kobject_name(kobj)); 238 __func__, kobject_name(kobj));
239 else 239 else
240 WARN(1, "%s failed for %s (error: %d parent: %s)\n", 240 WARN(1, "%s failed for %s (error: %d parent: %s)\n",
@@ -334,8 +334,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
334 } 334 }
335 if (kobj->state_initialized) { 335 if (kobj->state_initialized) {
336 /* do not error out as sometimes we can recover */ 336 /* do not error out as sometimes we can recover */
337 printk(KERN_ERR "kobject (%p): tried to init an initialized " 337 pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
338 "object, something is seriously wrong.\n", kobj); 338 kobj);
339 dump_stack(); 339 dump_stack();
340 } 340 }
341 341
@@ -344,7 +344,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
344 return; 344 return;
345 345
346error: 346error:
347 printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str); 347 pr_err("kobject (%p): %s\n", kobj, err_str);
348 dump_stack(); 348 dump_stack();
349} 349}
350EXPORT_SYMBOL(kobject_init); 350EXPORT_SYMBOL(kobject_init);
@@ -357,7 +357,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
357 357
358 retval = kobject_set_name_vargs(kobj, fmt, vargs); 358 retval = kobject_set_name_vargs(kobj, fmt, vargs);
359 if (retval) { 359 if (retval) {
360 printk(KERN_ERR "kobject: can not set name properly!\n"); 360 pr_err("kobject: can not set name properly!\n");
361 return retval; 361 return retval;
362 } 362 }
363 kobj->parent = parent; 363 kobj->parent = parent;
@@ -399,8 +399,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
399 return -EINVAL; 399 return -EINVAL;
400 400
401 if (!kobj->state_initialized) { 401 if (!kobj->state_initialized) {
402 printk(KERN_ERR "kobject '%s' (%p): tried to add an " 402 pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
403 "uninitialized object, something is seriously wrong.\n",
404 kobject_name(kobj), kobj); 403 kobject_name(kobj), kobj);
405 dump_stack(); 404 dump_stack();
406 return -EINVAL; 405 return -EINVAL;
@@ -590,9 +589,9 @@ struct kobject *kobject_get(struct kobject *kobj)
590{ 589{
591 if (kobj) { 590 if (kobj) {
592 if (!kobj->state_initialized) 591 if (!kobj->state_initialized)
593 WARN(1, KERN_WARNING "kobject: '%s' (%p): is not " 592 WARN(1, KERN_WARNING
594 "initialized, yet kobject_get() is being " 593 "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
595 "called.\n", kobject_name(kobj), kobj); 594 kobject_name(kobj), kobj);
596 kref_get(&kobj->kref); 595 kref_get(&kobj->kref);
597 } 596 }
598 return kobj; 597 return kobj;
@@ -622,8 +621,7 @@ static void kobject_cleanup(struct kobject *kobj)
622 kobject_name(kobj), kobj, __func__, kobj->parent); 621 kobject_name(kobj), kobj, __func__, kobj->parent);
623 622
624 if (t && !t->release) 623 if (t && !t->release)
625 pr_debug("kobject: '%s' (%p): does not have a release() " 624 pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
626 "function, it is broken and must be fixed.\n",
627 kobject_name(kobj), kobj); 625 kobject_name(kobj), kobj);
628 626
629 /* send "remove" if the caller did not do it but sent "add" */ 627 /* send "remove" if the caller did not do it but sent "add" */
@@ -686,9 +684,9 @@ void kobject_put(struct kobject *kobj)
686{ 684{
687 if (kobj) { 685 if (kobj) {
688 if (!kobj->state_initialized) 686 if (!kobj->state_initialized)
689 WARN(1, KERN_WARNING "kobject: '%s' (%p): is not " 687 WARN(1, KERN_WARNING
690 "initialized, yet kobject_put() is being " 688 "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
691 "called.\n", kobject_name(kobj), kobj); 689 kobject_name(kobj), kobj);
692 kref_put(&kobj->kref, kobject_release); 690 kref_put(&kobj->kref, kobject_release);
693 } 691 }
694} 692}
@@ -752,8 +750,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
752 750
753 retval = kobject_add(kobj, parent, "%s", name); 751 retval = kobject_add(kobj, parent, "%s", name);
754 if (retval) { 752 if (retval) {
755 printk(KERN_WARNING "%s: kobject_add error: %d\n", 753 pr_warn("%s: kobject_add error: %d\n", __func__, retval);
756 __func__, retval);
757 kobject_put(kobj); 754 kobject_put(kobj);
758 kobj = NULL; 755 kobj = NULL;
759 } 756 }
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fe6ec8fda28..15ea216a67ce 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -25,6 +25,7 @@
25#include <linux/uuid.h> 25#include <linux/uuid.h>
26#include <linux/ctype.h> 26#include <linux/ctype.h>
27#include <net/sock.h> 27#include <net/sock.h>
28#include <net/netlink.h>
28#include <net/net_namespace.h> 29#include <net/net_namespace.h>
29 30
30 31
@@ -32,11 +33,13 @@ u64 uevent_seqnum;
32#ifdef CONFIG_UEVENT_HELPER 33#ifdef CONFIG_UEVENT_HELPER
33char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; 34char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
34#endif 35#endif
35#ifdef CONFIG_NET 36
36struct uevent_sock { 37struct uevent_sock {
37 struct list_head list; 38 struct list_head list;
38 struct sock *sk; 39 struct sock *sk;
39}; 40};
41
42#ifdef CONFIG_NET
40static LIST_HEAD(uevent_sock_list); 43static LIST_HEAD(uevent_sock_list);
41#endif 44#endif
42 45
@@ -602,12 +605,88 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
602EXPORT_SYMBOL_GPL(add_uevent_var); 605EXPORT_SYMBOL_GPL(add_uevent_var);
603 606
604#if defined(CONFIG_NET) 607#if defined(CONFIG_NET)
608static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
609 struct netlink_ext_ack *extack)
610{
611 /* u64 to chars: 2^64 - 1 = 21 chars */
612 char buf[sizeof("SEQNUM=") + 21];
613 struct sk_buff *skbc;
614 int ret;
615
616 /* bump and prepare sequence number */
617 ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
618 if (ret < 0 || (size_t)ret >= sizeof(buf))
619 return -ENOMEM;
620 ret++;
621
622 /* verify message does not overflow */
623 if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
624 NL_SET_ERR_MSG(extack, "uevent message too big");
625 return -EINVAL;
626 }
627
628 /* copy skb and extend to accommodate sequence number */
629 skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
630 if (!skbc)
631 return -ENOMEM;
632
633 /* append sequence number */
634 skb_put_data(skbc, buf, ret);
635
636 /* remove msg header */
637 skb_pull(skbc, NLMSG_HDRLEN);
638
639 /* set portid 0 to inform userspace message comes from kernel */
640 NETLINK_CB(skbc).portid = 0;
641 NETLINK_CB(skbc).dst_group = 1;
642
643 ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
644 /* ENOBUFS should be handled in userspace */
645 if (ret == -ENOBUFS || ret == -ESRCH)
646 ret = 0;
647
648 return ret;
649}
650
651static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
652 struct netlink_ext_ack *extack)
653{
654 struct net *net;
655 int ret;
656
657 if (!nlmsg_data(nlh))
658 return -EINVAL;
659
660 /*
661 * Verify that we are allowed to send messages to the target
662 * network namespace. The caller must have CAP_SYS_ADMIN in the
663 * owning user namespace of the target network namespace.
664 */
665 net = sock_net(NETLINK_CB(skb).sk);
666 if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
667 NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
668 return -EPERM;
669 }
670
671 mutex_lock(&uevent_sock_mutex);
672 ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
673 mutex_unlock(&uevent_sock_mutex);
674
675 return ret;
676}
677
678static void uevent_net_rcv(struct sk_buff *skb)
679{
680 netlink_rcv_skb(skb, &uevent_net_rcv_skb);
681}
682
605static int uevent_net_init(struct net *net) 683static int uevent_net_init(struct net *net)
606{ 684{
607 struct uevent_sock *ue_sk; 685 struct uevent_sock *ue_sk;
608 struct netlink_kernel_cfg cfg = { 686 struct netlink_kernel_cfg cfg = {
609 .groups = 1, 687 .groups = 1,
610 .flags = NL_CFG_F_NONROOT_RECV, 688 .input = uevent_net_rcv,
689 .flags = NL_CFG_F_NONROOT_RECV
611 }; 690 };
612 691
613 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL); 692 ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
@@ -621,6 +700,9 @@ static int uevent_net_init(struct net *net)
621 kfree(ue_sk); 700 kfree(ue_sk);
622 return -ENODEV; 701 return -ENODEV;
623 } 702 }
703
704 net->uevent_sock = ue_sk;
705
624 mutex_lock(&uevent_sock_mutex); 706 mutex_lock(&uevent_sock_mutex);
625 list_add_tail(&ue_sk->list, &uevent_sock_list); 707 list_add_tail(&ue_sk->list, &uevent_sock_list);
626 mutex_unlock(&uevent_sock_mutex); 708 mutex_unlock(&uevent_sock_mutex);
@@ -629,17 +711,9 @@ static int uevent_net_init(struct net *net)
629 711
630static void uevent_net_exit(struct net *net) 712static void uevent_net_exit(struct net *net)
631{ 713{
632 struct uevent_sock *ue_sk; 714 struct uevent_sock *ue_sk = net->uevent_sock;
633 715
634 mutex_lock(&uevent_sock_mutex); 716 mutex_lock(&uevent_sock_mutex);
635 list_for_each_entry(ue_sk, &uevent_sock_list, list) {
636 if (sock_net(ue_sk->sk) == net)
637 goto found;
638 }
639 mutex_unlock(&uevent_sock_mutex);
640 return;
641
642found:
643 list_del(&ue_sk->list); 717 list_del(&ue_sk->list);
644 mutex_unlock(&uevent_sock_mutex); 718 mutex_unlock(&uevent_sock_mutex);
645 719
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index 9f79547d1b97..f0a2934605bf 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -71,6 +71,12 @@ static void __exit libcrc32c_mod_fini(void)
71 crypto_free_shash(tfm); 71 crypto_free_shash(tfm);
72} 72}
73 73
74const char *crc32c_impl(void)
75{
76 return crypto_shash_driver_name(tfm);
77}
78EXPORT_SYMBOL(crc32c_impl);
79
74module_init(libcrc32c_mod_init); 80module_init(libcrc32c_mod_init);
75module_exit(libcrc32c_mod_fini); 81module_exit(libcrc32c_mod_fini);
76 82
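crc32c_impl() reports which crypto shash driver is backing crc32c(), letting callers log whether an accelerated or the generic implementation is in use (the declaration is expected to sit next to crc32c() in the corresponding header change). A hypothetical caller:

#include <linux/crc32c.h>
#include <linux/printk.h>

static void example_report_csum(void)
{
	/* e.g. "crc32c-intel" when the PCLMULQDQ-based driver is loaded */
	pr_info("checksumming with crc32c (%s)\n", crc32c_impl());
}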
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
new file mode 100644
index 000000000000..feea48fd1a0d
--- /dev/null
+++ b/lib/logic_pio.c
@@ -0,0 +1,280 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
4 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
5 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
6 */
7
8#define pr_fmt(fmt) "LOGIC PIO: " fmt
9
10#include <linux/of.h>
11#include <linux/io.h>
12#include <linux/logic_pio.h>
13#include <linux/mm.h>
14#include <linux/rculist.h>
15#include <linux/sizes.h>
16#include <linux/slab.h>
17
18/* The unique hardware address list */
19static LIST_HEAD(io_range_list);
20static DEFINE_MUTEX(io_range_mutex);
21
22/* Consider a kernel general helper for this */
23#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
24
25/**
26 * logic_pio_register_range - register logical PIO range for a host
27 * @new_range: pointer to the IO range to be registered.
28 *
29 * Returns 0 on success, the error code in case of failure.
30 *
31 * Register a new IO range node in the IO range list.
32 */
33int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
34{
35 struct logic_pio_hwaddr *range;
36 resource_size_t start;
37 resource_size_t end;
38 resource_size_t mmio_sz = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0;
41
42 if (!new_range || !new_range->fwnode || !new_range->size)
43 return -EINVAL;
44
45 start = new_range->hw_start;
46 end = new_range->hw_start + new_range->size;
47
48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */
52 goto end_register;
53 }
54 if (range->flags == LOGIC_PIO_CPU_MMIO &&
55 new_range->flags == LOGIC_PIO_CPU_MMIO) {
56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) {
59 mmio_sz += range->size;
60 } else {
61 ret = -EFAULT;
62 goto end_register;
63 }
64 } else if (range->flags == LOGIC_PIO_INDIRECT &&
65 new_range->flags == LOGIC_PIO_INDIRECT) {
66 iio_sz += range->size;
67 }
68 }
69
70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
73 /* if it's too big check if 64K space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG;
76 goto end_register;
77 }
78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 }
81 new_range->io_start = mmio_sz;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG;
85 goto end_register;
86 }
87 new_range->io_start = iio_sz;
88 } else {
89 /* invalid flag */
90 ret = -EINVAL;
91 goto end_register;
92 }
93
94 list_add_tail_rcu(&new_range->list, &io_range_list);
95
96end_register:
97 mutex_unlock(&io_range_mutex);
98 return ret;
99}
100
101/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range
104 *
105 * Returns pointer to node on success, NULL otherwise.
106 *
107 * Traverse the io_range_list to find the registered node for @fwnode.
108 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{
111 struct logic_pio_hwaddr *range;
112
113 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode)
115 return range;
116 }
117 return NULL;
118}
119
120/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{
123 struct logic_pio_hwaddr *range;
124
125 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size))
127 return range;
128 }
129 pr_err("PIO entry token %lx invalid\n", pio);
130 return NULL;
131}
132
133/**
134 * logic_pio_to_hwaddr - translate logical PIO to HW address
135 * @pio: logical PIO value
136 *
137 * Returns HW address if valid, ~0 otherwise.
138 *
139 * Translate the input logical PIO to the corresponding hardware address.
140 * The input PIO should be unique in the whole logical PIO space.
141 */
142resource_size_t logic_pio_to_hwaddr(unsigned long pio)
143{
144 struct logic_pio_hwaddr *range;
145
146 range = find_io_range(pio);
147 if (range)
148 return range->hw_start + pio - range->io_start;
149
150 return (resource_size_t)~0;
151}
152
153/**
154 * logic_pio_trans_hwaddr - translate HW address to logical PIO
155 * @fwnode: FW node reference for the host
156 * @addr: Host-relative HW address
157 * @size: size to translate
158 *
159 * Returns the logical PIO value if successful, ~0UL otherwise
160 */
161unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
162 resource_size_t addr, resource_size_t size)
163{
164 struct logic_pio_hwaddr *range;
165
166 range = find_io_range_by_fwnode(fwnode);
167 if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
168 pr_err("IO range not found or invalid\n");
169 return ~0UL;
170 }
171 if (range->size < size) {
172 pr_err("resource size %pa cannot fit in IO range size %pa\n",
173 &size, &range->size);
174 return ~0UL;
175 }
176 return addr - range->hw_start + range->io_start;
177}
178
179unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{
181 struct logic_pio_hwaddr *range;
182
183 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue;
186 if (in_range(addr, range->hw_start, range->size))
187 return addr - range->hw_start + range->io_start;
188 }
189 pr_err("addr %llx not registered in io_range_list\n",
190 (unsigned long long) addr);
191 return ~0UL;
192}
193
194#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
195#define BUILD_LOGIC_IO(bw, type) \
196type logic_in##bw(unsigned long addr) \
197{ \
198 type ret = (type)~0; \
199 \
200 if (addr < MMIO_UPPER_LIMIT) { \
201 ret = read##bw(PCI_IOBASE + addr); \
202 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
203 struct logic_pio_hwaddr *entry = find_io_range(addr); \
204 \
205 if (entry && entry->ops) \
206 ret = entry->ops->in(entry->hostdata, \
207 addr, sizeof(type)); \
208 else \
209 WARN_ON_ONCE(1); \
210 } \
211 return ret; \
212} \
213 \
214void logic_out##bw(type value, unsigned long addr) \
215{ \
216 if (addr < MMIO_UPPER_LIMIT) { \
217 write##bw(value, PCI_IOBASE + addr); \
218 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
219 struct logic_pio_hwaddr *entry = find_io_range(addr); \
220 \
221 if (entry && entry->ops) \
222 entry->ops->out(entry->hostdata, \
223 addr, value, sizeof(type)); \
224 else \
225 WARN_ON_ONCE(1); \
226 } \
227} \
228 \
229void logic_ins##bw(unsigned long addr, void *buffer, \
230 unsigned int count) \
231{ \
232 if (addr < MMIO_UPPER_LIMIT) { \
233 reads##bw(PCI_IOBASE + addr, buffer, count); \
234 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
235 struct logic_pio_hwaddr *entry = find_io_range(addr); \
236 \
237 if (entry && entry->ops) \
238 entry->ops->ins(entry->hostdata, \
239 addr, buffer, sizeof(type), count); \
240 else \
241 WARN_ON_ONCE(1); \
242 } \
243 \
244} \
245 \
246void logic_outs##bw(unsigned long addr, const void *buffer, \
247 unsigned int count) \
248{ \
249 if (addr < MMIO_UPPER_LIMIT) { \
250 writes##bw(PCI_IOBASE + addr, buffer, count); \
251 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
252 struct logic_pio_hwaddr *entry = find_io_range(addr); \
253 \
254 if (entry && entry->ops) \
255 entry->ops->outs(entry->hostdata, \
256 addr, buffer, sizeof(type), count); \
257 else \
258 WARN_ON_ONCE(1); \
259 } \
260}
261
262BUILD_LOGIC_IO(b, u8)
263EXPORT_SYMBOL(logic_inb);
264EXPORT_SYMBOL(logic_insb);
265EXPORT_SYMBOL(logic_outb);
266EXPORT_SYMBOL(logic_outsb);
267
268BUILD_LOGIC_IO(w, u16)
269EXPORT_SYMBOL(logic_inw);
270EXPORT_SYMBOL(logic_insw);
271EXPORT_SYMBOL(logic_outw);
272EXPORT_SYMBOL(logic_outsw);
273
274BUILD_LOGIC_IO(l, u32)
275EXPORT_SYMBOL(logic_inl);
276EXPORT_SYMBOL(logic_insl);
277EXPORT_SYMBOL(logic_outl);
278EXPORT_SYMBOL(logic_outsl);
279
280#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
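
For reference, a minimal usage sketch of the logical PIO interface added above, as a hypothetical ARM64 host driver might use it: register an indirect range backed by accessor callbacks, then translate a host-local offset into a logical PIO token that the logic_in*/logic_out* helpers can route back to those callbacks. The ops structure name and callback signatures are assumptions inferred from the accessor macros above (include/linux/logic_pio.h holds the authoritative definitions); the my_host_* names and the 1K window are illustrative only, not part of the patch.

#include <linux/errno.h>
#include <linux/logic_pio.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/sizes.h>

/* Hypothetical indirect-PIO accessors; hostdata points at driver state. */
static u32 my_host_in(void *hostdata, unsigned long addr, size_t dwidth)
{
	/* read 'dwidth' bytes from the host-local register at 'addr' */
	return 0;
}

static void my_host_out(void *hostdata, unsigned long addr, u32 val,
			size_t dwidth)
{
	/* write 'val' to the host-local register at 'addr' */
}

static const struct logic_pio_host_ops my_host_ops = {
	.in  = my_host_in,
	.out = my_host_out,
};

/* Must not live on the stack: logic_pio_register_range() links this
 * node into io_range_list for the lifetime of the host. */
static struct logic_pio_hwaddr my_range;

static int my_host_probe(struct platform_device *pdev)
{
	unsigned long pio;
	int ret;

	my_range.fwnode   = dev_fwnode(&pdev->dev);
	my_range.hw_start = 0;			/* host-local base */
	my_range.size     = SZ_1K;		/* host-local I/O window */
	my_range.flags    = LOGIC_PIO_INDIRECT;
	my_range.ops      = &my_host_ops;
	my_range.hostdata = pdev;

	ret = logic_pio_register_range(&my_range);
	if (ret)
		return ret;

	/* token for host-local offset 0x20, 4 bytes wide */
	pio = logic_pio_trans_hwaddr(my_range.fwnode, 0x20, 4);
	if (pio == ~0UL)
		return -EFAULT;

	/* logic_inl(pio) now ends up in my_host_in() */
	return 0;
}

Because the registered token lands above MMIO_UPPER_LIMIT, the BUILD_LOGIC_IO accessors dispatch it to the host's ops rather than to PCI_IOBASE-mapped MMIO.
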
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 30e7dd88148b..9f96fa7bc000 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -322,6 +322,8 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_percpu);
322 * This function normally doesn't block and can be called from any context 322 * This function normally doesn't block and can be called from any context
323 * but it may block if @confirm_kill is specified and @ref is in the 323 * but it may block if @confirm_kill is specified and @ref is in the
324 * process of switching to atomic mode by percpu_ref_switch_to_atomic(). 324 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
325 *
326 * There are no implied RCU grace periods between kill and release.
325 */ 327 */
326void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 328void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
327 percpu_ref_func_t *confirm_kill) 329 percpu_ref_func_t *confirm_kill)
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index f01b1cb04f91..3de0d8921286 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -4,3 +4,4 @@ int*.c
4tables.c 4tables.c
5neon?.c 5neon?.c
6s390vx?.c 6s390vx?.c
7vpermxor*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4add700ddfe3..2f8b61dfd9b0 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,9 +5,9 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
5 int8.o int16.o int32.o 5 int8.o int16.o int32.o
6 6
7raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o 7raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
8raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o 8raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
9 vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
9raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o 10raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
10raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
11raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o 11raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
12 12
13hostprogs-y += mktables 13hostprogs-y += mktables
@@ -91,6 +91,30 @@ $(obj)/altivec8.c: UNROLL := 8
91$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE 91$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
92 $(call if_changed,unroll) 92 $(call if_changed,unroll)
93 93
94CFLAGS_vpermxor1.o += $(altivec_flags)
95targets += vpermxor1.c
96$(obj)/vpermxor1.c: UNROLL := 1
97$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
98 $(call if_changed,unroll)
99
100CFLAGS_vpermxor2.o += $(altivec_flags)
101targets += vpermxor2.c
102$(obj)/vpermxor2.c: UNROLL := 2
103$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
104 $(call if_changed,unroll)
105
106CFLAGS_vpermxor4.o += $(altivec_flags)
107targets += vpermxor4.c
108$(obj)/vpermxor4.c: UNROLL := 4
109$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
110 $(call if_changed,unroll)
111
112CFLAGS_vpermxor8.o += $(altivec_flags)
113targets += vpermxor8.c
114$(obj)/vpermxor8.c: UNROLL := 8
115$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
116 $(call if_changed,unroll)
117
94CFLAGS_neon1.o += $(NEON_FLAGS) 118CFLAGS_neon1.o += $(NEON_FLAGS)
95targets += neon1.c 119targets += neon1.c
96$(obj)/neon1.c: UNROLL := 1 120$(obj)/neon1.c: UNROLL := 1
@@ -115,11 +139,6 @@ $(obj)/neon8.c: UNROLL := 8
115$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE 139$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
116 $(call if_changed,unroll) 140 $(call if_changed,unroll)
117 141
118targets += tilegx8.c
119$(obj)/tilegx8.c: UNROLL := 8
120$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
121 $(call if_changed,unroll)
122
123targets += s390vx8.c 142targets += s390vx8.c
124$(obj)/s390vx8.c: UNROLL := 8 143$(obj)/s390vx8.c: UNROLL := 8
125$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE 144$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 476994723258..5065b1e7e327 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -74,9 +74,10 @@ const struct raid6_calls * const raid6_algos[] = {
74 &raid6_altivec2, 74 &raid6_altivec2,
75 &raid6_altivec4, 75 &raid6_altivec4,
76 &raid6_altivec8, 76 &raid6_altivec8,
77#endif 77 &raid6_vpermxor1,
78#if defined(CONFIG_TILEGX) 78 &raid6_vpermxor2,
79 &raid6_tilegx8, 79 &raid6_vpermxor4,
80 &raid6_vpermxor8,
80#endif 81#endif
81#if defined(CONFIG_S390) 82#if defined(CONFIG_S390)
82 &raid6_s390vx8, 83 &raid6_s390vx8,
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 682aae8a1fef..d20ed0d11411 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,10 +24,13 @@
24 24
25#include <linux/raid/pq.h> 25#include <linux/raid/pq.h>
26 26
27#ifdef CONFIG_ALTIVEC
28
27#include <altivec.h> 29#include <altivec.h>
28#ifdef __KERNEL__ 30#ifdef __KERNEL__
29# include <asm/cputable.h> 31# include <asm/cputable.h>
30# include <asm/switch_to.h> 32# include <asm/switch_to.h>
33#endif /* __KERNEL__ */
31 34
32/* 35/*
33 * This is the C data type to use. We use a vector of 36 * This is the C data type to use. We use a vector of
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 1d2276b007ee..8191e1d0d2fb 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -91,7 +91,7 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
91 91
92static void raid6_sse21_xor_syndrome(int disks, int start, int stop, 92static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
93 size_t bytes, void **ptrs) 93 size_t bytes, void **ptrs)
94 { 94{
95 u8 **dptr = (u8 **)ptrs; 95 u8 **dptr = (u8 **)ptrs;
96 u8 *p, *q; 96 u8 *p, *q;
97 int d, z, z0; 97 int d, z, z0;
@@ -200,9 +200,9 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
200 kernel_fpu_end(); 200 kernel_fpu_end();
201} 201}
202 202
203 static void raid6_sse22_xor_syndrome(int disks, int start, int stop, 203static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
204 size_t bytes, void **ptrs) 204 size_t bytes, void **ptrs)
205 { 205{
206 u8 **dptr = (u8 **)ptrs; 206 u8 **dptr = (u8 **)ptrs;
207 u8 *p, *q; 207 u8 *p, *q;
208 int d, z, z0; 208 int d, z, z0;
@@ -265,7 +265,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
265 265
266 asm volatile("sfence" : : : "memory"); 266 asm volatile("sfence" : : : "memory");
267 kernel_fpu_end(); 267 kernel_fpu_end();
268 } 268}
269 269
270const struct raid6_calls raid6_sse2x2 = { 270const struct raid6_calls raid6_sse2x2 = {
271 raid6_sse22_gen_syndrome, 271 raid6_sse22_gen_syndrome,
@@ -366,9 +366,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
366 kernel_fpu_end(); 366 kernel_fpu_end();
367} 367}
368 368
369 static void raid6_sse24_xor_syndrome(int disks, int start, int stop, 369static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
370 size_t bytes, void **ptrs) 370 size_t bytes, void **ptrs)
371 { 371{
372 u8 **dptr = (u8 **)ptrs; 372 u8 **dptr = (u8 **)ptrs;
373 u8 *p, *q; 373 u8 *p, *q;
374 int d, z, z0; 374 int d, z, z0;
@@ -471,7 +471,7 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
471 } 471 }
472 asm volatile("sfence" : : : "memory"); 472 asm volatile("sfence" : : : "memory");
473 kernel_fpu_end(); 473 kernel_fpu_end();
474 } 474}
475 475
476 476
477const struct raid6_calls raid6_sse2x4 = { 477const struct raid6_calls raid6_sse2x4 = {
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index be1010bdc435..5d73f5cb4d8a 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -45,15 +45,14 @@ else ifeq ($(HAS_NEON),yes)
45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
46else 46else
47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ 47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
48 gcc -c -x c - >&/dev/null && \ 48 gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
49 rm ./-.o && echo yes)
50 ifeq ($(HAS_ALTIVEC),yes) 49 ifeq ($(HAS_ALTIVEC),yes)
51 OBJS += altivec1.o altivec2.o altivec4.o altivec8.o 50 CFLAGS += -I../../../arch/powerpc/include
51 CFLAGS += -DCONFIG_ALTIVEC
52 OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
53 vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
52 endif 54 endif
53endif 55endif
54ifeq ($(ARCH),tilegx)
55OBJS += tilegx8.o
56endif
57 56
58.c.o: 57.c.o:
59 $(CC) $(CFLAGS) -c -o $@ $< 58 $(CC) $(CFLAGS) -c -o $@ $<
@@ -98,6 +97,18 @@ altivec4.c: altivec.uc ../unroll.awk
98altivec8.c: altivec.uc ../unroll.awk 97altivec8.c: altivec.uc ../unroll.awk
99 $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@ 98 $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
100 99
100vpermxor1.c: vpermxor.uc ../unroll.awk
101 $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
102
103vpermxor2.c: vpermxor.uc ../unroll.awk
104 $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
105
106vpermxor4.c: vpermxor.uc ../unroll.awk
107 $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
108
109vpermxor8.c: vpermxor.uc ../unroll.awk
110 $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
111
101int1.c: int.uc ../unroll.awk 112int1.c: int.uc ../unroll.awk
102 $(AWK) ../unroll.awk -vN=1 < int.uc > $@ 113 $(AWK) ../unroll.awk -vN=1 < int.uc > $@
103 114
@@ -116,15 +127,11 @@ int16.c: int.uc ../unroll.awk
116int32.c: int.uc ../unroll.awk 127int32.c: int.uc ../unroll.awk
117 $(AWK) ../unroll.awk -vN=32 < int.uc > $@ 128 $(AWK) ../unroll.awk -vN=32 < int.uc > $@
118 129
119tilegx8.c: tilegx.uc ../unroll.awk
120 $(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
121
122tables.c: mktables 130tables.c: mktables
123 ./mktables > tables.c 131 ./mktables > tables.c
124 132
125clean: 133clean:
126 rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test 134 rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
127 rm -f tilegx*.c
128 135
129spotless: clean 136spotless: clean
130 rm -f *~ 137 rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
deleted file mode 100644
index 2dd291a11264..000000000000
--- a/lib/raid6/tilegx.uc
+++ /dev/null
@@ -1,87 +0,0 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2002 H. Peter Anvin - All Rights Reserved
4 * Copyright 2012 Tilera Corporation - All Rights Reserved
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
9 * Boston MA 02111-1307, USA; either version 2 of the License, or
10 * (at your option) any later version; incorporated herein by reference.
11 *
12 * ----------------------------------------------------------------------- */
13
14/*
15 * tilegx$#.c
16 *
17 * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
18 *
19 * This file is postprocessed using unroll.awk.
20 *
21 */
22
23#include <linux/raid/pq.h>
24
25/* Create 8 byte copies of constant byte */
26# define NBYTES(x) (__insn_v1addi(0, x))
27# define NSIZE 8
28
29/*
30 * The SHLBYTE() operation shifts each byte left by 1, *not*
31 * rolling over into the next byte
32 */
33static inline __attribute_const__ u64 SHLBYTE(u64 v)
34{
35 /* Vector One Byte Shift Left Immediate. */
36 return __insn_v1shli(v, 1);
37}
38
39/*
40 * The MASK() operation returns 0xFF in any byte for which the high
41 * bit is 1, 0x00 for any byte for which the high bit is 0.
42 */
43static inline __attribute_const__ u64 MASK(u64 v)
44{
45 /* Vector One Byte Shift Right Signed Immediate. */
46 return __insn_v1shrsi(v, 7);
47}
48
49
50void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
51{
52 u8 **dptr = (u8 **)ptrs;
53 u64 *p, *q;
54 int d, z, z0;
55
56 u64 wd$$, wq$$, wp$$, w1$$, w2$$;
57 u64 x1d = NBYTES(0x1d);
58 u64 * z0ptr;
59
60 z0 = disks - 3; /* Highest data disk */
61 p = (u64 *)dptr[z0+1]; /* XOR parity */
62 q = (u64 *)dptr[z0+2]; /* RS syndrome */
63
64 z0ptr = (u64 *)&dptr[z0][0];
65 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
66 wq$$ = wp$$ = *z0ptr++;
67 for ( z = z0-1 ; z >= 0 ; z-- ) {
68 wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
69 wp$$ = wp$$ ^ wd$$;
70 w2$$ = MASK(wq$$);
71 w1$$ = SHLBYTE(wq$$);
72 w2$$ = w2$$ & x1d;
73 w1$$ = w1$$ ^ w2$$;
74 wq$$ = w1$$ ^ wd$$;
75 }
76 *p++ = wp$$;
77 *q++ = wq$$;
78 }
79}
80
81const struct raid6_calls raid6_tilegx$# = {
82 raid6_tilegx$#_gen_syndrome,
83 NULL, /* XOR not yet implemented */
84 NULL,
85 "tilegx$#",
86 0
87};
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
new file mode 100644
index 000000000000..10475dc423c1
--- /dev/null
+++ b/lib/raid6/vpermxor.uc
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2017, Matt Brown, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * vpermxor$#.c
10 *
11 * Based on H. Peter Anvin's paper - The mathematics of RAID-6
12 *
13 * $#-way unrolled VPERMXOR SIMD for RAID-6 math
14 * This file is postprocessed using unroll.awk
15 *
16 * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
17 * syndrome calculations.
18 * This can be run on systems which have both Altivec and the vpermxor instruction.
19 *
20 * This instruction was introduced in POWER8 - ISA v2.07.
21 */
22
23#include <linux/raid/pq.h>
24#ifdef CONFIG_ALTIVEC
25
26#include <altivec.h>
27#ifdef __KERNEL__
28#include <asm/cputable.h>
29#include <asm/ppc-opcode.h>
30#include <asm/switch_to.h>
31#endif
32
33typedef vector unsigned char unative_t;
34#define NSIZE sizeof(unative_t)
35
36static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
37 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
38 0x06, 0x04, 0x02, 0x00};
39static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
40 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
41 0x60, 0x40, 0x20, 0x00};
42
43static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
44 void **ptrs)
45{
46 u8 **dptr = (u8 **)ptrs;
47 u8 *p, *q;
48 int d, z, z0;
49 unative_t wp$$, wq$$, wd$$;
50
51 z0 = disks - 3; /* Highest data disk */
52 p = dptr[z0+1]; /* XOR parity */
53 q = dptr[z0+2]; /* RS syndrome */
54
55 for (d = 0; d < bytes; d += NSIZE*$#) {
56 wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
57
58 for (z = z0-1; z>=0; z--) {
59 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
60 /* P syndrome */
61 wp$$ = vec_xor(wp$$, wd$$);
62
63 /* Q syndrome */
64 asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
65 wq$$ = vec_xor(wq$$, wd$$);
66 }
67 *(unative_t *)&p[d+NSIZE*$$] = wp$$;
68 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
69 }
70}
71
72static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
73{
74 preempt_disable();
75 enable_kernel_altivec();
76
77 raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);
78
79 disable_kernel_altivec();
80 preempt_enable();
81}
82
83int raid6_have_altivec_vpermxor(void);
84#if $# == 1
85int raid6_have_altivec_vpermxor(void)
86{
87 /* Check if arch has both altivec and the vpermxor instructions */
88# ifdef __KERNEL__
89 return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
90 cpu_has_feature(CPU_FTR_ARCH_207S));
91# else
92 return 1;
93#endif
94
95}
96#endif
97
98const struct raid6_calls raid6_vpermxor$# = {
99 raid6_vpermxor$#_gen_syndrome,
100 NULL,
101 raid6_have_altivec_vpermxor,
102 "vpermxor$#",
103 0
104};
105#endif
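
For context, a minimal scalar sketch of the per-byte arithmetic that the VPERMXOR path vectorises: P is a plain XOR across the data disks, while Q is built by repeatedly doubling the running value in GF(2^8) with the RAID-6 polynomial 0x1d and XORing in the next disk's byte. This stand-alone form and its names are mine, for illustration only; it is not part of the patch.

/* Scalar reference for one byte position across 'ndata' data disks
 * (d[0]..d[ndata-1], highest data disk last), matching the recurrence
 * the unrolled vector code implements. Stand-alone; the kernel already
 * provides u8. */
typedef unsigned char u8;

static u8 gf_mul2(u8 v)
{
	/* x*2 in GF(2^8): shift left, reduce by 0x1d if the top bit was set */
	return (u8)((v << 1) ^ ((v & 0x80) ? 0x1d : 0x00));
}

static void gen_pq_byte(const u8 *d, int ndata, u8 *p, u8 *q)
{
	u8 wp = d[ndata - 1], wq = d[ndata - 1];
	int z;

	for (z = ndata - 2; z >= 0; z--) {
		wp ^= d[z];			/* P: XOR parity      */
		wq = gf_mul2(wq) ^ d[z];	/* Q: RS syndrome step */
	}
	*p = wp;
	*q = wq;
}
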
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 3825c30aaa36..2b2b79974b61 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -333,6 +333,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
333 err = rhashtable_rehash_chain(ht, old_hash); 333 err = rhashtable_rehash_chain(ht, old_hash);
334 if (err) 334 if (err)
335 return err; 335 return err;
336 cond_resched();
336 } 337 }
337 338
338 /* Publish the new table pointer. */ 339 /* Publish the new table pointer. */
@@ -506,8 +507,10 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
506 if (!key || 507 if (!key ||
507 (ht->p.obj_cmpfn ? 508 (ht->p.obj_cmpfn ?
508 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) : 509 ht->p.obj_cmpfn(&arg, rht_obj(ht, head)) :
509 rhashtable_compare(&arg, rht_obj(ht, head)))) 510 rhashtable_compare(&arg, rht_obj(ht, head)))) {
511 pprev = &head->next;
510 continue; 512 continue;
513 }
511 514
512 if (!ht->rhlist) 515 if (!ht->rhlist)
513 return rht_obj(ht, head); 516 return rht_obj(ht, head);
@@ -1110,6 +1113,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
1110 for (i = 0; i < tbl->size; i++) { 1113 for (i = 0; i < tbl->size; i++) {
1111 struct rhash_head *pos, *next; 1114 struct rhash_head *pos, *next;
1112 1115
1116 cond_resched();
1113 for (pos = rht_dereference(*rht_bucket(tbl, i), ht), 1117 for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
1114 next = !rht_is_a_nulls(pos) ? 1118 next = !rht_is_a_nulls(pos) ?
1115 rht_dereference(pos->next, ht) : NULL; 1119 rht_dereference(pos->next, ht) : NULL;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 42b5ca0acf93..e6a9c06ec70c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
100 return -1; 100 return -1;
101 } 101 }
102 102
103 if (!test_and_set_bit(nr, word)) 103 if (!test_and_set_bit_lock(nr, word))
104 break; 104 break;
105 105
106 hint = nr + 1; 106 hint = nr + 1;
@@ -434,9 +434,9 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
434 /* 434 /*
435 * Pairs with the memory barrier in set_current_state() to ensure the 435 * Pairs with the memory barrier in set_current_state() to ensure the
436 * proper ordering of clear_bit()/waitqueue_active() in the waker and 436 * proper ordering of clear_bit()/waitqueue_active() in the waker and
437 * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See 437 * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
438 * the comment on waitqueue_active(). This is __after_atomic because we 438 * waiter. See the comment on waitqueue_active(). This is __after_atomic
439 * just did clear_bit() in the caller. 439 * because we just did clear_bit_unlock() in the caller.
440 */ 440 */
441 smp_mb__after_atomic(); 441 smp_mb__after_atomic();
442 442
@@ -469,7 +469,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
469void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 469void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
470 unsigned int cpu) 470 unsigned int cpu)
471{ 471{
472 sbitmap_clear_bit(&sbq->sb, nr); 472 sbitmap_clear_bit_unlock(&sbq->sb, nr);
473 sbq_wake_up(sbq); 473 sbq_wake_up(sbq);
474 if (likely(!sbq->round_robin && nr < sbq->sb.depth)) 474 if (likely(!sbq->round_robin && nr < sbq->sb.depth))
475 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; 475 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
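
For context, a minimal sketch of the acquire/release pairing the sbitmap change above relies on, written as an assumed caller-style illustration rather than sbitmap internals: test_and_set_bit_lock() gives the single winner acquire semantics on a slot, and clear_bit_unlock() releases it so the owner's prior stores are visible before the bit can be claimed again. The claim_slot/release_slot names are hypothetical.

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical slot bitmap protected by the lock/unlock bit ops. */
static bool claim_slot(unsigned long *map, unsigned int nr)
{
	/* acquire: returns true only for the one caller that flips the bit */
	return !test_and_set_bit_lock(nr, map);
}

static void release_slot(unsigned long *map, unsigned int nr)
{
	/* release: pairs with the acquire in claim_slot() */
	clear_bit_unlock(nr, map);
}
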
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 53728d391d3a..06dad7a072fd 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -132,14 +132,7 @@ EXPORT_SYMBOL(sg_last);
132void sg_init_table(struct scatterlist *sgl, unsigned int nents) 132void sg_init_table(struct scatterlist *sgl, unsigned int nents)
133{ 133{
134 memset(sgl, 0, sizeof(*sgl) * nents); 134 memset(sgl, 0, sizeof(*sgl) * nents);
135#ifdef CONFIG_DEBUG_SG 135 sg_init_marker(sgl, nents);
136 {
137 unsigned int i;
138 for (i = 0; i < nents; i++)
139 sgl[i].sg_magic = SG_MAGIC;
140 }
141#endif
142 sg_mark_end(&sgl[nents - 1]);
143} 136}
144EXPORT_SYMBOL(sg_init_table); 137EXPORT_SYMBOL(sg_init_table);
145 138
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..47aeb04c1997 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
31#include <linux/gfp.h> 31#include <linux/gfp.h>
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/mem_encrypt.h> 33#include <linux/mem_encrypt.h>
34#include <linux/set_memory.h>
34 35
35#include <asm/io.h> 36#include <asm/io.h>
36#include <asm/dma.h> 37#include <asm/dma.h>
@@ -156,22 +157,6 @@ unsigned long swiotlb_size_or_default(void)
156 return size ? size : (IO_TLB_DEFAULT_SIZE); 157 return size ? size : (IO_TLB_DEFAULT_SIZE);
157} 158}
158 159
159void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
160
161/* For swiotlb, clear memory encryption mask from dma addresses */
162static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
163 phys_addr_t address)
164{
165 return __sme_clr(phys_to_dma(hwdev, address));
166}
167
168/* Note that this doesn't work with highmem page */
169static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
170 volatile void *address)
171{
172 return phys_to_dma(hwdev, virt_to_phys(address));
173}
174
175static bool no_iotlb_memory; 160static bool no_iotlb_memory;
176 161
177void swiotlb_print_info(void) 162void swiotlb_print_info(void)
@@ -209,12 +194,12 @@ void __init swiotlb_update_mem_attributes(void)
209 194
210 vaddr = phys_to_virt(io_tlb_start); 195 vaddr = phys_to_virt(io_tlb_start);
211 bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); 196 bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
212 swiotlb_set_mem_attributes(vaddr, bytes); 197 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
213 memset(vaddr, 0, bytes); 198 memset(vaddr, 0, bytes);
214 199
215 vaddr = phys_to_virt(io_tlb_overflow_buffer); 200 vaddr = phys_to_virt(io_tlb_overflow_buffer);
216 bytes = PAGE_ALIGN(io_tlb_overflow); 201 bytes = PAGE_ALIGN(io_tlb_overflow);
217 swiotlb_set_mem_attributes(vaddr, bytes); 202 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
218 memset(vaddr, 0, bytes); 203 memset(vaddr, 0, bytes);
219} 204}
220 205
@@ -355,7 +340,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
355 io_tlb_start = virt_to_phys(tlb); 340 io_tlb_start = virt_to_phys(tlb);
356 io_tlb_end = io_tlb_start + bytes; 341 io_tlb_end = io_tlb_start + bytes;
357 342
358 swiotlb_set_mem_attributes(tlb, bytes); 343 set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
359 memset(tlb, 0, bytes); 344 memset(tlb, 0, bytes);
360 345
361 /* 346 /*
@@ -366,7 +351,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
366 if (!v_overflow_buffer) 351 if (!v_overflow_buffer)
367 goto cleanup2; 352 goto cleanup2;
368 353
369 swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); 354 set_memory_decrypted((unsigned long)v_overflow_buffer,
355 io_tlb_overflow >> PAGE_SHIFT);
370 memset(v_overflow_buffer, 0, io_tlb_overflow); 356 memset(v_overflow_buffer, 0, io_tlb_overflow);
371 io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); 357 io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
372 358
@@ -622,7 +608,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
622 return SWIOTLB_MAP_ERROR; 608 return SWIOTLB_MAP_ERROR;
623 } 609 }
624 610
625 start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start); 611 start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
626 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, 612 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
627 dir, attrs); 613 dir, attrs);
628} 614}
@@ -706,6 +692,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
706 } 692 }
707} 693}
708 694
695#ifdef CONFIG_DMA_DIRECT_OPS
709static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, 696static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
710 size_t size) 697 size_t size)
711{ 698{
@@ -726,12 +713,12 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
726 goto out_warn; 713 goto out_warn;
727 714
728 phys_addr = swiotlb_tbl_map_single(dev, 715 phys_addr = swiotlb_tbl_map_single(dev,
729 swiotlb_phys_to_dma(dev, io_tlb_start), 716 __phys_to_dma(dev, io_tlb_start),
730 0, size, DMA_FROM_DEVICE, 0); 717 0, size, DMA_FROM_DEVICE, 0);
731 if (phys_addr == SWIOTLB_MAP_ERROR) 718 if (phys_addr == SWIOTLB_MAP_ERROR)
732 goto out_warn; 719 goto out_warn;
733 720
734 *dma_handle = swiotlb_phys_to_dma(dev, phys_addr); 721 *dma_handle = __phys_to_dma(dev, phys_addr);
735 if (dma_coherent_ok(dev, *dma_handle, size)) 722 if (dma_coherent_ok(dev, *dma_handle, size))
736 goto out_unmap; 723 goto out_unmap;
737 724
@@ -759,28 +746,6 @@ out_warn:
759 return NULL; 746 return NULL;
760} 747}
761 748
762void *
763swiotlb_alloc_coherent(struct device *hwdev, size_t size,
764 dma_addr_t *dma_handle, gfp_t flags)
765{
766 int order = get_order(size);
767 unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
768 void *ret;
769
770 ret = (void *)__get_free_pages(flags, order);
771 if (ret) {
772 *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
773 if (dma_coherent_ok(hwdev, *dma_handle, size)) {
774 memset(ret, 0, size);
775 return ret;
776 }
777 free_pages((unsigned long)ret, order);
778 }
779
780 return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
781}
782EXPORT_SYMBOL(swiotlb_alloc_coherent);
783
784static bool swiotlb_free_buffer(struct device *dev, size_t size, 749static bool swiotlb_free_buffer(struct device *dev, size_t size,
785 dma_addr_t dma_addr) 750 dma_addr_t dma_addr)
786{ 751{
@@ -799,15 +764,7 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
799 DMA_ATTR_SKIP_CPU_SYNC); 764 DMA_ATTR_SKIP_CPU_SYNC);
800 return true; 765 return true;
801} 766}
802 767#endif
803void
804swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
805 dma_addr_t dev_addr)
806{
807 if (!swiotlb_free_buffer(hwdev, size, dev_addr))
808 free_pages((unsigned long)vaddr, get_order(size));
809}
810EXPORT_SYMBOL(swiotlb_free_coherent);
811 768
812static void 769static void
813swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, 770swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
@@ -867,10 +824,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
867 map = map_single(dev, phys, size, dir, attrs); 824 map = map_single(dev, phys, size, dir, attrs);
868 if (map == SWIOTLB_MAP_ERROR) { 825 if (map == SWIOTLB_MAP_ERROR) {
869 swiotlb_full(dev, size, dir, 1); 826 swiotlb_full(dev, size, dir, 1);
870 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 827 return __phys_to_dma(dev, io_tlb_overflow_buffer);
871 } 828 }
872 829
873 dev_addr = swiotlb_phys_to_dma(dev, map); 830 dev_addr = __phys_to_dma(dev, map);
874 831
875 /* Ensure that the address returned is DMA'ble */ 832 /* Ensure that the address returned is DMA'ble */
876 if (dma_capable(dev, dev_addr, size)) 833 if (dma_capable(dev, dev_addr, size))
@@ -879,7 +836,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
879 attrs |= DMA_ATTR_SKIP_CPU_SYNC; 836 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
880 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); 837 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
881 838
882 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 839 return __phys_to_dma(dev, io_tlb_overflow_buffer);
883} 840}
884 841
885/* 842/*
@@ -1009,7 +966,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
1009 sg_dma_len(sgl) = 0; 966 sg_dma_len(sgl) = 0;
1010 return 0; 967 return 0;
1011 } 968 }
1012 sg->dma_address = swiotlb_phys_to_dma(hwdev, map); 969 sg->dma_address = __phys_to_dma(hwdev, map);
1013 } else 970 } else
1014 sg->dma_address = dev_addr; 971 sg->dma_address = dev_addr;
1015 sg_dma_len(sg) = sg->length; 972 sg_dma_len(sg) = sg->length;
@@ -1073,7 +1030,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
1073int 1030int
1074swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 1031swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1075{ 1032{
1076 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer)); 1033 return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
1077} 1034}
1078 1035
1079/* 1036/*
@@ -1085,7 +1042,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1085int 1042int
1086swiotlb_dma_supported(struct device *hwdev, u64 mask) 1043swiotlb_dma_supported(struct device *hwdev, u64 mask)
1087{ 1044{
1088 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; 1045 return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
1089} 1046}
1090 1047
1091#ifdef CONFIG_DMA_DIRECT_OPS 1048#ifdef CONFIG_DMA_DIRECT_OPS
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index b3f235baa05d..413367cf569e 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
255 {-EINVAL, "-1", NULL, 8, 0}, 255 {-EINVAL, "-1", NULL, 8, 0},
256 {-EINVAL, "-0", NULL, 8, 0}, 256 {-EINVAL, "-0", NULL, 8, 0},
257 {-EINVAL, "10-1", NULL, 8, 0}, 257 {-EINVAL, "10-1", NULL, 8, 0},
258 {-EINVAL, "0-31:", NULL, 8, 0},
259 {-EINVAL, "0-31:0", NULL, 8, 0},
260 {-EINVAL, "0-31:0/0", NULL, 8, 0},
261 {-EINVAL, "0-31:1/0", NULL, 8, 0},
258 {-EINVAL, "0-31:10/1", NULL, 8, 0}, 262 {-EINVAL, "0-31:10/1", NULL, 8, 0},
259}; 263};
260 264
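
For readers unfamiliar with the syntax these new cases reject: bitmap_parselist() accepts an optional ":used/group" suffix on a range, and the added entries check that empty or zero-sized used/group fields return -EINVAL. A small sketch of the valid form follows; it is illustrative only and not part of the test.

#include <linux/bitmap.h>
#include <linux/types.h>

/* "0-31:4/8" sets the first 4 bits of every 8-bit group within bits
 * 0..31, i.e. the mask 0x0f0f0f0f; returns 0 on success or a negative
 * errno such as -EINVAL for malformed strings like those above. */
static int parselist_example(void)
{
	DECLARE_BITMAP(mask, 32);

	return bitmap_parselist("0-31:4/8", mask, 32);
}
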
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index b4e22345963f..8e157806df7a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -24,10 +24,11 @@
24#include <linux/if_vlan.h> 24#include <linux/if_vlan.h>
25#include <linux/random.h> 25#include <linux/random.h>
26#include <linux/highmem.h> 26#include <linux/highmem.h>
27#include <linux/sched.h>
27 28
28/* General test specific settings */ 29/* General test specific settings */
29#define MAX_SUBTESTS 3 30#define MAX_SUBTESTS 3
30#define MAX_TESTRUNS 10000 31#define MAX_TESTRUNS 1000
31#define MAX_DATA 128 32#define MAX_DATA 128
32#define MAX_INSNS 512 33#define MAX_INSNS 512
33#define MAX_K 0xffffFFFF 34#define MAX_K 0xffffFFFF
@@ -5466,7 +5467,7 @@ static struct bpf_test tests[] = {
5466 { 5467 {
5467 "BPF_MAXINSNS: Jump, gap, jump, ...", 5468 "BPF_MAXINSNS: Jump, gap, jump, ...",
5468 { }, 5469 { },
5469#ifdef CONFIG_BPF_JIT_ALWAYS_ON 5470#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_X86)
5470 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, 5471 CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
5471#else 5472#else
5472 CLASSIC | FLAG_NO_DATA, 5473 CLASSIC | FLAG_NO_DATA,
@@ -6573,6 +6574,93 @@ static bool exclude_test(int test_id)
6573 return test_id < test_range[0] || test_id > test_range[1]; 6574 return test_id < test_range[0] || test_id > test_range[1];
6574} 6575}
6575 6576
6577static __init struct sk_buff *build_test_skb(void)
6578{
6579 u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
6580 struct sk_buff *skb[2];
6581 struct page *page[2];
6582 int i, data_size = 8;
6583
6584 for (i = 0; i < 2; i++) {
6585 page[i] = alloc_page(GFP_KERNEL);
6586 if (!page[i]) {
6587 if (i == 0)
6588 goto err_page0;
6589 else
6590 goto err_page1;
6591 }
6592
6593 /* this will set skb[i]->head_frag */
6594 skb[i] = dev_alloc_skb(headroom + data_size);
6595 if (!skb[i]) {
6596 if (i == 0)
6597 goto err_skb0;
6598 else
6599 goto err_skb1;
6600 }
6601
6602 skb_reserve(skb[i], headroom);
6603 skb_put(skb[i], data_size);
6604 skb[i]->protocol = htons(ETH_P_IP);
6605 skb_reset_network_header(skb[i]);
6606 skb_set_mac_header(skb[i], -ETH_HLEN);
6607
6608 skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
6609 // skb_headlen(skb[i]): 8, skb[i]->head_frag = 1
6610 }
6611
6612 /* setup shinfo */
6613 skb_shinfo(skb[0])->gso_size = 1448;
6614 skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
6615 skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
6616 skb_shinfo(skb[0])->gso_segs = 0;
6617 skb_shinfo(skb[0])->frag_list = skb[1];
6618
6619 /* adjust skb[0]'s len */
6620 skb[0]->len += skb[1]->len;
6621 skb[0]->data_len += skb[1]->data_len;
6622 skb[0]->truesize += skb[1]->truesize;
6623
6624 return skb[0];
6625
6626err_skb1:
6627 __free_page(page[1]);
6628err_page1:
6629 kfree_skb(skb[0]);
6630err_skb0:
6631 __free_page(page[0]);
6632err_page0:
6633 return NULL;
6634}
6635
6636static __init int test_skb_segment(void)
6637{
6638 netdev_features_t features;
6639 struct sk_buff *skb, *segs;
6640 int ret = -1;
6641
6642 features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
6643 NETIF_F_IPV6_CSUM;
6644 features |= NETIF_F_RXCSUM;
6645 skb = build_test_skb();
6646 if (!skb) {
6647 pr_info("%s: failed to build_test_skb", __func__);
6648 goto done;
6649 }
6650
6651 segs = skb_segment(skb, features);
6652 if (!IS_ERR(segs)) {
6653 kfree_skb_list(segs);
6654 ret = 0;
6655 pr_info("%s: success in skb_segment!", __func__);
6656 } else {
6657 pr_info("%s: failed in skb_segment!", __func__);
6658 }
6659 kfree_skb(skb);
6660done:
6661 return ret;
6662}
6663
6576static __init int test_bpf(void) 6664static __init int test_bpf(void)
6577{ 6665{
6578 int i, err_cnt = 0, pass_cnt = 0; 6666 int i, err_cnt = 0, pass_cnt = 0;
@@ -6582,6 +6670,7 @@ static __init int test_bpf(void)
6582 struct bpf_prog *fp; 6670 struct bpf_prog *fp;
6583 int err; 6671 int err;
6584 6672
6673 cond_resched();
6585 if (exclude_test(i)) 6674 if (exclude_test(i))
6586 continue; 6675 continue;
6587 6676
@@ -6630,9 +6719,11 @@ static int __init test_bpf_init(void)
6630 return ret; 6719 return ret;
6631 6720
6632 ret = test_bpf(); 6721 ret = test_bpf();
6633
6634 destroy_bpf_tests(); 6722 destroy_bpf_tests();
6635 return ret; 6723 if (ret)
6724 return ret;
6725
6726 return test_skb_segment();
6636} 6727}
6637 6728
6638static void __exit test_bpf_exit(void) 6729static void __exit test_bpf_exit(void)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 078a61480573..cee000ac54d8 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -21,6 +21,7 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/vmalloc.h>
24 25
25#define TEST_FIRMWARE_NAME "test-firmware.bin" 26#define TEST_FIRMWARE_NAME "test-firmware.bin"
26#define TEST_FIRMWARE_NUM_REQS 4 27#define TEST_FIRMWARE_NUM_REQS 4
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e372b97eee13..0e5b7a61460b 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1141,7 +1141,7 @@ static struct kmod_test_device *register_test_dev_kmod(void)
1141 mutex_lock(&reg_dev_mutex); 1141 mutex_lock(&reg_dev_mutex);
1142 1142
1143 /* int should suffice for number of devices, test for wrap */ 1143 /* int should suffice for number of devices, test for wrap */
1144 if (unlikely(num_test_devs + 1) < 0) { 1144 if (num_test_devs + 1 == INT_MAX) {
1145 pr_err("reached limit of number of test devices\n"); 1145 pr_err("reached limit of number of test devices\n");
1146 goto out; 1146 goto out;
1147 } 1147 }
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 76d3667fdea2..f4000c137dbe 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -79,6 +79,21 @@ struct thread_data {
79 struct test_obj *objs; 79 struct test_obj *objs;
80}; 80};
81 81
82static u32 my_hashfn(const void *data, u32 len, u32 seed)
83{
84 const struct test_obj_rhl *obj = data;
85
86 return (obj->value.id % 10) << RHT_HASH_RESERVED_SPACE;
87}
88
89static int my_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
90{
91 const struct test_obj_rhl *test_obj = obj;
92 const struct test_obj_val *val = arg->key;
93
94 return test_obj->value.id - val->id;
95}
96
82static struct rhashtable_params test_rht_params = { 97static struct rhashtable_params test_rht_params = {
83 .head_offset = offsetof(struct test_obj, node), 98 .head_offset = offsetof(struct test_obj, node),
84 .key_offset = offsetof(struct test_obj, value), 99 .key_offset = offsetof(struct test_obj, value),
@@ -87,6 +102,17 @@ static struct rhashtable_params test_rht_params = {
87 .nulls_base = (3U << RHT_BASE_SHIFT), 102 .nulls_base = (3U << RHT_BASE_SHIFT),
88}; 103};
89 104
105static struct rhashtable_params test_rht_params_dup = {
106 .head_offset = offsetof(struct test_obj_rhl, list_node),
107 .key_offset = offsetof(struct test_obj_rhl, value),
108 .key_len = sizeof(struct test_obj_val),
109 .hashfn = jhash,
110 .obj_hashfn = my_hashfn,
111 .obj_cmpfn = my_cmpfn,
112 .nelem_hint = 128,
113 .automatic_shrinking = false,
114};
115
90static struct semaphore prestart_sem; 116static struct semaphore prestart_sem;
91static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); 117static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0);
92 118
@@ -465,6 +491,112 @@ static int __init test_rhashtable_max(struct test_obj *array,
465 return err; 491 return err;
466} 492}
467 493
494static unsigned int __init print_ht(struct rhltable *rhlt)
495{
496 struct rhashtable *ht;
497 const struct bucket_table *tbl;
498 char buff[512] = "";
499 unsigned int i, cnt = 0;
500
501 ht = &rhlt->ht;
502 tbl = rht_dereference(ht->tbl, ht);
503 for (i = 0; i < tbl->size; i++) {
504 struct rhash_head *pos, *next;
505 struct test_obj_rhl *p;
506
507 pos = rht_dereference(tbl->buckets[i], ht);
508 next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
509
510 if (!rht_is_a_nulls(pos)) {
511 sprintf(buff, "%s\nbucket[%d] -> ", buff, i);
512 }
513
514 while (!rht_is_a_nulls(pos)) {
515 struct rhlist_head *list = container_of(pos, struct rhlist_head, rhead);
516 sprintf(buff, "%s[[", buff);
517 do {
518 pos = &list->rhead;
519 list = rht_dereference(list->next, ht);
520 p = rht_obj(ht, pos);
521
522 sprintf(buff, "%s val %d (tid=%d)%s", buff, p->value.id, p->value.tid,
523 list? ", " : " ");
524 cnt++;
525 } while (list);
526
527 pos = next,
528 next = !rht_is_a_nulls(pos) ?
529 rht_dereference(pos->next, ht) : NULL;
530
531 sprintf(buff, "%s]]%s", buff, !rht_is_a_nulls(pos) ? " -> " : "");
532 }
533 }
534 printk(KERN_ERR "\n---- ht: ----%s\n-------------\n", buff);
535
536 return cnt;
537}
538
539static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
540 int cnt, bool slow)
541{
542 struct rhltable rhlt;
543 unsigned int i, ret;
544 const char *key;
545 int err = 0;
546
547 err = rhltable_init(&rhlt, &test_rht_params_dup);
548 if (WARN_ON(err))
549 return err;
550
551 for (i = 0; i < cnt; i++) {
552 rhl_test_objects[i].value.tid = i;
553 key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
554 key += test_rht_params_dup.key_offset;
555
556 if (slow) {
557 err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
558 &rhl_test_objects[i].list_node.rhead));
559 if (err == -EAGAIN)
560 err = 0;
561 } else
562 err = rhltable_insert(&rhlt,
563 &rhl_test_objects[i].list_node,
564 test_rht_params_dup);
565 if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
566 goto skip_print;
567 }
568
569 ret = print_ht(&rhlt);
570 WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
571
572skip_print:
573 rhltable_destroy(&rhlt);
574
575 return 0;
576}
577
578static int __init test_insert_duplicates_run(void)
579{
580 struct test_obj_rhl rhl_test_objects[3] = {};
581
582 pr_info("test inserting duplicates\n");
583
584 /* two different values that map to same bucket */
585 rhl_test_objects[0].value.id = 1;
586 rhl_test_objects[1].value.id = 21;
587
588	/* and another duplicate with the same value as [0],
589 * which will be second on the bucket list */
590 rhl_test_objects[2].value.id = rhl_test_objects[0].value.id;
591
592 test_insert_dup(rhl_test_objects, 2, false);
593 test_insert_dup(rhl_test_objects, 3, false);
594 test_insert_dup(rhl_test_objects, 2, true);
595 test_insert_dup(rhl_test_objects, 3, true);
596
597 return 0;
598}
599
468static int thread_lookup_test(struct thread_data *tdata) 600static int thread_lookup_test(struct thread_data *tdata)
469{ 601{
470 unsigned int entries = tdata->entries; 602 unsigned int entries = tdata->entries;
@@ -613,6 +745,8 @@ static int __init test_rht_init(void)
613 do_div(total_time, runs); 745 do_div(total_time, runs);
614 pr_info("Average test time: %llu\n", total_time); 746 pr_info("Average test time: %llu\n", total_time);
615 747
748 test_insert_duplicates_run();
749
616 if (!tcount) 750 if (!tcount)
617 return 0; 751 return 0;
618 752
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 4621db801b23..e161f0498f42 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,11 +31,8 @@
31 * their capability at compile-time, we just have to opt-out certain archs. 31 * their capability at compile-time, we just have to opt-out certain archs.
32 */ 32 */
33#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \ 33#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
34 !defined(CONFIG_BLACKFIN) && \
35 !defined(CONFIG_M32R) && \
36 !defined(CONFIG_M68K) && \ 34 !defined(CONFIG_M68K) && \
37 !defined(CONFIG_MICROBLAZE) && \ 35 !defined(CONFIG_MICROBLAZE) && \
38 !defined(CONFIG_MN10300) && \
39 !defined(CONFIG_NIOS2) && \ 36 !defined(CONFIG_NIOS2) && \
40 !defined(CONFIG_PPC32) && \ 37 !defined(CONFIG_PPC32) && \
41 !defined(CONFIG_SUPERH)) 38 !defined(CONFIG_SUPERH))
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index dd0a359c135b..7920cbbfeae9 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -3,16 +3,7 @@ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
3 3
4ccflags-y += -O3 4ccflags-y += -O3
5 5
6# Object files unique to zstd_compress and zstd_decompress 6zstd_compress-y := fse_compress.o huf_compress.o compress.o \
7zstd_compress-y := fse_compress.o huf_compress.o compress.o 7 entropy_common.o fse_decompress.o zstd_common.o
8zstd_decompress-y := huf_decompress.o decompress.o 8zstd_decompress-y := huf_decompress.o decompress.o \
9 9 entropy_common.o fse_decompress.o zstd_common.o
10# These object files are shared between the modules.
11# Always add them to zstd_compress.
12# Unless both zstd_compress and zstd_decompress are built in
13# then also add them to zstd_decompress.
14zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
15
16ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
17 zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
18endif