Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig               |  16
-rw-r--r--  lib/Kconfig.debug         | 213
-rw-r--r--  lib/Kconfig.ubsan         |   7
-rw-r--r--  lib/Makefile              |   8
-rw-r--r--  lib/bitmap.c              |   2
-rw-r--r--  lib/debugobjects.c        | 141
-rw-r--r--  lib/devres.c              |  78
-rw-r--r--  lib/dma-direct.c          |  38
-rw-r--r--  lib/dump_stack.c          |  60
-rw-r--r--  lib/errseq.c              |  23
-rw-r--r--  lib/find_bit_benchmark.c  |   7
-rw-r--r--  lib/int_sqrt.c            |  30
-rw-r--r--  lib/iov_iter.c            |   4
-rw-r--r--  lib/kfifo.c               |   2
-rw-r--r--  lib/kobject.c             |  46
-rw-r--r--  lib/kobject_uevent.c      |  96
-rw-r--r--  lib/list_debug.c          |  14
-rw-r--r--  lib/lockref.c             |  28
-rw-r--r--  lib/logic_pio.c           | 280
-rw-r--r--  lib/radix-tree.c          |   9
-rw-r--r--  lib/raid6/.gitignore      |   1
-rw-r--r--  lib/raid6/Makefile        |  33
-rw-r--r--  lib/raid6/algos.c         |   7
-rw-r--r--  lib/raid6/altivec.uc      |   3
-rw-r--r--  lib/raid6/sse2.c          |  14
-rw-r--r--  lib/raid6/test/Makefile   |  29
-rw-r--r--  lib/raid6/tilegx.uc       |  87
-rw-r--r--  lib/raid6/vpermxor.uc     | 105
-rw-r--r--  lib/rhashtable.c          |   2
-rw-r--r--  lib/sbitmap.c             |  10
-rw-r--r--  lib/scatterlist.c         |   9
-rw-r--r--  lib/sha256.c              | 283
-rw-r--r--  lib/swiotlb.c             |  85
-rw-r--r--  lib/test_bitmap.c         |  35
-rw-r--r--  lib/test_bpf.c            |  93
-rw-r--r--  lib/test_firmware.c       |   1
-rw-r--r--  lib/test_kasan.c          |   8
-rw-r--r--  lib/test_ubsan.c          | 144
-rw-r--r--  lib/test_user_copy.c      |   3
-rw-r--r--  lib/textsearch.c          |  40
-rw-r--r--  lib/vsprintf.c            |  48
-rw-r--r--  lib/zstd/Makefile         |  17
42 files changed, 1642 insertions(+), 517 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index e96089499371..5fe577673b98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -55,6 +55,22 @@ config ARCH_USE_CMPXCHG_LOCKREF
 config ARCH_HAS_FAST_MULTIPLIER
 	bool
 
+config INDIRECT_PIO
+	bool "Access I/O in non-MMIO mode"
+	depends on ARM64
+	help
+	  On some platforms where no separate I/O space exists, there are I/O
+	  hosts which can not be accessed in MMIO mode. Using the logical PIO
+	  mechanism, the host-local I/O resource can be mapped into system
+	  logic PIO space shared with MMIO hosts, such as PCI/PCIe, then the
+	  system can access the I/O devices with the mapped-logic PIO through
+	  I/O accessors.
+
+	  This way has relatively little I/O performance cost. Please make
+	  sure your devices really need this configure item enabled.
+
+	  When in doubt, say N.
+
 config CRC_CCITT
 	tristate "CRC-CCITT functions"
 	help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 64155e310a9f..c40c7b734cd1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -165,7 +165,7 @@ config DEBUG_INFO_REDUCED
 
 config DEBUG_INFO_SPLIT
 	bool "Produce split debuginfo in .dwo files"
-	depends on DEBUG_INFO && !FRV
+	depends on DEBUG_INFO
 	help
 	  Generate debug info into separate .dwo files. This significantly
 	  reduces the build directory size for builds with DEBUG_INFO,
@@ -324,11 +324,11 @@ config DEBUG_SECTION_MISMATCH
 	  the analysis would not catch the illegal reference.
 	  This option tells gcc to inline less (but it does result in
 	  a larger kernel).
-	  - Run the section mismatch analysis for each module/built-in.o file.
+	  - Run the section mismatch analysis for each module/built-in.a file.
 	    When we run the section mismatch analysis on vmlinux.o, we
 	    lose valuable information about where the mismatch was
 	    introduced.
-	    Running the analysis for each module/built-in.o file
+	    Running the analysis for each module/built-in.a file
 	    tells where the mismatch happens much closer to the
 	    source. The drawback is that the same mismatch is
 	    reported at least twice.
@@ -354,10 +354,7 @@ config ARCH_WANT_FRAME_POINTERS
 
 config FRAME_POINTER
 	bool "Compile the kernel with frame pointers"
-	depends on DEBUG_KERNEL && \
-		(CRIS || M68K || FRV || UML || \
-		 SUPERH || BLACKFIN || MN10300 || METAG) || \
-		ARCH_WANT_FRAME_POINTERS
+	depends on DEBUG_KERNEL && (M68K || UML || SUPERH) || ARCH_WANT_FRAME_POINTERS
 	default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
 	help
 	  If you say Y here the resulting kernel image will be slightly
@@ -803,6 +800,30 @@ config SOFTLOCKUP_DETECTOR
 	  chance to run. The current stack trace is displayed upon
 	  detection and the system will stay locked up.
 
+config BOOTPARAM_SOFTLOCKUP_PANIC
+	bool "Panic (Reboot) On Soft Lockups"
+	depends on SOFTLOCKUP_DETECTOR
+	help
+	  Say Y here to enable the kernel to panic on "soft lockups",
+	  which are bugs that cause the kernel to loop in kernel
+	  mode for more than 20 seconds (configurable using the watchdog_thresh
+	  sysctl), without giving other tasks a chance to run.
+
+	  The panic can be used in combination with panic_timeout,
+	  to cause the system to reboot automatically after a
+	  lockup has been detected. This feature is useful for
+	  high-availability systems that have uptime guarantees and
+	  where a lockup must be resolved ASAP.
+
+	  Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+	int
+	depends on SOFTLOCKUP_DETECTOR
+	range 0 1
+	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
 config HARDLOCKUP_DETECTOR_PERF
 	bool
 	select SOFTLOCKUP_DETECTOR
@@ -852,30 +873,6 @@ config BOOTPARAM_HARDLOCKUP_PANIC_VALUE
 	default 0 if !BOOTPARAM_HARDLOCKUP_PANIC
 	default 1 if BOOTPARAM_HARDLOCKUP_PANIC
 
-config BOOTPARAM_SOFTLOCKUP_PANIC
-	bool "Panic (Reboot) On Soft Lockups"
-	depends on SOFTLOCKUP_DETECTOR
-	help
-	  Say Y here to enable the kernel to panic on "soft lockups",
-	  which are bugs that cause the kernel to loop in kernel
-	  mode for more than 20 seconds (configurable using the watchdog_thresh
-	  sysctl), without giving other tasks a chance to run.
-
-	  The panic can be used in combination with panic_timeout,
-	  to cause the system to reboot automatically after a
-	  lockup has been detected. This feature is useful for
-	  high-availability systems that have uptime guarantees and
-	  where a lockup must be resolved ASAP.
-
-	  Say N if unsure.
-
-config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
-	int
-	depends on SOFTLOCKUP_DETECTOR
-	range 0 1
-	default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
-	default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
-
 config DETECT_HUNG_TASK
 	bool "Detect Hung Tasks"
 	depends on DEBUG_KERNEL
@@ -1034,69 +1031,20 @@ config DEBUG_PREEMPT
 
 menu "Lock Debugging (spinlocks, mutexes, etc...)"
 
-config DEBUG_RT_MUTEXES
-	bool "RT Mutex debugging, deadlock detection"
-	depends on DEBUG_KERNEL && RT_MUTEXES
-	help
-	 This allows rt mutex semantics violations and rt mutex related
-	 deadlocks (lockups) to be detected and reported automatically.
-
-config DEBUG_SPINLOCK
-	bool "Spinlock and rw-lock debugging: basic checks"
-	depends on DEBUG_KERNEL
-	select UNINLINE_SPIN_UNLOCK
-	help
-	  Say Y here and build SMP to catch missing spinlock initialization
-	  and certain other kinds of spinlock errors commonly made.  This is
-	  best used in conjunction with the NMI watchdog so that spinlock
-	  deadlocks are also debuggable.
-
-config DEBUG_MUTEXES
-	bool "Mutex debugging: basic checks"
-	depends on DEBUG_KERNEL
-	help
-	 This feature allows mutex semantics violations to be detected and
-	 reported.
-
-config DEBUG_WW_MUTEX_SLOWPATH
-	bool "Wait/wound mutex debugging: Slowpath testing"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-	select DEBUG_LOCK_ALLOC
-	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
-	help
-	 This feature enables slowpath testing for w/w mutex users by
-	 injecting additional -EDEADLK wound/backoff cases. Together with
-	 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
-	 will test all possible w/w mutex interface abuse with the
-	 exception of simply not acquiring all the required locks.
-	 Note that this feature can introduce significant overhead, so
-	 it really should not be enabled in a production or distro kernel,
-	 even a debug kernel.  If you are a driver writer, enable it.  If
-	 you are a distro, do not.
-
-config DEBUG_LOCK_ALLOC
-	bool "Lock debugging: detect incorrect freeing of live locks"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-	select DEBUG_SPINLOCK
-	select DEBUG_MUTEXES
-	select DEBUG_RT_MUTEXES if RT_MUTEXES
-	select LOCKDEP
-	help
-	 This feature will check whether any held lock (spinlock, rwlock,
-	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
-	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
-	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
-	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
-	 held during task exit.
+config LOCK_DEBUGGING_SUPPORT
+	bool
+	depends on TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	default y
 
 config PROVE_LOCKING
 	bool "Lock debugging: prove locking correctness"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
 	select DEBUG_RT_MUTEXES if RT_MUTEXES
+	select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
+	select DEBUG_WW_MUTEX_SLOWPATH
 	select DEBUG_LOCK_ALLOC
 	select TRACE_IRQFLAGS
 	default n
@@ -1134,20 +1082,9 @@ config PROVE_LOCKING
 
 	 For more details, see Documentation/locking/lockdep-design.txt.
 
-config LOCKDEP
-	bool
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
-	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE && !X86
-	select KALLSYMS
-	select KALLSYMS_ALL
-
-config LOCKDEP_SMALL
-	bool
-
 config LOCK_STAT
 	bool "Lock usage statistics"
-	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
+	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
@@ -1167,6 +1104,80 @@ config LOCK_STAT
 	 CONFIG_LOCK_STAT defines "contended" and "acquired" lock events.
 	 (CONFIG_LOCKDEP defines "acquire" and "release" events.)
 
+config DEBUG_RT_MUTEXES
+	bool "RT Mutex debugging, deadlock detection"
+	depends on DEBUG_KERNEL && RT_MUTEXES
+	help
+	 This allows rt mutex semantics violations and rt mutex related
+	 deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_SPINLOCK
+	bool "Spinlock and rw-lock debugging: basic checks"
+	depends on DEBUG_KERNEL
+	select UNINLINE_SPIN_UNLOCK
+	help
+	  Say Y here and build SMP to catch missing spinlock initialization
+	  and certain other kinds of spinlock errors commonly made.  This is
+	  best used in conjunction with the NMI watchdog so that spinlock
+	  deadlocks are also debuggable.
+
+config DEBUG_MUTEXES
+	bool "Mutex debugging: basic checks"
+	depends on DEBUG_KERNEL
+	help
+	 This feature allows mutex semantics violations to be detected and
+	 reported.
+
+config DEBUG_WW_MUTEX_SLOWPATH
+	bool "Wait/wound mutex debugging: Slowpath testing"
+	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+	select DEBUG_LOCK_ALLOC
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	help
+	 This feature enables slowpath testing for w/w mutex users by
+	 injecting additional -EDEADLK wound/backoff cases. Together with
+	 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
+	 will test all possible w/w mutex interface abuse with the
+	 exception of simply not acquiring all the required locks.
+	 Note that this feature can introduce significant overhead, so
+	 it really should not be enabled in a production or distro kernel,
+	 even a debug kernel.  If you are a driver writer, enable it.  If
+	 you are a distro, do not.
+
+config DEBUG_RWSEMS
+	bool "RW Semaphore debugging: basic checks"
+	depends on DEBUG_KERNEL && RWSEM_SPIN_ON_OWNER
+	help
+	  This debugging feature allows mismatched rw semaphore locks and unlocks
+	  to be detected and reported.
+
+config DEBUG_LOCK_ALLOC
+	bool "Lock debugging: detect incorrect freeing of live locks"
+	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+	select DEBUG_SPINLOCK
+	select DEBUG_MUTEXES
+	select DEBUG_RT_MUTEXES if RT_MUTEXES
+	select LOCKDEP
+	help
+	 This feature will check whether any held lock (spinlock, rwlock,
+	 mutex or rwsem) is incorrectly freed by the kernel, via any of the
+	 memory-freeing routines (kfree(), kmem_cache_free(), free_pages(),
+	 vfree(), etc.), whether a live lock is incorrectly reinitialized via
+	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
+	 held during task exit.
+
+config LOCKDEP
+	bool
+	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
+	select STACKTRACE
+	select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86
+	select KALLSYMS
+	select KALLSYMS_ALL
+
+config LOCKDEP_SMALL
+	bool
+
 config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on DEBUG_KERNEL && LOCKDEP
@@ -1571,7 +1582,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
 	depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
 	depends on !X86_64
 	select STACKTRACE
-	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE && !X86
+	select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86
 	help
 	  Provide stacktrace filter for fault-injection capabilities
 
@@ -1969,7 +1980,7 @@ config STRICT_DEVMEM
1969 bool "Filter access to /dev/mem" 1980 bool "Filter access to /dev/mem"
1970 depends on MMU && DEVMEM 1981 depends on MMU && DEVMEM
1971 depends on ARCH_HAS_DEVMEM_IS_ALLOWED 1982 depends on ARCH_HAS_DEVMEM_IS_ALLOWED
1972 default y if TILE || PPC || X86 || ARM64 1983 default y if PPC || X86 || ARM64
1973 ---help--- 1984 ---help---
1974 If this option is disabled, you allow userspace (root) access to all 1985 If this option is disabled, you allow userspace (root) access to all
1975 of memory, including kernel and userspace memory. Accidental 1986 of memory, including kernel and userspace memory. Accidental
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index a669c193b878..19d42ea75ec2 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -46,3 +46,10 @@ config UBSAN_NULL
 	help
 	  This option enables detection of memory accesses via a
 	  null pointer.
+
+config TEST_UBSAN
+	tristate "Module for testing for undefined behavior detection"
+	depends on m && UBSAN
+	help
+	  This is a test module for UBSAN.
+	  It triggers various undefined behavior, and detect it.
diff --git a/lib/Makefile b/lib/Makefile
index a90d4fcd748f..ce20696d5a92 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,7 +18,7 @@ KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
-	 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
+	 rbtree.o radix-tree.o timerqueue.o\
 	 idr.o int_sqrt.o extable.o \
 	 sha1.o chacha20.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
@@ -26,6 +26,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 earlycpio.o seq_buf.o siphash.o \
 	 nmi_backtrace.o nodemask.o win_minmax.o
 
+lib-$(CONFIG_PRINTK) += dump_stack.o
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
 lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
@@ -52,6 +53,9 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
 obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
 obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
+CFLAGS_test_kasan.o += -fno-builtin
+obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
+UBSAN_SANITIZE_test_ubsan.o := y
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
@@ -81,6 +85,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+obj-y += logic_pio.o
+
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 9e498c77ed0e..a42eff7e8c48 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -607,7 +607,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
 		/* if no digit is after '-', it's wrong*/
 		if (at_start && in_range)
 			return -EINVAL;
-		if (!(a <= b) || !(used_size <= group_size))
+		if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
 			return -EINVAL;
 		if (b >= nmaskbits)
 			return -ERANGE;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2f5349c6e81a..994be4805cec 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -42,14 +42,18 @@ static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
 static DEFINE_RAW_SPINLOCK(pool_lock);
 
 static HLIST_HEAD(obj_pool);
+static HLIST_HEAD(obj_to_free);
 
 static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_free = ODEBUG_POOL_SIZE;
 static int			obj_pool_used;
 static int			obj_pool_max_used;
+/* The number of objs on the global free list */
+static int			obj_nr_tofree;
 static struct kmem_cache	*obj_cache;
 
 static int			debug_objects_maxchain __read_mostly;
+static int __maybe_unused	debug_objects_maxchecked __read_mostly;
 static int			debug_objects_fixups __read_mostly;
 static int			debug_objects_warnings __read_mostly;
 static int			debug_objects_enabled __read_mostly
@@ -96,12 +100,32 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 static void fill_pool(void)
 {
 	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
-	struct debug_obj *new;
+	struct debug_obj *new, *obj;
 	unsigned long flags;
 
 	if (likely(obj_pool_free >= debug_objects_pool_min_level))
 		return;
 
+	/*
+	 * Reuse objs from the global free list; they will be reinitialized
+	 * when allocating.
+	 */
+	while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		raw_spin_lock_irqsave(&pool_lock, flags);
+		/*
+		 * Recheck with the lock held as the worker thread might have
+		 * won the race and freed the global free list already.
+		 */
+		if (obj_nr_tofree) {
+			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+			hlist_del(&obj->node);
+			obj_nr_tofree--;
+			hlist_add_head(&obj->node, &obj_pool);
+			obj_pool_free++;
+		}
+		raw_spin_unlock_irqrestore(&pool_lock, flags);
+	}
+
 	if (unlikely(!obj_cache))
 		return;
 
@@ -177,62 +201,76 @@ alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
  * workqueue function to free objects.
  *
  * To reduce contention on the global pool_lock, the actual freeing of
- * debug objects will be delayed if the pool_lock is busy. We also free
- * the objects in a batch of 4 for each lock/unlock cycle.
+ * debug objects will be delayed if the pool_lock is busy.
  */
-#define ODEBUG_FREE_BATCH	4
-
 static void free_obj_work(struct work_struct *work)
 {
-	struct debug_obj *objs[ODEBUG_FREE_BATCH];
+	struct hlist_node *tmp;
+	struct debug_obj *obj;
 	unsigned long flags;
-	int i;
+	HLIST_HEAD(tofree);
 
 	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 		return;
-	while (obj_pool_free >= debug_objects_pool_size + ODEBUG_FREE_BATCH) {
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++) {
-			objs[i] = hlist_entry(obj_pool.first,
-					      typeof(*objs[0]), node);
-			hlist_del(&objs[i]->node);
-		}
 
-		obj_pool_free -= ODEBUG_FREE_BATCH;
-		debug_objects_freed += ODEBUG_FREE_BATCH;
-		/*
-		 * We release pool_lock across kmem_cache_free() to
-		 * avoid contention on pool_lock.
-		 */
-		raw_spin_unlock_irqrestore(&pool_lock, flags);
-		for (i = 0; i < ODEBUG_FREE_BATCH; i++)
-			kmem_cache_free(obj_cache, objs[i]);
-		if (!raw_spin_trylock_irqsave(&pool_lock, flags))
-			return;
+	/*
+	 * The objs on the pool list might be allocated before the work is
+	 * run, so recheck if pool list it full or not, if not fill pool
+	 * list from the global free list
+	 */
+	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
+		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
+		hlist_del(&obj->node);
+		hlist_add_head(&obj->node, &obj_pool);
+		obj_pool_free++;
+		obj_nr_tofree--;
+	}
+
+	/*
+	 * Pool list is already full and there are still objs on the free
+	 * list. Move remaining free objs to a temporary list to free the
+	 * memory outside the pool_lock held region.
+	 */
+	if (obj_nr_tofree) {
+		hlist_move_list(&obj_to_free, &tofree);
+		debug_objects_freed += obj_nr_tofree;
+		obj_nr_tofree = 0;
 	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
+
+	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
+		hlist_del(&obj->node);
+		kmem_cache_free(obj_cache, obj);
+	}
 }
 
-/*
- * Put the object back into the pool and schedule work to free objects
- * if necessary.
- */
-static void free_object(struct debug_obj *obj)
+static bool __free_object(struct debug_obj *obj)
 {
 	unsigned long flags;
-	int sched = 0;
+	bool work;
 
 	raw_spin_lock_irqsave(&pool_lock, flags);
-	/*
-	 * schedule work when the pool is filled and the cache is
-	 * initialized:
-	 */
-	if (obj_pool_free > debug_objects_pool_size && obj_cache)
-		sched = 1;
-	hlist_add_head(&obj->node, &obj_pool);
-	obj_pool_free++;
+	work = (obj_pool_free > debug_objects_pool_size) && obj_cache;
 	obj_pool_used--;
+
+	if (work) {
+		obj_nr_tofree++;
+		hlist_add_head(&obj->node, &obj_to_free);
+	} else {
+		obj_pool_free++;
+		hlist_add_head(&obj->node, &obj_pool);
+	}
 	raw_spin_unlock_irqrestore(&pool_lock, flags);
-	if (sched)
+	return work;
+}
+
+/*
+ * Put the object back into the pool and schedule work to free objects
+ * if necessary.
+ */
+static void free_object(struct debug_obj *obj)
+{
+	if (__free_object(obj))
 		schedule_work(&debug_obj_work);
 }
 
@@ -714,13 +752,13 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
 static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 {
 	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
-	struct hlist_node *tmp;
-	HLIST_HEAD(freelist);
 	struct debug_obj_descr *descr;
 	enum debug_obj_state state;
 	struct debug_bucket *db;
+	struct hlist_node *tmp;
 	struct debug_obj *obj;
-	int cnt;
+	int cnt, objs_checked = 0;
+	bool work = false;
 
 	saddr = (unsigned long) address;
 	eaddr = saddr + size;
@@ -751,21 +789,24 @@ repeat:
 				goto repeat;
 			default:
 				hlist_del(&obj->node);
-				hlist_add_head(&obj->node, &freelist);
+				work |= __free_object(obj);
 				break;
 			}
 		}
 		raw_spin_unlock_irqrestore(&db->lock, flags);
 
-		/* Now free them */
-		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
-			hlist_del(&obj->node);
-			free_object(obj);
-		}
-
 		if (cnt > debug_objects_maxchain)
 			debug_objects_maxchain = cnt;
+
+		objs_checked += cnt;
 	}
+
+	if (objs_checked > debug_objects_maxchecked)
+		debug_objects_maxchecked = objs_checked;
+
+	/* Schedule work to actually kmem_cache_free() objects */
+	if (work)
+		schedule_work(&debug_obj_work);
 }
 
 void debug_check_no_obj_freed(const void *address, unsigned long size)
@@ -780,12 +821,14 @@ void debug_check_no_obj_freed(const void *address, unsigned long size)
 static int debug_stats_show(struct seq_file *m, void *v)
 {
 	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
+	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
 	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
 	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
 	seq_printf(m, "pool_free     :%d\n", obj_pool_free);
 	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
 	seq_printf(m, "pool_used     :%d\n", obj_pool_used);
 	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+	seq_printf(m, "on_free_list  :%d\n", obj_nr_tofree);
 	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
 	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
 	return 0;
diff --git a/lib/devres.c b/lib/devres.c
index 5f2aedd58bc5..5bec1120b392 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -5,6 +5,12 @@
 #include <linux/gfp.h>
 #include <linux/export.h>
 
+enum devm_ioremap_type {
+	DEVM_IOREMAP = 0,
+	DEVM_IOREMAP_NC,
+	DEVM_IOREMAP_WC,
+};
+
 void devm_ioremap_release(struct device *dev, void *res)
 {
 	iounmap(*(void __iomem **)res);
@@ -15,24 +21,28 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
 	return *(void **)res == match_data;
 }
 
-/**
- * devm_ioremap - Managed ioremap()
- * @dev: Generic device to remap IO address for
- * @offset: Resource address to map
- * @size: Size of map
- *
- * Managed ioremap(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
-			   resource_size_t size)
+static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
+				    resource_size_t size,
+				    enum devm_ioremap_type type)
 {
-	void __iomem **ptr, *addr;
+	void __iomem **ptr, *addr = NULL;
 
 	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
 	if (!ptr)
 		return NULL;
 
-	addr = ioremap(offset, size);
+	switch (type) {
+	case DEVM_IOREMAP:
+		addr = ioremap(offset, size);
+		break;
+	case DEVM_IOREMAP_NC:
+		addr = ioremap_nocache(offset, size);
+		break;
+	case DEVM_IOREMAP_WC:
+		addr = ioremap_wc(offset, size);
+		break;
+	}
+
 	if (addr) {
 		*ptr = addr;
 		devres_add(dev, ptr);
@@ -41,6 +51,20 @@ void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
 
 	return addr;
 }
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: Resource address to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+			   resource_size_t size)
+{
+	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
+}
 EXPORT_SYMBOL(devm_ioremap);
 
 /**
@@ -55,20 +79,7 @@ EXPORT_SYMBOL(devm_ioremap);
 void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
 				   resource_size_t size)
 {
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap_nocache(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
+	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NC);
 }
 EXPORT_SYMBOL(devm_ioremap_nocache);
 
@@ -83,20 +94,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
 void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
 			      resource_size_t size)
 {
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap_wc(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
+	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
 }
 EXPORT_SYMBOL(devm_ioremap_wc);
 
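
Note: the three devm_ioremap* variants now share one __devm_ioremap() helper that dispatches on enum devm_ioremap_type; callers are unaffected. For reference, a hypothetical probe using the managed mapping (illustrative driver code, not part of this patch):

	#include <linux/io.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;	/* unmapped automatically on detach */

		return 0;
	}
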
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
index c9e8e21cb334..bbfb229aa067 100644
--- a/lib/dma-direct.c
+++ b/lib/dma-direct.c
@@ -9,6 +9,7 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-contiguous.h>
 #include <linux/pfn.h>
+#include <linux/set_memory.h>
 
 #define DIRECT_MAPPING_ERROR		0
 
@@ -20,6 +21,14 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif
 
+/*
+ * For AMD SEV all DMA must be to unencrypted addresses.
+ */
+static inline bool force_dma_unencrypted(void)
+{
+	return sev_active();
+}
+
 static bool
 check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 		const char *caller)
@@ -37,7 +46,9 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
-	return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+	dma_addr_t addr = force_dma_unencrypted() ?
+		__phys_to_dma(dev, phys) : phys_to_dma(dev, phys);
+	return addr + size - 1 <= dev->coherent_dma_mask;
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
@@ -46,6 +57,10 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	void *ret;
+
+	/* we always manually zero the memory once we are done: */
+	gfp &= ~__GFP_ZERO;
 
 	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
 	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
@@ -69,7 +84,8 @@ again:
 		__free_pages(page, page_order);
 		page = NULL;
 
-		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
+		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
 		    !(gfp & GFP_DMA)) {
 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
 			goto again;
@@ -78,10 +94,15 @@ again:
 
 	if (!page)
 		return NULL;
-
-	*dma_handle = phys_to_dma(dev, page_to_phys(page));
-	memset(page_address(page), 0, size);
-	return page_address(page);
+	ret = page_address(page);
+	if (force_dma_unencrypted()) {
+		set_memory_decrypted((unsigned long)ret, 1 << page_order);
+		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
+	} else {
+		*dma_handle = phys_to_dma(dev, page_to_phys(page));
+	}
+	memset(ret, 0, size);
+	return ret;
 }
 
 /*
@@ -92,9 +113,12 @@ void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned int page_order = get_order(size);
 
+	if (force_dma_unencrypted())
+		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
 	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
-		free_pages((unsigned long)cpu_addr, get_order(size));
+		free_pages((unsigned long)cpu_addr, page_order);
 }
 
 static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c5edbedd364d..5cff72f18c4a 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -10,6 +10,66 @@
 #include <linux/sched/debug.h>
 #include <linux/smp.h>
 #include <linux/atomic.h>
+#include <linux/kexec.h>
+#include <linux/utsname.h>
+
+static char dump_stack_arch_desc_str[128];
+
+/**
+ * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
+ * @fmt: printf-style format string
+ * @...: arguments for the format string
+ *
+ * The configured string will be printed right after utsname during task
+ * dumps. Usually used to add arch-specific system identifiers. If an
+ * arch wants to make use of such an ID string, it should initialize this
+ * as soon as possible during boot.
+ */
+void __init dump_stack_set_arch_desc(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
+		  fmt, args);
+	va_end(args);
+}
+
+/**
+ * dump_stack_print_info - print generic debug info for dump_stack()
+ * @log_lvl: log level
+ *
+ * Arch-specific dump_stack() implementations can use this function to
+ * print out the same debug information as the generic dump_stack().
+ */
+void dump_stack_print_info(const char *log_lvl)
+{
+	printk("%sCPU: %d PID: %d Comm: %.20s %s%s %s %.*s\n",
+	       log_lvl, raw_smp_processor_id(), current->pid, current->comm,
+	       kexec_crash_loaded() ? "Kdump: loaded " : "",
+	       print_tainted(),
+	       init_utsname()->release,
+	       (int)strcspn(init_utsname()->version, " "),
+	       init_utsname()->version);
+
+	if (dump_stack_arch_desc_str[0] != '\0')
+		printk("%sHardware name: %s\n",
+		       log_lvl, dump_stack_arch_desc_str);
+
+	print_worker_info(log_lvl, current);
+}
+
+/**
+ * show_regs_print_info - print generic debug info for show_regs()
+ * @log_lvl: log level
+ *
+ * show_regs() implementations can use this function to print out generic
+ * debug information.
+ */
+void show_regs_print_info(const char *log_lvl)
+{
+	dump_stack_print_info(log_lvl);
+}
 
 static void __dump_stack(void)
 {
diff --git a/lib/errseq.c b/lib/errseq.c
index df782418b333..81f9e33aa7e7 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -111,27 +111,22 @@ EXPORT_SYMBOL(errseq_set);
  * errseq_sample() - Grab current errseq_t value.
  * @eseq: Pointer to errseq_t to be sampled.
  *
- * This function allows callers to sample an errseq_t value, marking it as
- * "seen" if required.
+ * This function allows callers to initialise their errseq_t variable.
+ * If the error has been "seen", new callers will not see an old error.
+ * If there is an unseen error in @eseq, the caller of this function will
+ * see it the next time it checks for an error.
  *
+ * Context: Any context.
  * Return: The current errseq value.
  */
 errseq_t errseq_sample(errseq_t *eseq)
 {
 	errseq_t old = READ_ONCE(*eseq);
-	errseq_t new = old;
 
-	/*
-	 * For the common case of no errors ever having been set, we can skip
-	 * marking the SEEN bit. Once an error has been set, the value will
-	 * never go back to zero.
-	 */
-	if (old != 0) {
-		new |= ERRSEQ_SEEN;
-		if (old != new)
-			cmpxchg(eseq, old, new);
-	}
-	return new;
+	/* If nobody has seen this error yet, then we can be the first. */
+	if (!(old & ERRSEQ_SEEN))
+		old = 0;
+	return old;
 }
 EXPORT_SYMBOL(errseq_sample);
 
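
Note: the new errseq_sample() never writes to the counter. Sampling while an error is still unseen returns 0, so a later check against that sample still reports the error; once the error has been marked seen, samplers start from the current value. A userspace mirror of the visible logic (the ERRSEQ_SEEN value below is assumed for illustration; the real one is derived from MAX_ERRNO in the errseq headers):

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t errseq_t;
	#define ERRSEQ_SEEN (1u << 12)	/* assumed value, illustration only */

	/* Mirror of the new errseq_sample(): don't consume an unseen error. */
	static errseq_t sample(const errseq_t *eseq)
	{
		errseq_t old = *eseq;

		if (!(old & ERRSEQ_SEEN))
			old = 0;
		return old;
	}

	int main(void)
	{
		errseq_t es = 5;		/* error recorded, nobody saw it */

		assert(sample(&es) == 0);	/* new watcher samples 0 ...   */
		assert(es != 0);		/* ... and will still see it    */

		es |= ERRSEQ_SEEN;		/* someone marked it seen       */
		assert(sample(&es) == es);	/* now samplers get the value   */
		return 0;
	}
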
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5985a25e6cbc..5367ffa5c18f 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -132,7 +132,12 @@ static int __init find_bit_test(void)
 	test_find_next_bit(bitmap, BITMAP_LEN);
 	test_find_next_zero_bit(bitmap, BITMAP_LEN);
 	test_find_last_bit(bitmap, BITMAP_LEN);
-	test_find_first_bit(bitmap, BITMAP_LEN);
+
+	/*
+	 * test_find_first_bit() may take some time, so
+	 * traverse only part of bitmap to avoid soft lockup.
+	 */
+	test_find_first_bit(bitmap, BITMAP_LEN / 10);
 	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);
 
 	pr_err("\nStart testing find_bit() with sparse bitmap\n");
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index e2d329099bf7..14436f4ca6bd 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -38,3 +38,33 @@ unsigned long int_sqrt(unsigned long x)
 	return y;
 }
 EXPORT_SYMBOL(int_sqrt);
+
+#if BITS_PER_LONG < 64
+/**
+ * int_sqrt64 - strongly typed int_sqrt function when minimum 64 bit input
+ * is expected.
+ * @x: 64bit integer of which to calculate the sqrt
+ */
+u32 int_sqrt64(u64 x)
+{
+	u64 b, m, y = 0;
+
+	if (x <= ULONG_MAX)
+		return int_sqrt((unsigned long) x);
+
+	m = 1ULL << (fls64(x) & ~1ULL);
+	while (m != 0) {
+		b = y + m;
+		y >>= 1;
+
+		if (x >= b) {
+			x -= b;
+			y += m;
+		}
+		m >>= 2;
+	}
+
+	return y;
+}
+EXPORT_SYMBOL(int_sqrt64);
+#endif
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 970212670b6a..fdae394172fa 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1012,7 +1012,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_gap_alignment);
 
-static inline size_t __pipe_get_pages(struct iov_iter *i,
+static inline ssize_t __pipe_get_pages(struct iov_iter *i,
 				size_t maxsize,
 				struct page **pages,
 				int idx,
@@ -1102,7 +1102,7 @@ static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
 		   size_t *start)
 {
 	struct page **p;
-	size_t n;
+	ssize_t n;
 	int idx;
 	int npages;
 
diff --git a/lib/kfifo.c b/lib/kfifo.c
index 90ba1eb1df06..b0f757bf7213 100644
--- a/lib/kfifo.c
+++ b/lib/kfifo.c
@@ -39,7 +39,7 @@ int __kfifo_alloc(struct __kfifo *fifo, unsigned int size,
 		size_t esize, gfp_t gfp_mask)
 {
 	/*
-	 * round down to the next power of 2, since our 'let the indices
+	 * round up to the next power of 2, since our 'let the indices
 	 * wrap' technique works only in this case.
 	 */
 	size = roundup_pow_of_two(size);
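
Note: the comment now matches the code: the requested size is rounded up, so a caller can get a bigger fifo than asked for (a kfifo of 100 elements really holds 128). A userspace stand-in for the helper's semantics:

	#include <assert.h>

	/* Stand-in for the kernel's roundup_pow_of_two() (linux/log2.h). */
	static unsigned long roundup_pow_of_two(unsigned long n)
	{
		unsigned long p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	int main(void)
	{
		assert(roundup_pow_of_two(100) == 128);	/* 100 -> 128 */
		assert(roundup_pow_of_two(128) == 128);	/* already a power of 2 */
		return 0;
	}
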
diff --git a/lib/kobject.c b/lib/kobject.c
index afd5a3fc6123..18989b5b3b56 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -204,8 +204,9 @@ static int kobject_add_internal(struct kobject *kobj)
 		return -ENOENT;
 
 	if (!kobj->name || !kobj->name[0]) {
-		WARN(1, "kobject: (%p): attempted to be registered with empty "
-			 "name!\n", kobj);
+		WARN(1,
+		     "kobject: (%p): attempted to be registered with empty name!\n",
+		     kobj);
 		return -EINVAL;
 	}
211 212
@@ -232,14 +233,12 @@ static int kobject_add_internal(struct kobject *kobj)
 
 	/* be noisy on error issues */
 	if (error == -EEXIST)
-		WARN(1, "%s failed for %s with "
-		     "-EEXIST, don't try to register things with "
-		     "the same name in the same directory.\n",
-		     __func__, kobject_name(kobj));
+		pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+		       __func__, kobject_name(kobj));
 	else
-		WARN(1, "%s failed for %s (error: %d parent: %s)\n",
-		     __func__, kobject_name(kobj), error,
-		     parent ? kobject_name(parent) : "'none'");
+		pr_err("%s failed for %s (error: %d parent: %s)\n",
+		       __func__, kobject_name(kobj), error,
+		       parent ? kobject_name(parent) : "'none'");
 	} else
 		kobj->state_in_sysfs = 1;
 
@@ -334,8 +333,8 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
 	}
 	if (kobj->state_initialized) {
 		/* do not error out as sometimes we can recover */
-		printk(KERN_ERR "kobject (%p): tried to init an initialized "
-		       "object, something is seriously wrong.\n", kobj);
+		pr_err("kobject (%p): tried to init an initialized object, something is seriously wrong.\n",
+		       kobj);
 		dump_stack();
 	}
 
@@ -344,7 +343,7 @@ void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
 	return;
 
 error:
-	printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+	pr_err("kobject (%p): %s\n", kobj, err_str);
 	dump_stack();
 }
 EXPORT_SYMBOL(kobject_init);
@@ -357,7 +356,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
 
 	retval = kobject_set_name_vargs(kobj, fmt, vargs);
 	if (retval) {
-		printk(KERN_ERR "kobject: can not set name properly!\n");
+		pr_err("kobject: can not set name properly!\n");
 		return retval;
 	}
 	kobj->parent = parent;
@@ -399,8 +398,7 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
 		return -EINVAL;
 
 	if (!kobj->state_initialized) {
-		printk(KERN_ERR "kobject '%s' (%p): tried to add an "
-		       "uninitialized object, something is seriously wrong.\n",
+		pr_err("kobject '%s' (%p): tried to add an uninitialized object, something is seriously wrong.\n",
 		       kobject_name(kobj), kobj);
 		dump_stack();
 		return -EINVAL;
@@ -590,9 +588,9 @@ struct kobject *kobject_get(struct kobject *kobj)
 {
 	if (kobj) {
 		if (!kobj->state_initialized)
-			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
-			     "initialized, yet kobject_get() is being "
-			     "called.\n", kobject_name(kobj), kobj);
+			WARN(1, KERN_WARNING
+			     "kobject: '%s' (%p): is not initialized, yet kobject_get() is being called.\n",
+			     kobject_name(kobj), kobj);
 		kref_get(&kobj->kref);
 	}
 	return kobj;
@@ -622,8 +620,7 @@ static void kobject_cleanup(struct kobject *kobj)
 		 kobject_name(kobj), kobj, __func__, kobj->parent);
 
 	if (t && !t->release)
-		pr_debug("kobject: '%s' (%p): does not have a release() "
-			 "function, it is broken and must be fixed.\n",
+		pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n",
 			 kobject_name(kobj), kobj);
 
 	/* send "remove" if the caller did not do it but sent "add" */
@@ -686,9 +683,9 @@ void kobject_put(struct kobject *kobj)
 {
 	if (kobj) {
 		if (!kobj->state_initialized)
-			WARN(1, KERN_WARNING "kobject: '%s' (%p): is not "
-			     "initialized, yet kobject_put() is being "
-			     "called.\n", kobject_name(kobj), kobj);
+			WARN(1, KERN_WARNING
+			     "kobject: '%s' (%p): is not initialized, yet kobject_put() is being called.\n",
+			     kobject_name(kobj), kobj);
 		kref_put(&kobj->kref, kobject_release);
 	}
 }
@@ -752,8 +749,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
 
 	retval = kobject_add(kobj, parent, "%s", name);
 	if (retval) {
-		printk(KERN_WARNING "%s: kobject_add error: %d\n",
-		       __func__, retval);
+		pr_warn("%s: kobject_add error: %d\n", __func__, retval);
 		kobject_put(kobj);
 		kobj = NULL;
 	}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fe6ec8fda28..15ea216a67ce 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -25,6 +25,7 @@
 #include <linux/uuid.h>
 #include <linux/ctype.h>
 #include <net/sock.h>
+#include <net/netlink.h>
 #include <net/net_namespace.h>
 
 
@@ -32,11 +33,13 @@ u64 uevent_seqnum;
 #ifdef CONFIG_UEVENT_HELPER
 char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH;
 #endif
-#ifdef CONFIG_NET
+
 struct uevent_sock {
 	struct list_head list;
 	struct sock *sk;
 };
+
+#ifdef CONFIG_NET
 static LIST_HEAD(uevent_sock_list);
 #endif
 
@@ -602,12 +605,88 @@ int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...)
 EXPORT_SYMBOL_GPL(add_uevent_var);
 
 #if defined(CONFIG_NET)
+static int uevent_net_broadcast(struct sock *usk, struct sk_buff *skb,
+				struct netlink_ext_ack *extack)
+{
+	/* u64 to chars: 2^64 - 1 = 21 chars */
+	char buf[sizeof("SEQNUM=") + 21];
+	struct sk_buff *skbc;
+	int ret;
+
+	/* bump and prepare sequence number */
+	ret = snprintf(buf, sizeof(buf), "SEQNUM=%llu", ++uevent_seqnum);
+	if (ret < 0 || (size_t)ret >= sizeof(buf))
+		return -ENOMEM;
+	ret++;
+
+	/* verify message does not overflow */
+	if ((skb->len + ret) > UEVENT_BUFFER_SIZE) {
+		NL_SET_ERR_MSG(extack, "uevent message too big");
+		return -EINVAL;
+	}
+
+	/* copy skb and extend to accommodate sequence number */
+	skbc = skb_copy_expand(skb, 0, ret, GFP_KERNEL);
+	if (!skbc)
+		return -ENOMEM;
+
+	/* append sequence number */
+	skb_put_data(skbc, buf, ret);
+
+	/* remove msg header */
+	skb_pull(skbc, NLMSG_HDRLEN);
+
+	/* set portid 0 to inform userspace message comes from kernel */
+	NETLINK_CB(skbc).portid = 0;
+	NETLINK_CB(skbc).dst_group = 1;
+
+	ret = netlink_broadcast(usk, skbc, 0, 1, GFP_KERNEL);
+	/* ENOBUFS should be handled in userspace */
+	if (ret == -ENOBUFS || ret == -ESRCH)
+		ret = 0;
+
+	return ret;
+}
+
+static int uevent_net_rcv_skb(struct sk_buff *skb, struct nlmsghdr *nlh,
+			      struct netlink_ext_ack *extack)
+{
+	struct net *net;
+	int ret;
+
+	if (!nlmsg_data(nlh))
+		return -EINVAL;
+
+	/*
+	 * Verify that we are allowed to send messages to the target
+	 * network namespace. The caller must have CAP_SYS_ADMIN in the
+	 * owning user namespace of the target network namespace.
+	 */
+	net = sock_net(NETLINK_CB(skb).sk);
+	if (!netlink_ns_capable(skb, net->user_ns, CAP_SYS_ADMIN)) {
+		NL_SET_ERR_MSG(extack, "missing CAP_SYS_ADMIN capability");
+		return -EPERM;
+	}
+
+	mutex_lock(&uevent_sock_mutex);
+	ret = uevent_net_broadcast(net->uevent_sock->sk, skb, extack);
+	mutex_unlock(&uevent_sock_mutex);
+
+	return ret;
+}
+
+static void uevent_net_rcv(struct sk_buff *skb)
+{
+	netlink_rcv_skb(skb, &uevent_net_rcv_skb);
+}
+
 static int uevent_net_init(struct net *net)
 {
 	struct uevent_sock *ue_sk;
 	struct netlink_kernel_cfg cfg = {
 		.groups = 1,
-		.flags = NL_CFG_F_NONROOT_RECV,
+		.input = uevent_net_rcv,
+		.flags = NL_CFG_F_NONROOT_RECV
 	};
 
 	ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
@@ -621,6 +700,9 @@ static int uevent_net_init(struct net *net)
 		kfree(ue_sk);
 		return -ENODEV;
 	}
+
+	net->uevent_sock = ue_sk;
+
 	mutex_lock(&uevent_sock_mutex);
 	list_add_tail(&ue_sk->list, &uevent_sock_list);
 	mutex_unlock(&uevent_sock_mutex);
@@ -629,17 +711,9 @@ static int uevent_net_init(struct net *net)
 
 static void uevent_net_exit(struct net *net)
 {
-	struct uevent_sock *ue_sk;
+	struct uevent_sock *ue_sk = net->uevent_sock;
 
 	mutex_lock(&uevent_sock_mutex);
-	list_for_each_entry(ue_sk, &uevent_sock_list, list) {
-		if (sock_net(ue_sk->sk) == net)
-			goto found;
-	}
-	mutex_unlock(&uevent_sock_mutex);
-	return;
-
-found:
 	list_del(&ue_sk->list);
 	mutex_unlock(&uevent_sock_mutex);
 
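
Note: the new input handler lets privileged userspace inject uevents over the same NETLINK_KOBJECT_UEVENT family that listeners already bind to; the kernel appends SEQNUM and rebroadcasts to group 1. For context, a minimal listener on that group (sketch; error handling trimmed, and each received buffer may contain several NUL-separated KEY=value strings after the "action@devpath" header):

	#include <linux/netlink.h>
	#include <stdio.h>
	#include <sys/socket.h>
	#include <unistd.h>

	int main(void)
	{
		struct sockaddr_nl sa = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1,		/* kernel uevent broadcast group */
		};
		char buf[4096];
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

		bind(fd, (struct sockaddr *)&sa, sizeof(sa));
		for (;;) {
			ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
			if (len <= 0)
				break;
			buf[len] = '\0';
			printf("%s\n", buf);	/* e.g. "add@/devices/..." */
		}
		close(fd);
		return 0;
	}
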
diff --git a/lib/list_debug.c b/lib/list_debug.c
index a34db8d27667..5d5424b51b74 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -21,13 +21,13 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
 			      struct list_head *next)
 {
 	if (CHECK_DATA_CORRUPTION(next->prev != prev,
-			"list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
+			"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
 			prev, next->prev, next) ||
 	    CHECK_DATA_CORRUPTION(prev->next != next,
-			"list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
+			"list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n",
 			next, prev->next, prev) ||
 	    CHECK_DATA_CORRUPTION(new == prev || new == next,
-			"list_add double add: new=%p, prev=%p, next=%p.\n",
+			"list_add double add: new=%px, prev=%px, next=%px.\n",
 			new, prev, next))
 		return false;
 
@@ -43,16 +43,16 @@ bool __list_del_entry_valid(struct list_head *entry)
43 next = entry->next; 43 next = entry->next;
44 44
45 if (CHECK_DATA_CORRUPTION(next == LIST_POISON1, 45 if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
46 "list_del corruption, %p->next is LIST_POISON1 (%p)\n", 46 "list_del corruption, %px->next is LIST_POISON1 (%px)\n",
47 entry, LIST_POISON1) || 47 entry, LIST_POISON1) ||
48 CHECK_DATA_CORRUPTION(prev == LIST_POISON2, 48 CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
49 "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", 49 "list_del corruption, %px->prev is LIST_POISON2 (%px)\n",
50 entry, LIST_POISON2) || 50 entry, LIST_POISON2) ||
51 CHECK_DATA_CORRUPTION(prev->next != entry, 51 CHECK_DATA_CORRUPTION(prev->next != entry,
52 "list_del corruption. prev->next should be %p, but was %p\n", 52 "list_del corruption. prev->next should be %px, but was %px\n",
53 entry, prev->next) || 53 entry, prev->next) ||
54 CHECK_DATA_CORRUPTION(next->prev != entry, 54 CHECK_DATA_CORRUPTION(next->prev != entry,
55 "list_del corruption. next->prev should be %p, but was %p\n", 55 "list_del corruption. next->prev should be %px, but was %px\n",
56 entry, next->prev)) 56 entry, next->prev))
57 return false; 57 return false;
58 58
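The switch from %p to %px matters here because %p has printed hashed pointer values since v4.15, which made these corruption reports impossible to correlate with actual memory contents. As a contrived sketch of the kind of bug the first check catches (CONFIG_DEBUG_LIST builds only; demo_item is hypothetical):

#include <linux/list.h>

static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head node;
};

static void demo_double_add(struct demo_item *it)
{
	list_add(&it->node, &demo_list);
	/* bug: node is already queued; __list_add_valid() reports
	 * "list_add double add" with the raw (%px) pointer values.
	 */
	list_add(&it->node, &demo_list);
}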
diff --git a/lib/lockref.c b/lib/lockref.c
index 47169ed7e964..3d468b53d4c9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -81,6 +81,34 @@ int lockref_get_not_zero(struct lockref *lockref)
81EXPORT_SYMBOL(lockref_get_not_zero); 81EXPORT_SYMBOL(lockref_get_not_zero);
82 82
83/** 83/**
84 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
85 * @lockref: pointer to lockref structure
86 * Return: 1 if count updated successfully or 0 if count would become zero
87 */
88int lockref_put_not_zero(struct lockref *lockref)
89{
90 int retval;
91
92 CMPXCHG_LOOP(
93 new.count--;
94 if (old.count <= 1)
95 return 0;
96 ,
97 return 1;
98 );
99
100 spin_lock(&lockref->lock);
101 retval = 0;
102 if (lockref->count > 1) {
103 lockref->count--;
104 retval = 1;
105 }
106 spin_unlock(&lockref->lock);
107 return retval;
108}
109EXPORT_SYMBOL(lockref_put_not_zero);
110
111/**
84 * lockref_get_or_lock - Increments count unless the count is 0 or dead 112 * lockref_get_or_lock - Increments count unless the count is 0 or dead
85 * @lockref: pointer to lockref structure 113 * @lockref: pointer to lockref structure
86 * Return: 1 if count updated successfully or 0 if count was zero 114 * Return: 1 if count updated successfully or 0 if count was zero
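The new lockref_put_not_zero() is the put-side mirror of lockref_get_not_zero(): it decrements locklessly only while it can prove the count stays positive. A usage sketch with a hypothetical refcounted object (demo_obj and demo_destroy() are placeholders):

#include <linux/lockref.h>
#include <linux/spinlock.h>

struct demo_obj {
	struct lockref ref;
	/* ... payload ... */
};

static void demo_destroy(struct demo_obj *obj);

static void demo_put(struct demo_obj *obj)
{
	/* fast path: pure cmpxchg, succeeds while count > 1 */
	if (lockref_put_not_zero(&obj->ref))
		return;

	/* slow path: we may be dropping the last reference */
	spin_lock(&obj->ref.lock);
	if (--obj->ref.count == 0) {
		spin_unlock(&obj->ref.lock);
		demo_destroy(obj);
		return;
	}
	spin_unlock(&obj->ref.lock);
}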
diff --git a/lib/logic_pio.c b/lib/logic_pio.c
new file mode 100644
index 000000000000..feea48fd1a0d
--- /dev/null
+++ b/lib/logic_pio.c
@@ -0,0 +1,280 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2017 HiSilicon Limited, All Rights Reserved.
4 * Author: Gabriele Paoloni <gabriele.paoloni@huawei.com>
5 * Author: Zhichang Yuan <yuanzhichang@hisilicon.com>
6 */
7
8#define pr_fmt(fmt) "LOGIC PIO: " fmt
9
10#include <linux/of.h>
11#include <linux/io.h>
12#include <linux/logic_pio.h>
13#include <linux/mm.h>
14#include <linux/rculist.h>
15#include <linux/sizes.h>
16#include <linux/slab.h>
17
18/* The unique hardware address list */
19static LIST_HEAD(io_range_list);
20static DEFINE_MUTEX(io_range_mutex);
21
 22/* Consider adding a generic kernel helper for this */
23#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
24
25/**
26 * logic_pio_register_range - register logical PIO range for a host
27 * @new_range: pointer to the IO range to be registered.
28 *
 29 * Returns 0 on success, or an error code on failure.
30 *
31 * Register a new IO range node in the IO range list.
32 */
33int logic_pio_register_range(struct logic_pio_hwaddr *new_range)
34{
35 struct logic_pio_hwaddr *range;
36 resource_size_t start;
37 resource_size_t end;
38 resource_size_t mmio_sz = 0;
39 resource_size_t iio_sz = MMIO_UPPER_LIMIT;
40 int ret = 0;
41
42 if (!new_range || !new_range->fwnode || !new_range->size)
43 return -EINVAL;
44
45 start = new_range->hw_start;
46 end = new_range->hw_start + new_range->size;
47
48 mutex_lock(&io_range_mutex);
49 list_for_each_entry_rcu(range, &io_range_list, list) {
50 if (range->fwnode == new_range->fwnode) {
51 /* range already there */
52 goto end_register;
53 }
54 if (range->flags == LOGIC_PIO_CPU_MMIO &&
55 new_range->flags == LOGIC_PIO_CPU_MMIO) {
56 /* for MMIO ranges we need to check for overlap */
57 if (start >= range->hw_start + range->size ||
58 end < range->hw_start) {
59 mmio_sz += range->size;
60 } else {
61 ret = -EFAULT;
62 goto end_register;
63 }
64 } else if (range->flags == LOGIC_PIO_INDIRECT &&
65 new_range->flags == LOGIC_PIO_INDIRECT) {
66 iio_sz += range->size;
67 }
68 }
69
70 /* range not registered yet, check for available space */
71 if (new_range->flags == LOGIC_PIO_CPU_MMIO) {
72 if (mmio_sz + new_range->size - 1 > MMIO_UPPER_LIMIT) {
 73 /* if it's too big, check whether 64K of space can be reserved */
74 if (mmio_sz + SZ_64K - 1 > MMIO_UPPER_LIMIT) {
75 ret = -E2BIG;
76 goto end_register;
77 }
78 new_range->size = SZ_64K;
79 pr_warn("Requested IO range too big, new size set to 64K\n");
80 }
81 new_range->io_start = mmio_sz;
82 } else if (new_range->flags == LOGIC_PIO_INDIRECT) {
83 if (iio_sz + new_range->size - 1 > IO_SPACE_LIMIT) {
84 ret = -E2BIG;
85 goto end_register;
86 }
87 new_range->io_start = iio_sz;
88 } else {
89 /* invalid flag */
90 ret = -EINVAL;
91 goto end_register;
92 }
93
94 list_add_tail_rcu(&new_range->list, &io_range_list);
95
96end_register:
97 mutex_unlock(&io_range_mutex);
98 return ret;
99}
100
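As a sketch of the registration side (hypothetical host driver; my_host_ops is an assumed struct logic_pio_host_ops instance, shown in the accessor example further down): an indirectly accessed host claims part of the shared logical PIO space, and on success the core fills in new_range->io_start:

#include <linux/logic_pio.h>

static const struct logic_pio_host_ops my_host_ops;	/* see below */
static struct logic_pio_hwaddr my_range;

static int my_host_probe(struct fwnode_handle *fwnode)
{
	int ret;

	my_range.fwnode   = fwnode;
	my_range.hw_start = 0;			/* host-local I/O address */
	my_range.size     = 0x400;		/* 1K of ports */
	my_range.flags    = LOGIC_PIO_INDIRECT;
	my_range.ops      = &my_host_ops;

	ret = logic_pio_register_range(&my_range);
	if (ret)
		return ret;

	/* my_range.io_start is now the assigned logical PIO base */
	return 0;
}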
101/**
102 * find_io_range_by_fwnode - find logical PIO range for given FW node
103 * @fwnode: FW node handle associated with logical PIO range
104 *
105 * Returns pointer to node on success, NULL otherwise.
106 *
107 * Traverse the io_range_list to find the registered node for @fwnode.
108 */
109struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode)
110{
111 struct logic_pio_hwaddr *range;
112
113 list_for_each_entry_rcu(range, &io_range_list, list) {
114 if (range->fwnode == fwnode)
115 return range;
116 }
117 return NULL;
118}
119
120/* Return a registered range given an input PIO token */
121static struct logic_pio_hwaddr *find_io_range(unsigned long pio)
122{
123 struct logic_pio_hwaddr *range;
124
125 list_for_each_entry_rcu(range, &io_range_list, list) {
126 if (in_range(pio, range->io_start, range->size))
127 return range;
128 }
129 pr_err("PIO entry token %lx invalid\n", pio);
130 return NULL;
131}
132
133/**
134 * logic_pio_to_hwaddr - translate logical PIO to HW address
135 * @pio: logical PIO value
136 *
137 * Returns HW address if valid, ~0 otherwise.
138 *
139 * Translate the input logical PIO to the corresponding hardware address.
140 * The input PIO should be unique in the whole logical PIO space.
141 */
142resource_size_t logic_pio_to_hwaddr(unsigned long pio)
143{
144 struct logic_pio_hwaddr *range;
145
146 range = find_io_range(pio);
147 if (range)
148 return range->hw_start + pio - range->io_start;
149
150 return (resource_size_t)~0;
151}
152
153/**
154 * logic_pio_trans_hwaddr - translate HW address to logical PIO
155 * @fwnode: FW node reference for the host
156 * @addr: Host-relative HW address
157 * @size: size to translate
158 *
159 * Returns Logical PIO value if successful, ~0UL otherwise
160 */
161unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
162 resource_size_t addr, resource_size_t size)
163{
164 struct logic_pio_hwaddr *range;
165
166 range = find_io_range_by_fwnode(fwnode);
167 if (!range || range->flags == LOGIC_PIO_CPU_MMIO) {
168 pr_err("IO range not found or invalid\n");
169 return ~0UL;
170 }
171 if (range->size < size) {
172 pr_err("resource size %pa cannot fit in IO range size %pa\n",
173 &size, &range->size);
174 return ~0UL;
175 }
176 return addr - range->hw_start + range->io_start;
177}
178
179unsigned long logic_pio_trans_cpuaddr(resource_size_t addr)
180{
181 struct logic_pio_hwaddr *range;
182
183 list_for_each_entry_rcu(range, &io_range_list, list) {
184 if (range->flags != LOGIC_PIO_CPU_MMIO)
185 continue;
186 if (in_range(addr, range->hw_start, range->size))
187 return addr - range->hw_start + range->io_start;
188 }
189 pr_err("addr %llx not registered in io_range_list\n",
190 (unsigned long long) addr);
191 return ~0UL;
192}
193
194#if defined(CONFIG_INDIRECT_PIO) && defined(PCI_IOBASE)
195#define BUILD_LOGIC_IO(bw, type) \
196type logic_in##bw(unsigned long addr) \
197{ \
198 type ret = (type)~0; \
199 \
200 if (addr < MMIO_UPPER_LIMIT) { \
201 ret = read##bw(PCI_IOBASE + addr); \
202 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
203 struct logic_pio_hwaddr *entry = find_io_range(addr); \
204 \
205 if (entry && entry->ops) \
206 ret = entry->ops->in(entry->hostdata, \
207 addr, sizeof(type)); \
208 else \
209 WARN_ON_ONCE(1); \
210 } \
211 return ret; \
212} \
213 \
214void logic_out##bw(type value, unsigned long addr) \
215{ \
216 if (addr < MMIO_UPPER_LIMIT) { \
217 write##bw(value, PCI_IOBASE + addr); \
218 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
219 struct logic_pio_hwaddr *entry = find_io_range(addr); \
220 \
221 if (entry && entry->ops) \
222 entry->ops->out(entry->hostdata, \
223 addr, value, sizeof(type)); \
224 else \
225 WARN_ON_ONCE(1); \
226 } \
227} \
228 \
229void logic_ins##bw(unsigned long addr, void *buffer, \
230 unsigned int count) \
231{ \
232 if (addr < MMIO_UPPER_LIMIT) { \
233 reads##bw(PCI_IOBASE + addr, buffer, count); \
234 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
235 struct logic_pio_hwaddr *entry = find_io_range(addr); \
236 \
237 if (entry && entry->ops) \
238 entry->ops->ins(entry->hostdata, \
239 addr, buffer, sizeof(type), count); \
240 else \
241 WARN_ON_ONCE(1); \
242 } \
243 \
244} \
245 \
246void logic_outs##bw(unsigned long addr, const void *buffer, \
247 unsigned int count) \
248{ \
249 if (addr < MMIO_UPPER_LIMIT) { \
250 writes##bw(PCI_IOBASE + addr, buffer, count); \
251 } else if (addr >= MMIO_UPPER_LIMIT && addr < IO_SPACE_LIMIT) { \
252 struct logic_pio_hwaddr *entry = find_io_range(addr); \
253 \
254 if (entry && entry->ops) \
255 entry->ops->outs(entry->hostdata, \
256 addr, buffer, sizeof(type), count); \
257 else \
258 WARN_ON_ONCE(1); \
259 } \
260}
261
262BUILD_LOGIC_IO(b, u8)
263EXPORT_SYMBOL(logic_inb);
264EXPORT_SYMBOL(logic_insb);
265EXPORT_SYMBOL(logic_outb);
266EXPORT_SYMBOL(logic_outsb);
267
268BUILD_LOGIC_IO(w, u16)
269EXPORT_SYMBOL(logic_inw);
270EXPORT_SYMBOL(logic_insw);
271EXPORT_SYMBOL(logic_outw);
272EXPORT_SYMBOL(logic_outsw);
273
274BUILD_LOGIC_IO(l, u32)
275EXPORT_SYMBOL(logic_inl);
276EXPORT_SYMBOL(logic_insl);
277EXPORT_SYMBOL(logic_outl);
278EXPORT_SYMBOL(logic_outsl);
279
280#endif /* CONFIG_INDIRECT_PIO && PCI_IOBASE */
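The host-side callbacks that the generated logic_inb()/logic_outb() family dispatches to for tokens above MMIO_UPPER_LIMIT might look like the following; the signatures are inferred from the call sites in BUILD_LOGIC_IO() above, and demo_hw_read()/demo_hw_write() stand in for a real indirect-access protocol:

static u32 my_pio_in(void *hostdata, unsigned long addr, size_t dwidth)
{
	/* fetch dwidth bytes from host-local port addr */
	return demo_hw_read(hostdata, addr, dwidth);
}

static void my_pio_out(void *hostdata, unsigned long addr, u32 value,
		       size_t dwidth)
{
	demo_hw_write(hostdata, addr, value, dwidth);
}

static const struct logic_pio_host_ops my_host_ops = {
	.in  = my_pio_in,
	.out = my_pio_out,
	/* .ins/.outs: repeated-transfer variants of the same pattern */
};

A driver on such a host then simply uses the mapped token, e.g. u8 status = logic_inb(my_range.io_start + REG_STATUS); where REG_STATUS is a made-up register offset.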
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8e00138d593f..43e0cbedc3a0 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -146,7 +146,7 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
146 146
147static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) 147static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
148{ 148{
149 return root->gfp_mask & __GFP_BITS_MASK; 149 return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
150} 150}
151 151
152static inline void tag_set(struct radix_tree_node *node, unsigned int tag, 152static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -1612,11 +1612,9 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1612static void __rcu **skip_siblings(struct radix_tree_node **nodep, 1612static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1613 void __rcu **slot, struct radix_tree_iter *iter) 1613 void __rcu **slot, struct radix_tree_iter *iter)
1614{ 1614{
1615 void *sib = node_to_entry(slot - 1);
1616
1617 while (iter->index < iter->next_index) { 1615 while (iter->index < iter->next_index) {
1618 *nodep = rcu_dereference_raw(*slot); 1616 *nodep = rcu_dereference_raw(*slot);
1619 if (*nodep && *nodep != sib) 1617 if (*nodep && !is_sibling_entry(iter->node, *nodep))
1620 return slot; 1618 return slot;
1621 slot++; 1619 slot++;
1622 iter->index = __radix_tree_iter_add(iter, 1); 1620 iter->index = __radix_tree_iter_add(iter, 1);
@@ -1631,7 +1629,7 @@ void __rcu **__radix_tree_next_slot(void __rcu **slot,
1631 struct radix_tree_iter *iter, unsigned flags) 1629 struct radix_tree_iter *iter, unsigned flags)
1632{ 1630{
1633 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; 1631 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1634 struct radix_tree_node *node = rcu_dereference_raw(*slot); 1632 struct radix_tree_node *node;
1635 1633
1636 slot = skip_siblings(&node, slot, iter); 1634 slot = skip_siblings(&node, slot, iter);
1637 1635
@@ -2285,6 +2283,7 @@ void __init radix_tree_init(void)
2285 int ret; 2283 int ret;
2286 2284
2287 BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); 2285 BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
2286 BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
2288 radix_tree_node_cachep = kmem_cache_create("radix_tree_node", 2287 radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
2289 sizeof(struct radix_tree_node), 0, 2288 sizeof(struct radix_tree_node), 0,
2290 SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, 2289 SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index f01b1cb04f91..3de0d8921286 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -4,3 +4,4 @@ int*.c
4tables.c 4tables.c
5neon?.c 5neon?.c
6s390vx?.c 6s390vx?.c
7vpermxor*.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 4add700ddfe3..2f8b61dfd9b0 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,9 +5,9 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
5 int8.o int16.o int32.o 5 int8.o int16.o int32.o
6 6
7raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o 7raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o avx512.o recov_avx512.o
8raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o 8raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o \
9 vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
9raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o 10raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
10raid6_pq-$(CONFIG_TILEGX) += tilegx8.o
11raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o 11raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
12 12
13hostprogs-y += mktables 13hostprogs-y += mktables
@@ -91,6 +91,30 @@ $(obj)/altivec8.c: UNROLL := 8
91$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE 91$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
92 $(call if_changed,unroll) 92 $(call if_changed,unroll)
93 93
94CFLAGS_vpermxor1.o += $(altivec_flags)
95targets += vpermxor1.c
96$(obj)/vpermxor1.c: UNROLL := 1
97$(obj)/vpermxor1.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
98 $(call if_changed,unroll)
99
100CFLAGS_vpermxor2.o += $(altivec_flags)
101targets += vpermxor2.c
102$(obj)/vpermxor2.c: UNROLL := 2
103$(obj)/vpermxor2.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
104 $(call if_changed,unroll)
105
106CFLAGS_vpermxor4.o += $(altivec_flags)
107targets += vpermxor4.c
108$(obj)/vpermxor4.c: UNROLL := 4
109$(obj)/vpermxor4.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
110 $(call if_changed,unroll)
111
112CFLAGS_vpermxor8.o += $(altivec_flags)
113targets += vpermxor8.c
114$(obj)/vpermxor8.c: UNROLL := 8
115$(obj)/vpermxor8.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
116 $(call if_changed,unroll)
117
94CFLAGS_neon1.o += $(NEON_FLAGS) 118CFLAGS_neon1.o += $(NEON_FLAGS)
95targets += neon1.c 119targets += neon1.c
96$(obj)/neon1.c: UNROLL := 1 120$(obj)/neon1.c: UNROLL := 1
@@ -115,11 +139,6 @@ $(obj)/neon8.c: UNROLL := 8
115$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE 139$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
116 $(call if_changed,unroll) 140 $(call if_changed,unroll)
117 141
118targets += tilegx8.c
119$(obj)/tilegx8.c: UNROLL := 8
120$(obj)/tilegx8.c: $(src)/tilegx.uc $(src)/unroll.awk FORCE
121 $(call if_changed,unroll)
122
123targets += s390vx8.c 142targets += s390vx8.c
124$(obj)/s390vx8.c: UNROLL := 8 143$(obj)/s390vx8.c: UNROLL := 8
125$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE 144$(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 476994723258..5065b1e7e327 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -74,9 +74,10 @@ const struct raid6_calls * const raid6_algos[] = {
74 &raid6_altivec2, 74 &raid6_altivec2,
75 &raid6_altivec4, 75 &raid6_altivec4,
76 &raid6_altivec8, 76 &raid6_altivec8,
77#endif 77 &raid6_vpermxor1,
78#if defined(CONFIG_TILEGX) 78 &raid6_vpermxor2,
79 &raid6_tilegx8, 79 &raid6_vpermxor4,
80 &raid6_vpermxor8,
80#endif 81#endif
81#if defined(CONFIG_S390) 82#if defined(CONFIG_S390)
82 &raid6_s390vx8, 83 &raid6_s390vx8,
diff --git a/lib/raid6/altivec.uc b/lib/raid6/altivec.uc
index 682aae8a1fef..d20ed0d11411 100644
--- a/lib/raid6/altivec.uc
+++ b/lib/raid6/altivec.uc
@@ -24,10 +24,13 @@
24 24
25#include <linux/raid/pq.h> 25#include <linux/raid/pq.h>
26 26
27#ifdef CONFIG_ALTIVEC
28
27#include <altivec.h> 29#include <altivec.h>
28#ifdef __KERNEL__ 30#ifdef __KERNEL__
29# include <asm/cputable.h> 31# include <asm/cputable.h>
30# include <asm/switch_to.h> 32# include <asm/switch_to.h>
33#endif /* __KERNEL__ */
31 34
32/* 35/*
33 * This is the C data type to use. We use a vector of 36 * This is the C data type to use. We use a vector of
diff --git a/lib/raid6/sse2.c b/lib/raid6/sse2.c
index 1d2276b007ee..8191e1d0d2fb 100644
--- a/lib/raid6/sse2.c
+++ b/lib/raid6/sse2.c
@@ -91,7 +91,7 @@ static void raid6_sse21_gen_syndrome(int disks, size_t bytes, void **ptrs)
91 91
92static void raid6_sse21_xor_syndrome(int disks, int start, int stop, 92static void raid6_sse21_xor_syndrome(int disks, int start, int stop,
93 size_t bytes, void **ptrs) 93 size_t bytes, void **ptrs)
94 { 94{
95 u8 **dptr = (u8 **)ptrs; 95 u8 **dptr = (u8 **)ptrs;
96 u8 *p, *q; 96 u8 *p, *q;
97 int d, z, z0; 97 int d, z, z0;
@@ -200,9 +200,9 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
200 kernel_fpu_end(); 200 kernel_fpu_end();
201} 201}
202 202
203 static void raid6_sse22_xor_syndrome(int disks, int start, int stop, 203static void raid6_sse22_xor_syndrome(int disks, int start, int stop,
204 size_t bytes, void **ptrs) 204 size_t bytes, void **ptrs)
205 { 205{
206 u8 **dptr = (u8 **)ptrs; 206 u8 **dptr = (u8 **)ptrs;
207 u8 *p, *q; 207 u8 *p, *q;
208 int d, z, z0; 208 int d, z, z0;
@@ -265,7 +265,7 @@ static void raid6_sse22_gen_syndrome(int disks, size_t bytes, void **ptrs)
265 265
266 asm volatile("sfence" : : : "memory"); 266 asm volatile("sfence" : : : "memory");
267 kernel_fpu_end(); 267 kernel_fpu_end();
268 } 268}
269 269
270const struct raid6_calls raid6_sse2x2 = { 270const struct raid6_calls raid6_sse2x2 = {
271 raid6_sse22_gen_syndrome, 271 raid6_sse22_gen_syndrome,
@@ -366,9 +366,9 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
366 kernel_fpu_end(); 366 kernel_fpu_end();
367} 367}
368 368
369 static void raid6_sse24_xor_syndrome(int disks, int start, int stop, 369static void raid6_sse24_xor_syndrome(int disks, int start, int stop,
370 size_t bytes, void **ptrs) 370 size_t bytes, void **ptrs)
371 { 371{
372 u8 **dptr = (u8 **)ptrs; 372 u8 **dptr = (u8 **)ptrs;
373 u8 *p, *q; 373 u8 *p, *q;
374 int d, z, z0; 374 int d, z, z0;
@@ -471,7 +471,7 @@ static void raid6_sse24_gen_syndrome(int disks, size_t bytes, void **ptrs)
471 } 471 }
472 asm volatile("sfence" : : : "memory"); 472 asm volatile("sfence" : : : "memory");
473 kernel_fpu_end(); 473 kernel_fpu_end();
474 } 474}
475 475
476 476
477const struct raid6_calls raid6_sse2x4 = { 477const struct raid6_calls raid6_sse2x4 = {
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index be1010bdc435..5d73f5cb4d8a 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -45,15 +45,14 @@ else ifeq ($(HAS_NEON),yes)
45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
46else 46else
47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ 47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
48 gcc -c -x c - >&/dev/null && \ 48 gcc -c -x c - >/dev/null && rm ./-.o && echo yes)
49 rm ./-.o && echo yes)
50 ifeq ($(HAS_ALTIVEC),yes) 49 ifeq ($(HAS_ALTIVEC),yes)
51 OBJS += altivec1.o altivec2.o altivec4.o altivec8.o 50 CFLAGS += -I../../../arch/powerpc/include
51 CFLAGS += -DCONFIG_ALTIVEC
52 OBJS += altivec1.o altivec2.o altivec4.o altivec8.o \
53 vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
52 endif 54 endif
53endif 55endif
54ifeq ($(ARCH),tilegx)
55OBJS += tilegx8.o
56endif
57 56
58.c.o: 57.c.o:
59 $(CC) $(CFLAGS) -c -o $@ $< 58 $(CC) $(CFLAGS) -c -o $@ $<
@@ -98,6 +97,18 @@ altivec4.c: altivec.uc ../unroll.awk
98altivec8.c: altivec.uc ../unroll.awk 97altivec8.c: altivec.uc ../unroll.awk
99 $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@ 98 $(AWK) ../unroll.awk -vN=8 < altivec.uc > $@
100 99
100vpermxor1.c: vpermxor.uc ../unroll.awk
101 $(AWK) ../unroll.awk -vN=1 < vpermxor.uc > $@
102
103vpermxor2.c: vpermxor.uc ../unroll.awk
104 $(AWK) ../unroll.awk -vN=2 < vpermxor.uc > $@
105
106vpermxor4.c: vpermxor.uc ../unroll.awk
107 $(AWK) ../unroll.awk -vN=4 < vpermxor.uc > $@
108
109vpermxor8.c: vpermxor.uc ../unroll.awk
110 $(AWK) ../unroll.awk -vN=8 < vpermxor.uc > $@
111
101int1.c: int.uc ../unroll.awk 112int1.c: int.uc ../unroll.awk
102 $(AWK) ../unroll.awk -vN=1 < int.uc > $@ 113 $(AWK) ../unroll.awk -vN=1 < int.uc > $@
103 114
@@ -116,15 +127,11 @@ int16.c: int.uc ../unroll.awk
116int32.c: int.uc ../unroll.awk 127int32.c: int.uc ../unroll.awk
117 $(AWK) ../unroll.awk -vN=32 < int.uc > $@ 128 $(AWK) ../unroll.awk -vN=32 < int.uc > $@
118 129
119tilegx8.c: tilegx.uc ../unroll.awk
120 $(AWK) ../unroll.awk -vN=8 < tilegx.uc > $@
121
122tables.c: mktables 130tables.c: mktables
123 ./mktables > tables.c 131 ./mktables > tables.c
124 132
125clean: 133clean:
126 rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test 134 rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c vpermxor*.c neon*.c tables.c raid6test
127 rm -f tilegx*.c
128 135
129spotless: clean 136spotless: clean
130 rm -f *~ 137 rm -f *~
diff --git a/lib/raid6/tilegx.uc b/lib/raid6/tilegx.uc
deleted file mode 100644
index 2dd291a11264..000000000000
--- a/lib/raid6/tilegx.uc
+++ /dev/null
@@ -1,87 +0,0 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2002 H. Peter Anvin - All Rights Reserved
4 * Copyright 2012 Tilera Corporation - All Rights Reserved
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
9 * Boston MA 02111-1307, USA; either version 2 of the License, or
10 * (at your option) any later version; incorporated herein by reference.
11 *
12 * ----------------------------------------------------------------------- */
13
14/*
15 * tilegx$#.c
16 *
17 * $#-way unrolled TILE-Gx SIMD for RAID-6 math.
18 *
19 * This file is postprocessed using unroll.awk.
20 *
21 */
22
23#include <linux/raid/pq.h>
24
25/* Create 8 byte copies of constant byte */
26# define NBYTES(x) (__insn_v1addi(0, x))
27# define NSIZE 8
28
29/*
30 * The SHLBYTE() operation shifts each byte left by 1, *not*
31 * rolling over into the next byte
32 */
33static inline __attribute_const__ u64 SHLBYTE(u64 v)
34{
35 /* Vector One Byte Shift Left Immediate. */
36 return __insn_v1shli(v, 1);
37}
38
39/*
40 * The MASK() operation returns 0xFF in any byte for which the high
41 * bit is 1, 0x00 for any byte for which the high bit is 0.
42 */
43static inline __attribute_const__ u64 MASK(u64 v)
44{
45 /* Vector One Byte Shift Right Signed Immediate. */
46 return __insn_v1shrsi(v, 7);
47}
48
49
50void raid6_tilegx$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
51{
52 u8 **dptr = (u8 **)ptrs;
53 u64 *p, *q;
54 int d, z, z0;
55
56 u64 wd$$, wq$$, wp$$, w1$$, w2$$;
57 u64 x1d = NBYTES(0x1d);
58 u64 * z0ptr;
59
60 z0 = disks - 3; /* Highest data disk */
61 p = (u64 *)dptr[z0+1]; /* XOR parity */
62 q = (u64 *)dptr[z0+2]; /* RS syndrome */
63
64 z0ptr = (u64 *)&dptr[z0][0];
65 for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
66 wq$$ = wp$$ = *z0ptr++;
67 for ( z = z0-1 ; z >= 0 ; z-- ) {
68 wd$$ = *(u64 *)&dptr[z][d+$$*NSIZE];
69 wp$$ = wp$$ ^ wd$$;
70 w2$$ = MASK(wq$$);
71 w1$$ = SHLBYTE(wq$$);
72 w2$$ = w2$$ & x1d;
73 w1$$ = w1$$ ^ w2$$;
74 wq$$ = w1$$ ^ wd$$;
75 }
76 *p++ = wp$$;
77 *q++ = wq$$;
78 }
79}
80
81const struct raid6_calls raid6_tilegx$# = {
82 raid6_tilegx$#_gen_syndrome,
83 NULL, /* XOR not yet implemented */
84 NULL,
85 "tilegx$#",
86 0
87};
diff --git a/lib/raid6/vpermxor.uc b/lib/raid6/vpermxor.uc
new file mode 100644
index 000000000000..10475dc423c1
--- /dev/null
+++ b/lib/raid6/vpermxor.uc
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2017, Matt Brown, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * vpermxor$#.c
10 *
11 * Based on H. Peter Anvin's paper - The mathematics of RAID-6
12 *
13 * $#-way unrolled portable integer math RAID-6 instruction set
14 * This file is postprocessed using unroll.awk
15 *
16 * vpermxor$#.c makes use of the vpermxor instruction to optimise the RAID6 Q
17 * syndrome calculations.
 18 * This can be run on systems which have both Altivec and the vpermxor instruction.
19 *
20 * This instruction was introduced in POWER8 - ISA v2.07.
21 */
22
23#include <linux/raid/pq.h>
24#ifdef CONFIG_ALTIVEC
25
26#include <altivec.h>
27#ifdef __KERNEL__
28#include <asm/cputable.h>
29#include <asm/ppc-opcode.h>
30#include <asm/switch_to.h>
31#endif
32
33typedef vector unsigned char unative_t;
34#define NSIZE sizeof(unative_t)
35
36static const vector unsigned char gf_low = {0x1e, 0x1c, 0x1a, 0x18, 0x16, 0x14,
37 0x12, 0x10, 0x0e, 0x0c, 0x0a, 0x08,
 38 0x06, 0x04, 0x02, 0x00};
39static const vector unsigned char gf_high = {0xfd, 0xdd, 0xbd, 0x9d, 0x7d, 0x5d,
40 0x3d, 0x1d, 0xe0, 0xc0, 0xa0, 0x80,
41 0x60, 0x40, 0x20, 0x00};
42
43static void noinline raid6_vpermxor$#_gen_syndrome_real(int disks, size_t bytes,
44 void **ptrs)
45{
46 u8 **dptr = (u8 **)ptrs;
47 u8 *p, *q;
48 int d, z, z0;
49 unative_t wp$$, wq$$, wd$$;
50
51 z0 = disks - 3; /* Highest data disk */
52 p = dptr[z0+1]; /* XOR parity */
53 q = dptr[z0+2]; /* RS syndrome */
54
55 for (d = 0; d < bytes; d += NSIZE*$#) {
56 wp$$ = wq$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE];
57
 58 for (z = z0-1; z >= 0; z--) {
59 wd$$ = *(unative_t *)&dptr[z][d+$$*NSIZE];
60 /* P syndrome */
61 wp$$ = vec_xor(wp$$, wd$$);
62
63 /* Q syndrome */
64 asm(VPERMXOR(%0,%1,%2,%3):"=v"(wq$$):"v"(gf_high), "v"(gf_low), "v"(wq$$));
65 wq$$ = vec_xor(wq$$, wd$$);
66 }
67 *(unative_t *)&p[d+NSIZE*$$] = wp$$;
68 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
69 }
70}
71
72static void raid6_vpermxor$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
73{
74 preempt_disable();
75 enable_kernel_altivec();
76
77 raid6_vpermxor$#_gen_syndrome_real(disks, bytes, ptrs);
78
79 disable_kernel_altivec();
80 preempt_enable();
81}
82
83int raid6_have_altivec_vpermxor(void);
84#if $# == 1
85int raid6_have_altivec_vpermxor(void)
86{
87 /* Check if arch has both altivec and the vpermxor instructions */
88# ifdef __KERNEL__
89 return (cpu_has_feature(CPU_FTR_ALTIVEC_COMP) &&
90 cpu_has_feature(CPU_FTR_ARCH_207S));
91# else
92 return 1;
 93# endif
94
95}
96#endif
97
98const struct raid6_calls raid6_vpermxor$# = {
99 raid6_vpermxor$#_gen_syndrome,
100 NULL,
101 raid6_have_altivec_vpermxor,
102 "vpermxor$#",
103 0
104};
105#endif
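The gf_low/gf_high constants encode GF(2^8) multiplication by 2 (RAID-6 polynomial 0x11d) split across the two nibbles of each byte: VPERMXOR looks up both nibbles and XORs the results, replacing the MASK/SHLBYTE/AND/XOR sequence of altivec.uc with a single instruction, so the inner loop computes wq = 2*wq ^ wd per byte lane. A scalar model of one lane (the tables here are in natural nibble order; the vector constants above appear reversed to match the instruction's element indexing):

#include <linux/types.h>

/* tbl_lo[n] = 2*n; tbl_hi[n] = (2 * (n << 4)) mod 0x11d */
static u8 gf256_mul2(u8 b)
{
	static const u8 tbl_lo[16] = {
		0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
		0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
	};
	static const u8 tbl_hi[16] = {
		0x00, 0x20, 0x40, 0x60, 0x80, 0xa0, 0xc0, 0xe0,
		0x1d, 0x3d, 0x5d, 0x7d, 0x9d, 0xbd, 0xdd, 0xfd,
	};

	return tbl_hi[b >> 4] ^ tbl_lo[b & 0xf];
}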
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 47de025b6245..2b2b79974b61 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -333,6 +333,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
333 err = rhashtable_rehash_chain(ht, old_hash); 333 err = rhashtable_rehash_chain(ht, old_hash);
334 if (err) 334 if (err)
335 return err; 335 return err;
336 cond_resched();
336 } 337 }
337 338
338 /* Publish the new table pointer. */ 339 /* Publish the new table pointer. */
@@ -1112,6 +1113,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
1112 for (i = 0; i < tbl->size; i++) { 1113 for (i = 0; i < tbl->size; i++) {
1113 struct rhash_head *pos, *next; 1114 struct rhash_head *pos, *next;
1114 1115
1116 cond_resched();
1115 for (pos = rht_dereference(*rht_bucket(tbl, i), ht), 1117 for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
1116 next = !rht_is_a_nulls(pos) ? 1118 next = !rht_is_a_nulls(pos) ?
1117 rht_dereference(pos->next, ht) : NULL; 1119 rht_dereference(pos->next, ht) : NULL;
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 42b5ca0acf93..e6a9c06ec70c 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -100,7 +100,7 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
100 return -1; 100 return -1;
101 } 101 }
102 102
103 if (!test_and_set_bit(nr, word)) 103 if (!test_and_set_bit_lock(nr, word))
104 break; 104 break;
105 105
106 hint = nr + 1; 106 hint = nr + 1;
@@ -434,9 +434,9 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
434 /* 434 /*
435 * Pairs with the memory barrier in set_current_state() to ensure the 435 * Pairs with the memory barrier in set_current_state() to ensure the
436 * proper ordering of clear_bit()/waitqueue_active() in the waker and 436 * proper ordering of clear_bit()/waitqueue_active() in the waker and
437 * test_and_set_bit()/prepare_to_wait()/finish_wait() in the waiter. See 437 * test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
438 * the comment on waitqueue_active(). This is __after_atomic because we 438 * waiter. See the comment on waitqueue_active(). This is __after_atomic
439 * just did clear_bit() in the caller. 439 * because we just did clear_bit_unlock() in the caller.
440 */ 440 */
441 smp_mb__after_atomic(); 441 smp_mb__after_atomic();
442 442
@@ -469,7 +469,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
469void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 469void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
470 unsigned int cpu) 470 unsigned int cpu)
471{ 471{
472 sbitmap_clear_bit(&sbq->sb, nr); 472 sbitmap_clear_bit_unlock(&sbq->sb, nr);
473 sbq_wake_up(sbq); 473 sbq_wake_up(sbq);
474 if (likely(!sbq->round_robin && nr < sbq->sb.depth)) 474 if (likely(!sbq->round_robin && nr < sbq->sb.depth))
475 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr; 475 *per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
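The point of moving to the _lock/_unlock bit ops is that each sbitmap bit now behaves like a tiny lock word: a successful test_and_set_bit_lock() is an acquire, clear_bit_unlock() a release, so whatever the previous owner wrote while holding the tag is visible to the next claimant without extra barriers. In sketch form (demo_claim/demo_release are illustrative names):

#include <linux/bitops.h>

static bool demo_claim(unsigned long *word, unsigned int nr)
{
	return !test_and_set_bit_lock(nr, word);	/* acquire */
}

static void demo_release(unsigned long *word, unsigned int nr)
{
	/* all stores made while holding tag nr are ordered before this */
	clear_bit_unlock(nr, word);			/* release */
}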
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 53728d391d3a..06dad7a072fd 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -132,14 +132,7 @@ EXPORT_SYMBOL(sg_last);
132void sg_init_table(struct scatterlist *sgl, unsigned int nents) 132void sg_init_table(struct scatterlist *sgl, unsigned int nents)
133{ 133{
134 memset(sgl, 0, sizeof(*sgl) * nents); 134 memset(sgl, 0, sizeof(*sgl) * nents);
135#ifdef CONFIG_DEBUG_SG 135 sg_init_marker(sgl, nents);
136 {
137 unsigned int i;
138 for (i = 0; i < nents; i++)
139 sgl[i].sg_magic = SG_MAGIC;
140 }
141#endif
142 sg_mark_end(&sgl[nents - 1]);
143} 136}
144EXPORT_SYMBOL(sg_init_table); 137EXPORT_SYMBOL(sg_init_table);
145 138
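The debug-magic loop and end marking removed here move into a new sg_init_marker() helper, so callers that have already zeroed the array can skip the memset(). Presumably (the helper itself is not shown in this diff) it is the lifted tail of the old sg_init_table(), along the lines of:

static inline void sg_init_marker(struct scatterlist *sgl,
				  unsigned int nents)
{
#ifdef CONFIG_DEBUG_SG
	unsigned int i;

	for (i = 0; i < nents; i++)
		sgl[i].sg_magic = SG_MAGIC;
#endif
	sg_mark_end(&sgl[nents - 1]);
}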
diff --git a/lib/sha256.c b/lib/sha256.c
new file mode 100644
index 000000000000..4400c832e2aa
--- /dev/null
+++ b/lib/sha256.c
@@ -0,0 +1,283 @@
1/*
2 * SHA-256, as specified in
3 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
4 *
5 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
6 *
7 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
8 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
9 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
10 * Copyright (c) 2014 Red Hat Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the Free
14 * Software Foundation; either version 2 of the License, or (at your option)
15 * any later version.
16 */
17
18#include <linux/bitops.h>
19#include <linux/sha256.h>
20#include <linux/string.h>
21#include <asm/byteorder.h>
22
23static inline u32 Ch(u32 x, u32 y, u32 z)
24{
25 return z ^ (x & (y ^ z));
26}
27
28static inline u32 Maj(u32 x, u32 y, u32 z)
29{
30 return (x & y) | (z & (x | y));
31}
32
33#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
34#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
35#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
36#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))
37
38static inline void LOAD_OP(int I, u32 *W, const u8 *input)
39{
40 W[I] = __be32_to_cpu(((__be32 *)(input))[I]);
41}
42
43static inline void BLEND_OP(int I, u32 *W)
44{
45 W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
46}
47
48static void sha256_transform(u32 *state, const u8 *input)
49{
50 u32 a, b, c, d, e, f, g, h, t1, t2;
51 u32 W[64];
52 int i;
53
54 /* load the input */
55 for (i = 0; i < 16; i++)
56 LOAD_OP(i, W, input);
57
58 /* now blend */
59 for (i = 16; i < 64; i++)
60 BLEND_OP(i, W);
61
62 /* load the state into our registers */
63 a = state[0]; b = state[1]; c = state[2]; d = state[3];
64 e = state[4]; f = state[5]; g = state[6]; h = state[7];
65
66 /* now iterate */
67 t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
68 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
69 t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
70 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
71 t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
72 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
73 t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
74 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
75 t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
76 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
77 t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
78 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
79 t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
80 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
81 t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
82 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1 + t2;
83
84 t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
85 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1 + t2;
86 t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
87 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1 + t2;
88 t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
89 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1 + t2;
90 t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
91 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1 + t2;
92 t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
93 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1 + t2;
94 t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
95 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1 + t2;
96 t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
97 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1 + t2;
98 t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
99 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
100
101 t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
102 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
103 t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
104 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
105 t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
106 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
107 t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
108 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
109 t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
110 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
111 t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
112 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
113 t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
114 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
115 t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
116 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
117
118 t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
119 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
120 t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
121 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
122 t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
123 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
124 t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
125 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
126 t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
127 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
128 t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
129 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
130 t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
131 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
132 t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
133 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
134
135 t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
136 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
137 t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
138 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
139 t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
140 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
141 t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
142 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
143 t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
144 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
145 t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
146 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
147 t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
148 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
149 t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
150 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
151
152 t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
153 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
154 t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
155 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
156 t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
157 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
158 t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
159 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
160 t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
161 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
162 t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
163 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
164 t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
165 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
166 t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
167 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
168
169 t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
170 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
171 t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
172 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
173 t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
174 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
175 t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
176 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
177 t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
178 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
179 t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
180 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
181 t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
182 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
183 t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
184 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
185
186 t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
187 t2 = e0(a) + Maj(a, b, c); d += t1; h = t1+t2;
188 t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
189 t2 = e0(h) + Maj(h, a, b); c += t1; g = t1+t2;
190 t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
191 t2 = e0(g) + Maj(g, h, a); b += t1; f = t1+t2;
192 t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
193 t2 = e0(f) + Maj(f, g, h); a += t1; e = t1+t2;
194 t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
195 t2 = e0(e) + Maj(e, f, g); h += t1; d = t1+t2;
196 t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
197 t2 = e0(d) + Maj(d, e, f); g += t1; c = t1+t2;
198 t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
199 t2 = e0(c) + Maj(c, d, e); f += t1; b = t1+t2;
200 t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
201 t2 = e0(b) + Maj(b, c, d); e += t1; a = t1+t2;
202
203 state[0] += a; state[1] += b; state[2] += c; state[3] += d;
204 state[4] += e; state[5] += f; state[6] += g; state[7] += h;
205
206 /* clear any sensitive info... */
207 a = b = c = d = e = f = g = h = t1 = t2 = 0;
208 memset(W, 0, 64 * sizeof(u32));
209}
210
211int sha256_init(struct sha256_state *sctx)
212{
213 sctx->state[0] = SHA256_H0;
214 sctx->state[1] = SHA256_H1;
215 sctx->state[2] = SHA256_H2;
216 sctx->state[3] = SHA256_H3;
217 sctx->state[4] = SHA256_H4;
218 sctx->state[5] = SHA256_H5;
219 sctx->state[6] = SHA256_H6;
220 sctx->state[7] = SHA256_H7;
221 sctx->count = 0;
222
223 return 0;
224}
225
226int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
227{
228 unsigned int partial, done;
229 const u8 *src;
230
231 partial = sctx->count & 0x3f;
232 sctx->count += len;
233 done = 0;
234 src = data;
235
236 if ((partial + len) > 63) {
237 if (partial) {
238 done = -partial;
239 memcpy(sctx->buf + partial, data, done + 64);
240 src = sctx->buf;
241 }
242
243 do {
244 sha256_transform(sctx->state, src);
245 done += 64;
246 src = data + done;
247 } while (done + 63 < len);
248
249 partial = 0;
250 }
251 memcpy(sctx->buf + partial, src, len - done);
252
253 return 0;
254}
255
256int sha256_final(struct sha256_state *sctx, u8 *out)
257{
258 __be32 *dst = (__be32 *)out;
259 __be64 bits;
260 unsigned int index, pad_len;
261 int i;
262 static const u8 padding[64] = { 0x80, };
263
264 /* Save number of bits */
265 bits = cpu_to_be64(sctx->count << 3);
266
267 /* Pad out to 56 mod 64. */
268 index = sctx->count & 0x3f;
269 pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
270 sha256_update(sctx, padding, pad_len);
271
272 /* Append length (before padding) */
273 sha256_update(sctx, (const u8 *)&bits, sizeof(bits));
274
275 /* Store state in digest */
276 for (i = 0; i < 8; i++)
277 dst[i] = cpu_to_be32(sctx->state[i]);
278
279 /* Zeroize sensitive information. */
280 memset(sctx, 0, sizeof(*sctx));
281
282 return 0;
283}
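Using the interface added above is the usual init/update/final sequence; a minimal sketch, assuming SHA256_DIGEST_SIZE (32) is available via <crypto/sha.h>:

#include <linux/sha256.h>

static void demo_digest(const u8 *data, unsigned int len,
			u8 hash[SHA256_DIGEST_SIZE])
{
	struct sha256_state sctx;

	sha256_init(&sctx);
	sha256_update(&sctx, data, len);
	sha256_final(&sctx, hash);	/* also zeroizes sctx */
}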
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c43ec2271469..cc640588f145 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -31,6 +31,7 @@
31#include <linux/gfp.h> 31#include <linux/gfp.h>
32#include <linux/scatterlist.h> 32#include <linux/scatterlist.h>
33#include <linux/mem_encrypt.h> 33#include <linux/mem_encrypt.h>
34#include <linux/set_memory.h>
34 35
35#include <asm/io.h> 36#include <asm/io.h>
36#include <asm/dma.h> 37#include <asm/dma.h>
@@ -156,22 +157,6 @@ unsigned long swiotlb_size_or_default(void)
156 return size ? size : (IO_TLB_DEFAULT_SIZE); 157 return size ? size : (IO_TLB_DEFAULT_SIZE);
157} 158}
158 159
159void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }
160
161/* For swiotlb, clear memory encryption mask from dma addresses */
162static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
163 phys_addr_t address)
164{
165 return __sme_clr(phys_to_dma(hwdev, address));
166}
167
168/* Note that this doesn't work with highmem page */
169static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
170 volatile void *address)
171{
172 return phys_to_dma(hwdev, virt_to_phys(address));
173}
174
175static bool no_iotlb_memory; 160static bool no_iotlb_memory;
176 161
177void swiotlb_print_info(void) 162void swiotlb_print_info(void)
@@ -209,12 +194,12 @@ void __init swiotlb_update_mem_attributes(void)
209 194
210 vaddr = phys_to_virt(io_tlb_start); 195 vaddr = phys_to_virt(io_tlb_start);
211 bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT); 196 bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
212 swiotlb_set_mem_attributes(vaddr, bytes); 197 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
213 memset(vaddr, 0, bytes); 198 memset(vaddr, 0, bytes);
214 199
215 vaddr = phys_to_virt(io_tlb_overflow_buffer); 200 vaddr = phys_to_virt(io_tlb_overflow_buffer);
216 bytes = PAGE_ALIGN(io_tlb_overflow); 201 bytes = PAGE_ALIGN(io_tlb_overflow);
217 swiotlb_set_mem_attributes(vaddr, bytes); 202 set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
218 memset(vaddr, 0, bytes); 203 memset(vaddr, 0, bytes);
219} 204}
220 205
@@ -355,7 +340,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
355 io_tlb_start = virt_to_phys(tlb); 340 io_tlb_start = virt_to_phys(tlb);
356 io_tlb_end = io_tlb_start + bytes; 341 io_tlb_end = io_tlb_start + bytes;
357 342
358 swiotlb_set_mem_attributes(tlb, bytes); 343 set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
359 memset(tlb, 0, bytes); 344 memset(tlb, 0, bytes);
360 345
361 /* 346 /*
@@ -366,7 +351,8 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
366 if (!v_overflow_buffer) 351 if (!v_overflow_buffer)
367 goto cleanup2; 352 goto cleanup2;
368 353
369 swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow); 354 set_memory_decrypted((unsigned long)v_overflow_buffer,
355 io_tlb_overflow >> PAGE_SHIFT);
370 memset(v_overflow_buffer, 0, io_tlb_overflow); 356 memset(v_overflow_buffer, 0, io_tlb_overflow);
371 io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer); 357 io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
372 358
@@ -622,7 +608,7 @@ map_single(struct device *hwdev, phys_addr_t phys, size_t size,
622 return SWIOTLB_MAP_ERROR; 608 return SWIOTLB_MAP_ERROR;
623 } 609 }
624 610
625 start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start); 611 start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
626 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, 612 return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
627 dir, attrs); 613 dir, attrs);
628} 614}
@@ -706,6 +692,7 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
706 } 692 }
707} 693}
708 694
695#ifdef CONFIG_DMA_DIRECT_OPS
709static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr, 696static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
710 size_t size) 697 size_t size)
711{ 698{
@@ -726,13 +713,13 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
726 goto out_warn; 713 goto out_warn;
727 714
728 phys_addr = swiotlb_tbl_map_single(dev, 715 phys_addr = swiotlb_tbl_map_single(dev,
729 swiotlb_phys_to_dma(dev, io_tlb_start), 716 __phys_to_dma(dev, io_tlb_start),
730 0, size, DMA_FROM_DEVICE, 0); 717 0, size, DMA_FROM_DEVICE, attrs);
731 if (phys_addr == SWIOTLB_MAP_ERROR) 718 if (phys_addr == SWIOTLB_MAP_ERROR)
732 goto out_warn; 719 goto out_warn;
733 720
734 *dma_handle = swiotlb_phys_to_dma(dev, phys_addr); 721 *dma_handle = __phys_to_dma(dev, phys_addr);
735 if (dma_coherent_ok(dev, *dma_handle, size)) 722 if (!dma_coherent_ok(dev, *dma_handle, size))
736 goto out_unmap; 723 goto out_unmap;
737 724
738 memset(phys_to_virt(phys_addr), 0, size); 725 memset(phys_to_virt(phys_addr), 0, size);
@@ -750,7 +737,7 @@ out_unmap:
750 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE, 737 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
751 DMA_ATTR_SKIP_CPU_SYNC); 738 DMA_ATTR_SKIP_CPU_SYNC);
752out_warn: 739out_warn:
753 if ((attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) { 740 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
754 dev_warn(dev, 741 dev_warn(dev,
755 "swiotlb: coherent allocation failed, size=%zu\n", 742 "swiotlb: coherent allocation failed, size=%zu\n",
756 size); 743 size);
@@ -759,28 +746,6 @@ out_warn:
759 return NULL; 746 return NULL;
760} 747}
761 748
762void *
763swiotlb_alloc_coherent(struct device *hwdev, size_t size,
764 dma_addr_t *dma_handle, gfp_t flags)
765{
766 int order = get_order(size);
767 unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
768 void *ret;
769
770 ret = (void *)__get_free_pages(flags, order);
771 if (ret) {
772 *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
773 if (dma_coherent_ok(hwdev, *dma_handle, size)) {
774 memset(ret, 0, size);
775 return ret;
776 }
777 free_pages((unsigned long)ret, order);
778 }
779
780 return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
781}
782EXPORT_SYMBOL(swiotlb_alloc_coherent);
783
784static bool swiotlb_free_buffer(struct device *dev, size_t size, 749static bool swiotlb_free_buffer(struct device *dev, size_t size,
785 dma_addr_t dma_addr) 750 dma_addr_t dma_addr)
786{ 751{
@@ -799,15 +764,7 @@ static bool swiotlb_free_buffer(struct device *dev, size_t size,
799 DMA_ATTR_SKIP_CPU_SYNC); 764 DMA_ATTR_SKIP_CPU_SYNC);
800 return true; 765 return true;
801} 766}
802 767#endif
803void
804swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
805 dma_addr_t dev_addr)
806{
807 if (!swiotlb_free_buffer(hwdev, size, dev_addr))
808 free_pages((unsigned long)vaddr, get_order(size));
809}
810EXPORT_SYMBOL(swiotlb_free_coherent);
811 768
812static void 769static void
813swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir, 770swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
@@ -867,10 +824,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
867 map = map_single(dev, phys, size, dir, attrs); 824 map = map_single(dev, phys, size, dir, attrs);
868 if (map == SWIOTLB_MAP_ERROR) { 825 if (map == SWIOTLB_MAP_ERROR) {
869 swiotlb_full(dev, size, dir, 1); 826 swiotlb_full(dev, size, dir, 1);
870 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 827 return __phys_to_dma(dev, io_tlb_overflow_buffer);
871 } 828 }
872 829
873 dev_addr = swiotlb_phys_to_dma(dev, map); 830 dev_addr = __phys_to_dma(dev, map);
874 831
875 /* Ensure that the address returned is DMA'ble */ 832 /* Ensure that the address returned is DMA'ble */
876 if (dma_capable(dev, dev_addr, size)) 833 if (dma_capable(dev, dev_addr, size))
@@ -879,7 +836,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
879 attrs |= DMA_ATTR_SKIP_CPU_SYNC; 836 attrs |= DMA_ATTR_SKIP_CPU_SYNC;
880 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs); 837 swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
881 838
882 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 839 return __phys_to_dma(dev, io_tlb_overflow_buffer);
883} 840}
884 841
885/* 842/*
@@ -1009,7 +966,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
1009 sg_dma_len(sgl) = 0; 966 sg_dma_len(sgl) = 0;
1010 return 0; 967 return 0;
1011 } 968 }
1012 sg->dma_address = swiotlb_phys_to_dma(hwdev, map); 969 sg->dma_address = __phys_to_dma(hwdev, map);
1013 } else 970 } else
1014 sg->dma_address = dev_addr; 971 sg->dma_address = dev_addr;
1015 sg_dma_len(sg) = sg->length; 972 sg_dma_len(sg) = sg->length;
@@ -1073,7 +1030,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
1073int 1030int
1074swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 1031swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1075{ 1032{
1076 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer)); 1033 return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
1077} 1034}
1078 1035
1079/* 1036/*
@@ -1085,7 +1042,7 @@ swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1085int 1042int
1086swiotlb_dma_supported(struct device *hwdev, u64 mask) 1043swiotlb_dma_supported(struct device *hwdev, u64 mask)
1087{ 1044{
1088 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; 1045 return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
1089} 1046}
1090 1047
1091#ifdef CONFIG_DMA_DIRECT_OPS 1048#ifdef CONFIG_DMA_DIRECT_OPS
@@ -1130,6 +1087,6 @@ const struct dma_map_ops swiotlb_dma_ops = {
1130 .unmap_sg = swiotlb_unmap_sg_attrs, 1087 .unmap_sg = swiotlb_unmap_sg_attrs,
1131 .map_page = swiotlb_map_page, 1088 .map_page = swiotlb_map_page,
1132 .unmap_page = swiotlb_unmap_page, 1089 .unmap_page = swiotlb_unmap_page,
1133 .dma_supported = swiotlb_dma_supported, 1090 .dma_supported = dma_direct_supported,
1134}; 1091};
1135#endif /* CONFIG_DMA_DIRECT_OPS */ 1092#endif /* CONFIG_DMA_DIRECT_OPS */
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index b3f235baa05d..6cd7d0740005 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
255 {-EINVAL, "-1", NULL, 8, 0}, 255 {-EINVAL, "-1", NULL, 8, 0},
256 {-EINVAL, "-0", NULL, 8, 0}, 256 {-EINVAL, "-0", NULL, 8, 0},
257 {-EINVAL, "10-1", NULL, 8, 0}, 257 {-EINVAL, "10-1", NULL, 8, 0},
258 {-EINVAL, "0-31:", NULL, 8, 0},
259 {-EINVAL, "0-31:0", NULL, 8, 0},
260 {-EINVAL, "0-31:0/0", NULL, 8, 0},
261 {-EINVAL, "0-31:1/0", NULL, 8, 0},
258 {-EINVAL, "0-31:10/1", NULL, 8, 0}, 262 {-EINVAL, "0-31:10/1", NULL, 8, 0},
259}; 263};
260 264
@@ -292,15 +296,17 @@ static void __init test_bitmap_parselist(void)
292 } 296 }
293} 297}
294 298
299#define EXP_BYTES (sizeof(exp) * 8)
300
295static void __init test_bitmap_arr32(void) 301static void __init test_bitmap_arr32(void)
296{ 302{
297 unsigned int nbits, next_bit, len = sizeof(exp) * 8; 303 unsigned int nbits, next_bit;
298 u32 arr[sizeof(exp) / 4]; 304 u32 arr[sizeof(exp) / 4];
299 DECLARE_BITMAP(bmap2, len); 305 DECLARE_BITMAP(bmap2, EXP_BYTES);
300 306
301 memset(arr, 0xa5, sizeof(arr)); 307 memset(arr, 0xa5, sizeof(arr));
302 308
303 for (nbits = 0; nbits < len; ++nbits) { 309 for (nbits = 0; nbits < EXP_BYTES; ++nbits) {
304 bitmap_to_arr32(arr, exp, nbits); 310 bitmap_to_arr32(arr, exp, nbits);
305 bitmap_from_arr32(bmap2, arr, nbits); 311 bitmap_from_arr32(bmap2, arr, nbits);
306 expect_eq_bitmap(bmap2, exp, nbits); 312 expect_eq_bitmap(bmap2, exp, nbits);
@@ -312,7 +318,7 @@ static void __init test_bitmap_arr32(void)
312 " tail is not safely cleared: %d\n", 318 " tail is not safely cleared: %d\n",
313 nbits, next_bit); 319 nbits, next_bit);
314 320
315 if (nbits < len - 32) 321 if (nbits < EXP_BYTES - 32)
316 expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)], 322 expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
317 0xa5a5a5a5); 323 0xa5a5a5a5);
318 } 324 }
@@ -325,23 +331,32 @@ static void noinline __init test_mem_optimisations(void)
325 unsigned int start, nbits; 331 unsigned int start, nbits;
326 332
327 for (start = 0; start < 1024; start += 8) { 333 for (start = 0; start < 1024; start += 8) {
328 memset(bmap1, 0x5a, sizeof(bmap1));
329 memset(bmap2, 0x5a, sizeof(bmap2));
330 for (nbits = 0; nbits < 1024 - start; nbits += 8) { 334 for (nbits = 0; nbits < 1024 - start; nbits += 8) {
335 memset(bmap1, 0x5a, sizeof(bmap1));
336 memset(bmap2, 0x5a, sizeof(bmap2));
337
331 bitmap_set(bmap1, start, nbits); 338 bitmap_set(bmap1, start, nbits);
332 __bitmap_set(bmap2, start, nbits); 339 __bitmap_set(bmap2, start, nbits);
333 if (!bitmap_equal(bmap1, bmap2, 1024)) 340 if (!bitmap_equal(bmap1, bmap2, 1024)) {
334 printk("set not equal %d %d\n", start, nbits); 341 printk("set not equal %d %d\n", start, nbits);
335 if (!__bitmap_equal(bmap1, bmap2, 1024)) 342 failed_tests++;
343 }
344 if (!__bitmap_equal(bmap1, bmap2, 1024)) {
336 printk("set not __equal %d %d\n", start, nbits); 345 printk("set not __equal %d %d\n", start, nbits);
346 failed_tests++;
347 }
337 348
338 bitmap_clear(bmap1, start, nbits); 349 bitmap_clear(bmap1, start, nbits);
339 __bitmap_clear(bmap2, start, nbits); 350 __bitmap_clear(bmap2, start, nbits);
340 if (!bitmap_equal(bmap1, bmap2, 1024)) 351 if (!bitmap_equal(bmap1, bmap2, 1024)) {
341 printk("clear not equal %d %d\n", start, nbits); 352 printk("clear not equal %d %d\n", start, nbits);
342 if (!__bitmap_equal(bmap1, bmap2, 1024)) 353 failed_tests++;
354 }
355 if (!__bitmap_equal(bmap1, bmap2, 1024)) {
343 printk("clear not __equal %d %d\n", start, 356 printk("clear not __equal %d %d\n", start,
344 nbits); 357 nbits);
358 failed_tests++;
359 }
345 } 360 }
346 } 361 }
347} 362}
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 3e9335493fe4..8e157806df7a 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -6574,6 +6574,93 @@ static bool exclude_test(int test_id)
6574 return test_id < test_range[0] || test_id > test_range[1]; 6574 return test_id < test_range[0] || test_id > test_range[1];
6575} 6575}
6576 6576
6577static __init struct sk_buff *build_test_skb(void)
6578{
6579 u32 headroom = NET_SKB_PAD + NET_IP_ALIGN + ETH_HLEN;
6580 struct sk_buff *skb[2];
6581 struct page *page[2];
6582 int i, data_size = 8;
6583
6584 for (i = 0; i < 2; i++) {
6585 page[i] = alloc_page(GFP_KERNEL);
6586 if (!page[i]) {
6587 if (i == 0)
6588 goto err_page0;
6589 else
6590 goto err_page1;
6591 }
6592
6593 /* this will set skb[i]->head_frag */
6594 skb[i] = dev_alloc_skb(headroom + data_size);
6595 if (!skb[i]) {
6596 if (i == 0)
6597 goto err_skb0;
6598 else
6599 goto err_skb1;
6600 }
6601
6602 skb_reserve(skb[i], headroom);
6603 skb_put(skb[i], data_size);
6604 skb[i]->protocol = htons(ETH_P_IP);
6605 skb_reset_network_header(skb[i]);
6606 skb_set_mac_header(skb[i], -ETH_HLEN);
6607
6608 skb_add_rx_frag(skb[i], 0, page[i], 0, 64, 64);
6609 /* skb_headlen(skb[i]): 8, skb[i]->head_frag = 1 */
6610 }
6611
6612 /* setup shinfo */
6613 skb_shinfo(skb[0])->gso_size = 1448;
6614 skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV4;
6615 skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY;
6616 skb_shinfo(skb[0])->gso_segs = 0;
6617 skb_shinfo(skb[0])->frag_list = skb[1];
6618
6619 /* adjust skb[0]'s len */
6620 skb[0]->len += skb[1]->len;
6621 skb[0]->data_len += skb[1]->data_len;
6622 skb[0]->truesize += skb[1]->truesize;
6623
6624 return skb[0];
6625
6626err_skb1:
6627 __free_page(page[1]);
6628err_page1:
6629 kfree_skb(skb[0]);
6630err_skb0:
6631 __free_page(page[0]);
6632err_page0:
6633 return NULL;
6634}
6635
6636static __init int test_skb_segment(void)
6637{
6638 netdev_features_t features;
6639 struct sk_buff *skb, *segs;
6640 int ret = -1;
6641
6642 features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM |
6643 NETIF_F_IPV6_CSUM;
6644 features |= NETIF_F_RXCSUM;
6645 skb = build_test_skb();
6646 if (!skb) {
6647 pr_info("%s: build_test_skb() failed\n", __func__);
6648 goto done;
6649 }
6650
6651 segs = skb_segment(skb, features);
6652 if (!IS_ERR(segs)) {
6653 kfree_skb_list(segs);
6654 ret = 0;
6655 pr_info("%s: success in skb_segment!\n", __func__);
6656 } else {
6657 pr_info("%s: failed in skb_segment!\n", __func__);
6658 }
6659 kfree_skb(skb);
6660done:
6661 return ret;
6662}
6663
6577static __init int test_bpf(void) 6664static __init int test_bpf(void)
6578{ 6665{
6579 int i, err_cnt = 0, pass_cnt = 0; 6666 int i, err_cnt = 0, pass_cnt = 0;
@@ -6632,9 +6719,11 @@ static int __init test_bpf_init(void)
6632 return ret; 6719 return ret;
6633 6720
6634 ret = test_bpf(); 6721 ret = test_bpf();
6635
6636 destroy_bpf_tests(); 6722 destroy_bpf_tests();
6637 return ret; 6723 if (ret)
6724 return ret;
6725
6726 return test_skb_segment();
6638} 6727}
6639 6728
6640static void __exit test_bpf_exit(void) 6729static void __exit test_bpf_exit(void)
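
Note: for orientation, a comment-only sketch of the skb that build_test_skb() hands to skb_segment(), with the sizes taken from the code above:

	/*
	 * skb[0]: 8 bytes linear + one 64-byte page frag (len 72);
	 *         gso_size = 1448, gso_type = TCPV4 | DODGY, gso_segs = 0
	 * skb[0]->frag_list = skb[1]: 8 bytes linear + one 64-byte page frag
	 * after the final accounting: skb[0]->len = 72 + 72 = 144
	 */

The DODGY flag marks the GSO metadata as coming from an untrusted source, and gso_segs == 0 leaves the segment count for skb_segment() to compute, so the test drives skb_segment() through both the page frag and the frag_list child.
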
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 078a61480573..cee000ac54d8 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -21,6 +21,7 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/kthread.h> 23#include <linux/kthread.h>
24#include <linux/vmalloc.h>
24 25
25#define TEST_FIRMWARE_NAME "test-firmware.bin" 26#define TEST_FIRMWARE_NAME "test-firmware.bin"
26#define TEST_FIRMWARE_NUM_REQS 4 27#define TEST_FIRMWARE_NUM_REQS 4
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 98854a64b014..ec657105edbf 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -567,7 +567,15 @@ static noinline void __init kmem_cache_invalid_free(void)
567 return; 567 return;
568 } 568 }
569 569
570 /* Trigger an invalid free; the object is not actually freed. */
570 kmem_cache_free(cache, p + 1); 571 kmem_cache_free(cache, p + 1);
572
573 /*
574 * Properly free the object to prevent the "Objects remaining in
575 * test_cache on __kmem_cache_shutdown" BUG failure.
576 */
577 kmem_cache_free(cache, p);
578
571 kmem_cache_destroy(cache); 579 kmem_cache_destroy(cache);
572} 580}
573 581
diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c
new file mode 100644
index 000000000000..280f4979d00e
--- /dev/null
+++ b/lib/test_ubsan.c
@@ -0,0 +1,144 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/init.h>
3#include <linux/kernel.h>
4#include <linux/module.h>
5
6typedef void (*test_ubsan_fp)(void);
7
8static void test_ubsan_add_overflow(void)
9{
10 volatile int val = INT_MAX;
11
12 val += 2;
13}
14
15static void test_ubsan_sub_overflow(void)
16{
17 volatile int val = INT_MIN;
18 volatile int val2 = 2;
19
20 val -= val2;
21}
22
23static void test_ubsan_mul_overflow(void)
24{
25 volatile int val = INT_MAX / 2;
26
27 val *= 3;
28}
29
30static void test_ubsan_negate_overflow(void)
31{
32 volatile int val = INT_MIN;
33
34 val = -val;
35}
36
37static void test_ubsan_divrem_overflow(void)
38{
39 volatile int val = 16;
40 volatile int val2 = 0;
41
42 val /= val2;
43}
44
45static void test_ubsan_vla_bound_not_positive(void)
46{
47 volatile int size = -1;
48 char buf[size];
49
50 (void)buf;
51}
52
53static void test_ubsan_shift_out_of_bounds(void)
54{
55 volatile int val = -1;
56 int val2 = 10;
57
58 val2 <<= val;
59}
60
61static void test_ubsan_out_of_bounds(void)
62{
63 volatile int i = 4, j = 5;
64 volatile int arr[i];
65
66 arr[j] = i;
67}
68
69static void test_ubsan_load_invalid_value(void)
70{
71 volatile char *dst, *src;
72 bool val, val2, *ptr;
73 char c = 4;
74
75 dst = (char *)&val;
76 src = &c;
77 *dst = *src;
78
79 ptr = &val2;
80 val2 = val;
81}
82
83static void test_ubsan_null_ptr_deref(void)
84{
85 volatile int *ptr = NULL;
86 int val;
87
88 val = *ptr;
89}
90
91static void test_ubsan_misaligned_access(void)
92{
93 volatile char arr[5] __aligned(4) = {1, 2, 3, 4, 5};
94 volatile int *ptr, val = 6;
95
96 ptr = (int *)(arr + 1);
97 *ptr = val;
98}
99
100static void test_ubsan_object_size_mismatch(void)
101{
102 /* "((aligned(8)))" keeps this from being misaligned for the pointer access. */
103 volatile int val __aligned(8) = 4;
104 volatile long long *ptr, val2;
105
106 ptr = (long long *)&val;
107 val2 = *ptr;
108}
109
110static const test_ubsan_fp test_ubsan_array[] = {
111 test_ubsan_add_overflow,
112 test_ubsan_sub_overflow,
113 test_ubsan_mul_overflow,
114 test_ubsan_negate_overflow,
115 test_ubsan_divrem_overflow,
116 test_ubsan_vla_bound_not_positive,
117 test_ubsan_shift_out_of_bounds,
118 test_ubsan_out_of_bounds,
119 test_ubsan_load_invalid_value,
120 //test_ubsan_null_ptr_deref, /* excluded: dereferencing NULL crashes the kernel */
121 test_ubsan_misaligned_access,
122 test_ubsan_object_size_mismatch,
123};
124
125static int __init test_ubsan_init(void)
126{
127 unsigned int i;
128
129 for (i = 0; i < ARRAY_SIZE(test_ubsan_array); i++)
130 test_ubsan_array[i]();
131
132 (void)test_ubsan_null_ptr_deref; /* to avoid an unused-function warning */
133 return 0;
134}
135module_init(test_ubsan_init);
136
137static void __exit test_ubsan_exit(void)
138{
139 /* do nothing */
140}
141module_exit(test_ubsan_exit);
142
143MODULE_AUTHOR("Jinbum Park <jinb.park7@gmail.com>");
144MODULE_LICENSE("GPL v2");
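
Note: the same classes of undefined behavior can be reproduced in userspace with the compiler's UBSAN runtime. A minimal sketch mirroring test_ubsan_add_overflow() above, built with cc -fsanitize=undefined:

	/* cc -fsanitize=undefined demo.c && ./a.out
	 * prints a "signed integer overflow" runtime error, the userspace
	 * analogue of the report the in-kernel module emits to dmesg. */
	#include <limits.h>

	int main(void)
	{
		volatile int val = INT_MAX;

		val += 2; /* signed overflow, flagged by UBSAN */
		return 0;
	}
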
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 4621db801b23..e161f0498f42 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -31,11 +31,8 @@
31 * their capability at compile-time, we just have to opt-out certain archs. 31 * their capability at compile-time, we just have to opt-out certain archs.
32 */ 32 */
33#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \ 33#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
34 !defined(CONFIG_BLACKFIN) && \
35 !defined(CONFIG_M32R) && \
36 !defined(CONFIG_M68K) && \ 34 !defined(CONFIG_M68K) && \
37 !defined(CONFIG_MICROBLAZE) && \ 35 !defined(CONFIG_MICROBLAZE) && \
38 !defined(CONFIG_MN10300) && \
39 !defined(CONFIG_NIOS2) && \ 36 !defined(CONFIG_NIOS2) && \
40 !defined(CONFIG_PPC32) && \ 37 !defined(CONFIG_PPC32) && \
41 !defined(CONFIG_SUPERH)) 38 !defined(CONFIG_SUPERH))
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0b79908dfe89..5939549c0e7b 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -10,7 +10,10 @@
10 * Pablo Neira Ayuso <pablo@netfilter.org> 10 * Pablo Neira Ayuso <pablo@netfilter.org>
11 * 11 *
12 * ========================================================================== 12 * ==========================================================================
13 * 13 */
14
15/**
16 * DOC: ts_intro
14 * INTRODUCTION 17 * INTRODUCTION
15 * 18 *
16 * The textsearch infrastructure provides text searching facilities for 19 * The textsearch infrastructure provides text searching facilities for
@@ -19,7 +22,9 @@
19 * 22 *
20 * ARCHITECTURE 23 * ARCHITECTURE
21 * 24 *
22 * User 25 * .. code-block:: none
26 *
27 * User
23 * +----------------+ 28 * +----------------+
24 * | finish()|<--------------(6)-----------------+ 29 * | finish()|<--------------(6)-----------------+
25 * |get_next_block()|<--------------(5)---------------+ | 30 * |get_next_block()|<--------------(5)---------------+ |
@@ -33,21 +38,21 @@
33 * | (3)|----->| find()/next() |-----------+ | 38 * | (3)|----->| find()/next() |-----------+ |
34 * | (7)|----->| destroy() |----------------------+ 39 * | (7)|----->| destroy() |----------------------+
35 * +----------------+ +---------------+ 40 * +----------------+ +---------------+
36 * 41 *
37 * (1) User configures a search by calling _prepare() specifying the 42 * (1) User configures a search by calling textsearch_prepare() specifying
38 * search parameters such as the pattern and algorithm name. 43 * the search parameters such as the pattern and algorithm name.
39 * (2) Core requests the algorithm to allocate and initialize a search 44 * (2) Core requests the algorithm to allocate and initialize a search
40 * configuration according to the specified parameters. 45 * configuration according to the specified parameters.
41 * (3) User starts the search(es) by calling _find() or _next() to 46 * (3) User starts the search(es) by calling textsearch_find() or
42 * fetch subsequent occurrences. A state variable is provided 47 * textsearch_next() to fetch subsequent occurrences. A state variable
43 * to the algorithm to store persistent variables. 48 * is provided to the algorithm to store persistent variables.
44 * (4) Core eventually resets the search offset and forwards the find() 49 * (4) Core eventually resets the search offset and forwards the find()
45 * request to the algorithm. 50 * request to the algorithm.
46 * (5) Algorithm calls get_next_block() provided by the user continuously 51 * (5) Algorithm calls get_next_block() provided by the user continuously
47 * to fetch the data to be searched in block by block. 52 * to fetch the data to be searched in block by block.
48 * (6) Algorithm invokes finish() after the last call to get_next_block 53 * (6) Algorithm invokes finish() after the last call to get_next_block
49 * to clean up any leftovers from get_next_block. (Optional) 54 * to clean up any leftovers from get_next_block. (Optional)
50 * (7) User destroys the configuration by calling _destroy(). 55 * (7) User destroys the configuration by calling textsearch_destroy().
51 * (8) Core notifies the algorithm to destroy algorithm specific 56 * (8) Core notifies the algorithm to destroy algorithm specific
52 * allocations. (Optional) 57 * allocations. (Optional)
53 * 58 *
@@ -62,9 +67,10 @@
62 * amount of times and even in parallel as long as a separate struct 67 * amount of times and even in parallel as long as a separate struct
63 * ts_state variable is provided to every instance. 68 * ts_state variable is provided to every instance.
64 * 69 *
65 * The actual search is performed by either calling textsearch_find_- 70 * The actual search is performed by either calling
66 * continuous() for linear data or by providing an own get_next_block() 71 * textsearch_find_continuous() for linear data or by providing
67 * implementation and calling textsearch_find(). Both functions return 72 * an own get_next_block() implementation and
73 * calling textsearch_find(). Both functions return
68 * the position of the first occurrence of the pattern or UINT_MAX if 74 * the position of the first occurrence of the pattern or UINT_MAX if
69 * no match was found. Subsequent occurrences can be found by calling 75 * no match was found. Subsequent occurrences can be found by calling
70 * textsearch_next() regardless of the linearity of the data. 76 * textsearch_next() regardless of the linearity of the data.
@@ -72,7 +78,7 @@
72 * Once you're done using a configuration it must be given back via 78 * Once you're done using a configuration it must be given back via
73 * textsearch_destroy. 79 * textsearch_destroy.
74 * 80 *
75 * EXAMPLE 81 * EXAMPLE::
76 * 82 *
77 * int pos; 83 * int pos;
78 * struct ts_config *conf; 84 * struct ts_config *conf;
@@ -87,13 +93,13 @@
87 * goto errout; 93 * goto errout;
88 * } 94 * }
89 * 95 *
90 * pos = textsearch_find_continuous(conf, &state, example, strlen(example)); 96 * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
91 * if (pos != UINT_MAX) 97 * if (pos != UINT_MAX)
92 * panic("Oh my god, dancing chickens at %d\n", pos); 98 * panic("Oh my god, dancing chickens at \%d\n", pos);
93 * 99 *
94 * textsearch_destroy(conf); 100 * textsearch_destroy(conf);
95 * ==========================================================================
96 */ 101 */
102/* ========================================================================== */
97 103
98#include <linux/module.h> 104#include <linux/module.h>
99#include <linux/types.h> 105#include <linux/types.h>
@@ -225,7 +231,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
225 * 231 *
226 * Returns the position of first occurrence of the pattern or 232 * Returns the position of first occurrence of the pattern or
227 * %UINT_MAX if no occurrence was found. 233 * %UINT_MAX if no occurrence was found.
228 */ 234 */
229unsigned int textsearch_find_continuous(struct ts_config *conf, 235unsigned int textsearch_find_continuous(struct ts_config *conf,
230 struct ts_state *state, 236 struct ts_state *state,
231 const void *data, unsigned int len) 237 const void *data, unsigned int len)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d7a708f82559..23920c5ff728 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -336,7 +336,7 @@ char *put_dec(char *buf, unsigned long long n)
336 * 336 *
337 * If speed is not important, use snprintf(). It's easy to read the code. 337 * If speed is not important, use snprintf(). It's easy to read the code.
338 */ 338 */
339int num_to_str(char *buf, int size, unsigned long long num) 339int num_to_str(char *buf, int size, unsigned long long num, unsigned int width)
340{ 340{
341 /* put_dec requires 2-byte alignment of the buffer. */ 341 /* put_dec requires 2-byte alignment of the buffer. */
342 char tmp[sizeof(num) * 3] __aligned(2); 342 char tmp[sizeof(num) * 3] __aligned(2);
@@ -350,11 +350,21 @@ int num_to_str(char *buf, int size, unsigned long long num)
350 len = put_dec(tmp, num) - tmp; 350 len = put_dec(tmp, num) - tmp;
351 } 351 }
352 352
353 if (len > size) 353 if (len > size || width > size)
354 return 0; 354 return 0;
355
356 if (width > len) {
357 width = width - len;
358 for (idx = 0; idx < width; idx++)
359 buf[idx] = ' ';
360 } else {
361 width = 0;
362 }
363
355 for (idx = 0; idx < len; ++idx) 364 for (idx = 0; idx < len; ++idx)
356 buf[idx] = tmp[len - idx - 1]; 365 buf[idx + width] = tmp[len - idx - 1];
357 return len; 366
367 return len + width;
358} 368}
359 369
360#define SIGN 1 /* unsigned/signed, must be 1 */ 370#define SIGN 1 /* unsigned/signed, must be 1 */
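
Note: the new width parameter left-pads the number with spaces, so num_to_str(buf, 8, 42, 5) now yields "   42" and returns 5. A userspace sketch of the same contract, using a hypothetical num_to_str_demo() that returns 0 when either the digits or the requested width would overflow the buffer:

	#include <stdio.h>
	#include <string.h>

	static int num_to_str_demo(char *buf, int size, unsigned long long num,
				   unsigned int width)
	{
		char tmp[24];
		int len = snprintf(tmp, sizeof(tmp), "%llu", num);
		int pad = (int)width > len ? (int)width - len : 0;

		if (len > size || (int)width > size)
			return 0;
		memset(buf, ' ', pad);		/* spaces first, digits after */
		memcpy(buf + pad, tmp, len);
		return len + pad;
	}

	int main(void)
	{
		char buf[8];
		int n = num_to_str_demo(buf, sizeof(buf), 42, 5);

		printf("\"%.*s\" -> %d\n", n, buf, n); /* "   42" -> 5 */
		return 0;
	}
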
@@ -1659,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
1659 return number(buf, end, (unsigned long int)ptr, spec); 1669 return number(buf, end, (unsigned long int)ptr, spec);
1660} 1670}
1661 1671
1662static bool have_filled_random_ptr_key __read_mostly; 1672static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
1663static siphash_key_t ptr_key __read_mostly; 1673static siphash_key_t ptr_key __read_mostly;
1664 1674
1665static void fill_random_ptr_key(struct random_ready_callback *unused) 1675static void enable_ptr_key_workfn(struct work_struct *work)
1666{ 1676{
1667 get_random_bytes(&ptr_key, sizeof(ptr_key)); 1677 get_random_bytes(&ptr_key, sizeof(ptr_key));
1668 /* 1678 /* Needs to run from preemptible context */
1669 * have_filled_random_ptr_key==true is dependent on get_random_bytes(). 1679 static_branch_disable(&not_filled_random_ptr_key);
1670 * ptr_to_id() needs to see have_filled_random_ptr_key==true 1680}
1671 * after get_random_bytes() returns. 1681
1672 */ 1682static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
1673 smp_mb(); 1683
1674 WRITE_ONCE(have_filled_random_ptr_key, true); 1684static void fill_random_ptr_key(struct random_ready_callback *unused)
1685{
1686 /* This may be in an interrupt handler. */
1687 queue_work(system_unbound_wq, &enable_ptr_key_work);
1675} 1688}
1676 1689
1677static struct random_ready_callback random_ready = { 1690static struct random_ready_callback random_ready = {
@@ -1685,7 +1698,8 @@ static int __init initialize_ptr_random(void)
1685 if (!ret) { 1698 if (!ret) {
1686 return 0; 1699 return 0;
1687 } else if (ret == -EALREADY) { 1700 } else if (ret == -EALREADY) {
1688 fill_random_ptr_key(&random_ready); 1701 /* This is in preemptible context */
1702 enable_ptr_key_workfn(&enable_ptr_key_work);
1689 return 0; 1703 return 0;
1690 } 1704 }
1691 1705
@@ -1699,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
1699 unsigned long hashval; 1713 unsigned long hashval;
1700 const int default_width = 2 * sizeof(ptr); 1714 const int default_width = 2 * sizeof(ptr);
1701 1715
1702 if (unlikely(!have_filled_random_ptr_key)) { 1716 if (static_branch_unlikely(&not_filled_random_ptr_key)) {
1703 spec.field_width = default_width; 1717 spec.field_width = default_width;
1704 /* string length must be less than default_width */ 1718 /* string length must be less than default_width */
1705 return string(buf, end, "(ptrval)", spec); 1719 return string(buf, end, "(ptrval)", spec);
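
Note: the hunk above is an instance of a common idiom: static_branch_disable() may sleep (the new comment says it needs preemptible context), while the random-ready callback can fire from an interrupt handler, so the branch flip is deferred to a workqueue. A minimal sketch of the idiom, with hypothetical names:

	#include <linux/workqueue.h>

	/* Runs from a worker thread, i.e. process context, where
	 * sleeping is allowed. */
	static void deferred_setup_workfn(struct work_struct *work)
	{
		/* e.g. static_branch_disable(), which may take sleeping locks */
	}

	static DECLARE_WORK(deferred_setup_work, deferred_setup_workfn);

	/* May be reached from hard-IRQ context: only queue, never sleep. */
	static void notify_ready(void)
	{
		queue_work(system_unbound_wq, &deferred_setup_work);
	}
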
@@ -2591,6 +2605,8 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
2591 case 's': 2605 case 's':
2592 case 'F': 2606 case 'F':
2593 case 'f': 2607 case 'f':
2608 case 'x':
2609 case 'K':
2594 save_arg(void *); 2610 save_arg(void *);
2595 break; 2611 break;
2596 default: 2612 default:
@@ -2765,6 +2781,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
2765 case 's': 2781 case 's':
2766 case 'F': 2782 case 'F':
2767 case 'f': 2783 case 'f':
2784 case 'x':
2785 case 'K':
2768 process = true; 2786 process = true;
2769 break; 2787 break;
2770 default: 2788 default:
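
Note: these two new cases matter for the binary printf path used by trace_printk(): %px and %pK consume a pointer argument, so vbin_printf() must save it into the binary buffer and bstr_printf() must fetch it back when formatting. A hedged sketch of the round-trip (in-kernel API, requires CONFIG_BINARY_PRINTF; the save-stage size is in u32 words, matching the tracing code's usage):

	u32 bin_buf[32];
	char out[64];
	void *ptr = bin_buf;

	/* save stage: packs the pointer argument into bin_buf */
	bprintf(bin_buf, ARRAY_SIZE(bin_buf), "ptr=%px\n", ptr);
	/* format stage: decodes bin_buf against the same format string */
	bstr_printf(out, sizeof(out), "ptr=%px\n", bin_buf);
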
diff --git a/lib/zstd/Makefile b/lib/zstd/Makefile
index dd0a359c135b..7920cbbfeae9 100644
--- a/lib/zstd/Makefile
+++ b/lib/zstd/Makefile
@@ -3,16 +3,7 @@ obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
3 3
4ccflags-y += -O3 4ccflags-y += -O3
5 5
6# Object files unique to zstd_compress and zstd_decompress 6zstd_compress-y := fse_compress.o huf_compress.o compress.o \
7zstd_compress-y := fse_compress.o huf_compress.o compress.o 7 entropy_common.o fse_decompress.o zstd_common.o
8zstd_decompress-y := huf_decompress.o decompress.o 8zstd_decompress-y := huf_decompress.o decompress.o \
9 9 entropy_common.o fse_decompress.o zstd_common.o
10# These object files are shared between the modules.
11# Always add them to zstd_compress.
12# Unless both zstd_compress and zstd_decompress are built in
13# then also add them to zstd_decompress.
14zstd_compress-y += entropy_common.o fse_decompress.o zstd_common.o
15
16ifneq ($(CONFIG_ZSTD_COMPRESS)$(CONFIG_ZSTD_DECOMPRESS),yy)
17 zstd_decompress-y += entropy_common.o fse_decompress.o zstd_common.o
18endif