Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig              6
-rw-r--r--  lib/Kconfig.debug       16
-rw-r--r--  lib/Makefile             5
-rw-r--r--  lib/assoc_array.c       37
-rw-r--r--  lib/bucket_locks.c      54
-rw-r--r--  lib/chacha20.c          71
-rw-r--r--  lib/crc-ccitt.c         58
-rw-r--r--  lib/dma-direct.c       156
-rw-r--r--  lib/dma-noop.c          68
-rw-r--r--  lib/error-inject.c     242
-rw-r--r--  lib/errseq.c            37
-rw-r--r--  lib/kobject.c            6
-rw-r--r--  lib/kobject_uevent.c     3
-rw-r--r--  lib/mpi/longlong.h      18
-rw-r--r--  lib/pci_iomap.c          1
-rw-r--r--  lib/percpu-refcount.c    8
-rw-r--r--  lib/rhashtable.c       160
-rw-r--r--  lib/sbitmap.c            2
-rw-r--r--  lib/scatterlist.c      127
-rw-r--r--  lib/smp_processor_id.c   3
-rw-r--r--  lib/string.c             2
-rw-r--r--  lib/swiotlb.c          205
-rw-r--r--  lib/test_bpf.c         123
-rw-r--r--  lib/test_firmware.c     17
-rw-r--r--  lib/test_kmod.c         14
-rw-r--r--  lib/test_rhashtable.c    6
-rw-r--r--  lib/usercopy.c           2
-rw-r--r--  lib/vsprintf.c          87
28 files changed, 1185 insertions, 349 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index c5e84fbcb30b..e96089499371 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -409,7 +409,11 @@ config HAS_DMA
 	depends on !NO_DMA
 	default y
 
-config DMA_NOOP_OPS
+config SGL_ALLOC
+	bool
+	default n
+
+config DMA_DIRECT_OPS
 	bool
 	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
 	default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 9d5b78aad4c5..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@ config FAULT_INJECTION
 	  Provide fault-injection framework.
 	  For more details, see Documentation/fault-injection/.
 
+config FUNCTION_ERROR_INJECTION
+	def_bool y
+	depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
 config FAILSLAB
 	bool "Fault-injection capability for kmalloc"
 	depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@ config FAIL_FUTEX
 	help
 	  Provide fault-injection capability for futexes.
 
+config FAIL_FUNCTION
+	bool "Fault-injection capability for functions"
+	depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+	help
+	  Provide function-based fault-injection capability.
+	  This will allow you to override a specific function with a return
+	  with given return value. As a result, function caller will see
+	  an error value and have to handle it. This is useful to test the
+	  error handling in various subsystems.
+
 config FAULT_INJECTION_DEBUG_FS
 	bool "Debugfs entries for fault-injection capabilities"
 	depends on FAULT_INJECTION && SYSFS && DEBUG_FS
@@ -1952,7 +1966,7 @@ config STRICT_DEVMEM
 	bool "Filter access to /dev/mem"
 	depends on MMU && DEVMEM
 	depends on ARCH_HAS_DEVMEM_IS_ALLOWED
-	default y if TILE || PPC
+	default y if TILE || PPC || X86 || ARM64
 	---help---
 	  If this option is disabled, you allow userspace (root) access to all
 	  of memory, including kernel and userspace memory. Accidental
diff --git a/lib/Makefile b/lib/Makefile
index d11c48ec8ffd..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -28,7 +28,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
 lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o refcount.o usercopy.o errseq.o
+	 once.o refcount.o usercopy.o errseq.o bucket_locks.o
 obj-$(CONFIG_STRING_SELFTEST) += test_string.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -149,6 +149,7 @@ obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
 obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
 obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
 	of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index b77d51da8c73..c6659cb37033 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -38,12 +38,10 @@ begin_node:
 	if (assoc_array_ptr_is_shortcut(cursor)) {
 		/* Descend through a shortcut */
 		shortcut = assoc_array_ptr_to_shortcut(cursor);
-		smp_read_barrier_depends();
-		cursor = READ_ONCE(shortcut->next_node);
+		cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
 	}
 
 	node = assoc_array_ptr_to_node(cursor);
-	smp_read_barrier_depends();
 	slot = 0;
 
 	/* We perform two passes of each node.
@@ -55,15 +53,12 @@ begin_node:
 	 */
 	has_meta = 0;
 	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-		ptr = READ_ONCE(node->slots[slot]);
+		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
 		has_meta |= (unsigned long)ptr;
 		if (ptr && assoc_array_ptr_is_leaf(ptr)) {
-			/* We need a barrier between the read of the pointer
-			 * and dereferencing the pointer - but only if we are
-			 * actually going to dereference it.
+			/* We need a barrier between the read of the pointer,
+			 * which is supplied by the above READ_ONCE().
 			 */
-			smp_read_barrier_depends();
-
 			/* Invoke the callback */
 			ret = iterator(assoc_array_ptr_to_leaf(ptr),
 				       iterator_data);
@@ -86,10 +81,8 @@ begin_node:
 
 continue_node:
 	node = assoc_array_ptr_to_node(cursor);
-	smp_read_barrier_depends();
-
 	for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-		ptr = READ_ONCE(node->slots[slot]);
+		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
 		if (assoc_array_ptr_is_meta(ptr)) {
 			cursor = ptr;
 			goto begin_node;
@@ -98,16 +91,15 @@ continue_node:
 
 finished_node:
 	/* Move up to the parent (may need to skip back over a shortcut) */
-	parent = READ_ONCE(node->back_pointer);
+	parent = READ_ONCE(node->back_pointer); /* Address dependency. */
 	slot = node->parent_slot;
 	if (parent == stop)
 		return 0;
 
 	if (assoc_array_ptr_is_shortcut(parent)) {
 		shortcut = assoc_array_ptr_to_shortcut(parent);
-		smp_read_barrier_depends();
 		cursor = parent;
-		parent = READ_ONCE(shortcut->back_pointer);
+		parent = READ_ONCE(shortcut->back_pointer); /* Address dependency. */
 		slot = shortcut->parent_slot;
 		if (parent == stop)
 			return 0;
@@ -147,7 +139,7 @@ int assoc_array_iterate(const struct assoc_array *array,
 					void *iterator_data),
 			void *iterator_data)
 {
-	struct assoc_array_ptr *root = READ_ONCE(array->root);
+	struct assoc_array_ptr *root = READ_ONCE(array->root); /* Address dependency. */
 
 	if (!root)
 		return 0;
@@ -194,7 +186,7 @@ assoc_array_walk(const struct assoc_array *array,
 
 	pr_devel("-->%s()\n", __func__);
 
-	cursor = READ_ONCE(array->root);
+	cursor = READ_ONCE(array->root); /* Address dependency. */
 	if (!cursor)
 		return assoc_array_walk_tree_empty;
 
@@ -216,11 +208,9 @@ jumped:
 
 consider_node:
 	node = assoc_array_ptr_to_node(cursor);
-	smp_read_barrier_depends();
-
 	slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
 	slot &= ASSOC_ARRAY_FAN_MASK;
-	ptr = READ_ONCE(node->slots[slot]);
+	ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
 
 	pr_devel("consider slot %x [ix=%d type=%lu]\n",
 		 slot, level, (unsigned long)ptr & 3);
@@ -254,7 +244,6 @@ consider_node:
 	cursor = ptr;
 follow_shortcut:
 	shortcut = assoc_array_ptr_to_shortcut(cursor);
-	smp_read_barrier_depends();
 	pr_devel("shortcut to %d\n", shortcut->skip_to_level);
 	sc_level = level + ASSOC_ARRAY_LEVEL_STEP;
 	BUG_ON(sc_level > shortcut->skip_to_level);
@@ -294,7 +283,7 @@ follow_shortcut:
 	} while (sc_level < shortcut->skip_to_level);
 
 	/* The shortcut matches the leaf's index to this point. */
-	cursor = READ_ONCE(shortcut->next_node);
+	cursor = READ_ONCE(shortcut->next_node); /* Address dependency. */
 	if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
 		level = sc_level;
 		goto jumped;
@@ -331,20 +320,18 @@ void *assoc_array_find(const struct assoc_array *array,
 		return NULL;
 
 	node = result.terminal_node.node;
-	smp_read_barrier_depends();
 
 	/* If the target key is available to us, it's has to be pointed to by
 	 * the terminal node.
 	 */
 	for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
-		ptr = READ_ONCE(node->slots[slot]);
+		ptr = READ_ONCE(node->slots[slot]); /* Address dependency. */
 		if (ptr && assoc_array_ptr_is_leaf(ptr)) {
 			/* We need a barrier between the read of the pointer
 			 * and dereferencing the pointer - but only if we are
 			 * actually going to dereference it.
 			 */
 			leaf = assoc_array_ptr_to_leaf(ptr);
-			smp_read_barrier_depends();
 			if (ops->compare_object(leaf, index_key))
 				return (void *)leaf;
 		}
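Editor's note: the assoc_array changes above rely on READ_ONCE() providing the address-dependency ordering that smp_read_barrier_depends() used to supply. A minimal, hedged sketch of that consumer-side pattern follows; the struct, the global pointer and the writer-side smp_store_release() mentioned in the comment are illustrative, not part of this patch.

#include <linux/compiler.h>

struct node { int payload; };

struct node *published;	/* a writer publishes this with smp_store_release() */

static int read_payload(void)
{
	/* READ_ONCE() is now sufficient to order this load against the
	 * dependent dereference below; no separate read barrier is needed. */
	struct node *n = READ_ONCE(published);

	return n ? n->payload : -1;
}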
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
1#include <linux/export.h>
2#include <linux/kernel.h>
3#include <linux/mm.h>
4#include <linux/slab.h>
5#include <linux/vmalloc.h>
6
7/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
8 * indicate the number of elements to allocate in the array. max_size
9 * gives the maximum number of elements to allocate. cpu_mult gives
10 * the number of locks per CPU to allocate. The size is rounded up
11 * to a power of 2 to be suitable as a hash table.
12 */
13
14int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
15 size_t max_size, unsigned int cpu_mult, gfp_t gfp)
16{
17 spinlock_t *tlocks = NULL;
18 unsigned int i, size;
19#if defined(CONFIG_PROVE_LOCKING)
20 unsigned int nr_pcpus = 2;
21#else
22 unsigned int nr_pcpus = num_possible_cpus();
23#endif
24
25 if (cpu_mult) {
26 nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
27 size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
28 } else {
29 size = max_size;
30 }
31
32 if (sizeof(spinlock_t) != 0) {
33 if (gfpflags_allow_blocking(gfp))
34 tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
35 else
36 tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
37 if (!tlocks)
38 return -ENOMEM;
39 for (i = 0; i < size; i++)
40 spin_lock_init(&tlocks[i]);
41 }
42
43 *locks = tlocks;
44 *locks_mask = size - 1;
45
46 return 0;
47}
48EXPORT_SYMBOL(alloc_bucket_spinlocks);
49
50void free_bucket_spinlocks(spinlock_t *locks)
51{
52 kvfree(locks);
53}
54EXPORT_SYMBOL(free_bucket_spinlocks);
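Editor's note: a hedged sketch of how a hash-table user might drive the two helpers added above. The struct, function names and sizes are illustrative only; the declaration of alloc_bucket_spinlocks() is assumed to come from the header added elsewhere in this series.

#include <linux/spinlock.h>
#include <linux/gfp.h>

struct my_table {
	spinlock_t	*locks;
	unsigned int	locks_mask;
};

static int my_table_init(struct my_table *t, size_t nbuckets)
{
	/* At most one lock per two buckets, one lock per CPU, may sleep. */
	return alloc_bucket_spinlocks(&t->locks, &t->locks_mask,
				      nbuckets / 2, 1, GFP_KERNEL);
}

static void my_table_lock_bucket(struct my_table *t, unsigned int hash)
{
	/* locks_mask is (size - 1), so this indexes the lock array. */
	spin_lock(&t->locks[hash & t->locks_mask]);
}

static void my_table_destroy(struct my_table *t)
{
	free_bucket_spinlocks(t->locks);
}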
diff --git a/lib/chacha20.c b/lib/chacha20.c
index 250ceed9ec9a..c1cc50fb68c9 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,12 +16,7 @@
 #include <asm/unaligned.h>
 #include <crypto/chacha20.h>
 
-static inline u32 rotl32(u32 v, u8 n)
-{
-	return (v << n) | (v >> (sizeof(v) * 8 - n));
-}
-
-extern void chacha20_block(u32 *state, void *stream)
+void chacha20_block(u32 *state, u32 *stream)
 {
 	u32 x[16], *out = stream;
 	int i;
@@ -30,45 +25,45 @@ extern void chacha20_block(u32 *state, void *stream)
30 x[i] = state[i]; 25 x[i] = state[i];
31 26
32 for (i = 0; i < 20; i += 2) { 27 for (i = 0; i < 20; i += 2) {
33 x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16); 28 x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
34 x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16); 29 x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
35 x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16); 30 x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
36 x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16); 31 x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
37 32
38 x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12); 33 x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
39 x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12); 34 x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
40 x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12); 35 x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
41 x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12); 36 x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
42 37
43 x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8); 38 x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
44 x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8); 39 x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
45 x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8); 40 x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
46 x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8); 41 x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
47 42
48 x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7); 43 x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
49 x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7); 44 x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
50 x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7); 45 x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
51 x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7); 46 x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
52 47
53 x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16); 48 x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
54 x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16); 49 x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
55 x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16); 50 x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
56 x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16); 51 x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
57 52
58 x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12); 53 x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
59 x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12); 54 x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
60 x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12); 55 x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
61 x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12); 56 x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
62 57
63 x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8); 58 x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
64 x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8); 59 x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
65 x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8); 60 x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
66 x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8); 61 x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
67 62
68 x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7); 63 x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
69 x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7); 64 x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
70 x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7); 65 x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
71 x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7); 66 x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
72 } 67 }
73 68
74 for (i = 0; i < ARRAY_SIZE(x); i++) 69 for (i = 0; i < ARRAY_SIZE(x); i++)
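Editor's note: the patch replaces the local rotl32() helper with the kernel's generic rol32(). A small, hedged userspace sketch of the equivalent rotate follows, just to show the two forms agree for the shift amounts used in the quarter rounds above (16, 12, 8 and 7); the real rol32() lives in the kernel's bitops header and may guard the shift count differently.

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for rol32(); no guard is needed for in-range shifts. */
static inline uint32_t rol32_demo(uint32_t v, unsigned int n)
{
	return (v << n) | (v >> (32 - n));
}

int main(void)
{
	/* Same result as the removed rotl32() for an in-range shift. */
	printf("%08x\n", rol32_demo(0x80000001u, 7));	/* prints 000000c0 */
	return 0;
}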
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index 7f6dd68d2d09..d873b34039ff 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -51,8 +51,49 @@ u16 const crc_ccitt_table[256] = {
 };
 EXPORT_SYMBOL(crc_ccitt_table);
 
54/*
55 * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
56 * Reflected bits order, does not augment final value.
57 */
58u16 const crc_ccitt_false_table[256] = {
59 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
60 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
61 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
62 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
63 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
64 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
65 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
66 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
67 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
68 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
69 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
70 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
71 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
72 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
73 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
74 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
75 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
76 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
77 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
78 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
79 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
80 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
81 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
82 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
83 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
84 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
85 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
86 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
87 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
88 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
89 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
90 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
91};
92EXPORT_SYMBOL(crc_ccitt_false_table);
93
54/** 94/**
55 * crc_ccitt - recompute the CRC for the data buffer 95 * crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
96 * buffer
56 * @crc: previous CRC value 97 * @crc: previous CRC value
57 * @buffer: data pointer 98 * @buffer: data pointer
58 * @len: number of bytes in the buffer 99 * @len: number of bytes in the buffer
@@ -65,5 +106,20 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
65} 106}
66EXPORT_SYMBOL(crc_ccitt); 107EXPORT_SYMBOL(crc_ccitt);
67 108
109/**
110 * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
111 * for the data buffer
112 * @crc: previous CRC value
113 * @buffer: data pointer
114 * @len: number of bytes in the buffer
115 */
116u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
117{
118 while (len--)
119 crc = crc_ccitt_false_byte(crc, *buffer++);
120 return crc;
121}
122EXPORT_SYMBOL(crc_ccitt_false);
123
 MODULE_DESCRIPTION("CRC-CCITT calculations");
 MODULE_LICENSE("GPL");
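Editor's note: crc_ccitt_false() above walks the buffer through crc_ccitt_false_byte(), a table lookup defined in the header rather than in this hunk. As a hedged, self-contained cross-check, the sketch below computes the same CRC-16/CCITT-FALSE value bit by bit (polynomial 0x1021, init 0xFFFF, no reflection, no final XOR); the expected check value for "123456789" is the standard 0x29B1.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-CCITT-FALSE; the kernel's table-driven routine should match. */
static uint16_t crc_ccitt_false_bitwise(uint16_t crc, const uint8_t *buf,
					size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}
	return crc;
}

int main(void)
{
	const char *msg = "123456789";

	/* Should print 29b1, the published check value for this variant. */
	printf("%04x\n", crc_ccitt_false_bitwise(0xFFFF,
						 (const uint8_t *)msg,
						 strlen(msg)));
	return 0;
}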
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
new file mode 100644
index 000000000000..40b1f92f2214
--- /dev/null
+++ b/lib/dma-direct.c
@@ -0,0 +1,156 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DMA operations that map physical memory directly without using an IOMMU or
4 * flushing caches.
5 */
6#include <linux/export.h>
7#include <linux/mm.h>
8#include <linux/dma-direct.h>
9#include <linux/scatterlist.h>
10#include <linux/dma-contiguous.h>
11#include <linux/pfn.h>
12
13#define DIRECT_MAPPING_ERROR 0
14
15/*
16 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
17 * some use it for entirely different regions:
18 */
19#ifndef ARCH_ZONE_DMA_BITS
20#define ARCH_ZONE_DMA_BITS 24
21#endif
22
23static bool
24check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
25 const char *caller)
26{
27 if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
28 if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
29 dev_err(dev,
30 "%s: overflow %pad+%zu of device mask %llx\n",
31 caller, &dma_addr, size, *dev->dma_mask);
32 }
33 return false;
34 }
35 return true;
36}
37
38static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
39{
40 return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
41}
42
43void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
44 gfp_t gfp, unsigned long attrs)
45{
46 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
47 int page_order = get_order(size);
48 struct page *page = NULL;
49
50 /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
51 if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
52 gfp |= GFP_DMA;
53 if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
54 gfp |= GFP_DMA32;
55
56again:
57 /* CMA can be used only in the context which permits sleeping */
58 if (gfpflags_allow_blocking(gfp)) {
59 page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
60 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
61 dma_release_from_contiguous(dev, page, count);
62 page = NULL;
63 }
64 }
65 if (!page)
66 page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
67
68 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
69 __free_pages(page, page_order);
70 page = NULL;
71
72 if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
73 !(gfp & GFP_DMA)) {
74 gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
75 goto again;
76 }
77 }
78
79 if (!page)
80 return NULL;
81
82 *dma_handle = phys_to_dma(dev, page_to_phys(page));
83 memset(page_address(page), 0, size);
84 return page_address(page);
85}
86
87void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
88 dma_addr_t dma_addr, unsigned long attrs)
89{
90 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
91
92 if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
93 free_pages((unsigned long)cpu_addr, get_order(size));
94}
95
96static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
97 unsigned long offset, size_t size, enum dma_data_direction dir,
98 unsigned long attrs)
99{
100 dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
101
102 if (!check_addr(dev, dma_addr, size, __func__))
103 return DIRECT_MAPPING_ERROR;
104 return dma_addr;
105}
106
107static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
108 int nents, enum dma_data_direction dir, unsigned long attrs)
109{
110 int i;
111 struct scatterlist *sg;
112
113 for_each_sg(sgl, sg, nents, i) {
114 BUG_ON(!sg_page(sg));
115
116 sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
117 if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
118 return 0;
119 sg_dma_len(sg) = sg->length;
120 }
121
122 return nents;
123}
124
125int dma_direct_supported(struct device *dev, u64 mask)
126{
127#ifdef CONFIG_ZONE_DMA
128 if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
129 return 0;
130#else
131 /*
132 * Because 32-bit DMA masks are so common we expect every architecture
133 * to be able to satisfy them - either by not supporting more physical
134 * memory, or by providing a ZONE_DMA32. If neither is the case, the
135 * architecture needs to use an IOMMU instead of the direct mapping.
136 */
137 if (mask < DMA_BIT_MASK(32))
138 return 0;
139#endif
140 return 1;
141}
142
143static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
144{
145 return dma_addr == DIRECT_MAPPING_ERROR;
146}
147
148const struct dma_map_ops dma_direct_ops = {
149 .alloc = dma_direct_alloc,
150 .free = dma_direct_free,
151 .map_page = dma_direct_map_page,
152 .map_sg = dma_direct_map_sg,
153 .dma_supported = dma_direct_supported,
154 .mapping_error = dma_direct_mapping_error,
155};
156EXPORT_SYMBOL(dma_direct_ops);
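Editor's note: dma_direct_supported() and dma_coherent_ok() above reason purely in terms of bit masks. The hedged sketch below illustrates that arithmetic; the DMA_BIT_MASK() definition is quoted from memory of linux/dma-mapping.h and is an assumption of this note, and the addresses are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the kernel's DMA_BIT_MASK() macro. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* ARCH_ZONE_DMA_BITS defaults to 24 above: the ISA 16 MiB boundary. */
	printf("24-bit mask: %#llx\n", DMA_BIT_MASK(24));	/* 0xffffff   */
	printf("32-bit mask: %#llx\n", DMA_BIT_MASK(32));	/* 0xffffffff */

	/* dma_coherent_ok(): the last byte of the buffer must fit the mask. */
	unsigned long long dma_addr = 0xfff000, size = 0x2000;
	int ok = dma_addr + size - 1 <= DMA_BIT_MASK(24);

	printf("%#llx+%#llx under a 24-bit mask: %d\n", dma_addr, size, ok);
	return 0;
}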
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
deleted file mode 100644
index a10185b0c2d4..000000000000
--- a/lib/dma-noop.c
+++ /dev/null
@@ -1,68 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * lib/dma-noop.c
4 *
5 * DMA operations that map to physical addresses without flushing memory.
6 */
7#include <linux/export.h>
8#include <linux/mm.h>
9#include <linux/dma-mapping.h>
10#include <linux/scatterlist.h>
11#include <linux/pfn.h>
12
13static void *dma_noop_alloc(struct device *dev, size_t size,
14 dma_addr_t *dma_handle, gfp_t gfp,
15 unsigned long attrs)
16{
17 void *ret;
18
19 ret = (void *)__get_free_pages(gfp, get_order(size));
20 if (ret)
21 *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
22
23 return ret;
24}
25
26static void dma_noop_free(struct device *dev, size_t size,
27 void *cpu_addr, dma_addr_t dma_addr,
28 unsigned long attrs)
29{
30 free_pages((unsigned long)cpu_addr, get_order(size));
31}
32
33static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
34 unsigned long offset, size_t size,
35 enum dma_data_direction dir,
36 unsigned long attrs)
37{
38 return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
39}
40
41static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
42 enum dma_data_direction dir,
43 unsigned long attrs)
44{
45 int i;
46 struct scatterlist *sg;
47
48 for_each_sg(sgl, sg, nents, i) {
49 dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
50 void *va;
51
52 BUG_ON(!sg_page(sg));
53 va = sg_virt(sg);
54 sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
55 sg_dma_len(sg) = sg->length;
56 }
57
58 return nents;
59}
60
61const struct dma_map_ops dma_noop_ops = {
62 .alloc = dma_noop_alloc,
63 .free = dma_noop_free,
64 .map_page = dma_noop_map_page,
65 .map_sg = dma_noop_map_sg,
66};
67
68EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000000..c0d4600f4896
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
1// SPDX-License-Identifier: GPL-2.0
2// error-inject.c: Function-level error injection table
3#include <linux/error-injection.h>
4#include <linux/debugfs.h>
5#include <linux/kallsyms.h>
6#include <linux/kprobes.h>
7#include <linux/module.h>
8#include <linux/mutex.h>
9#include <linux/list.h>
10#include <linux/slab.h>
11
12/* Whitelist of symbols that can be overridden for error injection. */
13static LIST_HEAD(error_injection_list);
14static DEFINE_MUTEX(ei_mutex);
15struct ei_entry {
16 struct list_head list;
17 unsigned long start_addr;
18 unsigned long end_addr;
19 int etype;
20 void *priv;
21};
22
23bool within_error_injection_list(unsigned long addr)
24{
25 struct ei_entry *ent;
26 bool ret = false;
27
28 mutex_lock(&ei_mutex);
29 list_for_each_entry(ent, &error_injection_list, list) {
30 if (addr >= ent->start_addr && addr < ent->end_addr) {
31 ret = true;
32 break;
33 }
34 }
35 mutex_unlock(&ei_mutex);
36 return ret;
37}
38
39int get_injectable_error_type(unsigned long addr)
40{
41 struct ei_entry *ent;
42
43 list_for_each_entry(ent, &error_injection_list, list) {
44 if (addr >= ent->start_addr && addr < ent->end_addr)
45 return ent->etype;
46 }
47 return EI_ETYPE_NONE;
48}
49
50/*
51 * Lookup and populate the error_injection_list.
52 *
53 * For safety reasons we only allow certain functions to be overridden with
54 * bpf_error_injection, so we need to populate the list of the symbols that have
55 * been marked as safe for overriding.
56 */
57static void populate_error_injection_list(struct error_injection_entry *start,
58 struct error_injection_entry *end,
59 void *priv)
60{
61 struct error_injection_entry *iter;
62 struct ei_entry *ent;
63 unsigned long entry, offset = 0, size = 0;
64
65 mutex_lock(&ei_mutex);
66 for (iter = start; iter < end; iter++) {
67 entry = arch_deref_entry_point((void *)iter->addr);
68
69 if (!kernel_text_address(entry) ||
70 !kallsyms_lookup_size_offset(entry, &size, &offset)) {
71 pr_err("Failed to find error inject entry at %p\n",
72 (void *)entry);
73 continue;
74 }
75
76 ent = kmalloc(sizeof(*ent), GFP_KERNEL);
77 if (!ent)
78 break;
79 ent->start_addr = entry;
80 ent->end_addr = entry + size;
81 ent->etype = iter->etype;
82 ent->priv = priv;
83 INIT_LIST_HEAD(&ent->list);
84 list_add_tail(&ent->list, &error_injection_list);
85 }
86 mutex_unlock(&ei_mutex);
87}
88
89/* Markers of the _error_inject_whitelist section */
90extern struct error_injection_entry __start_error_injection_whitelist[];
91extern struct error_injection_entry __stop_error_injection_whitelist[];
92
93static void __init populate_kernel_ei_list(void)
94{
95 populate_error_injection_list(__start_error_injection_whitelist,
96 __stop_error_injection_whitelist,
97 NULL);
98}
99
100#ifdef CONFIG_MODULES
101static void module_load_ei_list(struct module *mod)
102{
103 if (!mod->num_ei_funcs)
104 return;
105
106 populate_error_injection_list(mod->ei_funcs,
107 mod->ei_funcs + mod->num_ei_funcs, mod);
108}
109
110static void module_unload_ei_list(struct module *mod)
111{
112 struct ei_entry *ent, *n;
113
114 if (!mod->num_ei_funcs)
115 return;
116
117 mutex_lock(&ei_mutex);
118 list_for_each_entry_safe(ent, n, &error_injection_list, list) {
119 if (ent->priv == mod) {
120 list_del_init(&ent->list);
121 kfree(ent);
122 }
123 }
124 mutex_unlock(&ei_mutex);
125}
126
127/* Module notifier call back, checking error injection table on the module */
128static int ei_module_callback(struct notifier_block *nb,
129 unsigned long val, void *data)
130{
131 struct module *mod = data;
132
133 if (val == MODULE_STATE_COMING)
134 module_load_ei_list(mod);
135 else if (val == MODULE_STATE_GOING)
136 module_unload_ei_list(mod);
137
138 return NOTIFY_DONE;
139}
140
141static struct notifier_block ei_module_nb = {
142 .notifier_call = ei_module_callback,
143 .priority = 0
144};
145
146static __init int module_ei_init(void)
147{
148 return register_module_notifier(&ei_module_nb);
149}
150#else /* !CONFIG_MODULES */
151#define module_ei_init() (0)
152#endif
153
154/*
155 * error_injection/whitelist -- shows which functions can be overridden for
156 * error injection.
157 */
158static void *ei_seq_start(struct seq_file *m, loff_t *pos)
159{
160 mutex_lock(&ei_mutex);
161 return seq_list_start(&error_injection_list, *pos);
162}
163
164static void ei_seq_stop(struct seq_file *m, void *v)
165{
166 mutex_unlock(&ei_mutex);
167}
168
169static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
170{
171 return seq_list_next(v, &error_injection_list, pos);
172}
173
174static const char *error_type_string(int etype)
175{
176 switch (etype) {
177 case EI_ETYPE_NULL:
178 return "NULL";
179 case EI_ETYPE_ERRNO:
180 return "ERRNO";
181 case EI_ETYPE_ERRNO_NULL:
182 return "ERRNO_NULL";
183 default:
184 return "(unknown)";
185 }
186}
187
188static int ei_seq_show(struct seq_file *m, void *v)
189{
190 struct ei_entry *ent = list_entry(v, struct ei_entry, list);
191
192 seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
193 error_type_string(ent->etype));
194 return 0;
195}
196
197static const struct seq_operations ei_seq_ops = {
198 .start = ei_seq_start,
199 .next = ei_seq_next,
200 .stop = ei_seq_stop,
201 .show = ei_seq_show,
202};
203
204static int ei_open(struct inode *inode, struct file *filp)
205{
206 return seq_open(filp, &ei_seq_ops);
207}
208
209static const struct file_operations debugfs_ei_ops = {
210 .open = ei_open,
211 .read = seq_read,
212 .llseek = seq_lseek,
213 .release = seq_release,
214};
215
216static int __init ei_debugfs_init(void)
217{
218 struct dentry *dir, *file;
219
220 dir = debugfs_create_dir("error_injection", NULL);
221 if (!dir)
222 return -ENOMEM;
223
224 file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
225 if (!file) {
226 debugfs_remove(dir);
227 return -ENOMEM;
228 }
229
230 return 0;
231}
232
233static int __init init_error_injection(void)
234{
235 populate_kernel_ei_list();
236
237 if (!module_ei_init())
238 ei_debugfs_init();
239
240 return 0;
241}
242late_initcall(init_error_injection);
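Editor's note: entries reach error_injection_list via a per-function whitelist section populated at annotation sites. A hedged sketch of such an annotation follows; the ALLOW_ERROR_INJECTION() macro and its header are assumed to come from the same patch series, and the function itself is purely illustrative.

#include <linux/errno.h>
#include <linux/error-injection.h>

/* Illustrative target; with the ERRNO type it must return 0 or -errno. */
int my_driver_prepare(int arg)
{
	if (arg < 0)
		return -EINVAL;
	return 0;
}
/* Whitelists the function, so it appears as "my_driver_prepare  ERRNO"
 * in the error_injection/list debugfs file created above. */
ALLOW_ERROR_INJECTION(my_driver_prepare, ERRNO);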
diff --git a/lib/errseq.c b/lib/errseq.c
index 79cc66897db4..df782418b333 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -46,14 +46,14 @@
  * @eseq: errseq_t field that should be set
  * @err: error to set (must be between -1 and -MAX_ERRNO)
  *
- * This function sets the error in *eseq, and increments the sequence counter
+ * This function sets the error in @eseq, and increments the sequence counter
  * if the last sequence was sampled at some point in the past.
  *
  * Any error set will always overwrite an existing error.
  *
- * We do return the latest value here, primarily for debugging purposes. The
- * return value should not be used as a previously sampled value in later calls
- * as it will not have the SEEN flag set.
+ * Return: The previous value, primarily for debugging purposes. The
+ * return value should not be used as a previously sampled value in later
+ * calls as it will not have the SEEN flag set.
  */
 errseq_t errseq_set(errseq_t *eseq, int err)
 {
@@ -108,11 +108,13 @@ errseq_t errseq_set(errseq_t *eseq, int err)
 EXPORT_SYMBOL(errseq_set);
 
 /**
- * errseq_sample - grab current errseq_t value
- * @eseq: pointer to errseq_t to be sampled
+ * errseq_sample() - Grab current errseq_t value.
+ * @eseq: Pointer to errseq_t to be sampled.
  *
  * This function allows callers to sample an errseq_t value, marking it as
  * "seen" if required.
+ *
+ * Return: The current errseq value.
  */
 errseq_t errseq_sample(errseq_t *eseq)
 {
@@ -134,15 +136,15 @@ errseq_t errseq_sample(errseq_t *eseq)
 EXPORT_SYMBOL(errseq_sample);
 
 /**
- * errseq_check - has an error occurred since a particular sample point?
- * @eseq: pointer to errseq_t value to be checked
- * @since: previously-sampled errseq_t from which to check
+ * errseq_check() - Has an error occurred since a particular sample point?
+ * @eseq: Pointer to errseq_t value to be checked.
+ * @since: Previously-sampled errseq_t from which to check.
  *
- * Grab the value that eseq points to, and see if it has changed "since"
- * the given value was sampled. The "since" value is not advanced, so there
+ * Grab the value that eseq points to, and see if it has changed @since
+ * the given value was sampled. The @since value is not advanced, so there
  * is no need to mark the value as seen.
  *
- * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ * Return: The latest error set in the errseq_t or 0 if it hasn't changed.
  */
 int errseq_check(errseq_t *eseq, errseq_t since)
 {
@@ -155,11 +157,11 @@ int errseq_check(errseq_t *eseq, errseq_t since)
 EXPORT_SYMBOL(errseq_check);
 
 /**
- * errseq_check_and_advance - check an errseq_t and advance to current value
- * @eseq: pointer to value being checked and reported
- * @since: pointer to previously-sampled errseq_t to check against and advance
+ * errseq_check_and_advance() - Check an errseq_t and advance to current value.
+ * @eseq: Pointer to value being checked and reported.
+ * @since: Pointer to previously-sampled errseq_t to check against and advance.
  *
- * Grab the eseq value, and see whether it matches the value that "since"
+ * Grab the eseq value, and see whether it matches the value that @since
  * points to. If it does, then just return 0.
  *
  * If it doesn't, then the value has changed. Set the "seen" flag, and try to
@@ -170,6 +172,9 @@ EXPORT_SYMBOL(errseq_check);
  * value. The caller must provide that if necessary. Because of this, callers
  * may want to do a lockless errseq_check before taking the lock and calling
  * this.
+ *
+ * Return: Negative errno if one has been stored, or 0 if no new error has
+ * occurred.
  */
 int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
 {
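Editor's note: a hedged sketch of the sample/check cycle the updated kernel-doc describes, using the function signatures visible in this diff. "struct my_file" and its fields are illustrative stand-ins for whatever object holds the shared errseq_t and the per-opener cursor.

#include <linux/errno.h>
#include <linux/errseq.h>

struct my_file {
	errseq_t *wb_err;	/* shared error record, e.g. in a mapping    */
	errseq_t  since;	/* value sampled when this file was opened   */
};

static void my_open(struct my_file *f, errseq_t *shared)
{
	f->wb_err = shared;
	f->since = errseq_sample(shared);	/* remember the current value */
}

static void my_writeback_failed(struct my_file *f)
{
	errseq_set(f->wb_err, -EIO);		/* record a writeback error   */
}

static int my_fsync(struct my_file *f)
{
	/* Reports -EIO once, then advances "since" past the seen error. */
	return errseq_check_and_advance(f->wb_err, &f->since);
}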
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a18941..afd5a3fc6123 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kobject.c - library routines for handling generic kernel objects
  *
@@ -5,9 +6,6 @@
  * Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
  * Copyright (c) 2006-2007 Novell Inc.
  *
- * This file is released under the GPLv2.
- *
- *
  * Please see the file Documentation/kobject.txt for critical information
  * about using the kobject interface.
  */
@@ -1039,6 +1037,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
 
 	return ns;
 }
+EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
 
 const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
 {
@@ -1074,3 +1073,4 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns)
 		kobj_ns_ops_tbl[type]->drop_ns(ns);
 	spin_unlock(&kobj_ns_type_lock);
 }
+EXPORT_SYMBOL_GPL(kobj_ns_drop);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2615074d3de5..9fe6ec8fda28 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kernel userspace event delivery
  *
@@ -5,8 +6,6 @@
  * Copyright (C) 2004 Novell, Inc. All rights reserved.
  * Copyright (C) 2004 IBM, Inc. All rights reserved.
  *
- * Licensed under the GNU GPL v2.
- *
  * Authors:
  *	Robert Love <rml@novell.com>
  *	Kay Sievers <kay.sievers@vrfy.org>
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 57fd45ab7af1..08c60d10747f 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -671,7 +671,23 @@ do { \
  ************** MIPS/64 **************
  ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
+#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
+/*
+ * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C
+ * code below, so we special case MIPS64r6 until the compiler can do better.
+ */
+#define umul_ppmm(w1, w0, u, v)			\
+do {						\
+	__asm__ ("dmulu %0,%1,%2"		\
+		 : "=d" ((UDItype)(w0))		\
+		 : "d" ((UDItype)(u)),		\
+		   "d" ((UDItype)(v)));		\
+	__asm__ ("dmuhu %0,%1,%2"		\
+		 : "=d" ((UDItype)(w1))		\
+		 : "d" ((UDItype)(u)),		\
+		   "d" ((UDItype)(v)));		\
+} while (0)
+#elif (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do { \
 	typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \
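Editor's note: a hedged, self-contained illustration of what umul_ppmm(w1, w0, u, v) computes: the full 128-bit product of two 64-bit words, split into a high half (dmuhu on MIPS64r6) and a low half (dmulu). The generic form below mirrors the mode(TI) fallback kept in the #elif branch; the operand values are arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t u = 0xdeadbeefcafebabeULL, v = 0x0123456789abcdefULL;
	unsigned __int128 p = (unsigned __int128)u * v;	/* 64x64 -> 128 */
	uint64_t w1 = (uint64_t)(p >> 64);		/* high word (dmuhu) */
	uint64_t w0 = (uint64_t)p;			/* low word  (dmulu) */

	printf("high=%016llx low=%016llx\n",
	       (unsigned long long)w1, (unsigned long long)w0);
	return 0;
}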
diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c
index c10fba461454..2d3eb1cb73b8 100644
--- a/lib/pci_iomap.c
+++ b/lib/pci_iomap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Implement the default iomap interfaces
  *
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe03c6d52761..30e7dd88148b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -197,10 +197,10 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
-	 * Restore per-cpu operation. smp_store_release() is paired with
-	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
-	 * that the zeroing is visible to all percpu accesses which can see
-	 * the following __PERCPU_REF_ATOMIC clearing.
+	 * Restore per-cpu operation. smp_store_release() is paired
+	 * with READ_ONCE() in __ref_is_percpu() and guarantees that the
+	 * zeroing is visible to all percpu accesses which can see the
+	 * following __PERCPU_REF_ATOMIC clearing.
 	 */
 	for_each_possible_cpu(cpu)
 		*per_cpu_ptr(percpu_count, cpu) = 0;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
 #define ASSERT_RHT_MUTEX(HT)
 #endif
 
68
69static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
70 gfp_t gfp)
71{
72 unsigned int i, size;
73#if defined(CONFIG_PROVE_LOCKING)
74 unsigned int nr_pcpus = 2;
75#else
76 unsigned int nr_pcpus = num_possible_cpus();
77#endif
78
79 nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
80 size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
81
82 /* Never allocate more than 0.5 locks per bucket */
83 size = min_t(unsigned int, size, tbl->size >> 1);
84
85 if (tbl->nest)
86 size = min(size, 1U << tbl->nest);
87
88 if (sizeof(spinlock_t) != 0) {
89 if (gfpflags_allow_blocking(gfp))
90 tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
91 else
92 tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
93 gfp);
94 if (!tbl->locks)
95 return -ENOMEM;
96 for (i = 0; i < size; i++)
97 spin_lock_init(&tbl->locks[i]);
98 }
99 tbl->locks_mask = size - 1;
100
101 return 0;
102}
103
 static void nested_table_free(union nested_table *ntbl, unsigned int size)
 {
 	const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
 	if (tbl->nest)
 		nested_bucket_table_free(tbl);
 
-	kvfree(tbl->locks);
+	free_bucket_spinlocks(tbl->locks);
 	kvfree(tbl);
 }
 
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       gfp_t gfp)
 {
 	struct bucket_table *tbl = NULL;
-	size_t size;
+	size_t size, max_locks;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	tbl->size = size;
 
-	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+	max_locks = size >> 1;
+	if (tbl->nest)
+		max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+	if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+				   ht->p.locks_mul, gfp) < 0) {
 		bucket_table_free(tbl);
 		return NULL;
 	}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
 	iter->p = NULL;
 	iter->slot = 0;
 	iter->skip = 0;
+	iter->end_of_table = 0;
 
 	spin_lock(&ht->lock);
 	iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
732EXPORT_SYMBOL_GPL(rhashtable_walk_exit); 702EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
733 703
734/** 704/**
735 * rhashtable_walk_start - Start a hash table walk 705 * rhashtable_walk_start_check - Start a hash table walk
736 * @iter: Hash table iterator 706 * @iter: Hash table iterator
737 * 707 *
738 * Start a hash table walk at the current iterator position. Note that we take 708 * Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
744 * Returns -EAGAIN if resize event occured. Note that the iterator 714 * Returns -EAGAIN if resize event occured. Note that the iterator
745 * will rewind back to the beginning and you may use it immediately 715 * will rewind back to the beginning and you may use it immediately
746 * by calling rhashtable_walk_next. 716 * by calling rhashtable_walk_next.
717 *
718 * rhashtable_walk_start is defined as an inline variant that returns
719 * void. This is preferred in cases where the caller would ignore
720 * resize events and always continue.
747 */ 721 */
748int rhashtable_walk_start(struct rhashtable_iter *iter) 722int rhashtable_walk_start_check(struct rhashtable_iter *iter)
749 __acquires(RCU) 723 __acquires(RCU)
750{ 724{
751 struct rhashtable *ht = iter->ht; 725 struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
757 list_del(&iter->walker.list); 731 list_del(&iter->walker.list);
758 spin_unlock(&ht->lock); 732 spin_unlock(&ht->lock);
759 733
760 if (!iter->walker.tbl) { 734 if (!iter->walker.tbl && !iter->end_of_table) {
761 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); 735 iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
762 return -EAGAIN; 736 return -EAGAIN;
763 } 737 }
764 738
765 return 0; 739 return 0;
766} 740}
767EXPORT_SYMBOL_GPL(rhashtable_walk_start); 741EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
768 742
769/** 743/**
770 * rhashtable_walk_next - Return the next object and advance the iterator 744 * __rhashtable_walk_find_next - Find the next element in a table (or the first
771 * @iter: Hash table iterator 745 * one in case of a new walk).
772 * 746 *
773 * Note that you must call rhashtable_walk_stop when you are finished 747 * @iter: Hash table iterator
774 * with the walk.
775 * 748 *
776 * Returns the next object or NULL when the end of the table is reached. 749 * Returns the found object or NULL when the end of the table is reached.
777 * 750 *
778 * Returns -EAGAIN if resize event occured. Note that the iterator 751 * Returns -EAGAIN if resize event occurred.
779 * will rewind back to the beginning and you may continue to use it.
780 */ 752 */
781void *rhashtable_walk_next(struct rhashtable_iter *iter) 753static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
782{ 754{
783 struct bucket_table *tbl = iter->walker.tbl; 755 struct bucket_table *tbl = iter->walker.tbl;
784 struct rhlist_head *list = iter->list; 756 struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
786 struct rhash_head *p = iter->p; 758 struct rhash_head *p = iter->p;
787 bool rhlist = ht->rhlist; 759 bool rhlist = ht->rhlist;
788 760
789 if (p) { 761 if (!tbl)
790 if (!rhlist || !(list = rcu_dereference(list->next))) { 762 return NULL;
791 p = rcu_dereference(p->next);
792 list = container_of(p, struct rhlist_head, rhead);
793 }
794 goto next;
795 }
796 763
797 for (; iter->slot < tbl->size; iter->slot++) { 764 for (; iter->slot < tbl->size; iter->slot++) {
798 int skip = iter->skip; 765 int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
836 iter->slot = 0; 803 iter->slot = 0;
837 iter->skip = 0; 804 iter->skip = 0;
838 return ERR_PTR(-EAGAIN); 805 return ERR_PTR(-EAGAIN);
806 } else {
807 iter->end_of_table = true;
839 } 808 }
840 809
841 return NULL; 810 return NULL;
842} 811}
812
813/**
814 * rhashtable_walk_next - Return the next object and advance the iterator
815 * @iter: Hash table iterator
816 *
817 * Note that you must call rhashtable_walk_stop when you are finished
818 * with the walk.
819 *
820 * Returns the next object or NULL when the end of the table is reached.
821 *
822 * Returns -EAGAIN if resize event occurred. Note that the iterator
823 * will rewind back to the beginning and you may continue to use it.
824 */
825void *rhashtable_walk_next(struct rhashtable_iter *iter)
826{
827 struct rhlist_head *list = iter->list;
828 struct rhashtable *ht = iter->ht;
829 struct rhash_head *p = iter->p;
830 bool rhlist = ht->rhlist;
831
832 if (p) {
833 if (!rhlist || !(list = rcu_dereference(list->next))) {
834 p = rcu_dereference(p->next);
835 list = container_of(p, struct rhlist_head, rhead);
836 }
837 if (!rht_is_a_nulls(p)) {
838 iter->skip++;
839 iter->p = p;
840 iter->list = list;
841 return rht_obj(ht, rhlist ? &list->rhead : p);
842 }
843
844 /* At the end of this slot, switch to next one and then find
845 * next entry from that point.
846 */
847 iter->skip = 0;
848 iter->slot++;
849 }
850
851 return __rhashtable_walk_find_next(iter);
852}
843EXPORT_SYMBOL_GPL(rhashtable_walk_next); 853EXPORT_SYMBOL_GPL(rhashtable_walk_next);
844 854
845/** 855/**
856 * rhashtable_walk_peek - Return the next object but don't advance the iterator
857 * @iter: Hash table iterator
858 *
859 * Returns the next object or NULL when the end of the table is reached.
860 *
861 * Returns -EAGAIN if resize event occurred. Note that the iterator
862 * will rewind back to the beginning and you may continue to use it.
863 */
864void *rhashtable_walk_peek(struct rhashtable_iter *iter)
865{
866 struct rhlist_head *list = iter->list;
867 struct rhashtable *ht = iter->ht;
868 struct rhash_head *p = iter->p;
869
870 if (p)
871 return rht_obj(ht, ht->rhlist ? &list->rhead : p);
872
873 /* No object found in current iter, find next one in the table. */
874
875 if (iter->skip) {
876 /* A nonzero skip value points to the next entry in the table
877 * beyond that last one that was found. Decrement skip so
878 * we find the current value. __rhashtable_walk_find_next
879 * will restore the original value of skip assuming that
880 * the table hasn't changed.
881 */
882 iter->skip--;
883 }
884
885 return __rhashtable_walk_find_next(iter);
886}
887EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
888
889/**
  * rhashtable_walk_stop - Finish a hash table walk
  * @iter: Hash table iterator
  *
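Editor's note: a hedged sketch of a complete walk using the iterator API touched above, including the -EAGAIN rewind described in the rhashtable_walk_next() kernel-doc. The element type and table are illustrative; rhashtable_walk_start() is the void-returning variant this patch mentions for callers that tolerate resizes.

#include <linux/err.h>
#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static void my_walk(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;

	rhashtable_walk_enter(ht, &iter);
	rhashtable_walk_start(&iter);	/* ignore resize notifications */

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* iterator rewound on resize */
			break;
		}
		/* ... inspect obj ... */
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}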
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 80aa8d5463fa..42b5ca0acf93 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -462,7 +462,7 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
 	 */
 	atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wait_cnt + wake_batch);
 	sbq_index_atomic_inc(&sbq->wake_index);
-	wake_up(&ws->wait);
+	wake_up_nr(&ws->wait, wake_batch);
 	}
 }
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c1c55f7daaa..53728d391d3a 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -474,6 +474,133 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
 }
 EXPORT_SYMBOL(sg_alloc_table_from_pages);
 
477#ifdef CONFIG_SGL_ALLOC
478
479/**
480 * sgl_alloc_order - allocate a scatterlist and its pages
481 * @length: Length in bytes of the scatterlist. Must be at least one
482 * @order: Second argument for alloc_pages()
483 * @chainable: Whether or not to allocate an extra element in the scatterlist
484 * for scatterlist chaining purposes
485 * @gfp: Memory allocation flags
486 * @nent_p: [out] Number of entries in the scatterlist that have pages
487 *
488 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
489 */
490struct scatterlist *sgl_alloc_order(unsigned long long length,
491 unsigned int order, bool chainable,
492 gfp_t gfp, unsigned int *nent_p)
493{
494 struct scatterlist *sgl, *sg;
495 struct page *page;
496 unsigned int nent, nalloc;
497 u32 elem_len;
498
499 nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
500 /* Check for integer overflow */
501 if (length > (nent << (PAGE_SHIFT + order)))
502 return NULL;
503 nalloc = nent;
504 if (chainable) {
505 /* Check for integer overflow */
506 if (nalloc + 1 < nalloc)
507 return NULL;
508 nalloc++;
509 }
510 sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
511 (gfp & ~GFP_DMA) | __GFP_ZERO);
512 if (!sgl)
513 return NULL;
514
515 sg_init_table(sgl, nalloc);
516 sg = sgl;
517 while (length) {
518 elem_len = min_t(u64, length, PAGE_SIZE << order);
519 page = alloc_pages(gfp, order);
520 if (!page) {
521 sgl_free(sgl);
522 return NULL;
523 }
524
525 sg_set_page(sg, page, elem_len, 0);
526 length -= elem_len;
527 sg = sg_next(sg);
528 }
529 WARN_ONCE(length, "length = %lld\n", length);
530 if (nent_p)
531 *nent_p = nent;
532 return sgl;
533}
534EXPORT_SYMBOL(sgl_alloc_order);
535
536/**
537 * sgl_alloc - allocate a scatterlist and its pages
538 * @length: Length in bytes of the scatterlist
539 * @gfp: Memory allocation flags
540 * @nent_p: [out] Number of entries in the scatterlist
541 *
542 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
543 */
544struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
545 unsigned int *nent_p)
546{
547 return sgl_alloc_order(length, 0, false, gfp, nent_p);
548}
549EXPORT_SYMBOL(sgl_alloc);
550
551/**
552 * sgl_free_n_order - free a scatterlist and its pages
553 * @sgl: Scatterlist with one or more elements
554 * @nents: Maximum number of elements to free
555 * @order: Second argument for __free_pages()
556 *
557 * Notes:
558 * - If several scatterlists have been chained and each chain element is
559 * freed separately then it's essential to set nents correctly to avoid
560 * freeing a page twice.
561 * - All pages in a chained scatterlist can be freed at once by setting @nents
562 * to a high number.
563 */
564void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
565{
566 struct scatterlist *sg;
567 struct page *page;
568 int i;
569
570 for_each_sg(sgl, sg, nents, i) {
571 if (!sg)
572 break;
573 page = sg_page(sg);
574 if (page)
575 __free_pages(page, order);
576 }
577 kfree(sgl);
578}
579EXPORT_SYMBOL(sgl_free_n_order);
580
581/**
582 * sgl_free_order - free a scatterlist and its pages
583 * @sgl: Scatterlist with one or more elements
584 * @order: Second argument for __free_pages()
585 */
586void sgl_free_order(struct scatterlist *sgl, int order)
587{
588 sgl_free_n_order(sgl, INT_MAX, order);
589}
590EXPORT_SYMBOL(sgl_free_order);
591
592/**
593 * sgl_free - free a scatterlist and its pages
594 * @sgl: Scatterlist with one or more elements
595 */
596void sgl_free(struct scatterlist *sgl)
597{
598 sgl_free_order(sgl, 0);
599}
600EXPORT_SYMBOL(sgl_free);
601
602#endif /* CONFIG_SGL_ALLOC */
603
477void __sg_page_iter_start(struct sg_page_iter *piter, 604void __sg_page_iter_start(struct sg_page_iter *piter,
478 struct scatterlist *sglist, unsigned int nents, 605 struct scatterlist *sglist, unsigned int nents,
479 unsigned long pgoffset) 606 unsigned long pgoffset)
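A hedged usage sketch for the CONFIG_SGL_ALLOC helpers added above; the size, GFP flags and the way the list is consumed are illustrative assumptions rather than anything prescribed by the patch:

	struct scatterlist *sgl;
	unsigned int nents;

	/* Allocate a scatterlist backed by order-0 pages for 1 MiB of data. */
	sgl = sgl_alloc(SZ_1M, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* ... hand (sgl, nents) to a DMA engine or the sg_copy_*() helpers ... */

	sgl_free(sgl);	/* releases both the pages and the scatterlist array */

Allocations with a higher page order would go through sgl_alloc_order() and must be released with sgl_free_order()/sgl_free_n_order() using the same @order.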
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 835cc6df2776..85925aaa4fff 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,7 +5,6 @@
5 * DEBUG_PREEMPT variant of smp_processor_id(). 5 * DEBUG_PREEMPT variant of smp_processor_id().
6 */ 6 */
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/kallsyms.h>
9#include <linux/sched.h> 8#include <linux/sched.h>
10 9
11notrace static unsigned int check_preemption_disabled(const char *what1, 10notrace static unsigned int check_preemption_disabled(const char *what1,
@@ -43,7 +42,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
43 printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", 42 printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
44 what1, what2, preempt_count() - 1, current->comm, current->pid); 43 what1, what2, preempt_count() - 1, current->comm, current->pid);
45 44
46 print_symbol("caller is %s\n", (long)__builtin_return_address(0)); 45 printk("caller is %pS\n", __builtin_return_address(0));
47 dump_stack(); 46 dump_stack();
48 47
49out_enable: 48out_enable:
diff --git a/lib/string.c b/lib/string.c
index 64a9e33f1daa..2c0900a5d51a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -203,7 +203,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
203 while (max >= sizeof(unsigned long)) { 203 while (max >= sizeof(unsigned long)) {
204 unsigned long c, data; 204 unsigned long c, data;
205 205
206 c = *(unsigned long *)(src+res); 206 c = read_word_at_a_time(src+res);
207 if (has_zero(c, &data, &constants)) { 207 if (has_zero(c, &data, &constants)) {
208 data = prep_zero_mask(c, data, &constants); 208 data = prep_zero_mask(c, data, &constants);
209 data = create_zero_mask(data); 209 data = create_zero_mask(data);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cea19aaf303c..c43ec2271469 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -18,7 +18,7 @@
18 */ 18 */
19 19
20#include <linux/cache.h> 20#include <linux/cache.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-direct.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/export.h> 23#include <linux/export.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
@@ -417,7 +417,7 @@ cleanup2:
417 return -ENOMEM; 417 return -ENOMEM;
418} 418}
419 419
420void __init swiotlb_free(void) 420void __init swiotlb_exit(void)
421{ 421{
422 if (!io_tlb_orig_addr) 422 if (!io_tlb_orig_addr)
423 return; 423 return;
@@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
586 586
587not_found: 587not_found:
588 spin_unlock_irqrestore(&io_tlb_lock, flags); 588 spin_unlock_irqrestore(&io_tlb_lock, flags);
589 if (printk_ratelimit()) 589 if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
590 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size); 590 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
591 return SWIOTLB_MAP_ERROR; 591 return SWIOTLB_MAP_ERROR;
592found: 592found:
@@ -605,7 +605,6 @@ found:
605 605
606 return tlb_addr; 606 return tlb_addr;
607} 607}
608EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
609 608
610/* 609/*
611 * Allocates bounce buffer and returns its kernel virtual address. 610 * Allocates bounce buffer and returns its kernel virtual address.
@@ -675,7 +674,6 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
675 } 674 }
676 spin_unlock_irqrestore(&io_tlb_lock, flags); 675 spin_unlock_irqrestore(&io_tlb_lock, flags);
677} 676}
678EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
679 677
680void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr, 678void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
681 size_t size, enum dma_data_direction dir, 679 size_t size, enum dma_data_direction dir,
@@ -707,92 +705,107 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
707 BUG(); 705 BUG();
708 } 706 }
709} 707}
710EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single); 708
709static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
710 size_t size)
711{
712 u64 mask = DMA_BIT_MASK(32);
713
714 if (dev && dev->coherent_dma_mask)
715 mask = dev->coherent_dma_mask;
716 return addr + size - 1 <= mask;
717}
718
719static void *
720swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
721 unsigned long attrs)
722{
723 phys_addr_t phys_addr;
724
725 if (swiotlb_force == SWIOTLB_NO_FORCE)
726 goto out_warn;
727
728 phys_addr = swiotlb_tbl_map_single(dev,
729 swiotlb_phys_to_dma(dev, io_tlb_start),
730 0, size, DMA_FROM_DEVICE, 0);
731 if (phys_addr == SWIOTLB_MAP_ERROR)
732 goto out_warn;
733
734 *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
735	if (!dma_coherent_ok(dev, *dma_handle, size))
736 goto out_unmap;
737
738 memset(phys_to_virt(phys_addr), 0, size);
739 return phys_to_virt(phys_addr);
740
741out_unmap:
742 dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
743 (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
744 (unsigned long long)*dma_handle);
745
746 /*
747 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
748 * DMA_ATTR_SKIP_CPU_SYNC is optional.
749 */
750 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
751 DMA_ATTR_SKIP_CPU_SYNC);
752out_warn:
753	if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
754 dev_warn(dev,
755 "swiotlb: coherent allocation failed, size=%zu\n",
756 size);
757 dump_stack();
758 }
759 return NULL;
760}
711 761
712void * 762void *
713swiotlb_alloc_coherent(struct device *hwdev, size_t size, 763swiotlb_alloc_coherent(struct device *hwdev, size_t size,
714 dma_addr_t *dma_handle, gfp_t flags) 764 dma_addr_t *dma_handle, gfp_t flags)
715{ 765{
716 dma_addr_t dev_addr;
717 void *ret;
718 int order = get_order(size); 766 int order = get_order(size);
719 u64 dma_mask = DMA_BIT_MASK(32); 767 unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
720 768 void *ret;
721 if (hwdev && hwdev->coherent_dma_mask)
722 dma_mask = hwdev->coherent_dma_mask;
723 769
724 ret = (void *)__get_free_pages(flags, order); 770 ret = (void *)__get_free_pages(flags, order);
725 if (ret) { 771 if (ret) {
726 dev_addr = swiotlb_virt_to_bus(hwdev, ret); 772 *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
727 if (dev_addr + size - 1 > dma_mask) { 773 if (dma_coherent_ok(hwdev, *dma_handle, size)) {
728 /* 774 memset(ret, 0, size);
729 * The allocated memory isn't reachable by the device. 775 return ret;
730 */
731 free_pages((unsigned long) ret, order);
732 ret = NULL;
733 } 776 }
777 free_pages((unsigned long)ret, order);
734 } 778 }
735 if (!ret) {
736 /*
737 * We are either out of memory or the device can't DMA to
738 * GFP_DMA memory; fall back on map_single(), which
739 * will grab memory from the lowest available address range.
740 */
741 phys_addr_t paddr = map_single(hwdev, 0, size,
742 DMA_FROM_DEVICE, 0);
743 if (paddr == SWIOTLB_MAP_ERROR)
744 goto err_warn;
745 779
746 ret = phys_to_virt(paddr); 780 return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
747 dev_addr = swiotlb_phys_to_dma(hwdev, paddr); 781}
748 782EXPORT_SYMBOL(swiotlb_alloc_coherent);
749 /* Confirm address can be DMA'd by device */
750 if (dev_addr + size - 1 > dma_mask) {
751 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
752 (unsigned long long)dma_mask,
753 (unsigned long long)dev_addr);
754
755 /*
756 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
757 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
758 */
759 swiotlb_tbl_unmap_single(hwdev, paddr,
760 size, DMA_TO_DEVICE,
761 DMA_ATTR_SKIP_CPU_SYNC);
762 goto err_warn;
763 }
764 }
765 783
766 *dma_handle = dev_addr; 784static bool swiotlb_free_buffer(struct device *dev, size_t size,
767 memset(ret, 0, size); 785 dma_addr_t dma_addr)
786{
787 phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
768 788
769 return ret; 789 WARN_ON_ONCE(irqs_disabled());
770 790
771err_warn: 791 if (!is_swiotlb_buffer(phys_addr))
772 pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", 792 return false;
773 dev_name(hwdev), size);
774 dump_stack();
775 793
776 return NULL; 794 /*
795 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
796 * DMA_ATTR_SKIP_CPU_SYNC is optional.
797 */
798 swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
799 DMA_ATTR_SKIP_CPU_SYNC);
800 return true;
777} 801}
778EXPORT_SYMBOL(swiotlb_alloc_coherent);
779 802
780void 803void
781swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 804swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
782 dma_addr_t dev_addr) 805 dma_addr_t dev_addr)
783{ 806{
784 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr); 807 if (!swiotlb_free_buffer(hwdev, size, dev_addr))
785
786 WARN_ON(irqs_disabled());
787 if (!is_swiotlb_buffer(paddr))
788 free_pages((unsigned long)vaddr, get_order(size)); 808 free_pages((unsigned long)vaddr, get_order(size));
789 else
790 /*
791 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
792 * DMA_ATTR_SKIP_CPU_SYNC is optional.
793 */
794 swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
795 DMA_ATTR_SKIP_CPU_SYNC);
796} 809}
797EXPORT_SYMBOL(swiotlb_free_coherent); 810EXPORT_SYMBOL(swiotlb_free_coherent);
798 811
@@ -868,7 +881,6 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
868 881
869 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer); 882 return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
870} 883}
871EXPORT_SYMBOL_GPL(swiotlb_map_page);
872 884
873/* 885/*
874 * Unmap a single streaming mode DMA translation. The dma_addr and size must 886 * Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -909,7 +921,6 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
909{ 921{
910 unmap_single(hwdev, dev_addr, size, dir, attrs); 922 unmap_single(hwdev, dev_addr, size, dir, attrs);
911} 923}
912EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
913 924
914/* 925/*
915 * Make physical memory consistent for a single streaming mode DMA translation 926 * Make physical memory consistent for a single streaming mode DMA translation
@@ -947,7 +958,6 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
947{ 958{
948 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); 959 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
949} 960}
950EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
951 961
952void 962void
953swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, 963swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -955,7 +965,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
955{ 965{
956 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); 966 swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
957} 967}
958EXPORT_SYMBOL(swiotlb_sync_single_for_device);
959 968
960/* 969/*
961 * Map a set of buffers described by scatterlist in streaming mode for DMA. 970 * Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -1007,7 +1016,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
1007 } 1016 }
1008 return nelems; 1017 return nelems;
1009} 1018}
1010EXPORT_SYMBOL(swiotlb_map_sg_attrs);
1011 1019
1012/* 1020/*
1013 * Unmap a set of streaming mode DMA translations. Again, cpu read rules 1021 * Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -1027,7 +1035,6 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
1027 unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, 1035 unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
1028 attrs); 1036 attrs);
1029} 1037}
1030EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
1031 1038
1032/* 1039/*
1033 * Make physical memory consistent for a set of streaming mode DMA translations 1040 * Make physical memory consistent for a set of streaming mode DMA translations
@@ -1055,7 +1062,6 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
1055{ 1062{
1056 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); 1063 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
1057} 1064}
1058EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
1059 1065
1060void 1066void
1061swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 1067swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -1063,14 +1069,12 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
1063{ 1069{
1064 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 1070 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
1065} 1071}
1066EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
1067 1072
1068int 1073int
1069swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr) 1074swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
1070{ 1075{
1071 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer)); 1076 return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
1072} 1077}
1073EXPORT_SYMBOL(swiotlb_dma_mapping_error);
1074 1078
1075/* 1079/*
1076 * Return whether the given device DMA address mask can be supported 1080 * Return whether the given device DMA address mask can be supported
@@ -1083,4 +1087,49 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
1083{ 1087{
1084 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask; 1088 return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
1085} 1089}
1086EXPORT_SYMBOL(swiotlb_dma_supported); 1090
1091#ifdef CONFIG_DMA_DIRECT_OPS
1092void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
1093 gfp_t gfp, unsigned long attrs)
1094{
1095 void *vaddr;
1096
1097 /* temporary workaround: */
1098 if (gfp & __GFP_NOWARN)
1099 attrs |= DMA_ATTR_NO_WARN;
1100
1101 /*
1102 * Don't print a warning when the first allocation attempt fails.
1103 * swiotlb_alloc_buffer() will print a warning when the DMA memory
1104 * allocation ultimately fails.
1105 */
1106 gfp |= __GFP_NOWARN;
1107
1108 vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
1109 if (!vaddr)
1110 vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
1111 return vaddr;
1112}
1113
1114void swiotlb_free(struct device *dev, size_t size, void *vaddr,
1115 dma_addr_t dma_addr, unsigned long attrs)
1116{
1117 if (!swiotlb_free_buffer(dev, size, dma_addr))
1118 dma_direct_free(dev, size, vaddr, dma_addr, attrs);
1119}
1120
1121const struct dma_map_ops swiotlb_dma_ops = {
1122 .mapping_error = swiotlb_dma_mapping_error,
1123 .alloc = swiotlb_alloc,
1124 .free = swiotlb_free,
1125 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
1126 .sync_single_for_device = swiotlb_sync_single_for_device,
1127 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
1128 .sync_sg_for_device = swiotlb_sync_sg_for_device,
1129 .map_sg = swiotlb_map_sg_attrs,
1130 .unmap_sg = swiotlb_unmap_sg_attrs,
1131 .map_page = swiotlb_map_page,
1132 .unmap_page = swiotlb_unmap_page,
1133 .dma_supported = swiotlb_dma_supported,
1134};
1135#endif /* CONFIG_DMA_DIRECT_OPS */
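The swiotlb_dma_ops table defined above lets an architecture fall back from dma_direct_alloc() to a bounce buffer without carrying its own copy of the ops. A rough sketch of how a device might be pointed at it; the hook name and the needs_bounce criterion are assumptions, not part of this patch:

	/* hypothetical arch setup path */
	static void example_setup_dma_ops(struct device *dev, bool needs_bounce)
	{
		/* needs_bounce stands in for whatever DMA-mask or IOMMU check the arch uses */
		dev->dma_ops = needs_bounce ? &swiotlb_dma_ops : &dma_direct_ops;
	}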
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 9e9748089270..4cd9ea9b3449 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
2003 { { 4, 0 }, { 5, 10 } } 2003 { { 4, 0 }, { 5, 10 } }
2004 }, 2004 },
2005 { 2005 {
2006 "INT: DIV by zero", 2006 /* This one doesn't go through verifier, but is just raw insn
2007 * as opposed to cBPF tests from here. Thus div by 0 tests are
2008 * done in test_verifier in BPF kselftests.
2009 */
2010 "INT: DIV by -1",
2007 .u.insns_int = { 2011 .u.insns_int = {
2008 BPF_ALU64_REG(BPF_MOV, R6, R1), 2012 BPF_ALU64_REG(BPF_MOV, R6, R1),
2009 BPF_ALU64_IMM(BPF_MOV, R7, 0), 2013 BPF_ALU64_IMM(BPF_MOV, R7, -1),
2010 BPF_LD_ABS(BPF_B, 3), 2014 BPF_LD_ABS(BPF_B, 3),
2011 BPF_ALU32_REG(BPF_DIV, R0, R7), 2015 BPF_ALU32_REG(BPF_DIV, R0, R7),
2012 BPF_EXIT_INSN(), 2016 BPF_EXIT_INSN(),
@@ -6109,6 +6113,110 @@ static struct bpf_test tests[] = {
6109 { { ETH_HLEN, 42 } }, 6113 { { ETH_HLEN, 42 } },
6110 .fill_helper = bpf_fill_ld_abs_vlan_push_pop2, 6114 .fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
6111 }, 6115 },
6116 /* Checking interpreter vs JIT wrt signed extended imms. */
6117 {
6118 "JNE signed compare, test 1",
6119 .u.insns_int = {
6120 BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
6121 BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
6122 BPF_MOV64_REG(R2, R1),
6123 BPF_ALU64_REG(BPF_AND, R2, R3),
6124 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6125 BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
6126 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6127 BPF_EXIT_INSN(),
6128 },
6129 INTERNAL,
6130 { },
6131 { { 0, 1 } },
6132 },
6133 {
6134 "JNE signed compare, test 2",
6135 .u.insns_int = {
6136 BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
6137 BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
6138 BPF_MOV64_REG(R2, R1),
6139 BPF_ALU64_REG(BPF_AND, R2, R3),
6140 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6141 BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
6142 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6143 BPF_EXIT_INSN(),
6144 },
6145 INTERNAL,
6146 { },
6147 { { 0, 1 } },
6148 },
6149 {
6150 "JNE signed compare, test 3",
6151 .u.insns_int = {
6152 BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
6153 BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
6154 BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
6155 BPF_MOV64_REG(R2, R1),
6156 BPF_ALU64_REG(BPF_AND, R2, R3),
6157 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6158 BPF_JMP_REG(BPF_JNE, R2, R4, 1),
6159 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6160 BPF_EXIT_INSN(),
6161 },
6162 INTERNAL,
6163 { },
6164 { { 0, 2 } },
6165 },
6166 {
6167 "JNE signed compare, test 4",
6168 .u.insns_int = {
6169 BPF_LD_IMM64(R1, -17104896),
6170 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6171 BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
6172 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6173 BPF_EXIT_INSN(),
6174 },
6175 INTERNAL,
6176 { },
6177 { { 0, 2 } },
6178 },
6179 {
6180 "JNE signed compare, test 5",
6181 .u.insns_int = {
6182 BPF_LD_IMM64(R1, 0xfefb0000),
6183 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6184 BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
6185 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6186 BPF_EXIT_INSN(),
6187 },
6188 INTERNAL,
6189 { },
6190 { { 0, 1 } },
6191 },
6192 {
6193 "JNE signed compare, test 6",
6194 .u.insns_int = {
6195 BPF_LD_IMM64(R1, 0x7efb0000),
6196 BPF_ALU32_IMM(BPF_MOV, R0, 1),
6197 BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
6198 BPF_ALU32_IMM(BPF_MOV, R0, 2),
6199 BPF_EXIT_INSN(),
6200 },
6201 INTERNAL,
6202 { },
6203 { { 0, 2 } },
6204 },
6205 {
6206 "JNE signed compare, test 7",
6207 .u.insns = {
6208 BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
6209 BPF_STMT(BPF_MISC | BPF_TAX, 0),
6210 BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
6211 BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
6212 BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
6213 BPF_STMT(BPF_RET | BPF_K, 1),
6214 BPF_STMT(BPF_RET | BPF_K, 2),
6215 },
6216 CLASSIC | FLAG_NO_DATA,
6217 {},
6218 { { 0, 2 } },
6219 },
6112}; 6220};
6113 6221
6114static struct net_device dev; 6222static struct net_device dev;
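The expected outputs of the signed-compare tests above follow from 32-bit ALU moves zero-extending the destination register while BPF_JMP_IMM sign-extends its 32-bit immediate; worked through for tests 1 and 3 with the constants shown:

	R2  = 0xfefbbc12 & 0xffff0000      -> 0x00000000fefb0000   (upper half zeroed)
	imm = -17104896 (0xfefb0000)       -> 0xfffffffffefb0000   (sign-extended)
	JNE R2, imm is taken, R0 stays 1   => test 1 expects { 0, 1 }

	R4  = ALU32 MOV 0xfefb0000         -> 0x00000000fefb0000
	JNE R2, R4 is not taken, R0 = 2    => test 3 expects { 0, 2 }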
@@ -6250,9 +6358,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
6250 return NULL; 6358 return NULL;
6251 } 6359 }
6252 } 6360 }
6253 /* We don't expect to fail. */
6254 if (*err) { 6361 if (*err) {
6255 pr_cont("FAIL to attach err=%d len=%d\n", 6362 pr_cont("FAIL to prog_create err=%d len=%d\n",
6256 *err, fprog.len); 6363 *err, fprog.len);
6257 return NULL; 6364 return NULL;
6258 } 6365 }
@@ -6276,6 +6383,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
6276 * checks. 6383 * checks.
6277 */ 6384 */
6278 fp = bpf_prog_select_runtime(fp, err); 6385 fp = bpf_prog_select_runtime(fp, err);
6386 if (*err) {
6387 pr_cont("FAIL to select_runtime err=%d\n", *err);
6388 return NULL;
6389 }
6279 break; 6390 break;
6280 } 6391 }
6281 6392
@@ -6461,8 +6572,8 @@ static __init int test_bpf(void)
6461 pass_cnt++; 6572 pass_cnt++;
6462 continue; 6573 continue;
6463 } 6574 }
6464 6575 err_cnt++;
6465 return err; 6576 continue;
6466 } 6577 }
6467 6578
6468 pr_cont("jited:%u ", fp->jited); 6579 pr_cont("jited:%u ", fp->jited);
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 64a4c76cba2b..078a61480573 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -96,7 +96,7 @@ struct test_config {
96 struct device *device); 96 struct device *device);
97}; 97};
98 98
99struct test_config *test_fw_config; 99static struct test_config *test_fw_config;
100 100
101static ssize_t test_fw_misc_read(struct file *f, char __user *buf, 101static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
102 size_t size, loff_t *offset) 102 size_t size, loff_t *offset)
@@ -359,7 +359,7 @@ static ssize_t config_name_show(struct device *dev,
359{ 359{
360 return config_test_show_str(buf, test_fw_config->name); 360 return config_test_show_str(buf, test_fw_config->name);
361} 361}
362static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store); 362static DEVICE_ATTR_RW(config_name);
363 363
364static ssize_t config_num_requests_store(struct device *dev, 364static ssize_t config_num_requests_store(struct device *dev,
365 struct device_attribute *attr, 365 struct device_attribute *attr,
@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
371 if (test_fw_config->reqs) { 371 if (test_fw_config->reqs) {
372 pr_err("Must call release_all_firmware prior to changing config\n"); 372 pr_err("Must call release_all_firmware prior to changing config\n");
373 rc = -EINVAL; 373 rc = -EINVAL;
374 mutex_unlock(&test_fw_mutex);
374 goto out; 375 goto out;
375 } 376 }
376 mutex_unlock(&test_fw_mutex); 377 mutex_unlock(&test_fw_mutex);
@@ -388,8 +389,7 @@ static ssize_t config_num_requests_show(struct device *dev,
388{ 389{
389 return test_dev_config_show_u8(buf, test_fw_config->num_requests); 390 return test_dev_config_show_u8(buf, test_fw_config->num_requests);
390} 391}
391static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show, 392static DEVICE_ATTR_RW(config_num_requests);
392 config_num_requests_store);
393 393
394static ssize_t config_sync_direct_store(struct device *dev, 394static ssize_t config_sync_direct_store(struct device *dev,
395 struct device_attribute *attr, 395 struct device_attribute *attr,
@@ -411,8 +411,7 @@ static ssize_t config_sync_direct_show(struct device *dev,
411{ 411{
412 return test_dev_config_show_bool(buf, test_fw_config->sync_direct); 412 return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
413} 413}
414static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show, 414static DEVICE_ATTR_RW(config_sync_direct);
415 config_sync_direct_store);
416 415
417static ssize_t config_send_uevent_store(struct device *dev, 416static ssize_t config_send_uevent_store(struct device *dev,
418 struct device_attribute *attr, 417 struct device_attribute *attr,
@@ -428,8 +427,7 @@ static ssize_t config_send_uevent_show(struct device *dev,
428{ 427{
429 return test_dev_config_show_bool(buf, test_fw_config->send_uevent); 428 return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
430} 429}
431static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show, 430static DEVICE_ATTR_RW(config_send_uevent);
432 config_send_uevent_store);
433 431
434static ssize_t config_read_fw_idx_store(struct device *dev, 432static ssize_t config_read_fw_idx_store(struct device *dev,
435 struct device_attribute *attr, 433 struct device_attribute *attr,
@@ -445,8 +443,7 @@ static ssize_t config_read_fw_idx_show(struct device *dev,
445{ 443{
446 return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx); 444 return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
447} 445}
448static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show, 446static DEVICE_ATTR_RW(config_read_fw_idx);
449 config_read_fw_idx_store);
450 447
451 448
452static ssize_t trigger_request_store(struct device *dev, 449static ssize_t trigger_request_store(struct device *dev,
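The DEVICE_ATTR_RW() conversions above rely on the macro deriving its callbacks from the attribute name; a minimal sketch of the naming convention it assumes, with foo as a placeholder attribute:

	static ssize_t foo_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	static ssize_t foo_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
	{
		return count;
	}

	/* declares dev_attr_foo with 0644 permissions and the two callbacks above */
	static DEVICE_ATTR_RW(foo);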
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 337f408b4de6..e372b97eee13 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -694,8 +694,7 @@ static ssize_t config_test_driver_show(struct device *dev,
694 return config_test_show_str(&test_dev->config_mutex, buf, 694 return config_test_show_str(&test_dev->config_mutex, buf,
695 config->test_driver); 695 config->test_driver);
696} 696}
697static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show, 697static DEVICE_ATTR_RW(config_test_driver);
698 config_test_driver_store);
699 698
700static ssize_t config_test_fs_store(struct device *dev, 699static ssize_t config_test_fs_store(struct device *dev,
701 struct device_attribute *attr, 700 struct device_attribute *attr,
@@ -726,8 +725,7 @@ static ssize_t config_test_fs_show(struct device *dev,
726 return config_test_show_str(&test_dev->config_mutex, buf, 725 return config_test_show_str(&test_dev->config_mutex, buf,
727 config->test_fs); 726 config->test_fs);
728} 727}
729static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show, 728static DEVICE_ATTR_RW(config_test_fs);
730 config_test_fs_store);
731 729
732static int trigger_config_run_type(struct kmod_test_device *test_dev, 730static int trigger_config_run_type(struct kmod_test_device *test_dev,
733 enum kmod_test_case test_case, 731 enum kmod_test_case test_case,
@@ -1012,8 +1010,7 @@ static ssize_t config_num_threads_show(struct device *dev,
1012 1010
1013 return test_dev_config_show_int(test_dev, buf, config->num_threads); 1011 return test_dev_config_show_int(test_dev, buf, config->num_threads);
1014} 1012}
1015static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show, 1013static DEVICE_ATTR_RW(config_num_threads);
1016 config_num_threads_store);
1017 1014
1018static ssize_t config_test_case_store(struct device *dev, 1015static ssize_t config_test_case_store(struct device *dev,
1019 struct device_attribute *attr, 1016 struct device_attribute *attr,
@@ -1037,8 +1034,7 @@ static ssize_t config_test_case_show(struct device *dev,
1037 1034
1038 return test_dev_config_show_uint(test_dev, buf, config->test_case); 1035 return test_dev_config_show_uint(test_dev, buf, config->test_case);
1039} 1036}
1040static DEVICE_ATTR(config_test_case, 0644, config_test_case_show, 1037static DEVICE_ATTR_RW(config_test_case);
1041 config_test_case_store);
1042 1038
1043static ssize_t test_result_show(struct device *dev, 1039static ssize_t test_result_show(struct device *dev,
1044 struct device_attribute *attr, 1040 struct device_attribute *attr,
@@ -1049,7 +1045,7 @@ static ssize_t test_result_show(struct device *dev,
1049 1045
1050 return test_dev_config_show_int(test_dev, buf, config->test_result); 1046 return test_dev_config_show_int(test_dev, buf, config->test_result);
1051} 1047}
1052static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store); 1048static DEVICE_ATTR_RW(test_result);
1053 1049
1054#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr 1050#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
1055 1051
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbdc049c..76d3667fdea2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
162 return; 162 return;
163 } 163 }
164 164
165 err = rhashtable_walk_start(&hti); 165 rhashtable_walk_start(&hti);
166 if (err && err != -EAGAIN) {
167 pr_warn("Test failed: iterator failed: %d\n", err);
168 return;
169 }
170 166
171 while ((pos = rhashtable_walk_next(&hti))) { 167 while ((pos = rhashtable_walk_next(&hti))) {
172 if (PTR_ERR(pos) == -EAGAIN) { 168 if (PTR_ERR(pos) == -EAGAIN) {
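The dropped error handling above reflects rhashtable_walk_start() no longer returning a value in this series; a caller that still wants the resize notification would use the checking variant, roughly:

	err = rhashtable_walk_start_check(&hti);
	if (err == -EAGAIN) {
		/* a resize happened while entering; the walk still proceeds from scratch */
	}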
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 15e2e6fb060e..3744b2a8e591 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
20#endif 20#endif
21 21
22#ifndef INLINE_COPY_TO_USER 22#ifndef INLINE_COPY_TO_USER
23unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n) 23unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
24{ 24{
25 might_fault(); 25 might_fault();
26 if (likely(access_ok(VERIFY_WRITE, to, n))) { 26 if (likely(access_ok(VERIFY_WRITE, to, n))) {
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 2b18135446dc..77ee6ced11b1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -42,7 +42,6 @@
42#include "../mm/internal.h" /* For the trace_print_flags arrays */ 42#include "../mm/internal.h" /* For the trace_print_flags arrays */
43 43
44#include <asm/page.h> /* for PAGE_SIZE */ 44#include <asm/page.h> /* for PAGE_SIZE */
45#include <asm/sections.h> /* for dereference_function_descriptor() */
46#include <asm/byteorder.h> /* cpu_to_le16 */ 45#include <asm/byteorder.h> /* cpu_to_le16 */
47 46
48#include <linux/string_helpers.h> 47#include <linux/string_helpers.h>
@@ -1863,10 +1862,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1863 switch (*fmt) { 1862 switch (*fmt) {
1864 case 'F': 1863 case 'F':
1865 case 'f': 1864 case 'f':
1866 ptr = dereference_function_descriptor(ptr);
1867 /* Fallthrough */
1868 case 'S': 1865 case 'S':
1869 case 's': 1866 case 's':
1867 ptr = dereference_symbol_descriptor(ptr);
1868 /* Fallthrough */
1870 case 'B': 1869 case 'B':
1871 return symbol_string(buf, end, ptr, spec, fmt); 1870 return symbol_string(buf, end, ptr, spec, fmt);
1872 case 'R': 1871 case 'R':
@@ -2517,29 +2516,34 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
2517{ 2516{
2518 struct printf_spec spec = {0}; 2517 struct printf_spec spec = {0};
2519 char *str, *end; 2518 char *str, *end;
2519 int width;
2520 2520
2521 str = (char *)bin_buf; 2521 str = (char *)bin_buf;
2522 end = (char *)(bin_buf + size); 2522 end = (char *)(bin_buf + size);
2523 2523
2524#define save_arg(type) \ 2524#define save_arg(type) \
2525do { \ 2525({ \
2526 unsigned long long value; \
2526 if (sizeof(type) == 8) { \ 2527 if (sizeof(type) == 8) { \
2527 unsigned long long value; \ 2528 unsigned long long val8; \
2528 str = PTR_ALIGN(str, sizeof(u32)); \ 2529 str = PTR_ALIGN(str, sizeof(u32)); \
2529 value = va_arg(args, unsigned long long); \ 2530 val8 = va_arg(args, unsigned long long); \
2530 if (str + sizeof(type) <= end) { \ 2531 if (str + sizeof(type) <= end) { \
2531 *(u32 *)str = *(u32 *)&value; \ 2532 *(u32 *)str = *(u32 *)&val8; \
2532 *(u32 *)(str + 4) = *((u32 *)&value + 1); \ 2533 *(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
2533 } \ 2534 } \
2535 value = val8; \
2534 } else { \ 2536 } else { \
2535 unsigned long value; \ 2537 unsigned int val4; \
2536 str = PTR_ALIGN(str, sizeof(type)); \ 2538 str = PTR_ALIGN(str, sizeof(type)); \
2537 value = va_arg(args, int); \ 2539 val4 = va_arg(args, int); \
2538 if (str + sizeof(type) <= end) \ 2540 if (str + sizeof(type) <= end) \
2539 *(typeof(type) *)str = (type)value; \ 2541 *(typeof(type) *)str = (type)(long)val4; \
2542 value = (unsigned long long)val4; \
2540 } \ 2543 } \
2541 str += sizeof(type); \ 2544 str += sizeof(type); \
2542} while (0) 2545 value; \
2546})
2543 2547
2544 while (*fmt) { 2548 while (*fmt) {
2545 int read = format_decode(fmt, &spec); 2549 int read = format_decode(fmt, &spec);
@@ -2555,7 +2559,10 @@ do { \
2555 2559
2556 case FORMAT_TYPE_WIDTH: 2560 case FORMAT_TYPE_WIDTH:
2557 case FORMAT_TYPE_PRECISION: 2561 case FORMAT_TYPE_PRECISION:
2558 save_arg(int); 2562 width = (int)save_arg(int);
2563 /* Pointers may require the width */
2564 if (*fmt == 'p')
2565 set_field_width(&spec, width);
2559 break; 2566 break;
2560 2567
2561 case FORMAT_TYPE_CHAR: 2568 case FORMAT_TYPE_CHAR:
@@ -2577,7 +2584,27 @@ do { \
2577 } 2584 }
2578 2585
2579 case FORMAT_TYPE_PTR: 2586 case FORMAT_TYPE_PTR:
2580 save_arg(void *); 2587 /* Dereferenced pointers must be done now */
2588 switch (*fmt) {
2589 /* Dereference of functions is still OK */
2590 case 'S':
2591 case 's':
2592 case 'F':
2593 case 'f':
2594 save_arg(void *);
2595 break;
2596 default:
2597 if (!isalnum(*fmt)) {
2598 save_arg(void *);
2599 break;
2600 }
2601 str = pointer(fmt, str, end, va_arg(args, void *),
2602 spec);
2603 if (str + 1 < end)
2604 *str++ = '\0';
2605 else
2606 end[-1] = '\0'; /* Must be nul terminated */
2607 }
2581 /* skip all alphanumeric pointer suffixes */ 2608 /* skip all alphanumeric pointer suffixes */
2582 while (isalnum(*fmt)) 2609 while (isalnum(*fmt))
2583 fmt++; 2610 fmt++;
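For context on the vbin_printf() change above and the matching bstr_printf() change below: most %p extensions dereference their argument, and with this patch that dereference happens at pack time, while the data is still live, instead of at decode time. A rough usage sketch assuming CONFIG_BINARY_PRINTF; the buffer sizes and format string are illustrative:

	u32 bin[64];
	char out[128];
	u8 mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

	/* pack: %pM reads mac[] here, so it need not stay valid afterwards */
	bprintf(bin, ARRAY_SIZE(bin), "dev %pM queue %d\n", mac, 3);

	/* decode later, possibly long after mac[] has changed or been freed */
	bstr_printf(out, sizeof(out), "dev %pM queue %d\n", bin);

Symbol formats (%pS/%ps/%pF/%pf) remain deferred to bstr_printf(), as the switch statement above notes.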
@@ -2729,11 +2756,39 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
2729 break; 2756 break;
2730 } 2757 }
2731 2758
2732 case FORMAT_TYPE_PTR: 2759 case FORMAT_TYPE_PTR: {
2733 str = pointer(fmt, str, end, get_arg(void *), spec); 2760 bool process = false;
2761 int copy, len;
2762 /* Non function dereferences were already done */
2763 switch (*fmt) {
2764 case 'S':
2765 case 's':
2766 case 'F':
2767 case 'f':
2768 process = true;
2769 break;
2770 default:
2771 if (!isalnum(*fmt)) {
2772 process = true;
2773 break;
2774 }
2775 /* Pointer dereference was already processed */
2776 if (str < end) {
2777 len = copy = strlen(args);
2778 if (copy > end - str)
2779 copy = end - str;
2780 memcpy(str, args, copy);
2781 str += len;
2782 args += len + 1;
2783 }
2784 }
2785 if (process)
2786 str = pointer(fmt, str, end, get_arg(void *), spec);
2787
2734 while (isalnum(*fmt)) 2788 while (isalnum(*fmt))
2735 fmt++; 2789 fmt++;
2736 break; 2790 break;
2791 }
2737 2792
2738 case FORMAT_TYPE_PERCENT_CHAR: 2793 case FORMAT_TYPE_PERCENT_CHAR:
2739 if (str < end) 2794 if (str < end)