Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig31
-rw-r--r--lib/Kconfig.debug37
-rw-r--r--lib/Makefile17
-rw-r--r--lib/asn1_decoder.c4
-rw-r--r--lib/bitmap.c280
-rw-r--r--lib/cmdline.c5
-rw-r--r--lib/crc-t10dif.c1
-rw-r--r--lib/digsig.c1
-rw-r--r--lib/dynamic_debug.c37
-rw-r--r--lib/error-inject.c2
-rw-r--r--lib/iov_iter.c7
-rw-r--r--lib/kobject.c93
-rw-r--r--lib/kobject_uevent.c11
-rw-r--r--lib/libcrc32c.c1
-rw-r--r--lib/list_sort.c242
-rw-r--r--lib/math/Kconfig11
-rw-r--r--lib/math/Makefile5
-rw-r--r--lib/math/cordic.c (renamed from lib/cordic.c)0
-rw-r--r--lib/math/div64.c (renamed from lib/div64.c)2
-rw-r--r--lib/math/gcd.c (renamed from lib/gcd.c)0
-rw-r--r--lib/math/int_pow.c32
-rw-r--r--lib/math/int_sqrt.c (renamed from lib/int_sqrt.c)0
-rw-r--r--lib/math/lcm.c (renamed from lib/lcm.c)0
-rw-r--r--lib/math/prime_numbers.c (renamed from lib/prime_numbers.c)0
-rw-r--r--lib/math/rational.c (renamed from lib/rational.c)0
-rw-r--r--lib/math/reciprocal_div.c (renamed from lib/reciprocal_div.c)0
-rw-r--r--lib/nlattr.c200
-rw-r--r--lib/packing.c213
-rw-r--r--lib/percpu-refcount.c4
-rw-r--r--lib/plist.c4
-rw-r--r--lib/rhashtable.c210
-rw-r--r--lib/siphash.c36
-rw-r--r--lib/sort.c254
-rw-r--r--lib/string.c47
-rw-r--r--lib/test_bitmap.c87
-rw-r--r--lib/test_printf.c46
-rw-r--r--lib/test_rhashtable.c2
-rw-r--r--lib/test_strscpy.c150
-rw-r--r--lib/test_sysctl.c18
-rw-r--r--lib/test_vmalloc.c8
-rw-r--r--lib/vsprintf.c428
-rw-r--r--lib/zstd/bitstream.h5
-rw-r--r--lib/zstd/compress.c1
-rw-r--r--lib/zstd/decompress.c5
-rw-r--r--lib/zstd/huf_compress.c2
45 files changed, 1793 insertions, 746 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index e86975bfca6a..3577609b61be 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -18,6 +18,23 @@ config RAID6_PQ_BENCHMARK
18 Benchmark all available RAID6 PQ functions on init and choose the 18 Benchmark all available RAID6 PQ functions on init and choose the
19 fastest one. 19 fastest one.
20 20
21config PACKING
22 bool "Generic bitfield packing and unpacking"
23 default n
24 help
25 This option provides the packing() helper function, which permits
26 converting bitfields between a CPU-usable representation and a
27 memory representation that can have any combination of these quirks:
28 - Is little endian (bytes are reversed within a 32-bit group)
29 - The least-significant 32-bit word comes first (within a 64-bit
30 group)
31 - The most significant bit of a byte is at its right (bit 0 of a
32 register description is numerically 2^7).
33 Drivers may use these helpers to match the bit indices as described
34 in the data sheets of the peripherals they are in control of.
35
36 When in doubt, say N.
37
21config BITREVERSE 38config BITREVERSE
22 tristate 39 tristate
23 40
@@ -29,9 +46,6 @@ config HAVE_ARCH_BITREVERSE
29 This option enables the use of hardware bit-reversal instructions on 46 This option enables the use of hardware bit-reversal instructions on
30 architectures which support such operations. 47 architectures which support such operations.
31 48
32config RATIONAL
33 bool
34
35config GENERIC_STRNCPY_FROM_USER 49config GENERIC_STRNCPY_FROM_USER
36 bool 50 bool
37 51
@@ -44,6 +58,8 @@ config GENERIC_NET_UTILS
44config GENERIC_FIND_FIRST_BIT 58config GENERIC_FIND_FIRST_BIT
45 bool 59 bool
46 60
61source "lib/math/Kconfig"
62
47config NO_GENERIC_PCI_IOPORT_MAP 63config NO_GENERIC_PCI_IOPORT_MAP
48 bool 64 bool
49 65
@@ -514,12 +530,6 @@ config LRU_CACHE
514config CLZ_TAB 530config CLZ_TAB
515 bool 531 bool
516 532
517config CORDIC
518 tristate "CORDIC algorithm"
519 help
520 This option provides an implementation of the CORDIC algorithm;
521 calculations are in fixed point. Module will be called cordic.
522
523config DDR 533config DDR
524 bool "JEDEC DDR data" 534 bool "JEDEC DDR data"
525 help 535 help
@@ -611,9 +621,6 @@ config SBITMAP
611config PARMAN 621config PARMAN
612 tristate "parman" if COMPILE_TEST 622 tristate "parman" if COMPILE_TEST
613 623
614config PRIME_NUMBERS
615 tristate
616
617config STRING_SELFTEST 624config STRING_SELFTEST
618 tristate "Test string functions" 625 tristate "Test string functions"
619 626
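
For reference, a minimal sketch of driving the packing() helper that the new
CONFIG_PACKING option above enables (signature as added by this series in
include/linux/packing.h); the register layout and field offsets below are
hypothetical, chosen only to show the quirk flags the help text describes:

#include <linux/packing.h>

/* Pack a hypothetical 6-bit SPEED field occupying bits 61..56 of an
 * 8-byte register image whose memory layout is little endian with the
 * least-significant 32-bit word first.
 */
static int example_pack_speed(void *pbuf, u64 speed)
{
	return packing(pbuf, &speed, 61, 56, 8, PACK,
		       QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
}

Passing UNPACK instead reverses the direction, filling the u64 from the
buffer, so one call site can serve both directions.
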
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d5a4a4036d2f..fdfa173651eb 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -219,6 +219,14 @@ config DEBUG_INFO_DWARF4
219 But it significantly improves the success of resolving 219 But it significantly improves the success of resolving
220 variables in gdb on optimized code. 220 variables in gdb on optimized code.
221 221
222config DEBUG_INFO_BTF
223 bool "Generate BTF typeinfo"
224 depends on DEBUG_INFO
225 help
226 Generate deduplicated BTF type information from DWARF debug info.
227 Turning this on requires the pahole tool, which will convert
228 DWARF type info into equivalent deduplicated BTF type info.
229
222config GDB_SCRIPTS 230config GDB_SCRIPTS
223 bool "Provide GDB scripts for kernel debugging" 231 bool "Provide GDB scripts for kernel debugging"
224 depends on DEBUG_INFO 232 depends on DEBUG_INFO
@@ -310,6 +318,20 @@ config HEADERS_CHECK
310 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in 318 exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
311 your build tree), to make sure they're suitable. 319 your build tree), to make sure they're suitable.
312 320
321config OPTIMIZE_INLINING
322 bool "Allow compiler to uninline functions marked 'inline'"
323 help
324 This option determines if the kernel forces gcc to inline the functions
325 developers have marked 'inline'. Doing so takes away freedom from gcc to
326 do what it thinks is best, which is desirable for the gcc 3.x series of
327 compilers. The gcc 4.x series have a rewritten inlining algorithm and
328 enabling this option will generate a smaller kernel there. Hopefully
329 this algorithm is so good that allowing gcc 4.x and above to make the
330 decision will become the default in the future. Until then this option
331 is there to test gcc for this.
332
333 If unsure, say N.
334
313config DEBUG_SECTION_MISMATCH 335config DEBUG_SECTION_MISMATCH
314 bool "Enable full Section mismatch analysis" 336 bool "Enable full Section mismatch analysis"
315 help 337 help
@@ -438,6 +460,15 @@ config DEBUG_KERNEL
438 Say Y here if you are developing drivers or trying to debug and 460 Say Y here if you are developing drivers or trying to debug and
439 identify kernel problems. 461 identify kernel problems.
440 462
463config DEBUG_MISC
464 bool "Miscellaneous debug code"
465 default DEBUG_KERNEL
466 depends on DEBUG_KERNEL
467 help
468 Say Y here if you need to enable miscellaneous debug code that should
469 be under a more specific debug option but isn't.
470
471
441menu "Memory Debugging" 472menu "Memory Debugging"
442 473
443source "mm/Kconfig.debug" 474source "mm/Kconfig.debug"
@@ -1350,7 +1381,7 @@ config DEBUG_LIST
1350 1381
1351 If unsure, say N. 1382 If unsure, say N.
1352 1383
1353config DEBUG_PI_LIST 1384config DEBUG_PLIST
1354 bool "Debug priority linked list manipulation" 1385 bool "Debug priority linked list manipulation"
1355 depends on DEBUG_KERNEL 1386 depends on DEBUG_KERNEL
1356 help 1387 help
@@ -1769,6 +1800,9 @@ config TEST_HEXDUMP
1769config TEST_STRING_HELPERS 1800config TEST_STRING_HELPERS
1770 tristate "Test functions located in the string_helpers module at runtime" 1801 tristate "Test functions located in the string_helpers module at runtime"
1771 1802
1803config TEST_STRSCPY
1804 tristate "Test strscpy*() family of functions at runtime"
1805
1772config TEST_KSTRTOX 1806config TEST_KSTRTOX
1773 tristate "Test kstrto*() family of functions at runtime" 1807 tristate "Test kstrto*() family of functions at runtime"
1774 1808
@@ -1927,7 +1961,6 @@ config TEST_STATIC_KEYS
1927config TEST_KMOD 1961config TEST_KMOD
1928 tristate "kmod stress tester" 1962 tristate "kmod stress tester"
1929 depends on m 1963 depends on m
1930 depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
1931 depends on NETDEVICES && NET_CORE && INET # for TUN 1964 depends on NETDEVICES && NET_CORE && INET # for TUN
1932 depends on BLOCK 1965 depends on BLOCK
1933 select TEST_LKM 1966 select TEST_LKM
diff --git a/lib/Makefile b/lib/Makefile
index e16e7aadc41a..fb7697031a79 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -30,7 +30,7 @@ endif
30 30
31lib-y := ctype.o string.o vsprintf.o cmdline.o \ 31lib-y := ctype.o string.o vsprintf.o cmdline.o \
32 rbtree.o radix-tree.o timerqueue.o xarray.o \ 32 rbtree.o radix-tree.o timerqueue.o xarray.o \
33 idr.o int_sqrt.o extable.o \ 33 idr.o extable.o \
34 sha1.o chacha.o irq_regs.o argv_split.o \ 34 sha1.o chacha.o irq_regs.o argv_split.o \
35 flex_proportions.o ratelimit.o show_mem.o \ 35 flex_proportions.o ratelimit.o show_mem.o \
36 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 36 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
@@ -44,11 +44,11 @@ lib-$(CONFIG_SMP) += cpumask.o
44lib-y += kobject.o klist.o 44lib-y += kobject.o klist.o
45obj-y += lockref.o 45obj-y += lockref.o
46 46
47obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ 47obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
48 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ 48 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
49 gcd.o lcm.o list_sort.o uuid.o iov_iter.o clz_ctz.o \ 49 list_sort.o uuid.o iov_iter.o clz_ctz.o \
50 bsearch.o find_bit.o llist.o memweight.o kfifo.o \ 50 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
51 percpu-refcount.o rhashtable.o reciprocal_div.o \ 51 percpu-refcount.o rhashtable.o \
52 once.o refcount.o usercopy.o errseq.o bucket_locks.o \ 52 once.o refcount.o usercopy.o errseq.o bucket_locks.o \
53 generic-radix-tree.o 53 generic-radix-tree.o
54obj-$(CONFIG_STRING_SELFTEST) += test_string.o 54obj-$(CONFIG_STRING_SELFTEST) += test_string.o
@@ -81,6 +81,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
81obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o 81obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
82obj-$(CONFIG_TEST_PRINTF) += test_printf.o 82obj-$(CONFIG_TEST_PRINTF) += test_printf.o
83obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o 83obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
84obj-$(CONFIG_TEST_STRSCPY) += test_strscpy.o
84obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o 85obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
85obj-$(CONFIG_TEST_UUID) += test_uuid.o 86obj-$(CONFIG_TEST_UUID) += test_uuid.o
86obj-$(CONFIG_TEST_XARRAY) += test_xarray.o 87obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
@@ -101,6 +102,8 @@ endif
101obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o 102obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o
102CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) 103CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any)
103 104
105obj-y += math/
106
104obj-$(CONFIG_GENERIC_IOMAP) += iomap.o 107obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
105obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o 108obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o
106obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o 109obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
@@ -119,7 +122,7 @@ obj-$(CONFIG_DEBUG_LIST) += list_debug.o
119obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o 122obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
120 123
121obj-$(CONFIG_BITREVERSE) += bitrev.o 124obj-$(CONFIG_BITREVERSE) += bitrev.o
122obj-$(CONFIG_RATIONAL) += rational.o 125obj-$(CONFIG_PACKING) += packing.o
123obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o 126obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
124obj-$(CONFIG_CRC16) += crc16.o 127obj-$(CONFIG_CRC16) += crc16.o
125obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o 128obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
@@ -193,8 +196,6 @@ obj-$(CONFIG_ATOMIC64_SELFTEST) += atomic64_test.o
193 196
194obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o 197obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o
195 198
196obj-$(CONFIG_CORDIC) += cordic.o
197
198obj-$(CONFIG_DQL) += dynamic_queue_limits.o 199obj-$(CONFIG_DQL) += dynamic_queue_limits.o
199 200
200obj-$(CONFIG_GLOB) += glob.o 201obj-$(CONFIG_GLOB) += glob.o
@@ -236,8 +237,6 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
236 237
237obj-$(CONFIG_FONT_SUPPORT) += fonts/ 238obj-$(CONFIG_FONT_SUPPORT) += fonts/
238 239
239obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
240
241hostprogs-y := gen_crc32table 240hostprogs-y := gen_crc32table
242hostprogs-y += gen_crc64table 241hostprogs-y += gen_crc64table
243clean-files := crc32table.h 242clean-files := crc32table.h
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index dc14beae2c9a..8f3d207d2b00 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -385,6 +385,8 @@ next_op:
385 case ASN1_OP_END_SET_ACT: 385 case ASN1_OP_END_SET_ACT:
386 if (unlikely(!(flags & FLAG_MATCHED))) 386 if (unlikely(!(flags & FLAG_MATCHED)))
387 goto tag_mismatch; 387 goto tag_mismatch;
388 /* fall through */
389
388 case ASN1_OP_END_SEQ: 390 case ASN1_OP_END_SEQ:
389 case ASN1_OP_END_SET_OF: 391 case ASN1_OP_END_SET_OF:
390 case ASN1_OP_END_SEQ_OF: 392 case ASN1_OP_END_SEQ_OF:
@@ -450,6 +452,8 @@ next_op:
450 pc += asn1_op_lengths[op]; 452 pc += asn1_op_lengths[op];
451 goto next_op; 453 goto next_op;
452 } 454 }
455 /* fall through */
456
453 case ASN1_OP_ACT: 457 case ASN1_OP_ACT:
454 ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len); 458 ret = actions[machine[pc + 1]](context, hdr, tag, data + tdp, len);
455 if (ret < 0) 459 if (ret < 0)
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 98872e9025da..f235434df87b 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -20,6 +20,8 @@
20 20
21#include <asm/page.h> 21#include <asm/page.h>
22 22
23#include "kstrtox.h"
24
23/** 25/**
24 * DOC: bitmap introduction 26 * DOC: bitmap introduction
25 * 27 *
@@ -477,12 +479,128 @@ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
477} 479}
478EXPORT_SYMBOL(bitmap_print_to_pagebuf); 480EXPORT_SYMBOL(bitmap_print_to_pagebuf);
479 481
482/*
483 * Region 9-38:4/10 describes the following bitmap structure:
484 * 0        9  12    18                 38
485 * .........****......****......****......
486 *          ^  ^     ^                   ^
487 *     start  off   group_len         end
488 */
489struct region {
490 unsigned int start;
491 unsigned int off;
492 unsigned int group_len;
493 unsigned int end;
494};
495
496static int bitmap_set_region(const struct region *r,
497 unsigned long *bitmap, int nbits)
498{
499 unsigned int start;
500
501 if (r->end >= nbits)
502 return -ERANGE;
503
504 for (start = r->start; start <= r->end; start += r->group_len)
505 bitmap_set(bitmap, start, min(r->end - start + 1, r->off));
506
507 return 0;
508}
509
510static int bitmap_check_region(const struct region *r)
511{
512 if (r->start > r->end || r->group_len == 0 || r->off > r->group_len)
513 return -EINVAL;
514
515 return 0;
516}
517
518static const char *bitmap_getnum(const char *str, unsigned int *num)
519{
520 unsigned long long n;
521 unsigned int len;
522
523 len = _parse_integer(str, 10, &n);
524 if (!len)
525 return ERR_PTR(-EINVAL);
526 if (len & KSTRTOX_OVERFLOW || n != (unsigned int)n)
527 return ERR_PTR(-EOVERFLOW);
528
529 *num = n;
530 return str + len;
531}
532
533static inline bool end_of_str(char c)
534{
535 return c == '\0' || c == '\n';
536}
537
538static inline bool __end_of_region(char c)
539{
540 return isspace(c) || c == ',';
541}
542
543static inline bool end_of_region(char c)
544{
545 return __end_of_region(c) || end_of_str(c);
546}
547
548/*
549 * The format allows commas and whitespaces at the beginning
550 * of the region.
551 */
552static const char *bitmap_find_region(const char *str)
553{
554 while (__end_of_region(*str))
555 str++;
556
557 return end_of_str(*str) ? NULL : str;
558}
559
560static const char *bitmap_parse_region(const char *str, struct region *r)
561{
562 str = bitmap_getnum(str, &r->start);
563 if (IS_ERR(str))
564 return str;
565
566 if (end_of_region(*str))
567 goto no_end;
568
569 if (*str != '-')
570 return ERR_PTR(-EINVAL);
571
572 str = bitmap_getnum(str + 1, &r->end);
573 if (IS_ERR(str))
574 return str;
575
576 if (end_of_region(*str))
577 goto no_pattern;
578
579 if (*str != ':')
580 return ERR_PTR(-EINVAL);
581
582 str = bitmap_getnum(str + 1, &r->off);
583 if (IS_ERR(str))
584 return str;
585
586 if (*str != '/')
587 return ERR_PTR(-EINVAL);
588
589 return bitmap_getnum(str + 1, &r->group_len);
590
591no_end:
592 r->end = r->start;
593no_pattern:
594 r->off = r->end + 1;
595 r->group_len = r->end + 1;
596
597 return end_of_str(*str) ? NULL : str;
598}
599
480/** 600/**
481 * __bitmap_parselist - convert list format ASCII string to bitmap 601 * bitmap_parselist - convert list format ASCII string to bitmap
482 * @buf: read nul-terminated user string from this buffer 602 * @buf: read user string from this buffer; must be terminated
483 * @buflen: buffer size in bytes. If string is smaller than this 603 * with a \0 or \n.
484 * then it must be terminated with a \0.
485 * @is_user: location of buffer, 0 indicates kernel space
486 * @maskp: write resulting mask here 604 * @maskp: write resulting mask here
487 * @nmaskbits: number of bits in mask to be written 605 * @nmaskbits: number of bits in mask to be written
488 * 606 *
@@ -498,127 +616,38 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
498 * 616 *
499 * Returns: 0 on success, -errno on invalid input strings. Error values: 617 * Returns: 0 on success, -errno on invalid input strings. Error values:
500 * 618 *
501 * - ``-EINVAL``: second number in range smaller than first 619 * - ``-EINVAL``: wrong region format
502 * - ``-EINVAL``: invalid character in string 620 * - ``-EINVAL``: invalid character in string
503 * - ``-ERANGE``: bit number specified too large for mask 621 * - ``-ERANGE``: bit number specified too large for mask
622 * - ``-EOVERFLOW``: integer overflow in the input parameters
504 */ 623 */
505static int __bitmap_parselist(const char *buf, unsigned int buflen, 624int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits)
506 int is_user, unsigned long *maskp,
507 int nmaskbits)
508{ 625{
509 unsigned int a, b, old_a, old_b; 626 struct region r;
510 unsigned int group_size, used_size, off; 627 long ret;
511 int c, old_c, totaldigits, ndigits;
512 const char __user __force *ubuf = (const char __user __force *)buf;
513 int at_start, in_range, in_partial_range;
514 628
515 totaldigits = c = 0;
516 old_a = old_b = 0;
517 group_size = used_size = 0;
518 bitmap_zero(maskp, nmaskbits); 629 bitmap_zero(maskp, nmaskbits);
519 do {
520 at_start = 1;
521 in_range = 0;
522 in_partial_range = 0;
523 a = b = 0;
524 ndigits = totaldigits;
525
526 /* Get the next cpu# or a range of cpu#'s */
527 while (buflen) {
528 old_c = c;
529 if (is_user) {
530 if (__get_user(c, ubuf++))
531 return -EFAULT;
532 } else
533 c = *buf++;
534 buflen--;
535 if (isspace(c))
536 continue;
537
538 /* A '\0' or a ',' signal the end of a cpu# or range */
539 if (c == '\0' || c == ',')
540 break;
541 /*
542 * whitespaces between digits are not allowed,
543 * but it's ok if whitespaces are on head or tail.
544 * when old_c is whilespace,
545 * if totaldigits == ndigits, whitespace is on head.
546 * if whitespace is on tail, it should not run here.
547 * as c was ',' or '\0',
548 * the last code line has broken the current loop.
549 */
550 if ((totaldigits != ndigits) && isspace(old_c))
551 return -EINVAL;
552 630
553 if (c == '/') { 631 while (buf) {
554 used_size = a; 632 buf = bitmap_find_region(buf);
555 at_start = 1; 633 if (buf == NULL)
556 in_range = 0; 634 return 0;
557 a = b = 0;
558 continue;
559 }
560 635
561 if (c == ':') { 636 buf = bitmap_parse_region(buf, &r);
562 old_a = a; 637 if (IS_ERR(buf))
563 old_b = b; 638 return PTR_ERR(buf);
564 at_start = 1;
565 in_range = 0;
566 in_partial_range = 1;
567 a = b = 0;
568 continue;
569 }
570 639
571 if (c == '-') { 640 ret = bitmap_check_region(&r);
572 if (at_start || in_range) 641 if (ret)
573 return -EINVAL; 642 return ret;
574 b = 0;
575 in_range = 1;
576 at_start = 1;
577 continue;
578 }
579 643
580 if (!isdigit(c)) 644 ret = bitmap_set_region(&r, maskp, nmaskbits);
581 return -EINVAL; 645 if (ret)
646 return ret;
647 }
582 648
583 b = b * 10 + (c - '0');
584 if (!in_range)
585 a = b;
586 at_start = 0;
587 totaldigits++;
588 }
589 if (ndigits == totaldigits)
590 continue;
591 if (in_partial_range) {
592 group_size = a;
593 a = old_a;
594 b = old_b;
595 old_a = old_b = 0;
596 } else {
597 used_size = group_size = b - a + 1;
598 }
599 /* if no digit is after '-', it's wrong*/
600 if (at_start && in_range)
601 return -EINVAL;
602 if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
603 return -EINVAL;
604 if (b >= nmaskbits)
605 return -ERANGE;
606 while (a <= b) {
607 off = min(b - a + 1, used_size);
608 bitmap_set(maskp, a, off);
609 a += group_size;
610 }
611 } while (buflen && c == ',');
612 return 0; 649 return 0;
613} 650}
614
615int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
616{
617 char *nl = strchrnul(bp, '\n');
618 int len = nl - bp;
619
620 return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
621}
622EXPORT_SYMBOL(bitmap_parselist); 651EXPORT_SYMBOL(bitmap_parselist);
623 652
624 653
@@ -632,23 +661,27 @@ EXPORT_SYMBOL(bitmap_parselist);
632 * @nmaskbits: size of bitmap, in bits. 661 * @nmaskbits: size of bitmap, in bits.
633 * 662 *
634 * Wrapper for bitmap_parselist(), providing it with user buffer. 663 * Wrapper for bitmap_parselist(), providing it with user buffer.
635 *
636 * We cannot have this as an inline function in bitmap.h because it needs
637 * linux/uaccess.h to get the access_ok() declaration and this causes
638 * cyclic dependencies.
639 */ 664 */
640int bitmap_parselist_user(const char __user *ubuf, 665int bitmap_parselist_user(const char __user *ubuf,
641 unsigned int ulen, unsigned long *maskp, 666 unsigned int ulen, unsigned long *maskp,
642 int nmaskbits) 667 int nmaskbits)
643{ 668{
644 if (!access_ok(ubuf, ulen)) 669 char *buf;
645 return -EFAULT; 670 int ret;
646 return __bitmap_parselist((const char __force *)ubuf, 671
647 ulen, 1, maskp, nmaskbits); 672 buf = memdup_user_nul(ubuf, ulen);
673 if (IS_ERR(buf))
674 return PTR_ERR(buf);
675
676 ret = bitmap_parselist(buf, maskp, nmaskbits);
677
678 kfree(buf);
679 return ret;
648} 680}
649EXPORT_SYMBOL(bitmap_parselist_user); 681EXPORT_SYMBOL(bitmap_parselist_user);
650 682
651 683
684#ifdef CONFIG_NUMA
652/** 685/**
653 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap 686 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
654 * @buf: pointer to a bitmap 687 * @buf: pointer to a bitmap
@@ -757,7 +790,6 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src,
757 set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst); 790 set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst);
758 } 791 }
759} 792}
760EXPORT_SYMBOL(bitmap_remap);
761 793
762/** 794/**
763 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit 795 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
@@ -795,7 +827,6 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
795 else 827 else
796 return bitmap_ord_to_pos(new, n % w, bits); 828 return bitmap_ord_to_pos(new, n % w, bits);
797} 829}
798EXPORT_SYMBOL(bitmap_bitremap);
799 830
800/** 831/**
801 * bitmap_onto - translate one bitmap relative to another 832 * bitmap_onto - translate one bitmap relative to another
@@ -930,7 +961,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
930 m++; 961 m++;
931 } 962 }
932} 963}
933EXPORT_SYMBOL(bitmap_onto);
934 964
935/** 965/**
936 * bitmap_fold - fold larger bitmap into smaller, modulo specified size 966 * bitmap_fold - fold larger bitmap into smaller, modulo specified size
@@ -955,7 +985,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig,
955 for_each_set_bit(oldbit, orig, nbits) 985 for_each_set_bit(oldbit, orig, nbits)
956 set_bit(oldbit % sz, dst); 986 set_bit(oldbit % sz, dst);
957} 987}
958EXPORT_SYMBOL(bitmap_fold); 988#endif /* CONFIG_NUMA */
959 989
960/* 990/*
961 * Common code for bitmap_*_region() routines. 991 * Common code for bitmap_*_region() routines.
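
A worked example of the region grammar the new parser accepts:
"start-end:off/group_len" sets the first off bits of every group_len-sized
group within start..end. A hedged sketch (the mask size is arbitrary):

#include <linux/bitmap.h>

static int example_parse(void)
{
	DECLARE_BITMAP(mask, 64);

	/* "0-31:4/8" sets the first 4 bits of each group of 8 within
	 * 0..31, i.e. bits 0-3, 8-11, 16-19 and 24-27.
	 */
	return bitmap_parselist("0-31:4/8", mask, 64);
}
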
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 171c19b6888e..dc59d6216318 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -135,18 +135,23 @@ unsigned long long memparse(const char *ptr, char **retptr)
135 case 'E': 135 case 'E':
136 case 'e': 136 case 'e':
137 ret <<= 10; 137 ret <<= 10;
138 /* fall through */
138 case 'P': 139 case 'P':
139 case 'p': 140 case 'p':
140 ret <<= 10; 141 ret <<= 10;
142 /* fall through */
141 case 'T': 143 case 'T':
142 case 't': 144 case 't':
143 ret <<= 10; 145 ret <<= 10;
146 /* fall through */
144 case 'G': 147 case 'G':
145 case 'g': 148 case 'g':
146 ret <<= 10; 149 ret <<= 10;
150 /* fall through */
147 case 'M': 151 case 'M':
148 case 'm': 152 case 'm':
149 ret <<= 10; 153 ret <<= 10;
154 /* fall through */
150 case 'K': 155 case 'K':
151 case 'k': 156 case 'k':
152 ret <<= 10; 157 ret <<= 10;
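
The cascading shifts above rely on the newly annotated fall-through: each
larger suffix drops into the next case, multiplying by a further 1024. A
small sketch of the resulting behaviour (function name is illustrative):

#include <linux/kernel.h>

static void example_memparse(void)
{
	/* "2M" falls through the 'M' and 'K' cases, shifting left by
	 * 10 twice: 2 << 20 == 2097152 bytes.
	 */
	unsigned long long bytes = memparse("2M", NULL);

	pr_info("parsed %llu bytes\n", bytes);
}
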
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 4d0d47c1ffbd..e89ebfdbb0fc 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -69,7 +69,6 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
69 69
70 rcu_read_lock(); 70 rcu_read_lock();
71 desc.shash.tfm = rcu_dereference(crct10dif_tfm); 71 desc.shash.tfm = rcu_dereference(crct10dif_tfm);
72 desc.shash.flags = 0;
73 *(__u16 *)desc.ctx = crc; 72 *(__u16 *)desc.ctx = crc;
74 73
75 err = crypto_shash_update(&desc.shash, buffer, len); 74 err = crypto_shash_update(&desc.shash, buffer, len);
diff --git a/lib/digsig.c b/lib/digsig.c
index 6ba6fcd92dd1..3b0a579bdcdf 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -240,7 +240,6 @@ int digsig_verify(struct key *keyring, const char *sig, int siglen,
240 goto err; 240 goto err;
241 241
242 desc->tfm = shash; 242 desc->tfm = shash;
243 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
244 243
245 crypto_shash_init(desc); 244 crypto_shash_init(desc);
246 crypto_shash_update(desc, data, datalen); 245 crypto_shash_update(desc, data, datalen);
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 7bdf98c37e91..8a16c2d498e9 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -37,6 +37,8 @@
37#include <linux/device.h> 37#include <linux/device.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39 39
40#include <rdma/ib_verbs.h>
41
40extern struct _ddebug __start___verbose[]; 42extern struct _ddebug __start___verbose[];
41extern struct _ddebug __stop___verbose[]; 43extern struct _ddebug __stop___verbose[];
42 44
@@ -636,6 +638,41 @@ EXPORT_SYMBOL(__dynamic_netdev_dbg);
636 638
637#endif 639#endif
638 640
641#if IS_ENABLED(CONFIG_INFINIBAND)
642
643void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
644 const struct ib_device *ibdev, const char *fmt, ...)
645{
646 struct va_format vaf;
647 va_list args;
648
649 va_start(args, fmt);
650
651 vaf.fmt = fmt;
652 vaf.va = &args;
653
654 if (ibdev && ibdev->dev.parent) {
655 char buf[PREFIX_SIZE];
656
657 dev_printk_emit(LOGLEVEL_DEBUG, ibdev->dev.parent,
658 "%s%s %s %s: %pV",
659 dynamic_emit_prefix(descriptor, buf),
660 dev_driver_string(ibdev->dev.parent),
661 dev_name(ibdev->dev.parent),
662 dev_name(&ibdev->dev),
663 &vaf);
664 } else if (ibdev) {
665 printk(KERN_DEBUG "%s: %pV", dev_name(&ibdev->dev), &vaf);
666 } else {
667 printk(KERN_DEBUG "(NULL ib_device): %pV", &vaf);
668 }
669
670 va_end(args);
671}
672EXPORT_SYMBOL(__dynamic_ibdev_dbg);
673
674#endif
675
639#define DDEBUG_STRING_SIZE 1024 676#define DDEBUG_STRING_SIZE 1024
640static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE]; 677static __initdata char ddebug_setup_string[DDEBUG_STRING_SIZE];
641 678
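
Drivers are expected to reach __dynamic_ibdev_dbg() through the matching
ibdev_dbg() wrapper in <rdma/ib_verbs.h> from the same series; a hedged
usage sketch (device pointer and message are illustrative):

#include <rdma/ib_verbs.h>

static void example_ibdev_dbg(struct ib_device *ibdev)
{
	/* Compiles to a dynamic-debug site under CONFIG_DYNAMIC_DEBUG,
	 * so it can be enabled at runtime via the debugfs control file.
	 */
	ibdev_dbg(ibdev, "created QP %d\n", 42);
}
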
diff --git a/lib/error-inject.c b/lib/error-inject.c
index c0d4600f4896..aa63751c916f 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -189,7 +189,7 @@ static int ei_seq_show(struct seq_file *m, void *v)
189{ 189{
190 struct ei_entry *ent = list_entry(v, struct ei_entry, list); 190 struct ei_entry *ent = list_entry(v, struct ei_entry, list);
191 191
192 seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr, 192 seq_printf(m, "%ps\t%s\n", (void *)ent->start_addr,
193 error_type_string(ent->etype)); 193 error_type_string(ent->etype));
194 return 0; 194 return 0;
195} 195}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b396d328a764..f74fa832f3aa 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1293,7 +1293,9 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
1293 len = maxpages * PAGE_SIZE; 1293 len = maxpages * PAGE_SIZE;
1294 addr &= ~(PAGE_SIZE - 1); 1294 addr &= ~(PAGE_SIZE - 1);
1295 n = DIV_ROUND_UP(len, PAGE_SIZE); 1295 n = DIV_ROUND_UP(len, PAGE_SIZE);
1296 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages); 1296 res = get_user_pages_fast(addr, n,
1297 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0,
1298 pages);
1297 if (unlikely(res < 0)) 1299 if (unlikely(res < 0))
1298 return res; 1300 return res;
1299 return (res == n ? len : res * PAGE_SIZE) - *start; 1301 return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1374,7 +1376,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1374 p = get_pages_array(n); 1376 p = get_pages_array(n);
1375 if (!p) 1377 if (!p)
1376 return -ENOMEM; 1378 return -ENOMEM;
1377 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p); 1379 res = get_user_pages_fast(addr, n,
1380 iov_iter_rw(i) != WRITE ? FOLL_WRITE : 0, p);
1378 if (unlikely(res < 0)) { 1381 if (unlikely(res < 0)) {
1379 kvfree(p); 1382 kvfree(p);
1380 return res; 1383 return res;
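
Both hunks track the same API migration: the third argument of
get_user_pages_fast() changed from a write boolean to a gup_flags bitmask.
A before/after sketch, assuming a caller that needs writable pages:

#include <linux/mm.h>

static int example_gup(unsigned long addr, int n, struct page **pages)
{
	/* Old: get_user_pages_fast(addr, n, 1, pages), where the third
	 * argument was a "write" boolean. New: pass FOLL_WRITE in the
	 * flags bitmask instead (0 for read-only access).
	 */
	return get_user_pages_fast(addr, n, FOLL_WRITE, pages);
}
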
diff --git a/lib/kobject.c b/lib/kobject.c
index aa89edcd2b63..f2ccdbac8ed9 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -18,7 +18,7 @@
18#include <linux/random.h> 18#include <linux/random.h>
19 19
20/** 20/**
21 * kobject_namespace - return @kobj's namespace tag 21 * kobject_namespace() - Return @kobj's namespace tag.
22 * @kobj: kobject in question 22 * @kobj: kobject in question
23 * 23 *
24 * Returns namespace tag of @kobj if its parent has namespace ops enabled 24 * Returns namespace tag of @kobj if its parent has namespace ops enabled
@@ -36,7 +36,7 @@ const void *kobject_namespace(struct kobject *kobj)
36} 36}
37 37
38/** 38/**
39 * kobject_get_ownership - get sysfs ownership data for @kobj 39 * kobject_get_ownership() - Get sysfs ownership data for @kobj.
40 * @kobj: kobject in question 40 * @kobj: kobject in question
41 * @uid: kernel user ID for sysfs objects 41 * @uid: kernel user ID for sysfs objects
42 * @gid: kernel group ID for sysfs objects 42 * @gid: kernel group ID for sysfs objects
@@ -82,6 +82,7 @@ static int populate_dir(struct kobject *kobj)
82 82
83static int create_dir(struct kobject *kobj) 83static int create_dir(struct kobject *kobj)
84{ 84{
85 const struct kobj_type *ktype = get_ktype(kobj);
85 const struct kobj_ns_type_operations *ops; 86 const struct kobj_ns_type_operations *ops;
86 int error; 87 int error;
87 88
@@ -95,6 +96,14 @@ static int create_dir(struct kobject *kobj)
95 return error; 96 return error;
96 } 97 }
97 98
99 if (ktype) {
100 error = sysfs_create_groups(kobj, ktype->default_groups);
101 if (error) {
102 sysfs_remove_dir(kobj);
103 return error;
104 }
105 }
106
98 /* 107 /*
99 * @kobj->sd may be deleted by an ancestor going away. Hold an 108 * @kobj->sd may be deleted by an ancestor going away. Hold an
100 * extra reference so that it stays until @kobj is gone. 109 * extra reference so that it stays until @kobj is gone.
@@ -153,12 +162,11 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
153} 162}
154 163
155/** 164/**
156 * kobject_get_path - generate and return the path associated with a given kobj and kset pair. 165 * kobject_get_path() - Allocate memory and fill in the path for @kobj.
157 *
158 * @kobj: kobject in question, with which to build the path 166 * @kobj: kobject in question, with which to build the path
159 * @gfp_mask: the allocation type used to allocate the path 167 * @gfp_mask: the allocation type used to allocate the path
160 * 168 *
161 * The result must be freed by the caller with kfree(). 169 * Return: The newly allocated memory, caller must free with kfree().
162 */ 170 */
163char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask) 171char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
164{ 172{
@@ -265,7 +273,7 @@ static int kobject_add_internal(struct kobject *kobj)
265} 273}
266 274
267/** 275/**
268 * kobject_set_name_vargs - Set the name of an kobject 276 * kobject_set_name_vargs() - Set the name of a kobject.
269 * @kobj: struct kobject to set the name of 277 * @kobj: struct kobject to set the name of
270 * @fmt: format string used to build the name 278 * @fmt: format string used to build the name
271 * @vargs: vargs to format the string. 279 * @vargs: vargs to format the string.
@@ -305,7 +313,7 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
305} 313}
306 314
307/** 315/**
308 * kobject_set_name - Set the name of a kobject 316 * kobject_set_name() - Set the name of a kobject.
309 * @kobj: struct kobject to set the name of 317 * @kobj: struct kobject to set the name of
310 * @fmt: format string used to build the name 318 * @fmt: format string used to build the name
311 * 319 *
@@ -327,7 +335,7 @@ int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
327EXPORT_SYMBOL(kobject_set_name); 335EXPORT_SYMBOL(kobject_set_name);
328 336
329/** 337/**
330 * kobject_init - initialize a kobject structure 338 * kobject_init() - Initialize a kobject structure.
331 * @kobj: pointer to the kobject to initialize 339 * @kobj: pointer to the kobject to initialize
332 * @ktype: pointer to the ktype for this kobject. 340 * @ktype: pointer to the ktype for this kobject.
333 * 341 *
@@ -383,7 +391,7 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
383} 391}
384 392
385/** 393/**
386 * kobject_add - the main kobject add function 394 * kobject_add() - The main kobject add function.
387 * @kobj: the kobject to add 395 * @kobj: the kobject to add
388 * @parent: pointer to the parent of the kobject. 396 * @parent: pointer to the parent of the kobject.
389 * @fmt: format to name the kobject with. 397 * @fmt: format to name the kobject with.
@@ -397,15 +405,23 @@ static __printf(3, 0) int kobject_add_varg(struct kobject *kobj,
397 * is assigned to the kobject, then the kobject will be located in the 405 * is assigned to the kobject, then the kobject will be located in the
398 * root of the sysfs tree. 406 * root of the sysfs tree.
399 * 407 *
400 * If this function returns an error, kobject_put() must be called to
401 * properly clean up the memory associated with the object.
402 * Under no instance should the kobject that is passed to this function
403 * be directly freed with a call to kfree(), that can leak memory.
404 *
405 * Note, no "add" uevent will be created with this call, the caller should set 408 * Note, no "add" uevent will be created with this call, the caller should set
406 * up all of the necessary sysfs files for the object and then call 409 * up all of the necessary sysfs files for the object and then call
407 * kobject_uevent() with the UEVENT_ADD parameter to ensure that 410 * kobject_uevent() with the UEVENT_ADD parameter to ensure that
408 * userspace is properly notified of this kobject's creation. 411 * userspace is properly notified of this kobject's creation.
412 *
413 * Return: If this function returns an error, kobject_put() must be
414 * called to properly clean up the memory associated with the
415 * object. Under no instance should the kobject that is passed
416 * to this function be directly freed with a call to kfree(),
417 * that can leak memory.
418 *
419 * If this function returns success, kobject_put() must also be called
420 * in order to properly clean up the memory associated with the object.
421 *
422 * In short, once this function is called, kobject_put() MUST be called
423 * when the use of the object is finished in order to properly free
424 * everything.
409 */ 425 */
410int kobject_add(struct kobject *kobj, struct kobject *parent, 426int kobject_add(struct kobject *kobj, struct kobject *parent,
411 const char *fmt, ...) 427 const char *fmt, ...)
@@ -431,15 +447,19 @@ int kobject_add(struct kobject *kobj, struct kobject *parent,
431EXPORT_SYMBOL(kobject_add); 447EXPORT_SYMBOL(kobject_add);
432 448
433/** 449/**
434 * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy 450 * kobject_init_and_add() - Initialize a kobject structure and add it to
451 * the kobject hierarchy.
435 * @kobj: pointer to the kobject to initialize 452 * @kobj: pointer to the kobject to initialize
436 * @ktype: pointer to the ktype for this kobject. 453 * @ktype: pointer to the ktype for this kobject.
437 * @parent: pointer to the parent of this kobject. 454 * @parent: pointer to the parent of this kobject.
438 * @fmt: the name of the kobject. 455 * @fmt: the name of the kobject.
439 * 456 *
440 * This function combines the call to kobject_init() and 457 * This function combines the call to kobject_init() and kobject_add().
441 * kobject_add(). The same type of error handling after a call to 458 *
442 * kobject_add() and kobject lifetime rules are the same here. 459 * If this function returns an error, kobject_put() must be called to
460 * properly clean up the memory associated with the object. This is the
461 * same type of error handling after a call to kobject_add() and kobject
462 * lifetime rules are the same here.
443 */ 463 */
444int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, 464int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
445 struct kobject *parent, const char *fmt, ...) 465 struct kobject *parent, const char *fmt, ...)
@@ -458,7 +478,7 @@ int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
458EXPORT_SYMBOL_GPL(kobject_init_and_add); 478EXPORT_SYMBOL_GPL(kobject_init_and_add);
459 479
460/** 480/**
461 * kobject_rename - change the name of an object 481 * kobject_rename() - Change the name of an object.
462 * @kobj: object in question. 482 * @kobj: object in question.
463 * @new_name: object's new name 483 * @new_name: object's new name
464 * 484 *
@@ -525,7 +545,7 @@ out:
525EXPORT_SYMBOL_GPL(kobject_rename); 545EXPORT_SYMBOL_GPL(kobject_rename);
526 546
527/** 547/**
528 * kobject_move - move object to another parent 548 * kobject_move() - Move object to another parent.
529 * @kobj: object in question. 549 * @kobj: object in question.
530 * @new_parent: object's new parent (can be NULL) 550 * @new_parent: object's new parent (can be NULL)
531 */ 551 */
@@ -578,17 +598,26 @@ out:
578EXPORT_SYMBOL_GPL(kobject_move); 598EXPORT_SYMBOL_GPL(kobject_move);
579 599
580/** 600/**
581 * kobject_del - unlink kobject from hierarchy. 601 * kobject_del() - Unlink kobject from hierarchy.
582 * @kobj: object. 602 * @kobj: object.
603 *
604 * This is the function that should be called to delete an object
605 * successfully added via kobject_add().
583 */ 606 */
584void kobject_del(struct kobject *kobj) 607void kobject_del(struct kobject *kobj)
585{ 608{
586 struct kernfs_node *sd; 609 struct kernfs_node *sd;
610 const struct kobj_type *ktype;
587 611
588 if (!kobj) 612 if (!kobj)
589 return; 613 return;
590 614
591 sd = kobj->sd; 615 sd = kobj->sd;
616 ktype = get_ktype(kobj);
617
618 if (ktype)
619 sysfs_remove_groups(kobj, ktype->default_groups);
620
592 sysfs_remove_dir(kobj); 621 sysfs_remove_dir(kobj);
593 sysfs_put(sd); 622 sysfs_put(sd);
594 623
@@ -600,7 +629,7 @@ void kobject_del(struct kobject *kobj)
600EXPORT_SYMBOL(kobject_del); 629EXPORT_SYMBOL(kobject_del);
601 630
602/** 631/**
603 * kobject_get - increment refcount for object. 632 * kobject_get() - Increment refcount for object.
604 * @kobj: object. 633 * @kobj: object.
605 */ 634 */
606struct kobject *kobject_get(struct kobject *kobj) 635struct kobject *kobject_get(struct kobject *kobj)
@@ -693,7 +722,7 @@ static void kobject_release(struct kref *kref)
693} 722}
694 723
695/** 724/**
696 * kobject_put - decrement refcount for object. 725 * kobject_put() - Decrement refcount for object.
697 * @kobj: object. 726 * @kobj: object.
698 * 727 *
699 * Decrement the refcount, and if 0, call kobject_cleanup(). 728 * Decrement the refcount, and if 0, call kobject_cleanup().
@@ -722,7 +751,7 @@ static struct kobj_type dynamic_kobj_ktype = {
722}; 751};
723 752
724/** 753/**
725 * kobject_create - create a struct kobject dynamically 754 * kobject_create() - Create a struct kobject dynamically.
726 * 755 *
727 * This function creates a kobject structure dynamically and sets it up 756 * This function creates a kobject structure dynamically and sets it up
728 * to be a "dynamic" kobject with a default release function set up. 757 * to be a "dynamic" kobject with a default release function set up.
@@ -745,8 +774,8 @@ struct kobject *kobject_create(void)
745} 774}
746 775
747/** 776/**
748 * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs 777 * kobject_create_and_add() - Create a struct kobject dynamically and
749 * 778 * register it with sysfs.
750 * @name: the name for the kobject 779 * @name: the name for the kobject
751 * @parent: the parent kobject of this kobject, if any. 780 * @parent: the parent kobject of this kobject, if any.
752 * 781 *
@@ -777,7 +806,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
777EXPORT_SYMBOL_GPL(kobject_create_and_add); 806EXPORT_SYMBOL_GPL(kobject_create_and_add);
778 807
779/** 808/**
780 * kset_init - initialize a kset for use 809 * kset_init() - Initialize a kset for use.
781 * @k: kset 810 * @k: kset
782 */ 811 */
783void kset_init(struct kset *k) 812void kset_init(struct kset *k)
@@ -819,7 +848,7 @@ const struct sysfs_ops kobj_sysfs_ops = {
819EXPORT_SYMBOL_GPL(kobj_sysfs_ops); 848EXPORT_SYMBOL_GPL(kobj_sysfs_ops);
820 849
821/** 850/**
822 * kset_register - initialize and add a kset. 851 * kset_register() - Initialize and add a kset.
823 * @k: kset. 852 * @k: kset.
824 */ 853 */
825int kset_register(struct kset *k) 854int kset_register(struct kset *k)
@@ -839,7 +868,7 @@ int kset_register(struct kset *k)
839EXPORT_SYMBOL(kset_register); 868EXPORT_SYMBOL(kset_register);
840 869
841/** 870/**
842 * kset_unregister - remove a kset. 871 * kset_unregister() - Remove a kset.
843 * @k: kset. 872 * @k: kset.
844 */ 873 */
845void kset_unregister(struct kset *k) 874void kset_unregister(struct kset *k)
@@ -852,7 +881,7 @@ void kset_unregister(struct kset *k)
852EXPORT_SYMBOL(kset_unregister); 881EXPORT_SYMBOL(kset_unregister);
853 882
854/** 883/**
855 * kset_find_obj - search for object in kset. 884 * kset_find_obj() - Search for object in kset.
856 * @kset: kset we're looking in. 885 * @kset: kset we're looking in.
857 * @name: object's name. 886 * @name: object's name.
858 * 887 *
@@ -900,7 +929,7 @@ static struct kobj_type kset_ktype = {
900}; 929};
901 930
902/** 931/**
903 * kset_create - create a struct kset dynamically 932 * kset_create() - Create a struct kset dynamically.
904 * 933 *
905 * @name: the name for the kset 934 * @name: the name for the kset
906 * @uevent_ops: a struct kset_uevent_ops for the kset 935 * @uevent_ops: a struct kset_uevent_ops for the kset
@@ -944,7 +973,7 @@ static struct kset *kset_create(const char *name,
944} 973}
945 974
946/** 975/**
947 * kset_create_and_add - create a struct kset dynamically and add it to sysfs 976 * kset_create_and_add() - Create a struct kset dynamically and add it to sysfs.
948 * 977 *
949 * @name: the name for the kset 978 * @name: the name for the kset
950 * @uevent_ops: a struct kset_uevent_ops for the kset 979 * @uevent_ops: a struct kset_uevent_ops for the kset
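
The create_dir()/kobject_del() hunks above let a kobj_type carry attribute
groups that the kobject core now creates and removes automatically. A
minimal sketch of wiring one up, with hypothetical names throughout:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	return sprintf(buf, "bar\n");
}
static struct kobj_attribute foo_attr = __ATTR_RO(foo);

static struct attribute *example_attrs[] = {
	&foo_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* defines example_groups */

static struct kobj_type example_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= example_groups,	/* managed by kobject core */
	/* a real ktype also needs a .release() callback */
};
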
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index f05802687ba4..7998affa45d4 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -466,6 +466,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
466 int i = 0; 466 int i = 0;
467 int retval = 0; 467 int retval = 0;
468 468
469 /*
470 * Mark "remove" event done regardless of result, for some subsystems
471 * do not want to re-trigger "remove" event via automatic cleanup.
472 */
473 if (action == KOBJ_REMOVE)
474 kobj->state_remove_uevent_sent = 1;
475
469 pr_debug("kobject: '%s' (%p): %s\n", 476 pr_debug("kobject: '%s' (%p): %s\n",
470 kobject_name(kobj), kobj, __func__); 477 kobject_name(kobj), kobj, __func__);
471 478
@@ -567,10 +574,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
567 kobj->state_add_uevent_sent = 1; 574 kobj->state_add_uevent_sent = 1;
568 break; 575 break;
569 576
570 case KOBJ_REMOVE:
571 kobj->state_remove_uevent_sent = 1;
572 break;
573
574 case KOBJ_UNBIND: 577 case KOBJ_UNBIND:
575 zap_modalias_env(env); 578 zap_modalias_env(env);
576 break; 579 break;
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index f0a2934605bf..4e9829c4d64c 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -47,7 +47,6 @@ u32 crc32c(u32 crc, const void *address, unsigned int length)
47 int err; 47 int err;
48 48
49 shash->tfm = tfm; 49 shash->tfm = tfm;
50 shash->flags = 0;
51 *ctx = crc; 50 *ctx = crc;
52 51
53 err = crypto_shash_update(shash, address, length); 52 err = crypto_shash_update(shash, address, length);
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 85759928215b..06e900c5587b 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -7,33 +7,41 @@
7#include <linux/list_sort.h> 7#include <linux/list_sort.h>
8#include <linux/list.h> 8#include <linux/list.h>
9 9
10#define MAX_LIST_LENGTH_BITS 20 10typedef int __attribute__((nonnull(2,3))) (*cmp_func)(void *,
11 struct list_head const *, struct list_head const *);
11 12
12/* 13/*
13 * Returns a list organized in an intermediate format suited 14 * Returns a list organized in an intermediate format suited
14 * to chaining of merge() calls: null-terminated, no reserved or 15 * to chaining of merge() calls: null-terminated, no reserved or
15 * sentinel head node, "prev" links not maintained. 16 * sentinel head node, "prev" links not maintained.
16 */ 17 */
17static struct list_head *merge(void *priv, 18__attribute__((nonnull(2,3,4)))
18 int (*cmp)(void *priv, struct list_head *a, 19static struct list_head *merge(void *priv, cmp_func cmp,
19 struct list_head *b),
20 struct list_head *a, struct list_head *b) 20 struct list_head *a, struct list_head *b)
21{ 21{
22 struct list_head head, *tail = &head; 22 struct list_head *head, **tail = &head;
23 23
24 while (a && b) { 24 for (;;) {
25 /* if equal, take 'a' -- important for sort stability */ 25 /* if equal, take 'a' -- important for sort stability */
26 if ((*cmp)(priv, a, b) <= 0) { 26 if (cmp(priv, a, b) <= 0) {
27 tail->next = a; 27 *tail = a;
28 tail = &a->next;
28 a = a->next; 29 a = a->next;
30 if (!a) {
31 *tail = b;
32 break;
33 }
29 } else { 34 } else {
30 tail->next = b; 35 *tail = b;
36 tail = &b->next;
31 b = b->next; 37 b = b->next;
38 if (!b) {
39 *tail = a;
40 break;
41 }
32 } 42 }
33 tail = tail->next;
34 } 43 }
35 tail->next = a?:b; 44 return head;
36 return head.next;
37} 45}
38 46
39/* 47/*
@@ -43,44 +51,52 @@ static struct list_head *merge(void *priv,
43 * prev-link restoration pass, or maintaining the prev links 51 * prev-link restoration pass, or maintaining the prev links
44 * throughout. 52 * throughout.
45 */ 53 */
46static void merge_and_restore_back_links(void *priv, 54__attribute__((nonnull(2,3,4,5)))
47 int (*cmp)(void *priv, struct list_head *a, 55static void merge_final(void *priv, cmp_func cmp, struct list_head *head,
48 struct list_head *b), 56 struct list_head *a, struct list_head *b)
49 struct list_head *head,
50 struct list_head *a, struct list_head *b)
51{ 57{
52 struct list_head *tail = head; 58 struct list_head *tail = head;
53 u8 count = 0; 59 u8 count = 0;
54 60
55 while (a && b) { 61 for (;;) {
56 /* if equal, take 'a' -- important for sort stability */ 62 /* if equal, take 'a' -- important for sort stability */
57 if ((*cmp)(priv, a, b) <= 0) { 63 if (cmp(priv, a, b) <= 0) {
58 tail->next = a; 64 tail->next = a;
59 a->prev = tail; 65 a->prev = tail;
66 tail = a;
60 a = a->next; 67 a = a->next;
68 if (!a)
69 break;
61 } else { 70 } else {
62 tail->next = b; 71 tail->next = b;
63 b->prev = tail; 72 b->prev = tail;
73 tail = b;
64 b = b->next; 74 b = b->next;
75 if (!b) {
76 b = a;
77 break;
78 }
65 } 79 }
66 tail = tail->next;
67 } 80 }
68 tail->next = a ? : b;
69 81
82 /* Finish linking remainder of list b on to tail */
83 tail->next = b;
70 do { 84 do {
71 /* 85 /*
72 * In worst cases this loop may run many iterations. 86 * If the merge is highly unbalanced (e.g. the input is
87 * already sorted), this loop may run many iterations.
73 * Continue callbacks to the client even though no 88 * Continue callbacks to the client even though no
74 * element comparison is needed, so the client's cmp() 89 * element comparison is needed, so the client's cmp()
75 * routine can invoke cond_resched() periodically. 90 * routine can invoke cond_resched() periodically.
76 */ 91 */
77 if (unlikely(!(++count))) 92 if (unlikely(!++count))
78 (*cmp)(priv, tail->next, tail->next); 93 cmp(priv, b, b);
79 94 b->prev = tail;
80 tail->next->prev = tail; 95 tail = b;
81 tail = tail->next; 96 b = b->next;
82 } while (tail->next); 97 } while (b);
83 98
99 /* And the final links to make a circular doubly-linked list */
84 tail->next = head; 100 tail->next = head;
85 head->prev = tail; 101 head->prev = tail;
86} 102}
@@ -91,55 +107,149 @@ static void merge_and_restore_back_links(void *priv,
91 * @head: the list to sort 107 * @head: the list to sort
92 * @cmp: the elements comparison function 108 * @cmp: the elements comparison function
93 * 109 *
94 * This function implements "merge sort", which has O(nlog(n)) 110 * The comparison function @cmp must return > 0 if @a should sort after
95 * complexity. 111 * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
112 * sort before @b *or* their original order should be preserved. It is
113 * always called with the element that came first in the input in @a,
114 * and list_sort is a stable sort, so it is not necessary to distinguish
115 * the @a < @b and @a == @b cases.
116 *
117 * This is compatible with two styles of @cmp function:
118 * - The traditional style which returns <0 / =0 / >0, or
119 * - Returning a boolean 0/1.
120 * The latter offers a chance to save a few cycles in the comparison
121 * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
122 *
123 * A good way to write a multi-word comparison is
124 *   if (a->high != b->high)
125 *     return a->high > b->high;
126 *   if (a->middle != b->middle)
127 *     return a->middle > b->middle;
128 *   return a->low > b->low;
129 *
130 *
131 * This mergesort is as eager as possible while always performing at least
132 * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
133 * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
134 *
135 * Thus, it will avoid cache thrashing as long as 3*2^k elements can
136 * fit into the cache. Not quite as good as a fully-eager bottom-up
137 * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
138 * the common case that everything fits into L1.
139 *
140 *
141 * The merging is controlled by "count", the number of elements in the
142 * pending lists. This is beautifully simple code, but rather subtle.
96 * 143 *
97 * The comparison function @cmp must return a negative value if @a 144 * Each time we increment "count", we set one bit (bit k) and clear
98 * should sort before @b, and a positive value if @a should sort after 145 * bits k-1 .. 0. Each time this happens (except the very first time
99 * @b. If @a and @b are equivalent, and their original relative 146 * for each bit, when count increments to 2^k), we merge two lists of
100 * ordering is to be preserved, @cmp must return 0. 147 * size 2^k into one list of size 2^(k+1).
148 *
149 * This merge happens exactly when the count reaches an odd multiple of
150 * 2^k, which is when we have 2^k elements pending in smaller lists,
151 * so it's safe to merge away two lists of size 2^k.
152 *
153 * After this happens twice, we have created two lists of size 2^(k+1),
154 * which will be merged into a list of size 2^(k+2) before we create
155 * a third list of size 2^(k+1), so there are never more than two pending.
156 *
157 * The number of pending lists of size 2^k is determined by the
158 * state of bit k of "count" plus two extra pieces of information:
159 * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
160 * - Whether the higher-order bits are zero or non-zero (i.e.
161 * is count >= 2^(k+1)).
162 * There are six states we distinguish. "x" represents some arbitrary
163 * bits, and "y" represents some arbitrary non-zero bits:
164 * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
165 * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
166 * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
167 * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
168 * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
169 * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
170 * (merge and loop back to state 2)
171 *
172 * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
173 * bit k-1 is set while the more significant bits are non-zero) and
174 * merge them away in the 5->2 transition. Note in particular that just
175 * before the 5->2 transition, all lower-order bits are 11 (state 3),
176 * so there is one list of each smaller size.
177 *
178 * When we reach the end of the input, we merge all the pending
179 * lists, from smallest to largest. If you work through cases 2 to
180 * 5 above, you can see that the number of elements we merge with a list
181 * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
182 * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
101 */ 183 */
184__attribute__((nonnull(2,3)))
102void list_sort(void *priv, struct list_head *head, 185void list_sort(void *priv, struct list_head *head,
103 int (*cmp)(void *priv, struct list_head *a, 186 int (*cmp)(void *priv, struct list_head *a,
104 struct list_head *b)) 187 struct list_head *b))
105{ 188{
106 struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists 189 struct list_head *list = head->next, *pending = NULL;
107 -- last slot is a sentinel */ 190 size_t count = 0; /* Count of pending */
108 int lev; /* index into part[] */
109 int max_lev = 0;
110 struct list_head *list;
111 191
112 if (list_empty(head)) 192 if (list == head->prev) /* Zero or one elements */
113 return; 193 return;
114 194
115 memset(part, 0, sizeof(part)); 195 /* Convert to a null-terminated singly-linked list. */
116
117 head->prev->next = NULL; 196 head->prev->next = NULL;
118 list = head->next;
119
120 while (list) {
121 struct list_head *cur = list;
122 list = list->next;
123 cur->next = NULL;
124 197
125 for (lev = 0; part[lev]; lev++) { 198 /*
126 cur = merge(priv, cmp, part[lev], cur); 199 * Data structure invariants:
127 part[lev] = NULL; 200 * - All lists are singly linked and null-terminated; prev
128 } 201 * pointers are not maintained.
129 if (lev > max_lev) { 202 * - pending is a prev-linked "list of lists" of sorted
130 if (unlikely(lev >= ARRAY_SIZE(part)-1)) { 203 * sublists awaiting further merging.
131 printk_once(KERN_DEBUG "list too long for efficiency\n"); 204 * - Each of the sorted sublists is power-of-two in size.
132 lev--; 205 * - Sublists are sorted by size and age, smallest & newest at front.
133 } 206 * - There are zero to two sublists of each size.
134 max_lev = lev; 207 * - A pair of pending sublists are merged as soon as the number
208 * of following pending elements equals their size (i.e.
209 * each time count reaches an odd multiple of that size).
210 * That ensures each later final merge will be at worst 2:1.
211 * - Each round consists of:
212 * - Merging the two sublists selected by the highest bit
213 * which flips when count is incremented, and
214 * - Adding an element from the input as a size-1 sublist.
215 */
216 do {
217 size_t bits;
218 struct list_head **tail = &pending;
219
220 /* Find the least-significant clear bit in count */
221 for (bits = count; bits & 1; bits >>= 1)
222 tail = &(*tail)->prev;
223 /* Do the indicated merge */
224 if (likely(bits)) {
225 struct list_head *a = *tail, *b = a->prev;
226
227 a = merge(priv, (cmp_func)cmp, b, a);
228 /* Install the merged result in place of the inputs */
229 a->prev = b->prev;
230 *tail = a;
135 } 231 }
136 part[lev] = cur;
137 }
138 232
139 for (lev = 0; lev < max_lev; lev++) 233 /* Move one element from input list to pending */
140 if (part[lev]) 234 list->prev = pending;
141 list = merge(priv, cmp, part[lev], list); 235 pending = list;
142 236 list = list->next;
143 merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); 237 pending->next = NULL;
238 count++;
239 } while (list);
240
241 /* End of input; merge together all the pending lists. */
242 list = pending;
243 pending = pending->prev;
244 for (;;) {
245 struct list_head *next = pending->prev;
246
247 if (!next)
248 break;
249 list = merge(priv, (cmp_func)cmp, pending, list);
250 pending = next;
251 }
252 /* The final merge, rebuilding prev links */
253 merge_final(priv, (cmp_func)cmp, head, pending, list);
144} 254}
145EXPORT_SYMBOL(list_sort); 255EXPORT_SYMBOL(list_sort);
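The merge schedule described in the comments above is easiest to see by tabulating count: the lowest clear bit k of count selects a merge of the two pending sublists of size 2^k, after skipping the k smaller ones, which is exactly what the "bits" scan in list_sort() does. A user-space sketch of just that rule (assumed example, not part of the patch, no kernel headers needed):

	#include <stdio.h>

	int main(void)
	{
		for (unsigned int count = 0; count < 16; count++) {
			unsigned int bits = count, k = 0;

			while (bits & 1) {	/* walk past the k smaller sublists */
				bits >>= 1;
				k++;
			}
			if (bits)	/* same condition as "if (likely(bits))" */
				printf("count=%2u: merge two size-%u sublists\n",
				       count, 1u << k);
			else
				printf("count=%2u: no merge, add a size-1 sublist\n",
				       count);
		}
		return 0;
	}

For count = 2, 4, 6, ... this merges two size-1 sublists; for count = 5 or 13 it merges two size-2 sublists, and so on, keeping at most two pending sublists of each size.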
diff --git a/lib/math/Kconfig b/lib/math/Kconfig
new file mode 100644
index 000000000000..73bdf37178d1
--- /dev/null
+++ b/lib/math/Kconfig
@@ -0,0 +1,11 @@
1config CORDIC
2 tristate "CORDIC algorithm"
3 help
4 This option provides an implementation of the CORDIC algorithm;
5 calculations are in fixed point. Module will be called cordic.
6
7config PRIME_NUMBERS
8 tristate
9
10config RATIONAL
11 bool
diff --git a/lib/math/Makefile b/lib/math/Makefile
new file mode 100644
index 000000000000..583bbfebfc09
--- /dev/null
+++ b/lib/math/Makefile
@@ -0,0 +1,5 @@
1obj-y += div64.o gcd.o lcm.o int_pow.o int_sqrt.o reciprocal_div.o
2
3obj-$(CONFIG_CORDIC) += cordic.o
4obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
5obj-$(CONFIG_RATIONAL) += rational.o
diff --git a/lib/cordic.c b/lib/math/cordic.c
index 8ef27c12956f..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/math/cordic.c
diff --git a/lib/div64.c b/lib/math/div64.c
index ee146bb4c558..368ca7fd0d82 100644
--- a/lib/div64.c
+++ b/lib/math/div64.c
@@ -10,7 +10,7 @@
10 * Generic C version of 64bit/32bit division and modulo, with 10 * Generic C version of 64bit/32bit division and modulo, with
11 * 64bit result and 32bit remainder. 11 * 64bit result and 32bit remainder.
12 * 12 *
13 * The fast case for (n>>32 == 0) is handled inline by do_div(). 13 * The fast case for (n>>32 == 0) is handled inline by do_div().
14 * 14 *
15 * Code generated for this function might be very inefficient 15 * Code generated for this function might be very inefficient
16 * for some CPUs. __div64_32() can be overridden by linking arch-specific 16 * for some CPUs. __div64_32() can be overridden by linking arch-specific
diff --git a/lib/gcd.c b/lib/math/gcd.c
index 7948ab27f0a4..7948ab27f0a4 100644
--- a/lib/gcd.c
+++ b/lib/math/gcd.c
diff --git a/lib/math/int_pow.c b/lib/math/int_pow.c
new file mode 100644
index 000000000000..622fc1ab3c74
--- /dev/null
+++ b/lib/math/int_pow.c
@@ -0,0 +1,32 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * An integer based power function
4 *
5 * Derived from drivers/video/backlight/pwm_bl.c
6 */
7
8#include <linux/export.h>
9#include <linux/kernel.h>
10#include <linux/types.h>
11
12/**
13 * int_pow - computes the exponentiation of the given base and exponent
14 * @base: base which will be raised to the given power
15 * @exp: power to be raised to
16 *
17 * Computes: pow(base, exp), i.e. @base raised to the @exp power
18 */
19u64 int_pow(u64 base, unsigned int exp)
20{
21 u64 result = 1;
22
23 while (exp) {
24 if (exp & 1)
25 result *= base;
26 exp >>= 1;
27 base *= base;
28 }
29
30 return result;
31}
32EXPORT_SYMBOL_GPL(int_pow);
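The loop above is standard square-and-multiply: the base is squared each round and folded into the result whenever the corresponding exponent bit is set, so only O(log exp) multiplications are needed. A quick user-space sanity check of the same loop (assumed example; unsigned long long stands in for u64):

	#include <assert.h>

	static unsigned long long pow_check(unsigned long long base, unsigned int exp)
	{
		unsigned long long result = 1;

		while (exp) {
			if (exp & 1)
				result *= base;
			exp >>= 1;
			base *= base;
		}
		return result;
	}

	int main(void)
	{
		assert(pow_check(3, 5) == 243);		/* exp bits 101: 3 * 81 */
		assert(pow_check(2, 10) == 1024);
		assert(pow_check(7, 0) == 1);		/* empty product */
		return 0;
	}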
diff --git a/lib/int_sqrt.c b/lib/math/int_sqrt.c
index 30e0f9770f88..30e0f9770f88 100644
--- a/lib/int_sqrt.c
+++ b/lib/math/int_sqrt.c
diff --git a/lib/lcm.c b/lib/math/lcm.c
index 03d7fcb420b5..03d7fcb420b5 100644
--- a/lib/lcm.c
+++ b/lib/math/lcm.c
diff --git a/lib/prime_numbers.c b/lib/math/prime_numbers.c
index 550eec457c2e..550eec457c2e 100644
--- a/lib/prime_numbers.c
+++ b/lib/math/prime_numbers.c
diff --git a/lib/rational.c b/lib/math/rational.c
index ba7443677c90..ba7443677c90 100644
--- a/lib/rational.c
+++ b/lib/math/rational.c
diff --git a/lib/reciprocal_div.c b/lib/math/reciprocal_div.c
index bf043258fa00..bf043258fa00 100644
--- a/lib/reciprocal_div.c
+++ b/lib/math/reciprocal_div.c
diff --git a/lib/nlattr.c b/lib/nlattr.c
index d26de6156b97..cace9b307781 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -69,7 +69,8 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
69 69
70static int nla_validate_array(const struct nlattr *head, int len, int maxtype, 70static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
71 const struct nla_policy *policy, 71 const struct nla_policy *policy,
72 struct netlink_ext_ack *extack) 72 struct netlink_ext_ack *extack,
73 unsigned int validate)
73{ 74{
74 const struct nlattr *entry; 75 const struct nlattr *entry;
75 int rem; 76 int rem;
@@ -86,8 +87,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
86 return -ERANGE; 87 return -ERANGE;
87 } 88 }
88 89
89 ret = nla_validate(nla_data(entry), nla_len(entry), 90 ret = __nla_validate(nla_data(entry), nla_len(entry),
90 maxtype, policy, extack); 91 maxtype, policy, validate, extack);
91 if (ret < 0) 92 if (ret < 0)
92 return ret; 93 return ret;
93 } 94 }
@@ -154,13 +155,17 @@ static int nla_validate_int_range(const struct nla_policy *pt,
154} 155}
155 156
156static int validate_nla(const struct nlattr *nla, int maxtype, 157static int validate_nla(const struct nlattr *nla, int maxtype,
157 const struct nla_policy *policy, 158 const struct nla_policy *policy, unsigned int validate,
158 struct netlink_ext_ack *extack) 159 struct netlink_ext_ack *extack)
159{ 160{
161 u16 strict_start_type = policy[0].strict_start_type;
160 const struct nla_policy *pt; 162 const struct nla_policy *pt;
161 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); 163 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
162 int err = -ERANGE; 164 int err = -ERANGE;
163 165
166 if (strict_start_type && type >= strict_start_type)
167 validate |= NL_VALIDATE_STRICT;
168
164 if (type <= 0 || type > maxtype) 169 if (type <= 0 || type > maxtype)
165 return 0; 170 return 0;
166 171
@@ -172,6 +177,26 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
172 (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) { 177 (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
173 pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", 178 pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
174 current->comm, type); 179 current->comm, type);
180 if (validate & NL_VALIDATE_STRICT_ATTRS) {
181 NL_SET_ERR_MSG_ATTR(extack, nla,
182 "invalid attribute length");
183 return -EINVAL;
184 }
185 }
186
187 if (validate & NL_VALIDATE_NESTED) {
188 if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
189 !(nla->nla_type & NLA_F_NESTED)) {
190 NL_SET_ERR_MSG_ATTR(extack, nla,
191 "NLA_F_NESTED is missing");
192 return -EINVAL;
193 }
194 if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
195 pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
196 NL_SET_ERR_MSG_ATTR(extack, nla,
197 "NLA_F_NESTED not expected");
198 return -EINVAL;
199 }
175 } 200 }
176 201
177 switch (pt->type) { 202 switch (pt->type) {
@@ -244,8 +269,9 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
244 if (attrlen < NLA_HDRLEN) 269 if (attrlen < NLA_HDRLEN)
245 goto out_err; 270 goto out_err;
246 if (pt->validation_data) { 271 if (pt->validation_data) {
247 err = nla_validate(nla_data(nla), nla_len(nla), pt->len, 272 err = __nla_validate(nla_data(nla), nla_len(nla), pt->len,
248 pt->validation_data, extack); 273 pt->validation_data, validate,
274 extack);
249 if (err < 0) { 275 if (err < 0) {
250 /* 276 /*
251 * return directly to preserve the inner 277 * return directly to preserve the inner
@@ -268,7 +294,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
268 294
269 err = nla_validate_array(nla_data(nla), nla_len(nla), 295 err = nla_validate_array(nla_data(nla), nla_len(nla),
270 pt->len, pt->validation_data, 296 pt->len, pt->validation_data,
271 extack); 297 extack, validate);
272 if (err < 0) { 298 if (err < 0) {
273 /* 299 /*
274 * return directly to preserve the inner 300 * return directly to preserve the inner
@@ -278,10 +304,23 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
278 } 304 }
279 } 305 }
280 break; 306 break;
307
308 case NLA_UNSPEC:
309 if (validate & NL_VALIDATE_UNSPEC) {
310 NL_SET_ERR_MSG_ATTR(extack, nla,
311 "Unsupported attribute");
312 return -EINVAL;
313 }
314 /* fall through */
315 case NLA_MIN_LEN:
316 if (attrlen < pt->len)
317 goto out_err;
318 break;
319
281 default: 320 default:
282 if (pt->len) 321 if (pt->len)
283 minlen = pt->len; 322 minlen = pt->len;
284 else if (pt->type != NLA_UNSPEC) 323 else
285 minlen = nla_attr_minlen[pt->type]; 324 minlen = nla_attr_minlen[pt->type];
286 325
287 if (attrlen < minlen) 326 if (attrlen < minlen)
@@ -315,37 +354,76 @@ out_err:
315 return err; 354 return err;
316} 355}
317 356
357static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype,
358 const struct nla_policy *policy,
359 unsigned int validate,
360 struct netlink_ext_ack *extack,
361 struct nlattr **tb)
362{
363 const struct nlattr *nla;
364 int rem;
365
366 if (tb)
367 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
368
369 nla_for_each_attr(nla, head, len, rem) {
370 u16 type = nla_type(nla);
371
372 if (type == 0 || type > maxtype) {
373 if (validate & NL_VALIDATE_MAXTYPE) {
374 NL_SET_ERR_MSG_ATTR(extack, nla,
375 "Unknown attribute type");
376 return -EINVAL;
377 }
378 continue;
379 }
380 if (policy) {
381 int err = validate_nla(nla, maxtype, policy,
382 validate, extack);
383
384 if (err < 0)
385 return err;
386 }
387
388 if (tb)
389 tb[type] = (struct nlattr *)nla;
390 }
391
392 if (unlikely(rem > 0)) {
393 pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
394 rem, current->comm);
395 NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
396 if (validate & NL_VALIDATE_TRAILING)
397 return -EINVAL;
398 }
399
400 return 0;
401}
402
318/** 403/**
319 * nla_validate - Validate a stream of attributes 404 * __nla_validate - Validate a stream of attributes
320 * @head: head of attribute stream 405 * @head: head of attribute stream
321 * @len: length of attribute stream 406 * @len: length of attribute stream
322 * @maxtype: maximum attribute type to be expected 407 * @maxtype: maximum attribute type to be expected
323 * @policy: validation policy 408 * @policy: validation policy
409 * @validate: validation strictness
324 * @extack: extended ACK report struct 410 * @extack: extended ACK report struct
325 * 411 *
326 * Validates all attributes in the specified attribute stream against the 412 * Validates all attributes in the specified attribute stream against the
327 * specified policy. Attributes with a type exceeding maxtype will be 413 * specified policy. Validation depends on the validate flags passed, see
328 * ignored. See documenation of struct nla_policy for more details. 414 * &enum netlink_validation for more details on that.
415 * See documentation of struct nla_policy for more details.
329 * 416 *
330 * Returns 0 on success or a negative error code. 417 * Returns 0 on success or a negative error code.
331 */ 418 */
332int nla_validate(const struct nlattr *head, int len, int maxtype, 419int __nla_validate(const struct nlattr *head, int len, int maxtype,
333 const struct nla_policy *policy, 420 const struct nla_policy *policy, unsigned int validate,
334 struct netlink_ext_ack *extack) 421 struct netlink_ext_ack *extack)
335{ 422{
336 const struct nlattr *nla; 423 return __nla_validate_parse(head, len, maxtype, policy, validate,
337 int rem; 424 extack, NULL);
338
339 nla_for_each_attr(nla, head, len, rem) {
340 int err = validate_nla(nla, maxtype, policy, extack);
341
342 if (err < 0)
343 return err;
344 }
345
346 return 0;
347} 425}
348EXPORT_SYMBOL(nla_validate); 426EXPORT_SYMBOL(__nla_validate);
349 427
350/** 428/**
351 * nla_policy_len - Determine the max. length of a policy 429
@@ -377,76 +455,30 @@ nla_policy_len(const struct nla_policy *p, int n)
377EXPORT_SYMBOL(nla_policy_len); 455EXPORT_SYMBOL(nla_policy_len);
378 456
379/** 457/**
380 * nla_parse - Parse a stream of attributes into a tb buffer 458 * __nla_parse - Parse a stream of attributes into a tb buffer
381 * @tb: destination array with maxtype+1 elements 459 * @tb: destination array with maxtype+1 elements
382 * @maxtype: maximum attribute type to be expected 460 * @maxtype: maximum attribute type to be expected
383 * @head: head of attribute stream 461 * @head: head of attribute stream
384 * @len: length of attribute stream 462 * @len: length of attribute stream
385 * @policy: validation policy 463 * @policy: validation policy
464 * @validate: validation strictness
465 * @extack: extended ACK pointer
386 * 466 *
387 * Parses a stream of attributes and stores a pointer to each attribute in 467 * Parses a stream of attributes and stores a pointer to each attribute in
388 * the tb array accessible via the attribute type. Attributes with a type 468 * the tb array accessible via the attribute type.
389 * exceeding maxtype will be silently ignored for backwards compatibility 469 * Validation is controlled by the @validate parameter.
390 * reasons. policy may be set to NULL if no validation is required.
391 * 470 *
392 * Returns 0 on success or a negative error code. 471 * Returns 0 on success or a negative error code.
393 */ 472 */
394static int __nla_parse(struct nlattr **tb, int maxtype, 473int __nla_parse(struct nlattr **tb, int maxtype,
395 const struct nlattr *head, int len, 474 const struct nlattr *head, int len,
396 bool strict, const struct nla_policy *policy, 475 const struct nla_policy *policy, unsigned int validate,
397 struct netlink_ext_ack *extack) 476 struct netlink_ext_ack *extack)
398{
399 const struct nlattr *nla;
400 int rem;
401
402 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
403
404 nla_for_each_attr(nla, head, len, rem) {
405 u16 type = nla_type(nla);
406
407 if (type == 0 || type > maxtype) {
408 if (strict) {
409 NL_SET_ERR_MSG(extack, "Unknown attribute type");
410 return -EINVAL;
411 }
412 continue;
413 }
414 if (policy) {
415 int err = validate_nla(nla, maxtype, policy, extack);
416
417 if (err < 0)
418 return err;
419 }
420
421 tb[type] = (struct nlattr *)nla;
422 }
423
424 if (unlikely(rem > 0)) {
425 pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
426 rem, current->comm);
427 NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
428 if (strict)
429 return -EINVAL;
430 }
431
432 return 0;
433}
434
435int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
436 int len, const struct nla_policy *policy,
437 struct netlink_ext_ack *extack)
438{
439 return __nla_parse(tb, maxtype, head, len, false, policy, extack);
440}
441EXPORT_SYMBOL(nla_parse);
442
443int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
444 int len, const struct nla_policy *policy,
445 struct netlink_ext_ack *extack)
446{ 477{
447 return __nla_parse(tb, maxtype, head, len, true, policy, extack); 478 return __nla_validate_parse(head, len, maxtype, policy, validate,
479 extack, tb);
448} 480}
449EXPORT_SYMBOL(nla_parse_strict); 481EXPORT_SYMBOL(__nla_parse);
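A hypothetical caller sketch for the new entry point, doing strict parsing of a message's attributes; MYDRV_ATTR_MAX, mydrv_policy, nlh and extack are invented or assumed to be in scope, while the validate flags are the ones handled above:

	struct nlattr *tb[MYDRV_ATTR_MAX + 1];
	int err;

	err = __nla_parse(tb, MYDRV_ATTR_MAX, nlmsg_attrdata(nlh, 0),
			  nlmsg_attrlen(nlh, 0), mydrv_policy,
			  NL_VALIDATE_STRICT, extack);
	if (err)
		return err;	/* e.g. unknown attribute type or trailing bytes */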
450 482
451/** 483/**
452 * nla_find - Find a specific attribute in a stream of attributes 484 * nla_find - Find a specific attribute in a stream of attributes
diff --git a/lib/packing.c b/lib/packing.c
new file mode 100644
index 000000000000..50d1e9f2f5a7
--- /dev/null
+++ b/lib/packing.c
@@ -0,0 +1,213 @@
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2016-2018, NXP Semiconductors
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5#include <linux/packing.h>
6#include <linux/module.h>
7#include <linux/bitops.h>
8#include <linux/errno.h>
9#include <linux/types.h>
10
11static int get_le_offset(int offset)
12{
13 int closest_multiple_of_4;
14
15 closest_multiple_of_4 = (offset / 4) * 4;
16 offset -= closest_multiple_of_4;
17 return closest_multiple_of_4 + (3 - offset);
18}
19
20static int get_reverse_lsw32_offset(int offset, size_t len)
21{
22 int closest_multiple_of_4;
23 int word_index;
24
25 word_index = offset / 4;
26 closest_multiple_of_4 = word_index * 4;
27 offset -= closest_multiple_of_4;
28 word_index = (len / 4) - word_index - 1;
29 return word_index * 4 + offset;
30}
31
32static u64 bit_reverse(u64 val, unsigned int width)
33{
34 u64 new_val = 0;
35 unsigned int bit;
36 unsigned int i;
37
38 for (i = 0; i < width; i++) {
39 bit = (val & (1 << i)) != 0;
40 new_val |= (bit << (width - i - 1));
41 }
42 return new_val;
43}
44
45static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
46 int *box_end_bit, u8 *box_mask)
47{
48 int box_bit_width = *box_start_bit - *box_end_bit + 1;
49 int new_box_start_bit, new_box_end_bit;
50
51 *to_write >>= *box_end_bit;
52 *to_write = bit_reverse(*to_write, box_bit_width);
53 *to_write <<= *box_end_bit;
54
55 new_box_end_bit = box_bit_width - *box_start_bit - 1;
56 new_box_start_bit = box_bit_width - *box_end_bit - 1;
57 *box_mask = GENMASK_ULL(new_box_start_bit, new_box_end_bit);
58 *box_start_bit = new_box_start_bit;
59 *box_end_bit = new_box_end_bit;
60}
61
62/**
63 * packing - Convert numbers (currently u64) between a packed and an unpacked
64 * format. Unpacked means laid out in memory in the CPU's native
65 * understanding of integers, while packed means anything else that
66 * requires translation.
67 *
68 * @pbuf: Pointer to a buffer holding the packed value.
69 * @uval: Pointer to an u64 holding the unpacked value.
70 * @startbit: The index (in logical notation, compensated for quirks) where
71 * the packed value starts within pbuf. Must be larger than, or
72 * equal to, endbit.
73 * @endbit: The index (in logical notation, compensated for quirks) where
74 * the packed value ends within pbuf. Must be smaller than, or equal
75 * to, startbit.
76 * @op: If PACK, then uval will be treated as const pointer and copied (packed)
77 * into pbuf, between startbit and endbit.
78 * If UNPACK, then pbuf will be treated as const pointer and the logical
79 * value between startbit and endbit will be copied (unpacked) to uval.
80 * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
81 * QUIRK_MSB_ON_THE_RIGHT.
82 *
83 * Return: 0 on success, -EINVAL or -ERANGE if called incorrectly. Assuming
84 * correct usage, return code may be discarded.
85 * If op is PACK, pbuf is modified.
86 * If op is UNPACK, uval is modified.
87 */
88int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
89 enum packing_op op, u8 quirks)
90{
91 /* Number of bits for storing "uval"
92 * also width of the field to access in the pbuf
93 */
94 u64 value_width;
95 /* Logical byte indices corresponding to the
96 * start and end of the field.
97 */
98 int plogical_first_u8, plogical_last_u8, box;
99
100 /* startbit is expected to be larger than endbit */
101 if (startbit < endbit)
102 /* Invalid function call */
103 return -EINVAL;
104
105 value_width = startbit - endbit + 1;
106 if (value_width > 64)
107 return -ERANGE;
108
109 /* Check if "uval" fits in "value_width" bits.
110 * If value_width is 64, the check will fail, but any
111 * 64-bit uval will surely fit.
112 */
113 if (op == PACK && value_width < 64 && (*uval >= (1ull << value_width)))
114 /* Cannot store "uval" inside "value_width" bits.
115 * Truncating "uval" is most certainly not desirable,
116 * so simply erroring out is appropriate.
117 */
118 return -ERANGE;
119
120 /* Initialize parameter */
121 if (op == UNPACK)
122 *uval = 0;
123
124 /* Iterate through an idealistic view of the pbuf as an u64 with
125 * no quirks, u8 by u8 (aligned at u8 boundaries), from high to low
126 * logical bit significance. "box" denotes the current logical u8.
127 */
128 plogical_first_u8 = startbit / 8;
129 plogical_last_u8 = endbit / 8;
130
131 for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
132 /* Bit indices into the currently accessed 8-bit box */
133 int box_start_bit, box_end_bit, box_addr;
134 u8 box_mask;
135 /* Corresponding bits from the unpacked u64 parameter */
136 int proj_start_bit, proj_end_bit;
137 u64 proj_mask;
138
139 /* This u8 may need to be accessed in its entirety
140 * (from bit 7 to bit 0), or not, depending on the
141 * input arguments startbit and endbit.
142 */
143 if (box == plogical_first_u8)
144 box_start_bit = startbit % 8;
145 else
146 box_start_bit = 7;
147 if (box == plogical_last_u8)
148 box_end_bit = endbit % 8;
149 else
150 box_end_bit = 0;
151
152 /* We have determined the box bit start and end.
153 * Now we calculate where this (masked) u8 box would fit
154 * in the unpacked (CPU-readable) u64 - the u8 box's
155 * projection onto the unpacked u64. Though the
156 * box is u8, the projection is u64 because it may fall
157 * anywhere within the unpacked u64.
158 */
159 proj_start_bit = ((box * 8) + box_start_bit) - endbit;
160 proj_end_bit = ((box * 8) + box_end_bit) - endbit;
161 proj_mask = GENMASK_ULL(proj_start_bit, proj_end_bit);
162 box_mask = GENMASK_ULL(box_start_bit, box_end_bit);
163
164 /* Determine the offset of the u8 box inside the pbuf,
165 * adjusted for quirks. The adjusted box_addr will be used for
166 * effective addressing inside the pbuf (so it's not
167 * logical any longer).
168 */
169 box_addr = pbuflen - box - 1;
170 if (quirks & QUIRK_LITTLE_ENDIAN)
171 box_addr = get_le_offset(box_addr);
172 if (quirks & QUIRK_LSW32_IS_FIRST)
173 box_addr = get_reverse_lsw32_offset(box_addr,
174 pbuflen);
175
176 if (op == UNPACK) {
177 u64 pval;
178
179 /* Read from pbuf, write to uval */
180 pval = ((u8 *)pbuf)[box_addr] & box_mask;
181 if (quirks & QUIRK_MSB_ON_THE_RIGHT)
182 adjust_for_msb_right_quirk(&pval,
183 &box_start_bit,
184 &box_end_bit,
185 &box_mask);
186
187 pval >>= box_end_bit;
188 pval <<= proj_end_bit;
189 *uval &= ~proj_mask;
190 *uval |= pval;
191 } else {
192 u64 pval;
193
194 /* Write to pbuf, read from uval */
195 pval = (*uval) & proj_mask;
196 pval >>= proj_end_bit;
197 if (quirks & QUIRK_MSB_ON_THE_RIGHT)
198 adjust_for_msb_right_quirk(&pval,
199 &box_start_bit,
200 &box_end_bit,
201 &box_mask);
202
203 pval <<= box_end_bit;
204 ((u8 *)pbuf)[box_addr] &= ~box_mask;
205 ((u8 *)pbuf)[box_addr] |= pval;
206 }
207 }
208 return 0;
209}
210EXPORT_SYMBOL(packing);
211
212MODULE_LICENSE("GPL v2");
213MODULE_DESCRIPTION("Generic bitfield packing and unpacking");
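A minimal usage sketch for packing(): pack a value into an invented field at bits 47:32 of an 8-byte register image, then unpack it again. The buffer name and field boundaries are illustrative only:

	#include <linux/packing.h>

	u8 regbuf[8] = {0};
	u64 val = 0x1234;
	int err;

	err = packing(regbuf, &val, 47, 32, sizeof(regbuf), PACK, 0);
	if (err)
		return err;	/* -EINVAL or -ERANGE on misuse */

	val = 0;
	err = packing(regbuf, &val, 47, 32, sizeof(regbuf), UNPACK, 0);
	/* val is 0x1234 again; with quirks == 0 the layout is big endian */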
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9877682e49c7..da54318d3b55 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -151,7 +151,7 @@ static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
151 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count); 151 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
152 152
153 WARN_ONCE(atomic_long_read(&ref->count) <= 0, 153 WARN_ONCE(atomic_long_read(&ref->count) <= 0,
154 "percpu ref (%pf) <= 0 (%ld) after switching to atomic", 154 "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
155 ref->release, atomic_long_read(&ref->count)); 155 ref->release, atomic_long_read(&ref->count));
156 156
157 /* @ref is viewed as dead on all CPUs, send out switch confirmation */ 157 /* @ref is viewed as dead on all CPUs, send out switch confirmation */
@@ -333,7 +333,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
333 spin_lock_irqsave(&percpu_ref_switch_lock, flags); 333 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
334 334
335 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD, 335 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
336 "%s called more than once on %pf!", __func__, ref->release); 336 "%s called more than once on %ps!", __func__, ref->release);
337 337
338 ref->percpu_count_ptr |= __PERCPU_REF_DEAD; 338 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
339 __percpu_ref_switch_mode(ref, confirm_kill); 339 __percpu_ref_switch_mode(ref, confirm_kill);
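For context on the format-string change above: %pf and %ps both print the symbol name of a function pointer, and %pf is deprecated in favour of %ps, so only the specifier changes, e.g. (illustrative line reusing the ref->release pointer from the code above):

	pr_info("release callback: %ps\n", ref->release);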
diff --git a/lib/plist.c b/lib/plist.c
index 199408f91057..d3bd8827186f 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -26,7 +26,7 @@
26#include <linux/bug.h> 26#include <linux/bug.h>
27#include <linux/plist.h> 27#include <linux/plist.h>
28 28
29#ifdef CONFIG_DEBUG_PI_LIST 29#ifdef CONFIG_DEBUG_PLIST
30 30
31static struct plist_head test_head; 31static struct plist_head test_head;
32 32
@@ -173,7 +173,7 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
173 plist_check_head(head); 173 plist_check_head(head);
174} 174}
175 175
176#ifdef CONFIG_DEBUG_PI_LIST 176#ifdef CONFIG_DEBUG_PLIST
177#include <linux/sched.h> 177#include <linux/sched.h>
178#include <linux/sched/clock.h> 178#include <linux/sched/clock.h>
179#include <linux/module.h> 179#include <linux/module.h>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 97f59abc3e92..6529fe1b45c1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -31,11 +31,10 @@
31 31
32#define HASH_DEFAULT_SIZE 64UL 32#define HASH_DEFAULT_SIZE 64UL
33#define HASH_MIN_SIZE 4U 33#define HASH_MIN_SIZE 4U
34#define BUCKET_LOCKS_PER_CPU 32UL
35 34
36union nested_table { 35union nested_table {
37 union nested_table __rcu *table; 36 union nested_table __rcu *table;
38 struct rhash_head __rcu *bucket; 37 struct rhash_lock_head __rcu *bucket;
39}; 38};
40 39
41static u32 head_hashfn(struct rhashtable *ht, 40static u32 head_hashfn(struct rhashtable *ht,
@@ -56,9 +55,11 @@ EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
56 55
57int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) 56int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
58{ 57{
59 spinlock_t *lock = rht_bucket_lock(tbl, hash); 58 if (!debug_locks)
60 59 return 1;
61 return (debug_locks) ? lockdep_is_held(lock) : 1; 60 if (unlikely(tbl->nest))
61 return 1;
62 return bit_spin_is_locked(0, (unsigned long *)&tbl->buckets[hash]);
62} 63}
63EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); 64EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
64#else 65#else
@@ -104,7 +105,6 @@ static void bucket_table_free(const struct bucket_table *tbl)
104 if (tbl->nest) 105 if (tbl->nest)
105 nested_bucket_table_free(tbl); 106 nested_bucket_table_free(tbl);
106 107
107 free_bucket_spinlocks(tbl->locks);
108 kvfree(tbl); 108 kvfree(tbl);
109} 109}
110 110
@@ -131,9 +131,11 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
131 INIT_RHT_NULLS_HEAD(ntbl[i].bucket); 131 INIT_RHT_NULLS_HEAD(ntbl[i].bucket);
132 } 132 }
133 133
134 rcu_assign_pointer(*prev, ntbl); 134 if (cmpxchg(prev, NULL, ntbl) == NULL)
135 135 return ntbl;
136 return ntbl; 136 /* Raced with another thread. */
137 kfree(ntbl);
138 return rcu_dereference(*prev);
137} 139}
138 140
139static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, 141static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
@@ -169,11 +171,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
169 gfp_t gfp) 171 gfp_t gfp)
170{ 172{
171 struct bucket_table *tbl = NULL; 173 struct bucket_table *tbl = NULL;
172 size_t size, max_locks; 174 size_t size;
173 int i; 175 int i;
176 static struct lock_class_key __key;
174 177
175 size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); 178 tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
176 tbl = kvzalloc(size, gfp);
177 179
178 size = nbuckets; 180 size = nbuckets;
179 181
@@ -185,18 +187,11 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
185 if (tbl == NULL) 187 if (tbl == NULL)
186 return NULL; 188 return NULL;
187 189
188 tbl->size = size; 190 lockdep_init_map(&tbl->dep_map, "rhashtable_bucket", &__key, 0);
189
190 max_locks = size >> 1;
191 if (tbl->nest)
192 max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
193 191
194 if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks, 192 tbl->size = size;
195 ht->p.locks_mul, gfp) < 0) {
196 bucket_table_free(tbl);
197 return NULL;
198 }
199 193
194 rcu_head_init(&tbl->rcu);
200 INIT_LIST_HEAD(&tbl->walkers); 195 INIT_LIST_HEAD(&tbl->walkers);
201 196
202 tbl->hash_rnd = get_random_u32(); 197 tbl->hash_rnd = get_random_u32();
@@ -220,14 +215,15 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
220 return new_tbl; 215 return new_tbl;
221} 216}
222 217
223static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash) 218static int rhashtable_rehash_one(struct rhashtable *ht,
219 struct rhash_lock_head __rcu **bkt,
220 unsigned int old_hash)
224{ 221{
225 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 222 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
226 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl); 223 struct bucket_table *new_tbl = rhashtable_last_table(ht, old_tbl);
227 struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
228 int err = -EAGAIN; 224 int err = -EAGAIN;
229 struct rhash_head *head, *next, *entry; 225 struct rhash_head *head, *next, *entry;
230 spinlock_t *new_bucket_lock; 226 struct rhash_head __rcu **pprev = NULL;
231 unsigned int new_hash; 227 unsigned int new_hash;
232 228
233 if (new_tbl->nest) 229 if (new_tbl->nest)
@@ -235,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
235 231
236 err = -ENOENT; 232 err = -ENOENT;
237 233
238 rht_for_each(entry, old_tbl, old_hash) { 234 rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
235 old_tbl, old_hash) {
239 err = 0; 236 err = 0;
240 next = rht_dereference_bucket(entry->next, old_tbl, old_hash); 237 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
241 238
@@ -250,18 +247,19 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
250 247
251 new_hash = head_hashfn(ht, new_tbl, entry); 248 new_hash = head_hashfn(ht, new_tbl, entry);
252 249
253 new_bucket_lock = rht_bucket_lock(new_tbl, new_hash); 250 rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
254 251
255 spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); 252 head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
256 head = rht_dereference_bucket(new_tbl->buckets[new_hash],
257 new_tbl, new_hash);
258 253
259 RCU_INIT_POINTER(entry->next, head); 254 RCU_INIT_POINTER(entry->next, head);
260 255
261 rcu_assign_pointer(new_tbl->buckets[new_hash], entry); 256 rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
262 spin_unlock(new_bucket_lock);
263 257
264 rcu_assign_pointer(*pprev, next); 258 if (pprev)
259 rcu_assign_pointer(*pprev, next);
260 else
261 /* Need to preserve the bit lock. */
262 rht_assign_locked(bkt, next);
265 263
266out: 264out:
267 return err; 265 return err;
@@ -271,20 +269,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
271 unsigned int old_hash) 269 unsigned int old_hash)
272{ 270{
273 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 271 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
274 spinlock_t *old_bucket_lock; 272 struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
275 int err; 273 int err;
276 274
277 old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); 275 if (!bkt)
276 return 0;
277 rht_lock(old_tbl, bkt);
278 278
279 spin_lock_bh(old_bucket_lock); 279 while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
280 while (!(err = rhashtable_rehash_one(ht, old_hash)))
281 ; 280 ;
282 281
283 if (err == -ENOENT) { 282 if (err == -ENOENT)
284 old_tbl->rehash++;
285 err = 0; 283 err = 0;
286 } 284 rht_unlock(old_tbl, bkt);
287 spin_unlock_bh(old_bucket_lock);
288 285
289 return err; 286 return err;
290} 287}
@@ -330,13 +327,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
330 spin_lock(&ht->lock); 327 spin_lock(&ht->lock);
331 list_for_each_entry(walker, &old_tbl->walkers, list) 328 list_for_each_entry(walker, &old_tbl->walkers, list)
332 walker->tbl = NULL; 329 walker->tbl = NULL;
333 spin_unlock(&ht->lock);
334 330
335 /* Wait for readers. All new readers will see the new 331 /* Wait for readers. All new readers will see the new
336 * table, and thus no references to the old table will 332 * table, and thus no references to the old table will
337 * remain. 333 * remain.
334 * We do this inside the locked region so that
335 * rhashtable_walk_stop() can use rcu_head_after_call_rcu()
336 * to check if it should not re-link the table.
338 */ 337 */
339 call_rcu(&old_tbl->rcu, bucket_table_free_rcu); 338 call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
339 spin_unlock(&ht->lock);
340 340
341 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; 341 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
342} 342}
@@ -478,6 +478,7 @@ fail:
478} 478}
479 479
480static void *rhashtable_lookup_one(struct rhashtable *ht, 480static void *rhashtable_lookup_one(struct rhashtable *ht,
481 struct rhash_lock_head __rcu **bkt,
481 struct bucket_table *tbl, unsigned int hash, 482 struct bucket_table *tbl, unsigned int hash,
482 const void *key, struct rhash_head *obj) 483 const void *key, struct rhash_head *obj)
483{ 484{
@@ -485,13 +486,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
485 .ht = ht, 486 .ht = ht,
486 .key = key, 487 .key = key,
487 }; 488 };
488 struct rhash_head __rcu **pprev; 489 struct rhash_head __rcu **pprev = NULL;
489 struct rhash_head *head; 490 struct rhash_head *head;
490 int elasticity; 491 int elasticity;
491 492
492 elasticity = RHT_ELASTICITY; 493 elasticity = RHT_ELASTICITY;
493 pprev = rht_bucket_var(tbl, hash); 494 rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
494 rht_for_each_continue(head, *pprev, tbl, hash) {
495 struct rhlist_head *list; 495 struct rhlist_head *list;
496 struct rhlist_head *plist; 496 struct rhlist_head *plist;
497 497
@@ -513,7 +513,11 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
513 RCU_INIT_POINTER(list->next, plist); 513 RCU_INIT_POINTER(list->next, plist);
514 head = rht_dereference_bucket(head->next, tbl, hash); 514 head = rht_dereference_bucket(head->next, tbl, hash);
515 RCU_INIT_POINTER(list->rhead.next, head); 515 RCU_INIT_POINTER(list->rhead.next, head);
516 rcu_assign_pointer(*pprev, obj); 516 if (pprev)
517 rcu_assign_pointer(*pprev, obj);
518 else
519 /* Need to preserve the bit lock */
520 rht_assign_locked(bkt, obj);
517 521
518 return NULL; 522 return NULL;
519 } 523 }
@@ -525,12 +529,12 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
525} 529}
526 530
527static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, 531static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
532 struct rhash_lock_head __rcu **bkt,
528 struct bucket_table *tbl, 533 struct bucket_table *tbl,
529 unsigned int hash, 534 unsigned int hash,
530 struct rhash_head *obj, 535 struct rhash_head *obj,
531 void *data) 536 void *data)
532{ 537{
533 struct rhash_head __rcu **pprev;
534 struct bucket_table *new_tbl; 538 struct bucket_table *new_tbl;
535 struct rhash_head *head; 539 struct rhash_head *head;
536 540
@@ -553,11 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
553 if (unlikely(rht_grow_above_100(ht, tbl))) 557 if (unlikely(rht_grow_above_100(ht, tbl)))
554 return ERR_PTR(-EAGAIN); 558 return ERR_PTR(-EAGAIN);
555 559
556 pprev = rht_bucket_insert(ht, tbl, hash); 560 head = rht_ptr(bkt, tbl, hash);
557 if (!pprev)
558 return ERR_PTR(-ENOMEM);
559
560 head = rht_dereference_bucket(*pprev, tbl, hash);
561 561
562 RCU_INIT_POINTER(obj->next, head); 562 RCU_INIT_POINTER(obj->next, head);
563 if (ht->rhlist) { 563 if (ht->rhlist) {
@@ -567,7 +567,10 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
567 RCU_INIT_POINTER(list->next, NULL); 567 RCU_INIT_POINTER(list->next, NULL);
568 } 568 }
569 569
570 rcu_assign_pointer(*pprev, obj); 570 /* bkt is always the head of the list, so it holds
571 * the lock, which we need to preserve
572 */
573 rht_assign_locked(bkt, obj);
571 574
572 atomic_inc(&ht->nelems); 575 atomic_inc(&ht->nelems);
573 if (rht_grow_above_75(ht, tbl)) 576 if (rht_grow_above_75(ht, tbl))
@@ -581,47 +584,35 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
581{ 584{
582 struct bucket_table *new_tbl; 585 struct bucket_table *new_tbl;
583 struct bucket_table *tbl; 586 struct bucket_table *tbl;
587 struct rhash_lock_head __rcu **bkt;
584 unsigned int hash; 588 unsigned int hash;
585 spinlock_t *lock;
586 void *data; 589 void *data;
587 590
588 tbl = rcu_dereference(ht->tbl); 591 new_tbl = rcu_dereference(ht->tbl);
589
590 /* All insertions must grab the oldest table containing
591 * the hashed bucket that is yet to be rehashed.
592 */
593 for (;;) {
594 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
595 lock = rht_bucket_lock(tbl, hash);
596 spin_lock_bh(lock);
597
598 if (tbl->rehash <= hash)
599 break;
600
601 spin_unlock_bh(lock);
602 tbl = rht_dereference_rcu(tbl->future_tbl, ht);
603 }
604
605 data = rhashtable_lookup_one(ht, tbl, hash, key, obj);
606 new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data);
607 if (PTR_ERR(new_tbl) != -EEXIST)
608 data = ERR_CAST(new_tbl);
609 592
610 while (!IS_ERR_OR_NULL(new_tbl)) { 593 do {
611 tbl = new_tbl; 594 tbl = new_tbl;
612 hash = rht_head_hashfn(ht, tbl, obj, ht->p); 595 hash = rht_head_hashfn(ht, tbl, obj, ht->p);
613 spin_lock_nested(rht_bucket_lock(tbl, hash), 596 if (rcu_access_pointer(tbl->future_tbl))
614 SINGLE_DEPTH_NESTING); 597 /* Failure is OK */
615 598 bkt = rht_bucket_var(tbl, hash);
616 data = rhashtable_lookup_one(ht, tbl, hash, key, obj); 599 else
617 new_tbl = rhashtable_insert_one(ht, tbl, hash, obj, data); 600 bkt = rht_bucket_insert(ht, tbl, hash);
618 if (PTR_ERR(new_tbl) != -EEXIST) 601 if (bkt == NULL) {
619 data = ERR_CAST(new_tbl); 602 new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
620 603 data = ERR_PTR(-EAGAIN);
621 spin_unlock(rht_bucket_lock(tbl, hash)); 604 } else {
622 } 605 rht_lock(tbl, bkt);
623 606 data = rhashtable_lookup_one(ht, bkt, tbl,
624 spin_unlock_bh(lock); 607 hash, key, obj);
608 new_tbl = rhashtable_insert_one(ht, bkt, tbl,
609 hash, obj, data);
610 if (PTR_ERR(new_tbl) != -EEXIST)
611 data = ERR_CAST(new_tbl);
612
613 rht_unlock(tbl, bkt);
614 }
615 } while (!IS_ERR_OR_NULL(new_tbl));
625 616
626 if (PTR_ERR(data) == -EAGAIN) 617 if (PTR_ERR(data) == -EAGAIN)
627 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?: 618 data = ERR_PTR(rhashtable_insert_rehash(ht, tbl) ?:
@@ -943,10 +934,11 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter)
943 ht = iter->ht; 934 ht = iter->ht;
944 935
945 spin_lock(&ht->lock); 936 spin_lock(&ht->lock);
946 if (tbl->rehash < tbl->size) 937 if (rcu_head_after_call_rcu(&tbl->rcu, bucket_table_free_rcu))
947 list_add(&iter->walker.list, &tbl->walkers); 938 /* This bucket table is being freed, don't re-link it. */
948 else
949 iter->walker.tbl = NULL; 939 iter->walker.tbl = NULL;
940 else
941 list_add(&iter->walker.list, &tbl->walkers);
950 spin_unlock(&ht->lock); 942 spin_unlock(&ht->lock);
951 943
952out: 944out:
@@ -1046,11 +1038,6 @@ int rhashtable_init(struct rhashtable *ht,
1046 1038
1047 size = rounded_hashtable_size(&ht->p); 1039 size = rounded_hashtable_size(&ht->p);
1048 1040
1049 if (params->locks_mul)
1050 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
1051 else
1052 ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;
1053
1054 ht->key_len = ht->p.key_len; 1041 ht->key_len = ht->p.key_len;
1055 if (!params->hashfn) { 1042 if (!params->hashfn) {
1056 ht->p.hashfn = jhash; 1043 ht->p.hashfn = jhash;
@@ -1152,7 +1139,7 @@ restart:
1152 struct rhash_head *pos, *next; 1139 struct rhash_head *pos, *next;
1153 1140
1154 cond_resched(); 1141 cond_resched();
1155 for (pos = rht_dereference(*rht_bucket(tbl, i), ht), 1142 for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
1156 next = !rht_is_a_nulls(pos) ? 1143 next = !rht_is_a_nulls(pos) ?
1157 rht_dereference(pos->next, ht) : NULL; 1144 rht_dereference(pos->next, ht) : NULL;
1158 !rht_is_a_nulls(pos); 1145 !rht_is_a_nulls(pos);
@@ -1179,11 +1166,10 @@ void rhashtable_destroy(struct rhashtable *ht)
1179} 1166}
1180EXPORT_SYMBOL_GPL(rhashtable_destroy); 1167EXPORT_SYMBOL_GPL(rhashtable_destroy);
1181 1168
1182struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, 1169struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
1183 unsigned int hash) 1170 unsigned int hash)
1184{ 1171{
1185 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1172 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1186 static struct rhash_head __rcu *rhnull;
1187 unsigned int index = hash & ((1 << tbl->nest) - 1); 1173 unsigned int index = hash & ((1 << tbl->nest) - 1);
1188 unsigned int size = tbl->size >> tbl->nest; 1174 unsigned int size = tbl->size >> tbl->nest;
1189 unsigned int subhash = hash; 1175 unsigned int subhash = hash;
@@ -1201,20 +1187,28 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1201 subhash >>= shift; 1187 subhash >>= shift;
1202 } 1188 }
1203 1189
1204 if (!ntbl) { 1190 if (!ntbl)
1205 if (!rhnull) 1191 return NULL;
1206 INIT_RHT_NULLS_HEAD(rhnull);
1207 return &rhnull;
1208 }
1209 1192
1210 return &ntbl[subhash].bucket; 1193 return &ntbl[subhash].bucket;
1211 1194
1212} 1195}
1196EXPORT_SYMBOL_GPL(__rht_bucket_nested);
1197
1198struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1199 unsigned int hash)
1200{
1201 static struct rhash_lock_head __rcu *rhnull;
1202
1203 if (!rhnull)
1204 INIT_RHT_NULLS_HEAD(rhnull);
1205 return __rht_bucket_nested(tbl, hash) ?: &rhnull;
1206}
1213EXPORT_SYMBOL_GPL(rht_bucket_nested); 1207EXPORT_SYMBOL_GPL(rht_bucket_nested);
1214 1208
1215struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, 1209struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
1216 struct bucket_table *tbl, 1210 struct bucket_table *tbl,
1217 unsigned int hash) 1211 unsigned int hash)
1218{ 1212{
1219 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1213 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1220 unsigned int index = hash & ((1 << tbl->nest) - 1); 1214 unsigned int index = hash & ((1 << tbl->nest) - 1);
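The locking change in this file replaces the per-table bucket lock array with a bit lock embedded in bit 0 of each bucket head pointer, which is why writers go through rht_assign_locked()/rht_assign_unlock() rather than plain rcu_assign_pointer(): an unmasked store would clobber the lock bit. A conceptual sketch of the idea (struct and names invented; the real rht_lock()/rht_unlock() helpers live in include/linux/rhashtable.h):

	#include <linux/bit_spinlock.h>

	struct demo_bucket {
		unsigned long head;	/* chain pointer; bit 0 is the lock */
	};

	static void demo_update(struct demo_bucket *b)
	{
		bit_spin_lock(0, &b->head);	/* locks just this bucket */
		/* ... rewrite the chain, preserving bit 0 on every store ... */
		bit_spin_unlock(0, &b->head);
	}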
diff --git a/lib/siphash.c b/lib/siphash.c
index 3ae58b4edad6..c47bb6ff2149 100644
--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
68 bytemask_from_count(left))); 68 bytemask_from_count(left)));
69#else 69#else
70 switch (left) { 70 switch (left) {
71 case 7: b |= ((u64)end[6]) << 48; 71 case 7: b |= ((u64)end[6]) << 48; /* fall through */
72 case 6: b |= ((u64)end[5]) << 40; 72 case 6: b |= ((u64)end[5]) << 40; /* fall through */
73 case 5: b |= ((u64)end[4]) << 32; 73 case 5: b |= ((u64)end[4]) << 32; /* fall through */
74 case 4: b |= le32_to_cpup(data); break; 74 case 4: b |= le32_to_cpup(data); break;
75 case 3: b |= ((u64)end[2]) << 16; 75 case 3: b |= ((u64)end[2]) << 16; /* fall through */
76 case 2: b |= le16_to_cpup(data); break; 76 case 2: b |= le16_to_cpup(data); break;
77 case 1: b |= end[0]; 77 case 1: b |= end[0];
78 } 78 }
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
101 bytemask_from_count(left))); 101 bytemask_from_count(left)));
102#else 102#else
103 switch (left) { 103 switch (left) {
104 case 7: b |= ((u64)end[6]) << 48; 104 case 7: b |= ((u64)end[6]) << 48; /* fall through */
105 case 6: b |= ((u64)end[5]) << 40; 105 case 6: b |= ((u64)end[5]) << 40; /* fall through */
106 case 5: b |= ((u64)end[4]) << 32; 106 case 5: b |= ((u64)end[4]) << 32; /* fall through */
107 case 4: b |= get_unaligned_le32(end); break; 107 case 4: b |= get_unaligned_le32(end); break;
108 case 3: b |= ((u64)end[2]) << 16; 108 case 3: b |= ((u64)end[2]) << 16; /* fall through */
109 case 2: b |= get_unaligned_le16(end); break; 109 case 2: b |= get_unaligned_le16(end); break;
110 case 1: b |= end[0]; 110 case 1: b |= end[0];
111 } 111 }
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
268 bytemask_from_count(left))); 268 bytemask_from_count(left)));
269#else 269#else
270 switch (left) { 270 switch (left) {
271 case 7: b |= ((u64)end[6]) << 48; 271 case 7: b |= ((u64)end[6]) << 48; /* fall through */
272 case 6: b |= ((u64)end[5]) << 40; 272 case 6: b |= ((u64)end[5]) << 40; /* fall through */
273 case 5: b |= ((u64)end[4]) << 32; 273 case 5: b |= ((u64)end[4]) << 32; /* fall through */
274 case 4: b |= le32_to_cpup(data); break; 274 case 4: b |= le32_to_cpup(data); break;
275 case 3: b |= ((u64)end[2]) << 16; 275 case 3: b |= ((u64)end[2]) << 16; /* fall through */
276 case 2: b |= le16_to_cpup(data); break; 276 case 2: b |= le16_to_cpup(data); break;
277 case 1: b |= end[0]; 277 case 1: b |= end[0];
278 } 278 }
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
301 bytemask_from_count(left))); 301 bytemask_from_count(left)));
302#else 302#else
303 switch (left) { 303 switch (left) {
304 case 7: b |= ((u64)end[6]) << 48; 304 case 7: b |= ((u64)end[6]) << 48; /* fall through */
305 case 6: b |= ((u64)end[5]) << 40; 305 case 6: b |= ((u64)end[5]) << 40; /* fall through */
306 case 5: b |= ((u64)end[4]) << 32; 306 case 5: b |= ((u64)end[4]) << 32; /* fall through */
307 case 4: b |= get_unaligned_le32(end); break; 307 case 4: b |= get_unaligned_le32(end); break;
308 case 3: b |= ((u64)end[2]) << 16; 308 case 3: b |= ((u64)end[2]) << 16; /* fall through */
309 case 2: b |= get_unaligned_le16(end); break; 309 case 2: b |= get_unaligned_le16(end); break;
310 case 1: b |= end[0]; 310 case 1: b |= end[0];
311 } 311 }
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
431 v0 ^= m; 431 v0 ^= m;
432 } 432 }
433 switch (left) { 433 switch (left) {
434 case 3: b |= ((u32)end[2]) << 16; 434 case 3: b |= ((u32)end[2]) << 16; /* fall through */
435 case 2: b |= le16_to_cpup(data); break; 435 case 2: b |= le16_to_cpup(data); break;
436 case 1: b |= end[0]; 436 case 1: b |= end[0];
437 } 437 }
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
454 v0 ^= m; 454 v0 ^= m;
455 } 455 }
456 switch (left) { 456 switch (left) {
457 case 3: b |= ((u32)end[2]) << 16; 457 case 3: b |= ((u32)end[2]) << 16; /* fall through */
458 case 2: b |= get_unaligned_le16(end); break; 458 case 2: b |= get_unaligned_le16(end); break;
459 case 1: b |= end[0]; 459 case 1: b |= end[0];
460 } 460 }
diff --git a/lib/sort.c b/lib/sort.c
index d6b7a202b0b6..50855ea8c262 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -1,8 +1,13 @@
1// SPDX-License-Identifier: GPL-2.0 1// SPDX-License-Identifier: GPL-2.0
2/* 2/*
3 * A fast, small, non-recursive O(nlog n) sort for the Linux kernel 3 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
4 * 4 *
5 * Jan 23 2005 Matt Mackall <mpm@selenic.com> 5 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
6 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
7 *
8 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
9 * better) at the expense of stack usage and much larger code to avoid
10 * quicksort's O(n^2) worst case.
6 */ 11 */
7 12
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -11,35 +16,155 @@
11#include <linux/export.h> 16#include <linux/export.h>
12#include <linux/sort.h> 17#include <linux/sort.h>
13 18
14static int alignment_ok(const void *base, int align) 19/**
20 * is_aligned - is this pointer & size okay for word-wide copying?
21 * @base: pointer to data
22 * @size: size of each element
23 * @align: required alignment (typically 4 or 8)
24 *
25 * Returns true if elements can be copied using word loads and stores.
26 * The size must be a multiple of the alignment, and the base address must
27 * be aligned as well if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
28 *
29 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
30 * to "if ((a | b) & mask)", so we do that by hand.
31 */
32__attribute_const__ __always_inline
33static bool is_aligned(const void *base, size_t size, unsigned char align)
15{ 34{
16 return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || 35 unsigned char lsbits = (unsigned char)size;
17 ((unsigned long)base & (align - 1)) == 0; 36
37 (void)base;
38#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
39 lsbits |= (unsigned char)(uintptr_t)base;
40#endif
41 return (lsbits & (align - 1)) == 0;
18} 42}
19 43
20static void u32_swap(void *a, void *b, int size) 44/**
45 * swap_words_32 - swap two elements in 32-bit chunks
46 * @a, @b: pointers to the elements
47 * @n: element size (must be a multiple of 4)
48 *
49 * Exchange the two objects in memory. This exploits base+index addressing,
50 * which basically all CPUs have, to minimize loop overhead computations.
51 *
52 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
53 * bottom of the loop, even though the zero flag is still valid from the
54 * subtract (since the intervening mov instructions don't alter the flags).
55 * Gcc 8.1.0 doesn't have that problem.
56 */
57static void swap_words_32(void *a, void *b, size_t n)
21{ 58{
22 u32 t = *(u32 *)a; 59 do {
23 *(u32 *)a = *(u32 *)b; 60 u32 t = *(u32 *)(a + (n -= 4));
24 *(u32 *)b = t; 61 *(u32 *)(a + n) = *(u32 *)(b + n);
62 *(u32 *)(b + n) = t;
63 } while (n);
25} 64}
26 65
27static void u64_swap(void *a, void *b, int size) 66/**
67 * swap_words_64 - swap two elements in 64-bit chunks
68 * @a, @b: pointers to the elements
69 * @n: element size (must be a multiple of 8)
70 *
71 * Exchange the two objects in memory. This exploits base+index
72 * addressing, which basically all CPUs have, to minimize loop overhead
73 * computations.
74 *
75 * We'd like to use 64-bit loads if possible. If they're not, emulating
76 * one requires base+index+4 addressing which x86 has but most other
77 * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads,
78 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
79 * x32 ABI). Are there any cases the kernel needs to worry about?
80 */
81static void swap_words_64(void *a, void *b, size_t n)
28{ 82{
29 u64 t = *(u64 *)a; 83 do {
30 *(u64 *)a = *(u64 *)b; 84#ifdef CONFIG_64BIT
31 *(u64 *)b = t; 85 u64 t = *(u64 *)(a + (n -= 8));
86 *(u64 *)(a + n) = *(u64 *)(b + n);
87 *(u64 *)(b + n) = t;
88#else
89 /* Use two 32-bit transfers to avoid base+index+4 addressing */
90 u32 t = *(u32 *)(a + (n -= 4));
91 *(u32 *)(a + n) = *(u32 *)(b + n);
92 *(u32 *)(b + n) = t;
93
94 t = *(u32 *)(a + (n -= 4));
95 *(u32 *)(a + n) = *(u32 *)(b + n);
96 *(u32 *)(b + n) = t;
97#endif
98 } while (n);
32} 99}
33 100
34static void generic_swap(void *a, void *b, int size) 101/**
102 * swap_bytes - swap two elements a byte at a time
103 * @a, @b: pointers to the elements
104 * @n: element size
105 *
106 * This is the fallback if alignment doesn't allow using larger chunks.
107 */
108static void swap_bytes(void *a, void *b, size_t n)
35{ 109{
36 char t;
37
38 do { 110 do {
39 t = *(char *)a; 111 char t = ((char *)a)[--n];
40 *(char *)a++ = *(char *)b; 112 ((char *)a)[n] = ((char *)b)[n];
41 *(char *)b++ = t; 113 ((char *)b)[n] = t;
42 } while (--size > 0); 114 } while (n);
115}
116
117typedef void (*swap_func_t)(void *a, void *b, int size);
118
119/*
120 * The values are arbitrary as long as they can't be confused with
121 * a pointer, but small integers make for the smallest compare
122 * instructions.
123 */
124#define SWAP_WORDS_64 (swap_func_t)0
125#define SWAP_WORDS_32 (swap_func_t)1
126#define SWAP_BYTES (swap_func_t)2
127
128/*
129 * The function pointer is last to make tail calls most efficient if the
130 * compiler decides not to inline this function.
131 */
132static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
133{
134 if (swap_func == SWAP_WORDS_64)
135 swap_words_64(a, b, size);
136 else if (swap_func == SWAP_WORDS_32)
137 swap_words_32(a, b, size);
138 else if (swap_func == SWAP_BYTES)
139 swap_bytes(a, b, size);
140 else
141 swap_func(a, b, (int)size);
142}
143
144/**
145 * parent - given the offset of the child, find the offset of the parent.
146 * @i: the offset of the heap element whose parent is sought. Non-zero.
147 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
148 * @size: size of each element
149 *
150 * In terms of array indexes, the parent of element j = @i/@size is simply
151 * (j-1)/2. But when working in byte offsets, we can't use implicit
152 * truncation of integer divides.
153 *
154 * Fortunately, we only need one bit of the quotient, not the full divide.
155 * @size has a least significant bit. That bit will be clear if @i is
156 * an even multiple of @size, and set if it's an odd multiple.
157 *
158 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
159 * branch is unpredictable, it's done with a bit of clever branch-free
160 * code instead.
161 */
162__attribute_const__ __always_inline
163static size_t parent(size_t i, unsigned int lsbit, size_t size)
164{
165 i -= size;
166 i -= size & -(i & lsbit);
167 return i / 2;
43} 168}
44 169
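A standalone user-space check (assumed example) that the branch-free arithmetic above matches the plain index formula ((j - 1) / 2) * size, here for a size whose least significant bit is 4:

	#include <assert.h>
	#include <stddef.h>

	static size_t parent(size_t i, unsigned int lsbit, size_t size)
	{
		i -= size;
		i -= size & -(i & lsbit);
		return i / 2;
	}

	int main(void)
	{
		const size_t size = 12;			/* size & -size == 4 */
		const unsigned int lsbit = size & -size;

		for (size_t j = 1; j < 1000; j++)	/* child index, non-zero */
			assert(parent(j * size, lsbit, size) ==
			       ((j - 1) / 2) * size);
		return 0;
	}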
45/** 170/**
@@ -50,57 +175,78 @@ static void generic_swap(void *a, void *b, int size)
50 * @cmp_func: pointer to comparison function 175 * @cmp_func: pointer to comparison function
51 * @swap_func: pointer to swap function or NULL 176 * @swap_func: pointer to swap function or NULL
52 * 177 *
53 * This function does a heapsort on the given array. You may provide a 178 * This function does a heapsort on the given array. You may provide
54 * swap_func function optimized to your element type. 179 * a swap_func function if you need to do something more than a memory
180 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
181 * avoids a slow retpoline and so is significantly faster.
55 * 182 *
56 * Sorting time is O(n log n) both on average and worst-case. While 183 * Sorting time is O(n log n) both on average and worst-case. While
57 * qsort is about 20% faster on average, it suffers from exploitable 184 * quicksort is slightly faster on average, it suffers from exploitable
58 * O(n*n) worst-case behavior and extra memory requirements that make 185 * O(n*n) worst-case behavior and extra memory requirements that make
59 * it less suitable for kernel use. 186 * it less suitable for kernel use.
60 */ 187 */
61
62void sort(void *base, size_t num, size_t size, 188void sort(void *base, size_t num, size_t size,
63 int (*cmp_func)(const void *, const void *), 189 int (*cmp_func)(const void *, const void *),
64 void (*swap_func)(void *, void *, int size)) 190 void (*swap_func)(void *, void *, int size))
65{ 191{
66 /* pre-scale counters for performance */ 192 /* pre-scale counters for performance */
67 int i = (num/2 - 1) * size, n = num * size, c, r; 193 size_t n = num * size, a = (num/2) * size;
194 const unsigned int lsbit = size & -size; /* Used to find parent */
195
196 if (!a) /* num < 2 || size == 0 */
197 return;
68 198
69 if (!swap_func) { 199 if (!swap_func) {
70 if (size == 4 && alignment_ok(base, 4)) 200 if (is_aligned(base, size, 8))
71 swap_func = u32_swap; 201 swap_func = SWAP_WORDS_64;
72 else if (size == 8 && alignment_ok(base, 8)) 202 else if (is_aligned(base, size, 4))
73 swap_func = u64_swap; 203 swap_func = SWAP_WORDS_32;
74 else 204 else
75 swap_func = generic_swap; 205 swap_func = SWAP_BYTES;
76 } 206 }
77 207
78 /* heapify */ 208 /*
79 for ( ; i >= 0; i -= size) { 209 * Loop invariants:
80 for (r = i; r * 2 + size < n; r = c) { 210 * 1. elements [a,n) satisfy the heap property (compare greater than
81 c = r * 2 + size; 211 * all of their children),
82 if (c < n - size && 212 * 2. elements [n,num*size) are sorted, and
83 cmp_func(base + c, base + c + size) < 0) 213 * 3. a <= b <= c <= d <= n (whenever they are valid).
84 c += size; 214 */
85 if (cmp_func(base + r, base + c) >= 0) 215 for (;;) {
86 break; 216 size_t b, c, d;
87 swap_func(base + r, base + c, size); 217
88 } 218 if (a) /* Building heap: sift down --a */
89 } 219 a -= size;
220 else if (n -= size) /* Sorting: Extract root to --n */
221 do_swap(base, base + n, size, swap_func);
222 else /* Sort complete */
223 break;
90 224
91 /* sort */ 225 /*
92 for (i = n - size; i > 0; i -= size) { 226 * Sift element at "a" down into heap. This is the
93 swap_func(base, base + i, size); 227 * "bottom-up" variant, which significantly reduces
94 for (r = 0; r * 2 + size < i; r = c) { 228 * calls to cmp_func(): we find the sift-down path all
95 c = r * 2 + size; 229 * the way to the leaves (one compare per level), then
96 if (c < i - size && 230 * backtrack to find where to insert the target element.
97 cmp_func(base + c, base + c + size) < 0) 231 *
98 c += size; 232 * Because elements tend to sift down close to the leaves,
99 if (cmp_func(base + r, base + c) >= 0) 233 * this uses fewer compares than doing two per level
100 break; 234 * on the way down. (A bit more than half as many on
101 swap_func(base + r, base + c, size); 235 * average, 3/4 worst-case.)
236 */
237 for (b = a; c = 2*b + size, (d = c + size) < n;)
238 b = cmp_func(base + c, base + d) >= 0 ? c : d;
239 if (d == n) /* Special case last leaf with no sibling */
240 b = c;
241
242 /* Now backtrack from "b" to the correct location for "a" */
243 while (b != a && cmp_func(base + a, base + b) >= 0)
244 b = parent(b, lsbit, size);
245 c = b; /* Where "a" belongs */
246 while (b != a) { /* Shift it into place */
247 b = parent(b, lsbit, size);
248 do_swap(base + b, base + c, size, swap_func);
102 } 249 }
103 } 250 }
104} 251}
105
106EXPORT_SYMBOL(sort); 252EXPORT_SYMBOL(sort);
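
The parent() arithmetic at the top of this hunk is easy to misread, so here is a minimal userspace sketch (not from the patch) of the same trick. For elements of "size" bytes at byte offsets i = k * size, the parent of node k is (k - 1) / 2; parent() computes ((k - 1) / 2) * size without ever dividing by size, using lsbit = size & -size to detect the odd case:

	#include <assert.h>
	#include <stddef.h>

	static size_t parent(size_t i, unsigned int lsbit, size_t size)
	{
		i -= size;                /* now i == (k - 1) * size        */
		i -= size & -(i & lsbit); /* subtract size once more when  */
		                          /* k - 1 is odd, so i / 2 rounds */
		                          /* down to a multiple of size    */
		return i / 2;
	}

	int main(void)
	{
		size_t size = 12;                  /* arbitrary element size */
		unsigned int lsbit = size & -size; /* == 4 for size 12       */

		/* children of node 1 are nodes 3 and 4 */
		assert(parent(3 * size, lsbit, size) == 1 * size);
		assert(parent(4 * size, lsbit, size) == 1 * size);
		return 0;
	}
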
diff --git a/lib/string.c b/lib/string.c
index 3ab861c1a857..6016eb3ac73d 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -159,11 +159,9 @@ EXPORT_SYMBOL(strlcpy);
159 * @src: Where to copy the string from 159 * @src: Where to copy the string from
160 * @count: Size of destination buffer 160 * @count: Size of destination buffer
161 * 161 *
162 * Copy the string, or as much of it as fits, into the dest buffer. 162 * Copy the string, or as much of it as fits, into the dest buffer. The
163 * The routine returns the number of characters copied (not including 163 * behavior is undefined if the string buffers overlap. The destination
164 * the trailing NUL) or -E2BIG if the destination buffer wasn't big enough. 164 * buffer is always NUL terminated, unless it's zero-sized.
165 * The behavior is undefined if the string buffers overlap.
166 * The destination buffer is always NUL terminated, unless it's zero-sized.
167 * 165 *
168 * Preferred to strlcpy() since the API doesn't require reading memory 166 * Preferred to strlcpy() since the API doesn't require reading memory
169 * from the src string beyond the specified "count" bytes, and since 167 * from the src string beyond the specified "count" bytes, and since
@@ -173,8 +171,10 @@ EXPORT_SYMBOL(strlcpy);
173 * 171 *
174 * Preferred to strncpy() since it always returns a valid string, and 172 * Preferred to strncpy() since it always returns a valid string, and
175 * doesn't unnecessarily force the tail of the destination buffer to be 173 * doesn't unnecessarily force the tail of the destination buffer to be
176 * zeroed. If the zeroing is desired, it's likely cleaner to use strscpy() 174 * If zeroing is desired, please use strscpy_pad().
177 * with an overflow test, then just memset() the tail of the dest buffer. 175 *
176 * Return: The number of characters copied (not including the trailing
177 * %NUL) or -E2BIG if the destination buffer wasn't big enough.
178 */ 178 */
179ssize_t strscpy(char *dest, const char *src, size_t count) 179ssize_t strscpy(char *dest, const char *src, size_t count)
180{ 180{
@@ -237,6 +237,39 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
237EXPORT_SYMBOL(strscpy); 237EXPORT_SYMBOL(strscpy);
238#endif 238#endif
239 239
240/**
241 * strscpy_pad() - Copy a C-string into a sized buffer
242 * @dest: Where to copy the string to
243 * @src: Where to copy the string from
244 * @count: Size of destination buffer
245 *
246 * Copy the string, or as much of it as fits, into the dest buffer. The
247 * behavior is undefined if the string buffers overlap. The destination
248 * buffer is always %NUL terminated, unless it's zero-sized.
249 *
250 * If the source string is shorter than the destination buffer, the
251 * tail of the destination buffer is zeroed.
252 *
253 * For a full explanation of why you may want to consider using the
254 * 'strscpy' functions, please see the function docstring for strscpy().
255 *
256 * Return: The number of characters copied (not including the trailing
257 * %NUL) or -E2BIG if the destination buffer wasn't big enough.
258 */
259ssize_t strscpy_pad(char *dest, const char *src, size_t count)
260{
261 ssize_t written;
262
263 written = strscpy(dest, src, count);
264 if (written < 0 || written == count - 1)
265 return written;
266
267 memset(dest + written + 1, 0, count - written - 1);
268
269 return written;
270}
271EXPORT_SYMBOL(strscpy_pad);
272
240#ifndef __HAVE_ARCH_STRCAT 273#ifndef __HAVE_ARCH_STRCAT
241/** 274/**
242 * strcat - Append one %NUL-terminated string to another 275 * strcat - Append one %NUL-terminated string to another
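
As a quick illustration of the semantics the reworked kernel-doc above describes, a hypothetical caller (sketch only, not part of the patch):

	static void strscpy_pad_example(void)
	{
		char name[8];
		ssize_t n;

		n = strscpy_pad(name, "eth0", sizeof(name));
		/* n == 4; name is "eth0" followed by four NUL bytes:
		 * the copy is terminated and the tail is zero-padded */

		n = strscpy_pad(name, "much-too-long", sizeof(name));
		/* n == -E2BIG; name is "much-to" plus a NUL: truncated
		 * but still terminated, nothing written past count */
	}
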
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6cd7d0740005..d3a501f2a81a 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -11,6 +11,9 @@
11#include <linux/printk.h> 11#include <linux/printk.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/string.h> 13#include <linux/string.h>
14#include <linux/uaccess.h>
15
16#include "../tools/testing/selftests/kselftest_module.h"
14 17
15static unsigned total_tests __initdata; 18static unsigned total_tests __initdata;
16static unsigned failed_tests __initdata; 19static unsigned failed_tests __initdata;
@@ -224,7 +227,8 @@ static const unsigned long exp[] __initconst = {
224 BITMAP_FROM_U64(0xffffffff), 227 BITMAP_FROM_U64(0xffffffff),
225 BITMAP_FROM_U64(0xfffffffe), 228 BITMAP_FROM_U64(0xfffffffe),
226 BITMAP_FROM_U64(0x3333333311111111ULL), 229 BITMAP_FROM_U64(0x3333333311111111ULL),
227 BITMAP_FROM_U64(0xffffffff77777777ULL) 230 BITMAP_FROM_U64(0xffffffff77777777ULL),
231 BITMAP_FROM_U64(0),
228}; 232};
229 233
230static const unsigned long exp2[] __initconst = { 234static const unsigned long exp2[] __initconst = {
@@ -247,55 +251,93 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
247 {0, "1-31:4/4", &exp[9 * step], 32, 0}, 251 {0, "1-31:4/4", &exp[9 * step], 32, 0},
248 {0, "0-31:1/4,32-63:2/4", &exp[10 * step], 64, 0}, 252 {0, "0-31:1/4,32-63:2/4", &exp[10 * step], 64, 0},
249 {0, "0-31:3/4,32-63:4/4", &exp[11 * step], 64, 0}, 253 {0, "0-31:3/4,32-63:4/4", &exp[11 * step], 64, 0},
254 {0, " ,, 0-31:3/4 ,, 32-63:4/4 ,, ", &exp[11 * step], 64, 0},
250 255
251 {0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0}, 256 {0, "0-31:1/4,32-63:2/4,64-95:3/4,96-127:4/4", exp2, 128, 0},
252 257
253 {0, "0-2047:128/256", NULL, 2048, PARSE_TIME}, 258 {0, "0-2047:128/256", NULL, 2048, PARSE_TIME},
254 259
260 {0, "", &exp[12 * step], 8, 0},
261 {0, "\n", &exp[12 * step], 8, 0},
262 {0, ",, ,, , , ,", &exp[12 * step], 8, 0},
263 {0, " , ,, , , ", &exp[12 * step], 8, 0},
264 {0, " , ,, , , \n", &exp[12 * step], 8, 0},
265
255 {-EINVAL, "-1", NULL, 8, 0}, 266 {-EINVAL, "-1", NULL, 8, 0},
256 {-EINVAL, "-0", NULL, 8, 0}, 267 {-EINVAL, "-0", NULL, 8, 0},
257 {-EINVAL, "10-1", NULL, 8, 0}, 268 {-EINVAL, "10-1", NULL, 8, 0},
258 {-EINVAL, "0-31:", NULL, 8, 0}, 269 {-EINVAL, "0-31:", NULL, 8, 0},
259 {-EINVAL, "0-31:0", NULL, 8, 0}, 270 {-EINVAL, "0-31:0", NULL, 8, 0},
271 {-EINVAL, "0-31:0/", NULL, 8, 0},
260 {-EINVAL, "0-31:0/0", NULL, 8, 0}, 272 {-EINVAL, "0-31:0/0", NULL, 8, 0},
261 {-EINVAL, "0-31:1/0", NULL, 8, 0}, 273 {-EINVAL, "0-31:1/0", NULL, 8, 0},
262 {-EINVAL, "0-31:10/1", NULL, 8, 0}, 274 {-EINVAL, "0-31:10/1", NULL, 8, 0},
275 {-EOVERFLOW, "0-98765432123456789:10/1", NULL, 8, 0},
276
277 {-EINVAL, "a-31", NULL, 8, 0},
278 {-EINVAL, "0-a1", NULL, 8, 0},
279 {-EINVAL, "a-31:10/1", NULL, 8, 0},
280 {-EINVAL, "0-31:a/1", NULL, 8, 0},
281 {-EINVAL, "0-\n", NULL, 8, 0},
263}; 282};
264 283
265static void __init test_bitmap_parselist(void) 284static void __init __test_bitmap_parselist(int is_user)
266{ 285{
267 int i; 286 int i;
268 int err; 287 int err;
269 cycles_t cycles; 288 ktime_t time;
270 DECLARE_BITMAP(bmap, 2048); 289 DECLARE_BITMAP(bmap, 2048);
290 char *mode = is_user ? "_user" : "";
271 291
272 for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) { 292 for (i = 0; i < ARRAY_SIZE(parselist_tests); i++) {
273#define ptest parselist_tests[i] 293#define ptest parselist_tests[i]
274 294
275 cycles = get_cycles(); 295 if (is_user) {
276 err = bitmap_parselist(ptest.in, bmap, ptest.nbits); 296 mm_segment_t orig_fs = get_fs();
277 cycles = get_cycles() - cycles; 297 size_t len = strlen(ptest.in);
298
299 set_fs(KERNEL_DS);
300 time = ktime_get();
301 err = bitmap_parselist_user(ptest.in, len,
302 bmap, ptest.nbits);
303 time = ktime_get() - time;
304 set_fs(orig_fs);
305 } else {
306 time = ktime_get();
307 err = bitmap_parselist(ptest.in, bmap, ptest.nbits);
308 time = ktime_get() - time;
309 }
278 310
279 if (err != ptest.errno) { 311 if (err != ptest.errno) {
280 pr_err("test %d: input is %s, errno is %d, expected %d\n", 312 pr_err("parselist%s: %d: input is %s, errno is %d, expected %d\n",
281 i, ptest.in, err, ptest.errno); 313 mode, i, ptest.in, err, ptest.errno);
282 continue; 314 continue;
283 } 315 }
284 316
285 if (!err && ptest.expected 317 if (!err && ptest.expected
286 && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) { 318 && !__bitmap_equal(bmap, ptest.expected, ptest.nbits)) {
287 pr_err("test %d: input is %s, result is 0x%lx, expected 0x%lx\n", 319 pr_err("parselist%s: %d: input is %s, result is 0x%lx, expected 0x%lx\n",
288 i, ptest.in, bmap[0], *ptest.expected); 320 mode, i, ptest.in, bmap[0],
321 *ptest.expected);
289 continue; 322 continue;
290 } 323 }
291 324
292 if (ptest.flags & PARSE_TIME) 325 if (ptest.flags & PARSE_TIME)
293 pr_err("test %d: input is '%s' OK, Time: %llu\n", 326 pr_err("parselist%s: %d: input is '%s' OK, Time: %llu\n",
294 i, ptest.in, 327 mode, i, ptest.in, time);
295 (unsigned long long)cycles);
296 } 328 }
297} 329}
298 330
331static void __init test_bitmap_parselist(void)
332{
333 __test_bitmap_parselist(0);
334}
335
336static void __init test_bitmap_parselist_user(void)
337{
338 __test_bitmap_parselist(1);
339}
340
299#define EXP_BYTES (sizeof(exp) * 8) 341#define EXP_BYTES (sizeof(exp) * 8)
300 342
301static void __init test_bitmap_arr32(void) 343static void __init test_bitmap_arr32(void)
@@ -361,30 +403,17 @@ static void noinline __init test_mem_optimisations(void)
361 } 403 }
362} 404}
363 405
364static int __init test_bitmap_init(void) 406static void __init selftest(void)
365{ 407{
366 test_zero_clear(); 408 test_zero_clear();
367 test_fill_set(); 409 test_fill_set();
368 test_copy(); 410 test_copy();
369 test_bitmap_arr32(); 411 test_bitmap_arr32();
370 test_bitmap_parselist(); 412 test_bitmap_parselist();
413 test_bitmap_parselist_user();
371 test_mem_optimisations(); 414 test_mem_optimisations();
372
373 if (failed_tests == 0)
374 pr_info("all %u tests passed\n", total_tests);
375 else
376 pr_warn("failed %u out of %u tests\n",
377 failed_tests, total_tests);
378
379 return failed_tests ? -EINVAL : 0;
380} 415}
381 416
382static void __exit test_bitmap_cleanup(void) 417KSTM_MODULE_LOADERS(test_bitmap);
383{
384}
385
386module_init(test_bitmap_init);
387module_exit(test_bitmap_cleanup);
388
389MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>"); 418MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>");
390MODULE_LICENSE("GPL"); 419MODULE_LICENSE("GPL");
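
For readers unfamiliar with the "range:used/group" syntax the new cases exercise, here is one accepted string worked out by hand (illustrative; the values follow from the exp[] entries above):

	DECLARE_BITMAP(bmap, 64);
	int err;

	/* set the first 3 bits of every group of 4 within 0-31, and all
	 * 4 bits of every group within 32-63:
	 * bmap[0] == 0xffffffff77777777 on 64-bit */
	err = bitmap_parselist("0-31:3/4,32-63:4/4", bmap, 64);

	/* the new whitespace/empty-term cases assert that this parses
	 * to exactly the same bitmap */
	err = bitmap_parselist(" ,, 0-31:3/4 ,, 32-63:4/4 ,, ", bmap, 64);
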
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 659b6cc0d483..93da0a5000ec 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -21,6 +21,8 @@
21#include <linux/gfp.h> 21#include <linux/gfp.h>
22#include <linux/mm.h> 22#include <linux/mm.h>
23 23
24#include "../tools/testing/selftests/kselftest_module.h"
25
24#define BUF_SIZE 256 26#define BUF_SIZE 256
25#define PAD_SIZE 16 27#define PAD_SIZE 16
26#define FILL_CHAR '$' 28#define FILL_CHAR '$'
@@ -239,6 +241,7 @@ plain_format(void)
239#define PTR ((void *)0x456789ab) 241#define PTR ((void *)0x456789ab)
240#define PTR_STR "456789ab" 242#define PTR_STR "456789ab"
241#define PTR_VAL_NO_CRNG "(ptrval)" 243#define PTR_VAL_NO_CRNG "(ptrval)"
244#define ZEROS ""
242 245
243static int __init 246static int __init
244plain_format(void) 247plain_format(void)
@@ -268,7 +271,6 @@ plain_hash_to_buffer(const void *p, char *buf, size_t len)
268 return 0; 271 return 0;
269} 272}
270 273
271
272static int __init 274static int __init
273plain_hash(void) 275plain_hash(void)
274{ 276{
@@ -326,6 +328,24 @@ test_hashed(const char *fmt, const void *p)
326} 328}
327 329
328static void __init 330static void __init
331null_pointer(void)
332{
333 test_hashed("%p", NULL);
334 test(ZEROS "00000000", "%px", NULL);
335 test("(null)", "%pE", NULL);
336}
337
338#define PTR_INVALID ((void *)0x000000ab)
339
340static void __init
341invalid_pointer(void)
342{
343 test_hashed("%p", PTR_INVALID);
344 test(ZEROS "000000ab", "%px", PTR_INVALID);
345 test("(efault)", "%pE", PTR_INVALID);
346}
347
348static void __init
329symbol_ptr(void) 349symbol_ptr(void)
330{ 350{
331} 351}
@@ -462,8 +482,7 @@ struct_rtc_time(void)
462 .tm_year = 118, 482 .tm_year = 118,
463 }; 483 };
464 484
465 test_hashed("%pt", &tm); 485 test("(%ptR?)", "%pt", &tm);
466
467 test("2018-11-26T05:35:43", "%ptR", &tm); 486 test("2018-11-26T05:35:43", "%ptR", &tm);
468 test("0118-10-26T05:35:43", "%ptRr", &tm); 487 test("0118-10-26T05:35:43", "%ptRr", &tm);
469 test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm); 488 test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
@@ -481,14 +500,14 @@ static void __init
481large_bitmap(void) 500large_bitmap(void)
482{ 501{
483 const int nbits = 1 << 16; 502 const int nbits = 1 << 16;
484 unsigned long *bits = kcalloc(BITS_TO_LONGS(nbits), sizeof(long), GFP_KERNEL); 503 unsigned long *bits = bitmap_zalloc(nbits, GFP_KERNEL);
485 if (!bits) 504 if (!bits)
486 return; 505 return;
487 506
488 bitmap_set(bits, 1, 20); 507 bitmap_set(bits, 1, 20);
489 bitmap_set(bits, 60000, 15); 508 bitmap_set(bits, 60000, 15);
490 test("1-20,60000-60014", "%*pbl", nbits, bits); 509 test("1-20,60000-60014", "%*pbl", nbits, bits);
491 kfree(bits); 510 bitmap_free(bits);
492} 511}
493 512
494static void __init 513static void __init
@@ -572,6 +591,8 @@ static void __init
572test_pointer(void) 591test_pointer(void)
573{ 592{
574 plain(); 593 plain();
594 null_pointer();
595 invalid_pointer();
575 symbol_ptr(); 596 symbol_ptr();
576 kernel_ptr(); 597 kernel_ptr();
577 struct_resource(); 598 struct_resource();
@@ -590,12 +611,11 @@ test_pointer(void)
590 flags(); 611 flags();
591} 612}
592 613
593static int __init 614static void __init selftest(void)
594test_printf_init(void)
595{ 615{
596 alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL); 616 alloced_buffer = kmalloc(BUF_SIZE + 2*PAD_SIZE, GFP_KERNEL);
597 if (!alloced_buffer) 617 if (!alloced_buffer)
598 return -ENOMEM; 618 return;
599 test_buffer = alloced_buffer + PAD_SIZE; 619 test_buffer = alloced_buffer + PAD_SIZE;
600 620
601 test_basic(); 621 test_basic();
@@ -604,16 +624,8 @@ test_printf_init(void)
604 test_pointer(); 624 test_pointer();
605 625
606 kfree(alloced_buffer); 626 kfree(alloced_buffer);
607
608 if (failed_tests == 0)
609 pr_info("all %u tests passed\n", total_tests);
610 else
611 pr_warn("failed %u out of %u tests\n", failed_tests, total_tests);
612
613 return failed_tests ? -EINVAL : 0;
614} 627}
615 628
616module_init(test_printf_init); 629KSTM_MODULE_LOADERS(test_printf);
617
618MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>"); 630MODULE_AUTHOR("Rasmus Villemoes <linux@rasmusvillemoes.dk>");
619MODULE_LICENSE("GPL"); 631MODULE_LICENSE("GPL");
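
test_printf, test_bitmap and the new test_strscpy below all switch to the shared kselftest_module.h harness, which is what absorbs the deleted init/exit boilerplate. Roughly what that header supplies (paraphrased sketch reconstructed from the removed code; see the header for the authoritative version):

	static inline int kstm_report(unsigned int total_tests,
				      unsigned int failed_tests)
	{
		if (failed_tests == 0)
			pr_info("all %u tests passed\n", total_tests);
		else
			pr_warn("failed %u out of %u tests\n",
				failed_tests, total_tests);
		return failed_tests ? -EINVAL : 0;
	}

	#define KSTM_MODULE_LOADERS(__module)				\
	static int __init __module##_init(void)				\
	{								\
		selftest();						\
		return kstm_report(total_tests, failed_tests);		\
	}								\
	module_init(__module##_init);	/* module_exit() part elided */
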
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 3bd2e91bfc29..084fe5a6ac57 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
500 struct rhash_head *pos, *next; 500 struct rhash_head *pos, *next;
501 struct test_obj_rhl *p; 501 struct test_obj_rhl *p;
502 502
503 pos = rht_dereference(tbl->buckets[i], ht); 503 pos = rht_ptr_exclusive(tbl->buckets + i);
504 next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL; 504 next = !rht_is_a_nulls(pos) ? rht_dereference(pos->next, ht) : NULL;
505 505
506 if (!rht_is_a_nulls(pos)) { 506 if (!rht_is_a_nulls(pos)) {
diff --git a/lib/test_strscpy.c b/lib/test_strscpy.c
new file mode 100644
index 000000000000..a827f94601f5
--- /dev/null
+++ b/lib/test_strscpy.c
@@ -0,0 +1,150 @@
1// SPDX-License-Identifier: GPL-2.0+
2
3#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4
5#include <linux/string.h>
6
7#include "../tools/testing/selftests/kselftest_module.h"
8
9/*
10 * Kernel module for testing 'strscpy' family of functions.
11 */
12
13KSTM_MODULE_GLOBALS();
14
15/*
16 * tc() - Run a specific test case.
17 * @src: Source string, argument to strscpy_pad()
18 * @count: Size of destination buffer, argument to strscpy_pad()
19 * @expected: Expected return value from call to strscpy_pad()
20 * @chars: Number of characters from the src string expected to be
21 * written to the dst buffer.
22 * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
23 * @pad: Number of pad characters expected (in the tail of dst buffer).
24 * (@pad does not include the null terminator byte.)
25 *
26 * Calls strscpy_pad() and verifies the return value and state of the
27 * destination buffer after the call returns.
28 */
29static int __init tc(char *src, int count, int expected,
30 int chars, int terminator, int pad)
31{
32 int nr_bytes_poison;
33 int max_expected;
34 int max_count;
35 int written;
36 char buf[6];
37 int index, i;
38 const char POISON = 'z';
39
40 total_tests++;
41
42 if (!src) {
43 pr_err("null source string not supported\n");
44 return -1;
45 }
46
47 memset(buf, POISON, sizeof(buf));
48 /* Future-proof the test suite: validate args */
49 max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
50 max_expected = count - 1; /* Space for the null */
51 if (count > max_count) {
52 pr_err("count (%d) is too big (%d) ... aborting", count, max_count);
53 return -1;
54 }
55 if (expected > max_expected) {
56 pr_warn("expected (%d) is bigger than can possibly be returned (%d)",
57 expected, max_expected);
58 }
59
60 written = strscpy_pad(buf, src, count);
61 if (written != expected) {
62 pr_err("%d != %d (written, expected)\n", written, expected);
63 goto fail;
64 }
65
66 if (count && written == -E2BIG) {
67 if (strncmp(buf, src, count - 1) != 0) {
68 pr_err("buffer state invalid for -E2BIG\n");
69 goto fail;
70 }
71 if (buf[count - 1] != '\0') {
72 pr_err("too big string is not null terminated correctly\n");
73 goto fail;
74 }
75 }
76
77 for (i = 0; i < chars; i++) {
78 if (buf[i] != src[i]) {
79 pr_err("buf[i]==%c != src[i]==%c\n", buf[i], src[i]);
80 goto fail;
81 }
82 }
83
84 if (terminator) {
85 if (buf[count - 1] != '\0') {
86 pr_err("string is not null terminated correctly\n");
87 goto fail;
88 }
89 }
90
91 for (i = 0; i < pad; i++) {
92 index = chars + terminator + i;
93 if (buf[index] != '\0') {
94 pr_err("padding missing at index: %d\n", i);
95 goto fail;
96 }
97 }
98
99 nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
100 for (i = 0; i < nr_bytes_poison; i++) {
101 index = sizeof(buf) - 1 - i; /* Check from the end back */
102 if (buf[index] != POISON) {
103 pr_err("poison value missing at index: %d\n", i);
104 goto fail;
105 }
106 }
107
108 return 0;
109fail:
110 failed_tests++;
111 return -1;
112}
113
114static void __init selftest(void)
115{
116 /*
117 * tc() uses a destination buffer of size 6 and needs at
118 * least 2 characters spare (one for null and one to check for
119 * overflow). This means we should only call tc() with
120 * strings up to a maximum of 4 characters long and 'count'
121 * should not exceed 4. To test with longer strings increase
122 * the buffer size in tc().
123 */
124
125 /* tc(src, count, expected, chars, terminator, pad) */
126 KSTM_CHECK_ZERO(tc("a", 0, -E2BIG, 0, 0, 0));
127 KSTM_CHECK_ZERO(tc("", 0, -E2BIG, 0, 0, 0));
128
129 KSTM_CHECK_ZERO(tc("a", 1, -E2BIG, 0, 1, 0));
130 KSTM_CHECK_ZERO(tc("", 1, 0, 0, 1, 0));
131
132 KSTM_CHECK_ZERO(tc("ab", 2, -E2BIG, 1, 1, 0));
133 KSTM_CHECK_ZERO(tc("a", 2, 1, 1, 1, 0));
134 KSTM_CHECK_ZERO(tc("", 2, 0, 0, 1, 1));
135
136 KSTM_CHECK_ZERO(tc("abc", 3, -E2BIG, 2, 1, 0));
137 KSTM_CHECK_ZERO(tc("ab", 3, 2, 2, 1, 0));
138 KSTM_CHECK_ZERO(tc("a", 3, 1, 1, 1, 1));
139 KSTM_CHECK_ZERO(tc("", 3, 0, 0, 1, 2));
140
141 KSTM_CHECK_ZERO(tc("abcd", 4, -E2BIG, 3, 1, 0));
142 KSTM_CHECK_ZERO(tc("abc", 4, 3, 3, 1, 0));
143 KSTM_CHECK_ZERO(tc("ab", 4, 2, 2, 1, 1));
144 KSTM_CHECK_ZERO(tc("a", 4, 1, 1, 1, 2));
145 KSTM_CHECK_ZERO(tc("", 4, 0, 0, 1, 3));
146}
147
148KSTM_MODULE_LOADERS(test_strscpy);
149MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
150MODULE_LICENSE("GPL");
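
One of the six-argument rows worked out by hand, to make the tc() contract concrete (illustration, not from the patch) -- tc("ab", 4, 2, 2, 1, 1):

	/*
	 * buf before: { 'z', 'z', 'z',  'z',  'z', 'z' }   (POISON)
	 * strscpy_pad(buf, "ab", 4) returns 2 (== expected), then:
	 * buf after:  { 'a', 'b', '\0', '\0', 'z', 'z' }
	 *               chars=2   NUL   pad=1  poison untouched
	 *
	 * The last two bytes lie beyond count and must keep the poison
	 * value - that is the final overflow check tc() performs.
	 */
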
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 3dd801c1c85b..566dad3f4196 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -47,6 +47,9 @@ struct test_sysctl_data {
47 unsigned int uint_0001; 47 unsigned int uint_0001;
48 48
49 char string_0001[65]; 49 char string_0001[65];
50
51#define SYSCTL_TEST_BITMAP_SIZE 65536
52 unsigned long *bitmap_0001;
50}; 53};
51 54
52static struct test_sysctl_data test_data = { 55static struct test_sysctl_data test_data = {
@@ -102,6 +105,13 @@ static struct ctl_table test_table[] = {
102 .mode = 0644, 105 .mode = 0644,
103 .proc_handler = proc_dostring, 106 .proc_handler = proc_dostring,
104 }, 107 },
108 {
109 .procname = "bitmap_0001",
110 .data = &test_data.bitmap_0001,
111 .maxlen = SYSCTL_TEST_BITMAP_SIZE,
112 .mode = 0644,
113 .proc_handler = proc_do_large_bitmap,
114 },
105 { } 115 { }
106}; 116};
107 117
@@ -129,15 +139,21 @@ static struct ctl_table_header *test_sysctl_header;
129 139
130static int __init test_sysctl_init(void) 140static int __init test_sysctl_init(void)
131{ 141{
142 test_data.bitmap_0001 = kzalloc(SYSCTL_TEST_BITMAP_SIZE/8, GFP_KERNEL);
143 if (!test_data.bitmap_0001)
144 return -ENOMEM;
132 test_sysctl_header = register_sysctl_table(test_sysctl_root_table); 145 test_sysctl_header = register_sysctl_table(test_sysctl_root_table);
133 if (!test_sysctl_header) 146 if (!test_sysctl_header) {
147 kfree(test_data.bitmap_0001);
134 return -ENOMEM; 148 return -ENOMEM;
149 }
135 return 0; 150 return 0;
136} 151}
137late_initcall(test_sysctl_init); 152late_initcall(test_sysctl_init);
138 153
139static void __exit test_sysctl_exit(void) 154static void __exit test_sysctl_exit(void)
140{ 155{
156 kfree(test_data.bitmap_0001);
141 if (test_sysctl_header) 157 if (test_sysctl_header)
142 unregister_sysctl_table(test_sysctl_header); 158 unregister_sysctl_table(test_sysctl_header);
143} 159}
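
Note the unit mismatch being bridged in the hunk above: .maxlen for proc_do_large_bitmap is a count of bits, while kzalloc() takes bytes, hence SYSCTL_TEST_BITMAP_SIZE/8. A sketch of the same allocation with the bitmap helpers (which this patch does not use here) keeps both sides in bits:

	/* init */
	test_data.bitmap_0001 = bitmap_zalloc(SYSCTL_TEST_BITMAP_SIZE,
					      GFP_KERNEL);
	/* exit */
	bitmap_free(test_data.bitmap_0001);
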
diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c
index f832b095afba..8bbefcaddfe8 100644
--- a/lib/test_vmalloc.c
+++ b/lib/test_vmalloc.c
@@ -384,12 +384,11 @@ static int test_func(void *private)
384{ 384{
385 struct test_driver *t = private; 385 struct test_driver *t = private;
386 int random_array[ARRAY_SIZE(test_case_array)]; 386 int random_array[ARRAY_SIZE(test_case_array)];
387 int index, i, j, ret; 387 int index, i, j;
388 ktime_t kt; 388 ktime_t kt;
389 u64 delta; 389 u64 delta;
390 390
391 ret = set_cpus_allowed_ptr(current, cpumask_of(t->cpu)); 391 if (set_cpus_allowed_ptr(current, cpumask_of(t->cpu)) < 0)
392 if (ret < 0)
393 pr_err("Failed to set affinity to %d CPU\n", t->cpu); 392 pr_err("Failed to set affinity to %d CPU\n", t->cpu);
394 393
395 for (i = 0; i < ARRAY_SIZE(test_case_array); i++) 394 for (i = 0; i < ARRAY_SIZE(test_case_array); i++)
@@ -415,8 +414,7 @@ static int test_func(void *private)
415 414
416 kt = ktime_get(); 415 kt = ktime_get();
417 for (j = 0; j < test_repeat_count; j++) { 416 for (j = 0; j < test_repeat_count; j++) {
418 ret = test_case_array[index].test_func(); 417 if (!test_case_array[index].test_func())
419 if (!ret)
420 per_cpu_test_data[t->cpu][index].test_passed++; 418 per_cpu_test_data[t->cpu][index].test_passed++;
421 else 419 else
422 per_cpu_test_data[t->cpu][index].test_failed++; 420 per_cpu_test_data[t->cpu][index].test_failed++;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 791b6fa36905..2f003cfe340e 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -593,15 +593,13 @@ char *widen_string(char *buf, int n, char *end, struct printf_spec spec)
593 return buf; 593 return buf;
594} 594}
595 595
596static noinline_for_stack 596/* Handle string from a well-known address. */
597char *string(char *buf, char *end, const char *s, struct printf_spec spec) 597static char *string_nocheck(char *buf, char *end, const char *s,
598 struct printf_spec spec)
598{ 599{
599 int len = 0; 600 int len = 0;
600 size_t lim = spec.precision; 601 size_t lim = spec.precision;
601 602
602 if ((unsigned long)s < PAGE_SIZE)
603 s = "(null)";
604
605 while (lim--) { 603 while (lim--) {
606 char c = *s++; 604 char c = *s++;
607 if (!c) 605 if (!c)
@@ -614,9 +612,64 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
614 return widen_string(buf, len, end, spec); 612 return widen_string(buf, len, end, spec);
615} 613}
616 614
615/* Be careful: error messages must fit into the given buffer. */
616static char *error_string(char *buf, char *end, const char *s,
617 struct printf_spec spec)
618{
619 /*
620 * Hard limit to avoid completely insane messages. It actually
621 * works pretty well because most error messages just name
622 * the offending pointer format modifier.
623 */
624 if (spec.precision == -1)
625 spec.precision = 2 * sizeof(void *);
626
627 return string_nocheck(buf, end, s, spec);
628}
629
630/*
631 * Do not call any complex external code here. Nested printk()/vsprintf()
632 * might cause infinite loops. Failures might break printk() and would
633 * be hard to debug.
634 */
635static const char *check_pointer_msg(const void *ptr)
636{
637 if (!ptr)
638 return "(null)";
639
640 if ((unsigned long)ptr < PAGE_SIZE || IS_ERR_VALUE(ptr))
641 return "(efault)";
642
643 return NULL;
644}
645
646static int check_pointer(char **buf, char *end, const void *ptr,
647 struct printf_spec spec)
648{
649 const char *err_msg;
650
651 err_msg = check_pointer_msg(ptr);
652 if (err_msg) {
653 *buf = error_string(*buf, end, err_msg, spec);
654 return -EFAULT;
655 }
656
657 return 0;
658}
659
617static noinline_for_stack 660static noinline_for_stack
618char *pointer_string(char *buf, char *end, const void *ptr, 661char *string(char *buf, char *end, const char *s,
619 struct printf_spec spec) 662 struct printf_spec spec)
663{
664 if (check_pointer(&buf, end, s, spec))
665 return buf;
666
667 return string_nocheck(buf, end, s, spec);
668}
669
670static char *pointer_string(char *buf, char *end,
671 const void *ptr,
672 struct printf_spec spec)
620{ 673{
621 spec.base = 16; 674 spec.base = 16;
622 spec.flags |= SMALL; 675 spec.flags |= SMALL;
@@ -701,7 +754,7 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
701 if (static_branch_unlikely(&not_filled_random_ptr_key)) { 754 if (static_branch_unlikely(&not_filled_random_ptr_key)) {
702 spec.field_width = 2 * sizeof(ptr); 755 spec.field_width = 2 * sizeof(ptr);
703 /* string length must be less than default_width */ 756 /* string length must be less than default_width */
704 return string(buf, end, str, spec); 757 return error_string(buf, end, str, spec);
705 } 758 }
706 759
707#ifdef CONFIG_64BIT 760#ifdef CONFIG_64BIT
@@ -717,6 +770,55 @@ static char *ptr_to_id(char *buf, char *end, const void *ptr,
717 return pointer_string(buf, end, (const void *)hashval, spec); 770 return pointer_string(buf, end, (const void *)hashval, spec);
718} 771}
719 772
773int kptr_restrict __read_mostly;
774
775static noinline_for_stack
776char *restricted_pointer(char *buf, char *end, const void *ptr,
777 struct printf_spec spec)
778{
779 switch (kptr_restrict) {
780 case 0:
781 /* Handle as %p, hash and do _not_ leak addresses. */
782 return ptr_to_id(buf, end, ptr, spec);
783 case 1: {
784 const struct cred *cred;
785
786 /*
787 * kptr_restrict==1 cannot be used in IRQ context
788 * because its test for CAP_SYSLOG would be meaningless.
789 */
790 if (in_irq() || in_serving_softirq() || in_nmi()) {
791 if (spec.field_width == -1)
792 spec.field_width = 2 * sizeof(ptr);
793 return error_string(buf, end, "pK-error", spec);
794 }
795
796 /*
797 * Only print the real pointer value if the current
798 * process has CAP_SYSLOG and is running with the
799 * same credentials it started with. This is because
800 * access to files is checked at open() time, but %pK
801 * checks permission at read() time. We don't want to
802 * leak pointer values if a binary opens a file using
803 * %pK and then elevates privileges before reading it.
804 */
805 cred = current_cred();
806 if (!has_capability_noaudit(current, CAP_SYSLOG) ||
807 !uid_eq(cred->euid, cred->uid) ||
808 !gid_eq(cred->egid, cred->gid))
809 ptr = NULL;
810 break;
811 }
812 case 2:
813 default:
814 /* Always print 0's for %pK */
815 ptr = NULL;
816 break;
817 }
818
819 return pointer_string(buf, end, ptr, spec);
820}
821
720static noinline_for_stack 822static noinline_for_stack
721char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec, 823char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
722 const char *fmt) 824 const char *fmt)
@@ -736,6 +838,11 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp
736 838
737 rcu_read_lock(); 839 rcu_read_lock();
738 for (i = 0; i < depth; i++, d = p) { 840 for (i = 0; i < depth; i++, d = p) {
841 if (check_pointer(&buf, end, d, spec)) {
842 rcu_read_unlock();
843 return buf;
844 }
845
739 p = READ_ONCE(d->d_parent); 846 p = READ_ONCE(d->d_parent);
740 array[i] = READ_ONCE(d->d_name.name); 847 array[i] = READ_ONCE(d->d_name.name);
741 if (p == d) { 848 if (p == d) {
@@ -766,8 +873,12 @@ static noinline_for_stack
766char *bdev_name(char *buf, char *end, struct block_device *bdev, 873char *bdev_name(char *buf, char *end, struct block_device *bdev,
767 struct printf_spec spec, const char *fmt) 874 struct printf_spec spec, const char *fmt)
768{ 875{
769 struct gendisk *hd = bdev->bd_disk; 876 struct gendisk *hd;
770 877
878 if (check_pointer(&buf, end, bdev, spec))
879 return buf;
880
881 hd = bdev->bd_disk;
771 buf = string(buf, end, hd->disk_name, spec); 882 buf = string(buf, end, hd->disk_name, spec);
772 if (bdev->bd_part->partno) { 883 if (bdev->bd_part->partno) {
773 if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) { 884 if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) {
@@ -802,7 +913,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
802 else 913 else
803 sprint_symbol_no_offset(sym, value); 914 sprint_symbol_no_offset(sym, value);
804 915
805 return string(buf, end, sym, spec); 916 return string_nocheck(buf, end, sym, spec);
806#else 917#else
807 return special_hex_number(buf, end, value, sizeof(void *)); 918 return special_hex_number(buf, end, value, sizeof(void *));
808#endif 919#endif
@@ -886,29 +997,32 @@ char *resource_string(char *buf, char *end, struct resource *res,
886 int decode = (fmt[0] == 'R') ? 1 : 0; 997 int decode = (fmt[0] == 'R') ? 1 : 0;
887 const struct printf_spec *specp; 998 const struct printf_spec *specp;
888 999
1000 if (check_pointer(&buf, end, res, spec))
1001 return buf;
1002
889 *p++ = '['; 1003 *p++ = '[';
890 if (res->flags & IORESOURCE_IO) { 1004 if (res->flags & IORESOURCE_IO) {
891 p = string(p, pend, "io ", str_spec); 1005 p = string_nocheck(p, pend, "io ", str_spec);
892 specp = &io_spec; 1006 specp = &io_spec;
893 } else if (res->flags & IORESOURCE_MEM) { 1007 } else if (res->flags & IORESOURCE_MEM) {
894 p = string(p, pend, "mem ", str_spec); 1008 p = string_nocheck(p, pend, "mem ", str_spec);
895 specp = &mem_spec; 1009 specp = &mem_spec;
896 } else if (res->flags & IORESOURCE_IRQ) { 1010 } else if (res->flags & IORESOURCE_IRQ) {
897 p = string(p, pend, "irq ", str_spec); 1011 p = string_nocheck(p, pend, "irq ", str_spec);
898 specp = &default_dec_spec; 1012 specp = &default_dec_spec;
899 } else if (res->flags & IORESOURCE_DMA) { 1013 } else if (res->flags & IORESOURCE_DMA) {
900 p = string(p, pend, "dma ", str_spec); 1014 p = string_nocheck(p, pend, "dma ", str_spec);
901 specp = &default_dec_spec; 1015 specp = &default_dec_spec;
902 } else if (res->flags & IORESOURCE_BUS) { 1016 } else if (res->flags & IORESOURCE_BUS) {
903 p = string(p, pend, "bus ", str_spec); 1017 p = string_nocheck(p, pend, "bus ", str_spec);
904 specp = &bus_spec; 1018 specp = &bus_spec;
905 } else { 1019 } else {
906 p = string(p, pend, "??? ", str_spec); 1020 p = string_nocheck(p, pend, "??? ", str_spec);
907 specp = &mem_spec; 1021 specp = &mem_spec;
908 decode = 0; 1022 decode = 0;
909 } 1023 }
910 if (decode && res->flags & IORESOURCE_UNSET) { 1024 if (decode && res->flags & IORESOURCE_UNSET) {
911 p = string(p, pend, "size ", str_spec); 1025 p = string_nocheck(p, pend, "size ", str_spec);
912 p = number(p, pend, resource_size(res), *specp); 1026 p = number(p, pend, resource_size(res), *specp);
913 } else { 1027 } else {
914 p = number(p, pend, res->start, *specp); 1028 p = number(p, pend, res->start, *specp);
@@ -919,21 +1033,21 @@ char *resource_string(char *buf, char *end, struct resource *res,
919 } 1033 }
920 if (decode) { 1034 if (decode) {
921 if (res->flags & IORESOURCE_MEM_64) 1035 if (res->flags & IORESOURCE_MEM_64)
922 p = string(p, pend, " 64bit", str_spec); 1036 p = string_nocheck(p, pend, " 64bit", str_spec);
923 if (res->flags & IORESOURCE_PREFETCH) 1037 if (res->flags & IORESOURCE_PREFETCH)
924 p = string(p, pend, " pref", str_spec); 1038 p = string_nocheck(p, pend, " pref", str_spec);
925 if (res->flags & IORESOURCE_WINDOW) 1039 if (res->flags & IORESOURCE_WINDOW)
926 p = string(p, pend, " window", str_spec); 1040 p = string_nocheck(p, pend, " window", str_spec);
927 if (res->flags & IORESOURCE_DISABLED) 1041 if (res->flags & IORESOURCE_DISABLED)
928 p = string(p, pend, " disabled", str_spec); 1042 p = string_nocheck(p, pend, " disabled", str_spec);
929 } else { 1043 } else {
930 p = string(p, pend, " flags ", str_spec); 1044 p = string_nocheck(p, pend, " flags ", str_spec);
931 p = number(p, pend, res->flags, default_flag_spec); 1045 p = number(p, pend, res->flags, default_flag_spec);
932 } 1046 }
933 *p++ = ']'; 1047 *p++ = ']';
934 *p = '\0'; 1048 *p = '\0';
935 1049
936 return string(buf, end, sym, spec); 1050 return string_nocheck(buf, end, sym, spec);
937} 1051}
938 1052
939static noinline_for_stack 1053static noinline_for_stack
@@ -948,9 +1062,8 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
948 /* nothing to print */ 1062 /* nothing to print */
949 return buf; 1063 return buf;
950 1064
951 if (ZERO_OR_NULL_PTR(addr)) 1065 if (check_pointer(&buf, end, addr, spec))
952 /* NULL pointer */ 1066 return buf;
953 return string(buf, end, NULL, spec);
954 1067
955 switch (fmt[1]) { 1068 switch (fmt[1]) {
956 case 'C': 1069 case 'C':
@@ -997,6 +1110,9 @@ char *bitmap_string(char *buf, char *end, unsigned long *bitmap,
997 int i, chunksz; 1110 int i, chunksz;
998 bool first = true; 1111 bool first = true;
999 1112
1113 if (check_pointer(&buf, end, bitmap, spec))
1114 return buf;
1115
1000 /* reused to print numbers */ 1116 /* reused to print numbers */
1001 spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 }; 1117 spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 };
1002 1118
@@ -1038,6 +1154,9 @@ char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap,
1038 int cur, rbot, rtop; 1154 int cur, rbot, rtop;
1039 bool first = true; 1155 bool first = true;
1040 1156
1157 if (check_pointer(&buf, end, bitmap, spec))
1158 return buf;
1159
1041 rbot = cur = find_first_bit(bitmap, nr_bits); 1160 rbot = cur = find_first_bit(bitmap, nr_bits);
1042 while (cur < nr_bits) { 1161 while (cur < nr_bits) {
1043 rtop = cur; 1162 rtop = cur;
@@ -1076,6 +1195,9 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
1076 char separator; 1195 char separator;
1077 bool reversed = false; 1196 bool reversed = false;
1078 1197
1198 if (check_pointer(&buf, end, addr, spec))
1199 return buf;
1200
1079 switch (fmt[1]) { 1201 switch (fmt[1]) {
1080 case 'F': 1202 case 'F':
1081 separator = '-'; 1203 separator = '-';
@@ -1101,7 +1223,7 @@ char *mac_address_string(char *buf, char *end, u8 *addr,
1101 } 1223 }
1102 *p = '\0'; 1224 *p = '\0';
1103 1225
1104 return string(buf, end, mac_addr, spec); 1226 return string_nocheck(buf, end, mac_addr, spec);
1105} 1227}
1106 1228
1107static noinline_for_stack 1229static noinline_for_stack
@@ -1264,7 +1386,7 @@ char *ip6_addr_string(char *buf, char *end, const u8 *addr,
1264 else 1386 else
1265 ip6_string(ip6_addr, addr, fmt); 1387 ip6_string(ip6_addr, addr, fmt);
1266 1388
1267 return string(buf, end, ip6_addr, spec); 1389 return string_nocheck(buf, end, ip6_addr, spec);
1268} 1390}
1269 1391
1270static noinline_for_stack 1392static noinline_for_stack
@@ -1275,7 +1397,7 @@ char *ip4_addr_string(char *buf, char *end, const u8 *addr,
1275 1397
1276 ip4_string(ip4_addr, addr, fmt); 1398 ip4_string(ip4_addr, addr, fmt);
1277 1399
1278 return string(buf, end, ip4_addr, spec); 1400 return string_nocheck(buf, end, ip4_addr, spec);
1279} 1401}
1280 1402
1281static noinline_for_stack 1403static noinline_for_stack
@@ -1337,7 +1459,7 @@ char *ip6_addr_string_sa(char *buf, char *end, const struct sockaddr_in6 *sa,
1337 } 1459 }
1338 *p = '\0'; 1460 *p = '\0';
1339 1461
1340 return string(buf, end, ip6_addr, spec); 1462 return string_nocheck(buf, end, ip6_addr, spec);
1341} 1463}
1342 1464
1343static noinline_for_stack 1465static noinline_for_stack
@@ -1372,7 +1494,42 @@ char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
1372 } 1494 }
1373 *p = '\0'; 1495 *p = '\0';
1374 1496
1375 return string(buf, end, ip4_addr, spec); 1497 return string_nocheck(buf, end, ip4_addr, spec);
1498}
1499
1500static noinline_for_stack
1501char *ip_addr_string(char *buf, char *end, const void *ptr,
1502 struct printf_spec spec, const char *fmt)
1503{
1504 char *err_fmt_msg;
1505
1506 if (check_pointer(&buf, end, ptr, spec))
1507 return buf;
1508
1509 switch (fmt[1]) {
1510 case '6':
1511 return ip6_addr_string(buf, end, ptr, spec, fmt);
1512 case '4':
1513 return ip4_addr_string(buf, end, ptr, spec, fmt);
1514 case 'S': {
1515 const union {
1516 struct sockaddr raw;
1517 struct sockaddr_in v4;
1518 struct sockaddr_in6 v6;
1519 } *sa = ptr;
1520
1521 switch (sa->raw.sa_family) {
1522 case AF_INET:
1523 return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
1524 case AF_INET6:
1525 return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
1526 default:
1527 return error_string(buf, end, "(einval)", spec);
1528 }}
1529 }
1530
1531 err_fmt_msg = fmt[0] == 'i' ? "(%pi?)" : "(%pI?)";
1532 return error_string(buf, end, err_fmt_msg, spec);
1376} 1533}
1377 1534
1378static noinline_for_stack 1535static noinline_for_stack
@@ -1387,9 +1544,8 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
1387 if (spec.field_width == 0) 1544 if (spec.field_width == 0)
1388 return buf; /* nothing to print */ 1545 return buf; /* nothing to print */
1389 1546
1390 if (ZERO_OR_NULL_PTR(addr)) 1547 if (check_pointer(&buf, end, addr, spec))
1391 return string(buf, end, NULL, spec); /* NULL pointer */ 1548 return buf;
1392
1393 1549
1394 do { 1550 do {
1395 switch (fmt[count++]) { 1551 switch (fmt[count++]) {
@@ -1435,6 +1591,21 @@ char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
1435 return buf; 1591 return buf;
1436} 1592}
1437 1593
1594static char *va_format(char *buf, char *end, struct va_format *va_fmt,
1595 struct printf_spec spec, const char *fmt)
1596{
1597 va_list va;
1598
1599 if (check_pointer(&buf, end, va_fmt, spec))
1600 return buf;
1601
1602 va_copy(va, *va_fmt->va);
1603 buf += vsnprintf(buf, end > buf ? end - buf : 0, va_fmt->fmt, va);
1604 va_end(va);
1605
1606 return buf;
1607}
1608
1438static noinline_for_stack 1609static noinline_for_stack
1439char *uuid_string(char *buf, char *end, const u8 *addr, 1610char *uuid_string(char *buf, char *end, const u8 *addr,
1440 struct printf_spec spec, const char *fmt) 1611 struct printf_spec spec, const char *fmt)
@@ -1445,6 +1616,9 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
1445 const u8 *index = uuid_index; 1616 const u8 *index = uuid_index;
1446 bool uc = false; 1617 bool uc = false;
1447 1618
1619 if (check_pointer(&buf, end, addr, spec))
1620 return buf;
1621
1448 switch (*(++fmt)) { 1622 switch (*(++fmt)) {
1449 case 'L': 1623 case 'L':
1450 uc = true; /* fall-through */ 1624 uc = true; /* fall-through */
@@ -1473,56 +1647,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
1473 1647
1474 *p = 0; 1648 *p = 0;
1475 1649
1476 return string(buf, end, uuid, spec); 1650 return string_nocheck(buf, end, uuid, spec);
1477}
1478
1479int kptr_restrict __read_mostly;
1480
1481static noinline_for_stack
1482char *restricted_pointer(char *buf, char *end, const void *ptr,
1483 struct printf_spec spec)
1484{
1485 switch (kptr_restrict) {
1486 case 0:
1487 /* Always print %pK values */
1488 break;
1489 case 1: {
1490 const struct cred *cred;
1491
1492 /*
1493 * kptr_restrict==1 cannot be used in IRQ context
1494 * because its test for CAP_SYSLOG would be meaningless.
1495 */
1496 if (in_irq() || in_serving_softirq() || in_nmi()) {
1497 if (spec.field_width == -1)
1498 spec.field_width = 2 * sizeof(ptr);
1499 return string(buf, end, "pK-error", spec);
1500 }
1501
1502 /*
1503 * Only print the real pointer value if the current
1504 * process has CAP_SYSLOG and is running with the
1505 * same credentials it started with. This is because
1506 * access to files is checked at open() time, but %pK
1507 * checks permission at read() time. We don't want to
1508 * leak pointer values if a binary opens a file using
1509 * %pK and then elevates privileges before reading it.
1510 */
1511 cred = current_cred();
1512 if (!has_capability_noaudit(current, CAP_SYSLOG) ||
1513 !uid_eq(cred->euid, cred->uid) ||
1514 !gid_eq(cred->egid, cred->gid))
1515 ptr = NULL;
1516 break;
1517 }
1518 case 2:
1519 default:
1520 /* Always print 0's for %pK */
1521 ptr = NULL;
1522 break;
1523 }
1524
1525 return pointer_string(buf, end, ptr, spec);
1526} 1651}
1527 1652
1528static noinline_for_stack 1653static noinline_for_stack
@@ -1532,24 +1657,31 @@ char *netdev_bits(char *buf, char *end, const void *addr,
1532 unsigned long long num; 1657 unsigned long long num;
1533 int size; 1658 int size;
1534 1659
1660 if (check_pointer(&buf, end, addr, spec))
1661 return buf;
1662
1535 switch (fmt[1]) { 1663 switch (fmt[1]) {
1536 case 'F': 1664 case 'F':
1537 num = *(const netdev_features_t *)addr; 1665 num = *(const netdev_features_t *)addr;
1538 size = sizeof(netdev_features_t); 1666 size = sizeof(netdev_features_t);
1539 break; 1667 break;
1540 default: 1668 default:
1541 return ptr_to_id(buf, end, addr, spec); 1669 return error_string(buf, end, "(%pN?)", spec);
1542 } 1670 }
1543 1671
1544 return special_hex_number(buf, end, num, size); 1672 return special_hex_number(buf, end, num, size);
1545} 1673}
1546 1674
1547static noinline_for_stack 1675static noinline_for_stack
1548char *address_val(char *buf, char *end, const void *addr, const char *fmt) 1676char *address_val(char *buf, char *end, const void *addr,
1677 struct printf_spec spec, const char *fmt)
1549{ 1678{
1550 unsigned long long num; 1679 unsigned long long num;
1551 int size; 1680 int size;
1552 1681
1682 if (check_pointer(&buf, end, addr, spec))
1683 return buf;
1684
1553 switch (fmt[1]) { 1685 switch (fmt[1]) {
1554 case 'd': 1686 case 'd':
1555 num = *(const dma_addr_t *)addr; 1687 num = *(const dma_addr_t *)addr;
@@ -1601,12 +1733,16 @@ char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
1601} 1733}
1602 1734
1603static noinline_for_stack 1735static noinline_for_stack
1604char *rtc_str(char *buf, char *end, const struct rtc_time *tm, const char *fmt) 1736char *rtc_str(char *buf, char *end, const struct rtc_time *tm,
1737 struct printf_spec spec, const char *fmt)
1605{ 1738{
1606 bool have_t = true, have_d = true; 1739 bool have_t = true, have_d = true;
1607 bool raw = false; 1740 bool raw = false;
1608 int count = 2; 1741 int count = 2;
1609 1742
1743 if (check_pointer(&buf, end, tm, spec))
1744 return buf;
1745
1610 switch (fmt[count]) { 1746 switch (fmt[count]) {
1611 case 'd': 1747 case 'd':
1612 have_t = false; 1748 have_t = false;
@@ -1640,9 +1776,9 @@ char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
1640{ 1776{
1641 switch (fmt[1]) { 1777 switch (fmt[1]) {
1642 case 'R': 1778 case 'R':
1643 return rtc_str(buf, end, (const struct rtc_time *)ptr, fmt); 1779 return rtc_str(buf, end, (const struct rtc_time *)ptr, spec, fmt);
1644 default: 1780 default:
1645 return ptr_to_id(buf, end, ptr, spec); 1781 return error_string(buf, end, "(%ptR?)", spec);
1646 } 1782 }
1647} 1783}
1648 1784
@@ -1650,8 +1786,11 @@ static noinline_for_stack
1650char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, 1786char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1651 const char *fmt) 1787 const char *fmt)
1652{ 1788{
1653 if (!IS_ENABLED(CONFIG_HAVE_CLK) || !clk) 1789 if (!IS_ENABLED(CONFIG_HAVE_CLK))
1654 return string(buf, end, NULL, spec); 1790 return error_string(buf, end, "(%pC?)", spec);
1791
1792 if (check_pointer(&buf, end, clk, spec))
1793 return buf;
1655 1794
1656 switch (fmt[1]) { 1795 switch (fmt[1]) {
1657 case 'n': 1796 case 'n':
@@ -1659,7 +1798,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1659#ifdef CONFIG_COMMON_CLK 1798#ifdef CONFIG_COMMON_CLK
1660 return string(buf, end, __clk_get_name(clk), spec); 1799 return string(buf, end, __clk_get_name(clk), spec);
1661#else 1800#else
1662 return ptr_to_id(buf, end, clk, spec); 1801 return error_string(buf, end, "(%pC?)", spec);
1663#endif 1802#endif
1664 } 1803 }
1665} 1804}
@@ -1692,11 +1831,15 @@ char *format_flags(char *buf, char *end, unsigned long flags,
1692} 1831}
1693 1832
1694static noinline_for_stack 1833static noinline_for_stack
1695char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt) 1834char *flags_string(char *buf, char *end, void *flags_ptr,
1835 struct printf_spec spec, const char *fmt)
1696{ 1836{
1697 unsigned long flags; 1837 unsigned long flags;
1698 const struct trace_print_flags *names; 1838 const struct trace_print_flags *names;
1699 1839
1840 if (check_pointer(&buf, end, flags_ptr, spec))
1841 return buf;
1842
1700 switch (fmt[1]) { 1843 switch (fmt[1]) {
1701 case 'p': 1844 case 'p':
1702 flags = *(unsigned long *)flags_ptr; 1845 flags = *(unsigned long *)flags_ptr;
@@ -1713,8 +1856,7 @@ char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt)
1713 names = gfpflag_names; 1856 names = gfpflag_names;
1714 break; 1857 break;
1715 default: 1858 default:
1716 WARN_ONCE(1, "Unsupported flags modifier: %c\n", fmt[1]); 1859 return error_string(buf, end, "(%pG?)", spec);
1717 return buf;
1718 } 1860 }
1719 1861
1720 return format_flags(buf, end, flags, names); 1862 return format_flags(buf, end, flags, names);
@@ -1736,13 +1878,13 @@ char *device_node_gen_full_name(const struct device_node *np, char *buf, char *e
1736 1878
1737 /* special case for root node */ 1879 /* special case for root node */
1738 if (!parent) 1880 if (!parent)
1739 return string(buf, end, "/", default_str_spec); 1881 return string_nocheck(buf, end, "/", default_str_spec);
1740 1882
1741 for (depth = 0; parent->parent; depth++) 1883 for (depth = 0; parent->parent; depth++)
1742 parent = parent->parent; 1884 parent = parent->parent;
1743 1885
1744 for ( ; depth >= 0; depth--) { 1886 for ( ; depth >= 0; depth--) {
1745 buf = string(buf, end, "/", default_str_spec); 1887 buf = string_nocheck(buf, end, "/", default_str_spec);
1746 buf = string(buf, end, device_node_name_for_depth(np, depth), 1888 buf = string(buf, end, device_node_name_for_depth(np, depth),
1747 default_str_spec); 1889 default_str_spec);
1748 } 1890 }
@@ -1770,10 +1912,10 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1770 str_spec.field_width = -1; 1912 str_spec.field_width = -1;
1771 1913
1772 if (!IS_ENABLED(CONFIG_OF)) 1914 if (!IS_ENABLED(CONFIG_OF))
1773 return string(buf, end, "(!OF)", spec); 1915 return error_string(buf, end, "(%pOF?)", spec);
1774 1916
1775 if ((unsigned long)dn < PAGE_SIZE) 1917 if (check_pointer(&buf, end, dn, spec))
1776 return string(buf, end, "(null)", spec); 1918 return buf;
1777 1919
1778 /* simple case without anything any more format specifiers */ 1920 /* simple case without anything any more format specifiers */
1779 fmt++; 1921 fmt++;
@@ -1814,7 +1956,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1814 tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-'; 1956 tbuf[2] = of_node_check_flag(dn, OF_POPULATED) ? 'P' : '-';
1815 tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-'; 1957 tbuf[3] = of_node_check_flag(dn, OF_POPULATED_BUS) ? 'B' : '-';
1816 tbuf[4] = 0; 1958 tbuf[4] = 0;
1817 buf = string(buf, end, tbuf, str_spec); 1959 buf = string_nocheck(buf, end, tbuf, str_spec);
1818 break; 1960 break;
1819 case 'c': /* major compatible string */ 1961 case 'c': /* major compatible string */
1820 ret = of_property_read_string(dn, "compatible", &p); 1962 ret = of_property_read_string(dn, "compatible", &p);
@@ -1825,10 +1967,10 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1825 has_mult = false; 1967 has_mult = false;
1826 of_property_for_each_string(dn, "compatible", prop, p) { 1968 of_property_for_each_string(dn, "compatible", prop, p) {
1827 if (has_mult) 1969 if (has_mult)
1828 buf = string(buf, end, ",", str_spec); 1970 buf = string_nocheck(buf, end, ",", str_spec);
1829 buf = string(buf, end, "\"", str_spec); 1971 buf = string_nocheck(buf, end, "\"", str_spec);
1830 buf = string(buf, end, p, str_spec); 1972 buf = string(buf, end, p, str_spec);
1831 buf = string(buf, end, "\"", str_spec); 1973 buf = string_nocheck(buf, end, "\"", str_spec);
1832 1974
1833 has_mult = true; 1975 has_mult = true;
1834 } 1976 }
@@ -1841,6 +1983,17 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1841 return widen_string(buf, buf - buf_start, end, spec); 1983 return widen_string(buf, buf - buf_start, end, spec);
1842} 1984}
1843 1985
1986static char *kobject_string(char *buf, char *end, void *ptr,
1987 struct printf_spec spec, const char *fmt)
1988{
1989 switch (fmt[1]) {
1990 case 'F':
1991 return device_node_string(buf, end, ptr, spec, fmt + 1);
1992 }
1993
1994 return error_string(buf, end, "(%pO?)", spec);
1995}
1996
1844/* 1997/*
1845 * Show a '%p' thing. A kernel extension is that the '%p' is followed 1998 * Show a '%p' thing. A kernel extension is that the '%p' is followed
1846 * by an extra set of alphanumeric characters that are extended format 1999 * by an extra set of alphanumeric characters that are extended format
@@ -1957,18 +2110,6 @@ static noinline_for_stack
1957char *pointer(const char *fmt, char *buf, char *end, void *ptr, 2110char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1958 struct printf_spec spec) 2111 struct printf_spec spec)
1959{ 2112{
1960 const int default_width = 2 * sizeof(void *);
1961
1962 if (!ptr && *fmt != 'K' && *fmt != 'x') {
1963 /*
1964 * Print (null) with the same width as a pointer so it makes
1965 * tabular output look nice.
1966 */
1967 if (spec.field_width == -1)
1968 spec.field_width = default_width;
1969 return string(buf, end, "(null)", spec);
1970 }
1971
1972 switch (*fmt) { 2113 switch (*fmt) {
1973 case 'F': 2114 case 'F':
1974 case 'f': 2115 case 'f':
@@ -2004,50 +2145,19 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
2004 * 4: 001.002.003.004 2145 * 4: 001.002.003.004
2005 * 6: 000102...0f 2146 * 6: 000102...0f
2006 */ 2147 */
2007 switch (fmt[1]) { 2148 return ip_addr_string(buf, end, ptr, spec, fmt);
2008 case '6':
2009 return ip6_addr_string(buf, end, ptr, spec, fmt);
2010 case '4':
2011 return ip4_addr_string(buf, end, ptr, spec, fmt);
2012 case 'S': {
2013 const union {
2014 struct sockaddr raw;
2015 struct sockaddr_in v4;
2016 struct sockaddr_in6 v6;
2017 } *sa = ptr;
2018
2019 switch (sa->raw.sa_family) {
2020 case AF_INET:
2021 return ip4_addr_string_sa(buf, end, &sa->v4, spec, fmt);
2022 case AF_INET6:
2023 return ip6_addr_string_sa(buf, end, &sa->v6, spec, fmt);
2024 default:
2025 return string(buf, end, "(invalid address)", spec);
2026 }}
2027 }
2028 break;
2029 case 'E': 2149 case 'E':
2030 return escaped_string(buf, end, ptr, spec, fmt); 2150 return escaped_string(buf, end, ptr, spec, fmt);
2031 case 'U': 2151 case 'U':
2032 return uuid_string(buf, end, ptr, spec, fmt); 2152 return uuid_string(buf, end, ptr, spec, fmt);
2033 case 'V': 2153 case 'V':
2034 { 2154 return va_format(buf, end, ptr, spec, fmt);
2035 va_list va;
2036
2037 va_copy(va, *((struct va_format *)ptr)->va);
2038 buf += vsnprintf(buf, end > buf ? end - buf : 0,
2039 ((struct va_format *)ptr)->fmt, va);
2040 va_end(va);
2041 return buf;
2042 }
2043 case 'K': 2155 case 'K':
2044 if (!kptr_restrict)
2045 break;
2046 return restricted_pointer(buf, end, ptr, spec); 2156 return restricted_pointer(buf, end, ptr, spec);
2047 case 'N': 2157 case 'N':
2048 return netdev_bits(buf, end, ptr, spec, fmt); 2158 return netdev_bits(buf, end, ptr, spec, fmt);
2049 case 'a': 2159 case 'a':
2050 return address_val(buf, end, ptr, fmt); 2160 return address_val(buf, end, ptr, spec, fmt);
2051 case 'd': 2161 case 'd':
2052 return dentry_name(buf, end, ptr, spec, fmt); 2162 return dentry_name(buf, end, ptr, spec, fmt);
2053 case 't': 2163 case 't':
@@ -2064,13 +2174,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
2064#endif 2174#endif
2065 2175
2066 case 'G': 2176 case 'G':
2067 return flags_string(buf, end, ptr, fmt); 2177 return flags_string(buf, end, ptr, spec, fmt);
2068 case 'O': 2178 case 'O':
2069 switch (fmt[1]) { 2179 return kobject_string(buf, end, ptr, spec, fmt);
2070 case 'F':
2071 return device_node_string(buf, end, ptr, spec, fmt + 1);
2072 }
2073 break;
2074 case 'x': 2180 case 'x':
2075 return pointer_string(buf, end, ptr, spec); 2181 return pointer_string(buf, end, ptr, spec);
2076 } 2182 }
@@ -2685,11 +2791,13 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
2685 2791
2686 case FORMAT_TYPE_STR: { 2792 case FORMAT_TYPE_STR: {
2687 const char *save_str = va_arg(args, char *); 2793 const char *save_str = va_arg(args, char *);
2794 const char *err_msg;
2688 size_t len; 2795 size_t len;
2689 2796
2690 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE 2797 err_msg = check_pointer_msg(save_str);
2691 || (unsigned long)save_str < PAGE_SIZE) 2798 if (err_msg)
2692 save_str = "(null)"; 2799 save_str = err_msg;
2800
2693 len = strlen(save_str) + 1; 2801 len = strlen(save_str) + 1;
2694 if (str + len < end) 2802 if (str + len < end)
2695 memcpy(str, save_str, len); 2803 memcpy(str, save_str, len);
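
Net effect of the vsprintf.c rework, summarized (illustration; the exact strings come from the helpers above, and tm is a hypothetical struct rtc_time):

	printk("%s\n", (char *)NULL);      /* "(null)"   - NULL pointer     */
	printk("%s\n", (char *)8);         /* "(efault)" - below PAGE_SIZE  */
	printk("%s\n", ERR_PTR(-ENOMEM));  /* "(efault)" - IS_ERR_VALUE()   */
	printk("%pt\n", &tm);              /* "(%ptR?)"  - unknown modifier,
					      printed via error_string()   */

Plain %p and %px deliberately keep printing NULL (hashed and raw, respectively) rather than "(null)", as the new test_printf cases assert.
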
diff --git a/lib/zstd/bitstream.h b/lib/zstd/bitstream.h
index a826b99e1d63..3a49784d5c61 100644
--- a/lib/zstd/bitstream.h
+++ b/lib/zstd/bitstream.h
@@ -259,10 +259,15 @@ ZSTD_STATIC size_t BIT_initDStream(BIT_DStream_t *bitD, const void *srcBuffer, s
259 bitD->bitContainer = *(const BYTE *)(bitD->start); 259 bitD->bitContainer = *(const BYTE *)(bitD->start);
260 switch (srcSize) { 260 switch (srcSize) {
261 case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16); 261 case 7: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[6]) << (sizeof(bitD->bitContainer) * 8 - 16);
262 /* fall through */
262 case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24); 263 case 6: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[5]) << (sizeof(bitD->bitContainer) * 8 - 24);
264 /* fall through */
263 case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32); 265 case 5: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[4]) << (sizeof(bitD->bitContainer) * 8 - 32);
266 /* fall through */
264 case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24; 267 case 4: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[3]) << 24;
268 /* fall through */
265 case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16; 269 case 3: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[2]) << 16;
270 /* fall through */
266 case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8; 271 case 2: bitD->bitContainer += (size_t)(((const BYTE *)(srcBuffer))[1]) << 8;
267 default:; 272 default:;
268 } 273 }
diff --git a/lib/zstd/compress.c b/lib/zstd/compress.c
index f9166cf4f7a9..5e0b67003e55 100644
--- a/lib/zstd/compress.c
+++ b/lib/zstd/compress.c
@@ -3182,6 +3182,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream *zcs, void *dst, size_t *
3182 zcs->outBuffFlushedSize = 0; 3182 zcs->outBuffFlushedSize = 0;
3183 zcs->stage = zcss_flush; /* pass-through to flush stage */ 3183 zcs->stage = zcss_flush; /* pass-through to flush stage */
3184 } 3184 }
3185 /* fall through */
3185 3186
3186 case zcss_flush: { 3187 case zcss_flush: {
3187 size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; 3188 size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
diff --git a/lib/zstd/decompress.c b/lib/zstd/decompress.c
index b17846725ca0..269ee9a796c1 100644
--- a/lib/zstd/decompress.c
+++ b/lib/zstd/decompress.c
@@ -1768,6 +1768,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, c
1768 return 0; 1768 return 0;
1769 } 1769 }
1770 dctx->expected = 0; /* not necessary to copy more */ 1770 dctx->expected = 0; /* not necessary to copy more */
1771 /* fall through */
1771 1772
1772 case ZSTDds_decodeFrameHeader: 1773 case ZSTDds_decodeFrameHeader:
1773 memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); 1774 memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected);
@@ -2375,7 +2376,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
2375 } 2376 }
2376 zds->stage = zdss_read; 2377 zds->stage = zdss_read;
2377 } 2378 }
2378 /* pass-through */ 2379 /* fall through */
2379 2380
2380 case zdss_read: { 2381 case zdss_read: {
2381 size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); 2382 size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2404,6 +2405,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
2404 zds->stage = zdss_load; 2405 zds->stage = zdss_load;
2405 /* pass-through */ 2406 /* pass-through */
2406 } 2407 }
2408 /* fall through */
2407 2409
2408 case zdss_load: { 2410 case zdss_load: {
2409 size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); 2411 size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx);
@@ -2436,6 +2438,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, ZSTD_inB
2436 /* pass-through */ 2438 /* pass-through */
2437 } 2439 }
2438 } 2440 }
2441 /* fall through */
2439 2442
2440 case zdss_flush: { 2443 case zdss_flush: {
2441 size_t const toFlushSize = zds->outEnd - zds->outStart; 2444 size_t const toFlushSize = zds->outEnd - zds->outStart;
diff --git a/lib/zstd/huf_compress.c b/lib/zstd/huf_compress.c
index 40055a7016e6..e727812d12aa 100644
--- a/lib/zstd/huf_compress.c
+++ b/lib/zstd/huf_compress.c
@@ -556,7 +556,9 @@ size_t HUF_compress1X_usingCTable(void *dst, size_t dstSize, const void *src, si
556 n = srcSize & ~3; /* join to mod 4 */ 556 n = srcSize & ~3; /* join to mod 4 */
557 switch (srcSize & 3) { 557 switch (srcSize & 3) {
558 case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC); 558 case 3: HUF_encodeSymbol(&bitC, ip[n + 2], CTable); HUF_FLUSHBITS_2(&bitC);
559 /* fall through */
559 case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC); 560 case 2: HUF_encodeSymbol(&bitC, ip[n + 1], CTable); HUF_FLUSHBITS_1(&bitC);
561 /* fall through */
560 case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC); 562 case 1: HUF_encodeSymbol(&bitC, ip[n + 0], CTable); HUF_FLUSHBITS(&bitC);
561 case 0: 563 case 0:
562 default:; 564 default:;
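
All of the zstd hunks are the same idiom: marking deliberate switch fall-through so gcc's -Wimplicit-fallthrough (gcc 7+) stays quiet. The comment has to sit immediately before the next case label to match the compiler's comment pattern. A standalone sketch, with step() as a hypothetical helper:

	static void drain(unsigned int n)
	{
		switch (n & 3) {
		case 3:
			step();
			/* fall through */
		case 2:
			step();
			/* fall through */
		case 1:
			step();
		case 0:
		default:
			;	/* empty body: no annotation needed, which
				 * is why huf_compress.c above leaves the
				 * case 1 -> case 0 edge unmarked */
		}
	}
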