Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                       |   3
-rw-r--r--  lib/Kconfig.debug                 |  32
-rw-r--r--  lib/Makefile                      |   4
-rw-r--r--  lib/assoc_array.c                 |   6
-rw-r--r--  lib/atomic64.c                    |  83
-rw-r--r--  lib/bitmap.c                      |   4
-rw-r--r--  lib/dma-debug.c                   |   2
-rw-r--r--  lib/dynamic_debug.c               |  67
-rw-r--r--  lib/flex_proportions.c            |   8
-rw-r--r--  lib/genalloc.c                    |  50
-rw-r--r--  lib/hexdump.c                     |  16
-rw-r--r--  lib/hweight.c                     |   4
-rw-r--r--  lib/idr.c                         |   2
-rw-r--r--  lib/libcrc32c.c                   |  16
-rw-r--r--  lib/lzo/lzo1x_decompress_safe.c   | 103
-rw-r--r--  lib/percpu-refcount.c             | 299
-rw-r--r--  lib/percpu_counter.c              |  20
-rw-r--r--  lib/prio_heap.c                   |  70
-rw-r--r--  lib/proportions.c                 |  10
-rw-r--r--  lib/random32.c                    |  39
-rw-r--r--  lib/rhashtable.c                  |  25
-rw-r--r--  lib/string.c                      |  32
-rw-r--r--  lib/string_helpers.c              | 312
-rw-r--r--  lib/test-string_helpers.c         | 277
-rw-r--r--  lib/test_bpf.c                    |  63
-rw-r--r--  lib/textsearch.c                  |   4
-rw-r--r--  lib/vsprintf.c                    |  73
27 files changed, 1226 insertions(+), 398 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index a5ce0c7f6c30..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM
51config ARCH_USE_CMPXCHG_LOCKREF 51config ARCH_USE_CMPXCHG_LOCKREF
52 bool 52 bool
53 53
54config ARCH_HAS_FAST_MULTIPLIER
55 bool
56
54config CRC_CCITT 57config CRC_CCITT
55 tristate "CRC-CCITT functions" 58 tristate "CRC-CCITT functions"
56 help 59 help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 07c28323f88f..4e35a5d767ed 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -824,6 +824,18 @@ config SCHEDSTATS
824 application, you can say N to avoid the very slight overhead 824 application, you can say N to avoid the very slight overhead
825 this adds. 825 this adds.
826 826
827config SCHED_STACK_END_CHECK
828 bool "Detect stack corruption on calls to schedule()"
829 depends on DEBUG_KERNEL
830 default n
831 help
832 This option checks for a stack overrun on calls to schedule().
833 If the stack end location is found to be over written always panic as
834 the content of the corrupted region can no longer be trusted.
835 This is to ensure no erroneous behaviour occurs which could result in
836 data corruption or a sporadic crash at a later stage once the region
837 is examined. The runtime overhead introduced is minimal.
838
827config TIMER_STATS 839config TIMER_STATS
828 bool "Collect kernel timers statistics" 840 bool "Collect kernel timers statistics"
829 depends on DEBUG_KERNEL && PROC_FS 841 depends on DEBUG_KERNEL && PROC_FS
@@ -892,6 +904,10 @@ config DEBUG_WW_MUTEX_SLOWPATH
892 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this 904 the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this
893 will test all possible w/w mutex interface abuse with the 905 will test all possible w/w mutex interface abuse with the
894 exception of simply not acquiring all the required locks. 906 exception of simply not acquiring all the required locks.
907 Note that this feature can introduce significant overhead, so
908 it really should not be enabled in a production or distro kernel,
909 even a debug kernel. If you are a driver writer, enable it. If
910 you are a distro, do not.
895 911
896config DEBUG_LOCK_ALLOC 912config DEBUG_LOCK_ALLOC
897 bool "Lock debugging: detect incorrect freeing of live locks" 913 bool "Lock debugging: detect incorrect freeing of live locks"
@@ -948,7 +964,7 @@ config PROVE_LOCKING
948 the proof of observed correctness is also maintained for an 964 the proof of observed correctness is also maintained for an
949 arbitrary combination of these separate locking variants. 965 arbitrary combination of these separate locking variants.
950 966
951 For more details, see Documentation/lockdep-design.txt. 967 For more details, see Documentation/locking/lockdep-design.txt.
952 968
953config LOCKDEP 969config LOCKDEP
954 bool 970 bool
@@ -969,7 +985,7 @@ config LOCK_STAT
969 help 985 help
970 This feature enables tracking lock contention points 986 This feature enables tracking lock contention points
971 987
972 For more details, see Documentation/lockstat.txt 988 For more details, see Documentation/locking/lockstat.txt
973 989
974 This also enables lock events required by "perf lock", 990 This also enables lock events required by "perf lock",
975 subcommand of perf. 991 subcommand of perf.
@@ -1032,8 +1048,13 @@ config TRACE_IRQFLAGS
1032 either tracing or lock debugging. 1048 either tracing or lock debugging.
1033 1049
1034config STACKTRACE 1050config STACKTRACE
1035 bool 1051 bool "Stack backtrace support"
1036 depends on STACKTRACE_SUPPORT 1052 depends on STACKTRACE_SUPPORT
1053 help
1054 This option causes the kernel to create a /proc/pid/stack for
1055 every process, showing its current stack trace.
1056 It is also used by various kernel debugging features that require
1057 stack trace generation.
1037 1058
1038config DEBUG_KOBJECT 1059config DEBUG_KOBJECT
1039 bool "kobject debugging" 1060 bool "kobject debugging"
@@ -1627,7 +1648,7 @@ config DMA_API_DEBUG
1627 1648
1628 If unsure, say N. 1649 If unsure, say N.
1629 1650
1630config TEST_MODULE 1651config TEST_LKM
1631 tristate "Test module loading with 'hello world' module" 1652 tristate "Test module loading with 'hello world' module"
1632 default n 1653 default n
1633 depends on m 1654 depends on m
@@ -1663,7 +1684,8 @@ config TEST_BPF
1663 against the BPF interpreter or BPF JIT compiler depending on the 1684 against the BPF interpreter or BPF JIT compiler depending on the
1664 current setting. This is in particular useful for BPF JIT compiler 1685 current setting. This is in particular useful for BPF JIT compiler
1665 development, but also to run regression tests against changes in 1686 development, but also to run regression tests against changes in
1666 the interpreter code. 1687 the interpreter code. It also enables test stubs for eBPF maps and
1688 verifier used by user space verifier testsuite.
1667 1689
1668 If unsure, say N. 1690 If unsure, say N.
1669 1691
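
Note on SCHED_STACK_END_CHECK: the help text above describes a canary check performed on every call to schedule(). The scheduler-side hook is not part of this hunk, so the following is only an illustrative sketch of the idea; end_of_stack() and STACK_END_MAGIC are existing kernel symbols, the wrapper name is made up:

    /* Illustrative sketch only: a stack-end canary check of the kind
     * SCHED_STACK_END_CHECK enables. */
    static inline void check_stack_end(struct task_struct *tsk)
    {
            /* the canary word is written at the stack end at task setup */
            if (unlikely(*end_of_stack(tsk) != STACK_END_MAGIC))
                    panic("corrupted stack end detected inside scheduler\n");
    }

Since this amounts to one compare per schedule() call, the help text's claim of minimal runtime overhead is plausible.
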
diff --git a/lib/Makefile b/lib/Makefile
index d6b4bc496408..7512dc978f18 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\ 11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
12 idr.o int_sqrt.o extable.o \ 12 idr.o int_sqrt.o extable.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o flex_proportions.o prio_heap.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o 16 earlycpio.o
17 17
@@ -31,7 +31,7 @@ obj-y += string_helpers.o
31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
32obj-y += kstrtox.o 32obj-y += kstrtox.o
33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 33obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
34obj-$(CONFIG_TEST_MODULE) += test_module.o 34obj-$(CONFIG_TEST_LKM) += test_module.o
35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o 35obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
36obj-$(CONFIG_TEST_BPF) += test_bpf.o 36obj-$(CONFIG_TEST_BPF) += test_bpf.o
37obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o 37obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree:
1723 shortcut = assoc_array_ptr_to_shortcut(ptr); 1723 shortcut = assoc_array_ptr_to_shortcut(ptr);
1724 slot = shortcut->parent_slot; 1724 slot = shortcut->parent_slot;
1725 cursor = shortcut->back_pointer; 1725 cursor = shortcut->back_pointer;
1726 if (!cursor)
1727 goto gc_complete;
1726 } else { 1728 } else {
1727 slot = node->parent_slot; 1729 slot = node->parent_slot;
1728 cursor = ptr; 1730 cursor = ptr;
1729 } 1731 }
1730 BUG_ON(!ptr); 1732 BUG_ON(!cursor);
1731 node = assoc_array_ptr_to_node(cursor); 1733 node = assoc_array_ptr_to_node(cursor);
1732 slot++; 1734 slot++;
1733 goto continue_node; 1735 goto continue_node;
@@ -1735,7 +1737,7 @@ ascend_old_tree:
1735gc_complete: 1737gc_complete:
1736 edit->set[0].to = new_root; 1738 edit->set[0].to = new_root;
1737 assoc_array_apply_edit(edit); 1739 assoc_array_apply_edit(edit);
1738 edit->array->nr_leaves_on_tree = nr_leaves_on_tree; 1740 array->nr_leaves_on_tree = nr_leaves_on_tree;
1739 return 0; 1741 return 0;
1740 1742
1741enomem: 1743enomem:
diff --git a/lib/atomic64.c b/lib/atomic64.c
index 08a4f068e61e..1298c05ef528 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -70,53 +70,42 @@ void atomic64_set(atomic64_t *v, long long i)
70} 70}
71EXPORT_SYMBOL(atomic64_set); 71EXPORT_SYMBOL(atomic64_set);
72 72
73void atomic64_add(long long a, atomic64_t *v) 73#define ATOMIC64_OP(op, c_op) \
74{ 74void atomic64_##op(long long a, atomic64_t *v) \
75 unsigned long flags; 75{ \
76 raw_spinlock_t *lock = lock_addr(v); 76 unsigned long flags; \
77 77 raw_spinlock_t *lock = lock_addr(v); \
78 raw_spin_lock_irqsave(lock, flags); 78 \
79 v->counter += a; 79 raw_spin_lock_irqsave(lock, flags); \
80 raw_spin_unlock_irqrestore(lock, flags); 80 v->counter c_op a; \
81} 81 raw_spin_unlock_irqrestore(lock, flags); \
82EXPORT_SYMBOL(atomic64_add); 82} \
83 83EXPORT_SYMBOL(atomic64_##op);
84long long atomic64_add_return(long long a, atomic64_t *v) 84
85{ 85#define ATOMIC64_OP_RETURN(op, c_op) \
86 unsigned long flags; 86long long atomic64_##op##_return(long long a, atomic64_t *v) \
87 raw_spinlock_t *lock = lock_addr(v); 87{ \
88 long long val; 88 unsigned long flags; \
89 89 raw_spinlock_t *lock = lock_addr(v); \
90 raw_spin_lock_irqsave(lock, flags); 90 long long val; \
91 val = v->counter += a; 91 \
92 raw_spin_unlock_irqrestore(lock, flags); 92 raw_spin_lock_irqsave(lock, flags); \
93 return val; 93 val = (v->counter c_op a); \
94} 94 raw_spin_unlock_irqrestore(lock, flags); \
95EXPORT_SYMBOL(atomic64_add_return); 95 return val; \
96 96} \
97void atomic64_sub(long long a, atomic64_t *v) 97EXPORT_SYMBOL(atomic64_##op##_return);
98{ 98
99 unsigned long flags; 99#define ATOMIC64_OPS(op, c_op) \
100 raw_spinlock_t *lock = lock_addr(v); 100 ATOMIC64_OP(op, c_op) \
101 101 ATOMIC64_OP_RETURN(op, c_op)
102 raw_spin_lock_irqsave(lock, flags); 102
103 v->counter -= a; 103ATOMIC64_OPS(add, +=)
104 raw_spin_unlock_irqrestore(lock, flags); 104ATOMIC64_OPS(sub, -=)
105} 105
106EXPORT_SYMBOL(atomic64_sub); 106#undef ATOMIC64_OPS
107 107#undef ATOMIC64_OP_RETURN
108long long atomic64_sub_return(long long a, atomic64_t *v) 108#undef ATOMIC64_OP
109{
110 unsigned long flags;
111 raw_spinlock_t *lock = lock_addr(v);
112 long long val;
113
114 raw_spin_lock_irqsave(lock, flags);
115 val = v->counter -= a;
116 raw_spin_unlock_irqrestore(lock, flags);
117 return val;
118}
119EXPORT_SYMBOL(atomic64_sub_return);
120 109
121long long atomic64_dec_if_positive(atomic64_t *v) 110long long atomic64_dec_if_positive(atomic64_t *v)
122{ 111{
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 1e031f2c9aba..cd250a2e14cb 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -884,7 +884,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
884 * read it, you're overqualified for your current job.) 884 * read it, you're overqualified for your current job.)
885 * 885 *
886 * In other words, @orig is mapped onto (surjectively) @dst, 886 * In other words, @orig is mapped onto (surjectively) @dst,
887 * using the the map { <n, m> | the n-th bit of @relmap is the 887 * using the map { <n, m> | the n-th bit of @relmap is the
888 * m-th set bit of @relmap }. 888 * m-th set bit of @relmap }.
889 * 889 *
890 * Any set bits in @orig above bit number W, where W is the 890 * Any set bits in @orig above bit number W, where W is the
@@ -932,7 +932,7 @@ EXPORT_SYMBOL(bitmap_bitremap);
932 * 932 *
933 * Further lets say we use the following code, invoking 933 * Further lets say we use the following code, invoking
934 * bitmap_fold() then bitmap_onto, as suggested above to 934 * bitmap_fold() then bitmap_onto, as suggested above to
935 * avoid the possitility of an empty @dst result: 935 * avoid the possibility of an empty @dst result:
936 * 936 *
937 * unsigned long *tmp; // a temporary bitmap's bits 937 * unsigned long *tmp; // a temporary bitmap's bits
938 * 938 *
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 98f2d7e91a91..add80cc02dbe 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -1149,7 +1149,7 @@ static void check_unmap(struct dma_debug_entry *ref)
1149static void check_for_stack(struct device *dev, void *addr) 1149static void check_for_stack(struct device *dev, void *addr)
1150{ 1150{
1151 if (object_is_on_stack(addr)) 1151 if (object_is_on_stack(addr))
1152 err_printk(dev, NULL, "DMA-API: device driver maps memory from" 1152 err_printk(dev, NULL, "DMA-API: device driver maps memory from "
1153 "stack [addr=%p]\n", addr); 1153 "stack [addr=%p]\n", addr);
1154} 1154}
1155 1155
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c9afbe2c445a..dfba05521748 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -537,10 +537,9 @@ static char *dynamic_emit_prefix(const struct _ddebug *desc, char *buf)
537 return buf; 537 return buf;
538} 538}
539 539
540int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...) 540void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
541{ 541{
542 va_list args; 542 va_list args;
543 int res;
544 struct va_format vaf; 543 struct va_format vaf;
545 char buf[PREFIX_SIZE]; 544 char buf[PREFIX_SIZE];
546 545
@@ -552,21 +551,17 @@ int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...)
552 vaf.fmt = fmt; 551 vaf.fmt = fmt;
553 vaf.va = &args; 552 vaf.va = &args;
554 553
555 res = printk(KERN_DEBUG "%s%pV", 554 printk(KERN_DEBUG "%s%pV", dynamic_emit_prefix(descriptor, buf), &vaf);
556 dynamic_emit_prefix(descriptor, buf), &vaf);
557 555
558 va_end(args); 556 va_end(args);
559
560 return res;
561} 557}
562EXPORT_SYMBOL(__dynamic_pr_debug); 558EXPORT_SYMBOL(__dynamic_pr_debug);
563 559
564int __dynamic_dev_dbg(struct _ddebug *descriptor, 560void __dynamic_dev_dbg(struct _ddebug *descriptor,
565 const struct device *dev, const char *fmt, ...) 561 const struct device *dev, const char *fmt, ...)
566{ 562{
567 struct va_format vaf; 563 struct va_format vaf;
568 va_list args; 564 va_list args;
569 int res;
570 565
571 BUG_ON(!descriptor); 566 BUG_ON(!descriptor);
572 BUG_ON(!fmt); 567 BUG_ON(!fmt);
@@ -577,30 +572,27 @@ int __dynamic_dev_dbg(struct _ddebug *descriptor,
577 vaf.va = &args; 572 vaf.va = &args;
578 573
579 if (!dev) { 574 if (!dev) {
580 res = printk(KERN_DEBUG "(NULL device *): %pV", &vaf); 575 printk(KERN_DEBUG "(NULL device *): %pV", &vaf);
581 } else { 576 } else {
582 char buf[PREFIX_SIZE]; 577 char buf[PREFIX_SIZE];
583 578
584 res = dev_printk_emit(7, dev, "%s%s %s: %pV", 579 dev_printk_emit(7, dev, "%s%s %s: %pV",
585 dynamic_emit_prefix(descriptor, buf), 580 dynamic_emit_prefix(descriptor, buf),
586 dev_driver_string(dev), dev_name(dev), 581 dev_driver_string(dev), dev_name(dev),
587 &vaf); 582 &vaf);
588 } 583 }
589 584
590 va_end(args); 585 va_end(args);
591
592 return res;
593} 586}
594EXPORT_SYMBOL(__dynamic_dev_dbg); 587EXPORT_SYMBOL(__dynamic_dev_dbg);
595 588
596#ifdef CONFIG_NET 589#ifdef CONFIG_NET
597 590
598int __dynamic_netdev_dbg(struct _ddebug *descriptor, 591void __dynamic_netdev_dbg(struct _ddebug *descriptor,
599 const struct net_device *dev, const char *fmt, ...) 592 const struct net_device *dev, const char *fmt, ...)
600{ 593{
601 struct va_format vaf; 594 struct va_format vaf;
602 va_list args; 595 va_list args;
603 int res;
604 596
605 BUG_ON(!descriptor); 597 BUG_ON(!descriptor);
606 BUG_ON(!fmt); 598 BUG_ON(!fmt);
@@ -613,23 +605,21 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
613 if (dev && dev->dev.parent) { 605 if (dev && dev->dev.parent) {
614 char buf[PREFIX_SIZE]; 606 char buf[PREFIX_SIZE];
615 607
616 res = dev_printk_emit(7, dev->dev.parent, 608 dev_printk_emit(7, dev->dev.parent,
617 "%s%s %s %s%s: %pV", 609 "%s%s %s %s%s: %pV",
618 dynamic_emit_prefix(descriptor, buf), 610 dynamic_emit_prefix(descriptor, buf),
619 dev_driver_string(dev->dev.parent), 611 dev_driver_string(dev->dev.parent),
620 dev_name(dev->dev.parent), 612 dev_name(dev->dev.parent),
621 netdev_name(dev), netdev_reg_state(dev), 613 netdev_name(dev), netdev_reg_state(dev),
622 &vaf); 614 &vaf);
623 } else if (dev) { 615 } else if (dev) {
624 res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev), 616 printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
625 netdev_reg_state(dev), &vaf); 617 netdev_reg_state(dev), &vaf);
626 } else { 618 } else {
627 res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); 619 printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
628 } 620 }
629 621
630 va_end(args); 622 va_end(args);
631
632 return res;
633} 623}
634EXPORT_SYMBOL(__dynamic_netdev_dbg); 624EXPORT_SYMBOL(__dynamic_netdev_dbg);
635 625
@@ -829,22 +819,9 @@ static const struct seq_operations ddebug_proc_seqops = {
829 */ 819 */
830static int ddebug_proc_open(struct inode *inode, struct file *file) 820static int ddebug_proc_open(struct inode *inode, struct file *file)
831{ 821{
832 struct ddebug_iter *iter;
833 int err;
834
835 vpr_info("called\n"); 822 vpr_info("called\n");
836 823 return seq_open_private(file, &ddebug_proc_seqops,
837 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 824 sizeof(struct ddebug_iter));
838 if (iter == NULL)
839 return -ENOMEM;
840
841 err = seq_open(file, &ddebug_proc_seqops);
842 if (err) {
843 kfree(iter);
844 return err;
845 }
846 ((struct seq_file *)file->private_data)->private = iter;
847 return 0;
848} 825}
849 826
850static const struct file_operations ddebug_proc_fops = { 827static const struct file_operations ddebug_proc_fops = {
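
Besides dropping the unused printk() return values, the hunk above collapses ddebug_proc_open() into a single seq_open_private() call. The general pattern, for any seq_file that needs a per-open iterator, looks like this (the names below are made-up placeholders):

    /* Sketch: seq_open_private() allocates a zeroed private area of the
     * given size, opens the seq_file and stores the allocation in
     * seq_file->private, undoing the allocation if seq_open() fails. */
    static int example_proc_open(struct inode *inode, struct file *file)
    {
            return seq_open_private(file, &example_seq_ops,
                                    sizeof(struct example_iter));
    }

The matching release side of this pattern is seq_release_private(), which frees the private area again.
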
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac460b0..8f25652f40d4 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -34,13 +34,13 @@
34 */ 34 */
35#include <linux/flex_proportions.h> 35#include <linux/flex_proportions.h>
36 36
37int fprop_global_init(struct fprop_global *p) 37int fprop_global_init(struct fprop_global *p, gfp_t gfp)
38{ 38{
39 int err; 39 int err;
40 40
41 p->period = 0; 41 p->period = 0;
42 /* Use 1 to avoid dealing with periods with 0 events... */ 42 /* Use 1 to avoid dealing with periods with 0 events... */
43 err = percpu_counter_init(&p->events, 1); 43 err = percpu_counter_init(&p->events, 1, gfp);
44 if (err) 44 if (err)
45 return err; 45 return err;
46 seqcount_init(&p->sequence); 46 seqcount_init(&p->sequence);
@@ -168,11 +168,11 @@ void fprop_fraction_single(struct fprop_global *p,
168 */ 168 */
169#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) 169#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
170 170
171int fprop_local_init_percpu(struct fprop_local_percpu *pl) 171int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
172{ 172{
173 int err; 173 int err;
174 174
175 err = percpu_counter_init(&pl->events, 0); 175 err = percpu_counter_init(&pl->events, 0, gfp);
176 if (err) 176 if (err)
177 return err; 177 return err;
178 pl->period = 0; 178 pl->period = 0;
diff --git a/lib/genalloc.c b/lib/genalloc.c
index bdb9a456bcbb..cce4dd68c40d 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -403,6 +403,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
403EXPORT_SYMBOL(gen_pool_for_each_chunk); 403EXPORT_SYMBOL(gen_pool_for_each_chunk);
404 404
405/** 405/**
406 * addr_in_gen_pool - checks if an address falls within the range of a pool
407 * @pool: the generic memory pool
408 * @start: start address
409 * @size: size of the region
410 *
411 * Check if the range of addresses falls within the specified pool. Returns
412 * true if the entire range is contained in the pool and false otherwise.
413 */
414bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
415 size_t size)
416{
417 bool found = false;
418 unsigned long end = start + size;
419 struct gen_pool_chunk *chunk;
420
421 rcu_read_lock();
422 list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
423 if (start >= chunk->start_addr && start <= chunk->end_addr) {
424 if (end <= chunk->end_addr) {
425 found = true;
426 break;
427 }
428 }
429 }
430 rcu_read_unlock();
431 return found;
432}
433
434/**
406 * gen_pool_avail - get available free space of the pool 435 * gen_pool_avail - get available free space of the pool
407 * @pool: pool to get available free space 436 * @pool: pool to get available free space
408 * 437 *
@@ -481,6 +510,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
481EXPORT_SYMBOL(gen_pool_first_fit); 510EXPORT_SYMBOL(gen_pool_first_fit);
482 511
483/** 512/**
513 * gen_pool_first_fit_order_align - find the first available region
514 * of memory matching the size requirement. The region will be aligned
515 * to the order of the size specified.
516 * @map: The address to base the search on
517 * @size: The bitmap size in bits
518 * @start: The bitnumber to start searching at
519 * @nr: The number of zeroed bits we're looking for
520 * @data: additional data - unused
521 */
522unsigned long gen_pool_first_fit_order_align(unsigned long *map,
523 unsigned long size, unsigned long start,
524 unsigned int nr, void *data)
525{
526 unsigned long align_mask = roundup_pow_of_two(nr) - 1;
527
528 return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
529}
530EXPORT_SYMBOL(gen_pool_first_fit_order_align);
531
532/**
484 * gen_pool_best_fit - find the best fitting region of memory 533 * gen_pool_best_fit - find the best fitting region of memory
485 * macthing the size requirement (no alignment constraint) 534 * macthing the size requirement (no alignment constraint)
486 * @map: The address to base the search on 535 * @map: The address to base the search on
@@ -588,6 +637,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np,
588 if (!np_pool) 637 if (!np_pool)
589 return NULL; 638 return NULL;
590 pdev = of_find_device_by_node(np_pool); 639 pdev = of_find_device_by_node(np_pool);
640 of_node_put(np_pool);
591 if (!pdev) 641 if (!pdev)
592 return NULL; 642 return NULL;
593 return dev_get_gen_pool(&pdev->dev); 643 return dev_get_gen_pool(&pdev->dev);
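
The genalloc.c hunks add two helpers: gen_pool_first_fit_order_align(), an allocation algorithm that aligns each allocation to the order of its size, and addr_in_gen_pool(), which reports whether a whole address range lies inside one of the pool's chunks (plus an of_node_put() to drop the node reference taken by the lookup). A hedged usage sketch, with the chunk address, sizes and granule order invented and error handling omitted:

    static unsigned long example_pool_alloc(unsigned long chunk_va)
    {
            struct gen_pool *pool = gen_pool_create(ilog2(256), -1); /* 256 B granules */
            unsigned long vaddr;

            gen_pool_add(pool, chunk_va, SZ_1M, -1);

            /* order-aligned first fit: the align mask is
             * roundup_pow_of_two(nr) - 1, so a 4 KiB request lands on a
             * 4 KiB boundary within the chunk */
            gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

            vaddr = gen_pool_alloc(pool, SZ_4K);

            /* true only if [vaddr, vaddr + SZ_4K) is wholly inside the pool */
            if (vaddr && addr_in_gen_pool(pool, vaddr, SZ_4K))
                    pr_debug("allocation lies inside the pool\n");

            return vaddr;
    }
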
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 8499c810909a..270773b91923 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -59,6 +59,22 @@ int hex2bin(u8 *dst, const char *src, size_t count)
59EXPORT_SYMBOL(hex2bin); 59EXPORT_SYMBOL(hex2bin);
60 60
61/** 61/**
62 * bin2hex - convert binary data to an ascii hexadecimal string
63 * @dst: ascii hexadecimal result
64 * @src: binary data
65 * @count: binary data length
66 */
67char *bin2hex(char *dst, const void *src, size_t count)
68{
69 const unsigned char *_src = src;
70
71 while (count--)
72 dst = hex_byte_pack(dst, *_src++);
73 return dst;
74}
75EXPORT_SYMBOL(bin2hex);
76
77/**
62 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory 78 * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
63 * @buf: data blob to dump 79 * @buf: data blob to dump
64 * @len: number of bytes in the @buf 80 * @len: number of bytes in the @buf
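
bin2hex() is the inverse of the existing hex2bin(): it emits two lowercase hex digits per input byte and returns a pointer just past the last character written. Note that it does not NUL-terminate. A small usage sketch (the function and buffer names are illustrative):

    static void print_mac_hex(void)
    {
            u8 mac[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
            char buf[2 * sizeof(mac) + 1];
            char *p = bin2hex(buf, mac, sizeof(mac));

            *p = '\0';                      /* buf now holds "001a2b3c4d5e" */
            pr_info("mac: %s\n", buf);
    }
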
diff --git a/lib/hweight.c b/lib/hweight.c
index b7d81ba143d1..9a5c1f221558 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,7 +11,7 @@
11 11
12unsigned int __sw_hweight32(unsigned int w) 12unsigned int __sw_hweight32(unsigned int w)
13{ 13{
14#ifdef ARCH_HAS_FAST_MULTIPLIER 14#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
15 w -= (w >> 1) & 0x55555555; 15 w -= (w >> 1) & 0x55555555;
16 w = (w & 0x33333333) + ((w >> 2) & 0x33333333); 16 w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
17 w = (w + (w >> 4)) & 0x0f0f0f0f; 17 w = (w + (w >> 4)) & 0x0f0f0f0f;
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w)
49 return __sw_hweight32((unsigned int)(w >> 32)) + 49 return __sw_hweight32((unsigned int)(w >> 32)) +
50 __sw_hweight32((unsigned int)w); 50 __sw_hweight32((unsigned int)w);
51#elif BITS_PER_LONG == 64 51#elif BITS_PER_LONG == 64
52#ifdef ARCH_HAS_FAST_MULTIPLIER 52#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
53 w -= (w >> 1) & 0x5555555555555555ul; 53 w -= (w >> 1) & 0x5555555555555555ul;
54 w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); 54 w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
55 w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; 55 w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
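
This is the functional half of the ARCH_HAS_FAST_MULTIPLIER Kconfig symbol added in lib/Kconfig above: the #ifdefs now test the CONFIG_-prefixed symbol selected by architectures in Kconfig, where previously the bare macro had to be visible in this file for the fast path to be compiled in. For orientation, a self-contained 32-bit population count showing the two endings (the helper name is made up; the constants match the code above):

    unsigned int popcount32(unsigned int w)
    {
            w -= (w >> 1) & 0x55555555;                         /* 2-bit sums    */
            w  = (w & 0x33333333) + ((w >> 2) & 0x33333333);    /* 4-bit sums    */
            w  = (w + (w >> 4)) & 0x0f0f0f0f;                   /* per-byte sums */
    #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
            return (w * 0x01010101) >> 24;  /* one multiply adds the four bytes */
    #else
            w += w >> 8;
            return (w + (w >> 16)) & 0xff;  /* shift-and-add fallback */
    #endif
    }
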
diff --git a/lib/idr.c b/lib/idr.c
index 50be3fa9b657..e654aebd5f80 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -626,7 +626,7 @@ static void __idr_remove_all(struct idr *idp)
626 * idr_destroy(). 626 * idr_destroy().
627 * 627 *
628 * A typical clean-up sequence for objects stored in an idr tree will use 628 * A typical clean-up sequence for objects stored in an idr tree will use
629 * idr_for_each() to free all objects, if necessay, then idr_destroy() to 629 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
630 * free up the id mappings and cached idr_layers. 630 * free up the id mappings and cached idr_layers.
631 */ 631 */
632void idr_destroy(struct idr *idp) 632void idr_destroy(struct idr *idp)
diff --git a/lib/libcrc32c.c b/lib/libcrc32c.c
index b3131f5cf8a2..6a08ce7d6adc 100644
--- a/lib/libcrc32c.c
+++ b/lib/libcrc32c.c
@@ -41,20 +41,18 @@ static struct crypto_shash *tfm;
41 41
42u32 crc32c(u32 crc, const void *address, unsigned int length) 42u32 crc32c(u32 crc, const void *address, unsigned int length)
43{ 43{
44 struct { 44 SHASH_DESC_ON_STACK(shash, tfm);
45 struct shash_desc shash; 45 u32 *ctx = (u32 *)shash_desc_ctx(shash);
46 char ctx[crypto_shash_descsize(tfm)];
47 } desc;
48 int err; 46 int err;
49 47
50 desc.shash.tfm = tfm; 48 shash->tfm = tfm;
51 desc.shash.flags = 0; 49 shash->flags = 0;
52 *(u32 *)desc.ctx = crc; 50 *ctx = crc;
53 51
54 err = crypto_shash_update(&desc.shash, address, length); 52 err = crypto_shash_update(shash, address, length);
55 BUG_ON(err); 53 BUG_ON(err);
56 54
57 return *(u32 *)desc.ctx; 55 return *ctx;
58} 56}
59 57
60EXPORT_SYMBOL(crc32c); 58EXPORT_SYMBOL(crc32c);
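
The libcrc32c.c change swaps an open-coded on-stack struct with a variable-length ctx[] member for the SHASH_DESC_ON_STACK() helper, which hides the descriptor-plus-context allocation behind one macro and guarantees proper alignment. The same pattern for any synchronous hash user looks roughly like this (function name made up, error handling trimmed):

    static int shash_digest_buf(struct crypto_shash *tfm,
                                const void *data, unsigned int len, u8 *out)
    {
            SHASH_DESC_ON_STACK(desc, tfm);  /* declares struct shash_desc *desc */

            desc->tfm = tfm;
            desc->flags = 0;

            return crypto_shash_digest(desc, data, len, out);
    }
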
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 8563081e8da3..a1c387f6afba 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,31 +19,21 @@
19#include <linux/lzo.h> 19#include <linux/lzo.h>
20#include "lzodefs.h" 20#include "lzodefs.h"
21 21
22#define HAVE_IP(t, x) \ 22#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x))
23 (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \ 23#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x))
24 (((t + x) >= t) && ((t + x) >= x))) 24#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun
25#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun
26#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun
25 27
26#define HAVE_OP(t, x) \ 28/* This MAX_255_COUNT is the maximum number of times we can add 255 to a base
27 (((size_t)(op_end - op) >= (size_t)(t + x)) && \ 29 * count without overflowing an integer. The multiply will overflow when
28 (((t + x) >= t) && ((t + x) >= x))) 30 * multiplying 255 by more than MAXINT/255. The sum will overflow earlier
29 31 * depending on the base count. Since the base count is taken from a u8
30#define NEED_IP(t, x) \ 32 * and a few bits, it is safe to assume that it will always be lower than
31 do { \ 33 * or equal to 2*255, thus we can always prevent any overflow by accepting
32 if (!HAVE_IP(t, x)) \ 34 * two less 255 steps. See Documentation/lzo.txt for more information.
33 goto input_overrun; \ 35 */
34 } while (0) 36#define MAX_255_COUNT ((((size_t)~0) / 255) - 2)
35
36#define NEED_OP(t, x) \
37 do { \
38 if (!HAVE_OP(t, x)) \
39 goto output_overrun; \
40 } while (0)
41
42#define TEST_LB(m_pos) \
43 do { \
44 if ((m_pos) < out) \
45 goto lookbehind_overrun; \
46 } while (0)
47 37
48int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, 38int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
49 unsigned char *out, size_t *out_len) 39 unsigned char *out, size_t *out_len)
@@ -75,17 +65,24 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
75 if (t < 16) { 65 if (t < 16) {
76 if (likely(state == 0)) { 66 if (likely(state == 0)) {
77 if (unlikely(t == 0)) { 67 if (unlikely(t == 0)) {
68 size_t offset;
69 const unsigned char *ip_last = ip;
70
78 while (unlikely(*ip == 0)) { 71 while (unlikely(*ip == 0)) {
79 t += 255;
80 ip++; 72 ip++;
81 NEED_IP(1, 0); 73 NEED_IP(1);
82 } 74 }
83 t += 15 + *ip++; 75 offset = ip - ip_last;
76 if (unlikely(offset > MAX_255_COUNT))
77 return LZO_E_ERROR;
78
79 offset = (offset << 8) - offset;
80 t += offset + 15 + *ip++;
84 } 81 }
85 t += 3; 82 t += 3;
86copy_literal_run: 83copy_literal_run:
87#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 84#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
88 if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) { 85 if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
89 const unsigned char *ie = ip + t; 86 const unsigned char *ie = ip + t;
90 unsigned char *oe = op + t; 87 unsigned char *oe = op + t;
91 do { 88 do {
@@ -101,8 +98,8 @@ copy_literal_run:
101 } else 98 } else
102#endif 99#endif
103 { 100 {
104 NEED_OP(t, 0); 101 NEED_OP(t);
105 NEED_IP(t, 3); 102 NEED_IP(t + 3);
106 do { 103 do {
107 *op++ = *ip++; 104 *op++ = *ip++;
108 } while (--t > 0); 105 } while (--t > 0);
@@ -115,7 +112,7 @@ copy_literal_run:
115 m_pos -= t >> 2; 112 m_pos -= t >> 2;
116 m_pos -= *ip++ << 2; 113 m_pos -= *ip++ << 2;
117 TEST_LB(m_pos); 114 TEST_LB(m_pos);
118 NEED_OP(2, 0); 115 NEED_OP(2);
119 op[0] = m_pos[0]; 116 op[0] = m_pos[0];
120 op[1] = m_pos[1]; 117 op[1] = m_pos[1];
121 op += 2; 118 op += 2;
@@ -136,13 +133,20 @@ copy_literal_run:
136 } else if (t >= 32) { 133 } else if (t >= 32) {
137 t = (t & 31) + (3 - 1); 134 t = (t & 31) + (3 - 1);
138 if (unlikely(t == 2)) { 135 if (unlikely(t == 2)) {
136 size_t offset;
137 const unsigned char *ip_last = ip;
138
139 while (unlikely(*ip == 0)) { 139 while (unlikely(*ip == 0)) {
140 t += 255;
141 ip++; 140 ip++;
142 NEED_IP(1, 0); 141 NEED_IP(1);
143 } 142 }
144 t += 31 + *ip++; 143 offset = ip - ip_last;
145 NEED_IP(2, 0); 144 if (unlikely(offset > MAX_255_COUNT))
145 return LZO_E_ERROR;
146
147 offset = (offset << 8) - offset;
148 t += offset + 31 + *ip++;
149 NEED_IP(2);
146 } 150 }
147 m_pos = op - 1; 151 m_pos = op - 1;
148 next = get_unaligned_le16(ip); 152 next = get_unaligned_le16(ip);
@@ -154,13 +158,20 @@ copy_literal_run:
154 m_pos -= (t & 8) << 11; 158 m_pos -= (t & 8) << 11;
155 t = (t & 7) + (3 - 1); 159 t = (t & 7) + (3 - 1);
156 if (unlikely(t == 2)) { 160 if (unlikely(t == 2)) {
161 size_t offset;
162 const unsigned char *ip_last = ip;
163
157 while (unlikely(*ip == 0)) { 164 while (unlikely(*ip == 0)) {
158 t += 255;
159 ip++; 165 ip++;
160 NEED_IP(1, 0); 166 NEED_IP(1);
161 } 167 }
162 t += 7 + *ip++; 168 offset = ip - ip_last;
163 NEED_IP(2, 0); 169 if (unlikely(offset > MAX_255_COUNT))
170 return LZO_E_ERROR;
171
172 offset = (offset << 8) - offset;
173 t += offset + 7 + *ip++;
174 NEED_IP(2);
164 } 175 }
165 next = get_unaligned_le16(ip); 176 next = get_unaligned_le16(ip);
166 ip += 2; 177 ip += 2;
@@ -174,7 +185,7 @@ copy_literal_run:
174#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 185#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
175 if (op - m_pos >= 8) { 186 if (op - m_pos >= 8) {
176 unsigned char *oe = op + t; 187 unsigned char *oe = op + t;
177 if (likely(HAVE_OP(t, 15))) { 188 if (likely(HAVE_OP(t + 15))) {
178 do { 189 do {
179 COPY8(op, m_pos); 190 COPY8(op, m_pos);
180 op += 8; 191 op += 8;
@@ -184,7 +195,7 @@ copy_literal_run:
184 m_pos += 8; 195 m_pos += 8;
185 } while (op < oe); 196 } while (op < oe);
186 op = oe; 197 op = oe;
187 if (HAVE_IP(6, 0)) { 198 if (HAVE_IP(6)) {
188 state = next; 199 state = next;
189 COPY4(op, ip); 200 COPY4(op, ip);
190 op += next; 201 op += next;
@@ -192,7 +203,7 @@ copy_literal_run:
192 continue; 203 continue;
193 } 204 }
194 } else { 205 } else {
195 NEED_OP(t, 0); 206 NEED_OP(t);
196 do { 207 do {
197 *op++ = *m_pos++; 208 *op++ = *m_pos++;
198 } while (op < oe); 209 } while (op < oe);
@@ -201,7 +212,7 @@ copy_literal_run:
201#endif 212#endif
202 { 213 {
203 unsigned char *oe = op + t; 214 unsigned char *oe = op + t;
204 NEED_OP(t, 0); 215 NEED_OP(t);
205 op[0] = m_pos[0]; 216 op[0] = m_pos[0];
206 op[1] = m_pos[1]; 217 op[1] = m_pos[1];
207 op += 2; 218 op += 2;
@@ -214,15 +225,15 @@ match_next:
214 state = next; 225 state = next;
215 t = next; 226 t = next;
216#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 227#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
217 if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) { 228 if (likely(HAVE_IP(6) && HAVE_OP(4))) {
218 COPY4(op, ip); 229 COPY4(op, ip);
219 op += t; 230 op += t;
220 ip += t; 231 ip += t;
221 } else 232 } else
222#endif 233#endif
223 { 234 {
224 NEED_IP(t, 3); 235 NEED_IP(t + 3);
225 NEED_OP(t, 0); 236 NEED_OP(t);
226 while (t > 0) { 237 while (t > 0) {
227 *op++ = *ip++; 238 *op++ = *ip++;
228 t--; 239 t--;
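
The recurring pattern in the three hunks above is the fix itself: the old loops did t += 255 for every zero byte, so a long enough run of zeros in a crafted stream could overflow t before the copy-length checks ran. The new code counts the zero bytes first, rejects counts above MAX_255_COUNT, and converts once ((offset << 8) - offset is offset * 255). A compressed illustration of the accounting, not the kernel code itself (the helper name and structure are invented, and the NEED_IP bounds checks on ip are omitted):

    #define MAX_255_COUNT  ((((size_t)~0) / 255) - 2)

    static int decode_run_length(const unsigned char **ipp, size_t *t, size_t base)
    {
            const unsigned char *ip = *ipp, *ip_last = *ipp;
            size_t zeros;

            while (*ip == 0)
                    ip++;
            zeros = ip - ip_last;
            if (zeros > MAX_255_COUNT)
                    return -1;              /* length would overflow size_t */

            *t += zeros * 255 + base + *ip++;   /* base is 15, 31 or 7 above */
            *ipp = ip;
            return 0;
    }
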
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index fe5a3342e960..6111bcb28376 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -1,6 +1,8 @@
1#define pr_fmt(fmt) "%s: " fmt "\n", __func__ 1#define pr_fmt(fmt) "%s: " fmt "\n", __func__
2 2
3#include <linux/kernel.h> 3#include <linux/kernel.h>
4#include <linux/sched.h>
5#include <linux/wait.h>
4#include <linux/percpu-refcount.h> 6#include <linux/percpu-refcount.h>
5 7
6/* 8/*
@@ -11,8 +13,8 @@
11 * percpu counters will all sum to the correct value 13 * percpu counters will all sum to the correct value
12 * 14 *
13 * (More precisely: because moduler arithmatic is commutative the sum of all the 15 * (More precisely: because moduler arithmatic is commutative the sum of all the
14 * pcpu_count vars will be equal to what it would have been if all the gets and 16 * percpu_count vars will be equal to what it would have been if all the gets
15 * puts were done to a single integer, even if some of the percpu integers 17 * and puts were done to a single integer, even if some of the percpu integers
16 * overflow or underflow). 18 * overflow or underflow).
17 * 19 *
18 * The real trick to implementing percpu refcounts is shutdown. We can't detect 20 * The real trick to implementing percpu refcounts is shutdown. We can't detect
@@ -25,75 +27,64 @@
25 * works. 27 * works.
26 * 28 *
27 * Converting to non percpu mode is done with some RCUish stuff in 29 * Converting to non percpu mode is done with some RCUish stuff in
28 * percpu_ref_kill. Additionally, we need a bias value so that the atomic_t 30 * percpu_ref_kill. Additionally, we need a bias value so that the
29 * can't hit 0 before we've added up all the percpu refs. 31 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
30 */ 32 */
31 33
32#define PCPU_COUNT_BIAS (1U << 31) 34#define PERCPU_COUNT_BIAS (1LU << (BITS_PER_LONG - 1))
33 35
34static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref) 36static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);
37
38static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
35{ 39{
36 return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD); 40 return (unsigned long __percpu *)
41 (ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
37} 42}
38 43
39/** 44/**
40 * percpu_ref_init - initialize a percpu refcount 45 * percpu_ref_init - initialize a percpu refcount
41 * @ref: percpu_ref to initialize 46 * @ref: percpu_ref to initialize
42 * @release: function which will be called when refcount hits 0 47 * @release: function which will be called when refcount hits 0
48 * @flags: PERCPU_REF_INIT_* flags
49 * @gfp: allocation mask to use
43 * 50 *
44 * Initializes the refcount in single atomic counter mode with a refcount of 1; 51 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
45 * analagous to atomic_set(ref, 1). 52 * refcount of 1; analagous to atomic_long_set(ref, 1). See the
53 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
46 * 54 *
47 * Note that @release must not sleep - it may potentially be called from RCU 55 * Note that @release must not sleep - it may potentially be called from RCU
48 * callback context by percpu_ref_kill(). 56 * callback context by percpu_ref_kill().
49 */ 57 */
50int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release) 58int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
59 unsigned int flags, gfp_t gfp)
51{ 60{
52 atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS); 61 size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
62 __alignof__(unsigned long));
63 unsigned long start_count = 0;
53 64
54 ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned); 65 ref->percpu_count_ptr = (unsigned long)
55 if (!ref->pcpu_count_ptr) 66 __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
67 if (!ref->percpu_count_ptr)
56 return -ENOMEM; 68 return -ENOMEM;
57 69
58 ref->release = release; 70 ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
59 return 0;
60}
61EXPORT_SYMBOL_GPL(percpu_ref_init);
62 71
63/** 72 if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
64 * percpu_ref_reinit - re-initialize a percpu refcount 73 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
65 * @ref: perpcu_ref to re-initialize 74 else
66 * 75 start_count += PERCPU_COUNT_BIAS;
67 * Re-initialize @ref so that it's in the same state as when it finished
68 * percpu_ref_init(). @ref must have been initialized successfully, killed
69 * and reached 0 but not exited.
70 *
71 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
72 * this function is in progress.
73 */
74void percpu_ref_reinit(struct percpu_ref *ref)
75{
76 unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
77 int cpu;
78
79 BUG_ON(!pcpu_count);
80 WARN_ON(!percpu_ref_is_zero(ref));
81 76
82 atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS); 77 if (flags & PERCPU_REF_INIT_DEAD)
78 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
79 else
80 start_count++;
83 81
84 /* 82 atomic_long_set(&ref->count, start_count);
85 * Restore per-cpu operation. smp_store_release() is paired with
86 * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
87 * that the zeroing is visible to all percpu accesses which can see
88 * the following PCPU_REF_DEAD clearing.
89 */
90 for_each_possible_cpu(cpu)
91 *per_cpu_ptr(pcpu_count, cpu) = 0;
92 83
93 smp_store_release(&ref->pcpu_count_ptr, 84 ref->release = release;
94 ref->pcpu_count_ptr & ~PCPU_REF_DEAD); 85 return 0;
95} 86}
96EXPORT_SYMBOL_GPL(percpu_ref_reinit); 87EXPORT_SYMBOL_GPL(percpu_ref_init);
97 88
98/** 89/**
99 * percpu_ref_exit - undo percpu_ref_init() 90 * percpu_ref_exit - undo percpu_ref_init()
@@ -107,26 +98,39 @@ EXPORT_SYMBOL_GPL(percpu_ref_reinit);
107 */ 98 */
108void percpu_ref_exit(struct percpu_ref *ref) 99void percpu_ref_exit(struct percpu_ref *ref)
109{ 100{
110 unsigned __percpu *pcpu_count = pcpu_count_ptr(ref); 101 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
111 102
112 if (pcpu_count) { 103 if (percpu_count) {
113 free_percpu(pcpu_count); 104 free_percpu(percpu_count);
114 ref->pcpu_count_ptr = PCPU_REF_DEAD; 105 ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
115 } 106 }
116} 107}
117EXPORT_SYMBOL_GPL(percpu_ref_exit); 108EXPORT_SYMBOL_GPL(percpu_ref_exit);
118 109
119static void percpu_ref_kill_rcu(struct rcu_head *rcu) 110static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
111{
112 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
113
114 ref->confirm_switch(ref);
115 ref->confirm_switch = NULL;
116 wake_up_all(&percpu_ref_switch_waitq);
117
118 /* drop ref from percpu_ref_switch_to_atomic() */
119 percpu_ref_put(ref);
120}
121
122static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
120{ 123{
121 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); 124 struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
122 unsigned __percpu *pcpu_count = pcpu_count_ptr(ref); 125 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
123 unsigned count = 0; 126 unsigned long count = 0;
124 int cpu; 127 int cpu;
125 128
126 for_each_possible_cpu(cpu) 129 for_each_possible_cpu(cpu)
127 count += *per_cpu_ptr(pcpu_count, cpu); 130 count += *per_cpu_ptr(percpu_count, cpu);
128 131
129 pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count); 132 pr_debug("global %ld percpu %ld",
133 atomic_long_read(&ref->count), (long)count);
130 134
131 /* 135 /*
132 * It's crucial that we sum the percpu counters _before_ adding the sum 136 * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -140,21 +144,137 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
140 * reaching 0 before we add the percpu counts. But doing it at the same 144 * reaching 0 before we add the percpu counts. But doing it at the same
141 * time is equivalent and saves us atomic operations: 145 * time is equivalent and saves us atomic operations:
142 */ 146 */
147 atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
148
149 WARN_ONCE(atomic_long_read(&ref->count) <= 0,
150 "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
151 ref->release, atomic_long_read(&ref->count));
152
153 /* @ref is viewed as dead on all CPUs, send out switch confirmation */
154 percpu_ref_call_confirm_rcu(rcu);
155}
156
157static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
158{
159}
160
161static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
162 percpu_ref_func_t *confirm_switch)
163{
164 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
165 /* switching from percpu to atomic */
166 ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
167
168 /*
169 * Non-NULL ->confirm_switch is used to indicate that
170 * switching is in progress. Use noop one if unspecified.
171 */
172 WARN_ON_ONCE(ref->confirm_switch);
173 ref->confirm_switch =
174 confirm_switch ?: percpu_ref_noop_confirm_switch;
175
176 percpu_ref_get(ref); /* put after confirmation */
177 call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
178 } else if (confirm_switch) {
179 /*
180 * Somebody already set ATOMIC. Switching may still be in
181 * progress. @confirm_switch must be invoked after the
182 * switching is complete and a full sched RCU grace period
183 * has passed. Wait synchronously for the previous
184 * switching and schedule @confirm_switch invocation.
185 */
186 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
187 ref->confirm_switch = confirm_switch;
188
189 percpu_ref_get(ref); /* put after confirmation */
190 call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
191 }
192}
193
194/**
195 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
196 * @ref: percpu_ref to switch to atomic mode
197 * @confirm_switch: optional confirmation callback
198 *
199 * There's no reason to use this function for the usual reference counting.
200 * Use percpu_ref_kill[_and_confirm]().
201 *
202 * Schedule switching of @ref to atomic mode. All its percpu counts will
203 * be collected to the main atomic counter. On completion, when all CPUs
204 * are guaraneed to be in atomic mode, @confirm_switch, which may not
205 * block, is invoked. This function may be invoked concurrently with all
206 * the get/put operations and can safely be mixed with kill and reinit
207 * operations. Note that @ref will stay in atomic mode across kill/reinit
208 * cycles until percpu_ref_switch_to_percpu() is called.
209 *
210 * This function normally doesn't block and can be called from any context
211 * but it may block if @confirm_kill is specified and @ref is already in
212 * the process of switching to atomic mode. In such cases, @confirm_switch
213 * will be invoked after the switching is complete.
214 *
215 * Due to the way percpu_ref is implemented, @confirm_switch will be called
216 * after at least one full sched RCU grace period has passed but this is an
217 * implementation detail and must not be depended upon.
218 */
219void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
220 percpu_ref_func_t *confirm_switch)
221{
222 ref->force_atomic = true;
223 __percpu_ref_switch_to_atomic(ref, confirm_switch);
224}
143 225
144 atomic_add((int) count - PCPU_COUNT_BIAS, &ref->count); 226static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
227{
228 unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
229 int cpu;
145 230
146 WARN_ONCE(atomic_read(&ref->count) <= 0, "percpu ref <= 0 (%i)", 231 BUG_ON(!percpu_count);
147 atomic_read(&ref->count));
148 232
149 /* @ref is viewed as dead on all CPUs, send out kill confirmation */ 233 if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
150 if (ref->confirm_kill) 234 return;
151 ref->confirm_kill(ref); 235
236 wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
237
238 atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
152 239
153 /* 240 /*
154 * Now we're in single atomic_t mode with a consistent refcount, so it's 241 * Restore per-cpu operation. smp_store_release() is paired with
155 * safe to drop our initial ref: 242 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
243 * that the zeroing is visible to all percpu accesses which can see
244 * the following __PERCPU_REF_ATOMIC clearing.
156 */ 245 */
157 percpu_ref_put(ref); 246 for_each_possible_cpu(cpu)
247 *per_cpu_ptr(percpu_count, cpu) = 0;
248
249 smp_store_release(&ref->percpu_count_ptr,
250 ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
251}
252
253/**
254 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
255 * @ref: percpu_ref to switch to percpu mode
256 *
257 * There's no reason to use this function for the usual reference counting.
258 * To re-use an expired ref, use percpu_ref_reinit().
259 *
260 * Switch @ref to percpu mode. This function may be invoked concurrently
261 * with all the get/put operations and can safely be mixed with kill and
262 * reinit operations. This function reverses the sticky atomic state set
263 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
264 * dying or dead, the actual switching takes place on the following
265 * percpu_ref_reinit().
266 *
267 * This function normally doesn't block and can be called from any context
268 * but it may block if @ref is in the process of switching to atomic mode
269 * by percpu_ref_switch_atomic().
270 */
271void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
272{
273 ref->force_atomic = false;
274
275 /* a dying or dead ref can't be switched to percpu mode w/o reinit */
276 if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
277 __percpu_ref_switch_to_percpu(ref);
158} 278}
159 279
160/** 280/**
@@ -164,23 +284,48 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
164 * 284 *
165 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if 285 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
166 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be 286 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
167 * called after @ref is seen as dead from all CPUs - all further 287 * called after @ref is seen as dead from all CPUs at which point all
168 * invocations of percpu_ref_tryget() will fail. See percpu_ref_tryget() 288 * further invocations of percpu_ref_tryget_live() will fail. See
169 * for more details. 289 * percpu_ref_tryget_live() for details.
290 *
291 * This function normally doesn't block and can be called from any context
292 * but it may block if @confirm_kill is specified and @ref is in the
293 * process of switching to atomic mode by percpu_ref_switch_atomic().
170 * 294 *
171 * Due to the way percpu_ref is implemented, @confirm_kill will be called 295 * Due to the way percpu_ref is implemented, @confirm_switch will be called
172 * after at least one full RCU grace period has passed but this is an 296 * after at least one full sched RCU grace period has passed but this is an
173 * implementation detail and callers must not depend on it. 297 * implementation detail and must not be depended upon.
174 */ 298 */
175void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 299void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
176 percpu_ref_func_t *confirm_kill) 300 percpu_ref_func_t *confirm_kill)
177{ 301{
178 WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD, 302 WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
179 "percpu_ref_kill() called more than once!\n"); 303 "%s called more than once on %pf!", __func__, ref->release);
180 304
181 ref->pcpu_count_ptr |= PCPU_REF_DEAD; 305 ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
182 ref->confirm_kill = confirm_kill; 306 __percpu_ref_switch_to_atomic(ref, confirm_kill);
183 307 percpu_ref_put(ref);
184 call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
185} 308}
186EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); 309EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
310
311/**
312 * percpu_ref_reinit - re-initialize a percpu refcount
313 * @ref: perpcu_ref to re-initialize
314 *
315 * Re-initialize @ref so that it's in the same state as when it finished
316 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
317 * initialized successfully and reached 0 but not exited.
318 *
319 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
320 * this function is in progress.
321 */
322void percpu_ref_reinit(struct percpu_ref *ref)
323{
324 WARN_ON_ONCE(!percpu_ref_is_zero(ref));
325
326 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
327 percpu_ref_get(ref);
328 if (!ref->force_atomic)
329 __percpu_ref_switch_to_percpu(ref);
330}
331EXPORT_SYMBOL_GPL(percpu_ref_reinit);
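
The percpu-refcount rework folds the old kill path into a general atomic<->percpu mode switch: percpu_ref_init() now takes PERCPU_REF_INIT_* flags plus a gfp mask, and percpu_ref_switch_to_atomic()/percpu_ref_switch_to_percpu() can be called at any point, with kill and reinit layered on top. From a caller's point of view the common case looks roughly like this (struct my_obj and my_release() are made up for illustration):

    struct my_obj {
            struct percpu_ref ref;
            /* ... payload ... */
    };

    static void my_release(struct percpu_ref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, ref);

            kfree(obj);
    }

    static int my_obj_setup(struct my_obj *obj)
    {
            /* flags == 0: starts live, in percpu mode, with a count of 1 */
            return percpu_ref_init(&obj->ref, my_release, 0, GFP_KERNEL);
    }

    static void my_obj_teardown(struct my_obj *obj)
    {
            /* switches the ref to atomic mode and drops the base reference;
             * my_release() runs once every outstanding get has been put */
            percpu_ref_kill(&obj->ref);
    }
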
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,13 +112,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
112} 112}
113EXPORT_SYMBOL(__percpu_counter_sum); 113EXPORT_SYMBOL(__percpu_counter_sum);
114 114
115int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, 115int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
116 struct lock_class_key *key) 116 struct lock_class_key *key)
117{ 117{
118 unsigned long flags __maybe_unused;
119
118 raw_spin_lock_init(&fbc->lock); 120 raw_spin_lock_init(&fbc->lock);
119 lockdep_set_class(&fbc->lock, key); 121 lockdep_set_class(&fbc->lock, key);
120 fbc->count = amount; 122 fbc->count = amount;
121 fbc->counters = alloc_percpu(s32); 123 fbc->counters = alloc_percpu_gfp(s32, gfp);
122 if (!fbc->counters) 124 if (!fbc->counters)
123 return -ENOMEM; 125 return -ENOMEM;
124 126
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
126 128
127#ifdef CONFIG_HOTPLUG_CPU 129#ifdef CONFIG_HOTPLUG_CPU
128 INIT_LIST_HEAD(&fbc->list); 130 INIT_LIST_HEAD(&fbc->list);
129 spin_lock(&percpu_counters_lock); 131 spin_lock_irqsave(&percpu_counters_lock, flags);
130 list_add(&fbc->list, &percpu_counters); 132 list_add(&fbc->list, &percpu_counters);
131 spin_unlock(&percpu_counters_lock); 133 spin_unlock_irqrestore(&percpu_counters_lock, flags);
132#endif 134#endif
133 return 0; 135 return 0;
134} 136}
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
136 138
137void percpu_counter_destroy(struct percpu_counter *fbc) 139void percpu_counter_destroy(struct percpu_counter *fbc)
138{ 140{
141 unsigned long flags __maybe_unused;
142
139 if (!fbc->counters) 143 if (!fbc->counters)
140 return; 144 return;
141 145
142 debug_percpu_counter_deactivate(fbc); 146 debug_percpu_counter_deactivate(fbc);
143 147
144#ifdef CONFIG_HOTPLUG_CPU 148#ifdef CONFIG_HOTPLUG_CPU
145 spin_lock(&percpu_counters_lock); 149 spin_lock_irqsave(&percpu_counters_lock, flags);
146 list_del(&fbc->list); 150 list_del(&fbc->list);
147 spin_unlock(&percpu_counters_lock); 151 spin_unlock_irqrestore(&percpu_counters_lock, flags);
148#endif 152#endif
149 free_percpu(fbc->counters); 153 free_percpu(fbc->counters);
150 fbc->counters = NULL; 154 fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
173 return NOTIFY_OK; 177 return NOTIFY_OK;
174 178
175 cpu = (unsigned long)hcpu; 179 cpu = (unsigned long)hcpu;
176 spin_lock(&percpu_counters_lock); 180 spin_lock_irq(&percpu_counters_lock);
177 list_for_each_entry(fbc, &percpu_counters, list) { 181 list_for_each_entry(fbc, &percpu_counters, list) {
178 s32 *pcount; 182 s32 *pcount;
179 unsigned long flags; 183 unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
184 *pcount = 0; 188 *pcount = 0;
185 raw_spin_unlock_irqrestore(&fbc->lock, flags); 189 raw_spin_unlock_irqrestore(&fbc->lock, flags);
186 } 190 }
187 spin_unlock(&percpu_counters_lock); 191 spin_unlock_irq(&percpu_counters_lock);
188#endif 192#endif
189 return NOTIFY_OK; 193 return NOTIFY_OK;
190} 194}
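
Two independent fixes here: percpu_counter_init() now takes the allocation mask from its caller, which is what forces the gfp_t plumbing through flex_proportions.c and proportions.c elsewhere in this series, and the global percpu_counters_lock is now taken irq-safely. A typical process-context caller simply gains a GFP_KERNEL argument (illustrative snippet, not from the patch):

    static int example_counter(void)
    {
            struct percpu_counter nr_things;

            if (percpu_counter_init(&nr_things, 0, GFP_KERNEL))
                    return -ENOMEM;

            percpu_counter_inc(&nr_things);
            pr_info("things: %lld\n", percpu_counter_sum(&nr_things));

            percpu_counter_destroy(&nr_things);
            return 0;
    }
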
diff --git a/lib/prio_heap.c b/lib/prio_heap.c
deleted file mode 100644
index a7af6f85eca8..000000000000
--- a/lib/prio_heap.c
+++ /dev/null
@@ -1,70 +0,0 @@
1/*
2 * Simple insertion-only static-sized priority heap containing
3 * pointers, based on CLR, chapter 7
4 */
5
6#include <linux/slab.h>
7#include <linux/prio_heap.h>
8
9int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask,
10 int (*gt)(void *, void *))
11{
12 heap->ptrs = kmalloc(size, gfp_mask);
13 if (!heap->ptrs)
14 return -ENOMEM;
15 heap->size = 0;
16 heap->max = size / sizeof(void *);
17 heap->gt = gt;
18 return 0;
19}
20
21void heap_free(struct ptr_heap *heap)
22{
23 kfree(heap->ptrs);
24}
25
26void *heap_insert(struct ptr_heap *heap, void *p)
27{
28 void *res;
29 void **ptrs = heap->ptrs;
30 int pos;
31
32 if (heap->size < heap->max) {
33 /* Heap insertion */
34 pos = heap->size++;
35 while (pos > 0 && heap->gt(p, ptrs[(pos-1)/2])) {
36 ptrs[pos] = ptrs[(pos-1)/2];
37 pos = (pos-1)/2;
38 }
39 ptrs[pos] = p;
40 return NULL;
41 }
42
43 /* The heap is full, so something will have to be dropped */
44
45 /* If the new pointer is greater than the current max, drop it */
46 if (heap->gt(p, ptrs[0]))
47 return p;
48
49 /* Replace the current max and heapify */
50 res = ptrs[0];
51 ptrs[0] = p;
52 pos = 0;
53
54 while (1) {
55 int left = 2 * pos + 1;
56 int right = 2 * pos + 2;
57 int largest = pos;
58 if (left < heap->size && heap->gt(ptrs[left], p))
59 largest = left;
60 if (right < heap->size && heap->gt(ptrs[right], ptrs[largest]))
61 largest = right;
62 if (largest == pos)
63 break;
64 /* Push p down the heap one level and bump one up */
65 ptrs[pos] = ptrs[largest];
66 ptrs[largest] = p;
67 pos = largest;
68 }
69 return res;
70}
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df84801b56..6f724298f67a 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,7 +73,7 @@
73#include <linux/proportions.h> 73#include <linux/proportions.h>
74#include <linux/rcupdate.h> 74#include <linux/rcupdate.h>
75 75
76int prop_descriptor_init(struct prop_descriptor *pd, int shift) 76int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
77{ 77{
78 int err; 78 int err;
79 79
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
83 pd->index = 0; 83 pd->index = 0;
84 pd->pg[0].shift = shift; 84 pd->pg[0].shift = shift;
85 mutex_init(&pd->mutex); 85 mutex_init(&pd->mutex);
86 err = percpu_counter_init(&pd->pg[0].events, 0); 86 err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
87 if (err) 87 if (err)
88 goto out; 88 goto out;
89 89
90 err = percpu_counter_init(&pd->pg[1].events, 0); 90 err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
91 if (err) 91 if (err)
92 percpu_counter_destroy(&pd->pg[0].events); 92 percpu_counter_destroy(&pd->pg[0].events);
93 93
@@ -188,12 +188,12 @@ prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
188 188
189#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids))) 189#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
190 190
191int prop_local_init_percpu(struct prop_local_percpu *pl) 191int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
192{ 192{
193 raw_spin_lock_init(&pl->lock); 193 raw_spin_lock_init(&pl->lock);
194 pl->shift = 0; 194 pl->shift = 0;
195 pl->period = 0; 195 pl->period = 0;
196 return percpu_counter_init(&pl->events, 0); 196 return percpu_counter_init(&pl->events, 0, gfp);
197} 197}
198 198
199void prop_local_destroy_percpu(struct prop_local_percpu *pl) 199void prop_local_destroy_percpu(struct prop_local_percpu *pl)
diff --git a/lib/random32.c b/lib/random32.c
index c9b6bf3afe0c..0bee183fa18f 100644
--- a/lib/random32.c
+++ b/lib/random32.c
@@ -37,6 +37,7 @@
37#include <linux/jiffies.h> 37#include <linux/jiffies.h>
38#include <linux/random.h> 38#include <linux/random.h>
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <asm/unaligned.h>
40 41
41#ifdef CONFIG_RANDOM32_SELFTEST 42#ifdef CONFIG_RANDOM32_SELFTEST
42static void __init prandom_state_selftest(void); 43static void __init prandom_state_selftest(void);
@@ -96,27 +97,23 @@ EXPORT_SYMBOL(prandom_u32);
96 * This is used for pseudo-randomness with no outside seeding. 97 * This is used for pseudo-randomness with no outside seeding.
97 * For more random results, use prandom_bytes(). 98 * For more random results, use prandom_bytes().
98 */ 99 */
99void prandom_bytes_state(struct rnd_state *state, void *buf, int bytes) 100void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
100{ 101{
101 unsigned char *p = buf; 102 u8 *ptr = buf;
102 int i;
103
104 for (i = 0; i < round_down(bytes, sizeof(u32)); i += sizeof(u32)) {
105 u32 random = prandom_u32_state(state);
106 int j;
107 103
108 for (j = 0; j < sizeof(u32); j++) { 104 while (bytes >= sizeof(u32)) {
109 p[i + j] = random; 105 put_unaligned(prandom_u32_state(state), (u32 *) ptr);
110 random >>= BITS_PER_BYTE; 106 ptr += sizeof(u32);
111 } 107 bytes -= sizeof(u32);
112 } 108 }
113 if (i < bytes) {
114 u32 random = prandom_u32_state(state);
115 109
116 for (; i < bytes; i++) { 110 if (bytes > 0) {
117 p[i] = random; 111 u32 rem = prandom_u32_state(state);
118 random >>= BITS_PER_BYTE; 112 do {
119 } 113 *ptr++ = (u8) rem;
114 bytes--;
115 rem >>= BITS_PER_BYTE;
116 } while (bytes > 0);
120 } 117 }
121} 118}
122EXPORT_SYMBOL(prandom_bytes_state); 119EXPORT_SYMBOL(prandom_bytes_state);
@@ -126,7 +123,7 @@ EXPORT_SYMBOL(prandom_bytes_state);
126 * @buf: where to copy the pseudo-random bytes to 123 * @buf: where to copy the pseudo-random bytes to
127 * @bytes: the requested number of bytes 124 * @bytes: the requested number of bytes
128 */ 125 */
129void prandom_bytes(void *buf, int bytes) 126void prandom_bytes(void *buf, size_t bytes)
130{ 127{
131 struct rnd_state *state = &get_cpu_var(net_rand_state); 128 struct rnd_state *state = &get_cpu_var(net_rand_state);
132 129
@@ -137,7 +134,7 @@ EXPORT_SYMBOL(prandom_bytes);
137 134
138static void prandom_warmup(struct rnd_state *state) 135static void prandom_warmup(struct rnd_state *state)
139{ 136{
140 /* Calling RNG ten times to satify recurrence condition */ 137 /* Calling RNG ten times to satisfy recurrence condition */
141 prandom_u32_state(state); 138 prandom_u32_state(state);
142 prandom_u32_state(state); 139 prandom_u32_state(state);
143 prandom_u32_state(state); 140 prandom_u32_state(state);
@@ -152,7 +149,7 @@ static void prandom_warmup(struct rnd_state *state)
152 149
153static u32 __extract_hwseed(void) 150static u32 __extract_hwseed(void)
154{ 151{
155 u32 val = 0; 152 unsigned int val = 0;
156 153
157 (void)(arch_get_random_seed_int(&val) || 154 (void)(arch_get_random_seed_int(&val) ||
158 arch_get_random_int(&val)); 155 arch_get_random_int(&val));
@@ -228,7 +225,7 @@ static void __prandom_timer(unsigned long dontcare)
228 prandom_seed(entropy); 225 prandom_seed(entropy);
229 226
230 /* reseed every ~60 seconds, in [40 .. 80) interval with slack */ 227 /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
231 expires = 40 + (prandom_u32() % 40); 228 expires = 40 + prandom_u32_max(40);
232 seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC); 229 seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
233 230
234 add_timer(&seed_timer); 231 add_timer(&seed_timer);
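prandom_bytes_state() now takes a size_t length and fills the buffer one u32 at a time via put_unaligned(). A hedged sketch of feeding it a private, explicitly seeded state; the seed value is arbitrary and the helper name is hypothetical:

static void example_fill(u8 *buf, size_t len)
{
	struct rnd_state state;

	prandom_seed_state(&state, 42ULL);	/* deterministic, test-only seed */
	prandom_bytes_state(&state, buf, len);
}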
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index a2c78810ebc1..081be3ba9ea8 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -23,7 +23,6 @@
23#include <linux/hash.h> 23#include <linux/hash.h>
24#include <linux/random.h> 24#include <linux/random.h>
25#include <linux/rhashtable.h> 25#include <linux/rhashtable.h>
26#include <linux/log2.h>
27 26
28#define HASH_DEFAULT_SIZE 64UL 27#define HASH_DEFAULT_SIZE 64UL
29#define HASH_MIN_SIZE 4UL 28#define HASH_MIN_SIZE 4UL
@@ -55,7 +54,7 @@ static u32 __hashfn(const struct rhashtable *ht, const void *key,
55 54
56/** 55/**
57 * rhashtable_hashfn - compute hash for key of given length 56 * rhashtable_hashfn - compute hash for key of given length
58 * @ht: hash table to compuate for 57 * @ht: hash table to compute for
59 * @key: pointer to key 58 * @key: pointer to key
60 * @len: length of key 59 * @len: length of key
61 * 60 *
@@ -86,7 +85,7 @@ static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
86 85
87/** 86/**
88 * rhashtable_obj_hashfn - compute hash for hashed object 87 * rhashtable_obj_hashfn - compute hash for hashed object
89 * @ht: hash table to compuate for 88 * @ht: hash table to compute for
90 * @ptr: pointer to hashed object 89 * @ptr: pointer to hashed object
91 * 90 *
92 * Computes the hash value using the hash function `hashfn` respectively 91 * Computes the hash value using the hash function `hashfn` respectively
@@ -298,7 +297,7 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
298 297
299 ASSERT_RHT_MUTEX(ht); 298 ASSERT_RHT_MUTEX(ht);
300 299
301 if (tbl->size <= HASH_MIN_SIZE) 300 if (ht->shift <= ht->p.min_shift)
302 return 0; 301 return 0;
303 302
304 ntbl = bucket_table_alloc(tbl->size / 2, flags); 303 ntbl = bucket_table_alloc(tbl->size / 2, flags);
@@ -506,9 +505,10 @@ void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
506} 505}
507EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); 506EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
508 507
509static size_t rounded_hashtable_size(unsigned int nelem) 508static size_t rounded_hashtable_size(struct rhashtable_params *params)
510{ 509{
511 return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE); 510 return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
511 1UL << params->min_shift);
512} 512}
513 513
514/** 514/**
@@ -566,8 +566,11 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
566 (!params->key_len && !params->obj_hashfn)) 566 (!params->key_len && !params->obj_hashfn))
567 return -EINVAL; 567 return -EINVAL;
568 568
569 params->min_shift = max_t(size_t, params->min_shift,
570 ilog2(HASH_MIN_SIZE));
571
569 if (params->nelem_hint) 572 if (params->nelem_hint)
570 size = rounded_hashtable_size(params->nelem_hint); 573 size = rounded_hashtable_size(params);
571 574
572 tbl = bucket_table_alloc(size, GFP_KERNEL); 575 tbl = bucket_table_alloc(size, GFP_KERNEL);
573 if (tbl == NULL) 576 if (tbl == NULL)
@@ -589,13 +592,13 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
589 * rhashtable_destroy - destroy hash table 592 * rhashtable_destroy - destroy hash table
590 * @ht: the hash table to destroy 593 * @ht: the hash table to destroy
591 * 594 *
592 * Frees the bucket array. 595 * Frees the bucket array. This function is not rcu safe, therefore the caller
596 * has to make sure that no resizing may happen by unpublishing the hashtable
597 * and waiting for the quiescent cycle before releasing the bucket array.
593 */ 598 */
594void rhashtable_destroy(const struct rhashtable *ht) 599void rhashtable_destroy(const struct rhashtable *ht)
595{ 600{
596 const struct bucket_table *tbl = rht_dereference(ht->tbl, ht); 601 bucket_table_free(ht->tbl);
597
598 bucket_table_free(tbl);
599} 602}
600EXPORT_SYMBOL_GPL(rhashtable_destroy); 603EXPORT_SYMBOL_GPL(rhashtable_destroy);
601 604
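With the min_shift clamp in place, a caller that leaves .min_shift at zero still gets at least HASH_MIN_SIZE buckets. A hedged sketch of an init call from this era, loosely following the in-tree self-test; the struct layout, field names and use of jhash are from memory and should be treated as assumptions:

struct example_obj {
	int			key;
	struct rhash_head	node;
};

static int example_ht_init(struct rhashtable *ht)
{
	struct rhashtable_params params = {
		.nelem_hint	= 100,
		.key_len	= sizeof(int),
		.key_offset	= offsetof(struct example_obj, key),
		.head_offset	= offsetof(struct example_obj, node),
		.hashfn		= jhash,	/* assumes <linux/jhash.h> */
		/* .min_shift left at 0; rhashtable_init() clamps it to ilog2(HASH_MIN_SIZE) */
	};

	return rhashtable_init(ht, &params);
}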
diff --git a/lib/string.c b/lib/string.c
index 992bf30af759..2fc20aa06f84 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,14 +27,14 @@
27#include <linux/bug.h> 27#include <linux/bug.h>
28#include <linux/errno.h> 28#include <linux/errno.h>
29 29
30#ifndef __HAVE_ARCH_STRNICMP 30#ifndef __HAVE_ARCH_STRNCASECMP
31/** 31/**
32 * strnicmp - Case insensitive, length-limited string comparison 32 * strncasecmp - Case insensitive, length-limited string comparison
33 * @s1: One string 33 * @s1: One string
34 * @s2: The other string 34 * @s2: The other string
35 * @len: the maximum number of characters to compare 35 * @len: the maximum number of characters to compare
36 */ 36 */
37int strnicmp(const char *s1, const char *s2, size_t len) 37int strncasecmp(const char *s1, const char *s2, size_t len)
38{ 38{
39 /* Yes, Virginia, it had better be unsigned */ 39 /* Yes, Virginia, it had better be unsigned */
40 unsigned char c1, c2; 40 unsigned char c1, c2;
@@ -56,6 +56,14 @@ int strnicmp(const char *s1, const char *s2, size_t len)
56 } while (--len); 56 } while (--len);
57 return (int)c1 - (int)c2; 57 return (int)c1 - (int)c2;
58} 58}
59EXPORT_SYMBOL(strncasecmp);
60#endif
61#ifndef __HAVE_ARCH_STRNICMP
62#undef strnicmp
63int strnicmp(const char *s1, const char *s2, size_t len)
64{
65 return strncasecmp(s1, s2, len);
66}
59EXPORT_SYMBOL(strnicmp); 67EXPORT_SYMBOL(strnicmp);
60#endif 68#endif
61 69
@@ -73,20 +81,6 @@ int strcasecmp(const char *s1, const char *s2)
73EXPORT_SYMBOL(strcasecmp); 81EXPORT_SYMBOL(strcasecmp);
74#endif 82#endif
75 83
76#ifndef __HAVE_ARCH_STRNCASECMP
77int strncasecmp(const char *s1, const char *s2, size_t n)
78{
79 int c1, c2;
80
81 do {
82 c1 = tolower(*s1++);
83 c2 = tolower(*s2++);
84 } while ((--n > 0) && c1 == c2 && c1 != 0);
85 return c1 - c2;
86}
87EXPORT_SYMBOL(strncasecmp);
88#endif
89
90#ifndef __HAVE_ARCH_STRCPY 84#ifndef __HAVE_ARCH_STRCPY
91/** 85/**
92 * strcpy - Copy a %NUL terminated string 86 * strcpy - Copy a %NUL terminated string
@@ -807,9 +801,9 @@ void *memchr_inv(const void *start, int c, size_t bytes)
807 return check_bytes8(start, value, bytes); 801 return check_bytes8(start, value, bytes);
808 802
809 value64 = value; 803 value64 = value;
810#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 804#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
811 value64 *= 0x0101010101010101; 805 value64 *= 0x0101010101010101;
812#elif defined(ARCH_HAS_FAST_MULTIPLIER) 806#elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER)
813 value64 *= 0x01010101; 807 value64 *= 0x01010101;
814 value64 |= value64 << 32; 808 value64 |= value64 << 32;
815#else 809#else
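Besides turning strnicmp() into a thin wrapper around strncasecmp(), this hunk renames the fast-multiplier guard in memchr_inv() to the new CONFIG_ option. As a reminder of what that helper does, a small hedged sketch (the wrapper name is hypothetical):

/* memchr_inv() returns the first byte that differs from 'c', or NULL if none do. */
static bool region_is_zeroed(const void *buf, size_t len)
{
	return memchr_inv(buf, 0, len) == NULL;
}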
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index 29033f319aea..58b78ba57439 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -8,6 +8,8 @@
8#include <linux/math64.h> 8#include <linux/math64.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/ctype.h> 10#include <linux/ctype.h>
11#include <linux/errno.h>
12#include <linux/string.h>
11#include <linux/string_helpers.h> 13#include <linux/string_helpers.h>
12 14
13/** 15/**
@@ -168,6 +170,44 @@ static bool unescape_special(char **src, char **dst)
168 return true; 170 return true;
169} 171}
170 172
173/**
174 * string_unescape - unquote characters in the given string
175 * @src: source buffer (escaped)
176 * @dst: destination buffer (unescaped)
177 * @size: size of the destination buffer (0 for no limit)
178 * @flags: combination of the flags (bitwise OR):
179 * %UNESCAPE_SPACE:
180 * '\f' - form feed
181 * '\n' - new line
182 * '\r' - carriage return
183 * '\t' - horizontal tab
184 * '\v' - vertical tab
185 * %UNESCAPE_OCTAL:
186 * '\NNN' - byte with octal value NNN (1 to 3 digits)
187 * %UNESCAPE_HEX:
188 * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
189 * %UNESCAPE_SPECIAL:
190 * '\"' - double quote
191 * '\\' - backslash
192 * '\a' - alert (BEL)
193 * '\e' - escape
194 * %UNESCAPE_ANY:
195 * all previous together
196 *
197 * Description:
198 * The function unquotes characters in the given string.
199 *
200 * Because the size of the output will be the same as or less than the size of
201 * the input, the transformation may be performed in place.
202 *
203 * The caller must provide valid source and destination pointers. Be aware that
204 * the destination buffer will always be NULL-terminated. The source string must
205 * be NULL-terminated as well.
206 *
207 * Return:
208 * The number of characters written to the destination buffer, excluding the
209 * trailing '\0', is returned.
210 */
171int string_unescape(char *src, char *dst, size_t size, unsigned int flags) 211int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
172{ 212{
173 char *out = dst; 213 char *out = dst;
@@ -202,3 +242,275 @@ int string_unescape(char *src, char *dst, size_t size, unsigned int flags)
202 return out - dst; 242 return out - dst;
203} 243}
204EXPORT_SYMBOL(string_unescape); 244EXPORT_SYMBOL(string_unescape);
245
246static int escape_passthrough(unsigned char c, char **dst, size_t *osz)
247{
248 char *out = *dst;
249
250 if (*osz < 1)
251 return -ENOMEM;
252
253 *out++ = c;
254
255 *dst = out;
256 *osz -= 1;
257
258 return 1;
259}
260
261static int escape_space(unsigned char c, char **dst, size_t *osz)
262{
263 char *out = *dst;
264 unsigned char to;
265
266 if (*osz < 2)
267 return -ENOMEM;
268
269 switch (c) {
270 case '\n':
271 to = 'n';
272 break;
273 case '\r':
274 to = 'r';
275 break;
276 case '\t':
277 to = 't';
278 break;
279 case '\v':
280 to = 'v';
281 break;
282 case '\f':
283 to = 'f';
284 break;
285 default:
286 return 0;
287 }
288
289 *out++ = '\\';
290 *out++ = to;
291
292 *dst = out;
293 *osz -= 2;
294
295 return 1;
296}
297
298static int escape_special(unsigned char c, char **dst, size_t *osz)
299{
300 char *out = *dst;
301 unsigned char to;
302
303 if (*osz < 2)
304 return -ENOMEM;
305
306 switch (c) {
307 case '\\':
308 to = '\\';
309 break;
310 case '\a':
311 to = 'a';
312 break;
313 case '\e':
314 to = 'e';
315 break;
316 default:
317 return 0;
318 }
319
320 *out++ = '\\';
321 *out++ = to;
322
323 *dst = out;
324 *osz -= 2;
325
326 return 1;
327}
328
329static int escape_null(unsigned char c, char **dst, size_t *osz)
330{
331 char *out = *dst;
332
333 if (*osz < 2)
334 return -ENOMEM;
335
336 if (c)
337 return 0;
338
339 *out++ = '\\';
340 *out++ = '0';
341
342 *dst = out;
343 *osz -= 2;
344
345 return 1;
346}
347
348static int escape_octal(unsigned char c, char **dst, size_t *osz)
349{
350 char *out = *dst;
351
352 if (*osz < 4)
353 return -ENOMEM;
354
355 *out++ = '\\';
356 *out++ = ((c >> 6) & 0x07) + '0';
357 *out++ = ((c >> 3) & 0x07) + '0';
358 *out++ = ((c >> 0) & 0x07) + '0';
359
360 *dst = out;
361 *osz -= 4;
362
363 return 1;
364}
365
366static int escape_hex(unsigned char c, char **dst, size_t *osz)
367{
368 char *out = *dst;
369
370 if (*osz < 4)
371 return -ENOMEM;
372
373 *out++ = '\\';
374 *out++ = 'x';
375 *out++ = hex_asc_hi(c);
376 *out++ = hex_asc_lo(c);
377
378 *dst = out;
379 *osz -= 4;
380
381 return 1;
382}
383
384/**
385 * string_escape_mem - quote characters in the given memory buffer
386 * @src: source buffer (unescaped)
387 * @isz: source buffer size
388 * @dst: destination buffer (escaped)
389 * @osz: destination buffer size
390 * @flags: combination of the flags (bitwise OR):
391 * %ESCAPE_SPACE:
392 * '\f' - form feed
393 * '\n' - new line
394 * '\r' - carriage return
395 * '\t' - horizontal tab
396 * '\v' - vertical tab
397 * %ESCAPE_SPECIAL:
398 * '\\' - backslash
399 * '\a' - alert (BEL)
400 * '\e' - escape
401 * %ESCAPE_NULL:
402 * '\0' - null
403 * %ESCAPE_OCTAL:
404 * '\NNN' - byte with octal value NNN (3 digits)
405 * %ESCAPE_ANY:
406 * all previous together
407 * %ESCAPE_NP:
408 * escape only non-printable characters (checked by isprint)
409 * %ESCAPE_ANY_NP:
410 * all previous together
411 * %ESCAPE_HEX:
412 * '\xHH' - byte with hexadecimal value HH (2 digits)
413 * @esc: NULL-terminated string of characters any of which, if found in
414 * the source, has to be escaped
415 *
416 * Description:
417 * Escaping a byte buffer consists of several steps, applied in the
418 * following sequence.
419 * 1. The character is matched against the printable class, if requested,
420 * and on a match it passes through to the output.
421 * 2. The character does not appear in the @esc string and therefore
422 * passes through to the output as is.
423 * 3. The character is checked against the class given by @flags.
424 * %ESCAPE_OCTAL and %ESCAPE_HEX go last since they cover any
425 * character. Note that they can't be combined, otherwise
426 * %ESCAPE_HEX will be ignored.
427 *
428 * The caller must provide valid source and destination pointers. Be aware that
429 * the destination buffer will not be NULL-terminated, so the caller has to
430 * append it if needed.
431 *
432 * Return:
433 * The number of characters written to the destination buffer, or %-ENOMEM if
434 * the destination buffer is too small to hold an escaped character, is
435 * returned.
436 *
437 * Even in the case of error @dst pointer will be updated to point to the byte
438 * after the last processed character.
439 */
440int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
441 unsigned int flags, const char *esc)
442{
443 char *out = *dst, *p = out;
444 bool is_dict = esc && *esc;
445 int ret = 0;
446
447 while (isz--) {
448 unsigned char c = *src++;
449
450 /*
451 * Apply rules in the following sequence:
452 * - the character is printable, when @flags has
453 * %ESCAPE_NP bit set
 454 * - the @esc string is supplied and does not contain the
 455 * character in question
456 * - the character doesn't fall into a class of symbols
457 * defined by given @flags
458 * In these cases we just pass through a character to the
459 * output buffer.
460 */
461 if ((flags & ESCAPE_NP && isprint(c)) ||
462 (is_dict && !strchr(esc, c))) {
463 /* do nothing */
464 } else {
465 if (flags & ESCAPE_SPACE) {
466 ret = escape_space(c, &p, &osz);
467 if (ret < 0)
468 break;
469 if (ret > 0)
470 continue;
471 }
472
473 if (flags & ESCAPE_SPECIAL) {
474 ret = escape_special(c, &p, &osz);
475 if (ret < 0)
476 break;
477 if (ret > 0)
478 continue;
479 }
480
481 if (flags & ESCAPE_NULL) {
482 ret = escape_null(c, &p, &osz);
483 if (ret < 0)
484 break;
485 if (ret > 0)
486 continue;
487 }
488
489 /* ESCAPE_OCTAL and ESCAPE_HEX always go last */
490 if (flags & ESCAPE_OCTAL) {
491 ret = escape_octal(c, &p, &osz);
492 if (ret < 0)
493 break;
494 continue;
495 }
496 if (flags & ESCAPE_HEX) {
497 ret = escape_hex(c, &p, &osz);
498 if (ret < 0)
499 break;
500 continue;
501 }
502 }
503
504 ret = escape_passthrough(c, &p, &osz);
505 if (ret < 0)
506 break;
507 }
508
509 *dst = p;
510
511 if (ret < 0)
512 return ret;
513
514 return p - out;
515}
516EXPORT_SYMBOL(string_escape_mem);
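A hedged sketch of how the new string_escape_mem() pairs with the existing string_unescape(); buffer sizes, the sample input and flag choices are illustrative:

static void example_escape_roundtrip(void)
{
	const char raw[] = "line1\nline2\t\e";
	char escaped[64], unescaped[64];
	char *p = escaped;
	int n;

	/* No extra dictionary (esc == NULL); returns bytes written or -ENOMEM. */
	n = string_escape_mem(raw, sizeof(raw) - 1, &p, sizeof(escaped) - 1,
			      ESCAPE_ANY_NP, NULL);
	if (n < 0)
		return;
	escaped[n] = '\0';		/* string_escape_mem() does not NUL-terminate */

	/* string_unescape() does NUL-terminate its output. */
	string_unescape(escaped, unescaped, sizeof(unescaped), UNESCAPE_ANY);
}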
diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c
index 6ac48de04c0e..ab0d30e1e18f 100644
--- a/lib/test-string_helpers.c
+++ b/lib/test-string_helpers.c
@@ -5,11 +5,32 @@
5 5
6#include <linux/init.h> 6#include <linux/init.h>
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/slab.h>
8#include <linux/module.h> 9#include <linux/module.h>
9#include <linux/random.h> 10#include <linux/random.h>
10#include <linux/string.h> 11#include <linux/string.h>
11#include <linux/string_helpers.h> 12#include <linux/string_helpers.h>
12 13
14static __init bool test_string_check_buf(const char *name, unsigned int flags,
15 char *in, size_t p,
16 char *out_real, size_t q_real,
17 char *out_test, size_t q_test)
18{
19 if (q_real == q_test && !memcmp(out_test, out_real, q_test))
20 return true;
21
22 pr_warn("Test '%s' failed: flags = %u\n", name, flags);
23
24 print_hex_dump(KERN_WARNING, "Input: ", DUMP_PREFIX_NONE, 16, 1,
25 in, p, true);
26 print_hex_dump(KERN_WARNING, "Expected: ", DUMP_PREFIX_NONE, 16, 1,
27 out_test, q_test, true);
28 print_hex_dump(KERN_WARNING, "Got: ", DUMP_PREFIX_NONE, 16, 1,
29 out_real, q_real, true);
30
31 return false;
32}
33
13struct test_string { 34struct test_string {
14 const char *in; 35 const char *in;
15 const char *out; 36 const char *out;
@@ -39,12 +60,17 @@ static const struct test_string strings[] __initconst = {
39 }, 60 },
40}; 61};
41 62
42static void __init test_string_unescape(unsigned int flags, bool inplace) 63static void __init test_string_unescape(const char *name, unsigned int flags,
64 bool inplace)
43{ 65{
44 char in[256]; 66 int q_real = 256;
45 char out_test[256]; 67 char *in = kmalloc(q_real, GFP_KERNEL);
46 char out_real[256]; 68 char *out_test = kmalloc(q_real, GFP_KERNEL);
47 int i, p = 0, q_test = 0, q_real = sizeof(out_real); 69 char *out_real = kmalloc(q_real, GFP_KERNEL);
70 int i, p = 0, q_test = 0;
71
72 if (!in || !out_test || !out_real)
73 goto out;
48 74
49 for (i = 0; i < ARRAY_SIZE(strings); i++) { 75 for (i = 0; i < ARRAY_SIZE(strings); i++) {
50 const char *s = strings[i].in; 76 const char *s = strings[i].in;
@@ -77,15 +103,225 @@ static void __init test_string_unescape(unsigned int flags, bool inplace)
77 q_real = string_unescape(in, out_real, q_real, flags); 103 q_real = string_unescape(in, out_real, q_real, flags);
78 } 104 }
79 105
80 if (q_real != q_test || memcmp(out_test, out_real, q_test)) { 106 test_string_check_buf(name, flags, in, p - 1, out_real, q_real,
81 pr_warn("Test failed: flags = %u\n", flags); 107 out_test, q_test);
82 print_hex_dump(KERN_WARNING, "Input: ", 108out:
83 DUMP_PREFIX_NONE, 16, 1, in, p - 1, true); 109 kfree(out_real);
84 print_hex_dump(KERN_WARNING, "Expected: ", 110 kfree(out_test);
85 DUMP_PREFIX_NONE, 16, 1, out_test, q_test, true); 111 kfree(in);
86 print_hex_dump(KERN_WARNING, "Got: ", 112}
87 DUMP_PREFIX_NONE, 16, 1, out_real, q_real, true); 113
114struct test_string_1 {
115 const char *out;
116 unsigned int flags;
117};
118
119#define TEST_STRING_2_MAX_S1 32
120struct test_string_2 {
121 const char *in;
122 struct test_string_1 s1[TEST_STRING_2_MAX_S1];
123};
124
125#define TEST_STRING_2_DICT_0 NULL
126static const struct test_string_2 escape0[] __initconst = {{
127 .in = "\f\\ \n\r\t\v",
128 .s1 = {{
129 .out = "\\f\\ \\n\\r\\t\\v",
130 .flags = ESCAPE_SPACE,
131 },{
132 .out = "\\f\\134\\040\\n\\r\\t\\v",
133 .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
134 },{
135 .out = "\\f\\x5c\\x20\\n\\r\\t\\v",
136 .flags = ESCAPE_SPACE | ESCAPE_HEX,
137 },{
138 /* terminator */
139 }},
140},{
141 .in = "\\h\\\"\a\e\\",
142 .s1 = {{
143 .out = "\\\\h\\\\\"\\a\\e\\\\",
144 .flags = ESCAPE_SPECIAL,
145 },{
146 .out = "\\\\\\150\\\\\\042\\a\\e\\\\",
147 .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
148 },{
149 .out = "\\\\\\x68\\\\\\x22\\a\\e\\\\",
150 .flags = ESCAPE_SPECIAL | ESCAPE_HEX,
151 },{
152 /* terminator */
153 }},
154},{
155 .in = "\eb \\C\007\"\x90\r]",
156 .s1 = {{
157 .out = "\eb \\C\007\"\x90\\r]",
158 .flags = ESCAPE_SPACE,
159 },{
160 .out = "\\eb \\\\C\\a\"\x90\r]",
161 .flags = ESCAPE_SPECIAL,
162 },{
163 .out = "\\eb \\\\C\\a\"\x90\\r]",
164 .flags = ESCAPE_SPACE | ESCAPE_SPECIAL,
165 },{
166 .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\015\\135",
167 .flags = ESCAPE_OCTAL,
168 },{
169 .out = "\\033\\142\\040\\134\\103\\007\\042\\220\\r\\135",
170 .flags = ESCAPE_SPACE | ESCAPE_OCTAL,
171 },{
172 .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\015\\135",
173 .flags = ESCAPE_SPECIAL | ESCAPE_OCTAL,
174 },{
175 .out = "\\e\\142\\040\\\\\\103\\a\\042\\220\\r\\135",
176 .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_OCTAL,
177 },{
178 .out = "\eb \\C\007\"\x90\r]",
179 .flags = ESCAPE_NP,
180 },{
181 .out = "\eb \\C\007\"\x90\\r]",
182 .flags = ESCAPE_SPACE | ESCAPE_NP,
183 },{
184 .out = "\\eb \\C\\a\"\x90\r]",
185 .flags = ESCAPE_SPECIAL | ESCAPE_NP,
186 },{
187 .out = "\\eb \\C\\a\"\x90\\r]",
188 .flags = ESCAPE_SPACE | ESCAPE_SPECIAL | ESCAPE_NP,
189 },{
190 .out = "\\033b \\C\\007\"\\220\\015]",
191 .flags = ESCAPE_OCTAL | ESCAPE_NP,
192 },{
193 .out = "\\033b \\C\\007\"\\220\\r]",
194 .flags = ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_NP,
195 },{
196 .out = "\\eb \\C\\a\"\\220\\r]",
197 .flags = ESCAPE_SPECIAL | ESCAPE_SPACE | ESCAPE_OCTAL |
198 ESCAPE_NP,
199 },{
200 .out = "\\x1bb \\C\\x07\"\\x90\\x0d]",
201 .flags = ESCAPE_NP | ESCAPE_HEX,
202 },{
203 /* terminator */
204 }},
205},{
206 /* terminator */
207}};
208
209#define TEST_STRING_2_DICT_1 "b\\ \t\r"
210static const struct test_string_2 escape1[] __initconst = {{
211 .in = "\f\\ \n\r\t\v",
212 .s1 = {{
213 .out = "\f\\134\\040\n\\015\\011\v",
214 .flags = ESCAPE_OCTAL,
215 },{
216 .out = "\f\\x5c\\x20\n\\x0d\\x09\v",
217 .flags = ESCAPE_HEX,
218 },{
219 /* terminator */
220 }},
221},{
222 .in = "\\h\\\"\a\e\\",
223 .s1 = {{
224 .out = "\\134h\\134\"\a\e\\134",
225 .flags = ESCAPE_OCTAL,
226 },{
227 /* terminator */
228 }},
229},{
230 .in = "\eb \\C\007\"\x90\r]",
231 .s1 = {{
232 .out = "\e\\142\\040\\134C\007\"\x90\\015]",
233 .flags = ESCAPE_OCTAL,
234 },{
235 /* terminator */
236 }},
237},{
238 /* terminator */
239}};
240
241static __init const char *test_string_find_match(const struct test_string_2 *s2,
242 unsigned int flags)
243{
244 const struct test_string_1 *s1 = s2->s1;
245 unsigned int i;
246
247 if (!flags)
248 return s2->in;
249
250 /* Test cases are NULL-aware */
251 flags &= ~ESCAPE_NULL;
252
253 /* ESCAPE_OCTAL has a higher priority */
254 if (flags & ESCAPE_OCTAL)
255 flags &= ~ESCAPE_HEX;
256
257 for (i = 0; i < TEST_STRING_2_MAX_S1 && s1->out; i++, s1++)
258 if (s1->flags == flags)
259 return s1->out;
260 return NULL;
261}
262
263static __init void test_string_escape(const char *name,
264 const struct test_string_2 *s2,
265 unsigned int flags, const char *esc)
266{
267 int q_real = 512;
268 char *out_test = kmalloc(q_real, GFP_KERNEL);
269 char *out_real = kmalloc(q_real, GFP_KERNEL);
270 char *in = kmalloc(256, GFP_KERNEL);
271 char *buf = out_real;
272 int p = 0, q_test = 0;
273
274 if (!out_test || !out_real || !in)
275 goto out;
276
277 for (; s2->in; s2++) {
278 const char *out;
279 int len;
280
281 /* NULL injection */
282 if (flags & ESCAPE_NULL) {
283 in[p++] = '\0';
284 out_test[q_test++] = '\\';
285 out_test[q_test++] = '0';
286 }
287
288 /* Don't try strings that have no output */
289 out = test_string_find_match(s2, flags);
290 if (!out)
291 continue;
292
293 /* Copy string to in buffer */
294 len = strlen(s2->in);
295 memcpy(&in[p], s2->in, len);
296 p += len;
297
298 /* Copy expected result for given flags */
299 len = strlen(out);
300 memcpy(&out_test[q_test], out, len);
301 q_test += len;
88 } 302 }
303
304 q_real = string_escape_mem(in, p, &buf, q_real, flags, esc);
305
306 test_string_check_buf(name, flags, in, p, out_real, q_real, out_test,
307 q_test);
308out:
309 kfree(in);
310 kfree(out_real);
311 kfree(out_test);
312}
313
314static __init void test_string_escape_nomem(void)
315{
316 char *in = "\eb \\C\007\"\x90\r]";
317 char out[64], *buf = out;
318 int rc = -ENOMEM, ret;
319
320 ret = string_escape_str_any_np(in, &buf, strlen(in), NULL);
321 if (ret == rc)
322 return;
323
324 pr_err("Test 'escape nomem' failed: got %d instead of %d\n", ret, rc);
89} 325}
90 326
91static int __init test_string_helpers_init(void) 327static int __init test_string_helpers_init(void)
@@ -94,8 +330,19 @@ static int __init test_string_helpers_init(void)
94 330
95 pr_info("Running tests...\n"); 331 pr_info("Running tests...\n");
96 for (i = 0; i < UNESCAPE_ANY + 1; i++) 332 for (i = 0; i < UNESCAPE_ANY + 1; i++)
97 test_string_unescape(i, false); 333 test_string_unescape("unescape", i, false);
98 test_string_unescape(get_random_int() % (UNESCAPE_ANY + 1), true); 334 test_string_unescape("unescape inplace",
335 get_random_int() % (UNESCAPE_ANY + 1), true);
336
337 /* Without dictionary */
338 for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
339 test_string_escape("escape 0", escape0, i, TEST_STRING_2_DICT_0);
340
341 /* With dictionary */
342 for (i = 0; i < (ESCAPE_ANY_NP | ESCAPE_HEX) + 1; i++)
343 test_string_escape("escape 1", escape1, i, TEST_STRING_2_DICT_1);
344
345 test_string_escape_nomem();
99 346
100 return -EINVAL; 347 return -EINVAL;
101} 348}
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 89e0345733bd..23e070bcf72d 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -1342,6 +1342,44 @@ static struct bpf_test tests[] = {
1342 { { 0, -1 } } 1342 { { 0, -1 } }
1343 }, 1343 },
1344 { 1344 {
1345 "INT: shifts by register",
1346 .u.insns_int = {
1347 BPF_MOV64_IMM(R0, -1234),
1348 BPF_MOV64_IMM(R1, 1),
1349 BPF_ALU32_REG(BPF_RSH, R0, R1),
1350 BPF_JMP_IMM(BPF_JEQ, R0, 0x7ffffd97, 1),
1351 BPF_EXIT_INSN(),
1352 BPF_MOV64_IMM(R2, 1),
1353 BPF_ALU64_REG(BPF_LSH, R0, R2),
1354 BPF_MOV32_IMM(R4, -1234),
1355 BPF_JMP_REG(BPF_JEQ, R0, R4, 1),
1356 BPF_EXIT_INSN(),
1357 BPF_ALU64_IMM(BPF_AND, R4, 63),
1358 BPF_ALU64_REG(BPF_LSH, R0, R4), /* R0 <= 46 */
1359 BPF_MOV64_IMM(R3, 47),
1360 BPF_ALU64_REG(BPF_ARSH, R0, R3),
1361 BPF_JMP_IMM(BPF_JEQ, R0, -617, 1),
1362 BPF_EXIT_INSN(),
1363 BPF_MOV64_IMM(R2, 1),
1364 BPF_ALU64_REG(BPF_LSH, R4, R2), /* R4 = 46 << 1 */
1365 BPF_JMP_IMM(BPF_JEQ, R4, 92, 1),
1366 BPF_EXIT_INSN(),
1367 BPF_MOV64_IMM(R4, 4),
1368 BPF_ALU64_REG(BPF_LSH, R4, R4), /* R4 = 4 << 4 */
1369 BPF_JMP_IMM(BPF_JEQ, R4, 64, 1),
1370 BPF_EXIT_INSN(),
1371 BPF_MOV64_IMM(R4, 5),
1372 BPF_ALU32_REG(BPF_LSH, R4, R4), /* R4 = 5 << 5 */
1373 BPF_JMP_IMM(BPF_JEQ, R4, 160, 1),
1374 BPF_EXIT_INSN(),
1375 BPF_MOV64_IMM(R0, -1),
1376 BPF_EXIT_INSN(),
1377 },
1378 INTERNAL,
1379 { },
1380 { { 0, -1 } }
1381 },
1382 {
1345 "INT: DIV + ABS", 1383 "INT: DIV + ABS",
1346 .u.insns_int = { 1384 .u.insns_int = {
1347 BPF_ALU64_REG(BPF_MOV, R6, R1), 1385 BPF_ALU64_REG(BPF_MOV, R6, R1),
@@ -1697,6 +1735,27 @@ static struct bpf_test tests[] = {
1697 { }, 1735 { },
1698 { { 1, 0 } }, 1736 { { 1, 0 } },
1699 }, 1737 },
1738 {
1739 "load 64-bit immediate",
1740 .u.insns_int = {
1741 BPF_LD_IMM64(R1, 0x567800001234LL),
1742 BPF_MOV64_REG(R2, R1),
1743 BPF_MOV64_REG(R3, R2),
1744 BPF_ALU64_IMM(BPF_RSH, R2, 32),
1745 BPF_ALU64_IMM(BPF_LSH, R3, 32),
1746 BPF_ALU64_IMM(BPF_RSH, R3, 32),
1747 BPF_ALU64_IMM(BPF_MOV, R0, 0),
1748 BPF_JMP_IMM(BPF_JEQ, R2, 0x5678, 1),
1749 BPF_EXIT_INSN(),
1750 BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1),
1751 BPF_EXIT_INSN(),
1752 BPF_ALU64_IMM(BPF_MOV, R0, 1),
1753 BPF_EXIT_INSN(),
1754 },
1755 INTERNAL,
1756 { },
1757 { { 0, 1 } }
1758 },
1700}; 1759};
1701 1760
1702static struct net_device dev; 1761static struct net_device dev;
@@ -1798,7 +1857,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
1798 break; 1857 break;
1799 1858
1800 case INTERNAL: 1859 case INTERNAL:
1801 fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL); 1860 fp = bpf_prog_alloc(bpf_prog_size(flen), 0);
1802 if (fp == NULL) { 1861 if (fp == NULL) {
1803 pr_cont("UNEXPECTED_FAIL no memory left\n"); 1862 pr_cont("UNEXPECTED_FAIL no memory left\n");
1804 *err = -ENOMEM; 1863 *err = -ENOMEM;
@@ -1835,7 +1894,7 @@ static int __run_one(const struct bpf_prog *fp, const void *data,
1835 int runs, u64 *duration) 1894 int runs, u64 *duration)
1836{ 1895{
1837 u64 start, finish; 1896 u64 start, finish;
1838 int ret, i; 1897 int ret = 0, i;
1839 1898
1840 start = ktime_to_us(ktime_get()); 1899 start = ktime_to_us(ktime_get());
1841 1900
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 0c7e9ab2d88f..0b79908dfe89 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -249,9 +249,7 @@ EXPORT_SYMBOL(textsearch_find_continuous);
249 * @flags: search flags 249 * @flags: search flags
250 * 250 *
251 * Looks up the search algorithm module and creates a new textsearch 251 * Looks up the search algorithm module and creates a new textsearch
252 * configuration for the specified pattern. Upon completion all 252 * configuration for the specified pattern.
253 * necessary refcnts are held and the configuration must be put back
254 * using textsearch_put() after usage.
255 * 253 *
256 * Note: The format of the pattern may not be compatible between 254 * Note: The format of the pattern may not be compatible between
257 * the various search algorithms. 255 * the various search algorithms.
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 6fe2c84eb055..ec337f64f52d 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -33,6 +33,7 @@
33#include <asm/page.h> /* for PAGE_SIZE */ 33#include <asm/page.h> /* for PAGE_SIZE */
34#include <asm/sections.h> /* for dereference_function_descriptor() */ 34#include <asm/sections.h> /* for dereference_function_descriptor() */
35 35
36#include <linux/string_helpers.h>
36#include "kstrtox.h" 37#include "kstrtox.h"
37 38
38/** 39/**
@@ -1101,6 +1102,62 @@ char *ip4_addr_string_sa(char *buf, char *end, const struct sockaddr_in *sa,
1101} 1102}
1102 1103
1103static noinline_for_stack 1104static noinline_for_stack
1105char *escaped_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
1106 const char *fmt)
1107{
1108 bool found = true;
1109 int count = 1;
1110 unsigned int flags = 0;
1111 int len;
1112
1113 if (spec.field_width == 0)
1114 return buf; /* nothing to print */
1115
1116 if (ZERO_OR_NULL_PTR(addr))
1117 return string(buf, end, NULL, spec); /* NULL pointer */
1118
1119
1120 do {
1121 switch (fmt[count++]) {
1122 case 'a':
1123 flags |= ESCAPE_ANY;
1124 break;
1125 case 'c':
1126 flags |= ESCAPE_SPECIAL;
1127 break;
1128 case 'h':
1129 flags |= ESCAPE_HEX;
1130 break;
1131 case 'n':
1132 flags |= ESCAPE_NULL;
1133 break;
1134 case 'o':
1135 flags |= ESCAPE_OCTAL;
1136 break;
1137 case 'p':
1138 flags |= ESCAPE_NP;
1139 break;
1140 case 's':
1141 flags |= ESCAPE_SPACE;
1142 break;
1143 default:
1144 found = false;
1145 break;
1146 }
1147 } while (found);
1148
1149 if (!flags)
1150 flags = ESCAPE_ANY_NP;
1151
1152 len = spec.field_width < 0 ? 1 : spec.field_width;
1153
1154 /* Ignore the error. We print as many characters as we can */
1155 string_escape_mem(addr, len, &buf, end - buf, flags, NULL);
1156
1157 return buf;
1158}
1159
1160static noinline_for_stack
1104char *uuid_string(char *buf, char *end, const u8 *addr, 1161char *uuid_string(char *buf, char *end, const u8 *addr,
1105 struct printf_spec spec, const char *fmt) 1162 struct printf_spec spec, const char *fmt)
1106{ 1163{
@@ -1221,6 +1278,17 @@ int kptr_restrict __read_mostly;
1221 * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order 1278 * - '[Ii][4S][hnbl]' IPv4 addresses in host, network, big or little endian order
1222 * - 'I[6S]c' for IPv6 addresses printed as specified by 1279 * - 'I[6S]c' for IPv6 addresses printed as specified by
1223 * http://tools.ietf.org/html/rfc5952 1280 * http://tools.ietf.org/html/rfc5952
1281 * - 'E[achnops]' For an escaped buffer, where rules are defined by combination
1282 * of the following flags (see string_escape_mem() for the
1283 * details):
1284 * a - ESCAPE_ANY
1285 * c - ESCAPE_SPECIAL
1286 * h - ESCAPE_HEX
1287 * n - ESCAPE_NULL
1288 * o - ESCAPE_OCTAL
1289 * p - ESCAPE_NP
1290 * s - ESCAPE_SPACE
1291 * By default ESCAPE_ANY_NP is used.
1224 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 1292 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
1225 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 1293 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
1226 * Options for %pU are: 1294 * Options for %pU are:
@@ -1321,6 +1389,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1321 }} 1389 }}
1322 } 1390 }
1323 break; 1391 break;
1392 case 'E':
1393 return escaped_string(buf, end, ptr, spec, fmt);
1324 case 'U': 1394 case 'U':
1325 return uuid_string(buf, end, ptr, spec, fmt); 1395 return uuid_string(buf, end, ptr, spec, fmt);
1326 case 'V': 1396 case 'V':
@@ -1633,6 +1703,7 @@ qualifier:
1633 * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address 1703 * %piS depending on sa_family of 'struct sockaddr *' print IPv4/IPv6 address
1634 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper 1704 * %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
1635 * case. 1705 * case.
1706 * %*pE[achnops] print an escaped buffer
1636 * %*ph[CDN] a variable-length hex string with a separator (supports up to 64 1707 * %*ph[CDN] a variable-length hex string with a separator (supports up to 64
1637 * bytes of the input) 1708 * bytes of the input)
1638 * %n is ignored 1709 * %n is ignored
@@ -1937,7 +2008,7 @@ EXPORT_SYMBOL(sprintf);
1937 * @args: Arguments for the format string 2008 * @args: Arguments for the format string
1938 * 2009 *
1939 * The format follows C99 vsnprintf, except %n is ignored, and its argument 2010 * The format follows C99 vsnprintf, except %n is ignored, and its argument
1940 * is skiped. 2011 * is skipped.
1941 * 2012 *
1942 * The return value is the number of words(32bits) which would be generated for 2013 * The return value is the number of words(32bits) which would be generated for
1943 * the given input. 2014 * the given input.
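A hedged usage sketch of the new %pE conversion; the field width selects how many bytes of the buffer are consumed, and the caller name is hypothetical:

static void example_print_escaped(const u8 *buf, int len)
{
	/* Default flags (no letters after E) behave like ESCAPE_ANY_NP. */
	pr_info("payload: %*pE\n", len, buf);

	/* Hex-escape non-printable bytes instead: 'h' = ESCAPE_HEX, 'p' = ESCAPE_NP. */
	pr_info("payload: %*pEhp\n", len, buf);
}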