Diffstat (limited to 'lib')
39 files changed, 1778 insertions, 705 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 334f7722a999..54cf309a92a5 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -51,6 +51,9 @@ config PERCPU_RWSEM | |||
51 | config ARCH_USE_CMPXCHG_LOCKREF | 51 | config ARCH_USE_CMPXCHG_LOCKREF |
52 | bool | 52 | bool |
53 | 53 | ||
54 | config ARCH_HAS_FAST_MULTIPLIER | ||
55 | bool | ||
56 | |||
54 | config CRC_CCITT | 57 | config CRC_CCITT |
55 | tristate "CRC-CCITT functions" | 58 | tristate "CRC-CCITT functions" |
56 | help | 59 | help |
@@ -396,6 +399,39 @@ config CPU_RMAP | |||
396 | config DQL | 399 | config DQL |
397 | bool | 400 | bool |
398 | 401 | ||
402 | config GLOB | ||
403 | bool | ||
404 | # This actually supports modular compilation, but the module overhead | ||
405 | # is ridiculous for the amount of code involved. Until an out-of-tree | ||
406 | # driver asks for it, we'll just link it directly into the kernel | ||
407 | # when required. Since we're ignoring out-of-tree users, there's also | ||
408 | # no need to bother prompting for a manual decision: | ||
409 | # prompt "glob_match() function" | ||
410 | help | ||
411 | This option provides a glob_match function for performing | ||
412 | simple text pattern matching. It originated in the ATA code | ||
413 | to blacklist particular drive models, but other device drivers | ||
414 | may need similar functionality. | ||
415 | |||
416 | All drivers in the Linux kernel tree that require this function | ||
417 | should automatically select this option. Say N unless you | ||
418 | are compiling an out-of-tree driver which tells you that it | ||
419 | depends on this. | ||
420 | |||
421 | config GLOB_SELFTEST | ||
422 | bool "glob self-test on init" | ||
423 | default n | ||
424 | depends on GLOB | ||
425 | help | ||
426 | This option enables a simple self-test of the glob_match | ||
427 | function on startup. It is primarily useful for people | ||
428 | working on the code to ensure they haven't introduced any | ||
429 | regressions. | ||
430 | |||
431 | It only adds a little bit of code and slows kernel boot (or | ||
432 | module load) by a small amount, so you're welcome to play with | ||
433 | it, but you probably don't need it. | ||
434 | |||
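For reference, the interface these two options build and test is a single predicate, bool glob_match(char const *pat, char const *str), declared in include/linux/glob.h. A minimal sketch of the ATA-style quirk-matching use case described above; the pattern strings here are hypothetical:

    #include <linux/kernel.h>   /* ARRAY_SIZE() */
    #include <linux/glob.h>

    /* Returns true if the drive model matches any quirk pattern.
     * '*' matches any run of characters, '?' exactly one character,
     * and '[...]' a character class. */
    static bool model_needs_quirk(const char *model)
    {
            static const char * const patterns[] = {
                    "EXAMPLE-SSD*",        /* hypothetical pattern */
                    "EX????-DISK[0-9]",    /* hypothetical pattern */
            };
            size_t i;

            for (i = 0; i < ARRAY_SIZE(patterns); i++)
                    if (glob_match(patterns[i], model))
                            return true;
            return false;
    }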
399 | # | 435 | # |
400 | # Netlink attribute parsing support is select'ed if needed | 436 | # Netlink attribute parsing support is select'ed if needed |
401 | # | 437 | # |
@@ -451,7 +487,8 @@ config MPILIB | |||
451 | 487 | ||
452 | config SIGNATURE | 488 | config SIGNATURE |
453 | tristate | 489 | tristate |
454 | depends on KEYS && CRYPTO | 490 | depends on KEYS |
491 | select CRYPTO | ||
455 | select CRYPTO_SHA1 | 492 | select CRYPTO_SHA1 |
456 | select MPILIB | 493 | select MPILIB |
457 | help | 494 | help |
@@ -474,4 +511,11 @@ config UCS2_STRING | |||
474 | 511 | ||
475 | source "lib/fonts/Kconfig" | 512 | source "lib/fonts/Kconfig" |
476 | 513 | ||
514 | # | ||
515 | # sg chaining option | ||
516 | # | ||
517 | |||
518 | config ARCH_HAS_SG_CHAIN | ||
519 | def_bool n | ||
520 | |||
477 | endmenu | 521 | endmenu |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7a638aa3545b..a28590083622 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -15,7 +15,7 @@ config PRINTK_TIME | |||
15 | The behavior is also controlled by the kernel command line | 15 | The behavior is also controlled by the kernel command line |
16 | parameter printk.time=1. See Documentation/kernel-parameters.txt | 16 | parameter printk.time=1. See Documentation/kernel-parameters.txt |
17 | 17 | ||
18 | config DEFAULT_MESSAGE_LOGLEVEL | 18 | config MESSAGE_LOGLEVEL_DEFAULT |
19 | int "Default message log level (1-7)" | 19 | int "Default message log level (1-7)" |
20 | range 1 7 | 20 | range 1 7 |
21 | default "4" | 21 | default "4" |
@@ -143,6 +143,30 @@ config DEBUG_INFO_REDUCED | |||
143 | DEBUG_INFO build and compile times are reduced too. | 143 | DEBUG_INFO build and compile times are reduced too. |
144 | Only works with newer gcc versions. | 144 | Only works with newer gcc versions. |
145 | 145 | ||
146 | config DEBUG_INFO_SPLIT | ||
147 | bool "Produce split debuginfo in .dwo files" | ||
148 | depends on DEBUG_INFO | ||
149 | help | ||
150 | Generate debug info into separate .dwo files. This significantly | ||
151 | reduces the build directory size for builds with DEBUG_INFO, | ||
152 | because it stores the information only once on disk in .dwo | ||
153 | files instead of multiple times in object files and executables. | ||
154 | In addition the debug information is also compressed. | ||
155 | |||
156 | Requires recent gcc (4.7+) and recent gdb/binutils. | ||
157 | Any tool that packages or reads debug information would need | ||
158 | to know about the .dwo files and include them. | ||
159 | Incompatible with older versions of ccache. | ||
160 | |||
161 | config DEBUG_INFO_DWARF4 | ||
162 | bool "Generate dwarf4 debuginfo" | ||
163 | depends on DEBUG_INFO | ||
164 | help | ||
165 | Generate dwarf4 debug info. This requires recent versions | ||
166 | of gcc and gdb. It makes the debug information larger. | ||
167 | But it significantly improves the success of resolving | ||
168 | variables in gdb on optimized code. | ||
169 | |||
146 | config ENABLE_WARN_DEPRECATED | 170 | config ENABLE_WARN_DEPRECATED |
147 | bool "Enable __deprecated logic" | 171 | bool "Enable __deprecated logic" |
148 | default y | 172 | default y |
@@ -835,7 +859,7 @@ config DEBUG_RT_MUTEXES | |||
835 | 859 | ||
836 | config RT_MUTEX_TESTER | 860 | config RT_MUTEX_TESTER |
837 | bool "Built-in scriptable tester for rt-mutexes" | 861 | bool "Built-in scriptable tester for rt-mutexes" |
838 | depends on DEBUG_KERNEL && RT_MUTEXES | 862 | depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN |
839 | help | 863 | help |
840 | This option enables a rt-mutex tester. | 864 | This option enables a rt-mutex tester. |
841 | 865 | ||
@@ -868,6 +892,10 @@ config DEBUG_WW_MUTEX_SLOWPATH | |||
868 | the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this | 892 | the full mutex checks enabled with (CONFIG_PROVE_LOCKING) this |
869 | will test all possible w/w mutex interface abuse with the | 893 | will test all possible w/w mutex interface abuse with the |
870 | exception of simply not acquiring all the required locks. | 894 | exception of simply not acquiring all the required locks. |
895 | Note that this feature can introduce significant overhead, so | ||
896 | it really should not be enabled in a production or distro kernel, | ||
897 | even a debug kernel. If you are a driver writer, enable it. If | ||
898 | you are a distro, do not. | ||
871 | 899 | ||
872 | config DEBUG_LOCK_ALLOC | 900 | config DEBUG_LOCK_ALLOC |
873 | bool "Lock debugging: detect incorrect freeing of live locks" | 901 | bool "Lock debugging: detect incorrect freeing of live locks" |
@@ -1008,8 +1036,13 @@ config TRACE_IRQFLAGS | |||
1008 | either tracing or lock debugging. | 1036 | either tracing or lock debugging. |
1009 | 1037 | ||
1010 | config STACKTRACE | 1038 | config STACKTRACE |
1011 | bool | 1039 | bool "Stack backtrace support" |
1012 | depends on STACKTRACE_SUPPORT | 1040 | depends on STACKTRACE_SUPPORT |
1041 | help | ||
1042 | This option causes the kernel to create a /proc/pid/stack for | ||
1043 | every process, showing its current stack trace. | ||
1044 | It is also used by various kernel debugging features that require | ||
1045 | stack trace generation. | ||
1013 | 1046 | ||
1014 | config DEBUG_KOBJECT | 1047 | config DEBUG_KOBJECT |
1015 | bool "kobject debugging" | 1048 | bool "kobject debugging" |
@@ -1131,20 +1164,6 @@ config PROVE_RCU_REPEATEDLY | |||
1131 | 1164 | ||
1132 | Say N if you are unsure. | 1165 | Say N if you are unsure. |
1133 | 1166 | ||
1134 | config PROVE_RCU_DELAY | ||
1135 | bool "RCU debugging: preemptible RCU race provocation" | ||
1136 | depends on DEBUG_KERNEL && PREEMPT_RCU | ||
1137 | default n | ||
1138 | help | ||
1139 | There is a class of races that involve an unlikely preemption | ||
1140 | of __rcu_read_unlock() just after ->rcu_read_lock_nesting has | ||
1141 | been set to INT_MIN. This feature inserts a delay at that | ||
1142 | point to increase the probability of these races. | ||
1143 | |||
1144 | Say Y to increase probability of preemption of __rcu_read_unlock(). | ||
1145 | |||
1146 | Say N if you are unsure. | ||
1147 | |||
1148 | config SPARSE_RCU_POINTER | 1167 | config SPARSE_RCU_POINTER |
1149 | bool "RCU debugging: sparse-based checks for pointer usage" | 1168 | bool "RCU debugging: sparse-based checks for pointer usage" |
1150 | default n | 1169 | default n |
@@ -1550,6 +1569,14 @@ config TEST_STRING_HELPERS | |||
1550 | config TEST_KSTRTOX | 1569 | config TEST_KSTRTOX |
1551 | tristate "Test kstrto*() family of functions at runtime" | 1570 | tristate "Test kstrto*() family of functions at runtime" |
1552 | 1571 | ||
1572 | config TEST_RHASHTABLE | ||
1573 | bool "Perform selftest on resizable hash table" | ||
1574 | default n | ||
1575 | help | ||
1576 | Enable this option to test the rhashtable functions at boot. | ||
1577 | |||
1578 | If unsure, say N. | ||
1579 | |||
1553 | endmenu # runtime tests | 1580 | endmenu # runtime tests |
1554 | 1581 | ||
1555 | config PROVIDE_OHCI1394_DMA_INIT | 1582 | config PROVIDE_OHCI1394_DMA_INIT |
@@ -1649,6 +1676,28 @@ config TEST_BPF | |||
1649 | 1676 | ||
1650 | If unsure, say N. | 1677 | If unsure, say N. |
1651 | 1678 | ||
1679 | config TEST_FIRMWARE | ||
1680 | tristate "Test firmware loading via userspace interface" | ||
1681 | default n | ||
1682 | depends on FW_LOADER | ||
1683 | help | ||
1684 | This builds the "test_firmware" module that creates a userspace | ||
1685 | interface for testing firmware loading. This can be used to | ||
1686 | control the triggering of firmware loading without needing an | ||
1687 | actual firmware-using device. The contents can be rechecked by | ||
1688 | userspace. | ||
1689 | |||
1690 | If unsure, say N. | ||
1691 | |||
1692 | config TEST_UDELAY | ||
1693 | tristate "udelay test driver" | ||
1694 | default n | ||
1695 | help | ||
1696 | This builds the "udelay_test" module that helps to make sure | ||
1697 | that udelay() is working properly. | ||
1698 | |||
1699 | If unsure, say N. | ||
1700 | |||
1652 | source "samples/Kconfig" | 1701 | source "samples/Kconfig" |
1653 | 1702 | ||
1654 | source "lib/Kconfig.kgdb" | 1703 | source "lib/Kconfig.kgdb" |
diff --git a/lib/Makefile b/lib/Makefile
index ba967a19edba..d6b4bc496408 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | |||
26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ |
28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
29 | percpu-refcount.o percpu_ida.o hash.o | 29 | percpu-refcount.o percpu_ida.o hash.o rhashtable.o |
30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
32 | obj-y += kstrtox.o | 32 | obj-y += kstrtox.o |
@@ -34,6 +34,7 @@ obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | |||
34 | obj-$(CONFIG_TEST_MODULE) += test_module.o | 34 | obj-$(CONFIG_TEST_MODULE) += test_module.o |
35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | 35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o |
36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | 36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o |
37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | ||
37 | 38 | ||
38 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 39 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
39 | CFLAGS_kobject.o += -DDEBUG | 40 | CFLAGS_kobject.o += -DDEBUG |
@@ -136,6 +137,8 @@ obj-$(CONFIG_CORDIC) += cordic.o | |||
136 | 137 | ||
137 | obj-$(CONFIG_DQL) += dynamic_queue_limits.o | 138 | obj-$(CONFIG_DQL) += dynamic_queue_limits.o |
138 | 139 | ||
140 | obj-$(CONFIG_GLOB) += glob.o | ||
141 | |||
139 | obj-$(CONFIG_MPILIB) += mpi/ | 142 | obj-$(CONFIG_MPILIB) += mpi/ |
140 | obj-$(CONFIG_SIGNATURE) += digsig.o | 143 | obj-$(CONFIG_SIGNATURE) += digsig.o |
141 | 144 | ||
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index c0b1007011e1..2404d03e251a 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -1723,11 +1723,13 @@ ascend_old_tree: | |||
1723 | shortcut = assoc_array_ptr_to_shortcut(ptr); | 1723 | shortcut = assoc_array_ptr_to_shortcut(ptr); |
1724 | slot = shortcut->parent_slot; | 1724 | slot = shortcut->parent_slot; |
1725 | cursor = shortcut->back_pointer; | 1725 | cursor = shortcut->back_pointer; |
1726 | if (!cursor) | ||
1727 | goto gc_complete; | ||
1726 | } else { | 1728 | } else { |
1727 | slot = node->parent_slot; | 1729 | slot = node->parent_slot; |
1728 | cursor = ptr; | 1730 | cursor = ptr; |
1729 | } | 1731 | } |
1730 | BUG_ON(!ptr); | 1732 | BUG_ON(!cursor); |
1731 | node = assoc_array_ptr_to_node(cursor); | 1733 | node = assoc_array_ptr_to_node(cursor); |
1732 | slot++; | 1734 | slot++; |
1733 | goto continue_node; | 1735 | goto continue_node; |
@@ -1735,7 +1737,7 @@ ascend_old_tree: | |||
1735 | gc_complete: | 1737 | gc_complete: |
1736 | edit->set[0].to = new_root; | 1738 | edit->set[0].to = new_root; |
1737 | assoc_array_apply_edit(edit); | 1739 | assoc_array_apply_edit(edit); |
1738 | edit->array->nr_leaves_on_tree = nr_leaves_on_tree; | 1740 | array->nr_leaves_on_tree = nr_leaves_on_tree; |
1739 | return 0; | 1741 | return 0; |
1740 | 1742 | ||
1741 | enomem: | 1743 | enomem: |
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 06f7e4fe8d2d..1e031f2c9aba 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -40,9 +40,9 @@ | |||
40 | * for the best explanations of this ordering. | 40 | * for the best explanations of this ordering. |
41 | */ | 41 | */ |
42 | 42 | ||
43 | int __bitmap_empty(const unsigned long *bitmap, int bits) | 43 | int __bitmap_empty(const unsigned long *bitmap, unsigned int bits) |
44 | { | 44 | { |
45 | int k, lim = bits/BITS_PER_LONG; | 45 | unsigned int k, lim = bits/BITS_PER_LONG; |
46 | for (k = 0; k < lim; ++k) | 46 | for (k = 0; k < lim; ++k) |
47 | if (bitmap[k]) | 47 | if (bitmap[k]) |
48 | return 0; | 48 | return 0; |
@@ -55,9 +55,9 @@ int __bitmap_empty(const unsigned long *bitmap, int bits) | |||
55 | } | 55 | } |
56 | EXPORT_SYMBOL(__bitmap_empty); | 56 | EXPORT_SYMBOL(__bitmap_empty); |
57 | 57 | ||
58 | int __bitmap_full(const unsigned long *bitmap, int bits) | 58 | int __bitmap_full(const unsigned long *bitmap, unsigned int bits) |
59 | { | 59 | { |
60 | int k, lim = bits/BITS_PER_LONG; | 60 | unsigned int k, lim = bits/BITS_PER_LONG; |
61 | for (k = 0; k < lim; ++k) | 61 | for (k = 0; k < lim; ++k) |
62 | if (~bitmap[k]) | 62 | if (~bitmap[k]) |
63 | return 0; | 63 | return 0; |
@@ -71,9 +71,9 @@ int __bitmap_full(const unsigned long *bitmap, int bits) | |||
71 | EXPORT_SYMBOL(__bitmap_full); | 71 | EXPORT_SYMBOL(__bitmap_full); |
72 | 72 | ||
73 | int __bitmap_equal(const unsigned long *bitmap1, | 73 | int __bitmap_equal(const unsigned long *bitmap1, |
74 | const unsigned long *bitmap2, int bits) | 74 | const unsigned long *bitmap2, unsigned int bits) |
75 | { | 75 | { |
76 | int k, lim = bits/BITS_PER_LONG; | 76 | unsigned int k, lim = bits/BITS_PER_LONG; |
77 | for (k = 0; k < lim; ++k) | 77 | for (k = 0; k < lim; ++k) |
78 | if (bitmap1[k] != bitmap2[k]) | 78 | if (bitmap1[k] != bitmap2[k]) |
79 | return 0; | 79 | return 0; |
@@ -86,14 +86,14 @@ int __bitmap_equal(const unsigned long *bitmap1, | |||
86 | } | 86 | } |
87 | EXPORT_SYMBOL(__bitmap_equal); | 87 | EXPORT_SYMBOL(__bitmap_equal); |
88 | 88 | ||
89 | void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits) | 89 | void __bitmap_complement(unsigned long *dst, const unsigned long *src, unsigned int bits) |
90 | { | 90 | { |
91 | int k, lim = bits/BITS_PER_LONG; | 91 | unsigned int k, lim = bits/BITS_PER_LONG; |
92 | for (k = 0; k < lim; ++k) | 92 | for (k = 0; k < lim; ++k) |
93 | dst[k] = ~src[k]; | 93 | dst[k] = ~src[k]; |
94 | 94 | ||
95 | if (bits % BITS_PER_LONG) | 95 | if (bits % BITS_PER_LONG) |
96 | dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits); | 96 | dst[k] = ~src[k]; |
97 | } | 97 | } |
98 | EXPORT_SYMBOL(__bitmap_complement); | 98 | EXPORT_SYMBOL(__bitmap_complement); |
99 | 99 | ||
@@ -182,23 +182,26 @@ void __bitmap_shift_left(unsigned long *dst, | |||
182 | EXPORT_SYMBOL(__bitmap_shift_left); | 182 | EXPORT_SYMBOL(__bitmap_shift_left); |
183 | 183 | ||
184 | int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, | 184 | int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, |
185 | const unsigned long *bitmap2, int bits) | 185 | const unsigned long *bitmap2, unsigned int bits) |
186 | { | 186 | { |
187 | int k; | 187 | unsigned int k; |
188 | int nr = BITS_TO_LONGS(bits); | 188 | unsigned int lim = bits/BITS_PER_LONG; |
189 | unsigned long result = 0; | 189 | unsigned long result = 0; |
190 | 190 | ||
191 | for (k = 0; k < nr; k++) | 191 | for (k = 0; k < lim; k++) |
192 | result |= (dst[k] = bitmap1[k] & bitmap2[k]); | 192 | result |= (dst[k] = bitmap1[k] & bitmap2[k]); |
193 | if (bits % BITS_PER_LONG) | ||
194 | result |= (dst[k] = bitmap1[k] & bitmap2[k] & | ||
195 | BITMAP_LAST_WORD_MASK(bits)); | ||
193 | return result != 0; | 196 | return result != 0; |
194 | } | 197 | } |
195 | EXPORT_SYMBOL(__bitmap_and); | 198 | EXPORT_SYMBOL(__bitmap_and); |
196 | 199 | ||
197 | void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, | 200 | void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, |
198 | const unsigned long *bitmap2, int bits) | 201 | const unsigned long *bitmap2, unsigned int bits) |
199 | { | 202 | { |
200 | int k; | 203 | unsigned int k; |
201 | int nr = BITS_TO_LONGS(bits); | 204 | unsigned int nr = BITS_TO_LONGS(bits); |
202 | 205 | ||
203 | for (k = 0; k < nr; k++) | 206 | for (k = 0; k < nr; k++) |
204 | dst[k] = bitmap1[k] | bitmap2[k]; | 207 | dst[k] = bitmap1[k] | bitmap2[k]; |
@@ -206,10 +209,10 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, | |||
206 | EXPORT_SYMBOL(__bitmap_or); | 209 | EXPORT_SYMBOL(__bitmap_or); |
207 | 210 | ||
208 | void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, | 211 | void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, |
209 | const unsigned long *bitmap2, int bits) | 212 | const unsigned long *bitmap2, unsigned int bits) |
210 | { | 213 | { |
211 | int k; | 214 | unsigned int k; |
212 | int nr = BITS_TO_LONGS(bits); | 215 | unsigned int nr = BITS_TO_LONGS(bits); |
213 | 216 | ||
214 | for (k = 0; k < nr; k++) | 217 | for (k = 0; k < nr; k++) |
215 | dst[k] = bitmap1[k] ^ bitmap2[k]; | 218 | dst[k] = bitmap1[k] ^ bitmap2[k]; |
@@ -217,22 +220,25 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, | |||
217 | EXPORT_SYMBOL(__bitmap_xor); | 220 | EXPORT_SYMBOL(__bitmap_xor); |
218 | 221 | ||
219 | int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, | 222 | int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, |
220 | const unsigned long *bitmap2, int bits) | 223 | const unsigned long *bitmap2, unsigned int bits) |
221 | { | 224 | { |
222 | int k; | 225 | unsigned int k; |
223 | int nr = BITS_TO_LONGS(bits); | 226 | unsigned int lim = bits/BITS_PER_LONG; |
224 | unsigned long result = 0; | 227 | unsigned long result = 0; |
225 | 228 | ||
226 | for (k = 0; k < nr; k++) | 229 | for (k = 0; k < lim; k++) |
227 | result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); | 230 | result |= (dst[k] = bitmap1[k] & ~bitmap2[k]); |
231 | if (bits % BITS_PER_LONG) | ||
232 | result |= (dst[k] = bitmap1[k] & ~bitmap2[k] & | ||
233 | BITMAP_LAST_WORD_MASK(bits)); | ||
228 | return result != 0; | 234 | return result != 0; |
229 | } | 235 | } |
230 | EXPORT_SYMBOL(__bitmap_andnot); | 236 | EXPORT_SYMBOL(__bitmap_andnot); |
231 | 237 | ||
232 | int __bitmap_intersects(const unsigned long *bitmap1, | 238 | int __bitmap_intersects(const unsigned long *bitmap1, |
233 | const unsigned long *bitmap2, int bits) | 239 | const unsigned long *bitmap2, unsigned int bits) |
234 | { | 240 | { |
235 | int k, lim = bits/BITS_PER_LONG; | 241 | unsigned int k, lim = bits/BITS_PER_LONG; |
236 | for (k = 0; k < lim; ++k) | 242 | for (k = 0; k < lim; ++k) |
237 | if (bitmap1[k] & bitmap2[k]) | 243 | if (bitmap1[k] & bitmap2[k]) |
238 | return 1; | 244 | return 1; |
@@ -245,9 +251,9 @@ int __bitmap_intersects(const unsigned long *bitmap1, | |||
245 | EXPORT_SYMBOL(__bitmap_intersects); | 251 | EXPORT_SYMBOL(__bitmap_intersects); |
246 | 252 | ||
247 | int __bitmap_subset(const unsigned long *bitmap1, | 253 | int __bitmap_subset(const unsigned long *bitmap1, |
248 | const unsigned long *bitmap2, int bits) | 254 | const unsigned long *bitmap2, unsigned int bits) |
249 | { | 255 | { |
250 | int k, lim = bits/BITS_PER_LONG; | 256 | unsigned int k, lim = bits/BITS_PER_LONG; |
251 | for (k = 0; k < lim; ++k) | 257 | for (k = 0; k < lim; ++k) |
252 | if (bitmap1[k] & ~bitmap2[k]) | 258 | if (bitmap1[k] & ~bitmap2[k]) |
253 | return 0; | 259 | return 0; |
@@ -259,9 +265,10 @@ int __bitmap_subset(const unsigned long *bitmap1, | |||
259 | } | 265 | } |
260 | EXPORT_SYMBOL(__bitmap_subset); | 266 | EXPORT_SYMBOL(__bitmap_subset); |
261 | 267 | ||
262 | int __bitmap_weight(const unsigned long *bitmap, int bits) | 268 | int __bitmap_weight(const unsigned long *bitmap, unsigned int bits) |
263 | { | 269 | { |
264 | int k, w = 0, lim = bits/BITS_PER_LONG; | 270 | unsigned int k, lim = bits/BITS_PER_LONG; |
271 | int w = 0; | ||
265 | 272 | ||
266 | for (k = 0; k < lim; k++) | 273 | for (k = 0; k < lim; k++) |
267 | w += hweight_long(bitmap[k]); | 274 | w += hweight_long(bitmap[k]); |
@@ -273,42 +280,42 @@ int __bitmap_weight(const unsigned long *bitmap, int bits) | |||
273 | } | 280 | } |
274 | EXPORT_SYMBOL(__bitmap_weight); | 281 | EXPORT_SYMBOL(__bitmap_weight); |
275 | 282 | ||
276 | void bitmap_set(unsigned long *map, int start, int nr) | 283 | void bitmap_set(unsigned long *map, unsigned int start, int len) |
277 | { | 284 | { |
278 | unsigned long *p = map + BIT_WORD(start); | 285 | unsigned long *p = map + BIT_WORD(start); |
279 | const int size = start + nr; | 286 | const unsigned int size = start + len; |
280 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); | 287 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); |
281 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); | 288 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); |
282 | 289 | ||
283 | while (nr - bits_to_set >= 0) { | 290 | while (len - bits_to_set >= 0) { |
284 | *p |= mask_to_set; | 291 | *p |= mask_to_set; |
285 | nr -= bits_to_set; | 292 | len -= bits_to_set; |
286 | bits_to_set = BITS_PER_LONG; | 293 | bits_to_set = BITS_PER_LONG; |
287 | mask_to_set = ~0UL; | 294 | mask_to_set = ~0UL; |
288 | p++; | 295 | p++; |
289 | } | 296 | } |
290 | if (nr) { | 297 | if (len) { |
291 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); | 298 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); |
292 | *p |= mask_to_set; | 299 | *p |= mask_to_set; |
293 | } | 300 | } |
294 | } | 301 | } |
295 | EXPORT_SYMBOL(bitmap_set); | 302 | EXPORT_SYMBOL(bitmap_set); |
296 | 303 | ||
297 | void bitmap_clear(unsigned long *map, int start, int nr) | 304 | void bitmap_clear(unsigned long *map, unsigned int start, int len) |
298 | { | 305 | { |
299 | unsigned long *p = map + BIT_WORD(start); | 306 | unsigned long *p = map + BIT_WORD(start); |
300 | const int size = start + nr; | 307 | const unsigned int size = start + len; |
301 | int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); | 308 | int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG); |
302 | unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); | 309 | unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start); |
303 | 310 | ||
304 | while (nr - bits_to_clear >= 0) { | 311 | while (len - bits_to_clear >= 0) { |
305 | *p &= ~mask_to_clear; | 312 | *p &= ~mask_to_clear; |
306 | nr -= bits_to_clear; | 313 | len -= bits_to_clear; |
307 | bits_to_clear = BITS_PER_LONG; | 314 | bits_to_clear = BITS_PER_LONG; |
308 | mask_to_clear = ~0UL; | 315 | mask_to_clear = ~0UL; |
309 | p++; | 316 | p++; |
310 | } | 317 | } |
311 | if (nr) { | 318 | if (len) { |
312 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); | 319 | mask_to_clear &= BITMAP_LAST_WORD_MASK(size); |
313 | *p &= ~mask_to_clear; | 320 | *p &= ~mask_to_clear; |
314 | } | 321 | } |
@@ -664,13 +671,8 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen, | |||
664 | 671 | ||
665 | int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) | 672 | int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) |
666 | { | 673 | { |
667 | char *nl = strchr(bp, '\n'); | 674 | char *nl = strchrnul(bp, '\n'); |
668 | int len; | 675 | int len = nl - bp; |
669 | |||
670 | if (nl) | ||
671 | len = nl - bp; | ||
672 | else | ||
673 | len = strlen(bp); | ||
674 | 676 | ||
675 | return __bitmap_parselist(bp, len, 0, maskp, nmaskbits); | 677 | return __bitmap_parselist(bp, len, 0, maskp, nmaskbits); |
676 | } | 678 | } |
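The strchrnul() simplification works because, unlike strchr(), it never returns NULL: it points either at the first '\n' or at the terminating NUL, so nl - bp is the line length in both cases. An illustrative user-space equivalent (strchrnul() is a GNU extension outside the kernel):

    #define _GNU_SOURCE
    #include <string.h>

    static int line_len(const char *bp)
    {
            const char *nl = strchrnul(bp, '\n'); /* '\n' or the NUL */

            return nl - bp; /* line_len("0-3") == line_len("0-3\n") == 3 */
    }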
@@ -716,7 +718,7 @@ EXPORT_SYMBOL(bitmap_parselist_user); | |||
716 | * | 718 | * |
717 | * If for example, just bits 4 through 7 are set in @buf, then @pos | 719 | * If for example, just bits 4 through 7 are set in @buf, then @pos |
718 | * values 4 through 7 will get mapped to 0 through 3, respectively, | 720 | * values 4 through 7 will get mapped to 0 through 3, respectively, |
719 | * and other @pos values will get mapped to 0. When @pos value 7 | 721 | * and other @pos values will get mapped to -1. When @pos value 7 |
720 | * gets mapped to (returns) @ord value 3 in this example, that means | 722 | * gets mapped to (returns) @ord value 3 in this example, that means |
721 | * that bit 7 is the 3rd (starting with 0th) set bit in @buf. | 723 | * that bit 7 is the 3rd (starting with 0th) set bit in @buf. |
722 | * | 724 | * |
@@ -1046,7 +1048,7 @@ enum { | |||
1046 | REG_OP_RELEASE, /* clear all bits in region */ | 1048 | REG_OP_RELEASE, /* clear all bits in region */ |
1047 | }; | 1049 | }; |
1048 | 1050 | ||
1049 | static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op) | 1051 | static int __reg_op(unsigned long *bitmap, unsigned int pos, int order, int reg_op) |
1050 | { | 1052 | { |
1051 | int nbits_reg; /* number of bits in region */ | 1053 | int nbits_reg; /* number of bits in region */ |
1052 | int index; /* index first long of region in bitmap */ | 1054 | int index; /* index first long of region in bitmap */ |
@@ -1112,11 +1114,11 @@ done: | |||
1112 | * Return the bit offset in bitmap of the allocated region, | 1114 | * Return the bit offset in bitmap of the allocated region, |
1113 | * or -errno on failure. | 1115 | * or -errno on failure. |
1114 | */ | 1116 | */ |
1115 | int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) | 1117 | int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order) |
1116 | { | 1118 | { |
1117 | int pos, end; /* scans bitmap by regions of size order */ | 1119 | unsigned int pos, end; /* scans bitmap by regions of size order */ |
1118 | 1120 | ||
1119 | for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { | 1121 | for (pos = 0 ; (end = pos + (1U << order)) <= bits; pos = end) { |
1120 | if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) | 1122 | if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) |
1121 | continue; | 1123 | continue; |
1122 | __reg_op(bitmap, pos, order, REG_OP_ALLOC); | 1124 | __reg_op(bitmap, pos, order, REG_OP_ALLOC); |
@@ -1137,7 +1139,7 @@ EXPORT_SYMBOL(bitmap_find_free_region); | |||
1137 | * | 1139 | * |
1138 | * No return value. | 1140 | * No return value. |
1139 | */ | 1141 | */ |
1140 | void bitmap_release_region(unsigned long *bitmap, int pos, int order) | 1142 | void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order) |
1141 | { | 1143 | { |
1142 | __reg_op(bitmap, pos, order, REG_OP_RELEASE); | 1144 | __reg_op(bitmap, pos, order, REG_OP_RELEASE); |
1143 | } | 1145 | } |
@@ -1154,12 +1156,11 @@ EXPORT_SYMBOL(bitmap_release_region); | |||
1154 | * Return 0 on success, or %-EBUSY if specified region wasn't | 1156 | * Return 0 on success, or %-EBUSY if specified region wasn't |
1155 | * free (not all bits were zero). | 1157 | * free (not all bits were zero). |
1156 | */ | 1158 | */ |
1157 | int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) | 1159 | int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) |
1158 | { | 1160 | { |
1159 | if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) | 1161 | if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) |
1160 | return -EBUSY; | 1162 | return -EBUSY; |
1161 | __reg_op(bitmap, pos, order, REG_OP_ALLOC); | 1163 | return __reg_op(bitmap, pos, order, REG_OP_ALLOC); |
1162 | return 0; | ||
1163 | } | 1164 | } |
1164 | EXPORT_SYMBOL(bitmap_allocate_region); | 1165 | EXPORT_SYMBOL(bitmap_allocate_region); |
1165 | 1166 | ||
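A recurring theme in the __bitmap_and()/__bitmap_andnot() changes above is the final partial word: the loops now cover only full words and the tail is masked explicitly with the existing BITMAP_LAST_WORD_MASK() macro, while __bitmap_complement() goes the other way and stops masking, on the principle that bits past nbits are undefined. A sketch of the mask's effect, assuming BITS_PER_LONG == 64:

    #define BITS_PER_LONG 64
    #define BITMAP_LAST_WORD_MASK(nbits) \
            (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

    /* A 68-bit bitmap occupies two words, but only bits 64..67 of the
     * second word are significant.  BITMAP_LAST_WORD_MASK(68) == 0xf,
     * so __bitmap_and() now computes dst[0] over the full first word
     * and dst[1] = bitmap1[1] & bitmap2[1] & 0xf, keeping the unused
     * top 60 bits of the result zero. */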
diff --git a/lib/cmdline.c b/lib/cmdline.c
index d4932f745e92..76a712e6e20e 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -121,11 +121,7 @@ EXPORT_SYMBOL(get_options); | |||
121 | * @retptr: (output) Optional pointer to next char after parse completes | 121 | * @retptr: (output) Optional pointer to next char after parse completes |
122 | * | 122 | * |
123 | * Parses a string into a number. The number stored at @ptr is | 123 | * Parses a string into a number. The number stored at @ptr is |
124 | * potentially suffixed with %K (for kilobytes, or 1024 bytes), | 124 | * potentially suffixed with K, M, G, T, P, E. |
125 | * %M (for megabytes, or 1048576 bytes), or %G (for gigabytes, or | ||
126 | * 1073741824). If the number is suffixed with K, M, or G, then | ||
127 | * the return value is the number multiplied by one kilobyte, one | ||
128 | * megabyte, or one gigabyte, respectively. | ||
129 | */ | 125 | */ |
130 | 126 | ||
131 | unsigned long long memparse(const char *ptr, char **retptr) | 127 | unsigned long long memparse(const char *ptr, char **retptr) |
@@ -135,6 +131,15 @@ unsigned long long memparse(const char *ptr, char **retptr) | |||
135 | unsigned long long ret = simple_strtoull(ptr, &endptr, 0); | 131 | unsigned long long ret = simple_strtoull(ptr, &endptr, 0); |
136 | 132 | ||
137 | switch (*endptr) { | 133 | switch (*endptr) { |
134 | case 'E': | ||
135 | case 'e': | ||
136 | ret <<= 10; | ||
137 | case 'P': | ||
138 | case 'p': | ||
139 | ret <<= 10; | ||
140 | case 'T': | ||
141 | case 't': | ||
142 | ret <<= 10; | ||
138 | case 'G': | 143 | case 'G': |
139 | case 'g': | 144 | case 'g': |
140 | ret <<= 10; | 145 | ret <<= 10; |
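The new cases rely on deliberate fall-through: each suffix drops into every case below it and picks up one more <<= 10, so 'K' scales by 2^10 and 'E' by 2^60. A user-space re-implementation of the same ladder, for illustration only (the function name is made up):

    #include <stdlib.h>

    static unsigned long long parse_size(const char *s)
    {
            char *end;
            unsigned long long ret = strtoull(s, &end, 0);

            switch (*end) {
            case 'E': case 'e': ret <<= 10; /* fall through */
            case 'P': case 'p': ret <<= 10; /* fall through */
            case 'T': case 't': ret <<= 10; /* fall through */
            case 'G': case 'g': ret <<= 10; /* fall through */
            case 'M': case 'm': ret <<= 10; /* fall through */
            case 'K': case 'k': ret <<= 10; /* fall through */
            default: break;
            }
            return ret; /* parse_size("2G") == 2ULL << 30 */
    }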
diff --git a/lib/crc32.c b/lib/crc32.c
index 21a7b2135af6..9a907d489d95 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>"); | |||
50 | MODULE_DESCRIPTION("Various CRC32 calculations"); | 50 | MODULE_DESCRIPTION("Various CRC32 calculations"); |
51 | MODULE_LICENSE("GPL"); | 51 | MODULE_LICENSE("GPL"); |
52 | 52 | ||
53 | #define GF2_DIM 32 | ||
54 | |||
55 | static u32 gf2_matrix_times(u32 *mat, u32 vec) | ||
56 | { | ||
57 | u32 sum = 0; | ||
58 | |||
59 | while (vec) { | ||
60 | if (vec & 1) | ||
61 | sum ^= *mat; | ||
62 | vec >>= 1; | ||
63 | mat++; | ||
64 | } | ||
65 | |||
66 | return sum; | ||
67 | } | ||
68 | |||
69 | static void gf2_matrix_square(u32 *square, u32 *mat) | ||
70 | { | ||
71 | int i; | ||
72 | |||
73 | for (i = 0; i < GF2_DIM; i++) | ||
74 | square[i] = gf2_matrix_times(mat, mat[i]); | ||
75 | } | ||
76 | |||
77 | #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 | 53 | #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8 |
78 | 54 | ||
79 | /* implements slicing-by-4 or slicing-by-8 algorithm */ | 55 | /* implements slicing-by-4 or slicing-by-8 algorithm */ |
80 | static inline u32 | 56 | static inline u32 __pure |
81 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | 57 | crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) |
82 | { | 58 | { |
83 | # ifdef __LITTLE_ENDIAN | 59 | # ifdef __LITTLE_ENDIAN |
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256]) | |||
155 | } | 131 | } |
156 | #endif | 132 | #endif |
157 | 133 | ||
158 | /* For conditions of distribution and use, see copyright notice in zlib.h */ | ||
159 | static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2, | ||
160 | u32 polynomial) | ||
161 | { | ||
162 | u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */ | ||
163 | u32 odd[GF2_DIM]; /* Odd-power-of-two zeros operator */ | ||
164 | u32 row; | ||
165 | int i; | ||
166 | |||
167 | if (len2 <= 0) | ||
168 | return crc1; | ||
169 | |||
170 | /* Put operator for one zero bit in odd */ | ||
171 | odd[0] = polynomial; | ||
172 | row = 1; | ||
173 | for (i = 1; i < GF2_DIM; i++) { | ||
174 | odd[i] = row; | ||
175 | row <<= 1; | ||
176 | } | ||
177 | |||
178 | gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */ | ||
179 | gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */ | ||
180 | |||
181 | /* Apply len2 zeros to crc1 (first square will put the operator for one | ||
182 | * zero byte, eight zero bits, in even). | ||
183 | */ | ||
184 | do { | ||
185 | /* Apply zeros operator for this bit of len2 */ | ||
186 | gf2_matrix_square(even, odd); | ||
187 | if (len2 & 1) | ||
188 | crc1 = gf2_matrix_times(even, crc1); | ||
189 | len2 >>= 1; | ||
190 | /* If no more bits set, then done */ | ||
191 | if (len2 == 0) | ||
192 | break; | ||
193 | /* Another iteration of the loop with odd and even swapped */ | ||
194 | gf2_matrix_square(odd, even); | ||
195 | if (len2 & 1) | ||
196 | crc1 = gf2_matrix_times(odd, crc1); | ||
197 | len2 >>= 1; | ||
198 | } while (len2 != 0); | ||
199 | |||
200 | crc1 ^= crc2; | ||
201 | return crc1; | ||
202 | } | ||
203 | 134 | ||
204 | /** | 135 | /** |
205 | * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II | 136 | * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II |
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) | |||
271 | (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); | 202 | (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); |
272 | } | 203 | } |
273 | #endif | 204 | #endif |
274 | u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2) | 205 | EXPORT_SYMBOL(crc32_le); |
206 | EXPORT_SYMBOL(__crc32c_le); | ||
207 | |||
208 | /* | ||
209 | * This multiplies the polynomials x and y modulo the given modulus. | ||
210 | * This follows the "little-endian" CRC convention that the lsbit | ||
211 | * represents the highest power of x, and the msbit represents x^0. | ||
212 | */ | ||
213 | static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus) | ||
275 | { | 214 | { |
276 | return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE); | 215 | u32 product = x & 1 ? y : 0; |
216 | int i; | ||
217 | |||
218 | for (i = 0; i < 31; i++) { | ||
219 | product = (product >> 1) ^ (product & 1 ? modulus : 0); | ||
220 | x >>= 1; | ||
221 | product ^= x & 1 ? y : 0; | ||
222 | } | ||
223 | |||
224 | return product; | ||
277 | } | 225 | } |
278 | 226 | ||
279 | u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) | 227 | /** |
228 | * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time | ||
229 | * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) | ||
230 | * @len: The number of bytes. @crc is multiplied by x^(8*@len) | ||
231 | * @polynomial: The modulus used to reduce the result to 32 bits. | ||
232 | * | ||
233 | * It's possible to parallelize CRC computations by computing a CRC | ||
234 | * over separate ranges of a buffer, then summing them. | ||
235 | * This shifts the given CRC by 8*len bits (i.e. produces the same effect | ||
236 | * as appending len bytes of zero to the data), in time proportional | ||
237 | * to log(len). | ||
238 | */ | ||
239 | static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len, | ||
240 | u32 polynomial) | ||
280 | { | 241 | { |
281 | return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE); | 242 | u32 power = polynomial; /* CRC of x^32 */ |
243 | int i; | ||
244 | |||
245 | /* Shift up to 32 bits in the simple linear way */ | ||
246 | for (i = 0; i < 8 * (int)(len & 3); i++) | ||
247 | crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0); | ||
248 | |||
249 | len >>= 2; | ||
250 | if (!len) | ||
251 | return crc; | ||
252 | |||
253 | for (;;) { | ||
254 | /* "power" is x^(2^i), modulo the polynomial */ | ||
255 | if (len & 1) | ||
256 | crc = gf2_multiply(crc, power, polynomial); | ||
257 | |||
258 | len >>= 1; | ||
259 | if (!len) | ||
260 | break; | ||
261 | |||
262 | /* Square power, advancing to x^(2^(i+1)) */ | ||
263 | power = gf2_multiply(power, power, polynomial); | ||
264 | } | ||
265 | |||
266 | return crc; | ||
282 | } | 267 | } |
283 | EXPORT_SYMBOL(crc32_le); | 268 | |
284 | EXPORT_SYMBOL(crc32_le_combine); | 269 | u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len) |
285 | EXPORT_SYMBOL(__crc32c_le); | 270 | { |
286 | EXPORT_SYMBOL(__crc32c_le_combine); | 271 | return crc32_generic_shift(crc, len, CRCPOLY_LE); |
272 | } | ||
273 | |||
274 | u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len) | ||
275 | { | ||
276 | return crc32_generic_shift(crc, len, CRC32C_POLY_LE); | ||
277 | } | ||
278 | EXPORT_SYMBOL(crc32_le_shift); | ||
279 | EXPORT_SYMBOL(__crc32c_le_shift); | ||
287 | 280 | ||
288 | /** | 281 | /** |
289 | * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 | 282 | * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32 |
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be); | |||
351 | #ifdef CONFIG_CRC32_SELFTEST | 344 | #ifdef CONFIG_CRC32_SELFTEST |
352 | 345 | ||
353 | /* 4096 random bytes */ | 346 | /* 4096 random bytes */ |
354 | static u8 __attribute__((__aligned__(8))) test_buf[] = | 347 | static u8 const __aligned(8) test_buf[] __initconst = |
355 | { | 348 | { |
356 | 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, | 349 | 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30, |
357 | 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, | 350 | 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4, |
@@ -875,7 +868,7 @@ static struct crc_test { | |||
875 | u32 crc_le; /* expected crc32_le result */ | 868 | u32 crc_le; /* expected crc32_le result */ |
876 | u32 crc_be; /* expected crc32_be result */ | 869 | u32 crc_be; /* expected crc32_be result */ |
877 | u32 crc32c_le; /* expected crc32c_le result */ | 870 | u32 crc32c_le; /* expected crc32c_le result */ |
878 | } test[] = | 871 | } const test[] __initconst = |
879 | { | 872 | { |
880 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, | 873 | {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c}, |
881 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, | 874 | {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca}, |
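The replaced combine functions are rebuilt on a shift primitive: crc32_le_shift(crc, len) appends len zero bytes to a running CRC in O(log len) time by repeated squaring of x^(2^i) modulo the polynomial. Combining two independently computed halves then reduces to a shift plus an XOR; a sketch of the intended usage, following the crc32_le_combine() documentation in include/linux/crc32.h:

    /* buf = A || B, with CRCs computed over the halves in parallel: */
    u32 crcA = crc32_le(seed, bufA, lenA);
    u32 crcB = crc32_le(0, bufB, lenB);

    /* Shifting crcA past B's length and XORing in crcB yields the
     * CRC of the concatenation: */
    u32 crc = crc32_le_shift(crcA, lenB) ^ crcB;
    /* crc == crc32_le(seed, buf, lenA + lenB) */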
diff --git a/lib/decompress.c b/lib/decompress.c
index 86069d74c062..37f3c786348f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -54,7 +54,7 @@ static const struct compress_format compressed_formats[] __initconst = { | |||
54 | { {0, 0}, NULL, NULL } | 54 | { {0, 0}, NULL, NULL } |
55 | }; | 55 | }; |
56 | 56 | ||
57 | decompress_fn __init decompress_method(const unsigned char *inbuf, int len, | 57 | decompress_fn __init decompress_method(const unsigned char *inbuf, long len, |
58 | const char **name) | 58 | const char **name) |
59 | { | 59 | { |
60 | const struct compress_format *cf; | 60 | const struct compress_format *cf; |
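Everything that follows in the decompress_* files is one mechanical widening: buffer lengths, output positions, and the fill/flush callbacks move from int to long so a single input (e.g. a very large initramfs) can exceed 2GB. A sketch of the shared prototype as this series leaves it in include/linux/decompress/generic.h:

    typedef int (*decompress_fn)(unsigned char *inbuf, long len,
                                 long (*fill)(void *buf, unsigned long size),
                                 long (*flush)(void *buf, unsigned long size),
                                 unsigned char *outbuf,
                                 long *posp,
                                 void (*error)(char *x));

    /* fill():  refill buf with up to size bytes of input; returns the
     *          number of bytes read, or negative on error.
     * flush(): consume size bytes of output from buf; must return
     *          size on success. */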
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 31c5f7675fbf..8290e0bef7ea 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -92,8 +92,8 @@ struct bunzip_data { | |||
92 | /* State for interrupting output loop */ | 92 | /* State for interrupting output loop */ |
93 | int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; | 93 | int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; |
94 | /* I/O tracking data (file handles, buffers, positions, etc.) */ | 94 | /* I/O tracking data (file handles, buffers, positions, etc.) */ |
95 | int (*fill)(void*, unsigned int); | 95 | long (*fill)(void*, unsigned long); |
96 | int inbufCount, inbufPos /*, outbufPos*/; | 96 | long inbufCount, inbufPos /*, outbufPos*/; |
97 | unsigned char *inbuf /*,*outbuf*/; | 97 | unsigned char *inbuf /*,*outbuf*/; |
98 | unsigned int inbufBitCount, inbufBits; | 98 | unsigned int inbufBitCount, inbufBits; |
99 | /* The CRC values stored in the block header and calculated from the | 99 | /* The CRC values stored in the block header and calculated from the |
@@ -617,7 +617,7 @@ decode_next_byte: | |||
617 | goto decode_next_byte; | 617 | goto decode_next_byte; |
618 | } | 618 | } |
619 | 619 | ||
620 | static int INIT nofill(void *buf, unsigned int len) | 620 | static long INIT nofill(void *buf, unsigned long len) |
621 | { | 621 | { |
622 | return -1; | 622 | return -1; |
623 | } | 623 | } |
@@ -625,8 +625,8 @@ static int INIT nofill(void *buf, unsigned int len) | |||
625 | /* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain | 625 | /* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain |
626 | a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are | 626 | a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are |
627 | ignored, and data is read from file handle into temporary buffer. */ | 627 | ignored, and data is read from file handle into temporary buffer. */ |
628 | static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | 628 | static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, long len, |
629 | int (*fill)(void*, unsigned int)) | 629 | long (*fill)(void*, unsigned long)) |
630 | { | 630 | { |
631 | struct bunzip_data *bd; | 631 | struct bunzip_data *bd; |
632 | unsigned int i, j, c; | 632 | unsigned int i, j, c; |
@@ -675,11 +675,11 @@ static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, | |||
675 | 675 | ||
676 | /* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data, | 676 | /* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data, |
677 | not end of file.) */ | 677 | not end of file.) */ |
678 | STATIC int INIT bunzip2(unsigned char *buf, int len, | 678 | STATIC int INIT bunzip2(unsigned char *buf, long len, |
679 | int(*fill)(void*, unsigned int), | 679 | long (*fill)(void*, unsigned long), |
680 | int(*flush)(void*, unsigned int), | 680 | long (*flush)(void*, unsigned long), |
681 | unsigned char *outbuf, | 681 | unsigned char *outbuf, |
682 | int *pos, | 682 | long *pos, |
683 | void(*error)(char *x)) | 683 | void(*error)(char *x)) |
684 | { | 684 | { |
685 | struct bunzip_data *bd; | 685 | struct bunzip_data *bd; |
@@ -743,11 +743,11 @@ exit_0: | |||
743 | } | 743 | } |
744 | 744 | ||
745 | #ifdef PREBOOT | 745 | #ifdef PREBOOT |
746 | STATIC int INIT decompress(unsigned char *buf, int len, | 746 | STATIC int INIT decompress(unsigned char *buf, long len, |
747 | int(*fill)(void*, unsigned int), | 747 | long (*fill)(void*, unsigned long), |
748 | int(*flush)(void*, unsigned int), | 748 | long (*flush)(void*, unsigned long), |
749 | unsigned char *outbuf, | 749 | unsigned char *outbuf, |
750 | int *pos, | 750 | long *pos, |
751 | void(*error)(char *x)) | 751 | void(*error)(char *x)) |
752 | { | 752 | { |
753 | return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); | 753 | return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); |
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
index 0edfd742a154..d4c7891635ec 100644
--- a/lib/decompress_inflate.c
+++ b/lib/decompress_inflate.c
@@ -27,17 +27,17 @@ | |||
27 | 27 | ||
28 | #define GZIP_IOBUF_SIZE (16*1024) | 28 | #define GZIP_IOBUF_SIZE (16*1024) |
29 | 29 | ||
30 | static int INIT nofill(void *buffer, unsigned int len) | 30 | static long INIT nofill(void *buffer, unsigned long len) |
31 | { | 31 | { |
32 | return -1; | 32 | return -1; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* Included from initramfs et al code */ | 35 | /* Included from initramfs et al code */ |
36 | STATIC int INIT gunzip(unsigned char *buf, int len, | 36 | STATIC int INIT gunzip(unsigned char *buf, long len, |
37 | int(*fill)(void*, unsigned int), | 37 | long (*fill)(void*, unsigned long), |
38 | int(*flush)(void*, unsigned int), | 38 | long (*flush)(void*, unsigned long), |
39 | unsigned char *out_buf, | 39 | unsigned char *out_buf, |
40 | int *pos, | 40 | long *pos, |
41 | void(*error)(char *x)) { | 41 | void(*error)(char *x)) { |
42 | u8 *zbuf; | 42 | u8 *zbuf; |
43 | struct z_stream_s *strm; | 43 | struct z_stream_s *strm; |
@@ -142,7 +142,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, | |||
142 | 142 | ||
143 | /* Write any data generated */ | 143 | /* Write any data generated */ |
144 | if (flush && strm->next_out > out_buf) { | 144 | if (flush && strm->next_out > out_buf) { |
145 | int l = strm->next_out - out_buf; | 145 | long l = strm->next_out - out_buf; |
146 | if (l != flush(out_buf, l)) { | 146 | if (l != flush(out_buf, l)) { |
147 | rc = -1; | 147 | rc = -1; |
148 | error("write error"); | 148 | error("write error"); |
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 7d1e83caf8ad..40f66ebe57b7 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -31,10 +31,10 @@ | |||
31 | #define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20) | 31 | #define LZ4_DEFAULT_UNCOMPRESSED_CHUNK_SIZE (8 << 20) |
32 | #define ARCHIVE_MAGICNUMBER 0x184C2102 | 32 | #define ARCHIVE_MAGICNUMBER 0x184C2102 |
33 | 33 | ||
34 | STATIC inline int INIT unlz4(u8 *input, int in_len, | 34 | STATIC inline int INIT unlz4(u8 *input, long in_len, |
35 | int (*fill) (void *, unsigned int), | 35 | long (*fill)(void *, unsigned long), |
36 | int (*flush) (void *, unsigned int), | 36 | long (*flush)(void *, unsigned long), |
37 | u8 *output, int *posp, | 37 | u8 *output, long *posp, |
38 | void (*error) (char *x)) | 38 | void (*error) (char *x)) |
39 | { | 39 | { |
40 | int ret = -1; | 40 | int ret = -1; |
@@ -43,7 +43,7 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
43 | u8 *inp; | 43 | u8 *inp; |
44 | u8 *inp_start; | 44 | u8 *inp_start; |
45 | u8 *outp; | 45 | u8 *outp; |
46 | int size = in_len; | 46 | long size = in_len; |
47 | #ifdef PREBOOT | 47 | #ifdef PREBOOT |
48 | size_t out_len = get_unaligned_le32(input + in_len); | 48 | size_t out_len = get_unaligned_le32(input + in_len); |
49 | #endif | 49 | #endif |
@@ -83,13 +83,20 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
83 | if (posp) | 83 | if (posp) |
84 | *posp = 0; | 84 | *posp = 0; |
85 | 85 | ||
86 | if (fill) | 86 | if (fill) { |
87 | fill(inp, 4); | 87 | size = fill(inp, 4); |
88 | if (size < 4) { | ||
89 | error("data corrupted"); | ||
90 | goto exit_2; | ||
91 | } | ||
92 | } | ||
88 | 93 | ||
89 | chunksize = get_unaligned_le32(inp); | 94 | chunksize = get_unaligned_le32(inp); |
90 | if (chunksize == ARCHIVE_MAGICNUMBER) { | 95 | if (chunksize == ARCHIVE_MAGICNUMBER) { |
91 | inp += 4; | 96 | if (!fill) { |
92 | size -= 4; | 97 | inp += 4; |
98 | size -= 4; | ||
99 | } | ||
93 | } else { | 100 | } else { |
94 | error("invalid header"); | 101 | error("invalid header"); |
95 | goto exit_2; | 102 | goto exit_2; |
@@ -100,29 +107,44 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
100 | 107 | ||
101 | for (;;) { | 108 | for (;;) { |
102 | 109 | ||
103 | if (fill) | 110 | if (fill) { |
104 | fill(inp, 4); | 111 | size = fill(inp, 4); |
112 | if (size == 0) | ||
113 | break; | ||
114 | if (size < 4) { | ||
115 | error("data corrupted"); | ||
116 | goto exit_2; | ||
117 | } | ||
118 | } | ||
105 | 119 | ||
106 | chunksize = get_unaligned_le32(inp); | 120 | chunksize = get_unaligned_le32(inp); |
107 | if (chunksize == ARCHIVE_MAGICNUMBER) { | 121 | if (chunksize == ARCHIVE_MAGICNUMBER) { |
108 | inp += 4; | 122 | if (!fill) { |
109 | size -= 4; | 123 | inp += 4; |
124 | size -= 4; | ||
125 | } | ||
110 | if (posp) | 126 | if (posp) |
111 | *posp += 4; | 127 | *posp += 4; |
112 | continue; | 128 | continue; |
113 | } | 129 | } |
114 | inp += 4; | 130 | |
115 | size -= 4; | ||
116 | 131 | ||
117 | if (posp) | 132 | if (posp) |
118 | *posp += 4; | 133 | *posp += 4; |
119 | 134 | ||
120 | if (fill) { | 135 | if (!fill) { |
136 | inp += 4; | ||
137 | size -= 4; | ||
138 | } else { | ||
121 | if (chunksize > lz4_compressbound(uncomp_chunksize)) { | 139 | if (chunksize > lz4_compressbound(uncomp_chunksize)) { |
122 | error("chunk length is longer than allocated"); | 140 | error("chunk length is longer than allocated"); |
123 | goto exit_2; | 141 | goto exit_2; |
124 | } | 142 | } |
125 | fill(inp, chunksize); | 143 | size = fill(inp, chunksize); |
144 | if (size < chunksize) { | ||
145 | error("data corrupted"); | ||
146 | goto exit_2; | ||
147 | } | ||
126 | } | 148 | } |
127 | #ifdef PREBOOT | 149 | #ifdef PREBOOT |
128 | if (out_len >= uncomp_chunksize) { | 150 | if (out_len >= uncomp_chunksize) { |
@@ -149,18 +171,17 @@ STATIC inline int INIT unlz4(u8 *input, int in_len, | |||
149 | if (posp) | 171 | if (posp) |
150 | *posp += chunksize; | 172 | *posp += chunksize; |
151 | 173 | ||
152 | size -= chunksize; | 174 | if (!fill) { |
175 | size -= chunksize; | ||
153 | 176 | ||
154 | if (size == 0) | 177 | if (size == 0) |
155 | break; | 178 | break; |
156 | else if (size < 0) { | 179 | else if (size < 0) { |
157 | error("data corrupted"); | 180 | error("data corrupted"); |
158 | goto exit_2; | 181 | goto exit_2; |
182 | } | ||
183 | inp += chunksize; | ||
159 | } | 184 | } |
160 | |||
161 | inp += chunksize; | ||
162 | if (fill) | ||
163 | inp = inp_start; | ||
164 | } | 185 | } |
165 | 186 | ||
166 | ret = 0; | 187 | ret = 0; |
@@ -175,11 +196,11 @@ exit_0: | |||
175 | } | 196 | } |
176 | 197 | ||
177 | #ifdef PREBOOT | 198 | #ifdef PREBOOT |
178 | STATIC int INIT decompress(unsigned char *buf, int in_len, | 199 | STATIC int INIT decompress(unsigned char *buf, long in_len, |
179 | int(*fill)(void*, unsigned int), | 200 | long (*fill)(void*, unsigned long), |
180 | int(*flush)(void*, unsigned int), | 201 | long (*flush)(void*, unsigned long), |
181 | unsigned char *output, | 202 | unsigned char *output, |
182 | int *posp, | 203 | long *posp, |
183 | void(*error)(char *x) | 204 | void(*error)(char *x) |
184 | ) | 205 | ) |
185 | { | 206 | { |
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
index 32adb73a9038..0be83af62b88 100644
--- a/lib/decompress_unlzma.c
+++ b/lib/decompress_unlzma.c
@@ -65,11 +65,11 @@ static long long INIT read_int(unsigned char *ptr, int size) | |||
65 | #define LZMA_IOBUF_SIZE 0x10000 | 65 | #define LZMA_IOBUF_SIZE 0x10000 |
66 | 66 | ||
67 | struct rc { | 67 | struct rc { |
68 | int (*fill)(void*, unsigned int); | 68 | long (*fill)(void*, unsigned long); |
69 | uint8_t *ptr; | 69 | uint8_t *ptr; |
70 | uint8_t *buffer; | 70 | uint8_t *buffer; |
71 | uint8_t *buffer_end; | 71 | uint8_t *buffer_end; |
72 | int buffer_size; | 72 | long buffer_size; |
73 | uint32_t code; | 73 | uint32_t code; |
74 | uint32_t range; | 74 | uint32_t range; |
75 | uint32_t bound; | 75 | uint32_t bound; |
@@ -82,7 +82,7 @@ struct rc { | |||
82 | #define RC_MODEL_TOTAL_BITS 11 | 82 | #define RC_MODEL_TOTAL_BITS 11 |
83 | 83 | ||
84 | 84 | ||
85 | static int INIT nofill(void *buffer, unsigned int len) | 85 | static long INIT nofill(void *buffer, unsigned long len) |
86 | { | 86 | { |
87 | return -1; | 87 | return -1; |
88 | } | 88 | } |
@@ -99,8 +99,8 @@ static void INIT rc_read(struct rc *rc) | |||
99 | 99 | ||
100 | /* Called once */ | 100 | /* Called once */ |
101 | static inline void INIT rc_init(struct rc *rc, | 101 | static inline void INIT rc_init(struct rc *rc, |
102 | int (*fill)(void*, unsigned int), | 102 | long (*fill)(void*, unsigned long), |
103 | char *buffer, int buffer_size) | 103 | char *buffer, long buffer_size) |
104 | { | 104 | { |
105 | if (fill) | 105 | if (fill) |
106 | rc->fill = fill; | 106 | rc->fill = fill; |
@@ -280,7 +280,7 @@ struct writer { | |||
280 | size_t buffer_pos; | 280 | size_t buffer_pos; |
281 | int bufsize; | 281 | int bufsize; |
282 | size_t global_pos; | 282 | size_t global_pos; |
283 | int(*flush)(void*, unsigned int); | 283 | long (*flush)(void*, unsigned long); |
284 | struct lzma_header *header; | 284 | struct lzma_header *header; |
285 | }; | 285 | }; |
286 | 286 | ||
@@ -534,11 +534,11 @@ static inline int INIT process_bit1(struct writer *wr, struct rc *rc, | |||
534 | 534 | ||
535 | 535 | ||
536 | 536 | ||
537 | STATIC inline int INIT unlzma(unsigned char *buf, int in_len, | 537 | STATIC inline int INIT unlzma(unsigned char *buf, long in_len, |
538 | int(*fill)(void*, unsigned int), | 538 | long (*fill)(void*, unsigned long), |
539 | int(*flush)(void*, unsigned int), | 539 | long (*flush)(void*, unsigned long), |
540 | unsigned char *output, | 540 | unsigned char *output, |
541 | int *posp, | 541 | long *posp, |
542 | void(*error)(char *x) | 542 | void(*error)(char *x) |
543 | ) | 543 | ) |
544 | { | 544 | { |
@@ -667,11 +667,11 @@ exit_0: | |||
667 | } | 667 | } |
668 | 668 | ||
669 | #ifdef PREBOOT | 669 | #ifdef PREBOOT |
670 | STATIC int INIT decompress(unsigned char *buf, int in_len, | 670 | STATIC int INIT decompress(unsigned char *buf, long in_len, |
671 | int(*fill)(void*, unsigned int), | 671 | long (*fill)(void*, unsigned long), |
672 | int(*flush)(void*, unsigned int), | 672 | long (*flush)(void*, unsigned long), |
673 | unsigned char *output, | 673 | unsigned char *output, |
674 | int *posp, | 674 | long *posp, |
675 | void(*error)(char *x) | 675 | void(*error)(char *x) |
676 | ) | 676 | ) |
677 | { | 677 | { |
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
index 960183d4258f..b94a31bdd87d 100644
--- a/lib/decompress_unlzo.c
+++ b/lib/decompress_unlzo.c
@@ -51,7 +51,7 @@ static const unsigned char lzop_magic[] = { | |||
51 | #define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4) | 51 | #define HEADER_SIZE_MIN (9 + 7 + 4 + 8 + 1 + 4) |
52 | #define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4) | 52 | #define HEADER_SIZE_MAX (9 + 7 + 1 + 8 + 8 + 4 + 1 + 255 + 4) |
53 | 53 | ||
54 | STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len) | 54 | STATIC inline long INIT parse_header(u8 *input, long *skip, long in_len) |
55 | { | 55 | { |
56 | int l; | 56 | int l; |
57 | u8 *parse = input; | 57 | u8 *parse = input; |
@@ -108,14 +108,14 @@ STATIC inline int INIT parse_header(u8 *input, int *skip, int in_len) | |||
108 | return 1; | 108 | return 1; |
109 | } | 109 | } |
110 | 110 | ||
111 | STATIC inline int INIT unlzo(u8 *input, int in_len, | 111 | STATIC int INIT unlzo(u8 *input, long in_len, |
112 | int (*fill) (void *, unsigned int), | 112 | long (*fill)(void *, unsigned long), |
113 | int (*flush) (void *, unsigned int), | 113 | long (*flush)(void *, unsigned long), |
114 | u8 *output, int *posp, | 114 | u8 *output, long *posp, |
115 | void (*error) (char *x)) | 115 | void (*error) (char *x)) |
116 | { | 116 | { |
117 | u8 r = 0; | 117 | u8 r = 0; |
118 | int skip = 0; | 118 | long skip = 0; |
119 | u32 src_len, dst_len; | 119 | u32 src_len, dst_len; |
120 | size_t tmp; | 120 | size_t tmp; |
121 | u8 *in_buf, *in_buf_save, *out_buf; | 121 | u8 *in_buf, *in_buf_save, *out_buf; |
diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c
index 9f34eb56854d..b07a78340e9d 100644
--- a/lib/decompress_unxz.c
+++ b/lib/decompress_unxz.c
@@ -248,10 +248,10 @@ void *memmove(void *dest, const void *src, size_t size) | |||
248 | * both input and output buffers are available as a single chunk, i.e. when | 248 | * both input and output buffers are available as a single chunk, i.e. when |
249 | * fill() and flush() won't be used. | 249 | * fill() and flush() won't be used. |
250 | */ | 250 | */ |
251 | STATIC int INIT unxz(unsigned char *in, int in_size, | 251 | STATIC int INIT unxz(unsigned char *in, long in_size, |
252 | int (*fill)(void *dest, unsigned int size), | 252 | long (*fill)(void *dest, unsigned long size), |
253 | int (*flush)(void *src, unsigned int size), | 253 | long (*flush)(void *src, unsigned long size), |
254 | unsigned char *out, int *in_used, | 254 | unsigned char *out, long *in_used, |
255 | void (*error)(char *x)) | 255 | void (*error)(char *x)) |
256 | { | 256 | { |
257 | struct xz_buf b; | 257 | struct xz_buf b; |
@@ -329,7 +329,7 @@ STATIC int INIT unxz(unsigned char *in, int in_size, | |||
329 | * returned by xz_dec_run(), but probably | 329 | * returned by xz_dec_run(), but probably |
330 | * it's not too bad. | 330 | * it's not too bad. |
331 | */ | 331 | */ |
332 | if (flush(b.out, b.out_pos) != (int)b.out_pos) | 332 | if (flush(b.out, b.out_pos) != (long)b.out_pos) |
333 | ret = XZ_BUF_ERROR; | 333 | ret = XZ_BUF_ERROR; |
334 | 334 | ||
335 | b.out_pos = 0; | 335 | b.out_pos = 0; |
diff --git a/lib/devres.c b/lib/devres.c index f562bf6ff71d..f4a195a6efe4 100644 --- a/lib/devres.c +++ b/lib/devres.c | |||
@@ -86,8 +86,6 @@ void devm_iounmap(struct device *dev, void __iomem *addr) | |||
86 | } | 86 | } |
87 | EXPORT_SYMBOL(devm_iounmap); | 87 | EXPORT_SYMBOL(devm_iounmap); |
88 | 88 | ||
89 | #define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) | ||
90 | |||
91 | /** | 89 | /** |
92 | * devm_ioremap_resource() - check, request region, and ioremap resource | 90 | * devm_ioremap_resource() - check, request region, and ioremap resource |
93 | * @dev: generic device to handle the resource for | 91 | * @dev: generic device to handle the resource for |
@@ -142,34 +140,6 @@ void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res) | |||
142 | } | 140 | } |
143 | EXPORT_SYMBOL(devm_ioremap_resource); | 141 | EXPORT_SYMBOL(devm_ioremap_resource); |
144 | 142 | ||
145 | /** | ||
146 | * devm_request_and_ioremap() - Check, request region, and ioremap resource | ||
147 | * @dev: Generic device to handle the resource for | ||
148 | * @res: resource to be handled | ||
149 | * | ||
150 | * Takes all necessary steps to ioremap a mem resource. Uses managed device, so | ||
151 | * everything is undone on driver detach. Checks arguments, so you can feed | ||
152 | * it the result from e.g. platform_get_resource() directly. Returns the | ||
153 | * remapped pointer or NULL on error. Usage example: | ||
154 | * | ||
155 | * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
156 | * base = devm_request_and_ioremap(&pdev->dev, res); | ||
157 | * if (!base) | ||
158 | * return -EADDRNOTAVAIL; | ||
159 | */ | ||
160 | void __iomem *devm_request_and_ioremap(struct device *dev, | ||
161 | struct resource *res) | ||
162 | { | ||
163 | void __iomem *dest_ptr; | ||
164 | |||
165 | dest_ptr = devm_ioremap_resource(dev, res); | ||
166 | if (IS_ERR(dest_ptr)) | ||
167 | return NULL; | ||
168 | |||
169 | return dest_ptr; | ||
170 | } | ||
171 | EXPORT_SYMBOL(devm_request_and_ioremap); | ||
172 | |||
173 | #ifdef CONFIG_HAS_IOPORT_MAP | 143 | #ifdef CONFIG_HAS_IOPORT_MAP |
174 | /* | 144 | /* |
175 | * Generic iomap devres | 145 | * Generic iomap devres |
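Callers of the removed devm_request_and_ioremap() migrate to devm_ioremap_resource(), which reports failure via ERR_PTR() rather than NULL. A hedged before/after sketch in a probe routine (foo_probe is hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))		/* was: if (!base) */
			return PTR_ERR(base);	/* was: return -EADDRNOTAVAIL */
		return 0;
	}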
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 7288e38e1757..c9afbe2c445a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
614 | char buf[PREFIX_SIZE]; | 614 | char buf[PREFIX_SIZE]; |
615 | 615 | ||
616 | res = dev_printk_emit(7, dev->dev.parent, | 616 | res = dev_printk_emit(7, dev->dev.parent, |
617 | "%s%s %s %s: %pV", | 617 | "%s%s %s %s%s: %pV", |
618 | dynamic_emit_prefix(descriptor, buf), | 618 | dynamic_emit_prefix(descriptor, buf), |
619 | dev_driver_string(dev->dev.parent), | 619 | dev_driver_string(dev->dev.parent), |
620 | dev_name(dev->dev.parent), | 620 | dev_name(dev->dev.parent), |
621 | netdev_name(dev), &vaf); | 621 | netdev_name(dev), netdev_reg_state(dev), |
622 | &vaf); | ||
622 | } else if (dev) { | 623 | } else if (dev) { |
623 | res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf); | 624 | res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev), |
625 | netdev_reg_state(dev), &vaf); | ||
624 | } else { | 626 | } else { |
625 | res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); | 627 | res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf); |
626 | } | 628 | } |
diff --git a/lib/genalloc.c b/lib/genalloc.c index bdb9a456bcbb..38d2db82228c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
@@ -588,6 +588,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np, | |||
588 | if (!np_pool) | 588 | if (!np_pool) |
589 | return NULL; | 589 | return NULL; |
590 | pdev = of_find_device_by_node(np_pool); | 590 | pdev = of_find_device_by_node(np_pool); |
591 | of_node_put(np_pool); | ||
591 | if (!pdev) | 592 | if (!pdev) |
592 | return NULL; | 593 | return NULL; |
593 | return dev_get_gen_pool(&pdev->dev); | 594 | return dev_get_gen_pool(&pdev->dev); |
diff --git a/lib/glob.c b/lib/glob.c new file mode 100644 index 000000000000..500fc80d23e1 --- /dev/null +++ b/lib/glob.c | |||
@@ -0,0 +1,287 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/glob.h> | ||
3 | |||
4 | /* | ||
5 | * The only reason this code can be compiled as a module is because the | ||
6 | * ATA code that depends on it can be as well. In practice, they're | ||
7 | * both usually compiled in and the module overhead goes away. | ||
8 | */ | ||
9 | MODULE_DESCRIPTION("glob(7) matching"); | ||
10 | MODULE_LICENSE("Dual MIT/GPL"); | ||
11 | |||
12 | /** | ||
13 | * glob_match - Shell-style pattern matching, like !fnmatch(pat, str, 0) | ||
14 | * @pat: Shell-style pattern to match, e.g. "*.[ch]". | ||
15 | * @str: String to match. The pattern must match the entire string. | ||
16 | * | ||
17 | * Perform shell-style glob matching, returning true (1) if the match | ||
18 | * succeeds, or false (0) if it fails. Equivalent to !fnmatch(@pat, @str, 0). | ||
19 | * | ||
20 | * Pattern metacharacters are ?, *, [ and \. | ||
21 | * (And, inside character classes, !, - and ].) | ||
22 | * | ||
23 | * This is a small and simple implementation intended for device blacklists | ||
24 | * where a string is matched against a number of patterns. Thus, it | ||
25 | * does not preprocess the patterns. It is non-recursive, and run-time | ||
26 | * is at most quadratic: strlen(@str)*strlen(@pat). | ||
27 | * | ||
28 | * An example of the worst case is glob_match("*aaaaa", "aaaaaaaaaa"); | ||
29 | * it takes 6 passes over the pattern before matching the string. | ||
30 | * | ||
31 | * Like !fnmatch(@pat, @str, 0) and unlike the shell, this does NOT | ||
32 | * treat / or leading . specially; it isn't actually used for pathnames. | ||
33 | * | ||
34 | * Note that according to glob(7) (and unlike bash), character classes | ||
35 | * are complemented by a leading !; this does not support the regex-style | ||
36 | * [^a-z] syntax. | ||
37 | * | ||
38 | * An opening bracket without a matching close is matched literally. | ||
39 | */ | ||
40 | bool __pure glob_match(char const *pat, char const *str) | ||
41 | { | ||
42 | /* | ||
43 | * Backtrack to previous * on mismatch and retry starting one | ||
44 | * character later in the string. Because * matches all characters | ||
45 | * (no exception for /), it can be easily proved that there's | ||
46 | * never a need to backtrack multiple levels. | ||
47 | */ | ||
48 | char const *back_pat = NULL, *back_str = back_str; | ||
49 | |||
50 | /* | ||
51 | * Loop over each token (character or class) in pat, matching | ||
52 | * it against the remaining unmatched tail of str. Return false | ||
53 | * on mismatch, or true after matching the trailing nul bytes. | ||
54 | */ | ||
55 | for (;;) { | ||
56 | unsigned char c = *str++; | ||
57 | unsigned char d = *pat++; | ||
58 | |||
59 | switch (d) { | ||
60 | case '?': /* Wildcard: anything but nul */ | ||
61 | if (c == '\0') | ||
62 | return false; | ||
63 | break; | ||
64 | case '*': /* Any-length wildcard */ | ||
65 | if (*pat == '\0') /* Optimize trailing * case */ | ||
66 | return true; | ||
67 | back_pat = pat; | ||
68 | back_str = --str; /* Allow zero-length match */ | ||
69 | break; | ||
70 | case '[': { /* Character class */ | ||
71 | bool match = false, inverted = (*pat == '!'); | ||
72 | char const *class = pat + inverted; | ||
73 | unsigned char a = *class++; | ||
74 | |||
75 | /* | ||
76 | * Iterate over each span in the character class. | ||
77 | * A span is either a single character a, or a | ||
78 | * range a-b. The first span may begin with ']'. | ||
79 | */ | ||
80 | do { | ||
81 | unsigned char b = a; | ||
82 | |||
83 | if (a == '\0') /* Malformed */ | ||
84 | goto literal; | ||
85 | |||
86 | if (class[0] == '-' && class[1] != ']') { | ||
87 | b = class[1]; | ||
88 | |||
89 | if (b == '\0') | ||
90 | goto literal; | ||
91 | |||
92 | class += 2; | ||
93 | /* Any special action if a > b? */ | ||
94 | } | ||
95 | match |= (a <= c && c <= b); | ||
96 | } while ((a = *class++) != ']'); | ||
97 | |||
98 | if (match == inverted) | ||
99 | goto backtrack; | ||
100 | pat = class; | ||
101 | } | ||
102 | break; | ||
103 | case '\\': | ||
104 | d = *pat++; | ||
105 | /*FALLTHROUGH*/ | ||
106 | default: /* Literal character */ | ||
107 | literal: | ||
108 | if (c == d) { | ||
109 | if (d == '\0') | ||
110 | return true; | ||
111 | break; | ||
112 | } | ||
113 | backtrack: | ||
114 | if (c == '\0' || !back_pat) | ||
115 | return false; /* No point continuing */ | ||
116 | /* Try again from last *, one character later in str. */ | ||
117 | pat = back_pat; | ||
118 | str = ++back_str; | ||
119 | break; | ||
120 | } | ||
121 | } | ||
122 | } | ||
123 | EXPORT_SYMBOL(glob_match); | ||
124 | |||
125 | |||
126 | #ifdef CONFIG_GLOB_SELFTEST | ||
127 | |||
128 | #include <linux/printk.h> | ||
129 | #include <linux/moduleparam.h> | ||
130 | |||
131 | /* Boot with "glob.verbose=1" to show successful tests, too */ | ||
132 | static bool verbose = false; | ||
133 | module_param(verbose, bool, 0); | ||
134 | |||
135 | struct glob_test { | ||
136 | char const *pat, *str; | ||
137 | bool expected; | ||
138 | }; | ||
139 | |||
140 | static bool __pure __init test(char const *pat, char const *str, bool expected) | ||
141 | { | ||
142 | bool match = glob_match(pat, str); | ||
143 | bool success = match == expected; | ||
144 | |||
145 | /* Can't get string literals into a particular section, so... */ | ||
146 | static char const msg_error[] __initconst = | ||
147 | KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n"; | ||
148 | static char const msg_ok[] __initconst = | ||
149 | KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n"; | ||
150 | static char const mismatch[] __initconst = "mismatch"; | ||
151 | char const *message; | ||
152 | |||
153 | if (!success) | ||
154 | message = msg_error; | ||
155 | else if (verbose) | ||
156 | message = msg_ok; | ||
157 | else | ||
158 | return success; | ||
159 | |||
160 | printk(message, pat, str, mismatch + 3*match); | ||
161 | return success; | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * The tests are all jammed together in one array to make it simpler | ||
166 | * to place that array in the .init.rodata section. The obvious | ||
167 | * "array of structures containing char *" has no way to force the | ||
168 | * pointed-to strings to be in a particular section. | ||
169 | * | ||
170 | * Anyway, a test consists of: | ||
171 | * 1. Expected glob_match result: '1' or '0'. | ||
172 | * 2. Pattern to match: null-terminated string | ||
173 | * 3. String to match against: null-terminated string | ||
174 | * | ||
175 | * The list of tests is terminated with a final '\0' instead of | ||
176 | * a glob_match result character. | ||
177 | */ | ||
178 | static char const glob_tests[] __initconst = | ||
179 | /* Some basic tests */ | ||
180 | "1" "a\0" "a\0" | ||
181 | "0" "a\0" "b\0" | ||
182 | "0" "a\0" "aa\0" | ||
183 | "0" "a\0" "\0" | ||
184 | "1" "\0" "\0" | ||
185 | "0" "\0" "a\0" | ||
186 | /* Simple character class tests */ | ||
187 | "1" "[a]\0" "a\0" | ||
188 | "0" "[a]\0" "b\0" | ||
189 | "0" "[!a]\0" "a\0" | ||
190 | "1" "[!a]\0" "b\0" | ||
191 | "1" "[ab]\0" "a\0" | ||
192 | "1" "[ab]\0" "b\0" | ||
193 | "0" "[ab]\0" "c\0" | ||
194 | "1" "[!ab]\0" "c\0" | ||
195 | "1" "[a-c]\0" "b\0" | ||
196 | "0" "[a-c]\0" "d\0" | ||
197 | /* Corner cases in character class parsing */ | ||
198 | "1" "[a-c-e-g]\0" "-\0" | ||
199 | "0" "[a-c-e-g]\0" "d\0" | ||
200 | "1" "[a-c-e-g]\0" "f\0" | ||
201 | "1" "[]a-ceg-ik[]\0" "a\0" | ||
202 | "1" "[]a-ceg-ik[]\0" "]\0" | ||
203 | "1" "[]a-ceg-ik[]\0" "[\0" | ||
204 | "1" "[]a-ceg-ik[]\0" "h\0" | ||
205 | "0" "[]a-ceg-ik[]\0" "f\0" | ||
206 | "0" "[!]a-ceg-ik[]\0" "h\0" | ||
207 | "0" "[!]a-ceg-ik[]\0" "]\0" | ||
208 | "1" "[!]a-ceg-ik[]\0" "f\0" | ||
209 | /* Simple wild cards */ | ||
210 | "1" "?\0" "a\0" | ||
211 | "0" "?\0" "aa\0" | ||
212 | "0" "??\0" "a\0" | ||
213 | "1" "?x?\0" "axb\0" | ||
214 | "0" "?x?\0" "abx\0" | ||
215 | "0" "?x?\0" "xab\0" | ||
216 | /* Asterisk wild cards (backtracking) */ | ||
217 | "0" "*??\0" "a\0" | ||
218 | "1" "*??\0" "ab\0" | ||
219 | "1" "*??\0" "abc\0" | ||
220 | "1" "*??\0" "abcd\0" | ||
221 | "0" "??*\0" "a\0" | ||
222 | "1" "??*\0" "ab\0" | ||
223 | "1" "??*\0" "abc\0" | ||
224 | "1" "??*\0" "abcd\0" | ||
225 | "0" "?*?\0" "a\0" | ||
226 | "1" "?*?\0" "ab\0" | ||
227 | "1" "?*?\0" "abc\0" | ||
228 | "1" "?*?\0" "abcd\0" | ||
229 | "1" "*b\0" "b\0" | ||
230 | "1" "*b\0" "ab\0" | ||
231 | "0" "*b\0" "ba\0" | ||
232 | "1" "*b\0" "bb\0" | ||
233 | "1" "*b\0" "abb\0" | ||
234 | "1" "*b\0" "bab\0" | ||
235 | "1" "*bc\0" "abbc\0" | ||
236 | "1" "*bc\0" "bc\0" | ||
237 | "1" "*bc\0" "bbc\0" | ||
238 | "1" "*bc\0" "bcbc\0" | ||
239 | /* Multiple asterisks (complex backtracking) */ | ||
240 | "1" "*ac*\0" "abacadaeafag\0" | ||
241 | "1" "*ac*ae*ag*\0" "abacadaeafag\0" | ||
242 | "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0" | ||
243 | "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0" | ||
244 | "1" "*abcd*\0" "abcabcabcabcdefg\0" | ||
245 | "1" "*ab*cd*\0" "abcabcabcabcdefg\0" | ||
246 | "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0" | ||
247 | "0" "*abcd*\0" "abcabcabcabcefg\0" | ||
248 | "0" "*ab*cd*\0" "abcabcabcabcefg\0"; | ||
249 | |||
250 | static int __init glob_init(void) | ||
251 | { | ||
252 | unsigned successes = 0; | ||
253 | unsigned n = 0; | ||
254 | char const *p = glob_tests; | ||
255 | static char const message[] __initconst = | ||
256 | KERN_INFO "glob: %u self-tests passed, %u failed\n"; | ||
257 | |||
258 | /* | ||
259 | * Tests are jammed together in a string. The first byte is '1' | ||
260 | * or '0' to indicate the expected outcome, or '\0' to indicate the | ||
261 | * end of the tests. Then come two null-terminated strings: the | ||
262 | * pattern and the string to match it against. | ||
263 | */ | ||
264 | while (*p) { | ||
265 | bool expected = *p++ & 1; | ||
266 | char const *pat = p; | ||
267 | |||
268 | p += strlen(p) + 1; | ||
269 | successes += test(pat, p, expected); | ||
270 | p += strlen(p) + 1; | ||
271 | n++; | ||
272 | } | ||
273 | |||
274 | n -= successes; | ||
275 | printk(message, successes, n); | ||
276 | |||
277 | /* What's the errno for "kernel bug detected"? Guess... */ | ||
278 | return n ? -ECANCELED : 0; | ||
279 | } | ||
280 | |||
281 | /* We need a dummy exit function to allow unload */ | ||
282 | static void __exit glob_fini(void) { } | ||
283 | |||
284 | module_init(glob_init); | ||
285 | module_exit(glob_fini); | ||
286 | |||
287 | #endif /* CONFIG_GLOB_SELFTEST */ | ||
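A short usage sketch for the new helper, in the ATA blacklist style the kernel-doc describes (the pattern and model string here are made up):

	#include <linux/glob.h>

	/* True if the drive model matches the blacklist pattern; as
	 * documented above, the pattern must match the entire string. */
	static bool model_is_blacklisted(const char *model)
	{
		return glob_match("WDC WD??00BEVS-*", model);
	}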
diff --git a/lib/hweight.c b/lib/hweight.c index b7d81ba143d1..9a5c1f221558 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | unsigned int __sw_hweight32(unsigned int w) | 12 | unsigned int __sw_hweight32(unsigned int w) |
13 | { | 13 | { |
14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | 14 | #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER |
15 | w -= (w >> 1) & 0x55555555; | 15 | w -= (w >> 1) & 0x55555555; |
16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | 16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); |
17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | 17 | w = (w + (w >> 4)) & 0x0f0f0f0f; |
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w) | |||
49 | return __sw_hweight32((unsigned int)(w >> 32)) + | 49 | return __sw_hweight32((unsigned int)(w >> 32)) + |
50 | __sw_hweight32((unsigned int)w); | 50 | __sw_hweight32((unsigned int)w); |
51 | #elif BITS_PER_LONG == 64 | 51 | #elif BITS_PER_LONG == 64 |
52 | #ifdef ARCH_HAS_FAST_MULTIPLIER | 52 | #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER |
53 | w -= (w >> 1) & 0x5555555555555555ul; | 53 | w -= (w >> 1) & 0x5555555555555555ul; |
54 | w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); | 54 | w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); |
55 | w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; | 55 | w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; |
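The hunks above only fix the config-symbol spelling. For reference, what the symbol selects between is the final byte-summing step, sketched standalone here (matching the surrounding, unchanged hweight code as best understood):

	/* After the 0x0f0f0f0f step each byte of w holds its own popcount;
	 * sum the four bytes with one multiply, or with shifts and adds. */
	static unsigned int finish_hweight32(unsigned int w)
	{
	#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
		return (w * 0x01010101) >> 24;
	#else
		w += w >> 8;
		w += w >> 16;
		return w & 0x000000ff;
	#endif
	}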
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
@@ -590,26 +590,27 @@ static void __idr_remove_all(struct idr *idp) | |||
590 | struct idr_layer **paa = &pa[0]; | 590 | struct idr_layer **paa = &pa[0]; |
591 | 591 | ||
592 | n = idp->layers * IDR_BITS; | 592 | n = idp->layers * IDR_BITS; |
593 | p = idp->top; | 593 | *paa = idp->top; |
594 | RCU_INIT_POINTER(idp->top, NULL); | 594 | RCU_INIT_POINTER(idp->top, NULL); |
595 | max = idr_max(idp->layers); | 595 | max = idr_max(idp->layers); |
596 | 596 | ||
597 | id = 0; | 597 | id = 0; |
598 | while (id >= 0 && id <= max) { | 598 | while (id >= 0 && id <= max) { |
599 | p = *paa; | ||
599 | while (n > IDR_BITS && p) { | 600 | while (n > IDR_BITS && p) { |
600 | n -= IDR_BITS; | 601 | n -= IDR_BITS; |
601 | *paa++ = p; | ||
602 | p = p->ary[(id >> n) & IDR_MASK]; | 602 | p = p->ary[(id >> n) & IDR_MASK]; |
603 | *++paa = p; | ||
603 | } | 604 | } |
604 | 605 | ||
605 | bt_mask = id; | 606 | bt_mask = id; |
606 | id += 1 << n; | 607 | id += 1 << n; |
607 | /* Get the highest bit that the above add changed from 0->1. */ | 608 | /* Get the highest bit that the above add changed from 0->1. */ |
608 | while (n < fls(id ^ bt_mask)) { | 609 | while (n < fls(id ^ bt_mask)) { |
609 | if (p) | 610 | if (*paa) |
610 | free_layer(idp, p); | 611 | free_layer(idp, *paa); |
611 | n += IDR_BITS; | 612 | n += IDR_BITS; |
612 | p = *--paa; | 613 | --paa; |
613 | } | 614 | } |
614 | } | 615 | } |
615 | idp->layers = 0; | 616 | idp->layers = 0; |
@@ -692,15 +693,16 @@ int idr_for_each(struct idr *idp, | |||
692 | struct idr_layer **paa = &pa[0]; | 693 | struct idr_layer **paa = &pa[0]; |
693 | 694 | ||
694 | n = idp->layers * IDR_BITS; | 695 | n = idp->layers * IDR_BITS; |
695 | p = rcu_dereference_raw(idp->top); | 696 | *paa = rcu_dereference_raw(idp->top); |
696 | max = idr_max(idp->layers); | 697 | max = idr_max(idp->layers); |
697 | 698 | ||
698 | id = 0; | 699 | id = 0; |
699 | while (id >= 0 && id <= max) { | 700 | while (id >= 0 && id <= max) { |
701 | p = *paa; | ||
700 | while (n > 0 && p) { | 702 | while (n > 0 && p) { |
701 | n -= IDR_BITS; | 703 | n -= IDR_BITS; |
702 | *paa++ = p; | ||
703 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); | 704 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
705 | *++paa = p; | ||
704 | } | 706 | } |
705 | 707 | ||
706 | if (p) { | 708 | if (p) { |
@@ -712,7 +714,7 @@ int idr_for_each(struct idr *idp, | |||
712 | id += 1 << n; | 714 | id += 1 << n; |
713 | while (n < fls(id)) { | 715 | while (n < fls(id)) { |
714 | n += IDR_BITS; | 716 | n += IDR_BITS; |
715 | p = *--paa; | 717 | --paa; |
716 | } | 718 | } |
717 | } | 719 | } |
718 | 720 | ||
@@ -740,17 +742,18 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
740 | int n, max; | 742 | int n, max; |
741 | 743 | ||
742 | /* find first ent */ | 744 | /* find first ent */ |
743 | p = rcu_dereference_raw(idp->top); | 745 | p = *paa = rcu_dereference_raw(idp->top); |
744 | if (!p) | 746 | if (!p) |
745 | return NULL; | 747 | return NULL; |
746 | n = (p->layer + 1) * IDR_BITS; | 748 | n = (p->layer + 1) * IDR_BITS; |
747 | max = idr_max(p->layer + 1); | 749 | max = idr_max(p->layer + 1); |
748 | 750 | ||
749 | while (id >= 0 && id <= max) { | 751 | while (id >= 0 && id <= max) { |
752 | p = *paa; | ||
750 | while (n > 0 && p) { | 753 | while (n > 0 && p) { |
751 | n -= IDR_BITS; | 754 | n -= IDR_BITS; |
752 | *paa++ = p; | ||
753 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); | 755 | p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); |
756 | *++paa = p; | ||
754 | } | 757 | } |
755 | 758 | ||
756 | if (p) { | 759 | if (p) { |
@@ -768,7 +771,7 @@ void *idr_get_next(struct idr *idp, int *nextidp) | |||
768 | id = round_up(id + 1, 1 << n); | 771 | id = round_up(id + 1, 1 << n); |
769 | while (n < fls(id)) { | 772 | while (n < fls(id)) { |
770 | n += IDR_BITS; | 773 | n += IDR_BITS; |
771 | p = *--paa; | 774 | --paa; |
772 | } | 775 | } |
773 | } | 776 | } |
774 | return NULL; | 777 | return NULL; |
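The traversal rework above changes only how the internal layer stack (paa) is maintained; the external idr_for_each() contract is unchanged. An illustrative callback (print_entry and my_idr are hypothetical):

	/* Invoked once per allocated id, in ascending id order; a nonzero
	 * return stops the walk and is propagated to the caller. */
	static int print_entry(int id, void *ptr, void *data)
	{
		pr_info("id %d -> %p\n", id, ptr);
		return 0;
	}

	/* ... */
	idr_for_each(&my_idr, print_entry, NULL);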
diff --git a/lib/iovec.c b/lib/iovec.c index 7a7c2da4cddf..df3abd1eaa4a 100644 --- a/lib/iovec.c +++ b/lib/iovec.c | |||
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend); | |||
85 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | 85 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, |
86 | int offset, int len) | 86 | int offset, int len) |
87 | { | 87 | { |
88 | /* No data? Done! */ | ||
89 | if (len == 0) | ||
90 | return 0; | ||
91 | |||
88 | /* Skip over the finished iovecs */ | 92 | /* Skip over the finished iovecs */ |
89 | while (offset >= iov->iov_len) { | 93 | while (offset >= iov->iov_len) { |
90 | offset -= iov->iov_len; | 94 | offset -= iov->iov_len; |
diff --git a/lib/kfifo.c b/lib/kfifo.c index d79b9d222065..90ba1eb1df06 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c | |||
@@ -561,8 +561,7 @@ EXPORT_SYMBOL(__kfifo_to_user_r); | |||
561 | unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, | 561 | unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, |
562 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | 562 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) |
563 | { | 563 | { |
564 | if (!nents) | 564 | BUG_ON(!nents); |
565 | BUG(); | ||
566 | 565 | ||
567 | len = __kfifo_max_r(len, recsize); | 566 | len = __kfifo_max_r(len, recsize); |
568 | 567 | ||
@@ -585,8 +584,7 @@ EXPORT_SYMBOL(__kfifo_dma_in_finish_r); | |||
585 | unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, | 584 | unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, |
586 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) | 585 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) |
587 | { | 586 | { |
588 | if (!nents) | 587 | BUG_ON(!nents); |
589 | BUG(); | ||
590 | 588 | ||
591 | len = __kfifo_max_r(len, recsize); | 589 | len = __kfifo_max_r(len, recsize); |
592 | 590 | ||
diff --git a/lib/klist.c b/lib/klist.c index 358a368a2947..89b485a2a58d 100644 --- a/lib/klist.c +++ b/lib/klist.c | |||
@@ -140,11 +140,11 @@ void klist_add_tail(struct klist_node *n, struct klist *k) | |||
140 | EXPORT_SYMBOL_GPL(klist_add_tail); | 140 | EXPORT_SYMBOL_GPL(klist_add_tail); |
141 | 141 | ||
142 | /** | 142 | /** |
143 | * klist_add_after - Init a klist_node and add it after an existing node | 143 | * klist_add_behind - Init a klist_node and add it after an existing node |
144 | * @n: node we're adding. | 144 | * @n: node we're adding. |
145 | * @pos: node to put @n after | 145 | * @pos: node to put @n after |
146 | */ | 146 | */ |
147 | void klist_add_after(struct klist_node *n, struct klist_node *pos) | 147 | void klist_add_behind(struct klist_node *n, struct klist_node *pos) |
148 | { | 148 | { |
149 | struct klist *k = knode_klist(pos); | 149 | struct klist *k = knode_klist(pos); |
150 | 150 | ||
@@ -153,7 +153,7 @@ void klist_add_after(struct klist_node *n, struct klist_node *pos) | |||
153 | list_add(&n->n_node, &pos->n_node); | 153 | list_add(&n->n_node, &pos->n_node); |
154 | spin_unlock(&k->k_lock); | 154 | spin_unlock(&k->k_lock); |
155 | } | 155 | } |
156 | EXPORT_SYMBOL_GPL(klist_add_after); | 156 | EXPORT_SYMBOL_GPL(klist_add_behind); |
157 | 157 | ||
158 | /** | 158 | /** |
159 | * klist_add_before - Init a klist_node and add it before an existing node | 159 | * klist_add_before - Init a klist_node and add it before an existing node |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 1183fa70a44d..12bcba1c8612 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
@@ -1,3 +1,6 @@ | |||
1 | |||
2 | #define pr_fmt(fmt) "list_sort_test: " fmt | ||
3 | |||
1 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | 5 | #include <linux/module.h> |
3 | #include <linux/list_sort.h> | 6 | #include <linux/list_sort.h> |
@@ -47,6 +50,7 @@ static void merge_and_restore_back_links(void *priv, | |||
47 | struct list_head *a, struct list_head *b) | 50 | struct list_head *a, struct list_head *b) |
48 | { | 51 | { |
49 | struct list_head *tail = head; | 52 | struct list_head *tail = head; |
53 | u8 count = 0; | ||
50 | 54 | ||
51 | while (a && b) { | 55 | while (a && b) { |
52 | /* if equal, take 'a' -- important for sort stability */ | 56 | /* if equal, take 'a' -- important for sort stability */ |
@@ -70,7 +74,8 @@ static void merge_and_restore_back_links(void *priv, | |||
70 | * element comparison is needed, so the client's cmp() | 74 | * element comparison is needed, so the client's cmp() |
71 | * routine can invoke cond_resched() periodically. | 75 | * routine can invoke cond_resched() periodically. |
72 | */ | 76 | */ |
73 | (*cmp)(priv, tail->next, tail->next); | 77 | if (unlikely(!(++count))) |
78 | (*cmp)(priv, tail->next, tail->next); | ||
74 | 79 | ||
75 | tail->next->prev = tail; | 80 | tail->next->prev = tail; |
76 | tail = tail->next; | 81 | tail = tail->next; |
@@ -123,9 +128,7 @@ void list_sort(void *priv, struct list_head *head, | |||
123 | } | 128 | } |
124 | if (lev > max_lev) { | 129 | if (lev > max_lev) { |
125 | if (unlikely(lev >= ARRAY_SIZE(part)-1)) { | 130 | if (unlikely(lev >= ARRAY_SIZE(part)-1)) { |
126 | printk_once(KERN_DEBUG "list passed to" | 131 | printk_once(KERN_DEBUG "list too long for efficiency\n"); |
127 | " list_sort() too long for" | ||
128 | " efficiency\n"); | ||
129 | lev--; | 132 | lev--; |
130 | } | 133 | } |
131 | max_lev = lev; | 134 | max_lev = lev; |
@@ -168,27 +171,25 @@ static struct debug_el **elts __initdata; | |||
168 | static int __init check(struct debug_el *ela, struct debug_el *elb) | 171 | static int __init check(struct debug_el *ela, struct debug_el *elb) |
169 | { | 172 | { |
170 | if (ela->serial >= TEST_LIST_LEN) { | 173 | if (ela->serial >= TEST_LIST_LEN) { |
171 | printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n", | 174 | pr_err("error: incorrect serial %d\n", ela->serial); |
172 | ela->serial); | ||
173 | return -EINVAL; | 175 | return -EINVAL; |
174 | } | 176 | } |
175 | if (elb->serial >= TEST_LIST_LEN) { | 177 | if (elb->serial >= TEST_LIST_LEN) { |
176 | printk(KERN_ERR "list_sort_test: error: incorrect serial %d\n", | 178 | pr_err("error: incorrect serial %d\n", elb->serial); |
177 | elb->serial); | ||
178 | return -EINVAL; | 179 | return -EINVAL; |
179 | } | 180 | } |
180 | if (elts[ela->serial] != ela || elts[elb->serial] != elb) { | 181 | if (elts[ela->serial] != ela || elts[elb->serial] != elb) { |
181 | printk(KERN_ERR "list_sort_test: error: phantom element\n"); | 182 | pr_err("error: phantom element\n"); |
182 | return -EINVAL; | 183 | return -EINVAL; |
183 | } | 184 | } |
184 | if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) { | 185 | if (ela->poison1 != TEST_POISON1 || ela->poison2 != TEST_POISON2) { |
185 | printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n", | 186 | pr_err("error: bad poison: %#x/%#x\n", |
186 | ela->poison1, ela->poison2); | 187 | ela->poison1, ela->poison2); |
187 | return -EINVAL; | 188 | return -EINVAL; |
188 | } | 189 | } |
189 | if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) { | 190 | if (elb->poison1 != TEST_POISON1 || elb->poison2 != TEST_POISON2) { |
190 | printk(KERN_ERR "list_sort_test: error: bad poison: %#x/%#x\n", | 191 | pr_err("error: bad poison: %#x/%#x\n", |
191 | elb->poison1, elb->poison2); | 192 | elb->poison1, elb->poison2); |
192 | return -EINVAL; | 193 | return -EINVAL; |
193 | } | 194 | } |
194 | return 0; | 195 | return 0; |
@@ -207,25 +208,23 @@ static int __init cmp(void *priv, struct list_head *a, struct list_head *b) | |||
207 | 208 | ||
208 | static int __init list_sort_test(void) | 209 | static int __init list_sort_test(void) |
209 | { | 210 | { |
210 | int i, count = 1, err = -EINVAL; | 211 | int i, count = 1, err = -ENOMEM; |
211 | struct debug_el *el; | 212 | struct debug_el *el; |
212 | struct list_head *cur, *tmp; | 213 | struct list_head *cur; |
213 | LIST_HEAD(head); | 214 | LIST_HEAD(head); |
214 | 215 | ||
215 | printk(KERN_DEBUG "list_sort_test: start testing list_sort()\n"); | 216 | pr_debug("start testing list_sort()\n"); |
216 | 217 | ||
217 | elts = kmalloc(sizeof(void *) * TEST_LIST_LEN, GFP_KERNEL); | 218 | elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL); |
218 | if (!elts) { | 219 | if (!elts) { |
219 | printk(KERN_ERR "list_sort_test: error: cannot allocate " | 220 | pr_err("error: cannot allocate memory\n"); |
220 | "memory\n"); | 221 | return err; |
221 | goto exit; | ||
222 | } | 222 | } |
223 | 223 | ||
224 | for (i = 0; i < TEST_LIST_LEN; i++) { | 224 | for (i = 0; i < TEST_LIST_LEN; i++) { |
225 | el = kmalloc(sizeof(*el), GFP_KERNEL); | 225 | el = kmalloc(sizeof(*el), GFP_KERNEL); |
226 | if (!el) { | 226 | if (!el) { |
227 | printk(KERN_ERR "list_sort_test: error: cannot " | 227 | pr_err("error: cannot allocate memory\n"); |
228 | "allocate memory\n"); | ||
229 | goto exit; | 228 | goto exit; |
230 | } | 229 | } |
231 | /* force some equivalencies */ | 230 | /* force some equivalencies */ |
@@ -239,52 +238,52 @@ static int __init list_sort_test(void) | |||
239 | 238 | ||
240 | list_sort(NULL, &head, cmp); | 239 | list_sort(NULL, &head, cmp); |
241 | 240 | ||
241 | err = -EINVAL; | ||
242 | for (cur = head.next; cur->next != &head; cur = cur->next) { | 242 | for (cur = head.next; cur->next != &head; cur = cur->next) { |
243 | struct debug_el *el1; | 243 | struct debug_el *el1; |
244 | int cmp_result; | 244 | int cmp_result; |
245 | 245 | ||
246 | if (cur->next->prev != cur) { | 246 | if (cur->next->prev != cur) { |
247 | printk(KERN_ERR "list_sort_test: error: list is " | 247 | pr_err("error: list is corrupted\n"); |
248 | "corrupted\n"); | ||
249 | goto exit; | 248 | goto exit; |
250 | } | 249 | } |
251 | 250 | ||
252 | cmp_result = cmp(NULL, cur, cur->next); | 251 | cmp_result = cmp(NULL, cur, cur->next); |
253 | if (cmp_result > 0) { | 252 | if (cmp_result > 0) { |
254 | printk(KERN_ERR "list_sort_test: error: list is not " | 253 | pr_err("error: list is not sorted\n"); |
255 | "sorted\n"); | ||
256 | goto exit; | 254 | goto exit; |
257 | } | 255 | } |
258 | 256 | ||
259 | el = container_of(cur, struct debug_el, list); | 257 | el = container_of(cur, struct debug_el, list); |
260 | el1 = container_of(cur->next, struct debug_el, list); | 258 | el1 = container_of(cur->next, struct debug_el, list); |
261 | if (cmp_result == 0 && el->serial >= el1->serial) { | 259 | if (cmp_result == 0 && el->serial >= el1->serial) { |
262 | printk(KERN_ERR "list_sort_test: error: order of " | 260 | pr_err("error: order of equivalent elements not " |
263 | "equivalent elements not preserved\n"); | 261 | "preserved\n"); |
264 | goto exit; | 262 | goto exit; |
265 | } | 263 | } |
266 | 264 | ||
267 | if (check(el, el1)) { | 265 | if (check(el, el1)) { |
268 | printk(KERN_ERR "list_sort_test: error: element check " | 266 | pr_err("error: element check failed\n"); |
269 | "failed\n"); | ||
270 | goto exit; | 267 | goto exit; |
271 | } | 268 | } |
272 | count++; | 269 | count++; |
273 | } | 270 | } |
271 | if (head.prev != cur) { | ||
272 | pr_err("error: list is corrupted\n"); | ||
273 | goto exit; | ||
274 | } | ||
275 | |||
274 | 276 | ||
275 | if (count != TEST_LIST_LEN) { | 277 | if (count != TEST_LIST_LEN) { |
276 | printk(KERN_ERR "list_sort_test: error: bad list length %d", | 278 | pr_err("error: bad list length %d", count); |
277 | count); | ||
278 | goto exit; | 279 | goto exit; |
279 | } | 280 | } |
280 | 281 | ||
281 | err = 0; | 282 | err = 0; |
282 | exit: | 283 | exit: |
284 | for (i = 0; i < TEST_LIST_LEN; i++) | ||
285 | kfree(elts[i]); | ||
283 | kfree(elts); | 286 | kfree(elts); |
284 | list_for_each_safe(cur, tmp, &head) { | ||
285 | list_del(cur); | ||
286 | kfree(container_of(cur, struct debug_el, list)); | ||
287 | } | ||
288 | return err; | 287 | return err; |
289 | } | 288 | } |
290 | module_init(list_sort_test); | 289 | module_init(list_sort_test); |
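The new u8 counter means the dummy cmp() invocation now fires once every 256 merged links instead of on every one, which is still frequent enough for its documented purpose: letting the client's cmp() yield the CPU on very long lists. A sketch of such a callback (struct item and its key field are hypothetical):

	static int my_cmp(void *priv, struct list_head *a, struct list_head *b)
	{
		struct item *ia = container_of(a, struct item, list);
		struct item *ib = container_of(b, struct item, list);

		cond_resched();	/* safe: list_sort() guarantees periodic calls */
		return ia->key - ib->key;
	}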
diff --git a/lib/lockref.c b/lib/lockref.c index f07a40d33871..d2233de9a86e 100644 --- a/lib/lockref.c +++ b/lib/lockref.c | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/export.h> | 1 | #include <linux/export.h> |
2 | #include <linux/lockref.h> | 2 | #include <linux/lockref.h> |
3 | #include <linux/mutex.h> | ||
4 | 3 | ||
5 | #if USE_CMPXCHG_LOCKREF | 4 | #if USE_CMPXCHG_LOCKREF |
6 | 5 | ||
@@ -29,7 +28,7 @@ | |||
29 | if (likely(old.lock_count == prev.lock_count)) { \ | 28 | if (likely(old.lock_count == prev.lock_count)) { \ |
30 | SUCCESS; \ | 29 | SUCCESS; \ |
31 | } \ | 30 | } \ |
32 | arch_mutex_cpu_relax(); \ | 31 | cpu_relax_lowlatency(); \ |
33 | } \ | 32 | } \ |
34 | } while (0) | 33 | } while (0) |
35 | 34 | ||
diff --git a/lib/lru_cache.c b/lib/lru_cache.c index 4a83ecd03650..852c81e3ba9a 100644 --- a/lib/lru_cache.c +++ b/lib/lru_cache.c | |||
@@ -169,7 +169,7 @@ out_fail: | |||
169 | return NULL; | 169 | return NULL; |
170 | } | 170 | } |
171 | 171 | ||
172 | void lc_free_by_index(struct lru_cache *lc, unsigned i) | 172 | static void lc_free_by_index(struct lru_cache *lc, unsigned i) |
173 | { | 173 | { |
174 | void *p = lc->lc_element[i]; | 174 | void *p = lc->lc_element[i]; |
175 | WARN_ON(!p); | 175 | WARN_ON(!p); |
@@ -643,9 +643,10 @@ void lc_set(struct lru_cache *lc, unsigned int enr, int index) | |||
643 | * lc_dump - Dump a complete LRU cache to seq in textual form. | 643 | * lc_dump - Dump a complete LRU cache to seq in textual form. |
644 | * @lc: the lru cache to operate on | 644 | * @lc: the lru cache to operate on |
645 | * @seq: the &struct seq_file pointer to seq_printf into | 645 | * @seq: the &struct seq_file pointer to seq_printf into |
646 | * @utext: user supplied "heading" or other info | 646 | * @utext: user supplied additional "heading" or other info |
647 | * @detail: function pointer the user may provide to dump further details | 647 | * @detail: function pointer the user may provide to dump further details |
648 | * of the object the lc_element is embedded in. | 648 | * of the object the lc_element is embedded in. May be NULL. |
649 | * Note: a leading space ' ' and trailing newline '\n' is implied. | ||
649 | */ | 650 | */ |
650 | void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, | 651 | void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext, |
651 | void (*detail) (struct seq_file *, struct lc_element *)) | 652 | void (*detail) (struct seq_file *, struct lc_element *)) |
@@ -654,16 +655,18 @@ void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext | |||
654 | struct lc_element *e; | 655 | struct lc_element *e; |
655 | int i; | 656 | int i; |
656 | 657 | ||
657 | seq_printf(seq, "\tnn: lc_number refcnt %s\n ", utext); | 658 | seq_printf(seq, "\tnn: lc_number (new nr) refcnt %s\n ", utext); |
658 | for (i = 0; i < nr_elements; i++) { | 659 | for (i = 0; i < nr_elements; i++) { |
659 | e = lc_element_by_index(lc, i); | 660 | e = lc_element_by_index(lc, i); |
660 | if (e->lc_number == LC_FREE) { | 661 | if (e->lc_number != e->lc_new_number) |
661 | seq_printf(seq, "\t%2d: FREE\n", i); | 662 | seq_printf(seq, "\t%5d: %6d %8d %6d ", |
662 | } else { | 663 | i, e->lc_number, e->lc_new_number, e->refcnt); |
663 | seq_printf(seq, "\t%2d: %4u %4u ", i, | 664 | else |
664 | e->lc_number, e->refcnt); | 665 | seq_printf(seq, "\t%5d: %6d %-8s %6d ", |
666 | i, e->lc_number, "-\"-", e->refcnt); | ||
667 | if (detail) | ||
665 | detail(seq, e); | 668 | detail(seq, e); |
666 | } | 669 | seq_putc(seq, '\n'); |
667 | } | 670 | } |
668 | } | 671 | } |
669 | 672 | ||
diff --git a/lib/net_utils.c b/lib/net_utils.c index 2e3c52c8d050..148fc6e99ef6 100644 --- a/lib/net_utils.c +++ b/lib/net_utils.c | |||
@@ -3,24 +3,24 @@ | |||
3 | #include <linux/ctype.h> | 3 | #include <linux/ctype.h> |
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | 5 | ||
6 | int mac_pton(const char *s, u8 *mac) | 6 | bool mac_pton(const char *s, u8 *mac) |
7 | { | 7 | { |
8 | int i; | 8 | int i; |
9 | 9 | ||
10 | /* XX:XX:XX:XX:XX:XX */ | 10 | /* XX:XX:XX:XX:XX:XX */ |
11 | if (strlen(s) < 3 * ETH_ALEN - 1) | 11 | if (strlen(s) < 3 * ETH_ALEN - 1) |
12 | return 0; | 12 | return false; |
13 | 13 | ||
14 | /* Don't dirty result unless string is valid MAC. */ | 14 | /* Don't dirty result unless string is valid MAC. */ |
15 | for (i = 0; i < ETH_ALEN; i++) { | 15 | for (i = 0; i < ETH_ALEN; i++) { |
16 | if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1])) | 16 | if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1])) |
17 | return 0; | 17 | return false; |
18 | if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') | 18 | if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') |
19 | return 0; | 19 | return false; |
20 | } | 20 | } |
21 | for (i = 0; i < ETH_ALEN; i++) { | 21 | for (i = 0; i < ETH_ALEN; i++) { |
22 | mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]); | 22 | mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]); |
23 | } | 23 | } |
24 | return 1; | 24 | return true; |
25 | } | 25 | } |
26 | EXPORT_SYMBOL(mac_pton); | 26 | EXPORT_SYMBOL(mac_pton); |
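With the bool return, call sites read naturally; note that mac[] is written only after the whole string has validated. Illustrative:

	u8 mac[ETH_ALEN];

	if (!mac_pton("00:11:22:33:44:55", mac))
		pr_err("invalid MAC address\n");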
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 963b7034a51b..a89cf09a8268 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c | |||
@@ -31,6 +31,11 @@ | |||
31 | 31 | ||
32 | #define PCPU_COUNT_BIAS (1U << 31) | 32 | #define PCPU_COUNT_BIAS (1U << 31) |
33 | 33 | ||
34 | static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref) | ||
35 | { | ||
36 | return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD); | ||
37 | } | ||
38 | |||
34 | /** | 39 | /** |
35 | * percpu_ref_init - initialize a percpu refcount | 40 | * percpu_ref_init - initialize a percpu refcount |
36 | * @ref: percpu_ref to initialize | 41 | * @ref: percpu_ref to initialize |
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release) | |||
46 | { | 51 | { |
47 | atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS); | 52 | atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS); |
48 | 53 | ||
49 | ref->pcpu_count = alloc_percpu(unsigned); | 54 | ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned); |
50 | if (!ref->pcpu_count) | 55 | if (!ref->pcpu_count_ptr) |
51 | return -ENOMEM; | 56 | return -ENOMEM; |
52 | 57 | ||
53 | ref->release = release; | 58 | ref->release = release; |
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release) | |||
56 | EXPORT_SYMBOL_GPL(percpu_ref_init); | 61 | EXPORT_SYMBOL_GPL(percpu_ref_init); |
57 | 62 | ||
58 | /** | 63 | /** |
59 | * percpu_ref_cancel_init - cancel percpu_ref_init() | 64 | * percpu_ref_reinit - re-initialize a percpu refcount |
60 | * @ref: percpu_ref to cancel init for | 65 | * @ref: percpu_ref to re-initialize
61 | * | ||
62 | * Once a percpu_ref is initialized, its destruction is initiated by | ||
63 | * percpu_ref_kill() and completes asynchronously, which can be painful to | ||
64 | * do when destroying a half-constructed object in init failure path. | ||
65 | * | 66 | * |
66 | * This function destroys @ref without invoking @ref->release and the | 67 | * Re-initialize @ref so that it's in the same state as when it finished |
67 | * memory area containing it can be freed immediately on return. To | 68 | * percpu_ref_init(). @ref must have been initialized successfully, killed |
68 | * prevent accidental misuse, it's required that @ref has finished | 69 | * and reached 0 but not exited. |
69 | * percpu_ref_init(), whether successful or not, but never used. | ||
70 | * | 70 | * |
71 | * The weird name and usage restriction are to prevent people from using | 71 | * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while |
72 | * this function by mistake for normal shutdown instead of | 72 | * this function is in progress. |
73 | * percpu_ref_kill(). | ||
74 | */ | 73 | */ |
75 | void percpu_ref_cancel_init(struct percpu_ref *ref) | 74 | void percpu_ref_reinit(struct percpu_ref *ref) |
76 | { | 75 | { |
77 | unsigned __percpu *pcpu_count = ref->pcpu_count; | 76 | unsigned __percpu *pcpu_count = pcpu_count_ptr(ref); |
78 | int cpu; | 77 | int cpu; |
79 | 78 | ||
80 | WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS); | 79 | BUG_ON(!pcpu_count); |
80 | WARN_ON(!percpu_ref_is_zero(ref)); | ||
81 | |||
82 | atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS); | ||
83 | |||
84 | /* | ||
85 | * Restore per-cpu operation. smp_store_release() is paired with | ||
86 | * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees | ||
87 | * that the zeroing is visible to all percpu accesses which can see | ||
88 | * the following PCPU_REF_DEAD clearing. | ||
89 | */ | ||
90 | for_each_possible_cpu(cpu) | ||
91 | *per_cpu_ptr(pcpu_count, cpu) = 0; | ||
92 | |||
93 | smp_store_release(&ref->pcpu_count_ptr, | ||
94 | ref->pcpu_count_ptr & ~PCPU_REF_DEAD); | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(percpu_ref_reinit); | ||
97 | |||
98 | /** | ||
99 | * percpu_ref_exit - undo percpu_ref_init() | ||
100 | * @ref: percpu_ref to exit | ||
101 | * | ||
102 | * This function exits @ref. The caller is responsible for ensuring that | ||
103 | * @ref is no longer in active use. The usual places to invoke this | ||
104 | * function from are the @ref->release() callback or in init failure path | ||
105 | * where percpu_ref_init() succeeded but other parts of the initialization | ||
106 | * of the embedding object failed. | ||
107 | */ | ||
108 | void percpu_ref_exit(struct percpu_ref *ref) | ||
109 | { | ||
110 | unsigned __percpu *pcpu_count = pcpu_count_ptr(ref); | ||
81 | 111 | ||
82 | if (pcpu_count) { | 112 | if (pcpu_count) { |
83 | for_each_possible_cpu(cpu) | 113 | free_percpu(pcpu_count); |
84 | WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu)); | 114 | ref->pcpu_count_ptr = PCPU_REF_DEAD; |
85 | free_percpu(ref->pcpu_count); | ||
86 | } | 115 | } |
87 | } | 116 | } |
88 | EXPORT_SYMBOL_GPL(percpu_ref_cancel_init); | 117 | EXPORT_SYMBOL_GPL(percpu_ref_exit); |
89 | 118 | ||
90 | static void percpu_ref_kill_rcu(struct rcu_head *rcu) | 119 | static void percpu_ref_kill_rcu(struct rcu_head *rcu) |
91 | { | 120 | { |
92 | struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); | 121 | struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu); |
93 | unsigned __percpu *pcpu_count = ref->pcpu_count; | 122 | unsigned __percpu *pcpu_count = pcpu_count_ptr(ref); |
94 | unsigned count = 0; | 123 | unsigned count = 0; |
95 | int cpu; | 124 | int cpu; |
96 | 125 | ||
97 | /* Mask out PCPU_REF_DEAD */ | ||
98 | pcpu_count = (unsigned __percpu *) | ||
99 | (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK); | ||
100 | |||
101 | for_each_possible_cpu(cpu) | 126 | for_each_possible_cpu(cpu) |
102 | count += *per_cpu_ptr(pcpu_count, cpu); | 127 | count += *per_cpu_ptr(pcpu_count, cpu); |
103 | 128 | ||
104 | free_percpu(pcpu_count); | ||
105 | |||
106 | pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count); | 129 | pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count); |
107 | 130 | ||
108 | /* | 131 | /* |
@@ -152,13 +175,28 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu) | |||
152 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, | 175 | void percpu_ref_kill_and_confirm(struct percpu_ref *ref, |
153 | percpu_ref_func_t *confirm_kill) | 176 | percpu_ref_func_t *confirm_kill) |
154 | { | 177 | { |
155 | WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD, | 178 | WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD, |
156 | "percpu_ref_kill() called more than once!\n"); | 179 | "percpu_ref_kill() called more than once!\n"); |
157 | 180 | ||
158 | ref->pcpu_count = (unsigned __percpu *) | 181 | ref->pcpu_count_ptr |= PCPU_REF_DEAD; |
159 | (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD); | ||
160 | ref->confirm_kill = confirm_kill; | 182 | ref->confirm_kill = confirm_kill; |
161 | 183 | ||
162 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); | 184 | call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu); |
163 | } | 185 | } |
164 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); | 186 | EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm); |
187 | |||
188 | /* | ||
189 | * XXX: Temporary kludge to work around SCSI blk-mq stall. Used only by | ||
190 | * block/blk-mq.c::blk_mq_freeze_queue(). Will be removed during v3.18 | ||
191 | * devel cycle. Do not use anywhere else. | ||
192 | */ | ||
193 | void __percpu_ref_kill_expedited(struct percpu_ref *ref) | ||
194 | { | ||
195 | WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD, | ||
196 | "percpu_ref_kill() called more than once on %pf!", | ||
197 | ref->release); | ||
198 | |||
199 | ref->pcpu_count_ptr |= PCPU_REF_DEAD; | ||
200 | synchronize_sched_expedited(); | ||
201 | percpu_ref_kill_rcu(&ref->rcu); | ||
202 | } | ||
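A hedged lifecycle sketch of the reworked API (my_obj, my_release and my_obj_setup are hypothetical): percpu_ref_exit() now covers both the init-failure case that percpu_ref_cancel_init() used to handle and ordinary teardown after release, while percpu_ref_reinit() revives a ref that was killed and drained to zero:

	static void my_release(struct percpu_ref *ref)
	{
		/* last reference gone; percpu_ref_exit() may run from here */
	}

	static int my_obj_init(struct my_obj *obj)
	{
		int ret = percpu_ref_init(&obj->ref, my_release);

		if (ret)
			return ret;
		if (my_obj_setup(obj)) {		/* assumed helper */
			percpu_ref_exit(&obj->ref);	/* replaces cancel_init */
			return -EIO;
		}
		return 0;
	}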
diff --git a/lib/random32.c b/lib/random32.c index fa5da61ce7ad..c9b6bf3afe0c 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
@@ -40,6 +40,10 @@ | |||
40 | 40 | ||
41 | #ifdef CONFIG_RANDOM32_SELFTEST | 41 | #ifdef CONFIG_RANDOM32_SELFTEST |
42 | static void __init prandom_state_selftest(void); | 42 | static void __init prandom_state_selftest(void); |
43 | #else | ||
44 | static inline void prandom_state_selftest(void) | ||
45 | { | ||
46 | } | ||
43 | #endif | 47 | #endif |
44 | 48 | ||
45 | static DEFINE_PER_CPU(struct rnd_state, net_rand_state); | 49 | static DEFINE_PER_CPU(struct rnd_state, net_rand_state); |
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state); | |||
53 | */ | 57 | */ |
54 | u32 prandom_u32_state(struct rnd_state *state) | 58 | u32 prandom_u32_state(struct rnd_state *state) |
55 | { | 59 | { |
56 | #define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b) | 60 | #define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b) |
57 | |||
58 | state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U); | 61 | state->s1 = TAUSWORTHE(state->s1, 6U, 13U, 4294967294U, 18U); |
59 | state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U); | 62 | state->s2 = TAUSWORTHE(state->s2, 2U, 27U, 4294967288U, 2U); |
60 | state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U); | 63 | state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U, 7U); |
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state) | |||
147 | prandom_u32_state(state); | 150 | prandom_u32_state(state); |
148 | } | 151 | } |
149 | 152 | ||
150 | static void prandom_seed_very_weak(struct rnd_state *state, u32 seed) | 153 | static u32 __extract_hwseed(void) |
151 | { | 154 | { |
152 | /* Note: This sort of seeding is ONLY used in test cases and | 155 | u32 val = 0; |
153 | * during boot at the time from core_initcall until late_initcall | 156 | |
154 | * as we don't have a stronger entropy source available yet. | 157 | (void)(arch_get_random_seed_int(&val) || |
155 | * After late_initcall, we reseed entire state, we have to (!), | 158 | arch_get_random_int(&val)); |
156 | * otherwise an attacker just needs to search 32 bit space to | 159 | |
157 | * probe for our internal 128 bit state if he knows a couple | 160 | return val; |
158 | * of prandom32 outputs! | 161 | } |
159 | */ | 162 | |
160 | #define LCG(x) ((x) * 69069U) /* super-duper LCG */ | 163 | static void prandom_seed_early(struct rnd_state *state, u32 seed, |
161 | state->s1 = __seed(LCG(seed), 2U); | 164 | bool mix_with_hwseed) |
162 | state->s2 = __seed(LCG(state->s1), 8U); | 165 | { |
163 | state->s3 = __seed(LCG(state->s2), 16U); | 166 | #define LCG(x) ((x) * 69069U) /* super-duper LCG */ |
164 | state->s4 = __seed(LCG(state->s3), 128U); | 167 | #define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0) |
168 | state->s1 = __seed(HWSEED() ^ LCG(seed), 2U); | ||
169 | state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U); | ||
170 | state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U); | ||
171 | state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U); | ||
165 | } | 172 | } |
166 | 173 | ||
167 | /** | 174 | /** |
@@ -194,14 +201,13 @@ static int __init prandom_init(void) | |||
194 | { | 201 | { |
195 | int i; | 202 | int i; |
196 | 203 | ||
197 | #ifdef CONFIG_RANDOM32_SELFTEST | ||
198 | prandom_state_selftest(); | 204 | prandom_state_selftest(); |
199 | #endif | ||
200 | 205 | ||
201 | for_each_possible_cpu(i) { | 206 | for_each_possible_cpu(i) { |
202 | struct rnd_state *state = &per_cpu(net_rand_state,i); | 207 | struct rnd_state *state = &per_cpu(net_rand_state,i); |
208 | u32 weak_seed = (i + jiffies) ^ random_get_entropy(); | ||
203 | 209 | ||
204 | prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy()); | 210 | prandom_seed_early(state, weak_seed, true); |
205 | prandom_warmup(state); | 211 | prandom_warmup(state); |
206 | } | 212 | } |
207 | 213 | ||
@@ -210,6 +216,7 @@ static int __init prandom_init(void) | |||
210 | core_initcall(prandom_init); | 216 | core_initcall(prandom_init); |
211 | 217 | ||
212 | static void __prandom_timer(unsigned long dontcare); | 218 | static void __prandom_timer(unsigned long dontcare); |
219 | |||
213 | static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); | 220 | static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); |
214 | 221 | ||
215 | static void __prandom_timer(unsigned long dontcare) | 222 | static void __prandom_timer(unsigned long dontcare) |
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void) | |||
419 | for (i = 0; i < ARRAY_SIZE(test1); i++) { | 426 | for (i = 0; i < ARRAY_SIZE(test1); i++) { |
420 | struct rnd_state state; | 427 | struct rnd_state state; |
421 | 428 | ||
422 | prandom_seed_very_weak(&state, test1[i].seed); | 429 | prandom_seed_early(&state, test1[i].seed, false); |
423 | prandom_warmup(&state); | 430 | prandom_warmup(&state); |
424 | 431 | ||
425 | if (test1[i].result != prandom_u32_state(&state)) | 432 | if (test1[i].result != prandom_u32_state(&state)) |
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void) | |||
434 | for (i = 0; i < ARRAY_SIZE(test2); i++) { | 441 | for (i = 0; i < ARRAY_SIZE(test2); i++) { |
435 | struct rnd_state state; | 442 | struct rnd_state state; |
436 | 443 | ||
437 | prandom_seed_very_weak(&state, test2[i].seed); | 444 | prandom_seed_early(&state, test2[i].seed, false); |
438 | prandom_warmup(&state); | 445 | prandom_warmup(&state); |
439 | 446 | ||
440 | for (j = 0; j < test2[i].iteration - 1; j++) | 447 | for (j = 0; j < test2[i].iteration - 1; j++) |
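For orientation, an illustrative draw from a privately seeded state, the same pattern the selftests above use (the seed value is arbitrary; prandom_seed_state() is the existing non-early seeding helper):

	struct rnd_state st;

	prandom_seed_state(&st, 42);
	pr_info("sample: %u\n", prandom_u32_state(&st));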
diff --git a/lib/rbtree.c b/lib/rbtree.c index 65f4effd117f..c16c81a3d430 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
@@ -101,7 +101,7 @@ __rb_insert(struct rb_node *node, struct rb_root *root, | |||
101 | * / \ / \ | 101 | * / \ / \ |
102 | * p u --> P U | 102 | * p u --> P U |
103 | * / / | 103 | * / / |
104 | * n N | 104 | * n n |
105 | * | 105 | * |
106 | * However, since g's parent might be red, and | 106 | * However, since g's parent might be red, and |
107 | * 4) does not allow this, we need to recurse | 107 | * 4) does not allow this, we need to recurse |
diff --git a/lib/rhashtable.c b/lib/rhashtable.c new file mode 100644 index 000000000000..16d02639d334 --- /dev/null +++ b/lib/rhashtable.c | |||
@@ -0,0 +1,790 @@ | |||
1 | /* | ||
2 | * Resizable, Scalable, Concurrent Hash Table | ||
3 | * | ||
4 | * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> | ||
5 | * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> | ||
6 | * | ||
7 | * Based on the following paper: | ||
8 | * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf | ||
9 | * | ||
10 | * Code partially derived from nft_hash | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License version 2 as | ||
14 | * published by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/log2.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/vmalloc.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/hash.h> | ||
24 | #include <linux/random.h> | ||
25 | #include <linux/rhashtable.h> | ||
26 | |||
27 | #define HASH_DEFAULT_SIZE 64UL | ||
28 | #define HASH_MIN_SIZE 4UL | ||
29 | |||
30 | #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) | ||
31 | |||
32 | #ifdef CONFIG_PROVE_LOCKING | ||
33 | int lockdep_rht_mutex_is_held(const struct rhashtable *ht) | ||
34 | { | ||
35 | return ht->p.mutex_is_held(); | ||
36 | } | ||
37 | EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); | ||
38 | #endif | ||
39 | |||
40 | static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) | ||
41 | { | ||
42 | return (void *) he - ht->p.head_offset; | ||
43 | } | ||
44 | |||
45 | static u32 __hashfn(const struct rhashtable *ht, const void *key, | ||
46 | u32 len, u32 hsize) | ||
47 | { | ||
48 | u32 h; | ||
49 | |||
50 | h = ht->p.hashfn(key, len, ht->p.hash_rnd); | ||
51 | |||
52 | return h & (hsize - 1); | ||
53 | } | ||
54 | |||
55 | /** | ||
56 | * rhashtable_hashfn - compute hash for key of given length | ||
57 | * @ht: hash table to compute for | ||
58 | * @key: pointer to key | ||
59 | * @len: length of key | ||
60 | * | ||
61 | * Computes the hash value using the hash function provided in the 'hashfn' | ||
62 | * of struct rhashtable_params. The returned value is guaranteed to be | ||
63 | * smaller than the number of buckets in the hash table. | ||
64 | */ | ||
65 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len) | ||
66 | { | ||
67 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
68 | |||
69 | return __hashfn(ht, key, len, tbl->size); | ||
70 | } | ||
71 | EXPORT_SYMBOL_GPL(rhashtable_hashfn); | ||
72 | |||
73 | static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize) | ||
74 | { | ||
75 | if (unlikely(!ht->p.key_len)) { | ||
76 | u32 h; | ||
77 | |||
78 | h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); | ||
79 | |||
80 | return h & (hsize - 1); | ||
81 | } | ||
82 | |||
83 | return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize); | ||
84 | } | ||
85 | |||
86 | /** | ||
87 | * rhashtable_obj_hashfn - compute hash for hashed object | ||
88 | * @ht: hash table to compute for | ||
89 | * @ptr: pointer to hashed object | ||
90 | * | ||
91 | * Computes the hash value using either 'hashfn' or 'obj_hashfn', | ||
92 | * depending on whether the hash table is set up to work with | ||
93 | * a fixed length key. The returned value is guaranteed to be smaller than | ||
94 | * the number of buckets in the hash table. | ||
95 | */ | ||
96 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr) | ||
97 | { | ||
98 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
99 | |||
100 | return obj_hashfn(ht, ptr, tbl->size); | ||
101 | } | ||
102 | EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn); | ||
103 | |||
104 | static u32 head_hashfn(const struct rhashtable *ht, | ||
105 | const struct rhash_head *he, u32 hsize) | ||
106 | { | ||
107 | return obj_hashfn(ht, rht_obj(ht, he), hsize); | ||
108 | } | ||
109 | |||
110 | static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags) | ||
111 | { | ||
112 | struct bucket_table *tbl; | ||
113 | size_t size; | ||
114 | |||
115 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | ||
116 | tbl = kzalloc(size, flags); | ||
117 | if (tbl == NULL) | ||
118 | tbl = vzalloc(size); | ||
119 | |||
120 | if (tbl == NULL) | ||
121 | return NULL; | ||
122 | |||
123 | tbl->size = nbuckets; | ||
124 | |||
125 | return tbl; | ||
126 | } | ||
127 | |||
128 | static void bucket_table_free(const struct bucket_table *tbl) | ||
129 | { | ||
130 | kvfree(tbl); | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * rht_grow_above_75 - returns true if nelems > 0.75 * table-size | ||
135 | * @ht: hash table | ||
136 | * @new_size: new table size | ||
137 | */ | ||
138 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) | ||
139 | { | ||
140 | /* Expand table when exceeding 75% load */ | ||
141 | return ht->nelems > (new_size / 4 * 3); | ||
142 | } | ||
143 | EXPORT_SYMBOL_GPL(rht_grow_above_75); | ||
144 | |||
145 | /** | ||
146 | * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size | ||
147 | * @ht: hash table | ||
148 | * @new_size: new table size | ||
149 | */ | ||
150 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) | ||
151 | { | ||
152 | /* Shrink table beneath 30% load */ | ||
153 | return ht->nelems < (new_size * 3 / 10); | ||
154 | } | ||
155 | EXPORT_SYMBOL_GPL(rht_shrink_below_30); | ||
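A quick worked example of the integer math in these two default thresholds (a standalone userspace sketch, not kernel code): for a 16-bucket table, growth triggers at the 13th element (16 / 4 * 3 = 12) and shrinkage once the count drops below 4 (16 * 3 / 10 truncates to 4).

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>

    /* Mirrors the arithmetic of rht_grow_above_75()/rht_shrink_below_30(). */
    static bool grows(size_t nelems, size_t size)
    {
    	return nelems > (size / 4 * 3);
    }

    static bool shrinks(size_t nelems, size_t size)
    {
    	return nelems < (size * 3 / 10);
    }

    int main(void)
    {
    	assert(!grows(12, 16) && grows(13, 16));	/* 75% of 16 is 12 */
    	assert(!shrinks(4, 16) && shrinks(3, 16));	/* 16 * 3 / 10 == 4 */
    	return 0;
    }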
156 | |||
157 | static void hashtable_chain_unzip(const struct rhashtable *ht, | ||
158 | const struct bucket_table *new_tbl, | ||
159 | struct bucket_table *old_tbl, size_t n) | ||
160 | { | ||
161 | struct rhash_head *he, *p, *next; | ||
162 | unsigned int h; | ||
163 | |||
164 | /* Old bucket empty, no work needed. */ | ||
165 | p = rht_dereference(old_tbl->buckets[n], ht); | ||
166 | if (!p) | ||
167 | return; | ||
168 | |||
169 | /* Advance the old bucket pointer one or more times until it | ||
170 | * reaches a node that doesn't hash to the same new-table bucket | ||
171 | * as its predecessor. Call that predecessor p. | ||
172 | */ | ||
173 | h = head_hashfn(ht, p, new_tbl->size); | ||
174 | rht_for_each(he, p->next, ht) { | ||
175 | if (head_hashfn(ht, he, new_tbl->size) != h) | ||
176 | break; | ||
177 | p = he; | ||
178 | } | ||
179 | RCU_INIT_POINTER(old_tbl->buckets[n], p->next); | ||
180 | |||
181 | /* Find the subsequent node which does hash to the same | ||
182 | * bucket as node P, or NULL if no such node exists. | ||
183 | */ | ||
184 | next = NULL; | ||
185 | if (he) { | ||
186 | rht_for_each(he, he->next, ht) { | ||
187 | if (head_hashfn(ht, he, new_tbl->size) == h) { | ||
188 | next = he; | ||
189 | break; | ||
190 | } | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /* Set p's next pointer to that subsequent node pointer, | ||
195 | * bypassing the nodes which do not hash to p's bucket | ||
196 | */ | ||
197 | RCU_INIT_POINTER(p->next, next); | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * rhashtable_expand - Expand hash table while allowing concurrent lookups | ||
202 | * @ht: the hash table to expand | ||
203 | * @flags: allocation flags | ||
204 | * | ||
205 | * A secondary bucket array is allocated and the hash entries are migrated | ||
206 | * while keeping them on both lists until the end of the RCU grace period. | ||
207 | * | ||
208 | * This function may only be called in a context where it is safe to call | ||
209 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | ||
210 | * | ||
211 | * The caller must ensure that no concurrent table mutations take place. | ||
212 | * It is however valid to have concurrent lookups if they are RCU protected. | ||
213 | */ | ||
214 | int rhashtable_expand(struct rhashtable *ht, gfp_t flags) | ||
215 | { | ||
216 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | ||
217 | struct rhash_head *he; | ||
218 | unsigned int i, h; | ||
219 | bool complete; | ||
220 | |||
221 | ASSERT_RHT_MUTEX(ht); | ||
222 | |||
223 | if (ht->p.max_shift && ht->shift >= ht->p.max_shift) | ||
224 | return 0; | ||
225 | |||
226 | new_tbl = bucket_table_alloc(old_tbl->size * 2, flags); | ||
227 | if (new_tbl == NULL) | ||
228 | return -ENOMEM; | ||
229 | |||
230 | ht->shift++; | ||
231 | |||
232 | /* For each new bucket, search the corresponding old bucket | ||
233 | * for the first entry that hashes to the new bucket, and | ||
234 | * link the new bucket to that entry. Since all the entries | ||
235 | * which will end up in the new bucket appear in the same | ||
236 | * old bucket, this constructs an entirely valid new hash | ||
237 | * table, but with multiple buckets "zipped" together into a | ||
238 | * single imprecise chain. | ||
239 | */ | ||
240 | for (i = 0; i < new_tbl->size; i++) { | ||
241 | h = i & (old_tbl->size - 1); | ||
242 | rht_for_each(he, old_tbl->buckets[h], ht) { | ||
243 | if (head_hashfn(ht, he, new_tbl->size) == i) { | ||
244 | RCU_INIT_POINTER(new_tbl->buckets[i], he); | ||
245 | break; | ||
246 | } | ||
247 | } | ||
248 | } | ||
249 | |||
250 | /* Publish the new table pointer. Lookups may now traverse | ||
251 | * the new table, but they will not benefit from any | ||
252 | * additional efficiency until later steps unzip the buckets. | ||
253 | */ | ||
254 | rcu_assign_pointer(ht->tbl, new_tbl); | ||
255 | |||
256 | /* Unzip interleaved hash chains */ | ||
257 | do { | ||
258 | /* Wait for readers. All new readers will see the new | ||
259 | * table, and thus no references to the old table will | ||
260 | * remain. | ||
261 | */ | ||
262 | synchronize_rcu(); | ||
263 | |||
264 | /* For each bucket in the old table (each of which | ||
265 | * contains items from multiple buckets of the new | ||
266 | * table): ... | ||
267 | */ | ||
268 | complete = true; | ||
269 | for (i = 0; i < old_tbl->size; i++) { | ||
270 | hashtable_chain_unzip(ht, new_tbl, old_tbl, i); | ||
271 | if (old_tbl->buckets[i] != NULL) | ||
272 | complete = false; | ||
273 | } | ||
274 | } while (!complete); | ||
275 | |||
276 | bucket_table_free(old_tbl); | ||
277 | return 0; | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(rhashtable_expand); | ||
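The correctness of the zip/unzip scheme above rests on one invariant worth spelling out: because the bucket index is the hash masked to the table size, doubling a power-of-two table lets an entry move only from old bucket h to new bucket h or h + old_size. A minimal userspace check of that invariant (illustration only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	const uint32_t old_size = 4, new_size = 8;	/* power-of-two sizes */
    	uint32_t hash;

    	for (hash = 0; hash < 1024; hash++) {
    		uint32_t old_b = hash & (old_size - 1);
    		uint32_t new_b = hash & (new_size - 1);

    		/* each entry stays put or moves up by exactly old_size */
    		assert(new_b == old_b || new_b == old_b + old_size);
    	}
    	return 0;
    }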
280 | |||
281 | /** | ||
282 | * rhashtable_shrink - Shrink hash table while allowing concurrent lookups | ||
283 | * @ht: the hash table to shrink | ||
284 | * @flags: allocation flags | ||
285 | * | ||
286 | * This function may only be called in a context where it is safe to call | ||
287 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | ||
288 | * | ||
289 | * The caller must ensure that no concurrent table mutations take place. | ||
290 | * It is however valid to have concurrent lookups if they are RCU protected. | ||
291 | */ | ||
292 | int rhashtable_shrink(struct rhashtable *ht, gfp_t flags) | ||
293 | { | ||
294 | struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); | ||
295 | struct rhash_head __rcu **pprev; | ||
296 | unsigned int i; | ||
297 | |||
298 | ASSERT_RHT_MUTEX(ht); | ||
299 | |||
300 | if (tbl->size <= HASH_MIN_SIZE) | ||
301 | return 0; | ||
302 | |||
303 | ntbl = bucket_table_alloc(tbl->size / 2, flags); | ||
304 | if (ntbl == NULL) | ||
305 | return -ENOMEM; | ||
306 | |||
307 | ht->shift--; | ||
308 | |||
309 | /* Link each bucket in the new table to the first bucket | ||
310 | * in the old table that contains entries which will hash | ||
311 | * to the new bucket. | ||
312 | */ | ||
313 | for (i = 0; i < ntbl->size; i++) { | ||
314 | ntbl->buckets[i] = tbl->buckets[i]; | ||
315 | |||
316 | /* Walk to the end of this bucket's chain and link in the | ||
317 | * bucket from the upper half of the old table, whose entries | ||
318 | * also hash to this new bucket. | ||
319 | */ | ||
320 | for (pprev = &ntbl->buckets[i]; *pprev != NULL; | ||
321 | pprev = &rht_dereference(*pprev, ht)->next) | ||
322 | ; | ||
323 | RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); | ||
324 | } | ||
325 | |||
326 | /* Publish the new, valid hash table */ | ||
327 | rcu_assign_pointer(ht->tbl, ntbl); | ||
328 | |||
329 | /* Wait for readers. No new readers will have references to the | ||
330 | * old hash table. | ||
331 | */ | ||
332 | synchronize_rcu(); | ||
333 | |||
334 | bucket_table_free(tbl); | ||
335 | |||
336 | return 0; | ||
337 | } | ||
338 | EXPORT_SYMBOL_GPL(rhashtable_shrink); | ||
339 | |||
340 | /** | ||
341 | * rhashtable_insert - insert object into hash table | ||
342 | * @ht: hash table | ||
343 | * @obj: pointer to hash head inside object | ||
344 | * @flags: allocation flags (table expansion) | ||
345 | * | ||
346 | * Will automatically grow the table via rhashtable_expand() if the | ||
347 | * grow_decision function specified at rhashtable_init() returns true. | ||
348 | * | ||
349 | * The caller must ensure that no concurrent table mutations occur. It is | ||
350 | * however valid to have concurrent lookups if they are RCU protected. | ||
351 | */ | ||
352 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, | ||
353 | gfp_t flags) | ||
354 | { | ||
355 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | ||
356 | u32 hash; | ||
357 | |||
358 | ASSERT_RHT_MUTEX(ht); | ||
359 | |||
360 | hash = head_hashfn(ht, obj, tbl->size); | ||
361 | RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); | ||
362 | rcu_assign_pointer(tbl->buckets[hash], obj); | ||
363 | ht->nelems++; | ||
364 | |||
365 | if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) | ||
366 | rhashtable_expand(ht, flags); | ||
367 | } | ||
368 | EXPORT_SYMBOL_GPL(rhashtable_insert); | ||
369 | |||
370 | /** | ||
371 | * rhashtable_remove_pprev - remove object from hash table given previous element | ||
372 | * @ht: hash table | ||
373 | * @obj: pointer to hash head inside object | ||
374 | * @pprev: pointer to previous element | ||
375 | * @flags: allocation flags (table shrinking) | ||
376 | * | ||
377 | * Identical to rhashtable_remove() but caller is already aware of the element | ||
378 | * in front of the element to be deleted. This is in particular useful for | ||
379 | * deletion when combined with walking or lookup. | ||
380 | */ | ||
381 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | ||
382 | struct rhash_head __rcu **pprev, gfp_t flags) | ||
383 | { | ||
384 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | ||
385 | |||
386 | ASSERT_RHT_MUTEX(ht); | ||
387 | |||
388 | RCU_INIT_POINTER(*pprev, obj->next); | ||
389 | ht->nelems--; | ||
390 | |||
391 | if (ht->p.shrink_decision && | ||
392 | ht->p.shrink_decision(ht, tbl->size)) | ||
393 | rhashtable_shrink(ht, flags); | ||
394 | } | ||
395 | EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); | ||
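As the comment says, the pprev variant suits deletion combined with walking. A minimal sketch of that pattern, mirroring the loop in rhashtable_remove() below; should_delete() is a hypothetical predicate, and tbl/h are assumed to be set up as they are there:

    struct rhash_head __rcu **pprev = &tbl->buckets[h];
    struct rhash_head *he;

    rht_for_each(he, tbl->buckets[h], ht) {
    	if (should_delete(he)) {	/* hypothetical predicate */
    		rhashtable_remove_pprev(ht, he, pprev, GFP_KERNEL);
    		break;
    	}
    	pprev = &he->next;
    }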
396 | |||
397 | /** | ||
398 | * rhashtable_remove - remove object from hash table | ||
399 | * @ht: hash table | ||
400 | * @obj: pointer to hash head inside object | ||
401 | * @flags: allocation flags (table shrinking) | ||
402 | * | ||
403 | * Since the hash chain is singly linked, the removal operation needs to | ||
404 | * walk the bucket chain upon removal. The removal operation is thus | ||
405 | * considerably slower if the hash table is not correctly sized. | ||
406 | * | ||
407 | * Will automatically shrink the table via rhashtable_shrink() if the | ||
408 | * shrink_decision function specified at rhashtable_init() returns true. | ||
409 | * | ||
410 | * The caller must ensure that no concurrent table mutations occur. It is | ||
411 | * however valid to have concurrent lookups if they are RCU protected. | ||
412 | */ | ||
413 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj, | ||
414 | gfp_t flags) | ||
415 | { | ||
416 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | ||
417 | struct rhash_head __rcu **pprev; | ||
418 | struct rhash_head *he; | ||
419 | u32 h; | ||
420 | |||
421 | ASSERT_RHT_MUTEX(ht); | ||
422 | |||
423 | h = head_hashfn(ht, obj, tbl->size); | ||
424 | |||
425 | pprev = &tbl->buckets[h]; | ||
426 | rht_for_each(he, tbl->buckets[h], ht) { | ||
427 | if (he != obj) { | ||
428 | pprev = &he->next; | ||
429 | continue; | ||
430 | } | ||
431 | |||
432 | rhashtable_remove_pprev(ht, he, pprev, flags); | ||
433 | return true; | ||
434 | } | ||
435 | |||
436 | return false; | ||
437 | } | ||
438 | EXPORT_SYMBOL_GPL(rhashtable_remove); | ||
439 | |||
440 | /** | ||
441 | * rhashtable_lookup - lookup key in hash table | ||
442 | * @ht: hash table | ||
443 | * @key: pointer to key | ||
444 | * | ||
445 | * Computes the hash value for the key and traverses the bucket chain looking | ||
446 | * for an entry with an identical key. The first matching entry is returned. | ||
447 | * | ||
448 | * This lookup function may only be used for fixed-key hash tables (key_len | ||
449 | * parameter set). It will BUG() if used inappropriately. | ||
450 | * | ||
451 | * Lookups may occur in parallel with hash mutations as long as the lookup is | ||
452 | * guarded by rcu_read_lock(). The caller must take care of this. | ||
453 | */ | ||
454 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key) | ||
455 | { | ||
456 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
457 | struct rhash_head *he; | ||
458 | u32 h; | ||
459 | |||
460 | BUG_ON(!ht->p.key_len); | ||
461 | |||
462 | h = __hashfn(ht, key, ht->p.key_len, tbl->size); | ||
463 | rht_for_each_rcu(he, tbl->buckets[h], ht) { | ||
464 | if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, | ||
465 | ht->p.key_len)) | ||
466 | continue; | ||
467 | return rht_obj(ht, he); | ||
468 | } | ||
469 | |||
470 | return NULL; | ||
471 | } | ||
472 | EXPORT_SYMBOL_GPL(rhashtable_lookup); | ||
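A minimal sketch of a fixed-key lookup as an RCU reader, assuming a table of test_obj entries (the structure used by the self-test further down), keyed as in test_rht_init():

    struct test_obj *obj;
    u32 key = 42;

    rcu_read_lock();			/* lookups must be RCU protected */
    obj = rhashtable_lookup(&ht, &key);
    if (obj)
    	pr_info("found value %d\n", obj->value);
    rcu_read_unlock();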
473 | |||
474 | /** | ||
475 | * rhashtable_lookup_compare - search hash table with compare function | ||
476 | * @ht: hash table | ||
477 | * @hash: hash value of desired entry | ||
478 | * @compare: compare function, must return true on match | ||
479 | * @arg: argument passed on to compare function | ||
480 | * | ||
481 | * Traverses the bucket chain behind the provided hash value and calls the | ||
482 | * specified compare function for each entry. | ||
483 | * | ||
484 | * Lookups may occur in parallel with hash mutations as long as the lookup is | ||
485 | * guarded by rcu_read_lock(). The caller must take care of this. | ||
486 | * | ||
487 | * Returns the first entry on which the compare function returned true. | ||
488 | */ | ||
489 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, | ||
490 | bool (*compare)(void *, void *), void *arg) | ||
491 | { | ||
492 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
493 | struct rhash_head *he; | ||
494 | |||
495 | if (unlikely(hash >= tbl->size)) | ||
496 | return NULL; | ||
497 | |||
498 | rht_for_each_rcu(he, tbl->buckets[hash], ht) { | ||
499 | if (!compare(rht_obj(ht, he), arg)) | ||
500 | continue; | ||
501 | return rht_obj(ht, he); | ||
502 | } | ||
503 | |||
504 | return NULL; | ||
505 | } | ||
506 | EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); | ||
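The compare variant is for callers that need more than a memcmp() on a fixed key. A sketch (fragment only) pairing it with rhashtable_hashfn() above, so the supplied hash is already bucket-bounded; test_obj_cmp() is a hypothetical callback:

    static bool test_obj_cmp(void *obj, void *arg)
    {
    	return ((struct test_obj *)obj)->value == *(int *)arg;
    }

    	int needle = 42;
    	void *obj;

    	rcu_read_lock();
    	obj = rhashtable_lookup_compare(&ht,
    					rhashtable_hashfn(&ht, &needle,
    							  sizeof(needle)),
    					test_obj_cmp, &needle);
    	rcu_read_unlock();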
507 | |||
508 | static size_t rounded_hashtable_size(unsigned int nelem) | ||
509 | { | ||
510 | return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE); | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * rhashtable_init - initialize a new hash table | ||
515 | * @ht: hash table to be initialized | ||
516 | * @params: configuration parameters | ||
517 | * | ||
518 | * Initializes a new hash table based on the provided configuration | ||
519 | * parameters. A table can be configured either with a variable or | ||
520 | * fixed length key: | ||
521 | * | ||
522 | * Configuration Example 1: Fixed length keys | ||
523 | * struct test_obj { | ||
524 | * int key; | ||
525 | * void * my_member; | ||
526 | * struct rhash_head node; | ||
527 | * }; | ||
528 | * | ||
529 | * struct rhashtable_params params = { | ||
530 | * .head_offset = offsetof(struct test_obj, node), | ||
531 | * .key_offset = offsetof(struct test_obj, key), | ||
532 | * .key_len = sizeof(int), | ||
533 | * .hashfn = arch_fast_hash, | ||
534 | * .mutex_is_held = &my_mutex_is_held, | ||
535 | * }; | ||
536 | * | ||
537 | * Configuration Example 2: Variable length keys | ||
538 | * struct test_obj { | ||
539 | * [...] | ||
540 | * struct rhash_head node; | ||
541 | * }; | ||
542 | * | ||
543 | * u32 my_hash_fn(const void *data, u32 seed) | ||
544 | * { | ||
545 | * struct test_obj *obj = data; | ||
546 | * | ||
547 | * return [... hash ...]; | ||
548 | * } | ||
549 | * | ||
550 | * struct rhashtable_params params = { | ||
551 | * .head_offset = offsetof(struct test_obj, node), | ||
552 | * .hashfn = arch_fast_hash, | ||
553 | * .obj_hashfn = my_hash_fn, | ||
554 | * .mutex_is_held = &my_mutex_is_held, | ||
555 | * }; | ||
556 | */ | ||
557 | int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | ||
558 | { | ||
559 | struct bucket_table *tbl; | ||
560 | size_t size; | ||
561 | |||
562 | size = HASH_DEFAULT_SIZE; | ||
563 | |||
564 | if ((params->key_len && !params->hashfn) || | ||
565 | (!params->key_len && !params->obj_hashfn)) | ||
566 | return -EINVAL; | ||
567 | |||
568 | if (params->nelem_hint) | ||
569 | size = rounded_hashtable_size(params->nelem_hint); | ||
570 | |||
571 | tbl = bucket_table_alloc(size, GFP_KERNEL); | ||
572 | if (tbl == NULL) | ||
573 | return -ENOMEM; | ||
574 | |||
575 | memset(ht, 0, sizeof(*ht)); | ||
576 | ht->shift = ilog2(tbl->size); | ||
577 | memcpy(&ht->p, params, sizeof(*params)); | ||
578 | RCU_INIT_POINTER(ht->tbl, tbl); | ||
579 | |||
580 | if (!ht->p.hash_rnd) | ||
581 | get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); | ||
582 | |||
583 | return 0; | ||
584 | } | ||
585 | EXPORT_SYMBOL_GPL(rhashtable_init); | ||
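Putting the pieces together, a sketch of the full lifecycle built from the calls above; my_mutex_is_held() is an assumed helper reporting whether the caller holds the mutex serializing mutations, and obj is an assumed test_obj:

    struct rhashtable ht;
    struct rhashtable_params params = {
    	.head_offset	 = offsetof(struct test_obj, node),
    	.key_offset	 = offsetof(struct test_obj, value),
    	.key_len	 = sizeof(int),
    	.hashfn		 = arch_fast_hash,
    	.mutex_is_held	 = &my_mutex_is_held,
    	.grow_decision	 = rht_grow_above_75,
    	.shrink_decision = rht_shrink_below_30,
    };
    int err;

    err = rhashtable_init(&ht, &params);
    if (!err) {
    	rhashtable_insert(&ht, &obj->node, GFP_KERNEL);
    	/* ... lookups under rcu_read_lock() ... */
    	rhashtable_remove(&ht, &obj->node, GFP_KERNEL);
    	rhashtable_destroy(&ht);
    }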
586 | |||
587 | /** | ||
588 | * rhashtable_destroy - destroy hash table | ||
589 | * @ht: the hash table to destroy | ||
590 | * | ||
591 | * Frees the bucket array. This function is not rcu safe, therefore the caller | ||
592 | * has to make sure that no resizing may happen by unpublishing the hashtable | ||
593 | * and waiting for the quiescent cycle before releasing the bucket array. | ||
594 | */ | ||
595 | void rhashtable_destroy(const struct rhashtable *ht) | ||
596 | { | ||
597 | bucket_table_free(ht->tbl); | ||
598 | } | ||
599 | EXPORT_SYMBOL_GPL(rhashtable_destroy); | ||
600 | |||
601 | /************************************************************************** | ||
602 | * Self Test | ||
603 | **************************************************************************/ | ||
604 | |||
605 | #ifdef CONFIG_TEST_RHASHTABLE | ||
606 | |||
607 | #define TEST_HT_SIZE 8 | ||
608 | #define TEST_ENTRIES 2048 | ||
609 | #define TEST_PTR ((void *) 0xdeadbeef) | ||
610 | #define TEST_NEXPANDS 4 | ||
611 | |||
612 | static int test_mutex_is_held(void) | ||
613 | { | ||
614 | return 1; | ||
615 | } | ||
616 | |||
617 | struct test_obj { | ||
618 | void *ptr; | ||
619 | int value; | ||
620 | struct rhash_head node; | ||
621 | }; | ||
622 | |||
623 | static int __init test_rht_lookup(struct rhashtable *ht) | ||
624 | { | ||
625 | unsigned int i; | ||
626 | |||
627 | for (i = 0; i < TEST_ENTRIES * 2; i++) { | ||
628 | struct test_obj *obj; | ||
629 | bool expected = !(i % 2); | ||
630 | u32 key = i; | ||
631 | |||
632 | obj = rhashtable_lookup(ht, &key); | ||
633 | |||
634 | if (expected && !obj) { | ||
635 | pr_warn("Test failed: Could not find key %u\n", key); | ||
636 | return -ENOENT; | ||
637 | } else if (!expected && obj) { | ||
638 | pr_warn("Test failed: Unexpected entry found for key %u\n", | ||
639 | key); | ||
640 | return -EEXIST; | ||
641 | } else if (expected && obj) { | ||
642 | if (obj->ptr != TEST_PTR || obj->value != i) { | ||
643 | pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", | ||
644 | obj->ptr, TEST_PTR, obj->value, i); | ||
645 | return -EINVAL; | ||
646 | } | ||
647 | } | ||
648 | } | ||
649 | |||
650 | return 0; | ||
651 | } | ||
652 | |||
653 | static void test_bucket_stats(struct rhashtable *ht, | ||
654 | struct bucket_table *tbl, | ||
655 | bool quiet) | ||
656 | { | ||
657 | unsigned int cnt, i, total = 0; | ||
658 | struct test_obj *obj; | ||
659 | |||
660 | for (i = 0; i < tbl->size; i++) { | ||
661 | cnt = 0; | ||
662 | |||
663 | if (!quiet) | ||
664 | pr_info(" [%#4x/%zu]", i, tbl->size); | ||
665 | |||
666 | rht_for_each_entry_rcu(obj, tbl->buckets[i], node) { | ||
667 | cnt++; | ||
668 | total++; | ||
669 | if (!quiet) | ||
670 | pr_cont(" [%p],", obj); | ||
671 | } | ||
672 | |||
673 | if (!quiet) | ||
674 | pr_cont("\n [%#x] first element: %p, chain length: %u\n", | ||
675 | i, tbl->buckets[i], cnt); | ||
676 | } | ||
677 | |||
678 | pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", | ||
679 | total, ht->nelems, TEST_ENTRIES); | ||
680 | } | ||
681 | |||
682 | static int __init test_rhashtable(struct rhashtable *ht) | ||
683 | { | ||
684 | struct bucket_table *tbl; | ||
685 | struct test_obj *obj, *next; | ||
686 | int err; | ||
687 | unsigned int i; | ||
688 | |||
689 | /* | ||
690 | * Insertion Test: | ||
691 | * Insert TEST_ENTRIES into table with all keys even numbers | ||
692 | */ | ||
693 | pr_info(" Adding %d keys\n", TEST_ENTRIES); | ||
694 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
695 | struct test_obj *obj; | ||
696 | |||
697 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
698 | if (!obj) { | ||
699 | err = -ENOMEM; | ||
700 | goto error; | ||
701 | } | ||
702 | |||
703 | obj->ptr = TEST_PTR; | ||
704 | obj->value = i * 2; | ||
705 | |||
706 | rhashtable_insert(ht, &obj->node, GFP_KERNEL); | ||
707 | } | ||
708 | |||
709 | rcu_read_lock(); | ||
710 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
711 | test_bucket_stats(ht, tbl, true); | ||
712 | test_rht_lookup(ht); | ||
713 | rcu_read_unlock(); | ||
714 | |||
715 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
716 | pr_info(" Table expansion iteration %u...\n", i); | ||
717 | rhashtable_expand(ht, GFP_KERNEL); | ||
718 | |||
719 | rcu_read_lock(); | ||
720 | pr_info(" Verifying lookups...\n"); | ||
721 | test_rht_lookup(ht); | ||
722 | rcu_read_unlock(); | ||
723 | } | ||
724 | |||
725 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
726 | pr_info(" Table shrinkage iteration %u...\n", i); | ||
727 | rhashtable_shrink(ht, GFP_KERNEL); | ||
728 | |||
729 | rcu_read_lock(); | ||
730 | pr_info(" Verifying lookups...\n"); | ||
731 | test_rht_lookup(ht); | ||
732 | rcu_read_unlock(); | ||
733 | } | ||
734 | |||
735 | pr_info(" Deleting %d keys\n", TEST_ENTRIES); | ||
736 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
737 | u32 key = i * 2; | ||
738 | |||
739 | obj = rhashtable_lookup(ht, &key); | ||
740 | BUG_ON(!obj); | ||
741 | |||
742 | rhashtable_remove(ht, &obj->node, GFP_KERNEL); | ||
743 | kfree(obj); | ||
744 | } | ||
745 | |||
746 | return 0; | ||
747 | |||
748 | error: | ||
749 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
750 | for (i = 0; i < tbl->size; i++) | ||
751 | rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node) | ||
752 | kfree(obj); | ||
753 | |||
754 | return err; | ||
755 | } | ||
756 | |||
757 | static int __init test_rht_init(void) | ||
758 | { | ||
759 | struct rhashtable ht; | ||
760 | struct rhashtable_params params = { | ||
761 | .nelem_hint = TEST_HT_SIZE, | ||
762 | .head_offset = offsetof(struct test_obj, node), | ||
763 | .key_offset = offsetof(struct test_obj, value), | ||
764 | .key_len = sizeof(int), | ||
765 | .hashfn = arch_fast_hash, | ||
766 | .mutex_is_held = &test_mutex_is_held, | ||
767 | .grow_decision = rht_grow_above_75, | ||
768 | .shrink_decision = rht_shrink_below_30, | ||
769 | }; | ||
770 | int err; | ||
771 | |||
772 | pr_info("Running resizable hashtable tests...\n"); | ||
773 | |||
774 | err = rhashtable_init(&ht, ¶ms); | ||
775 | if (err < 0) { | ||
776 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", | ||
777 | err); | ||
778 | return err; | ||
779 | } | ||
780 | |||
781 | err = test_rhashtable(&ht); | ||
782 | |||
783 | rhashtable_destroy(&ht); | ||
784 | |||
785 | return err; | ||
786 | } | ||
787 | |||
788 | subsys_initcall(test_rht_init); | ||
789 | |||
790 | #endif /* CONFIG_TEST_RHASHTABLE */ | ||
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 3a8e8e8fb2a5..9cdf62f8accd 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(sg_nents); | |||
73 | **/ | 73 | **/ |
74 | struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) | 74 | struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents) |
75 | { | 75 | { |
76 | #ifndef ARCH_HAS_SG_CHAIN | 76 | #ifndef CONFIG_ARCH_HAS_SG_CHAIN |
77 | struct scatterlist *ret = &sgl[nents - 1]; | 77 | struct scatterlist *ret = &sgl[nents - 1]; |
78 | #else | 78 | #else |
79 | struct scatterlist *sg, *ret = NULL; | 79 | struct scatterlist *sg, *ret = NULL; |
@@ -165,6 +165,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) | |||
165 | * __sg_free_table - Free a previously mapped sg table | 165 | * __sg_free_table - Free a previously mapped sg table |
166 | * @table: The sg table header to use | 166 | * @table: The sg table header to use |
167 | * @max_ents: The maximum number of entries per single scatterlist | 167 | * @max_ents: The maximum number of entries per single scatterlist |
168 | * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk | ||
168 | * @free_fn: Free function | 169 | * @free_fn: Free function |
169 | * | 170 | * |
170 | * Description: | 171 | * Description: |
@@ -174,7 +175,7 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) | |||
174 | * | 175 | * |
175 | **/ | 176 | **/ |
176 | void __sg_free_table(struct sg_table *table, unsigned int max_ents, | 177 | void __sg_free_table(struct sg_table *table, unsigned int max_ents, |
177 | sg_free_fn *free_fn) | 178 | bool skip_first_chunk, sg_free_fn *free_fn) |
178 | { | 179 | { |
179 | struct scatterlist *sgl, *next; | 180 | struct scatterlist *sgl, *next; |
180 | 181 | ||
@@ -202,7 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, | |||
202 | } | 203 | } |
203 | 204 | ||
204 | table->orig_nents -= sg_size; | 205 | table->orig_nents -= sg_size; |
205 | free_fn(sgl, alloc_size); | 206 | /* keep any caller-preallocated first chunk */ |
207 | if (!skip_first_chunk) | ||
208 | free_fn(sgl, alloc_size); | ||
209 | skip_first_chunk = false; | ||
206 | sgl = next; | 210 | sgl = next; |
207 | } | 211 | } |
208 | 212 | ||
@@ -217,7 +221,7 @@ EXPORT_SYMBOL(__sg_free_table); | |||
217 | **/ | 221 | **/ |
218 | void sg_free_table(struct sg_table *table) | 222 | void sg_free_table(struct sg_table *table) |
219 | { | 223 | { |
220 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); | 224 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); |
221 | } | 225 | } |
222 | EXPORT_SYMBOL(sg_free_table); | 226 | EXPORT_SYMBOL(sg_free_table); |
223 | 227 | ||
@@ -241,8 +245,8 @@ EXPORT_SYMBOL(sg_free_table); | |||
241 | * | 245 | * |
242 | **/ | 246 | **/ |
243 | int __sg_alloc_table(struct sg_table *table, unsigned int nents, | 247 | int __sg_alloc_table(struct sg_table *table, unsigned int nents, |
244 | unsigned int max_ents, gfp_t gfp_mask, | 248 | unsigned int max_ents, struct scatterlist *first_chunk, |
245 | sg_alloc_fn *alloc_fn) | 249 | gfp_t gfp_mask, sg_alloc_fn *alloc_fn) |
246 | { | 250 | { |
247 | struct scatterlist *sg, *prv; | 251 | struct scatterlist *sg, *prv; |
248 | unsigned int left; | 252 | unsigned int left; |
@@ -251,7 +255,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
251 | 255 | ||
252 | if (nents == 0) | 256 | if (nents == 0) |
253 | return -EINVAL; | 257 | return -EINVAL; |
254 | #ifndef ARCH_HAS_SG_CHAIN | 258 | #ifndef CONFIG_ARCH_HAS_SG_CHAIN |
255 | if (WARN_ON_ONCE(nents > max_ents)) | 259 | if (WARN_ON_ONCE(nents > max_ents)) |
256 | return -EINVAL; | 260 | return -EINVAL; |
257 | #endif | 261 | #endif |
@@ -269,7 +273,12 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents, | |||
269 | 273 | ||
270 | left -= sg_size; | 274 | left -= sg_size; |
271 | 275 | ||
272 | sg = alloc_fn(alloc_size, gfp_mask); | 276 | if (first_chunk) { |
277 | sg = first_chunk; | ||
278 | first_chunk = NULL; | ||
279 | } else { | ||
280 | sg = alloc_fn(alloc_size, gfp_mask); | ||
281 | } | ||
273 | if (unlikely(!sg)) { | 282 | if (unlikely(!sg)) { |
274 | /* | 283 | /* |
275 | * Adjust entry count to reflect that the last | 284 | * Adjust entry count to reflect that the last |
@@ -324,9 +333,9 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
324 | int ret; | 333 | int ret; |
325 | 334 | ||
326 | ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, | 335 | ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC, |
327 | gfp_mask, sg_kmalloc); | 336 | NULL, gfp_mask, sg_kmalloc); |
328 | if (unlikely(ret)) | 337 | if (unlikely(ret)) |
329 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree); | 338 | __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); |
330 | 339 | ||
331 | return ret; | 340 | return ret; |
332 | } | 341 | } |
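The new first_chunk/skip_first_chunk parameters let a caller embed the first scatterlist chunk in its own allocation and fall back to alloc_fn only for overflow. A hedged sketch of such a caller; my_cmd, my_sg_alloc/my_sg_free, and the chunk size are assumptions, not part of this patch:

    #define MY_SG_CHUNK	8		/* assumed inline chunk size */

    struct my_cmd {
    	struct sg_table table;
    	struct scatterlist inline_sg[MY_SG_CHUNK];
    };

    static int my_cmd_init(struct my_cmd *cmd, unsigned int nents)
    {
    	int ret;

    	ret = __sg_alloc_table(&cmd->table, nents, MY_SG_CHUNK,
    			       cmd->inline_sg, GFP_KERNEL, my_sg_alloc);
    	if (ret)
    		__sg_free_table(&cmd->table, MY_SG_CHUNK, true, my_sg_free);
    	return ret;
    }

    static void my_cmd_exit(struct my_cmd *cmd)
    {
    	/* skip_first_chunk == true: the first chunk lives inside my_cmd */
    	__sg_free_table(&cmd->table, MY_SG_CHUNK, true, my_sg_free);
    }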
diff --git a/lib/string.c b/lib/string.c index 992bf30af759..f3c6ff596414 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes) | |||
807 | return check_bytes8(start, value, bytes); | 807 | return check_bytes8(start, value, bytes); |
808 | 808 | ||
809 | value64 = value; | 809 | value64 = value; |
810 | #if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 | 810 | #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
811 | value64 *= 0x0101010101010101; | 811 | value64 *= 0x0101010101010101; |
812 | #elif defined(ARCH_HAS_FAST_MULTIPLIER) | 812 | #elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) |
813 | value64 *= 0x01010101; | 813 | value64 *= 0x01010101; |
814 | value64 |= value64 << 32; | 814 | value64 |= value64 << 32; |
815 | #else | 815 | #else |
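For reference, the trick being gated here: multiplying a byte by 0x0101010101010101 replicates it across all eight lanes of a 64-bit word, which the slower shift-or fallback builds up step by step. A tiny userspace check (illustration only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t value64 = 0xab;
    	uint64_t v = 0xab;

    	/* fast-multiplier variant */
    	assert(value64 * 0x0101010101010101ULL == 0xabababababababababULL);

    	/* equivalent shift-or construction */
    	v |= v << 8;
    	v |= v << 16;
    	v |= v << 32;
    	assert(v == 0xabababababababababULL);
    	return 0;
    }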
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index ed5c1454dd62..29033f319aea 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
@@ -25,12 +25,15 @@ | |||
25 | int string_get_size(u64 size, const enum string_size_units units, | 25 | int string_get_size(u64 size, const enum string_size_units units, |
26 | char *buf, int len) | 26 | char *buf, int len) |
27 | { | 27 | { |
28 | static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", | 28 | static const char *const units_10[] = { |
29 | "EB", "ZB", "YB", NULL}; | 29 | "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL |
30 | static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", | 30 | }; |
31 | "EiB", "ZiB", "YiB", NULL }; | 31 | static const char *const units_2[] = { |
32 | static const char **units_str[] = { | 32 | "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", |
33 | [STRING_UNITS_10] = units_10, | 33 | NULL |
34 | }; | ||
35 | static const char *const *const units_str[] = { | ||
36 | [STRING_UNITS_10] = units_10, | ||
34 | [STRING_UNITS_2] = units_2, | 37 | [STRING_UNITS_2] = units_2, |
35 | }; | 38 | }; |
36 | static const unsigned int divisor[] = { | 39 | static const unsigned int divisor[] = { |
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c index bea3f3fa3f02..4137bca5f8e8 100644 --- a/lib/test-kstrtox.c +++ b/lib/test-kstrtox.c | |||
@@ -3,7 +3,7 @@ | |||
3 | #include <linux/module.h> | 3 | #include <linux/module.h> |
4 | 4 | ||
5 | #define for_each_test(i, test) \ | 5 | #define for_each_test(i, test) \ |
6 | for (i = 0; i < sizeof(test) / sizeof(test[0]); i++) | 6 | for (i = 0; i < ARRAY_SIZE(test); i++) |
7 | 7 | ||
8 | struct test_fail { | 8 | struct test_fail { |
9 | const char *str; | 9 | const char *str; |
diff --git a/lib/test_bpf.c b/lib/test_bpf.c index c579e0f58818..89e0345733bd 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c | |||
@@ -66,7 +66,7 @@ struct bpf_test { | |||
66 | const char *descr; | 66 | const char *descr; |
67 | union { | 67 | union { |
68 | struct sock_filter insns[MAX_INSNS]; | 68 | struct sock_filter insns[MAX_INSNS]; |
69 | struct sock_filter_int insns_int[MAX_INSNS]; | 69 | struct bpf_insn insns_int[MAX_INSNS]; |
70 | } u; | 70 | } u; |
71 | __u8 aux; | 71 | __u8 aux; |
72 | __u8 data[MAX_DATA]; | 72 | __u8 data[MAX_DATA]; |
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp) | |||
1761 | return len + 1; | 1761 | return len + 1; |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | static struct sk_filter *generate_filter(int which, int *err) | 1764 | static struct bpf_prog *generate_filter(int which, int *err) |
1765 | { | 1765 | { |
1766 | struct sk_filter *fp; | 1766 | struct bpf_prog *fp; |
1767 | struct sock_fprog_kern fprog; | 1767 | struct sock_fprog_kern fprog; |
1768 | unsigned int flen = probe_filter_length(tests[which].u.insns); | 1768 | unsigned int flen = probe_filter_length(tests[which].u.insns); |
1769 | __u8 test_type = tests[which].aux & TEST_TYPE_MASK; | 1769 | __u8 test_type = tests[which].aux & TEST_TYPE_MASK; |
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err) | |||
1773 | fprog.filter = tests[which].u.insns; | 1773 | fprog.filter = tests[which].u.insns; |
1774 | fprog.len = flen; | 1774 | fprog.len = flen; |
1775 | 1775 | ||
1776 | *err = sk_unattached_filter_create(&fp, &fprog); | 1776 | *err = bpf_prog_create(&fp, &fprog); |
1777 | if (tests[which].aux & FLAG_EXPECTED_FAIL) { | 1777 | if (tests[which].aux & FLAG_EXPECTED_FAIL) { |
1778 | if (*err == -EINVAL) { | 1778 | if (*err == -EINVAL) { |
1779 | pr_cont("PASS\n"); | 1779 | pr_cont("PASS\n"); |
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err) | |||
1798 | break; | 1798 | break; |
1799 | 1799 | ||
1800 | case INTERNAL: | 1800 | case INTERNAL: |
1801 | fp = kzalloc(sk_filter_size(flen), GFP_KERNEL); | 1801 | fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL); |
1802 | if (fp == NULL) { | 1802 | if (fp == NULL) { |
1803 | pr_cont("UNEXPECTED_FAIL no memory left\n"); | 1803 | pr_cont("UNEXPECTED_FAIL no memory left\n"); |
1804 | *err = -ENOMEM; | 1804 | *err = -ENOMEM; |
@@ -1807,9 +1807,9 @@ static struct sk_filter *generate_filter(int which, int *err) | |||
1807 | 1807 | ||
1808 | fp->len = flen; | 1808 | fp->len = flen; |
1809 | memcpy(fp->insnsi, tests[which].u.insns_int, | 1809 | memcpy(fp->insnsi, tests[which].u.insns_int, |
1810 | fp->len * sizeof(struct sock_filter_int)); | 1810 | fp->len * sizeof(struct bpf_insn)); |
1811 | 1811 | ||
1812 | sk_filter_select_runtime(fp); | 1812 | bpf_prog_select_runtime(fp); |
1813 | break; | 1813 | break; |
1814 | } | 1814 | } |
1815 | 1815 | ||
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err) | |||
1817 | return fp; | 1817 | return fp; |
1818 | } | 1818 | } |
1819 | 1819 | ||
1820 | static void release_filter(struct sk_filter *fp, int which) | 1820 | static void release_filter(struct bpf_prog *fp, int which) |
1821 | { | 1821 | { |
1822 | __u8 test_type = tests[which].aux & TEST_TYPE_MASK; | 1822 | __u8 test_type = tests[which].aux & TEST_TYPE_MASK; |
1823 | 1823 | ||
1824 | switch (test_type) { | 1824 | switch (test_type) { |
1825 | case CLASSIC: | 1825 | case CLASSIC: |
1826 | sk_unattached_filter_destroy(fp); | 1826 | bpf_prog_destroy(fp); |
1827 | break; | 1827 | break; |
1828 | case INTERNAL: | 1828 | case INTERNAL: |
1829 | sk_filter_free(fp); | 1829 | bpf_prog_free(fp); |
1830 | break; | 1830 | break; |
1831 | } | 1831 | } |
1832 | } | 1832 | } |
1833 | 1833 | ||
1834 | static int __run_one(const struct sk_filter *fp, const void *data, | 1834 | static int __run_one(const struct bpf_prog *fp, const void *data, |
1835 | int runs, u64 *duration) | 1835 | int runs, u64 *duration) |
1836 | { | 1836 | { |
1837 | u64 start, finish; | 1837 | u64 start, finish; |
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data, | |||
1840 | start = ktime_to_us(ktime_get()); | 1840 | start = ktime_to_us(ktime_get()); |
1841 | 1841 | ||
1842 | for (i = 0; i < runs; i++) | 1842 | for (i = 0; i < runs; i++) |
1843 | ret = SK_RUN_FILTER(fp, data); | 1843 | ret = BPF_PROG_RUN(fp, data); |
1844 | 1844 | ||
1845 | finish = ktime_to_us(ktime_get()); | 1845 | finish = ktime_to_us(ktime_get()); |
1846 | 1846 | ||
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data, | |||
1850 | return ret; | 1850 | return ret; |
1851 | } | 1851 | } |
1852 | 1852 | ||
1853 | static int run_one(const struct sk_filter *fp, struct bpf_test *test) | 1853 | static int run_one(const struct bpf_prog *fp, struct bpf_test *test) |
1854 | { | 1854 | { |
1855 | int err_cnt = 0, i, runs = MAX_TESTRUNS; | 1855 | int err_cnt = 0, i, runs = MAX_TESTRUNS; |
1856 | 1856 | ||
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void) | |||
1884 | int i, err_cnt = 0, pass_cnt = 0; | 1884 | int i, err_cnt = 0, pass_cnt = 0; |
1885 | 1885 | ||
1886 | for (i = 0; i < ARRAY_SIZE(tests); i++) { | 1886 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
1887 | struct sk_filter *fp; | 1887 | struct bpf_prog *fp; |
1888 | int err; | 1888 | int err; |
1889 | 1889 | ||
1890 | pr_info("#%d %s ", i, tests[i].descr); | 1890 | pr_info("#%d %s ", i, tests[i].descr); |
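A minimal sketch of the renamed classic-filter API as this file now uses it; insns, flen, and data stand in for a real filter array and packet buffer:

    struct bpf_prog *fp;
    struct sock_fprog_kern fprog = {
    	.filter	= insns,	/* struct sock_filter array, assumed */
    	.len	= flen,
    };
    u32 ret;
    int err;

    err = bpf_prog_create(&fp, &fprog);
    if (!err) {
    	ret = BPF_PROG_RUN(fp, data);	/* run on an assumed packet */
    	bpf_prog_destroy(fp);
    }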
diff --git a/lib/test_firmware.c b/lib/test_firmware.c new file mode 100644 index 000000000000..86374c1c49a4 --- /dev/null +++ b/lib/test_firmware.c | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * This module provides an interface to trigger and test firmware loading. | ||
3 | * | ||
4 | * It is designed to be used for basic evaluation of the firmware loading | ||
5 | * subsystem (for example when validating firmware verification). It lacks | ||
6 | * any extra dependencies, and will not normally be loaded by the system | ||
7 | * unless explicitly requested by name. | ||
8 | */ | ||
9 | |||
10 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/printk.h> | ||
15 | #include <linux/firmware.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/miscdevice.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | |||
22 | static DEFINE_MUTEX(test_fw_mutex); | ||
23 | static const struct firmware *test_firmware; | ||
24 | |||
25 | static ssize_t test_fw_misc_read(struct file *f, char __user *buf, | ||
26 | size_t size, loff_t *offset) | ||
27 | { | ||
28 | ssize_t rc = 0; | ||
29 | |||
30 | mutex_lock(&test_fw_mutex); | ||
31 | if (test_firmware) | ||
32 | rc = simple_read_from_buffer(buf, size, offset, | ||
33 | test_firmware->data, | ||
34 | test_firmware->size); | ||
35 | mutex_unlock(&test_fw_mutex); | ||
36 | return rc; | ||
37 | } | ||
38 | |||
39 | static const struct file_operations test_fw_fops = { | ||
40 | .owner = THIS_MODULE, | ||
41 | .read = test_fw_misc_read, | ||
42 | }; | ||
43 | |||
44 | static struct miscdevice test_fw_misc_device = { | ||
45 | .minor = MISC_DYNAMIC_MINOR, | ||
46 | .name = "test_firmware", | ||
47 | .fops = &test_fw_fops, | ||
48 | }; | ||
49 | |||
50 | static ssize_t trigger_request_store(struct device *dev, | ||
51 | struct device_attribute *attr, | ||
52 | const char *buf, size_t count) | ||
53 | { | ||
54 | int rc; | ||
55 | char *name; | ||
56 | |||
57 | name = kzalloc(count + 1, GFP_KERNEL); | ||
58 | if (!name) | ||
59 | return -ENOSPC; | ||
60 | memcpy(name, buf, count); | ||
61 | |||
62 | pr_info("loading '%s'\n", name); | ||
63 | |||
64 | mutex_lock(&test_fw_mutex); | ||
65 | release_firmware(test_firmware); | ||
66 | test_firmware = NULL; | ||
67 | rc = request_firmware(&test_firmware, name, dev); | ||
68 | if (rc) | ||
69 | pr_info("load of '%s' failed: %d\n", name, rc); | ||
70 | pr_info("loaded: %zu\n", test_firmware ? test_firmware->size : 0); | ||
71 | mutex_unlock(&test_fw_mutex); | ||
72 | |||
73 | kfree(name); | ||
74 | |||
75 | return count; | ||
76 | } | ||
77 | static DEVICE_ATTR_WO(trigger_request); | ||
78 | |||
79 | static int __init test_firmware_init(void) | ||
80 | { | ||
81 | int rc; | ||
82 | |||
83 | rc = misc_register(&test_fw_misc_device); | ||
84 | if (rc) { | ||
85 | pr_err("could not register misc device: %d\n", rc); | ||
86 | return rc; | ||
87 | } | ||
88 | rc = device_create_file(test_fw_misc_device.this_device, | ||
89 | &dev_attr_trigger_request); | ||
90 | if (rc) { | ||
91 | pr_err("could not create sysfs interface: %d\n", rc); | ||
92 | goto dereg; | ||
93 | } | ||
94 | |||
95 | pr_warn("interface ready\n"); | ||
96 | |||
97 | return 0; | ||
98 | dereg: | ||
99 | misc_deregister(&test_fw_misc_device); | ||
100 | return rc; | ||
101 | } | ||
102 | |||
103 | module_init(test_firmware_init); | ||
104 | |||
105 | static void __exit test_firmware_exit(void) | ||
106 | { | ||
107 | release_firmware(test_firmware); | ||
108 | device_remove_file(test_fw_misc_device.this_device, | ||
109 | &dev_attr_trigger_request); | ||
110 | misc_deregister(&test_fw_misc_device); | ||
111 | pr_warn("removed interface\n"); | ||
112 | } | ||
113 | |||
114 | module_exit(test_firmware_exit); | ||
115 | |||
116 | MODULE_AUTHOR("Kees Cook <keescook@chromium.org>"); | ||
117 | MODULE_LICENSE("GPL"); | ||
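A hypothetical userspace counterpart for exercising the module above; the sysfs path depends on the udev layout, and both paths and the firmware name are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
    	char buf[4096];
    	ssize_t n;
    	int fd;

    	/* ask the module to request_firmware() a blob by name */
    	fd = open("/sys/devices/virtual/misc/test_firmware/trigger_request",
    		  O_WRONLY);
    	if (fd < 0)
    		return 1;
    	n = write(fd, "test-firmware.bin", strlen("test-firmware.bin"));
    	close(fd);

    	/* read the loaded blob back through the misc device */
    	fd = open("/dev/test_firmware", O_RDONLY);
    	if (fd < 0)
    		return 1;
    	n = read(fd, buf, sizeof(buf));
    	close(fd);
    	printf("read %zd firmware bytes\n", n);
    	return 0;
    }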
diff --git a/lib/zlib_deflate/deflate.c b/lib/zlib_deflate/deflate.c index d63381e8e333..d20ef458f137 100644 --- a/lib/zlib_deflate/deflate.c +++ b/lib/zlib_deflate/deflate.c | |||
@@ -250,52 +250,6 @@ int zlib_deflateInit2( | |||
250 | } | 250 | } |
251 | 251 | ||
252 | /* ========================================================================= */ | 252 | /* ========================================================================= */ |
253 | #if 0 | ||
254 | int zlib_deflateSetDictionary( | ||
255 | z_streamp strm, | ||
256 | const Byte *dictionary, | ||
257 | uInt dictLength | ||
258 | ) | ||
259 | { | ||
260 | deflate_state *s; | ||
261 | uInt length = dictLength; | ||
262 | uInt n; | ||
263 | IPos hash_head = 0; | ||
264 | |||
265 | if (strm == NULL || strm->state == NULL || dictionary == NULL) | ||
266 | return Z_STREAM_ERROR; | ||
267 | |||
268 | s = (deflate_state *) strm->state; | ||
269 | if (s->status != INIT_STATE) return Z_STREAM_ERROR; | ||
270 | |||
271 | strm->adler = zlib_adler32(strm->adler, dictionary, dictLength); | ||
272 | |||
273 | if (length < MIN_MATCH) return Z_OK; | ||
274 | if (length > MAX_DIST(s)) { | ||
275 | length = MAX_DIST(s); | ||
276 | #ifndef USE_DICT_HEAD | ||
277 | dictionary += dictLength - length; /* use the tail of the dictionary */ | ||
278 | #endif | ||
279 | } | ||
280 | memcpy((char *)s->window, dictionary, length); | ||
281 | s->strstart = length; | ||
282 | s->block_start = (long)length; | ||
283 | |||
284 | /* Insert all strings in the hash table (except for the last two bytes). | ||
285 | * s->lookahead stays null, so s->ins_h will be recomputed at the next | ||
286 | * call of fill_window. | ||
287 | */ | ||
288 | s->ins_h = s->window[0]; | ||
289 | UPDATE_HASH(s, s->ins_h, s->window[1]); | ||
290 | for (n = 0; n <= length - MIN_MATCH; n++) { | ||
291 | INSERT_STRING(s, n, hash_head); | ||
292 | } | ||
293 | if (hash_head) hash_head = 0; /* to make compiler happy */ | ||
294 | return Z_OK; | ||
295 | } | ||
296 | #endif /* 0 */ | ||
297 | |||
298 | /* ========================================================================= */ | ||
299 | int zlib_deflateReset( | 253 | int zlib_deflateReset( |
300 | z_streamp strm | 254 | z_streamp strm |
301 | ) | 255 | ) |
@@ -326,45 +280,6 @@ int zlib_deflateReset( | |||
326 | return Z_OK; | 280 | return Z_OK; |
327 | } | 281 | } |
328 | 282 | ||
329 | /* ========================================================================= */ | ||
330 | #if 0 | ||
331 | int zlib_deflateParams( | ||
332 | z_streamp strm, | ||
333 | int level, | ||
334 | int strategy | ||
335 | ) | ||
336 | { | ||
337 | deflate_state *s; | ||
338 | compress_func func; | ||
339 | int err = Z_OK; | ||
340 | |||
341 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
342 | s = (deflate_state *) strm->state; | ||
343 | |||
344 | if (level == Z_DEFAULT_COMPRESSION) { | ||
345 | level = 6; | ||
346 | } | ||
347 | if (level < 0 || level > 9 || strategy < 0 || strategy > Z_HUFFMAN_ONLY) { | ||
348 | return Z_STREAM_ERROR; | ||
349 | } | ||
350 | func = configuration_table[s->level].func; | ||
351 | |||
352 | if (func != configuration_table[level].func && strm->total_in != 0) { | ||
353 | /* Flush the last buffer: */ | ||
354 | err = zlib_deflate(strm, Z_PARTIAL_FLUSH); | ||
355 | } | ||
356 | if (s->level != level) { | ||
357 | s->level = level; | ||
358 | s->max_lazy_match = configuration_table[level].max_lazy; | ||
359 | s->good_match = configuration_table[level].good_length; | ||
360 | s->nice_match = configuration_table[level].nice_length; | ||
361 | s->max_chain_length = configuration_table[level].max_chain; | ||
362 | } | ||
363 | s->strategy = strategy; | ||
364 | return err; | ||
365 | } | ||
366 | #endif /* 0 */ | ||
367 | |||
368 | /* ========================================================================= | 283 | /* ========================================================================= |
369 | * Put a short in the pending buffer. The 16-bit value is put in MSB order. | 284 | * Put a short in the pending buffer. The 16-bit value is put in MSB order. |
370 | * IN assertion: the stream state is correct and there is enough room in | 285 | * IN assertion: the stream state is correct and there is enough room in |
@@ -568,64 +483,6 @@ int zlib_deflateEnd( | |||
568 | return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; | 483 | return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; |
569 | } | 484 | } |
570 | 485 | ||
571 | /* ========================================================================= | ||
572 | * Copy the source state to the destination state. | ||
573 | */ | ||
574 | #if 0 | ||
575 | int zlib_deflateCopy ( | ||
576 | z_streamp dest, | ||
577 | z_streamp source | ||
578 | ) | ||
579 | { | ||
580 | #ifdef MAXSEG_64K | ||
581 | return Z_STREAM_ERROR; | ||
582 | #else | ||
583 | deflate_state *ds; | ||
584 | deflate_state *ss; | ||
585 | ush *overlay; | ||
586 | deflate_workspace *mem; | ||
587 | |||
588 | |||
589 | if (source == NULL || dest == NULL || source->state == NULL) { | ||
590 | return Z_STREAM_ERROR; | ||
591 | } | ||
592 | |||
593 | ss = (deflate_state *) source->state; | ||
594 | |||
595 | *dest = *source; | ||
596 | |||
597 | mem = (deflate_workspace *) dest->workspace; | ||
598 | |||
599 | ds = &(mem->deflate_memory); | ||
600 | |||
601 | dest->state = (struct internal_state *) ds; | ||
602 | *ds = *ss; | ||
603 | ds->strm = dest; | ||
604 | |||
605 | ds->window = (Byte *) mem->window_memory; | ||
606 | ds->prev = (Pos *) mem->prev_memory; | ||
607 | ds->head = (Pos *) mem->head_memory; | ||
608 | overlay = (ush *) mem->overlay_memory; | ||
609 | ds->pending_buf = (uch *) overlay; | ||
610 | |||
611 | memcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); | ||
612 | memcpy(ds->prev, ss->prev, ds->w_size * sizeof(Pos)); | ||
613 | memcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos)); | ||
614 | memcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); | ||
615 | |||
616 | ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); | ||
617 | ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); | ||
618 | ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; | ||
619 | |||
620 | ds->l_desc.dyn_tree = ds->dyn_ltree; | ||
621 | ds->d_desc.dyn_tree = ds->dyn_dtree; | ||
622 | ds->bl_desc.dyn_tree = ds->bl_tree; | ||
623 | |||
624 | return Z_OK; | ||
625 | #endif | ||
626 | } | ||
627 | #endif /* 0 */ | ||
628 | |||
629 | /* =========================================================================== | 486 | /* =========================================================================== |
630 | * Read a new buffer from the current input stream, update the adler32 | 487 | * Read a new buffer from the current input stream, update the adler32 |
631 | * and total number of bytes read. All deflate() input goes through | 488 | * and total number of bytes read. All deflate() input goes through |
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c index f5ce87b0800e..58a733b10387 100644 --- a/lib/zlib_inflate/inflate.c +++ b/lib/zlib_inflate/inflate.c | |||
@@ -45,21 +45,6 @@ int zlib_inflateReset(z_streamp strm) | |||
45 | return Z_OK; | 45 | return Z_OK; |
46 | } | 46 | } |
47 | 47 | ||
48 | #if 0 | ||
49 | int zlib_inflatePrime(z_streamp strm, int bits, int value) | ||
50 | { | ||
51 | struct inflate_state *state; | ||
52 | |||
53 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
54 | state = (struct inflate_state *)strm->state; | ||
55 | if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; | ||
56 | value &= (1L << bits) - 1; | ||
57 | state->hold += value << state->bits; | ||
58 | state->bits += bits; | ||
59 | return Z_OK; | ||
60 | } | ||
61 | #endif | ||
62 | |||
63 | int zlib_inflateInit2(z_streamp strm, int windowBits) | 48 | int zlib_inflateInit2(z_streamp strm, int windowBits) |
64 | { | 49 | { |
65 | struct inflate_state *state; | 50 | struct inflate_state *state; |
@@ -761,123 +746,6 @@ int zlib_inflateEnd(z_streamp strm) | |||
761 | return Z_OK; | 746 | return Z_OK; |
762 | } | 747 | } |
763 | 748 | ||
764 | #if 0 | ||
765 | int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary, | ||
766 | uInt dictLength) | ||
767 | { | ||
768 | struct inflate_state *state; | ||
769 | unsigned long id; | ||
770 | |||
771 | /* check state */ | ||
772 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
773 | state = (struct inflate_state *)strm->state; | ||
774 | if (state->wrap != 0 && state->mode != DICT) | ||
775 | return Z_STREAM_ERROR; | ||
776 | |||
777 | /* check for correct dictionary id */ | ||
778 | if (state->mode == DICT) { | ||
779 | id = zlib_adler32(0L, NULL, 0); | ||
780 | id = zlib_adler32(id, dictionary, dictLength); | ||
781 | if (id != state->check) | ||
782 | return Z_DATA_ERROR; | ||
783 | } | ||
784 | |||
785 | /* copy dictionary to window */ | ||
786 | zlib_updatewindow(strm, strm->avail_out); | ||
787 | |||
788 | if (dictLength > state->wsize) { | ||
789 | memcpy(state->window, dictionary + dictLength - state->wsize, | ||
790 | state->wsize); | ||
791 | state->whave = state->wsize; | ||
792 | } | ||
793 | else { | ||
794 | memcpy(state->window + state->wsize - dictLength, dictionary, | ||
795 | dictLength); | ||
796 | state->whave = dictLength; | ||
797 | } | ||
798 | state->havedict = 1; | ||
799 | return Z_OK; | ||
800 | } | ||
801 | #endif | ||
802 | |||
803 | #if 0 | ||
804 | /* | ||
805 | Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found | ||
806 | or when out of input. When called, *have is the number of pattern bytes | ||
807 | found in order so far, in 0..3. On return *have is updated to the new | ||
808 | state. If on return *have equals four, then the pattern was found and the | ||
809 | return value is how many bytes were read including the last byte of the | ||
810 | pattern. If *have is less than four, then the pattern has not been found | ||
811 | yet and the return value is len. In the latter case, zlib_syncsearch() can be | ||
812 | called again with more data and the *have state. *have is initialized to | ||
813 | zero for the first call. | ||
814 | */ | ||
815 | static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf, | ||
816 | unsigned len) | ||
817 | { | ||
818 | unsigned got; | ||
819 | unsigned next; | ||
820 | |||
821 | got = *have; | ||
822 | next = 0; | ||
823 | while (next < len && got < 4) { | ||
824 | if ((int)(buf[next]) == (got < 2 ? 0 : 0xff)) | ||
825 | got++; | ||
826 | else if (buf[next]) | ||
827 | got = 0; | ||
828 | else | ||
829 | got = 4 - got; | ||
830 | next++; | ||
831 | } | ||
832 | *have = got; | ||
833 | return next; | ||
834 | } | ||
835 | #endif | ||
836 | |||
837 | #if 0 | ||
838 | int zlib_inflateSync(z_streamp strm) | ||
839 | { | ||
840 | unsigned len; /* number of bytes to look at or looked at */ | ||
841 | unsigned long in, out; /* temporary to save total_in and total_out */ | ||
842 | unsigned char buf[4]; /* to restore bit buffer to byte string */ | ||
843 | struct inflate_state *state; | ||
844 | |||
845 | /* check parameters */ | ||
846 | if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; | ||
847 | state = (struct inflate_state *)strm->state; | ||
848 | if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; | ||
849 | |||
850 | /* if first time, start search in bit buffer */ | ||
851 | if (state->mode != SYNC) { | ||
852 | state->mode = SYNC; | ||
853 | state->hold <<= state->bits & 7; | ||
854 | state->bits -= state->bits & 7; | ||
855 | len = 0; | ||
856 | while (state->bits >= 8) { | ||
857 | buf[len++] = (unsigned char)(state->hold); | ||
858 | state->hold >>= 8; | ||
859 | state->bits -= 8; | ||
860 | } | ||
861 | state->have = 0; | ||
862 | zlib_syncsearch(&(state->have), buf, len); | ||
863 | } | ||
864 | |||
865 | /* search available input */ | ||
866 | len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in); | ||
867 | strm->avail_in -= len; | ||
868 | strm->next_in += len; | ||
869 | strm->total_in += len; | ||
870 | |||
871 | /* return no joy or set up to restart inflate() on a new block */ | ||
872 | if (state->have != 4) return Z_DATA_ERROR; | ||
873 | in = strm->total_in; out = strm->total_out; | ||
874 | zlib_inflateReset(strm); | ||
875 | strm->total_in = in; strm->total_out = out; | ||
876 | state->mode = TYPE; | ||
877 | return Z_OK; | ||
878 | } | ||
879 | #endif | ||
880 | |||
881 | /* | 749 | /* |
882 | * This subroutine adds the data at next_in/avail_in to the output history | 750 | * This subroutine adds the data at next_in/avail_in to the output history |
883 | * without performing any output. The output buffer must be "caught up"; | 751 | * without performing any output. The output buffer must be "caught up"; |