Diffstat (limited to 'lib')
50 files changed, 1470 insertions, 558 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index b1445b22a6de..c5e84fbcb30b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
| @@ -46,10 +46,6 @@ config GENERIC_IOMAP | |||
| 46 | bool | 46 | bool |
| 47 | select GENERIC_PCI_IOMAP | 47 | select GENERIC_PCI_IOMAP |
| 48 | 48 | ||
| 49 | config GENERIC_IO | ||
| 50 | bool | ||
| 51 | default n | ||
| 52 | |||
| 53 | config STMP_DEVICE | 49 | config STMP_DEVICE |
| 54 | bool | 50 | bool |
| 55 | 51 | ||
| @@ -584,6 +580,24 @@ config PRIME_NUMBERS | |||
| 584 | tristate | 580 | tristate |
| 585 | 581 | ||
| 586 | config STRING_SELFTEST | 582 | config STRING_SELFTEST |
| 587 | bool "Test string functions" | 583 | tristate "Test string functions" |
| 588 | 584 | ||
| 589 | endmenu | 585 | endmenu |
| 586 | |||
| 587 | config GENERIC_ASHLDI3 | ||
| 588 | bool | ||
| 589 | |||
| 590 | config GENERIC_ASHRDI3 | ||
| 591 | bool | ||
| 592 | |||
| 593 | config GENERIC_LSHRDI3 | ||
| 594 | bool | ||
| 595 | |||
| 596 | config GENERIC_MULDI3 | ||
| 597 | bool | ||
| 598 | |||
| 599 | config GENERIC_CMPDI2 | ||
| 600 | bool | ||
| 601 | |||
| 602 | config GENERIC_UCMPDI2 | ||
| 603 | bool | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index dfdad67d8f6c..9d5b78aad4c5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -280,7 +280,6 @@ config PAGE_OWNER | |||
| 280 | 280 | ||
| 281 | config DEBUG_FS | 281 | config DEBUG_FS |
| 282 | bool "Debug Filesystem" | 282 | bool "Debug Filesystem" |
| 283 | select SRCU | ||
| 284 | help | 283 | help |
| 285 | debugfs is a virtual file system that kernel developers use to put | 284 | debugfs is a virtual file system that kernel developers use to put |
| 286 | debugging files into. Enable this option to be able to read and | 285 | debugging files into. Enable this option to be able to read and |
| @@ -376,7 +375,7 @@ config STACK_VALIDATION | |||
| 376 | that runtime stack traces are more reliable. | 375 | that runtime stack traces are more reliable. |
| 377 | 376 | ||
| 378 | This is also a prerequisite for generation of ORC unwind data, which | 377 | This is also a prerequisite for generation of ORC unwind data, which |
| 379 | is needed for CONFIG_ORC_UNWINDER. | 378 | is needed for CONFIG_UNWINDER_ORC. |
| 380 | 379 | ||
| 381 | For more information, see | 380 | For more information, see |
| 382 | tools/objtool/Documentation/stack-validation.txt. | 381 | tools/objtool/Documentation/stack-validation.txt. |
| @@ -504,7 +503,7 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT | |||
| 504 | 503 | ||
| 505 | config DEBUG_SLAB | 504 | config DEBUG_SLAB |
| 506 | bool "Debug slab memory allocations" | 505 | bool "Debug slab memory allocations" |
| 507 | depends on DEBUG_KERNEL && SLAB && !KMEMCHECK | 506 | depends on DEBUG_KERNEL && SLAB |
| 508 | help | 507 | help |
| 509 | Say Y here to have the kernel do limited verification on memory | 508 | Say Y here to have the kernel do limited verification on memory |
| 510 | allocation as well as poisoning memory on free to catch use of freed | 509 | allocation as well as poisoning memory on free to catch use of freed |
| @@ -516,7 +515,7 @@ config DEBUG_SLAB_LEAK | |||
| 516 | 515 | ||
| 517 | config SLUB_DEBUG_ON | 516 | config SLUB_DEBUG_ON |
| 518 | bool "SLUB debugging on by default" | 517 | bool "SLUB debugging on by default" |
| 519 | depends on SLUB && SLUB_DEBUG && !KMEMCHECK | 518 | depends on SLUB && SLUB_DEBUG |
| 520 | default n | 519 | default n |
| 521 | help | 520 | help |
| 522 | Boot with debugging on by default. SLUB boots by default with | 521 | Boot with debugging on by default. SLUB boots by default with |
| @@ -730,8 +729,6 @@ config DEBUG_STACKOVERFLOW | |||
| 730 | 729 | ||
| 731 | If in doubt, say "N". | 730 | If in doubt, say "N". |
| 732 | 731 | ||
| 733 | source "lib/Kconfig.kmemcheck" | ||
| 734 | |||
| 735 | source "lib/Kconfig.kasan" | 732 | source "lib/Kconfig.kasan" |
| 736 | 733 | ||
| 737 | endmenu # "Memory Debugging" | 734 | endmenu # "Memory Debugging" |
| @@ -759,6 +756,16 @@ config KCOV | |||
| 759 | 756 | ||
| 760 | For more details, see Documentation/dev-tools/kcov.rst. | 757 | For more details, see Documentation/dev-tools/kcov.rst. |
| 761 | 758 | ||
| 759 | config KCOV_ENABLE_COMPARISONS | ||
| 760 | bool "Enable comparison operands collection by KCOV" | ||
| 761 | depends on KCOV | ||
| 762 | default n | ||
| 763 | help | ||
| 764 | KCOV also exposes operands of every comparison in the instrumented | ||
| 765 | code along with operand sizes and PCs of the comparison instructions. | ||
| 766 | These operands can be used by fuzzing engines to improve the quality | ||
| 767 | of fuzzing coverage. | ||
| 768 | |||
| 762 | config KCOV_INSTRUMENT_ALL | 769 | config KCOV_INSTRUMENT_ALL |
| 763 | bool "Instrument all code by default" | 770 | bool "Instrument all code by default" |
| 764 | depends on KCOV | 771 | depends on KCOV |
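The new KCOV_ENABLE_COMPARISONS option is easiest to picture as an extra hook around every comparison in instrumented code. The sketch below is a hand-written userspace stand-in for that instrumentation, not the real kcov ABI: the hook name trace_cmp is invented, and __builtin_return_address(0) is only a rough stand-in for the comparison PC; the real hooks (__sanitizer_cov_trace_cmp*) are emitted by the compiler and their record format is defined by the kcov interface.

#include <stdint.h>
#include <stdio.h>

/* Hand-written stand-in for compiler-emitted comparison instrumentation. */
static void trace_cmp(uint64_t arg1, uint64_t arg2, void *pc)
{
        printf("cmp near %p: %#llx vs %#llx\n", pc,
               (unsigned long long)arg1, (unsigned long long)arg2);
}

static int check_magic(uint32_t value)
{
        /* Rough stand-in for the PC of the comparison instruction. */
        trace_cmp(value, 0xdeadbeefu, __builtin_return_address(0));
        return value == 0xdeadbeefu;
}

int main(void)
{
        check_magic(0x1234);
        return 0;
}

A fuzzing engine that observes the (arg1, arg2) pairs can mutate its input toward the constants the code compares it against, which is the coverage-quality gain the help text describes.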
| @@ -1092,8 +1099,6 @@ config PROVE_LOCKING | |||
| 1092 | select DEBUG_MUTEXES | 1099 | select DEBUG_MUTEXES |
| 1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES | 1100 | select DEBUG_RT_MUTEXES if RT_MUTEXES |
| 1094 | select DEBUG_LOCK_ALLOC | 1101 | select DEBUG_LOCK_ALLOC |
| 1095 | select LOCKDEP_CROSSRELEASE if BROKEN | ||
| 1096 | select LOCKDEP_COMPLETIONS if BROKEN | ||
| 1097 | select TRACE_IRQFLAGS | 1102 | select TRACE_IRQFLAGS |
| 1098 | default n | 1103 | default n |
| 1099 | help | 1104 | help |
| @@ -1163,22 +1168,6 @@ config LOCK_STAT | |||
| 1163 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. | 1168 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. |
| 1164 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) | 1169 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) |
| 1165 | 1170 | ||
| 1166 | config LOCKDEP_CROSSRELEASE | ||
| 1167 | bool | ||
| 1168 | help | ||
| 1169 | This makes lockdep work for crosslock which is a lock allowed to | ||
| 1170 | be released in a different context from the acquisition context. | ||
| 1171 | Normally a lock must be released in the context acquiring the lock. | ||
| 1172 | However, relexing this constraint helps synchronization primitives | ||
| 1173 | such as page locks or completions can use the lock correctness | ||
| 1174 | detector, lockdep. | ||
| 1175 | |||
| 1176 | config LOCKDEP_COMPLETIONS | ||
| 1177 | bool | ||
| 1178 | help | ||
| 1179 | A deadlock caused by wait_for_completion() and complete() can be | ||
| 1180 | detected by lockdep using crossrelease feature. | ||
| 1181 | |||
| 1182 | config DEBUG_LOCKDEP | 1171 | config DEBUG_LOCKDEP |
| 1183 | bool "Lock dependency engine debugging" | 1172 | bool "Lock dependency engine debugging" |
| 1184 | depends on DEBUG_KERNEL && LOCKDEP | 1173 | depends on DEBUG_KERNEL && LOCKDEP |
| @@ -1838,6 +1827,15 @@ config TEST_BPF | |||
| 1838 | 1827 | ||
| 1839 | If unsure, say N. | 1828 | If unsure, say N. |
| 1840 | 1829 | ||
| 1830 | config TEST_FIND_BIT | ||
| 1831 | tristate "Test find_bit functions" | ||
| 1832 | default n | ||
| 1833 | help | ||
| 1834 | This builds the "test_find_bit" module that measures the | ||
| 1835 | performance of the find_*_bit() functions. | ||
| 1836 | |||
| 1837 | If unsure, say N. | ||
| 1838 | |||
| 1841 | config TEST_FIRMWARE | 1839 | config TEST_FIRMWARE |
| 1842 | tristate "Test firmware loading via userspace interface" | 1840 | tristate "Test firmware loading via userspace interface" |
| 1843 | default n | 1841 | default n |
diff --git a/lib/Kconfig.kmemcheck b/lib/Kconfig.kmemcheck deleted file mode 100644 index 846e039a86b4..000000000000 --- a/lib/Kconfig.kmemcheck +++ /dev/null | |||
| @@ -1,94 +0,0 @@ | |||
| 1 | config HAVE_ARCH_KMEMCHECK | ||
| 2 | bool | ||
| 3 | |||
| 4 | if HAVE_ARCH_KMEMCHECK | ||
| 5 | |||
| 6 | menuconfig KMEMCHECK | ||
| 7 | bool "kmemcheck: trap use of uninitialized memory" | ||
| 8 | depends on DEBUG_KERNEL | ||
| 9 | depends on !X86_USE_3DNOW | ||
| 10 | depends on SLUB || SLAB | ||
| 11 | depends on !CC_OPTIMIZE_FOR_SIZE | ||
| 12 | depends on !FUNCTION_TRACER | ||
| 13 | select FRAME_POINTER | ||
| 14 | select STACKTRACE | ||
| 15 | default n | ||
| 16 | help | ||
| 17 | This option enables tracing of dynamically allocated kernel memory | ||
| 18 | to see if memory is used before it has been given an initial value. | ||
| 19 | Be aware that this requires half of your memory for bookkeeping and | ||
| 20 | will insert extra code at *every* read and write to tracked memory | ||
| 21 | thus slow down the kernel code (but user code is unaffected). | ||
| 22 | |||
| 23 | The kernel may be started with kmemcheck=0 or kmemcheck=1 to disable | ||
| 24 | or enable kmemcheck at boot-time. If the kernel is started with | ||
| 25 | kmemcheck=0, the large memory and CPU overhead is not incurred. | ||
| 26 | |||
| 27 | choice | ||
| 28 | prompt "kmemcheck: default mode at boot" | ||
| 29 | depends on KMEMCHECK | ||
| 30 | default KMEMCHECK_ONESHOT_BY_DEFAULT | ||
| 31 | help | ||
| 32 | This option controls the default behaviour of kmemcheck when the | ||
| 33 | kernel boots and no kmemcheck= parameter is given. | ||
| 34 | |||
| 35 | config KMEMCHECK_DISABLED_BY_DEFAULT | ||
| 36 | bool "disabled" | ||
| 37 | depends on KMEMCHECK | ||
| 38 | |||
| 39 | config KMEMCHECK_ENABLED_BY_DEFAULT | ||
| 40 | bool "enabled" | ||
| 41 | depends on KMEMCHECK | ||
| 42 | |||
| 43 | config KMEMCHECK_ONESHOT_BY_DEFAULT | ||
| 44 | bool "one-shot" | ||
| 45 | depends on KMEMCHECK | ||
| 46 | help | ||
| 47 | In one-shot mode, only the first error detected is reported before | ||
| 48 | kmemcheck is disabled. | ||
| 49 | |||
| 50 | endchoice | ||
| 51 | |||
| 52 | config KMEMCHECK_QUEUE_SIZE | ||
| 53 | int "kmemcheck: error queue size" | ||
| 54 | depends on KMEMCHECK | ||
| 55 | default 64 | ||
| 56 | help | ||
| 57 | Select the maximum number of errors to store in the queue. Since | ||
| 58 | errors can occur virtually anywhere and in any context, we need a | ||
| 59 | temporary storage area which is guarantueed not to generate any | ||
| 60 | other faults. The queue will be emptied as soon as a tasklet may | ||
| 61 | be scheduled. If the queue is full, new error reports will be | ||
| 62 | lost. | ||
| 63 | |||
| 64 | config KMEMCHECK_SHADOW_COPY_SHIFT | ||
| 65 | int "kmemcheck: shadow copy size (5 => 32 bytes, 6 => 64 bytes)" | ||
| 66 | depends on KMEMCHECK | ||
| 67 | range 2 8 | ||
| 68 | default 5 | ||
| 69 | help | ||
| 70 | Select the number of shadow bytes to save along with each entry of | ||
| 71 | the queue. These bytes indicate what parts of an allocation are | ||
| 72 | initialized, uninitialized, etc. and will be displayed when an | ||
| 73 | error is detected to help the debugging of a particular problem. | ||
| 74 | |||
| 75 | config KMEMCHECK_PARTIAL_OK | ||
| 76 | bool "kmemcheck: allow partially uninitialized memory" | ||
| 77 | depends on KMEMCHECK | ||
| 78 | default y | ||
| 79 | help | ||
| 80 | This option works around certain GCC optimizations that produce | ||
| 81 | 32-bit reads from 16-bit variables where the upper 16 bits are | ||
| 82 | thrown away afterwards. This may of course also hide some real | ||
| 83 | bugs. | ||
| 84 | |||
| 85 | config KMEMCHECK_BITOPS_OK | ||
| 86 | bool "kmemcheck: allow bit-field manipulation" | ||
| 87 | depends on KMEMCHECK | ||
| 88 | default n | ||
| 89 | help | ||
| 90 | This option silences warnings that would be generated for bit-field | ||
| 91 | accesses where not all the bits are initialized at the same time. | ||
| 92 | This may also hide some real bugs. | ||
| 93 | |||
| 94 | endif | ||
diff --git a/lib/Makefile b/lib/Makefile index b8f2c16fccaa..d11c48ec8ffd 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -40,12 +40,14 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \ | |||
| 40 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ | 40 | bsearch.o find_bit.o llist.o memweight.o kfifo.o \ |
| 41 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ | 41 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \ |
| 42 | once.o refcount.o usercopy.o errseq.o | 42 | once.o refcount.o usercopy.o errseq.o |
| 43 | obj-$(CONFIG_STRING_SELFTEST) += test_string.o | ||
| 43 | obj-y += string_helpers.o | 44 | obj-y += string_helpers.o |
| 44 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 45 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
| 45 | obj-y += hexdump.o | 46 | obj-y += hexdump.o |
| 46 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o | 47 | obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o |
| 47 | obj-y += kstrtox.o | 48 | obj-y += kstrtox.o |
| 48 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | 49 | obj-$(CONFIG_TEST_BPF) += test_bpf.o |
| 50 | obj-$(CONFIG_TEST_FIND_BIT) += test_find_bit.o | ||
| 49 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | 51 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o |
| 50 | obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o | 52 | obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o |
| 51 | obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o | 53 | obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o |
| @@ -248,3 +250,11 @@ UBSAN_SANITIZE_ubsan.o := n | |||
| 248 | obj-$(CONFIG_SBITMAP) += sbitmap.o | 250 | obj-$(CONFIG_SBITMAP) += sbitmap.o |
| 249 | 251 | ||
| 250 | obj-$(CONFIG_PARMAN) += parman.o | 252 | obj-$(CONFIG_PARMAN) += parman.o |
| 253 | |||
| 254 | # GCC library routines | ||
| 255 | obj-$(CONFIG_GENERIC_ASHLDI3) += ashldi3.o | ||
| 256 | obj-$(CONFIG_GENERIC_ASHRDI3) += ashrdi3.o | ||
| 257 | obj-$(CONFIG_GENERIC_LSHRDI3) += lshrdi3.o | ||
| 258 | obj-$(CONFIG_GENERIC_MULDI3) += muldi3.o | ||
| 259 | obj-$(CONFIG_GENERIC_CMPDI2) += cmpdi2.o | ||
| 260 | obj-$(CONFIG_GENERIC_UCMPDI2) += ucmpdi2.o | ||
diff --git a/lib/ashldi3.c b/lib/ashldi3.c new file mode 100644 index 000000000000..3ffc46e3bb6c --- /dev/null +++ b/lib/ashldi3.c | |||
| @@ -0,0 +1,44 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, see the file COPYING, or write | ||
| 14 | * to the Free Software Foundation, Inc. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/export.h> | ||
| 18 | |||
| 19 | #include <linux/libgcc.h> | ||
| 20 | |||
| 21 | long long notrace __ashldi3(long long u, word_type b) | ||
| 22 | { | ||
| 23 | DWunion uu, w; | ||
| 24 | word_type bm; | ||
| 25 | |||
| 26 | if (b == 0) | ||
| 27 | return u; | ||
| 28 | |||
| 29 | uu.ll = u; | ||
| 30 | bm = 32 - b; | ||
| 31 | |||
| 32 | if (bm <= 0) { | ||
| 33 | w.s.low = 0; | ||
| 34 | w.s.high = (unsigned int) uu.s.low << -bm; | ||
| 35 | } else { | ||
| 36 | const unsigned int carries = (unsigned int) uu.s.low >> bm; | ||
| 37 | |||
| 38 | w.s.low = (unsigned int) uu.s.low << b; | ||
| 39 | w.s.high = ((unsigned int) uu.s.high << b) | carries; | ||
| 40 | } | ||
| 41 | |||
| 42 | return w.ll; | ||
| 43 | } | ||
| 44 | EXPORT_SYMBOL(__ashldi3); | ||
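The DWunion arithmetic in __ashldi3() can be checked against the native 64-bit operator on hosts that have one. The following standalone userspace model is only a sketch: it assumes little-endian word order inside the union, whereas the kernel's DWunion definition selects the order per architecture.

#include <assert.h>
#include <stdint.h>

typedef union {
        struct { uint32_t low, high; } s;   /* little-endian order assumed */
        uint64_t ll;
} dwunion;

/* Same structure as __ashldi3(): build a 64-bit left shift from two
 * 32-bit words, handling shifts of 32..63 bits in the first branch. */
static uint64_t shl64(uint64_t u, int b)
{
        dwunion uu = { .ll = u }, w;
        int bm = 32 - b;

        if (b == 0)
                return u;

        if (bm <= 0) {
                w.s.low = 0;
                w.s.high = uu.s.low << -bm;
        } else {
                w.s.low = uu.s.low << b;
                w.s.high = (uu.s.high << b) | (uu.s.low >> bm);
        }
        return w.ll;
}

int main(void)
{
        assert(shl64(0x0000000012345678ULL, 36) == 0x0000000012345678ULL << 36);
        assert(shl64(0x0123456789abcdefULL, 5) == 0x0123456789abcdefULL << 5);
        return 0;
}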
diff --git a/lib/ashrdi3.c b/lib/ashrdi3.c new file mode 100644 index 000000000000..ea054550f0e8 --- /dev/null +++ b/lib/ashrdi3.c | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, see the file COPYING, or write | ||
| 14 | * to the Free Software Foundation, Inc. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/export.h> | ||
| 18 | |||
| 19 | #include <linux/libgcc.h> | ||
| 20 | |||
| 21 | long long notrace __ashrdi3(long long u, word_type b) | ||
| 22 | { | ||
| 23 | DWunion uu, w; | ||
| 24 | word_type bm; | ||
| 25 | |||
| 26 | if (b == 0) | ||
| 27 | return u; | ||
| 28 | |||
| 29 | uu.ll = u; | ||
| 30 | bm = 32 - b; | ||
| 31 | |||
| 32 | if (bm <= 0) { | ||
| 33 | /* w.s.high = 1..1 or 0..0 */ | ||
| 34 | w.s.high = | ||
| 35 | uu.s.high >> 31; | ||
| 36 | w.s.low = uu.s.high >> -bm; | ||
| 37 | } else { | ||
| 38 | const unsigned int carries = (unsigned int) uu.s.high << bm; | ||
| 39 | |||
| 40 | w.s.high = uu.s.high >> b; | ||
| 41 | w.s.low = ((unsigned int) uu.s.low >> b) | carries; | ||
| 42 | } | ||
| 43 | |||
| 44 | return w.ll; | ||
| 45 | } | ||
| 46 | EXPORT_SYMBOL(__ashrdi3); | ||
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 1ef0cec38d78..dc14beae2c9a 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c | |||
| @@ -313,42 +313,47 @@ next_op: | |||
| 313 | 313 | ||
| 314 | /* Decide how to handle the operation */ | 314 | /* Decide how to handle the operation */ |
| 315 | switch (op) { | 315 | switch (op) { |
| 316 | case ASN1_OP_MATCH_ANY_ACT: | ||
| 317 | case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: | ||
| 318 | case ASN1_OP_COND_MATCH_ANY_ACT: | ||
| 319 | case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: | ||
| 320 | ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len); | ||
| 321 | if (ret < 0) | ||
| 322 | return ret; | ||
| 323 | goto skip_data; | ||
| 324 | |||
| 325 | case ASN1_OP_MATCH_ACT: | ||
| 326 | case ASN1_OP_MATCH_ACT_OR_SKIP: | ||
| 327 | case ASN1_OP_COND_MATCH_ACT_OR_SKIP: | ||
| 328 | ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len); | ||
| 329 | if (ret < 0) | ||
| 330 | return ret; | ||
| 331 | goto skip_data; | ||
| 332 | |||
| 333 | case ASN1_OP_MATCH: | 316 | case ASN1_OP_MATCH: |
| 334 | case ASN1_OP_MATCH_OR_SKIP: | 317 | case ASN1_OP_MATCH_OR_SKIP: |
| 318 | case ASN1_OP_MATCH_ACT: | ||
| 319 | case ASN1_OP_MATCH_ACT_OR_SKIP: | ||
| 335 | case ASN1_OP_MATCH_ANY: | 320 | case ASN1_OP_MATCH_ANY: |
| 336 | case ASN1_OP_MATCH_ANY_OR_SKIP: | 321 | case ASN1_OP_MATCH_ANY_OR_SKIP: |
| 322 | case ASN1_OP_MATCH_ANY_ACT: | ||
| 323 | case ASN1_OP_MATCH_ANY_ACT_OR_SKIP: | ||
| 337 | case ASN1_OP_COND_MATCH_OR_SKIP: | 324 | case ASN1_OP_COND_MATCH_OR_SKIP: |
| 325 | case ASN1_OP_COND_MATCH_ACT_OR_SKIP: | ||
| 338 | case ASN1_OP_COND_MATCH_ANY: | 326 | case ASN1_OP_COND_MATCH_ANY: |
| 339 | case ASN1_OP_COND_MATCH_ANY_OR_SKIP: | 327 | case ASN1_OP_COND_MATCH_ANY_OR_SKIP: |
| 340 | skip_data: | 328 | case ASN1_OP_COND_MATCH_ANY_ACT: |
| 329 | case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP: | ||
| 330 | |||
| 341 | if (!(flags & FLAG_CONS)) { | 331 | if (!(flags & FLAG_CONS)) { |
| 342 | if (flags & FLAG_INDEFINITE_LENGTH) { | 332 | if (flags & FLAG_INDEFINITE_LENGTH) { |
| 333 | size_t tmp = dp; | ||
| 334 | |||
| 343 | ret = asn1_find_indefinite_length( | 335 | ret = asn1_find_indefinite_length( |
| 344 | data, datalen, &dp, &len, &errmsg); | 336 | data, datalen, &tmp, &len, &errmsg); |
| 345 | if (ret < 0) | 337 | if (ret < 0) |
| 346 | goto error; | 338 | goto error; |
| 347 | } else { | ||
| 348 | dp += len; | ||
| 349 | } | 339 | } |
| 350 | pr_debug("- LEAF: %zu\n", len); | 340 | pr_debug("- LEAF: %zu\n", len); |
| 351 | } | 341 | } |
| 342 | |||
| 343 | if (op & ASN1_OP_MATCH__ACT) { | ||
| 344 | unsigned char act; | ||
| 345 | |||
| 346 | if (op & ASN1_OP_MATCH__ANY) | ||
| 347 | act = machine[pc + 1]; | ||
| 348 | else | ||
| 349 | act = machine[pc + 2]; | ||
| 350 | ret = actions[act](context, hdr, tag, data + dp, len); | ||
| 351 | if (ret < 0) | ||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | |||
| 355 | if (!(flags & FLAG_CONS)) | ||
| 356 | dp += len; | ||
| 352 | pc += asn1_op_lengths[op]; | 357 | pc += asn1_op_lengths[op]; |
| 353 | goto next_op; | 358 | goto next_op; |
| 354 | 359 | ||
| @@ -434,6 +439,8 @@ next_op: | |||
| 434 | else | 439 | else |
| 435 | act = machine[pc + 1]; | 440 | act = machine[pc + 1]; |
| 436 | ret = actions[act](context, hdr, 0, data + tdp, len); | 441 | ret = actions[act](context, hdr, 0, data + tdp, len); |
| 442 | if (ret < 0) | ||
| 443 | return ret; | ||
| 437 | } | 444 | } |
| 438 | pc += asn1_op_lengths[op]; | 445 | pc += asn1_op_lengths[op]; |
| 439 | goto next_op; | 446 | goto next_op; |
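The rework above folds all MATCH* opcodes into one case: the element length is computed first, the action (if the opcode carries one) runs while dp still points at the element's data, and only afterwards is dp advanced past it. Which bytecode slot holds the action index is derived from flag bits encoded in the opcode. The sketch below illustrates that dispatch only; the OP_FLAG_* values are invented for illustration, the real masks are ASN1_OP_MATCH__ANY and ASN1_OP_MATCH__ACT from the ASN.1 decoder headers.

/* Invented flag values; see the real ASN1_OP_* definitions. */
#define OP_FLAG_ACT 0x01
#define OP_FLAG_ANY 0x04

static unsigned char match_action_slot(unsigned char op,
                                        const unsigned char *machine,
                                        unsigned int pc)
{
        /* "ANY" opcodes carry no tag operand, so their action index sits
         * one byte earlier in the bytecode than for plain MATCH_ACT ops. */
        return (op & OP_FLAG_ANY) ? machine[pc + 1] : machine[pc + 2];
}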
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 4e53be8bc590..b77d51da8c73 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
| @@ -39,7 +39,7 @@ begin_node: | |||
| 39 | /* Descend through a shortcut */ | 39 | /* Descend through a shortcut */ |
| 40 | shortcut = assoc_array_ptr_to_shortcut(cursor); | 40 | shortcut = assoc_array_ptr_to_shortcut(cursor); |
| 41 | smp_read_barrier_depends(); | 41 | smp_read_barrier_depends(); |
| 42 | cursor = ACCESS_ONCE(shortcut->next_node); | 42 | cursor = READ_ONCE(shortcut->next_node); |
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | node = assoc_array_ptr_to_node(cursor); | 45 | node = assoc_array_ptr_to_node(cursor); |
| @@ -55,7 +55,7 @@ begin_node: | |||
| 55 | */ | 55 | */ |
| 56 | has_meta = 0; | 56 | has_meta = 0; |
| 57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 57 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
| 58 | ptr = ACCESS_ONCE(node->slots[slot]); | 58 | ptr = READ_ONCE(node->slots[slot]); |
| 59 | has_meta |= (unsigned long)ptr; | 59 | has_meta |= (unsigned long)ptr; |
| 60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 60 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
| 61 | /* We need a barrier between the read of the pointer | 61 | /* We need a barrier between the read of the pointer |
| @@ -89,7 +89,7 @@ continue_node: | |||
| 89 | smp_read_barrier_depends(); | 89 | smp_read_barrier_depends(); |
| 90 | 90 | ||
| 91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 91 | for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
| 92 | ptr = ACCESS_ONCE(node->slots[slot]); | 92 | ptr = READ_ONCE(node->slots[slot]); |
| 93 | if (assoc_array_ptr_is_meta(ptr)) { | 93 | if (assoc_array_ptr_is_meta(ptr)) { |
| 94 | cursor = ptr; | 94 | cursor = ptr; |
| 95 | goto begin_node; | 95 | goto begin_node; |
| @@ -98,7 +98,7 @@ continue_node: | |||
| 98 | 98 | ||
| 99 | finished_node: | 99 | finished_node: |
| 100 | /* Move up to the parent (may need to skip back over a shortcut) */ | 100 | /* Move up to the parent (may need to skip back over a shortcut) */ |
| 101 | parent = ACCESS_ONCE(node->back_pointer); | 101 | parent = READ_ONCE(node->back_pointer); |
| 102 | slot = node->parent_slot; | 102 | slot = node->parent_slot; |
| 103 | if (parent == stop) | 103 | if (parent == stop) |
| 104 | return 0; | 104 | return 0; |
| @@ -107,7 +107,7 @@ finished_node: | |||
| 107 | shortcut = assoc_array_ptr_to_shortcut(parent); | 107 | shortcut = assoc_array_ptr_to_shortcut(parent); |
| 108 | smp_read_barrier_depends(); | 108 | smp_read_barrier_depends(); |
| 109 | cursor = parent; | 109 | cursor = parent; |
| 110 | parent = ACCESS_ONCE(shortcut->back_pointer); | 110 | parent = READ_ONCE(shortcut->back_pointer); |
| 111 | slot = shortcut->parent_slot; | 111 | slot = shortcut->parent_slot; |
| 112 | if (parent == stop) | 112 | if (parent == stop) |
| 113 | return 0; | 113 | return 0; |
| @@ -147,7 +147,7 @@ int assoc_array_iterate(const struct assoc_array *array, | |||
| 147 | void *iterator_data), | 147 | void *iterator_data), |
| 148 | void *iterator_data) | 148 | void *iterator_data) |
| 149 | { | 149 | { |
| 150 | struct assoc_array_ptr *root = ACCESS_ONCE(array->root); | 150 | struct assoc_array_ptr *root = READ_ONCE(array->root); |
| 151 | 151 | ||
| 152 | if (!root) | 152 | if (!root) |
| 153 | return 0; | 153 | return 0; |
| @@ -194,7 +194,7 @@ assoc_array_walk(const struct assoc_array *array, | |||
| 194 | 194 | ||
| 195 | pr_devel("-->%s()\n", __func__); | 195 | pr_devel("-->%s()\n", __func__); |
| 196 | 196 | ||
| 197 | cursor = ACCESS_ONCE(array->root); | 197 | cursor = READ_ONCE(array->root); |
| 198 | if (!cursor) | 198 | if (!cursor) |
| 199 | return assoc_array_walk_tree_empty; | 199 | return assoc_array_walk_tree_empty; |
| 200 | 200 | ||
| @@ -220,7 +220,7 @@ consider_node: | |||
| 220 | 220 | ||
| 221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); | 221 | slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK); |
| 222 | slot &= ASSOC_ARRAY_FAN_MASK; | 222 | slot &= ASSOC_ARRAY_FAN_MASK; |
| 223 | ptr = ACCESS_ONCE(node->slots[slot]); | 223 | ptr = READ_ONCE(node->slots[slot]); |
| 224 | 224 | ||
| 225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", | 225 | pr_devel("consider slot %x [ix=%d type=%lu]\n", |
| 226 | slot, level, (unsigned long)ptr & 3); | 226 | slot, level, (unsigned long)ptr & 3); |
| @@ -294,7 +294,7 @@ follow_shortcut: | |||
| 294 | } while (sc_level < shortcut->skip_to_level); | 294 | } while (sc_level < shortcut->skip_to_level); |
| 295 | 295 | ||
| 296 | /* The shortcut matches the leaf's index to this point. */ | 296 | /* The shortcut matches the leaf's index to this point. */ |
| 297 | cursor = ACCESS_ONCE(shortcut->next_node); | 297 | cursor = READ_ONCE(shortcut->next_node); |
| 298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { | 298 | if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) { |
| 299 | level = sc_level; | 299 | level = sc_level; |
| 300 | goto jumped; | 300 | goto jumped; |
| @@ -337,7 +337,7 @@ void *assoc_array_find(const struct assoc_array *array, | |||
| 337 | * the terminal node. | 337 | * the terminal node. |
| 338 | */ | 338 | */ |
| 339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { | 339 | for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) { |
| 340 | ptr = ACCESS_ONCE(node->slots[slot]); | 340 | ptr = READ_ONCE(node->slots[slot]); |
| 341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { | 341 | if (ptr && assoc_array_ptr_is_leaf(ptr)) { |
| 342 | /* We need a barrier between the read of the pointer | 342 | /* We need a barrier between the read of the pointer |
| 343 | * and dereferencing the pointer - but only if we are | 343 | * and dereferencing the pointer - but only if we are |
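The ACCESS_ONCE() to READ_ONCE() conversion above keeps the same guarantee for this lockless walk: each pointer is loaded exactly once, so the compiler cannot re-read or tear it while the surrounding checks run. A simplified userspace model of the idea, assuming GCC/Clang __typeof__ (the real READ_ONCE() in <linux/compiler.h> also copes with non-scalar types):

#include <stddef.h>

/* Simplified model: force a single volatile load of the object. */
#define READ_ONCE_MODEL(x) (*(const volatile __typeof__(x) *)&(x))

struct node {
        struct node *slots[16];
};

/* Lockless reader: without the volatile access the compiler may reload
 * slots[i] and observe a concurrently updated pointer mid-check. */
static struct node *first_populated_slot(struct node *n)
{
        for (size_t i = 0; i < 16; i++) {
                struct node *ptr = READ_ONCE_MODEL(n->slots[i]);

                if (ptr)
                        return ptr;
        }
        return NULL;
}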
diff --git a/lib/bitmap.c b/lib/bitmap.c index c82c61b66e16..d8f0c094b18e 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
| @@ -18,7 +18,9 @@ | |||
| 18 | 18 | ||
| 19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
| 20 | 20 | ||
| 21 | /* | 21 | /** |
| 22 | * DOC: bitmap introduction | ||
| 23 | * | ||
| 22 | * bitmaps provide an array of bits, implemented using an | 24 | * bitmaps provide an array of bits, implemented using an |
| 23 | * array of unsigned longs. The number of valid bits in a | 25 | * array of unsigned longs. The number of valid bits in a |
| 24 | * given bitmap does _not_ need to be an exact multiple of | 26 | * given bitmap does _not_ need to be an exact multiple of |
diff --git a/lib/bug.c b/lib/bug.c --- a/lib/bug.c +++ b/lib/bug.c | |||
| @@ -186,7 +186,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
| 186 | return BUG_TRAP_TYPE_WARN; | 186 | return BUG_TRAP_TYPE_WARN; |
| 187 | } | 187 | } |
| 188 | 188 | ||
| 189 | printk(KERN_DEFAULT "------------[ cut here ]------------\n"); | 189 | printk(KERN_DEFAULT CUT_HERE); |
| 190 | 190 | ||
| 191 | if (file) | 191 | if (file) |
| 192 | pr_crit("kernel BUG at %s:%u!\n", file, line); | 192 | pr_crit("kernel BUG at %s:%u!\n", file, line); |
| @@ -196,3 +196,26 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) | |||
| 196 | 196 | ||
| 197 | return BUG_TRAP_TYPE_BUG; | 197 | return BUG_TRAP_TYPE_BUG; |
| 198 | } | 198 | } |
| 199 | |||
| 200 | static void clear_once_table(struct bug_entry *start, struct bug_entry *end) | ||
| 201 | { | ||
| 202 | struct bug_entry *bug; | ||
| 203 | |||
| 204 | for (bug = start; bug < end; bug++) | ||
| 205 | bug->flags &= ~BUGFLAG_DONE; | ||
| 206 | } | ||
| 207 | |||
| 208 | void generic_bug_clear_once(void) | ||
| 209 | { | ||
| 210 | #ifdef CONFIG_MODULES | ||
| 211 | struct module *mod; | ||
| 212 | |||
| 213 | rcu_read_lock_sched(); | ||
| 214 | list_for_each_entry_rcu(mod, &module_bug_list, bug_list) | ||
| 215 | clear_once_table(mod->bug_table, | ||
| 216 | mod->bug_table + mod->num_bugs); | ||
| 217 | rcu_read_unlock_sched(); | ||
| 218 | #endif | ||
| 219 | |||
| 220 | clear_once_table(__start___bug_table, __stop___bug_table); | ||
| 221 | } | ||
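generic_bug_clear_once() re-arms WARN_ONCE-style reports by clearing the "done" bit in every bug_entry, both in the core __bug_table and in each loaded module's table. The standalone model below only illustrates that re-arming behaviour; the names and the single-bit flag are illustrative, not the kernel's BUGFLAG_DONE machinery.

#include <stdio.h>

struct once_entry {
        unsigned int flags;
};
#define DONE 0x1u

static struct once_entry demo_entry;

static void warn_once(struct once_entry *e, const char *msg)
{
        if (e->flags & DONE)
                return;                 /* already reported, stay quiet */
        e->flags |= DONE;
        fprintf(stderr, "WARNING: %s\n", msg);
}

static void clear_once(struct once_entry *start, struct once_entry *end)
{
        for (struct once_entry *e = start; e < end; e++)
                e->flags &= ~DONE;      /* next warn_once() fires again */
}

int main(void)
{
        warn_once(&demo_entry, "demo");         /* prints */
        warn_once(&demo_entry, "demo");         /* silent */
        clear_once(&demo_entry, &demo_entry + 1);
        warn_once(&demo_entry, "demo");         /* prints again */
        return 0;
}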
diff --git a/lib/cmpdi2.c b/lib/cmpdi2.c new file mode 100644 index 000000000000..2250da7e503e --- /dev/null +++ b/lib/cmpdi2.c | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, see the file COPYING, or write | ||
| 14 | * to the Free Software Foundation, Inc. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/export.h> | ||
| 18 | |||
| 19 | #include <linux/libgcc.h> | ||
| 20 | |||
| 21 | word_type notrace __cmpdi2(long long a, long long b) | ||
| 22 | { | ||
| 23 | const DWunion au = { | ||
| 24 | .ll = a | ||
| 25 | }; | ||
| 26 | const DWunion bu = { | ||
| 27 | .ll = b | ||
| 28 | }; | ||
| 29 | |||
| 30 | if (au.s.high < bu.s.high) | ||
| 31 | return 0; | ||
| 32 | else if (au.s.high > bu.s.high) | ||
| 33 | return 2; | ||
| 34 | |||
| 35 | if ((unsigned int) au.s.low < (unsigned int) bu.s.low) | ||
| 36 | return 0; | ||
| 37 | else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) | ||
| 38 | return 2; | ||
| 39 | |||
| 40 | return 1; | ||
| 41 | } | ||
| 42 | EXPORT_SYMBOL(__cmpdi2); | ||
diff --git a/lib/crc32.c b/lib/crc32.c index 6ddc92bc1460..2ef20fe84b69 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
| @@ -225,7 +225,7 @@ static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus) | |||
| 225 | } | 225 | } |
| 226 | 226 | ||
| 227 | /** | 227 | /** |
| 228 | * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time | 228 | * crc32_generic_shift - Append @len 0 bytes to crc, in logarithmic time |
| 229 | * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) | 229 | * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient) |
| 230 | * @len: The number of bytes. @crc is multiplied by x^(8*@len) | 230 | * @len: The number of bytes. @crc is multiplied by x^(8*@len) |
| 231 | * @polynomial: The modulus used to reduce the result to 32 bits. | 231 | * @polynomial: The modulus used to reduce the result to 32 bits. |
diff --git a/lib/crc4.c b/lib/crc4.c index cf6db46661be..164ed9444cd3 100644 --- a/lib/crc4.c +++ b/lib/crc4.c | |||
| @@ -15,7 +15,7 @@ static const uint8_t crc4_tab[] = { | |||
| 15 | 15 | ||
| 16 | /** | 16 | /** |
| 17 | * crc4 - calculate the 4-bit crc of a value. | 17 | * crc4 - calculate the 4-bit crc of a value. |
| 18 | * @crc: starting crc4 | 18 | * @c: starting crc4 |
| 19 | * @x: value to checksum | 19 | * @x: value to checksum |
| 20 | * @bits: number of bits in @x to checksum | 20 | * @bits: number of bits in @x to checksum |
| 21 | * | 21 | * |
diff --git a/lib/crc8.c b/lib/crc8.c index 87b59cafdb83..595a5a75e3cd 100644 --- a/lib/crc8.c +++ b/lib/crc8.c | |||
| @@ -20,11 +20,11 @@ | |||
| 20 | #include <linux/crc8.h> | 20 | #include <linux/crc8.h> |
| 21 | #include <linux/printk.h> | 21 | #include <linux/printk.h> |
| 22 | 22 | ||
| 23 | /* | 23 | /** |
| 24 | * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. | 24 | * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. |
| 25 | * | 25 | * |
| 26 | * table: table to be filled. | 26 | * @table: table to be filled. |
| 27 | * polynomial: polynomial for which table is to be filled. | 27 | * @polynomial: polynomial for which table is to be filled. |
| 28 | */ | 28 | */ |
| 29 | void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) | 29 | void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) |
| 30 | { | 30 | { |
| @@ -42,11 +42,11 @@ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) | |||
| 42 | } | 42 | } |
| 43 | EXPORT_SYMBOL(crc8_populate_msb); | 43 | EXPORT_SYMBOL(crc8_populate_msb); |
| 44 | 44 | ||
| 45 | /* | 45 | /** |
| 46 | * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. | 46 | * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. |
| 47 | * | 47 | * |
| 48 | * table: table to be filled. | 48 | * @table: table to be filled. |
| 49 | * polynomial: polynomial for which table is to be filled. | 49 | * @polynomial: polynomial for which table is to be filled. |
| 50 | */ | 50 | */ |
| 51 | void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) | 51 | void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) |
| 52 | { | 52 | { |
| @@ -63,13 +63,13 @@ void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) | |||
| 63 | } | 63 | } |
| 64 | EXPORT_SYMBOL(crc8_populate_lsb); | 64 | EXPORT_SYMBOL(crc8_populate_lsb); |
| 65 | 65 | ||
| 66 | /* | 66 | /** |
| 67 | * crc8 - calculate a crc8 over the given input data. | 67 | * crc8 - calculate a crc8 over the given input data. |
| 68 | * | 68 | * |
| 69 | * table: crc table used for calculation. | 69 | * @table: crc table used for calculation. |
| 70 | * pdata: pointer to data buffer. | 70 | * @pdata: pointer to data buffer. |
| 71 | * nbytes: number of bytes in data buffer. | 71 | * @nbytes: number of bytes in data buffer. |
| 72 | * crc: previous returned crc8 value. | 72 | * @crc: previous returned crc8 value. |
| 73 | */ | 73 | */ |
| 74 | u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) | 74 | u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) |
| 75 | { | 75 | { |
diff --git a/lib/div64.c b/lib/div64.c index 58e2a404097e..01c8602bb6ff 100644 --- a/lib/div64.c +++ b/lib/div64.c | |||
| @@ -61,6 +61,12 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base) | |||
| 61 | EXPORT_SYMBOL(__div64_32); | 61 | EXPORT_SYMBOL(__div64_32); |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | /** | ||
| 65 | * div_s64_rem - signed 64bit divide with 32bit divisor and remainder | ||
| 66 | * @dividend: 64bit dividend | ||
| 67 | * @divisor: 32bit divisor | ||
| 68 | * @remainder: 32bit remainder | ||
| 69 | */ | ||
| 64 | #ifndef div_s64_rem | 70 | #ifndef div_s64_rem |
| 65 | s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) | 71 | s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) |
| 66 | { | 72 | { |
diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ea4cc3dde4f1..1b34d210452c 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c | |||
| @@ -1495,14 +1495,22 @@ void debug_dma_alloc_coherent(struct device *dev, size_t size, | |||
| 1495 | if (!entry) | 1495 | if (!entry) |
| 1496 | return; | 1496 | return; |
| 1497 | 1497 | ||
| 1498 | /* handle vmalloc and linear addresses */ | ||
| 1499 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | ||
| 1500 | return; | ||
| 1501 | |||
| 1498 | entry->type = dma_debug_coherent; | 1502 | entry->type = dma_debug_coherent; |
| 1499 | entry->dev = dev; | 1503 | entry->dev = dev; |
| 1500 | entry->pfn = page_to_pfn(virt_to_page(virt)); | ||
| 1501 | entry->offset = offset_in_page(virt); | 1504 | entry->offset = offset_in_page(virt); |
| 1502 | entry->size = size; | 1505 | entry->size = size; |
| 1503 | entry->dev_addr = dma_addr; | 1506 | entry->dev_addr = dma_addr; |
| 1504 | entry->direction = DMA_BIDIRECTIONAL; | 1507 | entry->direction = DMA_BIDIRECTIONAL; |
| 1505 | 1508 | ||
| 1509 | if (is_vmalloc_addr(virt)) | ||
| 1510 | entry->pfn = vmalloc_to_pfn(virt); | ||
| 1511 | else | ||
| 1512 | entry->pfn = page_to_pfn(virt_to_page(virt)); | ||
| 1513 | |||
| 1506 | add_dma_entry(entry); | 1514 | add_dma_entry(entry); |
| 1507 | } | 1515 | } |
| 1508 | EXPORT_SYMBOL(debug_dma_alloc_coherent); | 1516 | EXPORT_SYMBOL(debug_dma_alloc_coherent); |
| @@ -1513,13 +1521,21 @@ void debug_dma_free_coherent(struct device *dev, size_t size, | |||
| 1513 | struct dma_debug_entry ref = { | 1521 | struct dma_debug_entry ref = { |
| 1514 | .type = dma_debug_coherent, | 1522 | .type = dma_debug_coherent, |
| 1515 | .dev = dev, | 1523 | .dev = dev, |
| 1516 | .pfn = page_to_pfn(virt_to_page(virt)), | ||
| 1517 | .offset = offset_in_page(virt), | 1524 | .offset = offset_in_page(virt), |
| 1518 | .dev_addr = addr, | 1525 | .dev_addr = addr, |
| 1519 | .size = size, | 1526 | .size = size, |
| 1520 | .direction = DMA_BIDIRECTIONAL, | 1527 | .direction = DMA_BIDIRECTIONAL, |
| 1521 | }; | 1528 | }; |
| 1522 | 1529 | ||
| 1530 | /* handle vmalloc and linear addresses */ | ||
| 1531 | if (!is_vmalloc_addr(virt) && !virt_to_page(virt)) | ||
| 1532 | return; | ||
| 1533 | |||
| 1534 | if (is_vmalloc_addr(virt)) | ||
| 1535 | ref.pfn = vmalloc_to_pfn(virt); | ||
| 1536 | else | ||
| 1537 | ref.pfn = page_to_pfn(virt_to_page(virt)); | ||
| 1538 | |||
| 1523 | if (unlikely(dma_debug_disabled())) | 1539 | if (unlikely(dma_debug_disabled())) |
| 1524 | return; | 1540 | return; |
| 1525 | 1541 | ||
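Both hunks above add the same idea: a coherent buffer may live in the vmalloc area, where virt_to_page() must not be used, so the pfn has to come from vmalloc_to_pfn() instead, and addresses that resolve to neither are skipped. A small kernel-context sketch of that shared branch; the helper name is hypothetical, the patch open-codes the test in each function.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper mirroring the branch added to
 * debug_dma_alloc_coherent() and debug_dma_free_coherent(). */
static unsigned long dma_debug_virt_to_pfn(const void *virt)
{
        if (is_vmalloc_addr(virt))
                return vmalloc_to_pfn(virt);
        return page_to_pfn(virt_to_page(virt));
}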
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index da796e2dc4f5..c7c96bc7654a 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords, | |||
| 360 | if (parse_lineno(last, &query->last_lineno) < 0) | 360 | if (parse_lineno(last, &query->last_lineno) < 0) |
| 361 | return -EINVAL; | 361 | return -EINVAL; |
| 362 | 362 | ||
| 363 | /* special case for last lineno not specified */ | ||
| 364 | if (query->last_lineno == 0) | ||
| 365 | query->last_lineno = UINT_MAX; | ||
| 366 | |||
| 363 | if (query->last_lineno < query->first_lineno) { | 367 | if (query->last_lineno < query->first_lineno) { |
| 364 | pr_err("last-line:%d < 1st-line:%d\n", | 368 | pr_err("last-line:%d < 1st-line:%d\n", |
| 365 | query->last_lineno, | 369 | query->last_lineno, |
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 6a406fafb5d6..e659a027036e 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c | |||
| @@ -21,7 +21,7 @@ void dql_completed(struct dql *dql, unsigned int count) | |||
| 21 | unsigned int ovlimit, completed, num_queued; | 21 | unsigned int ovlimit, completed, num_queued; |
| 22 | bool all_prev_completed; | 22 | bool all_prev_completed; |
| 23 | 23 | ||
| 24 | num_queued = ACCESS_ONCE(dql->num_queued); | 24 | num_queued = READ_ONCE(dql->num_queued); |
| 25 | 25 | ||
| 26 | /* Can't complete more than what's in queue */ | 26 | /* Can't complete more than what's in queue */ |
| 27 | BUG_ON(count > num_queued - dql->num_completed); | 27 | BUG_ON(count > num_queued - dql->num_completed); |
| @@ -128,12 +128,11 @@ void dql_reset(struct dql *dql) | |||
| 128 | } | 128 | } |
| 129 | EXPORT_SYMBOL(dql_reset); | 129 | EXPORT_SYMBOL(dql_reset); |
| 130 | 130 | ||
| 131 | int dql_init(struct dql *dql, unsigned hold_time) | 131 | void dql_init(struct dql *dql, unsigned int hold_time) |
| 132 | { | 132 | { |
| 133 | dql->max_limit = DQL_MAX_LIMIT; | 133 | dql->max_limit = DQL_MAX_LIMIT; |
| 134 | dql->min_limit = 0; | 134 | dql->min_limit = 0; |
| 135 | dql->slack_hold_time = hold_time; | 135 | dql->slack_hold_time = hold_time; |
| 136 | dql_reset(dql); | 136 | dql_reset(dql); |
| 137 | return 0; | ||
| 138 | } | 137 | } |
| 139 | EXPORT_SYMBOL(dql_init); | 138 | EXPORT_SYMBOL(dql_init); |
diff --git a/lib/gcd.c b/lib/gcd.c --- a/lib/gcd.c +++ b/lib/gcd.c | |||
| @@ -13,6 +13,12 @@ | |||
| 13 | #if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS) | 13 | #if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS) |
| 14 | 14 | ||
| 15 | /* If __ffs is available, the even/odd algorithm benchmarks slower. */ | 15 | /* If __ffs is available, the even/odd algorithm benchmarks slower. */ |
| 16 | |||
| 17 | /** | ||
| 18 | * gcd - calculate and return the greatest common divisor of 2 unsigned longs | ||
| 19 | * @a: first value | ||
| 20 | * @b: second value | ||
| 21 | */ | ||
| 16 | unsigned long gcd(unsigned long a, unsigned long b) | 22 | unsigned long gcd(unsigned long a, unsigned long b) |
| 17 | { | 23 | { |
| 18 | unsigned long r = a | b; | 24 | unsigned long r = a | b; |
diff --git a/lib/genalloc.c b/lib/genalloc.c index 144fe6b1a03e..ca06adc4f445 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy | |||
| 194 | chunk->phys_addr = phys; | 194 | chunk->phys_addr = phys; |
| 195 | chunk->start_addr = virt; | 195 | chunk->start_addr = virt; |
| 196 | chunk->end_addr = virt + size - 1; | 196 | chunk->end_addr = virt + size - 1; |
| 197 | atomic_set(&chunk->avail, size); | 197 | atomic_long_set(&chunk->avail, size); |
| 198 | 198 | ||
| 199 | spin_lock(&pool->lock); | 199 | spin_lock(&pool->lock); |
| 200 | list_add_rcu(&chunk->next_chunk, &pool->chunks); | 200 | list_add_rcu(&chunk->next_chunk, &pool->chunks); |
| @@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size, | |||
| 304 | nbits = (size + (1UL << order) - 1) >> order; | 304 | nbits = (size + (1UL << order) - 1) >> order; |
| 305 | rcu_read_lock(); | 305 | rcu_read_lock(); |
| 306 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { | 306 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) { |
| 307 | if (size > atomic_read(&chunk->avail)) | 307 | if (size > atomic_long_read(&chunk->avail)) |
| 308 | continue; | 308 | continue; |
| 309 | 309 | ||
| 310 | start_bit = 0; | 310 | start_bit = 0; |
| @@ -324,7 +324,7 @@ retry: | |||
| 324 | 324 | ||
| 325 | addr = chunk->start_addr + ((unsigned long)start_bit << order); | 325 | addr = chunk->start_addr + ((unsigned long)start_bit << order); |
| 326 | size = nbits << order; | 326 | size = nbits << order; |
| 327 | atomic_sub(size, &chunk->avail); | 327 | atomic_long_sub(size, &chunk->avail); |
| 328 | break; | 328 | break; |
| 329 | } | 329 | } |
| 330 | rcu_read_unlock(); | 330 | rcu_read_unlock(); |
| @@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size) | |||
| 390 | remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); | 390 | remain = bitmap_clear_ll(chunk->bits, start_bit, nbits); |
| 391 | BUG_ON(remain); | 391 | BUG_ON(remain); |
| 392 | size = nbits << order; | 392 | size = nbits << order; |
| 393 | atomic_add(size, &chunk->avail); | 393 | atomic_long_add(size, &chunk->avail); |
| 394 | rcu_read_unlock(); | 394 | rcu_read_unlock(); |
| 395 | return; | 395 | return; |
| 396 | } | 396 | } |
| @@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool) | |||
| 464 | 464 | ||
| 465 | rcu_read_lock(); | 465 | rcu_read_lock(); |
| 466 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) | 466 | list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) |
| 467 | avail += atomic_read(&chunk->avail); | 467 | avail += atomic_long_read(&chunk->avail); |
| 468 | rcu_read_unlock(); | 468 | rcu_read_unlock(); |
| 469 | return avail; | 469 | return avail; |
| 470 | } | 470 | } |
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
| @@ -171,7 +171,7 @@ void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id) | |||
| 171 | if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) | 171 | if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) |
| 172 | return ERR_PTR(-ENOENT); | 172 | return ERR_PTR(-ENOENT); |
| 173 | 173 | ||
| 174 | __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL); | 174 | __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL); |
| 175 | 175 | ||
| 176 | return entry; | 176 | return entry; |
| 177 | } | 177 | } |
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c index db0b5aa071fc..e2d329099bf7 100644 --- a/lib/int_sqrt.c +++ b/lib/int_sqrt.c | |||
| @@ -8,12 +8,13 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/export.h> | 10 | #include <linux/export.h> |
| 11 | #include <linux/bitops.h> | ||
| 11 | 12 | ||
| 12 | /** | 13 | /** |
| 13 | * int_sqrt - rough approximation to sqrt | 14 | * int_sqrt - computes the integer square root |
| 14 | * @x: integer of which to calculate the sqrt | 15 | * @x: integer of which to calculate the sqrt |
| 15 | * | 16 | * |
| 16 | * A very rough approximation to the sqrt() function. | 17 | * Computes: floor(sqrt(x)) |
| 17 | */ | 18 | */ |
| 18 | unsigned long int_sqrt(unsigned long x) | 19 | unsigned long int_sqrt(unsigned long x) |
| 19 | { | 20 | { |
| @@ -22,7 +23,7 @@ unsigned long int_sqrt(unsigned long x) | |||
| 22 | if (x <= 1) | 23 | if (x <= 1) |
| 23 | return x; | 24 | return x; |
| 24 | 25 | ||
| 25 | m = 1UL << (BITS_PER_LONG - 2); | 26 | m = 1UL << (__fls(x) & ~1UL); |
| 26 | while (m != 0) { | 27 | while (m != 0) { |
| 27 | b = y + m; | 28 | b = y + m; |
| 28 | y >>= 1; | 29 | y >>= 1; |
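Besides documenting that int_sqrt() now computes floor(sqrt(x)), the patch starts the iteration at the highest even bit of x instead of at BITS_PER_LONG - 2, skipping the leading rounds that could only halve a still-zero y. A standalone check of the same algorithm, using __builtin_clzl as a userspace stand-in for the kernel's __fls():

#include <assert.h>

static unsigned long isqrt(unsigned long x)
{
        unsigned long b, m, y = 0;

        if (x <= 1)
                return x;

        /* Largest power of four <= x: highest set bit rounded down to even. */
        m = 1UL << ((sizeof(long) * 8 - 1 - __builtin_clzl(x)) & ~1UL);
        while (m != 0) {
                b = y + m;
                y >>= 1;
                if (x >= b) {
                        x -= b;
                        y += m;
                }
                m >>= 2;
        }
        return y;
}

int main(void)
{
        assert(isqrt(0) == 0 && isqrt(1) == 1);
        assert(isqrt(15) == 3 && isqrt(16) == 4 && isqrt(17) == 4);
        assert(isqrt(1000000UL) == 1000);
        return 0;
}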
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c index 0e343fd29570..835242e74aaa 100644 --- a/lib/interval_tree_test.c +++ b/lib/interval_tree_test.c | |||
| @@ -11,10 +11,10 @@ | |||
| 11 | MODULE_PARM_DESC(name, msg); | 11 | MODULE_PARM_DESC(name, msg); |
| 12 | 12 | ||
| 13 | __param(int, nnodes, 100, "Number of nodes in the interval tree"); | 13 | __param(int, nnodes, 100, "Number of nodes in the interval tree"); |
| 14 | __param(int, perf_loops, 100000, "Number of iterations modifying the tree"); | 14 | __param(int, perf_loops, 1000, "Number of iterations modifying the tree"); |
| 15 | 15 | ||
| 16 | __param(int, nsearches, 100, "Number of searches to the interval tree"); | 16 | __param(int, nsearches, 100, "Number of searches to the interval tree"); |
| 17 | __param(int, search_loops, 10000, "Number of iterations searching the tree"); | 17 | __param(int, search_loops, 1000, "Number of iterations searching the tree"); |
| 18 | __param(bool, search_all, false, "Searches will iterate all nodes in the tree"); | 18 | __param(bool, search_all, false, "Searches will iterate all nodes in the tree"); |
| 19 | 19 | ||
| 20 | __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint"); | 20 | __param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint"); |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 1c1c06ddc20a..970212670b6a 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
| @@ -1446,3 +1446,25 @@ int import_single_range(int rw, void __user *buf, size_t len, | |||
| 1446 | return 0; | 1446 | return 0; |
| 1447 | } | 1447 | } |
| 1448 | EXPORT_SYMBOL(import_single_range); | 1448 | EXPORT_SYMBOL(import_single_range); |
| 1449 | |||
| 1450 | int iov_iter_for_each_range(struct iov_iter *i, size_t bytes, | ||
| 1451 | int (*f)(struct kvec *vec, void *context), | ||
| 1452 | void *context) | ||
| 1453 | { | ||
| 1454 | struct kvec w; | ||
| 1455 | int err = -EINVAL; | ||
| 1456 | if (!bytes) | ||
| 1457 | return 0; | ||
| 1458 | |||
| 1459 | iterate_all_kinds(i, bytes, v, -EINVAL, ({ | ||
| 1460 | w.iov_base = kmap(v.bv_page) + v.bv_offset; | ||
| 1461 | w.iov_len = v.bv_len; | ||
| 1462 | err = f(&w, context); | ||
| 1463 | kunmap(v.bv_page); | ||
| 1464 | err;}), ({ | ||
| 1465 | w = v; | ||
| 1466 | err = f(&w, context);}) | ||
| 1467 | ) | ||
| 1468 | return err; | ||
| 1469 | } | ||
| 1470 | EXPORT_SYMBOL(iov_iter_for_each_range); | ||
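The new iov_iter_for_each_range() walks an ITER_BVEC or ITER_KVEC iterator and hands each contiguous, kernel-mapped chunk to the callback as a kvec; plain user iovecs yield -EINVAL, as the implementation above shows. A minimal kernel-context usage sketch under those assumptions; the callback and wrapper names are made up for illustration.

#include <linux/uio.h>

/* Hypothetical callback: account how many bytes the iterator covers.
 * Each call sees one contiguous chunk, already mapped into kernel space. */
static int count_chunk(struct kvec *vec, void *context)
{
        size_t *total = context;

        *total += vec->iov_len;
        return 0;
}

static int count_iter_bytes(struct iov_iter *iter, size_t bytes, size_t *total)
{
        *total = 0;
        return iov_iter_for_each_range(iter, bytes, count_chunk, total);
}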
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index f237a09a5862..c3e84edc47c9 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -294,6 +294,55 @@ static void cleanup_uevent_env(struct subprocess_info *info) | |||
| 294 | } | 294 | } |
| 295 | #endif | 295 | #endif |
| 296 | 296 | ||
| 297 | static int kobject_uevent_net_broadcast(struct kobject *kobj, | ||
| 298 | struct kobj_uevent_env *env, | ||
| 299 | const char *action_string, | ||
| 300 | const char *devpath) | ||
| 301 | { | ||
| 302 | int retval = 0; | ||
| 303 | #if defined(CONFIG_NET) | ||
| 304 | struct sk_buff *skb = NULL; | ||
| 305 | struct uevent_sock *ue_sk; | ||
| 306 | |||
| 307 | /* send netlink message */ | ||
| 308 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { | ||
| 309 | struct sock *uevent_sock = ue_sk->sk; | ||
| 310 | |||
| 311 | if (!netlink_has_listeners(uevent_sock, 1)) | ||
| 312 | continue; | ||
| 313 | |||
| 314 | if (!skb) { | ||
| 315 | /* allocate message with the maximum possible size */ | ||
| 316 | size_t len = strlen(action_string) + strlen(devpath) + 2; | ||
| 317 | char *scratch; | ||
| 318 | |||
| 319 | retval = -ENOMEM; | ||
| 320 | skb = alloc_skb(len + env->buflen, GFP_KERNEL); | ||
| 321 | if (!skb) | ||
| 322 | continue; | ||
| 323 | |||
| 324 | /* add header */ | ||
| 325 | scratch = skb_put(skb, len); | ||
| 326 | sprintf(scratch, "%s@%s", action_string, devpath); | ||
| 327 | |||
| 328 | skb_put_data(skb, env->buf, env->buflen); | ||
| 329 | |||
| 330 | NETLINK_CB(skb).dst_group = 1; | ||
| 331 | } | ||
| 332 | |||
| 333 | retval = netlink_broadcast_filtered(uevent_sock, skb_get(skb), | ||
| 334 | 0, 1, GFP_KERNEL, | ||
| 335 | kobj_bcast_filter, | ||
| 336 | kobj); | ||
| 337 | /* ENOBUFS should be handled in userspace */ | ||
| 338 | if (retval == -ENOBUFS || retval == -ESRCH) | ||
| 339 | retval = 0; | ||
| 340 | } | ||
| 341 | consume_skb(skb); | ||
| 342 | #endif | ||
| 343 | return retval; | ||
| 344 | } | ||
| 345 | |||
| 297 | static void zap_modalias_env(struct kobj_uevent_env *env) | 346 | static void zap_modalias_env(struct kobj_uevent_env *env) |
| 298 | { | 347 | { |
| 299 | static const char modalias_prefix[] = "MODALIAS="; | 348 | static const char modalias_prefix[] = "MODALIAS="; |
| @@ -336,9 +385,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 336 | const struct kset_uevent_ops *uevent_ops; | 385 | const struct kset_uevent_ops *uevent_ops; |
| 337 | int i = 0; | 386 | int i = 0; |
| 338 | int retval = 0; | 387 | int retval = 0; |
| 339 | #ifdef CONFIG_NET | ||
| 340 | struct uevent_sock *ue_sk; | ||
| 341 | #endif | ||
| 342 | 388 | ||
| 343 | pr_debug("kobject: '%s' (%p): %s\n", | 389 | pr_debug("kobject: '%s' (%p): %s\n", |
| 344 | kobject_name(kobj), kobj, __func__); | 390 | kobject_name(kobj), kobj, __func__); |
| @@ -460,46 +506,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, | |||
| 460 | mutex_unlock(&uevent_sock_mutex); | 506 | mutex_unlock(&uevent_sock_mutex); |
| 461 | goto exit; | 507 | goto exit; |
| 462 | } | 508 | } |
| 463 | 509 | retval = kobject_uevent_net_broadcast(kobj, env, action_string, | |
| 464 | #if defined(CONFIG_NET) | 510 | devpath); |
| 465 | /* send netlink message */ | ||
| 466 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { | ||
| 467 | struct sock *uevent_sock = ue_sk->sk; | ||
| 468 | struct sk_buff *skb; | ||
| 469 | size_t len; | ||
| 470 | |||
| 471 | if (!netlink_has_listeners(uevent_sock, 1)) | ||
| 472 | continue; | ||
| 473 | |||
| 474 | /* allocate message with the maximum possible size */ | ||
| 475 | len = strlen(action_string) + strlen(devpath) + 2; | ||
| 476 | skb = alloc_skb(len + env->buflen, GFP_KERNEL); | ||
| 477 | if (skb) { | ||
| 478 | char *scratch; | ||
| 479 | |||
| 480 | /* add header */ | ||
| 481 | scratch = skb_put(skb, len); | ||
| 482 | sprintf(scratch, "%s@%s", action_string, devpath); | ||
| 483 | |||
| 484 | /* copy keys to our continuous event payload buffer */ | ||
| 485 | for (i = 0; i < env->envp_idx; i++) { | ||
| 486 | len = strlen(env->envp[i]) + 1; | ||
| 487 | scratch = skb_put(skb, len); | ||
| 488 | strcpy(scratch, env->envp[i]); | ||
| 489 | } | ||
| 490 | |||
| 491 | NETLINK_CB(skb).dst_group = 1; | ||
| 492 | retval = netlink_broadcast_filtered(uevent_sock, skb, | ||
| 493 | 0, 1, GFP_KERNEL, | ||
| 494 | kobj_bcast_filter, | ||
| 495 | kobj); | ||
| 496 | /* ENOBUFS should be handled in userspace */ | ||
| 497 | if (retval == -ENOBUFS || retval == -ESRCH) | ||
| 498 | retval = 0; | ||
| 499 | } else | ||
| 500 | retval = -ENOMEM; | ||
| 501 | } | ||
| 502 | #endif | ||
| 503 | mutex_unlock(&uevent_sock_mutex); | 511 | mutex_unlock(&uevent_sock_mutex); |
| 504 | 512 | ||
| 505 | #ifdef CONFIG_UEVENT_HELPER | 513 | #ifdef CONFIG_UEVENT_HELPER |
diff --git a/lib/llist.c b/lib/llist.c index ae5872b1df0c..7062e931a7bb 100644 --- a/lib/llist.c +++ b/lib/llist.c | |||
| @@ -41,7 +41,7 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, | |||
| 41 | struct llist_node *first; | 41 | struct llist_node *first; |
| 42 | 42 | ||
| 43 | do { | 43 | do { |
| 44 | new_last->next = first = ACCESS_ONCE(head->first); | 44 | new_last->next = first = READ_ONCE(head->first); |
| 45 | } while (cmpxchg(&head->first, first, new_first) != first); | 45 | } while (cmpxchg(&head->first, first, new_first) != first); |
| 46 | 46 | ||
| 47 | return !first; | 47 | return !first; |
diff --git a/lib/lshrdi3.c b/lib/lshrdi3.c new file mode 100644 index 000000000000..99cfa5721f2d --- /dev/null +++ b/lib/lshrdi3.c | |||
| @@ -0,0 +1,45 @@ | |||
| 1 | /* | ||
| 2 | * lib/lshrdi3.c | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, see the file COPYING, or write | ||
| 16 | * to the Free Software Foundation, Inc. | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/libgcc.h> | ||
| 21 | |||
| 22 | long long notrace __lshrdi3(long long u, word_type b) | ||
| 23 | { | ||
| 24 | DWunion uu, w; | ||
| 25 | word_type bm; | ||
| 26 | |||
| 27 | if (b == 0) | ||
| 28 | return u; | ||
| 29 | |||
| 30 | uu.ll = u; | ||
| 31 | bm = 32 - b; | ||
| 32 | |||
| 33 | if (bm <= 0) { | ||
| 34 | w.s.high = 0; | ||
| 35 | w.s.low = (unsigned int) uu.s.high >> -bm; | ||
| 36 | } else { | ||
| 37 | const unsigned int carries = (unsigned int) uu.s.high << bm; | ||
| 38 | |||
| 39 | w.s.high = (unsigned int) uu.s.high >> b; | ||
| 40 | w.s.low = ((unsigned int) uu.s.low >> b) | carries; | ||
| 41 | } | ||
| 42 | |||
| 43 | return w.ll; | ||
| 44 | } | ||
| 45 | EXPORT_SYMBOL(__lshrdi3); | ||
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index e24388a863a7..468fb7cd1221 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | * however I decided to publish this code under the plain GPL. | 26 | * however I decided to publish this code under the plain GPL. |
| 27 | */ | 27 | */ |
| 28 | 28 | ||
| 29 | #include <linux/sched.h> | ||
| 29 | #include <linux/string.h> | 30 | #include <linux/string.h> |
| 30 | #include "mpi-internal.h" | 31 | #include "mpi-internal.h" |
| 31 | #include "longlong.h" | 32 | #include "longlong.h" |
| @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) | |||
| 256 | } | 257 | } |
| 257 | e <<= 1; | 258 | e <<= 1; |
| 258 | c--; | 259 | c--; |
| 260 | cond_resched(); | ||
| 259 | } | 261 | } |
| 260 | 262 | ||
| 261 | i--; | 263 | i--; |
diff --git a/lib/muldi3.c b/lib/muldi3.c new file mode 100644 index 000000000000..54c8b3123376 --- /dev/null +++ b/lib/muldi3.c | |||
| @@ -0,0 +1,72 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, see the file COPYING, or write | ||
| 14 | * to the Free Software Foundation, Inc. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/export.h> | ||
| 18 | #include <linux/libgcc.h> | ||
| 19 | |||
| 20 | #define W_TYPE_SIZE 32 | ||
| 21 | |||
| 22 | #define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2)) | ||
| 23 | #define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1)) | ||
| 24 | #define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2)) | ||
| 25 | |||
| 26 | /* If we still don't have umul_ppmm, define it using plain C. */ | ||
| 27 | #if !defined(umul_ppmm) | ||
| 28 | #define umul_ppmm(w1, w0, u, v) \ | ||
| 29 | do { \ | ||
| 30 | unsigned long __x0, __x1, __x2, __x3; \ | ||
| 31 | unsigned short __ul, __vl, __uh, __vh; \ | ||
| 32 | \ | ||
| 33 | __ul = __ll_lowpart(u); \ | ||
| 34 | __uh = __ll_highpart(u); \ | ||
| 35 | __vl = __ll_lowpart(v); \ | ||
| 36 | __vh = __ll_highpart(v); \ | ||
| 37 | \ | ||
| 38 | __x0 = (unsigned long) __ul * __vl; \ | ||
| 39 | __x1 = (unsigned long) __ul * __vh; \ | ||
| 40 | __x2 = (unsigned long) __uh * __vl; \ | ||
| 41 | __x3 = (unsigned long) __uh * __vh; \ | ||
| 42 | \ | ||
| 43 | __x1 += __ll_highpart(__x0); /* this can't give carry */\ | ||
| 44 | __x1 += __x2; /* but this indeed can */ \ | ||
| 45 | if (__x1 < __x2) /* did we get it? */ \ | ||
| 46 | __x3 += __ll_B; /* yes, add it in the proper pos */ \ | ||
| 47 | \ | ||
| 48 | (w1) = __x3 + __ll_highpart(__x1); \ | ||
| 49 | (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\ | ||
| 50 | } while (0) | ||
| 51 | #endif | ||
| 52 | |||
| 53 | #if !defined(__umulsidi3) | ||
| 54 | #define __umulsidi3(u, v) ({ \ | ||
| 55 | DWunion __w; \ | ||
| 56 | umul_ppmm(__w.s.high, __w.s.low, u, v); \ | ||
| 57 | __w.ll; \ | ||
| 58 | }) | ||
| 59 | #endif | ||
| 60 | |||
| 61 | long long notrace __muldi3(long long u, long long v) | ||
| 62 | { | ||
| 63 | const DWunion uu = {.ll = u}; | ||
| 64 | const DWunion vv = {.ll = v}; | ||
| 65 | DWunion w = {.ll = __umulsidi3(uu.s.low, vv.s.low)}; | ||
| 66 | |||
| 67 | w.s.high += ((unsigned long) uu.s.low * (unsigned long) vv.s.high | ||
| 68 | + (unsigned long) uu.s.high * (unsigned long) vv.s.low); | ||
| 69 | |||
| 70 | return w.ll; | ||
| 71 | } | ||
| 72 | EXPORT_SYMBOL(__muldi3); | ||
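__muldi3() assembles a 64x64->64 multiply from 32-bit halves: only the low 64 bits of the product are needed, so the uh*vh term drops out entirely and the two cross products are simply added into the high word. A quick host-side check of that identity (plain userspace C, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t u = 0x123456789abcdef0ULL, v = 0x0fedcba987654321ULL;
		uint32_t ul = (uint32_t)u, uh = (uint32_t)(u >> 32);
		uint32_t vl = (uint32_t)v, vh = (uint32_t)(v >> 32);
		uint64_t lo = (uint64_t)ul * vl;		/* the umul_ppmm() part */
		uint64_t cross = (uint64_t)ul * vh + (uint64_t)uh * vl;

		/* uh * vh would only affect bits 64..127, so it never appears */
		assert(u * v == lo + (cross << 32));
		return 0;
	}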
diff --git a/lib/nlattr.c b/lib/nlattr.c index 3d8295c85505..dfa55c873c13 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
| @@ -15,6 +15,22 @@ | |||
| 15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 16 | #include <net/netlink.h> | 16 | #include <net/netlink.h> |
| 17 | 17 | ||
| 18 | /* For these data types, attribute length should be exactly the given | ||
| 19 | * size. However, to maintain compatibility with broken commands, if the | ||
| 20 | * attribute length does not match the expected size a warning is emitted | ||
| 21 | * to the user that the command is sending invalid data and needs to be fixed. | ||
| 22 | */ | ||
| 23 | static const u8 nla_attr_len[NLA_TYPE_MAX+1] = { | ||
| 24 | [NLA_U8] = sizeof(u8), | ||
| 25 | [NLA_U16] = sizeof(u16), | ||
| 26 | [NLA_U32] = sizeof(u32), | ||
| 27 | [NLA_U64] = sizeof(u64), | ||
| 28 | [NLA_S8] = sizeof(s8), | ||
| 29 | [NLA_S16] = sizeof(s16), | ||
| 30 | [NLA_S32] = sizeof(s32), | ||
| 31 | [NLA_S64] = sizeof(s64), | ||
| 32 | }; | ||
| 33 | |||
| 18 | static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { | 34 | static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = { |
| 19 | [NLA_U8] = sizeof(u8), | 35 | [NLA_U8] = sizeof(u8), |
| 20 | [NLA_U16] = sizeof(u16), | 36 | [NLA_U16] = sizeof(u16), |
| @@ -65,6 +81,11 @@ static int validate_nla(const struct nlattr *nla, int maxtype, | |||
| 65 | 81 | ||
| 66 | BUG_ON(pt->type > NLA_TYPE_MAX); | 82 | BUG_ON(pt->type > NLA_TYPE_MAX); |
| 67 | 83 | ||
| 84 | if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { | ||
| 85 | pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", | ||
| 86 | current->comm, type); | ||
| 87 | } | ||
| 88 | |||
| 68 | switch (pt->type) { | 89 | switch (pt->type) { |
| 69 | case NLA_FLAG: | 90 | case NLA_FLAG: |
| 70 | if (attrlen > 0) | 91 | if (attrlen > 0) |
| @@ -191,6 +212,8 @@ nla_policy_len(const struct nla_policy *p, int n) | |||
| 191 | for (i = 0; i < n; i++, p++) { | 212 | for (i = 0; i < n; i++, p++) { |
| 192 | if (p->len) | 213 | if (p->len) |
| 193 | len += nla_total_size(p->len); | 214 | len += nla_total_size(p->len); |
| 215 | else if (nla_attr_len[p->type]) | ||
| 216 | len += nla_total_size(nla_attr_len[p->type]); | ||
| 194 | else if (nla_attr_minlen[p->type]) | 217 | else if (nla_attr_minlen[p->type]) |
| 195 | len += nla_total_size(nla_attr_minlen[p->type]); | 218 | len += nla_total_size(nla_attr_minlen[p->type]); |
| 196 | } | 219 | } |
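With nla_attr_len[] in place, validate_nla() now warns (ratelimited) whenever a fixed-size attribute such as NLA_U32 arrives with a payload that is not exactly sizeof(u32), and nla_policy_len() accounts for those types explicitly. A hedged sketch of a policy that would be checked this way; the FOO_ATTR_* names are invented:

	enum {
		FOO_ATTR_UNSPEC,
		FOO_ATTR_ID,		/* NLA_U32: payload must be exactly 4 bytes */
		FOO_ATTR_NAME,		/* NLA_NUL_STRING: length checked against .len */
		__FOO_ATTR_MAX,
	};
	#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

	static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
		[FOO_ATTR_ID]   = { .type = NLA_U32 },
		[FOO_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
	};

	/* A userspace tool that packs FOO_ATTR_ID as a u16 is still parsed for
	 * compatibility, but parsing now logs
	 * "netlink: '<comm>': attribute type 1 has an invalid length." */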
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c index 46e4c749e4eb..61a6b5aab07e 100644 --- a/lib/nmi_backtrace.c +++ b/lib/nmi_backtrace.c | |||
| @@ -93,8 +93,8 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) | |||
| 93 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { | 93 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
| 94 | arch_spin_lock(&lock); | 94 | arch_spin_lock(&lock); |
| 95 | if (regs && cpu_in_idle(instruction_pointer(regs))) { | 95 | if (regs && cpu_in_idle(instruction_pointer(regs))) { |
| 96 | pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n", | 96 | pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n", |
| 97 | cpu, instruction_pointer(regs)); | 97 | cpu, (void *)instruction_pointer(regs)); |
| 98 | } else { | 98 | } else { |
| 99 | pr_warn("NMI backtrace for cpu %d\n", cpu); | 99 | pr_warn("NMI backtrace for cpu %d\n", cpu); |
| 100 | if (regs) | 100 | if (regs) |
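Printing the idle PC with %pS resolves it to symbol+offset, which stays readable now that plain %p output is hashed (see the test_printf changes further down). A one-line illustration with an invented call site:

	/* Prints e.g. "currently at cpu_idle_poll+0x40/0x90" instead of a raw
	 * hex value; _RET_IP_ merely supplies some text address for the demo. */
	pr_info("currently at %pS\n", (void *)_RET_IP_);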
diff --git a/lib/oid_registry.c b/lib/oid_registry.c index 41b9e50711a7..0bcac6ccb1b2 100644 --- a/lib/oid_registry.c +++ b/lib/oid_registry.c | |||
| @@ -116,14 +116,14 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize) | |||
| 116 | int count; | 116 | int count; |
| 117 | 117 | ||
| 118 | if (v >= end) | 118 | if (v >= end) |
| 119 | return -EBADMSG; | 119 | goto bad; |
| 120 | 120 | ||
| 121 | n = *v++; | 121 | n = *v++; |
| 122 | ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); | 122 | ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40); |
| 123 | if (count >= bufsize) | ||
| 124 | return -ENOBUFS; | ||
| 123 | buffer += count; | 125 | buffer += count; |
| 124 | bufsize -= count; | 126 | bufsize -= count; |
| 125 | if (bufsize == 0) | ||
| 126 | return -ENOBUFS; | ||
| 127 | 127 | ||
| 128 | while (v < end) { | 128 | while (v < end) { |
| 129 | num = 0; | 129 | num = 0; |
| @@ -134,20 +134,24 @@ int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize) | |||
| 134 | num = n & 0x7f; | 134 | num = n & 0x7f; |
| 135 | do { | 135 | do { |
| 136 | if (v >= end) | 136 | if (v >= end) |
| 137 | return -EBADMSG; | 137 | goto bad; |
| 138 | n = *v++; | 138 | n = *v++; |
| 139 | num <<= 7; | 139 | num <<= 7; |
| 140 | num |= n & 0x7f; | 140 | num |= n & 0x7f; |
| 141 | } while (n & 0x80); | 141 | } while (n & 0x80); |
| 142 | } | 142 | } |
| 143 | ret += count = snprintf(buffer, bufsize, ".%lu", num); | 143 | ret += count = snprintf(buffer, bufsize, ".%lu", num); |
| 144 | buffer += count; | 144 | if (count >= bufsize) |
| 145 | if (bufsize <= count) | ||
| 146 | return -ENOBUFS; | 145 | return -ENOBUFS; |
| 146 | buffer += count; | ||
| 147 | bufsize -= count; | 147 | bufsize -= count; |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | return ret; | 150 | return ret; |
| 151 | |||
| 152 | bad: | ||
| 153 | snprintf(buffer, bufsize, "(bad)"); | ||
| 154 | return -EBADMSG; | ||
| 151 | } | 155 | } |
| 152 | EXPORT_SYMBOL_GPL(sprint_oid); | 156 | EXPORT_SYMBOL_GPL(sprint_oid); |
| 153 | 157 | ||
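The sprint_oid() rework checks for snprintf() truncation with count >= bufsize before advancing the buffer pointer, and routes malformed input to a path that writes "(bad)" into the buffer. An illustrative caller, using the DER content octets of OID 2.5.4.3 (commonName):

	static const u8 oid_cn[] = { 0x55, 0x04, 0x03 };	/* 2.5.4.3 */
	char buf[32];
	int ret;

	ret = sprint_oid(oid_cn, sizeof(oid_cn), buf, sizeof(buf));
	/* Success: ret == 7 and buf holds "2.5.4.3".  On truncation the return
	 * is -ENOBUFS; on malformed input it is -EBADMSG and, with this patch,
	 * buf holds "(bad)" rather than a partial dotted string. */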
diff --git a/lib/once.c b/lib/once.c index bfb7420d0de3..8b7d6235217e 100644 --- a/lib/once.c +++ b/lib/once.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | struct once_work { | 7 | struct once_work { |
| 8 | struct work_struct work; | 8 | struct work_struct work; |
| 9 | struct static_key *key; | 9 | struct static_key_true *key; |
| 10 | }; | 10 | }; |
| 11 | 11 | ||
| 12 | static void once_deferred(struct work_struct *w) | 12 | static void once_deferred(struct work_struct *w) |
| @@ -15,11 +15,11 @@ static void once_deferred(struct work_struct *w) | |||
| 15 | 15 | ||
| 16 | work = container_of(w, struct once_work, work); | 16 | work = container_of(w, struct once_work, work); |
| 17 | BUG_ON(!static_key_enabled(work->key)); | 17 | BUG_ON(!static_key_enabled(work->key)); |
| 18 | static_key_slow_dec(work->key); | 18 | static_branch_disable(work->key); |
| 19 | kfree(work); | 19 | kfree(work); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | static void once_disable_jump(struct static_key *key) | 22 | static void once_disable_jump(struct static_key_true *key) |
| 23 | { | 23 | { |
| 24 | struct once_work *w; | 24 | struct once_work *w; |
| 25 | 25 | ||
| @@ -52,7 +52,7 @@ bool __do_once_start(bool *done, unsigned long *flags) | |||
| 52 | } | 52 | } |
| 53 | EXPORT_SYMBOL(__do_once_start); | 53 | EXPORT_SYMBOL(__do_once_start); |
| 54 | 54 | ||
| 55 | void __do_once_done(bool *done, struct static_key *once_key, | 55 | void __do_once_done(bool *done, struct static_key_true *once_key, |
| 56 | unsigned long *flags) | 56 | unsigned long *flags) |
| 57 | __releases(once_lock) | 57 | __releases(once_lock) |
| 58 | { | 58 | { |
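DO_ONCE() now carries a static_key_true and disables the branch with static_branch_disable() once the init function has run. The usual consumer is get_random_once(); a sketch following that pattern, with an invented hash helper and seed name:

	#include <linux/jhash.h>
	#include <linux/once.h>
	#include <linux/random.h>

	static u32 demo_hash_seed __read_mostly;

	static u32 demo_hash(const void *buf, u32 len)
	{
		/* The first caller seeds; the static branch inside DO_ONCE() is
		 * then patched out, so later calls skip this path entirely. */
		get_random_once(&demo_hash_seed, sizeof(demo_hash_seed));

		return jhash(buf, len, demo_hash_seed);
	}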
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8b1feca1230a..c8d55565fafa 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -677,8 +677,7 @@ out: | |||
| 677 | * @root radix tree root | 677 | * @root radix tree root |
| 678 | */ | 678 | */ |
| 679 | static inline bool radix_tree_shrink(struct radix_tree_root *root, | 679 | static inline bool radix_tree_shrink(struct radix_tree_root *root, |
| 680 | radix_tree_update_node_t update_node, | 680 | radix_tree_update_node_t update_node) |
| 681 | void *private) | ||
| 682 | { | 681 | { |
| 683 | bool shrunk = false; | 682 | bool shrunk = false; |
| 684 | 683 | ||
| @@ -739,7 +738,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, | |||
| 739 | if (!radix_tree_is_internal_node(child)) { | 738 | if (!radix_tree_is_internal_node(child)) { |
| 740 | node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; | 739 | node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; |
| 741 | if (update_node) | 740 | if (update_node) |
| 742 | update_node(node, private); | 741 | update_node(node); |
| 743 | } | 742 | } |
| 744 | 743 | ||
| 745 | WARN_ON_ONCE(!list_empty(&node->private_list)); | 744 | WARN_ON_ONCE(!list_empty(&node->private_list)); |
| @@ -752,7 +751,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root, | |||
| 752 | 751 | ||
| 753 | static bool delete_node(struct radix_tree_root *root, | 752 | static bool delete_node(struct radix_tree_root *root, |
| 754 | struct radix_tree_node *node, | 753 | struct radix_tree_node *node, |
| 755 | radix_tree_update_node_t update_node, void *private) | 754 | radix_tree_update_node_t update_node) |
| 756 | { | 755 | { |
| 757 | bool deleted = false; | 756 | bool deleted = false; |
| 758 | 757 | ||
| @@ -762,8 +761,8 @@ static bool delete_node(struct radix_tree_root *root, | |||
| 762 | if (node->count) { | 761 | if (node->count) { |
| 763 | if (node_to_entry(node) == | 762 | if (node_to_entry(node) == |
| 764 | rcu_dereference_raw(root->rnode)) | 763 | rcu_dereference_raw(root->rnode)) |
| 765 | deleted |= radix_tree_shrink(root, update_node, | 764 | deleted |= radix_tree_shrink(root, |
| 766 | private); | 765 | update_node); |
| 767 | return deleted; | 766 | return deleted; |
| 768 | } | 767 | } |
| 769 | 768 | ||
| @@ -1173,7 +1172,6 @@ static int calculate_count(struct radix_tree_root *root, | |||
| 1173 | * @slot: pointer to slot in @node | 1172 | * @slot: pointer to slot in @node |
| 1174 | * @item: new item to store in the slot. | 1173 | * @item: new item to store in the slot. |
| 1175 | * @update_node: callback for changing leaf nodes | 1174 | * @update_node: callback for changing leaf nodes |
| 1176 | * @private: private data to pass to @update_node | ||
| 1177 | * | 1175 | * |
| 1178 | * For use with __radix_tree_lookup(). Caller must hold tree write locked | 1176 | * For use with __radix_tree_lookup(). Caller must hold tree write locked |
| 1179 | * across slot lookup and replacement. | 1177 | * across slot lookup and replacement. |
| @@ -1181,7 +1179,7 @@ static int calculate_count(struct radix_tree_root *root, | |||
| 1181 | void __radix_tree_replace(struct radix_tree_root *root, | 1179 | void __radix_tree_replace(struct radix_tree_root *root, |
| 1182 | struct radix_tree_node *node, | 1180 | struct radix_tree_node *node, |
| 1183 | void __rcu **slot, void *item, | 1181 | void __rcu **slot, void *item, |
| 1184 | radix_tree_update_node_t update_node, void *private) | 1182 | radix_tree_update_node_t update_node) |
| 1185 | { | 1183 | { |
| 1186 | void *old = rcu_dereference_raw(*slot); | 1184 | void *old = rcu_dereference_raw(*slot); |
| 1187 | int exceptional = !!radix_tree_exceptional_entry(item) - | 1185 | int exceptional = !!radix_tree_exceptional_entry(item) - |
| @@ -1201,9 +1199,9 @@ void __radix_tree_replace(struct radix_tree_root *root, | |||
| 1201 | return; | 1199 | return; |
| 1202 | 1200 | ||
| 1203 | if (update_node) | 1201 | if (update_node) |
| 1204 | update_node(node, private); | 1202 | update_node(node); |
| 1205 | 1203 | ||
| 1206 | delete_node(root, node, update_node, private); | 1204 | delete_node(root, node, update_node); |
| 1207 | } | 1205 | } |
| 1208 | 1206 | ||
| 1209 | /** | 1207 | /** |
| @@ -1225,7 +1223,7 @@ void __radix_tree_replace(struct radix_tree_root *root, | |||
| 1225 | void radix_tree_replace_slot(struct radix_tree_root *root, | 1223 | void radix_tree_replace_slot(struct radix_tree_root *root, |
| 1226 | void __rcu **slot, void *item) | 1224 | void __rcu **slot, void *item) |
| 1227 | { | 1225 | { |
| 1228 | __radix_tree_replace(root, NULL, slot, item, NULL, NULL); | 1226 | __radix_tree_replace(root, NULL, slot, item, NULL); |
| 1229 | } | 1227 | } |
| 1230 | EXPORT_SYMBOL(radix_tree_replace_slot); | 1228 | EXPORT_SYMBOL(radix_tree_replace_slot); |
| 1231 | 1229 | ||
| @@ -1242,7 +1240,7 @@ void radix_tree_iter_replace(struct radix_tree_root *root, | |||
| 1242 | const struct radix_tree_iter *iter, | 1240 | const struct radix_tree_iter *iter, |
| 1243 | void __rcu **slot, void *item) | 1241 | void __rcu **slot, void *item) |
| 1244 | { | 1242 | { |
| 1245 | __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); | 1243 | __radix_tree_replace(root, iter->node, slot, item, NULL); |
| 1246 | } | 1244 | } |
| 1247 | 1245 | ||
| 1248 | #ifdef CONFIG_RADIX_TREE_MULTIORDER | 1246 | #ifdef CONFIG_RADIX_TREE_MULTIORDER |
| @@ -1972,7 +1970,6 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |||
| 1972 | * @root: radix tree root | 1970 | * @root: radix tree root |
| 1973 | * @node: node containing @index | 1971 | * @node: node containing @index |
| 1974 | * @update_node: callback for changing leaf nodes | 1972 | * @update_node: callback for changing leaf nodes |
| 1975 | * @private: private data to pass to @update_node | ||
| 1976 | * | 1973 | * |
| 1977 | * After clearing the slot at @index in @node from radix tree | 1974 | * After clearing the slot at @index in @node from radix tree |
| 1978 | * rooted at @root, call this function to attempt freeing the | 1975 | * rooted at @root, call this function to attempt freeing the |
| @@ -1980,10 +1977,9 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); | |||
| 1980 | */ | 1977 | */ |
| 1981 | void __radix_tree_delete_node(struct radix_tree_root *root, | 1978 | void __radix_tree_delete_node(struct radix_tree_root *root, |
| 1982 | struct radix_tree_node *node, | 1979 | struct radix_tree_node *node, |
| 1983 | radix_tree_update_node_t update_node, | 1980 | radix_tree_update_node_t update_node) |
| 1984 | void *private) | ||
| 1985 | { | 1981 | { |
| 1986 | delete_node(root, node, update_node, private); | 1982 | delete_node(root, node, update_node); |
| 1987 | } | 1983 | } |
| 1988 | 1984 | ||
| 1989 | static bool __radix_tree_delete(struct radix_tree_root *root, | 1985 | static bool __radix_tree_delete(struct radix_tree_root *root, |
| @@ -2001,7 +1997,7 @@ static bool __radix_tree_delete(struct radix_tree_root *root, | |||
| 2001 | node_tag_clear(root, node, tag, offset); | 1997 | node_tag_clear(root, node, tag, offset); |
| 2002 | 1998 | ||
| 2003 | replace_slot(slot, NULL, node, -1, exceptional); | 1999 | replace_slot(slot, NULL, node, -1, exceptional); |
| 2004 | return node && delete_node(root, node, NULL, NULL); | 2000 | return node && delete_node(root, node, NULL); |
| 2005 | } | 2001 | } |
| 2006 | 2002 | ||
| 2007 | /** | 2003 | /** |
diff --git a/lib/random32.c b/lib/random32.c index 0a90cb0e0fb6..4aaa76404d56 100644 --- a/lib/random32.c +++ b/lib/random32.c | |||
| @@ -213,11 +213,11 @@ static int __init prandom_init(void) | |||
| 213 | } | 213 | } |
| 214 | core_initcall(prandom_init); | 214 | core_initcall(prandom_init); |
| 215 | 215 | ||
| 216 | static void __prandom_timer(unsigned long dontcare); | 216 | static void __prandom_timer(struct timer_list *unused); |
| 217 | 217 | ||
| 218 | static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0); | 218 | static DEFINE_TIMER(seed_timer, __prandom_timer); |
| 219 | 219 | ||
| 220 | static void __prandom_timer(unsigned long dontcare) | 220 | static void __prandom_timer(struct timer_list *unused) |
| 221 | { | 221 | { |
| 222 | u32 entropy; | 222 | u32 entropy; |
| 223 | unsigned long expires; | 223 | unsigned long expires; |
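This is part of the tree-wide timer conversion: callbacks take a struct timer_list * and DEFINE_TIMER() loses its data/flags arguments. A minimal sketch of the converted pattern with invented names; a timer embedded in a structure would use from_timer() to recover its container instead of a global:

	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include <linux/timer.h>

	static void demo_timer_fn(struct timer_list *unused);
	static DEFINE_TIMER(demo_timer, demo_timer_fn);

	static void demo_timer_fn(struct timer_list *unused)
	{
		pr_info("demo timer fired\n");
		mod_timer(&demo_timer, jiffies + HZ);	/* re-arm one second out */
	}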
diff --git a/lib/rbtree.c b/lib/rbtree.c index ba4a9d165f1b..d3ff682fd4b8 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
| @@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
| 603 | } | 603 | } |
| 604 | EXPORT_SYMBOL(rb_replace_node); | 604 | EXPORT_SYMBOL(rb_replace_node); |
| 605 | 605 | ||
| 606 | void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
| 607 | struct rb_root_cached *root) | ||
| 608 | { | ||
| 609 | rb_replace_node(victim, new, &root->rb_root); | ||
| 610 | |||
| 611 | if (root->rb_leftmost == victim) | ||
| 612 | root->rb_leftmost = new; | ||
| 613 | } | ||
| 614 | EXPORT_SYMBOL(rb_replace_node_cached); | ||
| 615 | |||
| 606 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 616 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
| 607 | struct rb_root *root) | 617 | struct rb_root *root) |
| 608 | { | 618 | { |
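rb_replace_node_cached() substitutes one node for another at the same tree position and, unlike rb_replace_node(), also updates the cached leftmost pointer when the victim held it. A hedged usage sketch; the entry type and tree are invented:

	#include <linux/rbtree.h>

	struct entry {
		struct rb_node node;
		u64 key;
	};

	static struct rb_root_cached demo_tree = RB_ROOT_CACHED;

	/* Swap @old for @new at the same position without rebalancing; the new
	 * helper also fixes rb_leftmost if @old happened to hold it. */
	static void entry_swap(struct entry *old, struct entry *new)
	{
		new->key = old->key;	/* identical key keeps the ordering valid */
		rb_replace_node_cached(&old->node, &new->node, &demo_tree);
	}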
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c index 191a238e5a9d..7d36c1e27ff6 100644 --- a/lib/rbtree_test.c +++ b/lib/rbtree_test.c | |||
| @@ -11,7 +11,7 @@ | |||
| 11 | MODULE_PARM_DESC(name, msg); | 11 | MODULE_PARM_DESC(name, msg); |
| 12 | 12 | ||
| 13 | __param(int, nnodes, 100, "Number of nodes in the rb-tree"); | 13 | __param(int, nnodes, 100, "Number of nodes in the rb-tree"); |
| 14 | __param(int, perf_loops, 100000, "Number of iterations modifying the rb-tree"); | 14 | __param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree"); |
| 15 | __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree"); | 15 | __param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree"); |
| 16 | 16 | ||
| 17 | struct test_node { | 17 | struct test_node { |
diff --git a/lib/scatterlist.c b/lib/scatterlist.c index be7b4dd6b68d..7c1c55f7daaa 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c | |||
| @@ -370,41 +370,49 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) | |||
| 370 | EXPORT_SYMBOL(sg_alloc_table); | 370 | EXPORT_SYMBOL(sg_alloc_table); |
| 371 | 371 | ||
| 372 | /** | 372 | /** |
| 373 | * sg_alloc_table_from_pages - Allocate and initialize an sg table from | 373 | * __sg_alloc_table_from_pages - Allocate and initialize an sg table from |
| 374 | * an array of pages | 374 | * an array of pages |
| 375 | * @sgt: The sg table header to use | 375 | * @sgt: The sg table header to use |
| 376 | * @pages: Pointer to an array of page pointers | 376 | * @pages: Pointer to an array of page pointers |
| 377 | * @n_pages: Number of pages in the pages array | 377 | * @n_pages: Number of pages in the pages array |
| 378 | * @offset: Offset from start of the first page to the start of a buffer | 378 | * @offset: Offset from start of the first page to the start of a buffer |
| 379 | * @size: Number of valid bytes in the buffer (after offset) | 379 | * @size: Number of valid bytes in the buffer (after offset) |
| 380 | * @gfp_mask: GFP allocation mask | 380 | * @max_segment: Maximum size of a scatterlist node in bytes (page aligned) |
| 381 | * @gfp_mask: GFP allocation mask | ||
| 381 | * | 382 | * |
| 382 | * Description: | 383 | * Description: |
| 383 | * Allocate and initialize an sg table from a list of pages. Contiguous | 384 | * Allocate and initialize an sg table from a list of pages. Contiguous |
| 384 | * ranges of the pages are squashed into a single scatterlist node. A user | 385 | * ranges of the pages are squashed into a single scatterlist node up to the |
| 385 | may provide an offset at a start and a size of valid data in a buffer | 386 | maximum size specified in @max_segment. A user may provide an offset at a |
| 386 | * specified by the page array. The returned sg table is released by | 387 | * start and a size of valid data in a buffer specified by the page array. |
| 387 | * sg_free_table. | 388 | * The returned sg table is released by sg_free_table. |
| 388 | * | 389 | * |
| 389 | * Returns: | 390 | * Returns: |
| 390 | * 0 on success, negative error on failure | 391 | * 0 on success, negative error on failure |
| 391 | */ | 392 | */ |
| 392 | int sg_alloc_table_from_pages(struct sg_table *sgt, | 393 | int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, |
| 393 | struct page **pages, unsigned int n_pages, | 394 | unsigned int n_pages, unsigned int offset, |
| 394 | unsigned long offset, unsigned long size, | 395 | unsigned long size, unsigned int max_segment, |
| 395 | gfp_t gfp_mask) | 396 | gfp_t gfp_mask) |
| 396 | { | 397 | { |
| 397 | unsigned int chunks; | 398 | unsigned int chunks, cur_page, seg_len, i; |
| 398 | unsigned int i; | ||
| 399 | unsigned int cur_page; | ||
| 400 | int ret; | 399 | int ret; |
| 401 | struct scatterlist *s; | 400 | struct scatterlist *s; |
| 402 | 401 | ||
| 402 | if (WARN_ON(!max_segment || offset_in_page(max_segment))) | ||
| 403 | return -EINVAL; | ||
| 404 | |||
| 403 | /* compute number of contiguous chunks */ | 405 | /* compute number of contiguous chunks */ |
| 404 | chunks = 1; | 406 | chunks = 1; |
| 405 | for (i = 1; i < n_pages; ++i) | 407 | seg_len = 0; |
| 406 | if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) | 408 | for (i = 1; i < n_pages; i++) { |
| 407 | ++chunks; | 409 | seg_len += PAGE_SIZE; |
| 410 | if (seg_len >= max_segment || | ||
| 411 | page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) { | ||
| 412 | chunks++; | ||
| 413 | seg_len = 0; | ||
| 414 | } | ||
| 415 | } | ||
| 408 | 416 | ||
| 409 | ret = sg_alloc_table(sgt, chunks, gfp_mask); | 417 | ret = sg_alloc_table(sgt, chunks, gfp_mask); |
| 410 | if (unlikely(ret)) | 418 | if (unlikely(ret)) |
| @@ -413,17 +421,21 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
| 413 | /* merging chunks and putting them into the scatterlist */ | 421 | /* merging chunks and putting them into the scatterlist */ |
| 414 | cur_page = 0; | 422 | cur_page = 0; |
| 415 | for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { | 423 | for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { |
| 416 | unsigned long chunk_size; | 424 | unsigned int j, chunk_size; |
| 417 | unsigned int j; | ||
| 418 | 425 | ||
| 419 | /* look for the end of the current chunk */ | 426 | /* look for the end of the current chunk */ |
| 420 | for (j = cur_page + 1; j < n_pages; ++j) | 427 | seg_len = 0; |
| 421 | if (page_to_pfn(pages[j]) != | 428 | for (j = cur_page + 1; j < n_pages; j++) { |
| 429 | seg_len += PAGE_SIZE; | ||
| 430 | if (seg_len >= max_segment || | ||
| 431 | page_to_pfn(pages[j]) != | ||
| 422 | page_to_pfn(pages[j - 1]) + 1) | 432 | page_to_pfn(pages[j - 1]) + 1) |
| 423 | break; | 433 | break; |
| 434 | } | ||
| 424 | 435 | ||
| 425 | chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; | 436 | chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; |
| 426 | sg_set_page(s, pages[cur_page], min(size, chunk_size), offset); | 437 | sg_set_page(s, pages[cur_page], |
| 438 | min_t(unsigned long, size, chunk_size), offset); | ||
| 427 | size -= chunk_size; | 439 | size -= chunk_size; |
| 428 | offset = 0; | 440 | offset = 0; |
| 429 | cur_page = j; | 441 | cur_page = j; |
| @@ -431,6 +443,35 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, | |||
| 431 | 443 | ||
| 432 | return 0; | 444 | return 0; |
| 433 | } | 445 | } |
| 446 | EXPORT_SYMBOL(__sg_alloc_table_from_pages); | ||
| 447 | |||
| 448 | /** | ||
| 449 | * sg_alloc_table_from_pages - Allocate and initialize an sg table from | ||
| 450 | * an array of pages | ||
| 451 | * @sgt: The sg table header to use | ||
| 452 | * @pages: Pointer to an array of page pointers | ||
| 453 | * @n_pages: Number of pages in the pages array | ||
| 454 | * @offset: Offset from start of the first page to the start of a buffer | ||
| 455 | * @size: Number of valid bytes in the buffer (after offset) | ||
| 456 | * @gfp_mask: GFP allocation mask | ||
| 457 | * | ||
| 458 | * Description: | ||
| 459 | * Allocate and initialize an sg table from a list of pages. Contiguous | ||
| 460 | * ranges of the pages are squashed into a single scatterlist node. A user | ||
| 461 | * may provide an offset at a start and a size of valid data in a buffer | ||
| 462 | * specified by the page array. The returned sg table is released by | ||
| 463 | * sg_free_table. | ||
| 464 | * | ||
| 465 | * Returns: | ||
| 466 | * 0 on success, negative error on failure | ||
| 467 | */ | ||
| 468 | int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, | ||
| 469 | unsigned int n_pages, unsigned int offset, | ||
| 470 | unsigned long size, gfp_t gfp_mask) | ||
| 471 | { | ||
| 472 | return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, | ||
| 473 | SCATTERLIST_MAX_SEGMENT, gfp_mask); | ||
| 474 | } | ||
| 434 | EXPORT_SYMBOL(sg_alloc_table_from_pages); | 475 | EXPORT_SYMBOL(sg_alloc_table_from_pages); |
| 435 | 476 | ||
| 436 | void __sg_page_iter_start(struct sg_page_iter *piter, | 477 | void __sg_page_iter_start(struct sg_page_iter *piter, |
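Callers that must bound the size of each scatterlist segment (for example to respect a DMA engine limit) can now call __sg_alloc_table_from_pages() directly, while sg_alloc_table_from_pages() keeps the old unbounded behaviour by passing SCATTERLIST_MAX_SEGMENT. An illustrative wrapper with a made-up 1 MiB cap:

	#include <linux/gfp.h>
	#include <linux/scatterlist.h>
	#include <linux/sizes.h>

	static int map_pages_capped(struct sg_table *sgt, struct page **pages,
				    unsigned int n_pages, unsigned long size)
	{
		/* No single scatterlist node will cover more than 1 MiB, even if
		 * a longer physically contiguous run of pages exists. */
		return __sg_alloc_table_from_pages(sgt, pages, n_pages, 0, size,
						   SZ_1M, GFP_KERNEL);
	}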
diff --git a/lib/string.c b/lib/string.c index 5e8d410a93df..64a9e33f1daa 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -1052,144 +1052,3 @@ void fortify_panic(const char *name) | |||
| 1052 | BUG(); | 1052 | BUG(); |
| 1053 | } | 1053 | } |
| 1054 | EXPORT_SYMBOL(fortify_panic); | 1054 | EXPORT_SYMBOL(fortify_panic); |
| 1055 | |||
| 1056 | #ifdef CONFIG_STRING_SELFTEST | ||
| 1057 | #include <linux/slab.h> | ||
| 1058 | #include <linux/module.h> | ||
| 1059 | |||
| 1060 | static __init int memset16_selftest(void) | ||
| 1061 | { | ||
| 1062 | unsigned i, j, k; | ||
| 1063 | u16 v, *p; | ||
| 1064 | |||
| 1065 | p = kmalloc(256 * 2 * 2, GFP_KERNEL); | ||
| 1066 | if (!p) | ||
| 1067 | return -1; | ||
| 1068 | |||
| 1069 | for (i = 0; i < 256; i++) { | ||
| 1070 | for (j = 0; j < 256; j++) { | ||
| 1071 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 1072 | memset16(p + i, 0xb1b2, j); | ||
| 1073 | for (k = 0; k < 512; k++) { | ||
| 1074 | v = p[k]; | ||
| 1075 | if (k < i) { | ||
| 1076 | if (v != 0xa1a1) | ||
| 1077 | goto fail; | ||
| 1078 | } else if (k < i + j) { | ||
| 1079 | if (v != 0xb1b2) | ||
| 1080 | goto fail; | ||
| 1081 | } else { | ||
| 1082 | if (v != 0xa1a1) | ||
| 1083 | goto fail; | ||
| 1084 | } | ||
| 1085 | } | ||
| 1086 | } | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | fail: | ||
| 1090 | kfree(p); | ||
| 1091 | if (i < 256) | ||
| 1092 | return (i << 24) | (j << 16) | k; | ||
| 1093 | return 0; | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | static __init int memset32_selftest(void) | ||
| 1097 | { | ||
| 1098 | unsigned i, j, k; | ||
| 1099 | u32 v, *p; | ||
| 1100 | |||
| 1101 | p = kmalloc(256 * 2 * 4, GFP_KERNEL); | ||
| 1102 | if (!p) | ||
| 1103 | return -1; | ||
| 1104 | |||
| 1105 | for (i = 0; i < 256; i++) { | ||
| 1106 | for (j = 0; j < 256; j++) { | ||
| 1107 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 1108 | memset32(p + i, 0xb1b2b3b4, j); | ||
| 1109 | for (k = 0; k < 512; k++) { | ||
| 1110 | v = p[k]; | ||
| 1111 | if (k < i) { | ||
| 1112 | if (v != 0xa1a1a1a1) | ||
| 1113 | goto fail; | ||
| 1114 | } else if (k < i + j) { | ||
| 1115 | if (v != 0xb1b2b3b4) | ||
| 1116 | goto fail; | ||
| 1117 | } else { | ||
| 1118 | if (v != 0xa1a1a1a1) | ||
| 1119 | goto fail; | ||
| 1120 | } | ||
| 1121 | } | ||
| 1122 | } | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | fail: | ||
| 1126 | kfree(p); | ||
| 1127 | if (i < 256) | ||
| 1128 | return (i << 24) | (j << 16) | k; | ||
| 1129 | return 0; | ||
| 1130 | } | ||
| 1131 | |||
| 1132 | static __init int memset64_selftest(void) | ||
| 1133 | { | ||
| 1134 | unsigned i, j, k; | ||
| 1135 | u64 v, *p; | ||
| 1136 | |||
| 1137 | p = kmalloc(256 * 2 * 8, GFP_KERNEL); | ||
| 1138 | if (!p) | ||
| 1139 | return -1; | ||
| 1140 | |||
| 1141 | for (i = 0; i < 256; i++) { | ||
| 1142 | for (j = 0; j < 256; j++) { | ||
| 1143 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 1144 | memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j); | ||
| 1145 | for (k = 0; k < 512; k++) { | ||
| 1146 | v = p[k]; | ||
| 1147 | if (k < i) { | ||
| 1148 | if (v != 0xa1a1a1a1a1a1a1a1ULL) | ||
| 1149 | goto fail; | ||
| 1150 | } else if (k < i + j) { | ||
| 1151 | if (v != 0xb1b2b3b4b5b6b7b8ULL) | ||
| 1152 | goto fail; | ||
| 1153 | } else { | ||
| 1154 | if (v != 0xa1a1a1a1a1a1a1a1ULL) | ||
| 1155 | goto fail; | ||
| 1156 | } | ||
| 1157 | } | ||
| 1158 | } | ||
| 1159 | } | ||
| 1160 | |||
| 1161 | fail: | ||
| 1162 | kfree(p); | ||
| 1163 | if (i < 256) | ||
| 1164 | return (i << 24) | (j << 16) | k; | ||
| 1165 | return 0; | ||
| 1166 | } | ||
| 1167 | |||
| 1168 | static __init int string_selftest_init(void) | ||
| 1169 | { | ||
| 1170 | int test, subtest; | ||
| 1171 | |||
| 1172 | test = 1; | ||
| 1173 | subtest = memset16_selftest(); | ||
| 1174 | if (subtest) | ||
| 1175 | goto fail; | ||
| 1176 | |||
| 1177 | test = 2; | ||
| 1178 | subtest = memset32_selftest(); | ||
| 1179 | if (subtest) | ||
| 1180 | goto fail; | ||
| 1181 | |||
| 1182 | test = 3; | ||
| 1183 | subtest = memset64_selftest(); | ||
| 1184 | if (subtest) | ||
| 1185 | goto fail; | ||
| 1186 | |||
| 1187 | pr_info("String selftests succeeded\n"); | ||
| 1188 | return 0; | ||
| 1189 | fail: | ||
| 1190 | pr_crit("String selftest failure %d.%08x\n", test, subtest); | ||
| 1191 | return 0; | ||
| 1192 | } | ||
| 1193 | |||
| 1194 | module_init(string_selftest_init); | ||
| 1195 | #endif /* CONFIG_STRING_SELFTEST */ | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 8c6c83ef57a4..cea19aaf303c 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
| @@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, | |||
| 507 | if (no_iotlb_memory) | 507 | if (no_iotlb_memory) |
| 508 | panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); | 508 | panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); |
| 509 | 509 | ||
| 510 | if (sme_active()) | 510 | if (mem_encrypt_active()) |
| 511 | pr_warn_once("SME is active and system is using DMA bounce buffers\n"); | 511 | pr_warn_once("%s is active and system is using DMA bounce buffers\n", |
| 512 | sme_active() ? "SME" : "SEV"); | ||
| 512 | 513 | ||
| 513 | mask = dma_get_seg_boundary(hwdev); | 514 | mask = dma_get_seg_boundary(hwdev); |
| 514 | 515 | ||
diff --git a/lib/test_find_bit.c b/lib/test_find_bit.c new file mode 100644 index 000000000000..f4394a36f9aa --- /dev/null +++ b/lib/test_find_bit.c | |||
| @@ -0,0 +1,144 @@ | |||
| 1 | /* | ||
| 2 | * Test for find_*_bit functions. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2017 Cavium. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of version 2 of the GNU General Public | ||
| 8 | * License as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, but | ||
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | /* | ||
| 17 | * find_bit functions are widely used in kernel, so the successful boot | ||
| 18 | * is good enough test for correctness. | ||
| 19 | * | ||
| 20 | * This test is focused on performance of traversing bitmaps. Two typical | ||
| 21 | * scenarios are reproduced: | ||
| 22 | * - randomly filled bitmap with approximately equal number of set and | ||
| 23 | * cleared bits; | ||
| 24 | * - sparse bitmap with few set bits at random positions. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/kernel.h> | ||
| 29 | #include <linux/list.h> | ||
| 30 | #include <linux/module.h> | ||
| 31 | #include <linux/printk.h> | ||
| 32 | #include <linux/random.h> | ||
| 33 | |||
| 34 | #define BITMAP_LEN (4096UL * 8 * 10) | ||
| 35 | #define SPARSE 500 | ||
| 36 | |||
| 37 | static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata; | ||
| 38 | |||
| 39 | /* | ||
| 40 | * This is Schlemiel the Painter's algorithm. It should be called after | ||
| 41 | * all other tests for the same bitmap because it sets all bits of bitmap to 1. | ||
| 42 | */ | ||
| 43 | static int __init test_find_first_bit(void *bitmap, unsigned long len) | ||
| 44 | { | ||
| 45 | unsigned long i, cnt; | ||
| 46 | cycles_t cycles; | ||
| 47 | |||
| 48 | cycles = get_cycles(); | ||
| 49 | for (cnt = i = 0; i < len; cnt++) { | ||
| 50 | i = find_first_bit(bitmap, len); | ||
| 51 | __clear_bit(i, bitmap); | ||
| 52 | } | ||
| 53 | cycles = get_cycles() - cycles; | ||
| 54 | pr_err("find_first_bit:\t\t%llu cycles,\t%ld iterations\n", | ||
| 55 | (u64)cycles, cnt); | ||
| 56 | |||
| 57 | return 0; | ||
| 58 | } | ||
| 59 | |||
| 60 | static int __init test_find_next_bit(const void *bitmap, unsigned long len) | ||
| 61 | { | ||
| 62 | unsigned long i, cnt; | ||
| 63 | cycles_t cycles; | ||
| 64 | |||
| 65 | cycles = get_cycles(); | ||
| 66 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) | ||
| 67 | i = find_next_bit(bitmap, BITMAP_LEN, i) + 1; | ||
| 68 | cycles = get_cycles() - cycles; | ||
| 69 | pr_err("find_next_bit:\t\t%llu cycles,\t%ld iterations\n", | ||
| 70 | (u64)cycles, cnt); | ||
| 71 | |||
| 72 | return 0; | ||
| 73 | } | ||
| 74 | |||
| 75 | static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len) | ||
| 76 | { | ||
| 77 | unsigned long i, cnt; | ||
| 78 | cycles_t cycles; | ||
| 79 | |||
| 80 | cycles = get_cycles(); | ||
| 81 | for (cnt = i = 0; i < BITMAP_LEN; cnt++) | ||
| 82 | i = find_next_zero_bit(bitmap, len, i) + 1; | ||
| 83 | cycles = get_cycles() - cycles; | ||
| 84 | pr_err("find_next_zero_bit:\t%llu cycles,\t%ld iterations\n", | ||
| 85 | (u64)cycles, cnt); | ||
| 86 | |||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | static int __init test_find_last_bit(const void *bitmap, unsigned long len) | ||
| 91 | { | ||
| 92 | unsigned long l, cnt = 0; | ||
| 93 | cycles_t cycles; | ||
| 94 | |||
| 95 | cycles = get_cycles(); | ||
| 96 | do { | ||
| 97 | cnt++; | ||
| 98 | l = find_last_bit(bitmap, len); | ||
| 99 | if (l >= len) | ||
| 100 | break; | ||
| 101 | len = l; | ||
| 102 | } while (len); | ||
| 103 | cycles = get_cycles() - cycles; | ||
| 104 | pr_err("find_last_bit:\t\t%llu cycles,\t%ld iterations\n", | ||
| 105 | (u64)cycles, cnt); | ||
| 106 | |||
| 107 | return 0; | ||
| 108 | } | ||
| 109 | |||
| 110 | static int __init find_bit_test(void) | ||
| 111 | { | ||
| 112 | unsigned long nbits = BITMAP_LEN / SPARSE; | ||
| 113 | |||
| 114 | pr_err("\nStart testing find_bit() with random-filled bitmap\n"); | ||
| 115 | |||
| 116 | get_random_bytes(bitmap, sizeof(bitmap)); | ||
| 117 | |||
| 118 | test_find_next_bit(bitmap, BITMAP_LEN); | ||
| 119 | test_find_next_zero_bit(bitmap, BITMAP_LEN); | ||
| 120 | test_find_last_bit(bitmap, BITMAP_LEN); | ||
| 121 | test_find_first_bit(bitmap, BITMAP_LEN); | ||
| 122 | |||
| 123 | pr_err("\nStart testing find_bit() with sparse bitmap\n"); | ||
| 124 | |||
| 125 | bitmap_zero(bitmap, BITMAP_LEN); | ||
| 126 | |||
| 127 | while (nbits--) | ||
| 128 | __set_bit(prandom_u32() % BITMAP_LEN, bitmap); | ||
| 129 | |||
| 130 | test_find_next_bit(bitmap, BITMAP_LEN); | ||
| 131 | test_find_next_zero_bit(bitmap, BITMAP_LEN); | ||
| 132 | test_find_last_bit(bitmap, BITMAP_LEN); | ||
| 133 | test_find_first_bit(bitmap, BITMAP_LEN); | ||
| 134 | |||
| 135 | return 0; | ||
| 136 | } | ||
| 137 | module_init(find_bit_test); | ||
| 138 | |||
| 139 | static void __exit test_find_bit_cleanup(void) | ||
| 140 | { | ||
| 141 | } | ||
| 142 | module_exit(test_find_bit_cleanup); | ||
| 143 | |||
| 144 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_kasan.c b/lib/test_kasan.c index a25c9763fce1..ef1a3ac1397e 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c | |||
| @@ -353,10 +353,9 @@ static noinline void __init memcg_accounted_kmem_cache(void) | |||
| 353 | */ | 353 | */ |
| 354 | for (i = 0; i < 5; i++) { | 354 | for (i = 0; i < 5; i++) { |
| 355 | p = kmem_cache_alloc(cache, GFP_KERNEL); | 355 | p = kmem_cache_alloc(cache, GFP_KERNEL); |
| 356 | if (!p) { | 356 | if (!p) |
| 357 | pr_err("Allocation failed\n"); | ||
| 358 | goto free_cache; | 357 | goto free_cache; |
| 359 | } | 358 | |
| 360 | kmem_cache_free(cache, p); | 359 | kmem_cache_free(cache, p); |
| 361 | msleep(100); | 360 | msleep(100); |
| 362 | } | 361 | } |
diff --git a/lib/test_kmod.c b/lib/test_kmod.c index fba78d25e825..337f408b4de6 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c | |||
| @@ -783,10 +783,8 @@ static int kmod_config_sync_info(struct kmod_test_device *test_dev) | |||
| 783 | free_test_dev_info(test_dev); | 783 | free_test_dev_info(test_dev); |
| 784 | test_dev->info = vzalloc(config->num_threads * | 784 | test_dev->info = vzalloc(config->num_threads * |
| 785 | sizeof(struct kmod_test_device_info)); | 785 | sizeof(struct kmod_test_device_info)); |
| 786 | if (!test_dev->info) { | 786 | if (!test_dev->info) |
| 787 | dev_err(test_dev->dev, "Cannot alloc test_dev info\n"); | ||
| 788 | return -ENOMEM; | 787 | return -ENOMEM; |
| 789 | } | ||
| 790 | 788 | ||
| 791 | return 0; | 789 | return 0; |
| 792 | } | 790 | } |
| @@ -1089,10 +1087,8 @@ static struct kmod_test_device *alloc_test_dev_kmod(int idx) | |||
| 1089 | struct miscdevice *misc_dev; | 1087 | struct miscdevice *misc_dev; |
| 1090 | 1088 | ||
| 1091 | test_dev = vzalloc(sizeof(struct kmod_test_device)); | 1089 | test_dev = vzalloc(sizeof(struct kmod_test_device)); |
| 1092 | if (!test_dev) { | 1090 | if (!test_dev) |
| 1093 | pr_err("Cannot alloc test_dev\n"); | ||
| 1094 | goto err_out; | 1091 | goto err_out; |
| 1095 | } | ||
| 1096 | 1092 | ||
| 1097 | mutex_init(&test_dev->config_mutex); | 1093 | mutex_init(&test_dev->config_mutex); |
| 1098 | mutex_init(&test_dev->trigger_mutex); | 1094 | mutex_init(&test_dev->trigger_mutex); |
diff --git a/lib/test_list_sort.c b/lib/test_list_sort.c index 28e817387b04..5474f3f3e41d 100644 --- a/lib/test_list_sort.c +++ b/lib/test_list_sort.c | |||
| @@ -76,17 +76,14 @@ static int __init list_sort_test(void) | |||
| 76 | pr_debug("start testing list_sort()\n"); | 76 | pr_debug("start testing list_sort()\n"); |
| 77 | 77 | ||
| 78 | elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL); | 78 | elts = kcalloc(TEST_LIST_LEN, sizeof(*elts), GFP_KERNEL); |
| 79 | if (!elts) { | 79 | if (!elts) |
| 80 | pr_err("error: cannot allocate memory\n"); | ||
| 81 | return err; | 80 | return err; |
| 82 | } | ||
| 83 | 81 | ||
| 84 | for (i = 0; i < TEST_LIST_LEN; i++) { | 82 | for (i = 0; i < TEST_LIST_LEN; i++) { |
| 85 | el = kmalloc(sizeof(*el), GFP_KERNEL); | 83 | el = kmalloc(sizeof(*el), GFP_KERNEL); |
| 86 | if (!el) { | 84 | if (!el) |
| 87 | pr_err("error: cannot allocate memory\n"); | ||
| 88 | goto exit; | 85 | goto exit; |
| 89 | } | 86 | |
| 90 | /* force some equivalencies */ | 87 | /* force some equivalencies */ |
| 91 | el->value = prandom_u32() % (TEST_LIST_LEN / 3); | 88 | el->value = prandom_u32() % (TEST_LIST_LEN / 3); |
| 92 | el->serial = i; | 89 | el->serial = i; |
diff --git a/lib/test_printf.c b/lib/test_printf.c index 563f10e6876a..71ebfa43ad05 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c | |||
| @@ -24,24 +24,6 @@ | |||
| 24 | #define PAD_SIZE 16 | 24 | #define PAD_SIZE 16 |
| 25 | #define FILL_CHAR '$' | 25 | #define FILL_CHAR '$' |
| 26 | 26 | ||
| 27 | #define PTR1 ((void*)0x01234567) | ||
| 28 | #define PTR2 ((void*)(long)(int)0xfedcba98) | ||
| 29 | |||
| 30 | #if BITS_PER_LONG == 64 | ||
| 31 | #define PTR1_ZEROES "000000000" | ||
| 32 | #define PTR1_SPACES " " | ||
| 33 | #define PTR1_STR "1234567" | ||
| 34 | #define PTR2_STR "fffffffffedcba98" | ||
| 35 | #define PTR_WIDTH 16 | ||
| 36 | #else | ||
| 37 | #define PTR1_ZEROES "0" | ||
| 38 | #define PTR1_SPACES " " | ||
| 39 | #define PTR1_STR "1234567" | ||
| 40 | #define PTR2_STR "fedcba98" | ||
| 41 | #define PTR_WIDTH 8 | ||
| 42 | #endif | ||
| 43 | #define PTR_WIDTH_STR stringify(PTR_WIDTH) | ||
| 44 | |||
| 45 | static unsigned total_tests __initdata; | 27 | static unsigned total_tests __initdata; |
| 46 | static unsigned failed_tests __initdata; | 28 | static unsigned failed_tests __initdata; |
| 47 | static char *test_buffer __initdata; | 29 | static char *test_buffer __initdata; |
| @@ -217,30 +199,79 @@ test_string(void) | |||
| 217 | test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); | 199 | test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c"); |
| 218 | } | 200 | } |
| 219 | 201 | ||
| 202 | #define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */ | ||
| 203 | |||
| 204 | #if BITS_PER_LONG == 64 | ||
| 205 | |||
| 206 | #define PTR_WIDTH 16 | ||
| 207 | #define PTR ((void *)0xffff0123456789ab) | ||
| 208 | #define PTR_STR "ffff0123456789ab" | ||
| 209 | #define ZEROS "00000000" /* hex 32 zero bits */ | ||
| 210 | |||
| 211 | static int __init | ||
| 212 | plain_format(void) | ||
| 213 | { | ||
| 214 | char buf[PLAIN_BUF_SIZE]; | ||
| 215 | int nchars; | ||
| 216 | |||
| 217 | nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR); | ||
| 218 | |||
| 219 | if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0) | ||
| 220 | return -1; | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | #else | ||
| 226 | |||
| 227 | #define PTR_WIDTH 8 | ||
| 228 | #define PTR ((void *)0x456789ab) | ||
| 229 | #define PTR_STR "456789ab" | ||
| 230 | |||
| 231 | static int __init | ||
| 232 | plain_format(void) | ||
| 233 | { | ||
| 234 | /* Format is implicitly tested for 32 bit machines by plain_hash() */ | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | #endif /* BITS_PER_LONG == 64 */ | ||
| 239 | |||
| 240 | static int __init | ||
| 241 | plain_hash(void) | ||
| 242 | { | ||
| 243 | char buf[PLAIN_BUF_SIZE]; | ||
| 244 | int nchars; | ||
| 245 | |||
| 246 | nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR); | ||
| 247 | |||
| 248 | if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0) | ||
| 249 | return -1; | ||
| 250 | |||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | /* | ||
| 255 | * We can't use test() to test %p because we don't know what output to expect | ||
| 256 | * after an address is hashed. | ||
| 257 | */ | ||
| 220 | static void __init | 258 | static void __init |
| 221 | plain(void) | 259 | plain(void) |
| 222 | { | 260 | { |
| 223 | test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2); | 261 | int err; |
| 224 | /* | ||
| 225 | * The field width is overloaded for some %p extensions to | ||
| 226 | * pass another piece of information. For plain pointers, the | ||
| 227 | * behaviour is slightly odd: One cannot pass either the 0 | ||
| 228 | * flag nor a precision to %p without gcc complaining, and if | ||
| 229 | * one explicitly gives a field width, the number is no longer | ||
| 230 | * zero-padded. | ||
| 231 | */ | ||
| 232 | test("|" PTR1_STR PTR1_SPACES " | " PTR1_SPACES PTR1_STR "|", | ||
| 233 | "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1); | ||
| 234 | test("|" PTR2_STR " | " PTR2_STR "|", | ||
| 235 | "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2); | ||
| 236 | 262 | ||
| 237 | /* | 263 | err = plain_hash(); |
| 238 | * Unrecognized %p extensions are treated as plain %p, but the | 264 | if (err) { |
| 239 | * alphanumeric suffix is ignored (that is, does not occur in | 265 | pr_warn("plain 'p' does not appear to be hashed\n"); |
| 240 | * the output.) | 266 | failed_tests++; |
| 241 | */ | 267 | return; |
| 242 | test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1); | 268 | } |
| 243 | test("|"PTR2_STR"|", "|%p0y|", PTR2); | 269 | |
| 270 | err = plain_format(); | ||
| 271 | if (err) { | ||
| 272 | pr_warn("hashing plain 'p' has unexpected format\n"); | ||
| 273 | failed_tests++; | ||
| 274 | } | ||
| 244 | } | 275 | } |
| 245 | 276 | ||
| 246 | static void __init | 277 | static void __init |
| @@ -251,6 +282,7 @@ symbol_ptr(void) | |||
| 251 | static void __init | 282 | static void __init |
| 252 | kernel_ptr(void) | 283 | kernel_ptr(void) |
| 253 | { | 284 | { |
| 285 | /* We can't test this without access to kptr_restrict. */ | ||
| 254 | } | 286 | } |
| 255 | 287 | ||
| 256 | static void __init | 288 | static void __init |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 0ffca990a833..8e83cbdc049c 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
| @@ -23,14 +23,15 @@ | |||
| 23 | #include <linux/semaphore.h> | 23 | #include <linux/semaphore.h> |
| 24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
| 25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
| 26 | #include <linux/random.h> | ||
| 26 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
| 27 | 28 | ||
| 28 | #define MAX_ENTRIES 1000000 | 29 | #define MAX_ENTRIES 1000000 |
| 29 | #define TEST_INSERT_FAIL INT_MAX | 30 | #define TEST_INSERT_FAIL INT_MAX |
| 30 | 31 | ||
| 31 | static int entries = 50000; | 32 | static int parm_entries = 50000; |
| 32 | module_param(entries, int, 0); | 33 | module_param(parm_entries, int, 0); |
| 33 | MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)"); | 34 | MODULE_PARM_DESC(parm_entries, "Number of entries to add (default: 50000)"); |
| 34 | 35 | ||
| 35 | static int runs = 4; | 36 | static int runs = 4; |
| 36 | module_param(runs, int, 0); | 37 | module_param(runs, int, 0); |
| @@ -66,14 +67,18 @@ struct test_obj { | |||
| 66 | struct rhash_head node; | 67 | struct rhash_head node; |
| 67 | }; | 68 | }; |
| 68 | 69 | ||
| 70 | struct test_obj_rhl { | ||
| 71 | struct test_obj_val value; | ||
| 72 | struct rhlist_head list_node; | ||
| 73 | }; | ||
| 74 | |||
| 69 | struct thread_data { | 75 | struct thread_data { |
| 76 | unsigned int entries; | ||
| 70 | int id; | 77 | int id; |
| 71 | struct task_struct *task; | 78 | struct task_struct *task; |
| 72 | struct test_obj *objs; | 79 | struct test_obj *objs; |
| 73 | }; | 80 | }; |
| 74 | 81 | ||
| 75 | static struct test_obj array[MAX_ENTRIES]; | ||
| 76 | |||
| 77 | static struct rhashtable_params test_rht_params = { | 82 | static struct rhashtable_params test_rht_params = { |
| 78 | .head_offset = offsetof(struct test_obj, node), | 83 | .head_offset = offsetof(struct test_obj, node), |
| 79 | .key_offset = offsetof(struct test_obj, value), | 84 | .key_offset = offsetof(struct test_obj, value), |
| @@ -85,7 +90,7 @@ static struct rhashtable_params test_rht_params = { | |||
| 85 | static struct semaphore prestart_sem; | 90 | static struct semaphore prestart_sem; |
| 86 | static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); | 91 | static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); |
| 87 | 92 | ||
| 88 | static int insert_retry(struct rhashtable *ht, struct rhash_head *obj, | 93 | static int insert_retry(struct rhashtable *ht, struct test_obj *obj, |
| 89 | const struct rhashtable_params params) | 94 | const struct rhashtable_params params) |
| 90 | { | 95 | { |
| 91 | int err, retries = -1, enomem_retries = 0; | 96 | int err, retries = -1, enomem_retries = 0; |
| @@ -93,7 +98,7 @@ static int insert_retry(struct rhashtable *ht, struct rhash_head *obj, | |||
| 93 | do { | 98 | do { |
| 94 | retries++; | 99 | retries++; |
| 95 | cond_resched(); | 100 | cond_resched(); |
| 96 | err = rhashtable_insert_fast(ht, obj, params); | 101 | err = rhashtable_insert_fast(ht, &obj->node, params); |
| 97 | if (err == -ENOMEM && enomem_retry) { | 102 | if (err == -ENOMEM && enomem_retry) { |
| 98 | enomem_retries++; | 103 | enomem_retries++; |
| 99 | err = -EBUSY; | 104 | err = -EBUSY; |
| @@ -107,11 +112,12 @@ static int insert_retry(struct rhashtable *ht, struct rhash_head *obj, | |||
| 107 | return err ? : retries; | 112 | return err ? : retries; |
| 108 | } | 113 | } |
| 109 | 114 | ||
| 110 | static int __init test_rht_lookup(struct rhashtable *ht) | 115 | static int __init test_rht_lookup(struct rhashtable *ht, struct test_obj *array, |
| 116 | unsigned int entries) | ||
| 111 | { | 117 | { |
| 112 | unsigned int i; | 118 | unsigned int i; |
| 113 | 119 | ||
| 114 | for (i = 0; i < entries * 2; i++) { | 120 | for (i = 0; i < entries; i++) { |
| 115 | struct test_obj *obj; | 121 | struct test_obj *obj; |
| 116 | bool expected = !(i % 2); | 122 | bool expected = !(i % 2); |
| 117 | struct test_obj_val key = { | 123 | struct test_obj_val key = { |
| @@ -144,7 +150,7 @@ static int __init test_rht_lookup(struct rhashtable *ht) | |||
| 144 | return 0; | 150 | return 0; |
| 145 | } | 151 | } |
| 146 | 152 | ||
| 147 | static void test_bucket_stats(struct rhashtable *ht) | 153 | static void test_bucket_stats(struct rhashtable *ht, unsigned int entries) |
| 148 | { | 154 | { |
| 149 | unsigned int err, total = 0, chain_len = 0; | 155 | unsigned int err, total = 0, chain_len = 0; |
| 150 | struct rhashtable_iter hti; | 156 | struct rhashtable_iter hti; |
| @@ -186,7 +192,8 @@ static void test_bucket_stats(struct rhashtable *ht) | |||
| 186 | pr_warn("Test failed: Total count mismatch ^^^"); | 192 | pr_warn("Test failed: Total count mismatch ^^^"); |
| 187 | } | 193 | } |
| 188 | 194 | ||
| 189 | static s64 __init test_rhashtable(struct rhashtable *ht) | 195 | static s64 __init test_rhashtable(struct rhashtable *ht, struct test_obj *array, |
| 196 | unsigned int entries) | ||
| 190 | { | 197 | { |
| 191 | struct test_obj *obj; | 198 | struct test_obj *obj; |
| 192 | int err; | 199 | int err; |
| @@ -203,7 +210,7 @@ static s64 __init test_rhashtable(struct rhashtable *ht) | |||
| 203 | struct test_obj *obj = &array[i]; | 210 | struct test_obj *obj = &array[i]; |
| 204 | 211 | ||
| 205 | obj->value.id = i * 2; | 212 | obj->value.id = i * 2; |
| 206 | err = insert_retry(ht, &obj->node, test_rht_params); | 213 | err = insert_retry(ht, obj, test_rht_params); |
| 207 | if (err > 0) | 214 | if (err > 0) |
| 208 | insert_retries += err; | 215 | insert_retries += err; |
| 209 | else if (err) | 216 | else if (err) |
| @@ -214,12 +221,12 @@ static s64 __init test_rhashtable(struct rhashtable *ht) | |||
| 214 | pr_info(" %u insertions retried due to memory pressure\n", | 221 | pr_info(" %u insertions retried due to memory pressure\n", |
| 215 | insert_retries); | 222 | insert_retries); |
| 216 | 223 | ||
| 217 | test_bucket_stats(ht); | 224 | test_bucket_stats(ht, entries); |
| 218 | rcu_read_lock(); | 225 | rcu_read_lock(); |
| 219 | test_rht_lookup(ht); | 226 | test_rht_lookup(ht, array, entries); |
| 220 | rcu_read_unlock(); | 227 | rcu_read_unlock(); |
| 221 | 228 | ||
| 222 | test_bucket_stats(ht); | 229 | test_bucket_stats(ht, entries); |
| 223 | 230 | ||
| 224 | pr_info(" Deleting %d keys\n", entries); | 231 | pr_info(" Deleting %d keys\n", entries); |
| 225 | for (i = 0; i < entries; i++) { | 232 | for (i = 0; i < entries; i++) { |
| @@ -244,9 +251,227 @@ static s64 __init test_rhashtable(struct rhashtable *ht) | |||
| 244 | } | 251 | } |
| 245 | 252 | ||
| 246 | static struct rhashtable ht; | 253 | static struct rhashtable ht; |
| 254 | static struct rhltable rhlt; | ||
| 255 | |||
| 256 | static int __init test_rhltable(unsigned int entries) | ||
| 257 | { | ||
| 258 | struct test_obj_rhl *rhl_test_objects; | ||
| 259 | unsigned long *obj_in_table; | ||
| 260 | unsigned int i, j, k; | ||
| 261 | int ret, err; | ||
| 262 | |||
| 263 | if (entries == 0) | ||
| 264 | entries = 1; | ||
| 265 | |||
| 266 | rhl_test_objects = vzalloc(sizeof(*rhl_test_objects) * entries); | ||
| 267 | if (!rhl_test_objects) | ||
| 268 | return -ENOMEM; | ||
| 269 | |||
| 270 | ret = -ENOMEM; | ||
| 271 | obj_in_table = vzalloc(BITS_TO_LONGS(entries) * sizeof(unsigned long)); | ||
| 272 | if (!obj_in_table) | ||
| 273 | goto out_free; | ||
| 274 | |||
| 275 | /* nulls_base not supported in rhlist interface */ | ||
| 276 | test_rht_params.nulls_base = 0; | ||
| 277 | err = rhltable_init(&rhlt, &test_rht_params); | ||
| 278 | if (WARN_ON(err)) | ||
| 279 | goto out_free; | ||
| 280 | |||
| 281 | k = prandom_u32(); | ||
| 282 | ret = 0; | ||
| 283 | for (i = 0; i < entries; i++) { | ||
| 284 | rhl_test_objects[i].value.id = k; | ||
| 285 | err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, | ||
| 286 | test_rht_params); | ||
| 287 | if (WARN(err, "error %d on element %d\n", err, i)) | ||
| 288 | break; | ||
| 289 | if (err == 0) | ||
| 290 | set_bit(i, obj_in_table); | ||
| 291 | } | ||
| 292 | |||
| 293 | if (err) | ||
| 294 | ret = err; | ||
| 295 | |||
| 296 | pr_info("test %d add/delete pairs into rhlist\n", entries); | ||
| 297 | for (i = 0; i < entries; i++) { | ||
| 298 | struct rhlist_head *h, *pos; | ||
| 299 | struct test_obj_rhl *obj; | ||
| 300 | struct test_obj_val key = { | ||
| 301 | .id = k, | ||
| 302 | }; | ||
| 303 | bool found; | ||
| 304 | |||
| 305 | rcu_read_lock(); | ||
| 306 | h = rhltable_lookup(&rhlt, &key, test_rht_params); | ||
| 307 | if (WARN(!h, "key not found during iteration %d of %d", i, entries)) { | ||
| 308 | rcu_read_unlock(); | ||
| 309 | break; | ||
| 310 | } | ||
| 311 | |||
| 312 | if (i) { | ||
| 313 | j = i - 1; | ||
| 314 | rhl_for_each_entry_rcu(obj, pos, h, list_node) { | ||
| 315 | if (WARN(pos == &rhl_test_objects[j].list_node, "old element found, should be gone")) | ||
| 316 | break; | ||
| 317 | } | ||
| 318 | } | ||
| 319 | |||
| 320 | cond_resched_rcu(); | ||
| 321 | |||
| 322 | found = false; | ||
| 323 | |||
| 324 | rhl_for_each_entry_rcu(obj, pos, h, list_node) { | ||
| 325 | if (pos == &rhl_test_objects[i].list_node) { | ||
| 326 | found = true; | ||
| 327 | break; | ||
| 328 | } | ||
| 329 | } | ||
| 330 | |||
| 331 | rcu_read_unlock(); | ||
| 332 | |||
| 333 | if (WARN(!found, "element %d not found", i)) | ||
| 334 | break; | ||
| 335 | |||
| 336 | err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 337 | WARN(err, "rhltable_remove: err %d for iteration %d\n", err, i); | ||
| 338 | if (err == 0) | ||
| 339 | clear_bit(i, obj_in_table); | ||
| 340 | } | ||
| 341 | |||
| 342 | if (ret == 0 && err) | ||
| 343 | ret = err; | ||
| 344 | |||
| 345 | for (i = 0; i < entries; i++) { | ||
| 346 | WARN(test_bit(i, obj_in_table), "elem %d allegedly still present", i); | ||
| 347 | |||
| 348 | err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, | ||
| 349 | test_rht_params); | ||
| 350 | if (WARN(err, "error %d on element %d\n", err, i)) | ||
| 351 | break; | ||
| 352 | if (err == 0) | ||
| 353 | set_bit(i, obj_in_table); | ||
| 354 | } | ||
| 355 | |||
| 356 | pr_info("test %d random rhlist add/delete operations\n", entries); | ||
| 357 | for (j = 0; j < entries; j++) { | ||
| 358 | u32 i = prandom_u32_max(entries); | ||
| 359 | u32 prand = prandom_u32(); | ||
| 360 | |||
| 361 | cond_resched(); | ||
| 362 | |||
| 363 | if (prand == 0) | ||
| 364 | prand = prandom_u32(); | ||
| 365 | |||
| 366 | if (prand & 1) { | ||
| 367 | prand >>= 1; | ||
| 368 | continue; | ||
| 369 | } | ||
| 370 | |||
| 371 | err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 372 | if (test_bit(i, obj_in_table)) { | ||
| 373 | clear_bit(i, obj_in_table); | ||
| 374 | if (WARN(err, "cannot remove element at slot %d", i)) | ||
| 375 | continue; | ||
| 376 | } else { | ||
| 377 | if (WARN(err != -ENOENT, "removed non-existent element %d, error %d not %d", | ||
| 378 | i, err, -ENOENT)) | ||
| 379 | continue; | ||
| 380 | } | ||
| 381 | |||
| 382 | if (prand & 1) { | ||
| 383 | prand >>= 1; | ||
| 384 | continue; | ||
| 385 | } | ||
| 386 | |||
| 387 | err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 388 | if (err == 0) { | ||
| 389 | if (WARN(test_and_set_bit(i, obj_in_table), "succeeded in inserting same object %d", i)) | ||
| 390 | continue; | ||
| 391 | } else { | ||
| 392 | if (WARN(!test_bit(i, obj_in_table), "failed to insert object %d", i)) | ||
| 393 | continue; | ||
| 394 | } | ||
| 395 | |||
| 396 | if (prand & 1) { | ||
| 397 | prand >>= 1; | ||
| 398 | continue; | ||
| 399 | } | ||
| 400 | |||
| 401 | i = prandom_u32_max(entries); | ||
| 402 | if (test_bit(i, obj_in_table)) { | ||
| 403 | err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 404 | WARN(err, "cannot remove element at slot %d", i); | ||
| 405 | if (err == 0) | ||
| 406 | clear_bit(i, obj_in_table); | ||
| 407 | } else { | ||
| 408 | err = rhltable_insert(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 409 | WARN(err, "failed to insert object %d", i); | ||
| 410 | if (err == 0) | ||
| 411 | set_bit(i, obj_in_table); | ||
| 412 | } | ||
| 413 | } | ||
| 414 | |||
| 415 | for (i = 0; i < entries; i++) { | ||
| 416 | cond_resched(); | ||
| 417 | err = rhltable_remove(&rhlt, &rhl_test_objects[i].list_node, test_rht_params); | ||
| 418 | if (test_bit(i, obj_in_table)) { | ||
| 419 | if (WARN(err, "cannot remove element at slot %d", i)) | ||
| 420 | continue; | ||
| 421 | } else { | ||
| 422 | if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d", | ||
| 423 | err, -ENOENT)) | ||
| 424 | continue; | ||
| 425 | } | ||
| 426 | } | ||
| 427 | |||
| 428 | rhltable_destroy(&rhlt); | ||
| 429 | out_free: | ||
| 430 | vfree(rhl_test_objects); | ||
| 431 | vfree(obj_in_table); | ||
| 432 | return ret; | ||
| 433 | } | ||
| 434 | |||
| 435 | static int __init test_rhashtable_max(struct test_obj *array, | ||
| 436 | unsigned int entries) | ||
| 437 | { | ||
| 438 | unsigned int i, insert_retries = 0; | ||
| 439 | int err; | ||
| 440 | |||
| 441 | test_rht_params.max_size = roundup_pow_of_two(entries / 8); | ||
| 442 | err = rhashtable_init(&ht, &test_rht_params); | ||
| 443 | if (err) | ||
| 444 | return err; | ||
| 445 | |||
| 446 | for (i = 0; i < ht.max_elems; i++) { | ||
| 447 | struct test_obj *obj = &array[i]; | ||
| 448 | |||
| 449 | obj->value.id = i * 2; | ||
| 450 | err = insert_retry(&ht, obj, test_rht_params); | ||
| 451 | if (err > 0) | ||
| 452 | insert_retries += err; | ||
| 453 | else if (err) | ||
| 454 | return err; | ||
| 455 | } | ||
| 456 | |||
| 457 | err = insert_retry(&ht, &array[ht.max_elems], test_rht_params); | ||
| 458 | if (err == -E2BIG) { | ||
| 459 | err = 0; | ||
| 460 | } else { | ||
| 461 | pr_info("insert element %u should have failed with %d, got %d\n", | ||
| 462 | ht.max_elems, -E2BIG, err); | ||
| 463 | if (err == 0) | ||
| 464 | err = -1; | ||
| 465 | } | ||
| 466 | |||
| 467 | rhashtable_destroy(&ht); | ||
| 468 | |||
| 469 | return err; | ||
| 470 | } | ||
| 247 | 471 | ||
| 248 | static int thread_lookup_test(struct thread_data *tdata) | 472 | static int thread_lookup_test(struct thread_data *tdata) |
| 249 | { | 473 | { |
| 474 | unsigned int entries = tdata->entries; | ||
| 250 | int i, err = 0; | 475 | int i, err = 0; |
| 251 | 476 | ||
| 252 | for (i = 0; i < entries; i++) { | 477 | for (i = 0; i < entries; i++) { |
| @@ -283,10 +508,10 @@ static int threadfunc(void *data) | |||
| 283 | if (down_interruptible(&startup_sem)) | 508 | if (down_interruptible(&startup_sem)) |
| 284 | pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); | 509 | pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); |
| 285 | 510 | ||
| 286 | for (i = 0; i < entries; i++) { | 511 | for (i = 0; i < tdata->entries; i++) { |
| 287 | tdata->objs[i].value.id = i; | 512 | tdata->objs[i].value.id = i; |
| 288 | tdata->objs[i].value.tid = tdata->id; | 513 | tdata->objs[i].value.tid = tdata->id; |
| 289 | err = insert_retry(&ht, &tdata->objs[i].node, test_rht_params); | 514 | err = insert_retry(&ht, &tdata->objs[i], test_rht_params); |
| 290 | if (err > 0) { | 515 | if (err > 0) { |
| 291 | insert_retries += err; | 516 | insert_retries += err; |
| 292 | } else if (err) { | 517 | } else if (err) { |
| @@ -307,7 +532,7 @@ static int threadfunc(void *data) | |||
| 307 | } | 532 | } |
| 308 | 533 | ||
| 309 | for (step = 10; step > 0; step--) { | 534 | for (step = 10; step > 0; step--) { |
| 310 | for (i = 0; i < entries; i += step) { | 535 | for (i = 0; i < tdata->entries; i += step) { |
| 311 | if (tdata->objs[i].value.id == TEST_INSERT_FAIL) | 536 | if (tdata->objs[i].value.id == TEST_INSERT_FAIL) |
| 312 | continue; | 537 | continue; |
| 313 | err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, | 538 | err = rhashtable_remove_fast(&ht, &tdata->objs[i].node, |
| @@ -338,17 +563,25 @@ out: | |||
| 338 | 563 | ||
| 339 | static int __init test_rht_init(void) | 564 | static int __init test_rht_init(void) |
| 340 | { | 565 | { |
| 566 | unsigned int entries; | ||
| 341 | int i, err, started_threads = 0, failed_threads = 0; | 567 | int i, err, started_threads = 0, failed_threads = 0; |
| 342 | u64 total_time = 0; | 568 | u64 total_time = 0; |
| 343 | struct thread_data *tdata; | 569 | struct thread_data *tdata; |
| 344 | struct test_obj *objs; | 570 | struct test_obj *objs; |
| 345 | 571 | ||
| 346 | entries = min(entries, MAX_ENTRIES); | 572 | if (parm_entries < 0) |
| 573 | parm_entries = 1; | ||
| 574 | |||
| 575 | entries = min(parm_entries, MAX_ENTRIES); | ||
| 347 | 576 | ||
| 348 | test_rht_params.automatic_shrinking = shrinking; | 577 | test_rht_params.automatic_shrinking = shrinking; |
| 349 | test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries); | 578 | test_rht_params.max_size = max_size ? : roundup_pow_of_two(entries); |
| 350 | test_rht_params.nelem_hint = size; | 579 | test_rht_params.nelem_hint = size; |
| 351 | 580 | ||
| 581 | objs = vzalloc((test_rht_params.max_size + 1) * sizeof(struct test_obj)); | ||
| 582 | if (!objs) | ||
| 583 | return -ENOMEM; | ||
| 584 | |||
| 352 | pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", | 585 | pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", |
| 353 | size, max_size, shrinking); | 586 | size, max_size, shrinking); |
| 354 | 587 | ||
| @@ -356,7 +589,8 @@ static int __init test_rht_init(void) | |||
| 356 | s64 time; | 589 | s64 time; |
| 357 | 590 | ||
| 358 | pr_info("Test %02d:\n", i); | 591 | pr_info("Test %02d:\n", i); |
| 359 | memset(&array, 0, sizeof(array)); | 592 | memset(objs, 0, test_rht_params.max_size * sizeof(struct test_obj)); |
| 593 | |||
| 360 | err = rhashtable_init(&ht, &test_rht_params); | 594 | err = rhashtable_init(&ht, &test_rht_params); |
| 361 | if (err < 0) { | 595 | if (err < 0) { |
| 362 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", | 596 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", |
| @@ -364,9 +598,10 @@ static int __init test_rht_init(void) | |||
| 364 | continue; | 598 | continue; |
| 365 | } | 599 | } |
| 366 | 600 | ||
| 367 | time = test_rhashtable(&ht); | 601 | time = test_rhashtable(&ht, objs, entries); |
| 368 | rhashtable_destroy(&ht); | 602 | rhashtable_destroy(&ht); |
| 369 | if (time < 0) { | 603 | if (time < 0) { |
| 604 | vfree(objs); | ||
| 370 | pr_warn("Test failed: return code %lld\n", time); | 605 | pr_warn("Test failed: return code %lld\n", time); |
| 371 | return -EINVAL; | 606 | return -EINVAL; |
| 372 | } | 607 | } |
| @@ -374,6 +609,11 @@ static int __init test_rht_init(void) | |||
| 374 | total_time += time; | 609 | total_time += time; |
| 375 | } | 610 | } |
| 376 | 611 | ||
| 612 | pr_info("test if its possible to exceed max_size %d: %s\n", | ||
| 613 | test_rht_params.max_size, test_rhashtable_max(objs, entries) == 0 ? | ||
| 614 | "no, ok" : "YES, failed"); | ||
| 615 | vfree(objs); | ||
| 616 | |||
| 377 | do_div(total_time, runs); | 617 | do_div(total_time, runs); |
| 378 | pr_info("Average test time: %llu\n", total_time); | 618 | pr_info("Average test time: %llu\n", total_time); |
| 379 | 619 | ||
| @@ -404,6 +644,7 @@ static int __init test_rht_init(void) | |||
| 404 | } | 644 | } |
| 405 | for (i = 0; i < tcount; i++) { | 645 | for (i = 0; i < tcount; i++) { |
| 406 | tdata[i].id = i; | 646 | tdata[i].id = i; |
| 647 | tdata[i].entries = entries; | ||
| 407 | tdata[i].objs = objs + i * entries; | 648 | tdata[i].objs = objs + i * entries; |
| 408 | tdata[i].task = kthread_run(threadfunc, &tdata[i], | 649 | tdata[i].task = kthread_run(threadfunc, &tdata[i], |
| 409 | "rhashtable_thrad[%d]", i); | 650 | "rhashtable_thrad[%d]", i); |
| @@ -425,11 +666,17 @@ static int __init test_rht_init(void) | |||
| 425 | failed_threads++; | 666 | failed_threads++; |
| 426 | } | 667 | } |
| 427 | } | 668 | } |
| 428 | pr_info("Started %d threads, %d failed\n", | ||
| 429 | started_threads, failed_threads); | ||
| 430 | rhashtable_destroy(&ht); | 669 | rhashtable_destroy(&ht); |
| 431 | vfree(tdata); | 670 | vfree(tdata); |
| 432 | vfree(objs); | 671 | vfree(objs); |
| 672 | |||
| 673 | /* | ||
| 674 | * rhltable_remove() is very expensive; the default values can make the | ||
| 675 | * test run for two minutes or more, so use a smaller number instead. | ||
| 676 | */ | ||
| 677 | err = test_rhltable(entries / 16); | ||
| 678 | pr_info("Started %d threads, %d failed, rhltable test returns %d\n", | ||
| 679 | started_threads, failed_threads, err); | ||
| 433 | return 0; | 680 | return 0; |
| 434 | } | 681 | } |
| 435 | 682 | ||
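The test_rhltable() additions above exercise rhltable, the rhashtable variant in which several objects may share one key and each bucket holds a list. Below is a minimal sketch of the usage pattern the test drives; the names my_obj, my_params and example_cycle(), and the parameter choices, are illustrative assumptions rather than part of the patch (the table itself would first be set up with rhltable_init(), as the test does).

#include <linux/rhashtable.h>
#include <linux/printk.h>

struct my_obj {
	u32 key;
	struct rhlist_head list_node;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, list_node),
};

/* Insert one object under 'key', walk all objects sharing that key, remove it. */
static int example_cycle(struct rhltable *rhlt, struct my_obj *obj, u32 key)
{
	struct rhlist_head *list, *pos;
	struct my_obj *cur;
	int err;

	obj->key = key;
	err = rhltable_insert(rhlt, &obj->list_node, my_params);
	if (err)
		return err;

	rcu_read_lock();
	list = rhltable_lookup(rhlt, &key, my_params);
	rhl_for_each_entry_rcu(cur, pos, list, list_node)
		pr_info("object with key %u\n", cur->key);
	rcu_read_unlock();

	return rhltable_remove(rhlt, &obj->list_node, my_params);
}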
diff --git a/lib/test_string.c b/lib/test_string.c new file mode 100644 index 000000000000..0fcdb82dca86 --- /dev/null +++ b/lib/test_string.c | |||
| @@ -0,0 +1,141 @@ | |||
| 1 | #include <linux/module.h> | ||
| 2 | #include <linux/printk.h> | ||
| 3 | #include <linux/slab.h> | ||
| 4 | #include <linux/string.h> | ||
| 5 | |||
| 6 | static __init int memset16_selftest(void) | ||
| 7 | { | ||
| 8 | unsigned i, j, k; | ||
| 9 | u16 v, *p; | ||
| 10 | |||
| 11 | p = kmalloc(256 * 2 * 2, GFP_KERNEL); | ||
| 12 | if (!p) | ||
| 13 | return -1; | ||
| 14 | |||
| 15 | for (i = 0; i < 256; i++) { | ||
| 16 | for (j = 0; j < 256; j++) { | ||
| 17 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 18 | memset16(p + i, 0xb1b2, j); | ||
| 19 | for (k = 0; k < 512; k++) { | ||
| 20 | v = p[k]; | ||
| 21 | if (k < i) { | ||
| 22 | if (v != 0xa1a1) | ||
| 23 | goto fail; | ||
| 24 | } else if (k < i + j) { | ||
| 25 | if (v != 0xb1b2) | ||
| 26 | goto fail; | ||
| 27 | } else { | ||
| 28 | if (v != 0xa1a1) | ||
| 29 | goto fail; | ||
| 30 | } | ||
| 31 | } | ||
| 32 | } | ||
| 33 | } | ||
| 34 | |||
| 35 | fail: | ||
| 36 | kfree(p); | ||
| 37 | if (i < 256) | ||
| 38 | return (i << 24) | (j << 16) | k; | ||
| 39 | return 0; | ||
| 40 | } | ||
| 41 | |||
| 42 | static __init int memset32_selftest(void) | ||
| 43 | { | ||
| 44 | unsigned i, j, k; | ||
| 45 | u32 v, *p; | ||
| 46 | |||
| 47 | p = kmalloc(256 * 2 * 4, GFP_KERNEL); | ||
| 48 | if (!p) | ||
| 49 | return -1; | ||
| 50 | |||
| 51 | for (i = 0; i < 256; i++) { | ||
| 52 | for (j = 0; j < 256; j++) { | ||
| 53 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 54 | memset32(p + i, 0xb1b2b3b4, j); | ||
| 55 | for (k = 0; k < 512; k++) { | ||
| 56 | v = p[k]; | ||
| 57 | if (k < i) { | ||
| 58 | if (v != 0xa1a1a1a1) | ||
| 59 | goto fail; | ||
| 60 | } else if (k < i + j) { | ||
| 61 | if (v != 0xb1b2b3b4) | ||
| 62 | goto fail; | ||
| 63 | } else { | ||
| 64 | if (v != 0xa1a1a1a1) | ||
| 65 | goto fail; | ||
| 66 | } | ||
| 67 | } | ||
| 68 | } | ||
| 69 | } | ||
| 70 | |||
| 71 | fail: | ||
| 72 | kfree(p); | ||
| 73 | if (i < 256) | ||
| 74 | return (i << 24) | (j << 16) | k; | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | static __init int memset64_selftest(void) | ||
| 79 | { | ||
| 80 | unsigned i, j, k; | ||
| 81 | u64 v, *p; | ||
| 82 | |||
| 83 | p = kmalloc(256 * 2 * 8, GFP_KERNEL); | ||
| 84 | if (!p) | ||
| 85 | return -1; | ||
| 86 | |||
| 87 | for (i = 0; i < 256; i++) { | ||
| 88 | for (j = 0; j < 256; j++) { | ||
| 89 | memset(p, 0xa1, 256 * 2 * sizeof(v)); | ||
| 90 | memset64(p + i, 0xb1b2b3b4b5b6b7b8ULL, j); | ||
| 91 | for (k = 0; k < 512; k++) { | ||
| 92 | v = p[k]; | ||
| 93 | if (k < i) { | ||
| 94 | if (v != 0xa1a1a1a1a1a1a1a1ULL) | ||
| 95 | goto fail; | ||
| 96 | } else if (k < i + j) { | ||
| 97 | if (v != 0xb1b2b3b4b5b6b7b8ULL) | ||
| 98 | goto fail; | ||
| 99 | } else { | ||
| 100 | if (v != 0xa1a1a1a1a1a1a1a1ULL) | ||
| 101 | goto fail; | ||
| 102 | } | ||
| 103 | } | ||
| 104 | } | ||
| 105 | } | ||
| 106 | |||
| 107 | fail: | ||
| 108 | kfree(p); | ||
| 109 | if (i < 256) | ||
| 110 | return (i << 24) | (j << 16) | k; | ||
| 111 | return 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | static __init int string_selftest_init(void) | ||
| 115 | { | ||
| 116 | int test, subtest; | ||
| 117 | |||
| 118 | test = 1; | ||
| 119 | subtest = memset16_selftest(); | ||
| 120 | if (subtest) | ||
| 121 | goto fail; | ||
| 122 | |||
| 123 | test = 2; | ||
| 124 | subtest = memset32_selftest(); | ||
| 125 | if (subtest) | ||
| 126 | goto fail; | ||
| 127 | |||
| 128 | test = 3; | ||
| 129 | subtest = memset64_selftest(); | ||
| 130 | if (subtest) | ||
| 131 | goto fail; | ||
| 132 | |||
| 133 | pr_info("String selftests succeeded\n"); | ||
| 134 | return 0; | ||
| 135 | fail: | ||
| 136 | pr_crit("String selftest failure %d.%08x\n", test, subtest); | ||
| 137 | return 0; | ||
| 138 | } | ||
| 139 | |||
| 140 | module_init(string_selftest_init); | ||
| 141 | MODULE_LICENSE("GPL v2"); | ||
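For reference, the failure code returned by each selftest packs the outer index, fill length and scan position as (i << 24) | (j << 16) | k, so a report such as "String selftest failure 2.01020003" would, under this encoding, point at memset32 with i=1, j=2, k=3. The helpers themselves behave like an element-wise memset(); a small illustrative example (not from the patch):

#include <linux/string.h>
#include <linux/types.h>

/* Fill a 16-bits-per-pixel scanline with one pixel value. */
static void fill_scanline(u16 *line, u16 pixel, size_t width)
{
	memset16(line, pixel, width);	/* writes 'width' 16-bit elements */
}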
diff --git a/lib/ucmpdi2.c b/lib/ucmpdi2.c new file mode 100644 index 000000000000..25ca2d4c1e19 --- /dev/null +++ b/lib/ucmpdi2.c | |||
| @@ -0,0 +1,35 @@ | |||
| 1 | /* | ||
| 2 | * This program is free software; you can redistribute it and/or modify | ||
| 3 | * it under the terms of the GNU General Public License as published by | ||
| 4 | * the Free Software Foundation; either version 2 of the License, or | ||
| 5 | * (at your option) any later version. | ||
| 6 | * | ||
| 7 | * This program is distributed in the hope that it will be useful, | ||
| 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 10 | * GNU General Public License for more details. | ||
| 11 | * | ||
| 12 | * You should have received a copy of the GNU General Public License | ||
| 13 | * along with this program; if not, see the file COPYING, or write | ||
| 14 | * to the Free Software Foundation, Inc. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/libgcc.h> | ||
| 19 | |||
| 20 | word_type __ucmpdi2(unsigned long long a, unsigned long long b) | ||
| 21 | { | ||
| 22 | const DWunion au = {.ll = a}; | ||
| 23 | const DWunion bu = {.ll = b}; | ||
| 24 | |||
| 25 | if ((unsigned int) au.s.high < (unsigned int) bu.s.high) | ||
| 26 | return 0; | ||
| 27 | else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) | ||
| 28 | return 2; | ||
| 29 | if ((unsigned int) au.s.low < (unsigned int) bu.s.low) | ||
| 30 | return 0; | ||
| 31 | else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) | ||
| 32 | return 2; | ||
| 33 | return 1; | ||
| 34 | } | ||
| 35 | EXPORT_SYMBOL(__ucmpdi2); | ||
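__ucmpdi2() is one of the libgcc helpers a compiler may emit on 32-bit targets for 64-bit unsigned comparisons; as the code above shows, it returns 0, 1 or 2 for a < b, a == b and a > b respectively. A hedged illustration of the kind of C that can be lowered into such a call (whether it actually is depends on the compiler and architecture):

#include <linux/types.h>

/*
 * Illustration only: on some 32-bit targets the compiler may lower this
 * 64-bit unsigned comparison into "__ucmpdi2(a, b) == 0".
 */
static bool u64_before(unsigned long long a, unsigned long long b)
{
	return a < b;
}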
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 86c3385b9eb3..01c3957b2de6 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include <linux/uuid.h> | 33 | #include <linux/uuid.h> |
| 34 | #include <linux/of.h> | 34 | #include <linux/of.h> |
| 35 | #include <net/addrconf.h> | 35 | #include <net/addrconf.h> |
| 36 | #include <linux/siphash.h> | ||
| 37 | #include <linux/compiler.h> | ||
| 36 | #ifdef CONFIG_BLOCK | 38 | #ifdef CONFIG_BLOCK |
| 37 | #include <linux/blkdev.h> | 39 | #include <linux/blkdev.h> |
| 38 | #endif | 40 | #endif |
| @@ -620,8 +622,8 @@ char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_sp | |||
| 620 | 622 | ||
| 621 | rcu_read_lock(); | 623 | rcu_read_lock(); |
| 622 | for (i = 0; i < depth; i++, d = p) { | 624 | for (i = 0; i < depth; i++, d = p) { |
| 623 | p = ACCESS_ONCE(d->d_parent); | 625 | p = READ_ONCE(d->d_parent); |
| 624 | array[i] = ACCESS_ONCE(d->d_name.name); | 626 | array[i] = READ_ONCE(d->d_name.name); |
| 625 | if (p == d) { | 627 | if (p == d) { |
| 626 | if (i) | 628 | if (i) |
| 627 | array[i] = ""; | 629 | array[i] = ""; |
| @@ -1343,6 +1345,59 @@ char *uuid_string(char *buf, char *end, const u8 *addr, | |||
| 1343 | return string(buf, end, uuid, spec); | 1345 | return string(buf, end, uuid, spec); |
| 1344 | } | 1346 | } |
| 1345 | 1347 | ||
| 1348 | int kptr_restrict __read_mostly; | ||
| 1349 | |||
| 1350 | static noinline_for_stack | ||
| 1351 | char *restricted_pointer(char *buf, char *end, const void *ptr, | ||
| 1352 | struct printf_spec spec) | ||
| 1353 | { | ||
| 1354 | spec.base = 16; | ||
| 1355 | spec.flags |= SMALL; | ||
| 1356 | if (spec.field_width == -1) { | ||
| 1357 | spec.field_width = 2 * sizeof(ptr); | ||
| 1358 | spec.flags |= ZEROPAD; | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | switch (kptr_restrict) { | ||
| 1362 | case 0: | ||
| 1363 | /* Always print %pK values */ | ||
| 1364 | break; | ||
| 1365 | case 1: { | ||
| 1366 | const struct cred *cred; | ||
| 1367 | |||
| 1368 | /* | ||
| 1369 | * kptr_restrict==1 cannot be used in IRQ context | ||
| 1370 | * because its test for CAP_SYSLOG would be meaningless. | ||
| 1371 | */ | ||
| 1372 | if (in_irq() || in_serving_softirq() || in_nmi()) | ||
| 1373 | return string(buf, end, "pK-error", spec); | ||
| 1374 | |||
| 1375 | /* | ||
| 1376 | * Only print the real pointer value if the current | ||
| 1377 | * process has CAP_SYSLOG and is running with the | ||
| 1378 | * same credentials it started with. This is because | ||
| 1379 | * access to files is checked at open() time, but %pK | ||
| 1380 | * checks permission at read() time. We don't want to | ||
| 1381 | * leak pointer values if a binary opens a file using | ||
| 1382 | * %pK and then elevates privileges before reading it. | ||
| 1383 | */ | ||
| 1384 | cred = current_cred(); | ||
| 1385 | if (!has_capability_noaudit(current, CAP_SYSLOG) || | ||
| 1386 | !uid_eq(cred->euid, cred->uid) || | ||
| 1387 | !gid_eq(cred->egid, cred->gid)) | ||
| 1388 | ptr = NULL; | ||
| 1389 | break; | ||
| 1390 | } | ||
| 1391 | case 2: | ||
| 1392 | default: | ||
| 1393 | /* Always print 0's for %pK */ | ||
| 1394 | ptr = NULL; | ||
| 1395 | break; | ||
| 1396 | } | ||
| 1397 | |||
| 1398 | return number(buf, end, (unsigned long)ptr, spec); | ||
| 1399 | } | ||
| 1400 | |||
| 1346 | static noinline_for_stack | 1401 | static noinline_for_stack |
| 1347 | char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt) | 1402 | char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt) |
| 1348 | { | 1403 | { |
| @@ -1591,7 +1646,86 @@ char *device_node_string(char *buf, char *end, struct device_node *dn, | |||
| 1591 | return widen_string(buf, buf - buf_start, end, spec); | 1646 | return widen_string(buf, buf - buf_start, end, spec); |
| 1592 | } | 1647 | } |
| 1593 | 1648 | ||
| 1594 | int kptr_restrict __read_mostly; | 1649 | static noinline_for_stack |
| 1650 | char *pointer_string(char *buf, char *end, const void *ptr, | ||
| 1651 | struct printf_spec spec) | ||
| 1652 | { | ||
| 1653 | spec.base = 16; | ||
| 1654 | spec.flags |= SMALL; | ||
| 1655 | if (spec.field_width == -1) { | ||
| 1656 | spec.field_width = 2 * sizeof(ptr); | ||
| 1657 | spec.flags |= ZEROPAD; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | return number(buf, end, (unsigned long int)ptr, spec); | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | static bool have_filled_random_ptr_key __read_mostly; | ||
| 1664 | static siphash_key_t ptr_key __read_mostly; | ||
| 1665 | |||
| 1666 | static void fill_random_ptr_key(struct random_ready_callback *unused) | ||
| 1667 | { | ||
| 1668 | get_random_bytes(&ptr_key, sizeof(ptr_key)); | ||
| 1669 | /* | ||
| 1670 | * have_filled_random_ptr_key==true is dependent on get_random_bytes(). | ||
| 1671 | * ptr_to_id() needs to see have_filled_random_ptr_key==true | ||
| 1672 | * after get_random_bytes() returns. | ||
| 1673 | */ | ||
| 1674 | smp_mb(); | ||
| 1675 | WRITE_ONCE(have_filled_random_ptr_key, true); | ||
| 1676 | } | ||
| 1677 | |||
| 1678 | static struct random_ready_callback random_ready = { | ||
| 1679 | .func = fill_random_ptr_key | ||
| 1680 | }; | ||
| 1681 | |||
| 1682 | static int __init initialize_ptr_random(void) | ||
| 1683 | { | ||
| 1684 | int ret = add_random_ready_callback(&random_ready); | ||
| 1685 | |||
| 1686 | if (!ret) { | ||
| 1687 | return 0; | ||
| 1688 | } else if (ret == -EALREADY) { | ||
| 1689 | fill_random_ptr_key(&random_ready); | ||
| 1690 | return 0; | ||
| 1691 | } | ||
| 1692 | |||
| 1693 | return ret; | ||
| 1694 | } | ||
| 1695 | early_initcall(initialize_ptr_random); | ||
| 1696 | |||
| 1697 | /* Maps a pointer to a 32 bit unique identifier. */ | ||
| 1698 | static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec) | ||
| 1699 | { | ||
| 1700 | unsigned long hashval; | ||
| 1701 | const int default_width = 2 * sizeof(ptr); | ||
| 1702 | |||
| 1703 | if (unlikely(!have_filled_random_ptr_key)) { | ||
| 1704 | spec.field_width = default_width; | ||
| 1705 | /* string length must be less than default_width */ | ||
| 1706 | return string(buf, end, "(ptrval)", spec); | ||
| 1707 | } | ||
| 1708 | |||
| 1709 | #ifdef CONFIG_64BIT | ||
| 1710 | hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key); | ||
| 1711 | /* | ||
| 1712 | * Mask off the first 32 bits, this makes explicit that we have | ||
| 1713 | * modified the address (and 32 bits is plenty for a unique ID). | ||
| 1714 | */ | ||
| 1715 | hashval = hashval & 0xffffffff; | ||
| 1716 | #else | ||
| 1717 | hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key); | ||
| 1718 | #endif | ||
| 1719 | |||
| 1720 | spec.flags |= SMALL; | ||
| 1721 | if (spec.field_width == -1) { | ||
| 1722 | spec.field_width = default_width; | ||
| 1723 | spec.flags |= ZEROPAD; | ||
| 1724 | } | ||
| 1725 | spec.base = 16; | ||
| 1726 | |||
| 1727 | return number(buf, end, hashval, spec); | ||
| 1728 | } | ||
| 1595 | 1729 | ||
| 1596 | /* | 1730 | /* |
| 1597 | * Show a '%p' thing. A kernel extension is that the '%p' is followed | 1731 | * Show a '%p' thing. A kernel extension is that the '%p' is followed |
| @@ -1698,11 +1832,16 @@ int kptr_restrict __read_mostly; | |||
| 1698 | * c major compatible string | 1832 | * c major compatible string |
| 1699 | * C full compatible string | 1833 | * C full compatible string |
| 1700 | * | 1834 | * |
| 1835 | * - 'x' For printing the address. Equivalent to "%lx". | ||
| 1836 | * | ||
| 1701 | * ** Please update also Documentation/printk-formats.txt when making changes ** | 1837 | * ** Please update also Documentation/printk-formats.txt when making changes ** |
| 1702 | * | 1838 | * |
| 1703 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 | 1839 | * Note: The difference between 'S' and 'F' is that on ia64 and ppc64 |
| 1704 | * function pointers are really function descriptors, which contain a | 1840 | * function pointers are really function descriptors, which contain a |
| 1705 | * pointer to the real address. | 1841 | * pointer to the real address. |
| 1842 | * | ||
| 1843 | * Note: The default behaviour (unadorned %p) is to hash the address, | ||
| 1844 | * rendering it useful as a unique identifier. | ||
| 1706 | */ | 1845 | */ |
| 1707 | static noinline_for_stack | 1846 | static noinline_for_stack |
| 1708 | char *pointer(const char *fmt, char *buf, char *end, void *ptr, | 1847 | char *pointer(const char *fmt, char *buf, char *end, void *ptr, |
| @@ -1792,47 +1931,9 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1792 | return buf; | 1931 | return buf; |
| 1793 | } | 1932 | } |
| 1794 | case 'K': | 1933 | case 'K': |
| 1795 | switch (kptr_restrict) { | 1934 | if (!kptr_restrict) |
| 1796 | case 0: | ||
| 1797 | /* Always print %pK values */ | ||
| 1798 | break; | ||
| 1799 | case 1: { | ||
| 1800 | const struct cred *cred; | ||
| 1801 | |||
| 1802 | /* | ||
| 1803 | * kptr_restrict==1 cannot be used in IRQ context | ||
| 1804 | * because its test for CAP_SYSLOG would be meaningless. | ||
| 1805 | */ | ||
| 1806 | if (in_irq() || in_serving_softirq() || in_nmi()) { | ||
| 1807 | if (spec.field_width == -1) | ||
| 1808 | spec.field_width = default_width; | ||
| 1809 | return string(buf, end, "pK-error", spec); | ||
| 1810 | } | ||
| 1811 | |||
| 1812 | /* | ||
| 1813 | * Only print the real pointer value if the current | ||
| 1814 | * process has CAP_SYSLOG and is running with the | ||
| 1815 | * same credentials it started with. This is because | ||
| 1816 | * access to files is checked at open() time, but %pK | ||
| 1817 | * checks permission at read() time. We don't want to | ||
| 1818 | * leak pointer values if a binary opens a file using | ||
| 1819 | * %pK and then elevates privileges before reading it. | ||
| 1820 | */ | ||
| 1821 | cred = current_cred(); | ||
| 1822 | if (!has_capability_noaudit(current, CAP_SYSLOG) || | ||
| 1823 | !uid_eq(cred->euid, cred->uid) || | ||
| 1824 | !gid_eq(cred->egid, cred->gid)) | ||
| 1825 | ptr = NULL; | ||
| 1826 | break; | ||
| 1827 | } | ||
| 1828 | case 2: | ||
| 1829 | default: | ||
| 1830 | /* Always print 0's for %pK */ | ||
| 1831 | ptr = NULL; | ||
| 1832 | break; | 1935 | break; |
| 1833 | } | 1936 | return restricted_pointer(buf, end, ptr, spec); |
| 1834 | break; | ||
| 1835 | |||
| 1836 | case 'N': | 1937 | case 'N': |
| 1837 | return netdev_bits(buf, end, ptr, fmt); | 1938 | return netdev_bits(buf, end, ptr, fmt); |
| 1838 | case 'a': | 1939 | case 'a': |
| @@ -1857,15 +1958,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1857 | case 'F': | 1958 | case 'F': |
| 1858 | return device_node_string(buf, end, ptr, spec, fmt + 1); | 1959 | return device_node_string(buf, end, ptr, spec, fmt + 1); |
| 1859 | } | 1960 | } |
| 1961 | case 'x': | ||
| 1962 | return pointer_string(buf, end, ptr, spec); | ||
| 1860 | } | 1963 | } |
| 1861 | spec.flags |= SMALL; | ||
| 1862 | if (spec.field_width == -1) { | ||
| 1863 | spec.field_width = default_width; | ||
| 1864 | spec.flags |= ZEROPAD; | ||
| 1865 | } | ||
| 1866 | spec.base = 16; | ||
| 1867 | 1964 | ||
| 1868 | return number(buf, end, (unsigned long) ptr, spec); | 1965 | /* default is to _not_ leak addresses, hash before printing */ |
| 1966 | return ptr_to_id(buf, end, ptr, spec); | ||
| 1869 | } | 1967 | } |
| 1870 | 1968 | ||
| 1871 | /* | 1969 | /* |
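With the vsprintf changes above, an unadorned %p now prints a per-boot hashed identifier, the new %px prints the raw value, and %pK keeps honouring kptr_restrict via restricted_pointer(). A short illustrative printk() usage (not part of the patch):

#include <linux/printk.h>

static void show_ptr(void *ptr)
{
	pr_info("opaque id:   %p\n", ptr);	/* hashed, safe default */
	pr_info("raw address: %px\n", ptr);	/* only where leaking the address is acceptable */
	pr_info("restricted:  %pK\n", ptr);	/* policy decided by kptr_restrict */
}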
diff --git a/lib/xz/xz_dec_stream.c b/lib/xz/xz_dec_stream.c index ac809b1e64f7..bd1d182419d7 100644 --- a/lib/xz/xz_dec_stream.c +++ b/lib/xz/xz_dec_stream.c | |||
| @@ -583,6 +583,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 583 | if (ret != XZ_OK) | 583 | if (ret != XZ_OK) |
| 584 | return ret; | 584 | return ret; |
| 585 | 585 | ||
| 586 | /* Fall through */ | ||
| 587 | |||
| 586 | case SEQ_BLOCK_START: | 588 | case SEQ_BLOCK_START: |
| 587 | /* We need one byte of input to continue. */ | 589 | /* We need one byte of input to continue. */ |
| 588 | if (b->in_pos == b->in_size) | 590 | if (b->in_pos == b->in_size) |
| @@ -606,6 +608,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 606 | s->temp.pos = 0; | 608 | s->temp.pos = 0; |
| 607 | s->sequence = SEQ_BLOCK_HEADER; | 609 | s->sequence = SEQ_BLOCK_HEADER; |
| 608 | 610 | ||
| 611 | /* Fall through */ | ||
| 612 | |||
| 609 | case SEQ_BLOCK_HEADER: | 613 | case SEQ_BLOCK_HEADER: |
| 610 | if (!fill_temp(s, b)) | 614 | if (!fill_temp(s, b)) |
| 611 | return XZ_OK; | 615 | return XZ_OK; |
| @@ -616,6 +620,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 616 | 620 | ||
| 617 | s->sequence = SEQ_BLOCK_UNCOMPRESS; | 621 | s->sequence = SEQ_BLOCK_UNCOMPRESS; |
| 618 | 622 | ||
| 623 | /* Fall through */ | ||
| 624 | |||
| 619 | case SEQ_BLOCK_UNCOMPRESS: | 625 | case SEQ_BLOCK_UNCOMPRESS: |
| 620 | ret = dec_block(s, b); | 626 | ret = dec_block(s, b); |
| 621 | if (ret != XZ_STREAM_END) | 627 | if (ret != XZ_STREAM_END) |
| @@ -623,6 +629,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 623 | 629 | ||
| 624 | s->sequence = SEQ_BLOCK_PADDING; | 630 | s->sequence = SEQ_BLOCK_PADDING; |
| 625 | 631 | ||
| 632 | /* Fall through */ | ||
| 633 | |||
| 626 | case SEQ_BLOCK_PADDING: | 634 | case SEQ_BLOCK_PADDING: |
| 627 | /* | 635 | /* |
| 628 | * Size of Compressed Data + Block Padding | 636 | * Size of Compressed Data + Block Padding |
| @@ -643,6 +651,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 643 | 651 | ||
| 644 | s->sequence = SEQ_BLOCK_CHECK; | 652 | s->sequence = SEQ_BLOCK_CHECK; |
| 645 | 653 | ||
| 654 | /* Fall through */ | ||
| 655 | |||
| 646 | case SEQ_BLOCK_CHECK: | 656 | case SEQ_BLOCK_CHECK: |
| 647 | if (s->check_type == XZ_CHECK_CRC32) { | 657 | if (s->check_type == XZ_CHECK_CRC32) { |
| 648 | ret = crc32_validate(s, b); | 658 | ret = crc32_validate(s, b); |
| @@ -665,6 +675,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 665 | 675 | ||
| 666 | s->sequence = SEQ_INDEX_PADDING; | 676 | s->sequence = SEQ_INDEX_PADDING; |
| 667 | 677 | ||
| 678 | /* Fall through */ | ||
| 679 | |||
| 668 | case SEQ_INDEX_PADDING: | 680 | case SEQ_INDEX_PADDING: |
| 669 | while ((s->index.size + (b->in_pos - s->in_start)) | 681 | while ((s->index.size + (b->in_pos - s->in_start)) |
| 670 | & 3) { | 682 | & 3) { |
| @@ -687,6 +699,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 687 | 699 | ||
| 688 | s->sequence = SEQ_INDEX_CRC32; | 700 | s->sequence = SEQ_INDEX_CRC32; |
| 689 | 701 | ||
| 702 | /* Fall through */ | ||
| 703 | |||
| 690 | case SEQ_INDEX_CRC32: | 704 | case SEQ_INDEX_CRC32: |
| 691 | ret = crc32_validate(s, b); | 705 | ret = crc32_validate(s, b); |
| 692 | if (ret != XZ_STREAM_END) | 706 | if (ret != XZ_STREAM_END) |
| @@ -695,6 +709,8 @@ static enum xz_ret dec_main(struct xz_dec *s, struct xz_buf *b) | |||
| 695 | s->temp.size = STREAM_HEADER_SIZE; | 709 | s->temp.size = STREAM_HEADER_SIZE; |
| 696 | s->sequence = SEQ_STREAM_FOOTER; | 710 | s->sequence = SEQ_STREAM_FOOTER; |
| 697 | 711 | ||
| 712 | /* Fall through */ | ||
| 713 | |||
| 698 | case SEQ_STREAM_FOOTER: | 714 | case SEQ_STREAM_FOOTER: |
| 699 | if (!fill_temp(s, b)) | 715 | if (!fill_temp(s, b)) |
| 700 | return XZ_OK; | 716 | return XZ_OK; |
