Diffstat (limited to 'lib')
44 files changed, 1840 insertions, 862 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 54cf309a92a5..cb9758e0ba0c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
| @@ -13,6 +13,15 @@ config RAID6_PQ | |||
| 13 | config BITREVERSE | 13 | config BITREVERSE |
| 14 | tristate | 14 | tristate |
| 15 | 15 | ||
| 16 | config HAVE_ARCH_BITREVERSE | ||
| 17 | bool | ||
| 18 | default n | ||
| 19 | depends on BITREVERSE | ||
| 20 | help | ||
| 21 | This option indicates that the architecture provides a hardware | ||
| 22 | bit-reversal instruction; when available, it is used instead of the | ||
| 23 | generic table-based implementation. | ||
| 24 | |||
| 16 | config RATIONAL | 25 | config RATIONAL |
| 17 | boolean | 26 | boolean |
| 18 | 27 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5f2ce616c046..ecb3516f6546 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -636,7 +636,7 @@ config DEBUG_STACKOVERFLOW | |||
| 636 | depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW | 636 | depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW |
| 637 | ---help--- | 637 | ---help--- |
| 638 | Say Y here if you want to check for overflows of kernel, IRQ | 638 | Say Y here if you want to check for overflows of kernel, IRQ |
| 639 | and exception stacks (if your archicture uses them). This | 639 | and exception stacks (if your architecture uses them). This |
| 640 | option will show detailed messages if free stack space drops | 640 | option will show detailed messages if free stack space drops |
| 641 | below a certain limit. | 641 | below a certain limit. |
| 642 | 642 | ||
| @@ -651,6 +651,8 @@ config DEBUG_STACKOVERFLOW | |||
| 651 | 651 | ||
| 652 | source "lib/Kconfig.kmemcheck" | 652 | source "lib/Kconfig.kmemcheck" |
| 653 | 653 | ||
| 654 | source "lib/Kconfig.kasan" | ||
| 655 | |||
| 654 | endmenu # "Memory Debugging" | 656 | endmenu # "Memory Debugging" |
| 655 | 657 | ||
| 656 | config DEBUG_SHIRQ | 658 | config DEBUG_SHIRQ |
| @@ -1215,6 +1217,7 @@ config RCU_TORTURE_TEST | |||
| 1215 | tristate "torture tests for RCU" | 1217 | tristate "torture tests for RCU" |
| 1216 | depends on DEBUG_KERNEL | 1218 | depends on DEBUG_KERNEL |
| 1217 | select TORTURE_TEST | 1219 | select TORTURE_TEST |
| 1220 | select SRCU | ||
| 1218 | default n | 1221 | default n |
| 1219 | help | 1222 | help |
| 1220 | This option provides a kernel module that runs torture tests | 1223 | This option provides a kernel module that runs torture tests |
| @@ -1257,7 +1260,7 @@ config RCU_CPU_STALL_TIMEOUT | |||
| 1257 | config RCU_CPU_STALL_INFO | 1260 | config RCU_CPU_STALL_INFO |
| 1258 | bool "Print additional diagnostics on RCU CPU stall" | 1261 | bool "Print additional diagnostics on RCU CPU stall" |
| 1259 | depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL | 1262 | depends on (TREE_RCU || PREEMPT_RCU) && DEBUG_KERNEL |
| 1260 | default n | 1263 | default y |
| 1261 | help | 1264 | help |
| 1262 | For each stalled CPU that is aware of the current RCU grace | 1265 | For each stalled CPU that is aware of the current RCU grace |
| 1263 | period, print out additional per-CPU diagnostic information | 1266 | period, print out additional per-CPU diagnostic information |
| @@ -1579,6 +1582,9 @@ config ASYNC_RAID6_TEST | |||
| 1579 | 1582 | ||
| 1580 | If unsure, say N. | 1583 | If unsure, say N. |
| 1581 | 1584 | ||
| 1585 | config TEST_HEXDUMP | ||
| 1586 | tristate "Test functions located in the hexdump module at runtime" | ||
| 1587 | |||
| 1582 | config TEST_STRING_HELPERS | 1588 | config TEST_STRING_HELPERS |
| 1583 | tristate "Test functions located in the string_helpers module at runtime" | 1589 | tristate "Test functions located in the string_helpers module at runtime" |
| 1584 | 1590 | ||
| @@ -1586,7 +1592,7 @@ config TEST_KSTRTOX | |||
| 1586 | tristate "Test kstrto*() family of functions at runtime" | 1592 | tristate "Test kstrto*() family of functions at runtime" |
| 1587 | 1593 | ||
| 1588 | config TEST_RHASHTABLE | 1594 | config TEST_RHASHTABLE |
| 1589 | bool "Perform selftest on resizable hash table" | 1595 | tristate "Perform selftest on resizable hash table" |
| 1590 | default n | 1596 | default n |
| 1591 | help | 1597 | help |
| 1592 | Enable this option to test the rhashtable functions at boot. | 1598 | Enable this option to test the rhashtable functions at boot. |
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan new file mode 100644 index 000000000000..4fecaedc80a2 --- /dev/null +++ b/lib/Kconfig.kasan | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | config HAVE_ARCH_KASAN | ||
| 2 | bool | ||
| 3 | |||
| 4 | if HAVE_ARCH_KASAN | ||
| 5 | |||
| 6 | config KASAN | ||
| 7 | bool "KASan: runtime memory debugger" | ||
| 8 | depends on SLUB_DEBUG | ||
| 9 | select CONSTRUCTORS | ||
| 10 | help | ||
| 11 | Enables kernel address sanitizer - runtime memory debugger, | ||
| 12 | designed to find out-of-bounds accesses and use-after-free bugs. | ||
| 13 | This is strictly a debugging feature. It consumes about 1/8 | ||
| 14 | of available memory and causes roughly a 3x performance slowdown. | ||
| 15 | For better error detection, enable CONFIG_STACKTRACE | ||
| 16 | and add slub_debug=U to the boot command line. | ||
| 17 | |||
| 18 | config KASAN_SHADOW_OFFSET | ||
| 19 | hex | ||
| 20 | default 0xdffffc0000000000 if X86_64 | ||
| 21 | |||
| 22 | choice | ||
| 23 | prompt "Instrumentation type" | ||
| 24 | depends on KASAN | ||
| 25 | default KASAN_OUTLINE | ||
| 26 | |||
| 27 | config KASAN_OUTLINE | ||
| 28 | bool "Outline instrumentation" | ||
| 29 | help | ||
| 30 | Before every memory access the compiler inserts a call to | ||
| 31 | __asan_load*/__asan_store*. These functions check the | ||
| 32 | shadow memory. This is slower than inline instrumentation, | ||
| 33 | but it does not bloat the kernel's .text section as much | ||
| 34 | as inline does. | ||
| 35 | |||
| 36 | config KASAN_INLINE | ||
| 37 | bool "Inline instrumentation" | ||
| 38 | help | ||
| 39 | The compiler directly inserts code checking shadow memory before | ||
| 40 | memory accesses. This is faster than outline (in some workloads | ||
| 41 | it gives about a 2x boost over outline instrumentation), but | ||
| 42 | makes the kernel's .text size much bigger. | ||
| 43 | |||
| 44 | endchoice | ||
| 45 | |||
| 46 | config TEST_KASAN | ||
| 47 | tristate "Module for testing kasan for bug detection" | ||
| 48 | depends on m && KASAN | ||
| 49 | help | ||
| 50 | This is a test module that does various nasty things like | ||
| 51 | out-of-bounds accesses and use-after-free. It is useful for testing | ||
| 52 | kernel debugging features like the kernel address sanitizer. | ||
| 53 | |||
| 54 | endif | ||
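
To make concrete what KASan is meant to catch, here is a minimal, hypothetical sketch (not part of this patch) in the spirit of what the test_kasan module exercises: an out-of-bounds write one byte past a kmalloc'ed object, which a CONFIG_KASAN=y kernel reports at runtime.

#include <linux/slab.h>

/* Hypothetical demo of a bug KASan catches: with CONFIG_KASAN=y, the
 * write one byte past the 123-byte object triggers an OOB report. */
static noinline void kmalloc_oob_demo(void)
{
	size_t size = 123;
	char *ptr = kmalloc(size, GFP_KERNEL);

	if (!ptr)
		return;
	ptr[size] = 'x';	/* out-of-bounds write */
	kfree(ptr);
}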
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 358eb81fa28d..c635a107a7de 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb | |||
| @@ -73,6 +73,31 @@ config KGDB_KDB | |||
| 73 | help | 73 | help |
| 74 | KDB frontend for kernel | 74 | KDB frontend for kernel |
| 75 | 75 | ||
| 76 | config KDB_DEFAULT_ENABLE | ||
| 77 | hex "KDB: Select kdb command functions to be enabled by default" | ||
| 78 | depends on KGDB_KDB | ||
| 79 | default 0x1 | ||
| 80 | help | ||
| 81 | Specifies which kdb commands are enabled by default. This may | ||
| 82 | be set to 1 or 0 to enable all commands or disable almost all | ||
| 83 | commands. | ||
| 84 | |||
| 85 | Alternatively the following bitmask applies: | ||
| 86 | |||
| 87 | 0x0002 - allow arbitrary reads from memory and symbol lookup | ||
| 88 | 0x0004 - allow arbitrary writes to memory | ||
| 89 | 0x0008 - allow current register state to be inspected | ||
| 90 | 0x0010 - allow current register state to be modified | ||
| 91 | 0x0020 - allow passive inspection (backtrace, process list, lsmod) | ||
| 92 | 0x0040 - allow flow control management (breakpoint, single step) | ||
| 93 | 0x0080 - enable signalling of processes | ||
| 94 | 0x0100 - allow machine to be rebooted | ||
| 95 | |||
| 96 | The config option merely sets the default at boot time. Either | ||
| 97 | issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or | ||
| 98 | setting the kdb.cmd_enable=X kernel command line option will | ||
| 99 | override the default settings. | ||
| 100 | |||
| 76 | config KDB_KEYBOARD | 101 | config KDB_KEYBOARD |
| 77 | bool "KGDB_KDB: keyboard as input device" | 102 | bool "KGDB_KDB: keyboard as input device" |
| 78 | depends on VT && KGDB_KDB | 103 | depends on VT && KGDB_KDB |
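
As a worked example of the bitmask documented above, combining 0x0002 (memory reads) with 0x0020 (passive inspection) yields 0x0022; a hypothetical C composition (the macro names are illustrative, not from the kernel headers):

/* Illustrative composition of a kdb.cmd_enable value; booting with
 * kdb.cmd_enable=0x22 would apply the same mask. */
#define KDB_ALLOW_MEM_READ	0x0002	/* hypothetical name */
#define KDB_ALLOW_INSPECT	0x0020	/* hypothetical name */

static const unsigned int kdb_cmd_enable = KDB_ALLOW_MEM_READ | KDB_ALLOW_INSPECT;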
diff --git a/lib/Makefile b/lib/Makefile index 3c3b30b9e020..87eb3bffc283 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | ifdef CONFIG_FUNCTION_TRACER | 5 | ifdef CONFIG_FUNCTION_TRACER |
| 6 | ORIG_CFLAGS := $(KBUILD_CFLAGS) | 6 | ORIG_CFLAGS := $(KBUILD_CFLAGS) |
| 7 | KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) | 7 | KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) |
| 8 | endif | 8 | endif |
| 9 | 9 | ||
| 10 | lib-y := ctype.o string.o vsprintf.o cmdline.o \ | 10 | lib-y := ctype.o string.o vsprintf.o cmdline.o \ |
| @@ -23,18 +23,22 @@ lib-y += kobject.o klist.o | |||
| 23 | obj-y += lockref.o | 23 | obj-y += lockref.o |
| 24 | 24 | ||
| 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 25 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
| 26 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 26 | bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \ |
| 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ | 27 | gcd.o lcm.o list_sort.o uuid.o flex_array.o clz_ctz.o \ |
| 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ | 28 | bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ |
| 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o | 29 | percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o |
| 30 | obj-y += string_helpers.o | 30 | obj-y += string_helpers.o |
| 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o | 31 | obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o |
| 32 | obj-y += hexdump.o | ||
| 33 | obj-$(CONFIG_TEST_HEXDUMP) += test-hexdump.o | ||
| 32 | obj-y += kstrtox.o | 34 | obj-y += kstrtox.o |
| 35 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | ||
| 36 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | ||
| 37 | obj-$(CONFIG_TEST_KASAN) += test_kasan.o | ||
| 33 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 38 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
| 34 | obj-$(CONFIG_TEST_LKM) += test_module.o | 39 | obj-$(CONFIG_TEST_LKM) += test_module.o |
| 40 | obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o | ||
| 35 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o | 41 | obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o |
| 36 | obj-$(CONFIG_TEST_BPF) += test_bpf.o | ||
| 37 | obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o | ||
| 38 | 42 | ||
| 39 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) | 43 | ifeq ($(CONFIG_DEBUG_KOBJECT),y) |
| 40 | CFLAGS_kobject.o += -DDEBUG | 44 | CFLAGS_kobject.o += -DDEBUG |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 2404d03e251a..03dd576e6773 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | * 2 of the Licence, or (at your option) any later version. | 11 | * 2 of the Licence, or (at your option) any later version. |
| 12 | */ | 12 | */ |
| 13 | //#define DEBUG | 13 | //#define DEBUG |
| 14 | #include <linux/rcupdate.h> | ||
| 14 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 15 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 16 | #include <linux/assoc_array_priv.h> | 17 | #include <linux/assoc_array_priv.h> |
diff --git a/lib/bitmap.c b/lib/bitmap.c index 324ea9eab8c1..d456f4c15a9f 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
| @@ -104,18 +104,18 @@ EXPORT_SYMBOL(__bitmap_complement); | |||
| 104 | * @dst : destination bitmap | 104 | * @dst : destination bitmap |
| 105 | * @src : source bitmap | 105 | * @src : source bitmap |
| 106 | * @shift : shift by this many bits | 106 | * @shift : shift by this many bits |
| 107 | * @bits : bitmap size, in bits | 107 | * @nbits : bitmap size, in bits |
| 108 | * | 108 | * |
| 109 | * Shifting right (dividing) means moving bits in the MS -> LS bit | 109 | * Shifting right (dividing) means moving bits in the MS -> LS bit |
| 110 | * direction. Zeros are fed into the vacated MS positions and the | 110 | * direction. Zeros are fed into the vacated MS positions and the |
| 111 | * LS bits shifted off the bottom are lost. | 111 | * LS bits shifted off the bottom are lost. |
| 112 | */ | 112 | */ |
| 113 | void __bitmap_shift_right(unsigned long *dst, | 113 | void __bitmap_shift_right(unsigned long *dst, const unsigned long *src, |
| 114 | const unsigned long *src, int shift, int bits) | 114 | unsigned shift, unsigned nbits) |
| 115 | { | 115 | { |
| 116 | int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; | 116 | unsigned k, lim = BITS_TO_LONGS(nbits); |
| 117 | int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; | 117 | unsigned off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; |
| 118 | unsigned long mask = (1UL << left) - 1; | 118 | unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); |
| 119 | for (k = 0; off + k < lim; ++k) { | 119 | for (k = 0; off + k < lim; ++k) { |
| 120 | unsigned long upper, lower; | 120 | unsigned long upper, lower; |
| 121 | 121 | ||
| @@ -127,17 +127,15 @@ void __bitmap_shift_right(unsigned long *dst, | |||
| 127 | upper = 0; | 127 | upper = 0; |
| 128 | else { | 128 | else { |
| 129 | upper = src[off + k + 1]; | 129 | upper = src[off + k + 1]; |
| 130 | if (off + k + 1 == lim - 1 && left) | 130 | if (off + k + 1 == lim - 1) |
| 131 | upper &= mask; | 131 | upper &= mask; |
| 132 | upper <<= (BITS_PER_LONG - rem); | ||
| 132 | } | 133 | } |
| 133 | lower = src[off + k]; | 134 | lower = src[off + k]; |
| 134 | if (left && off + k == lim - 1) | 135 | if (off + k == lim - 1) |
| 135 | lower &= mask; | 136 | lower &= mask; |
| 136 | dst[k] = lower >> rem; | 137 | lower >>= rem; |
| 137 | if (rem) | 138 | dst[k] = lower | upper; |
| 138 | dst[k] |= upper << (BITS_PER_LONG - rem); | ||
| 139 | if (left && k == lim - 1) | ||
| 140 | dst[k] &= mask; | ||
| 141 | } | 139 | } |
| 142 | if (off) | 140 | if (off) |
| 143 | memset(&dst[lim - off], 0, off*sizeof(unsigned long)); | 141 | memset(&dst[lim - off], 0, off*sizeof(unsigned long)); |
| @@ -150,18 +148,19 @@ EXPORT_SYMBOL(__bitmap_shift_right); | |||
| 150 | * @dst : destination bitmap | 148 | * @dst : destination bitmap |
| 151 | * @src : source bitmap | 149 | * @src : source bitmap |
| 152 | * @shift : shift by this many bits | 150 | * @shift : shift by this many bits |
| 153 | * @bits : bitmap size, in bits | 151 | * @nbits : bitmap size, in bits |
| 154 | * | 152 | * |
| 155 | * Shifting left (multiplying) means moving bits in the LS -> MS | 153 | * Shifting left (multiplying) means moving bits in the LS -> MS |
| 156 | * direction. Zeros are fed into the vacated LS bit positions | 154 | * direction. Zeros are fed into the vacated LS bit positions |
| 157 | * and those MS bits shifted off the top are lost. | 155 | * and those MS bits shifted off the top are lost. |
| 158 | */ | 156 | */ |
| 159 | 157 | ||
| 160 | void __bitmap_shift_left(unsigned long *dst, | 158 | void __bitmap_shift_left(unsigned long *dst, const unsigned long *src, |
| 161 | const unsigned long *src, int shift, int bits) | 159 | unsigned int shift, unsigned int nbits) |
| 162 | { | 160 | { |
| 163 | int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG; | 161 | int k; |
| 164 | int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; | 162 | unsigned int lim = BITS_TO_LONGS(nbits); |
| 163 | unsigned int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG; | ||
| 165 | for (k = lim - off - 1; k >= 0; --k) { | 164 | for (k = lim - off - 1; k >= 0; --k) { |
| 166 | unsigned long upper, lower; | 165 | unsigned long upper, lower; |
| 167 | 166 | ||
| @@ -170,17 +169,11 @@ void __bitmap_shift_left(unsigned long *dst, | |||
| 170 | * word below and make them the bottom rem bits of result. | 169 | * word below and make them the bottom rem bits of result. |
| 171 | */ | 170 | */ |
| 172 | if (rem && k > 0) | 171 | if (rem && k > 0) |
| 173 | lower = src[k - 1]; | 172 | lower = src[k - 1] >> (BITS_PER_LONG - rem); |
| 174 | else | 173 | else |
| 175 | lower = 0; | 174 | lower = 0; |
| 176 | upper = src[k]; | 175 | upper = src[k] << rem; |
| 177 | if (left && k == lim - 1) | 176 | dst[k + off] = lower | upper; |
| 178 | upper &= (1UL << left) - 1; | ||
| 179 | dst[k + off] = upper << rem; | ||
| 180 | if (rem) | ||
| 181 | dst[k + off] |= lower >> (BITS_PER_LONG - rem); | ||
| 182 | if (left && k + off == lim - 1) | ||
| 183 | dst[k + off] &= (1UL << left) - 1; | ||
| 184 | } | 177 | } |
| 185 | if (off) | 178 | if (off) |
| 186 | memset(dst, 0, off*sizeof(unsigned long)); | 179 | memset(dst, 0, off*sizeof(unsigned long)); |
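
The reworked shift helpers keep the classic two-word scheme: each destination word combines the shifted remainder of one source word with the spill-over from its neighbour. A minimal user-space sketch of the right-shift case, assuming 64-bit longs (not the kernel code itself):

#include <stdio.h>

int main(void)
{
	/* 128-bit bitmap with bits 64-71 set, shifted right by 4 bits */
	unsigned long src[2] = { 0x0UL, 0xffUL };
	unsigned long dst[2];
	unsigned int rem = 4;

	dst[0] = (src[0] >> rem) | (src[1] << (64 - rem));	/* lower | upper */
	dst[1] = src[1] >> rem;
	printf("%016lx %016lx\n", dst[1], dst[0]);
	/* prints: 000000000000000f f000000000000000 */
	return 0;
}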
| @@ -377,45 +370,6 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area_off); | |||
| 377 | #define BASEDEC 10 /* fancier cpuset lists input in decimal */ | 370 | #define BASEDEC 10 /* fancier cpuset lists input in decimal */ |
| 378 | 371 | ||
| 379 | /** | 372 | /** |
| 380 | * bitmap_scnprintf - convert bitmap to an ASCII hex string. | ||
| 381 | * @buf: byte buffer into which string is placed | ||
| 382 | * @buflen: reserved size of @buf, in bytes | ||
| 383 | * @maskp: pointer to bitmap to convert | ||
| 384 | * @nmaskbits: size of bitmap, in bits | ||
| 385 | * | ||
| 386 | * Exactly @nmaskbits bits are displayed. Hex digits are grouped into | ||
| 387 | * comma-separated sets of eight digits per set. Returns the number of | ||
| 388 | * characters which were written to *buf, excluding the trailing \0. | ||
| 389 | */ | ||
| 390 | int bitmap_scnprintf(char *buf, unsigned int buflen, | ||
| 391 | const unsigned long *maskp, int nmaskbits) | ||
| 392 | { | ||
| 393 | int i, word, bit, len = 0; | ||
| 394 | unsigned long val; | ||
| 395 | const char *sep = ""; | ||
| 396 | int chunksz; | ||
| 397 | u32 chunkmask; | ||
| 398 | |||
| 399 | chunksz = nmaskbits & (CHUNKSZ - 1); | ||
| 400 | if (chunksz == 0) | ||
| 401 | chunksz = CHUNKSZ; | ||
| 402 | |||
| 403 | i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ; | ||
| 404 | for (; i >= 0; i -= CHUNKSZ) { | ||
| 405 | chunkmask = ((1ULL << chunksz) - 1); | ||
| 406 | word = i / BITS_PER_LONG; | ||
| 407 | bit = i % BITS_PER_LONG; | ||
| 408 | val = (maskp[word] >> bit) & chunkmask; | ||
| 409 | len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep, | ||
| 410 | (chunksz+3)/4, val); | ||
| 411 | chunksz = CHUNKSZ; | ||
| 412 | sep = ","; | ||
| 413 | } | ||
| 414 | return len; | ||
| 415 | } | ||
| 416 | EXPORT_SYMBOL(bitmap_scnprintf); | ||
| 417 | |||
| 418 | /** | ||
| 419 | * __bitmap_parse - convert an ASCII hex string into a bitmap. | 373 | * __bitmap_parse - convert an ASCII hex string into a bitmap. |
| 420 | * @buf: pointer to buffer containing string. | 374 | * @buf: pointer to buffer containing string. |
| 421 | * @buflen: buffer size in bytes. If string is smaller than this | 375 | * @buflen: buffer size in bytes. If string is smaller than this |
| @@ -528,65 +482,6 @@ int bitmap_parse_user(const char __user *ubuf, | |||
| 528 | } | 482 | } |
| 529 | EXPORT_SYMBOL(bitmap_parse_user); | 483 | EXPORT_SYMBOL(bitmap_parse_user); |
| 530 | 484 | ||
| 531 | /* | ||
| 532 | * bscnl_emit(buf, buflen, rbot, rtop, bp) | ||
| 533 | * | ||
| 534 | * Helper routine for bitmap_scnlistprintf(). Write decimal number | ||
| 535 | * or range to buf, suppressing output past buf+buflen, with optional | ||
| 536 | * comma-prefix. Return len of what was written to *buf, excluding the | ||
| 537 | * trailing \0. | ||
| 538 | */ | ||
| 539 | static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) | ||
| 540 | { | ||
| 541 | if (len > 0) | ||
| 542 | len += scnprintf(buf + len, buflen - len, ","); | ||
| 543 | if (rbot == rtop) | ||
| 544 | len += scnprintf(buf + len, buflen - len, "%d", rbot); | ||
| 545 | else | ||
| 546 | len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop); | ||
| 547 | return len; | ||
| 548 | } | ||
| 549 | |||
| 550 | /** | ||
| 551 | * bitmap_scnlistprintf - convert bitmap to list format ASCII string | ||
| 552 | * @buf: byte buffer into which string is placed | ||
| 553 | * @buflen: reserved size of @buf, in bytes | ||
| 554 | * @maskp: pointer to bitmap to convert | ||
| 555 | * @nmaskbits: size of bitmap, in bits | ||
| 556 | * | ||
| 557 | * Output format is a comma-separated list of decimal numbers and | ||
| 558 | * ranges. Consecutively set bits are shown as two hyphen-separated | ||
| 559 | * decimal numbers, the smallest and largest bit numbers set in | ||
| 560 | * the range. Output format is compatible with the format | ||
| 561 | * accepted as input by bitmap_parselist(). | ||
| 562 | * | ||
| 563 | * The return value is the number of characters which were written to *buf | ||
| 564 | * excluding the trailing '\0', as per ISO C99's scnprintf. | ||
| 565 | */ | ||
| 566 | int bitmap_scnlistprintf(char *buf, unsigned int buflen, | ||
| 567 | const unsigned long *maskp, int nmaskbits) | ||
| 568 | { | ||
| 569 | int len = 0; | ||
| 570 | /* current bit is 'cur', most recently seen range is [rbot, rtop] */ | ||
| 571 | int cur, rbot, rtop; | ||
| 572 | |||
| 573 | if (buflen == 0) | ||
| 574 | return 0; | ||
| 575 | buf[0] = 0; | ||
| 576 | |||
| 577 | rbot = cur = find_first_bit(maskp, nmaskbits); | ||
| 578 | while (cur < nmaskbits) { | ||
| 579 | rtop = cur; | ||
| 580 | cur = find_next_bit(maskp, nmaskbits, cur+1); | ||
| 581 | if (cur >= nmaskbits || cur > rtop + 1) { | ||
| 582 | len = bscnl_emit(buf, buflen, rbot, rtop, len); | ||
| 583 | rbot = cur; | ||
| 584 | } | ||
| 585 | } | ||
| 586 | return len; | ||
| 587 | } | ||
| 588 | EXPORT_SYMBOL(bitmap_scnlistprintf); | ||
| 589 | |||
| 590 | /** | 485 | /** |
| 591 | * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string | 486 | * bitmap_print_to_pagebuf - convert bitmap to list or hex format ASCII string |
| 592 | * @list: indicates whether the bitmap must be list | 487 | * @list: indicates whether the bitmap must be list |
| @@ -605,8 +500,8 @@ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, | |||
| 605 | int n = 0; | 500 | int n = 0; |
| 606 | 501 | ||
| 607 | if (len > 1) { | 502 | if (len > 1) { |
| 608 | n = list ? bitmap_scnlistprintf(buf, len, maskp, nmaskbits) : | 503 | n = list ? scnprintf(buf, len, "%*pbl", nmaskbits, maskp) : |
| 609 | bitmap_scnprintf(buf, len, maskp, nmaskbits); | 504 | scnprintf(buf, len, "%*pb", nmaskbits, maskp); |
| 610 | buf[n++] = '\n'; | 505 | buf[n++] = '\n'; |
| 611 | buf[n] = '\0'; | 506 | buf[n] = '\0'; |
| 612 | } | 507 | } |
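
The removed bitmap_scnprintf() and bitmap_scnlistprintf() helpers are superseded by the %*pb and %*pbl vsprintf extensions used in the hunk above, where the field width carries the number of bits. A short sketch of the new call style (the bitmap contents are made up for illustration):

#include <linux/bitmap.h>
#include <linux/printk.h>

static void print_mask_demo(void)
{
	DECLARE_BITMAP(mask, 64);

	bitmap_zero(mask, 64);
	bitmap_set(mask, 0, 4);			/* set bits 0-3 */

	pr_info("hex:  %*pb\n",  64, mask);	/* "00000000,0000000f" */
	pr_info("list: %*pbl\n", 64, mask);	/* "0-3" */
}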
| @@ -744,10 +639,10 @@ EXPORT_SYMBOL(bitmap_parselist_user); | |||
| 744 | /** | 639 | /** |
| 745 | * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap | 640 | * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap |
| 746 | * @buf: pointer to a bitmap | 641 | * @buf: pointer to a bitmap |
| 747 | * @pos: a bit position in @buf (0 <= @pos < @bits) | 642 | * @pos: a bit position in @buf (0 <= @pos < @nbits) |
| 748 | * @bits: number of valid bit positions in @buf | 643 | * @nbits: number of valid bit positions in @buf |
| 749 | * | 644 | * |
| 750 | * Map the bit at position @pos in @buf (of length @bits) to the | 645 | * Map the bit at position @pos in @buf (of length @nbits) to the |
| 751 | * ordinal of which set bit it is. If it is not set or if @pos | 646 | * ordinal of which set bit it is. If it is not set or if @pos |
| 752 | * is not a valid bit position, map to -1. | 647 | * is not a valid bit position, map to -1. |
| 753 | * | 648 | * |
| @@ -759,56 +654,40 @@ EXPORT_SYMBOL(bitmap_parselist_user); | |||
| 759 | * | 654 | * |
| 760 | * The bit positions 0 through @bits are valid positions in @buf. | 655 | * The bit positions 0 through @bits are valid positions in @buf. |
| 761 | */ | 656 | */ |
| 762 | static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) | 657 | static int bitmap_pos_to_ord(const unsigned long *buf, unsigned int pos, unsigned int nbits) |
| 763 | { | 658 | { |
| 764 | int i, ord; | 659 | if (pos >= nbits || !test_bit(pos, buf)) |
| 765 | |||
| 766 | if (pos < 0 || pos >= bits || !test_bit(pos, buf)) | ||
| 767 | return -1; | 660 | return -1; |
| 768 | 661 | ||
| 769 | i = find_first_bit(buf, bits); | 662 | return __bitmap_weight(buf, pos); |
| 770 | ord = 0; | ||
| 771 | while (i < pos) { | ||
| 772 | i = find_next_bit(buf, bits, i + 1); | ||
| 773 | ord++; | ||
| 774 | } | ||
| 775 | BUG_ON(i != pos); | ||
| 776 | |||
| 777 | return ord; | ||
| 778 | } | 663 | } |
| 779 | 664 | ||
| 780 | /** | 665 | /** |
| 781 | * bitmap_ord_to_pos - find position of n-th set bit in bitmap | 666 | * bitmap_ord_to_pos - find position of n-th set bit in bitmap |
| 782 | * @buf: pointer to bitmap | 667 | * @buf: pointer to bitmap |
| 783 | * @ord: ordinal bit position (n-th set bit, n >= 0) | 668 | * @ord: ordinal bit position (n-th set bit, n >= 0) |
| 784 | * @bits: number of valid bit positions in @buf | 669 | * @nbits: number of valid bit positions in @buf |
| 785 | * | 670 | * |
| 786 | * Map the ordinal offset of bit @ord in @buf to its position in @buf. | 671 | * Map the ordinal offset of bit @ord in @buf to its position in @buf. |
| 787 | * Value of @ord should be in range 0 <= @ord < weight(buf), else | 672 | * Value of @ord should be in range 0 <= @ord < weight(buf). If @ord |
| 788 | * results are undefined. | 673 | * >= weight(buf), returns @nbits. |
| 789 | * | 674 | * |
| 790 | * If for example, just bits 4 through 7 are set in @buf, then @ord | 675 | * If for example, just bits 4 through 7 are set in @buf, then @ord |
| 791 | * values 0 through 3 will get mapped to 4 through 7, respectively, | 676 | * values 0 through 3 will get mapped to 4 through 7, respectively, |
| 792 | * and all other @ord values return undefined values. When @ord value 3 | 677 | * and all other @ord values return @nbits. When @ord value 3 |
| 793 | * gets mapped to (returns) @pos value 7 in this example, that means | 678 | * gets mapped to (returns) @pos value 7 in this example, that means |
| 794 | * that the 3rd set bit (starting with 0th) is at position 7 in @buf. | 679 | * that the 3rd set bit (starting with 0th) is at position 7 in @buf. |
| 795 | * | 680 | * |
| 796 | * The bit positions 0 through @bits are valid positions in @buf. | 681 | * The bit positions 0 through @nbits-1 are valid positions in @buf. |
| 797 | */ | 682 | */ |
| 798 | int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) | 683 | unsigned int bitmap_ord_to_pos(const unsigned long *buf, unsigned int ord, unsigned int nbits) |
| 799 | { | 684 | { |
| 800 | int pos = 0; | 685 | unsigned int pos; |
| 801 | |||
| 802 | if (ord >= 0 && ord < bits) { | ||
| 803 | int i; | ||
| 804 | 686 | ||
| 805 | for (i = find_first_bit(buf, bits); | 687 | for (pos = find_first_bit(buf, nbits); |
| 806 | i < bits && ord > 0; | 688 | pos < nbits && ord; |
| 807 | i = find_next_bit(buf, bits, i + 1)) | 689 | pos = find_next_bit(buf, nbits, pos + 1)) |
| 808 | ord--; | 690 | ord--; |
| 809 | if (i < bits && ord == 0) | ||
| 810 | pos = i; | ||
| 811 | } | ||
| 812 | 691 | ||
| 813 | return pos; | 692 | return pos; |
| 814 | } | 693 | } |
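
The simplified bitmap_pos_to_ord() rests on the observation that the ordinal of a set bit is simply the number of set bits below its position, which __bitmap_weight(buf, pos) counts directly. A user-space analogue for a single word (illustrative only):

#include <stdio.h>

static int pos_to_ord(unsigned long word, unsigned int pos)
{
	if (!(word & (1UL << pos)))
		return -1;
	/* popcount of the bits strictly below pos */
	return __builtin_popcountl(word & ((1UL << pos) - 1));
}

int main(void)
{
	/* bits 4-7 set: position 6 holds the 2nd set bit (counting from 0) */
	printf("%d\n", pos_to_ord(0xf0UL, 6));	/* prints 2 */
	return 0;
}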
| @@ -819,7 +698,7 @@ int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) | |||
| 819 | * @src: subset to be remapped | 698 | * @src: subset to be remapped |
| 820 | * @old: defines domain of map | 699 | * @old: defines domain of map |
| 821 | * @new: defines range of map | 700 | * @new: defines range of map |
| 822 | * @bits: number of bits in each of these bitmaps | 701 | * @nbits: number of bits in each of these bitmaps |
| 823 | * | 702 | * |
| 824 | * Let @old and @new define a mapping of bit positions, such that | 703 | * Let @old and @new define a mapping of bit positions, such that |
| 825 | * whatever position is held by the n-th set bit in @old is mapped | 704 | * whatever position is held by the n-th set bit in @old is mapped |
| @@ -847,22 +726,22 @@ int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits) | |||
| 847 | */ | 726 | */ |
| 848 | void bitmap_remap(unsigned long *dst, const unsigned long *src, | 727 | void bitmap_remap(unsigned long *dst, const unsigned long *src, |
| 849 | const unsigned long *old, const unsigned long *new, | 728 | const unsigned long *old, const unsigned long *new, |
| 850 | int bits) | 729 | unsigned int nbits) |
| 851 | { | 730 | { |
| 852 | int oldbit, w; | 731 | unsigned int oldbit, w; |
| 853 | 732 | ||
| 854 | if (dst == src) /* following doesn't handle inplace remaps */ | 733 | if (dst == src) /* following doesn't handle inplace remaps */ |
| 855 | return; | 734 | return; |
| 856 | bitmap_zero(dst, bits); | 735 | bitmap_zero(dst, nbits); |
| 857 | 736 | ||
| 858 | w = bitmap_weight(new, bits); | 737 | w = bitmap_weight(new, nbits); |
| 859 | for_each_set_bit(oldbit, src, bits) { | 738 | for_each_set_bit(oldbit, src, nbits) { |
| 860 | int n = bitmap_pos_to_ord(old, oldbit, bits); | 739 | int n = bitmap_pos_to_ord(old, oldbit, nbits); |
| 861 | 740 | ||
| 862 | if (n < 0 || w == 0) | 741 | if (n < 0 || w == 0) |
| 863 | set_bit(oldbit, dst); /* identity map */ | 742 | set_bit(oldbit, dst); /* identity map */ |
| 864 | else | 743 | else |
| 865 | set_bit(bitmap_ord_to_pos(new, n % w, bits), dst); | 744 | set_bit(bitmap_ord_to_pos(new, n % w, nbits), dst); |
| 866 | } | 745 | } |
| 867 | } | 746 | } |
| 868 | EXPORT_SYMBOL(bitmap_remap); | 747 | EXPORT_SYMBOL(bitmap_remap); |
| @@ -1006,9 +885,9 @@ EXPORT_SYMBOL(bitmap_bitremap); | |||
| 1006 | * All bits in @dst not set by the above rule are cleared. | 885 | * All bits in @dst not set by the above rule are cleared. |
| 1007 | */ | 886 | */ |
| 1008 | void bitmap_onto(unsigned long *dst, const unsigned long *orig, | 887 | void bitmap_onto(unsigned long *dst, const unsigned long *orig, |
| 1009 | const unsigned long *relmap, int bits) | 888 | const unsigned long *relmap, unsigned int bits) |
| 1010 | { | 889 | { |
| 1011 | int n, m; /* same meaning as in above comment */ | 890 | unsigned int n, m; /* same meaning as in above comment */ |
| 1012 | 891 | ||
| 1013 | if (dst == orig) /* following doesn't handle inplace mappings */ | 892 | if (dst == orig) /* following doesn't handle inplace mappings */ |
| 1014 | return; | 893 | return; |
| @@ -1039,22 +918,22 @@ EXPORT_SYMBOL(bitmap_onto); | |||
| 1039 | * @dst: resulting smaller bitmap | 918 | * @dst: resulting smaller bitmap |
| 1040 | * @orig: original larger bitmap | 919 | * @orig: original larger bitmap |
| 1041 | * @sz: specified size | 920 | * @sz: specified size |
| 1042 | * @bits: number of bits in each of these bitmaps | 921 | * @nbits: number of bits in each of these bitmaps |
| 1043 | * | 922 | * |
| 1044 | * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. | 923 | * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. |
| 1045 | * Clear all other bits in @dst. See further the comment and | 924 | * Clear all other bits in @dst. See further the comment and |
| 1046 | * Example [2] for bitmap_onto() for why and how to use this. | 925 | * Example [2] for bitmap_onto() for why and how to use this. |
| 1047 | */ | 926 | */ |
| 1048 | void bitmap_fold(unsigned long *dst, const unsigned long *orig, | 927 | void bitmap_fold(unsigned long *dst, const unsigned long *orig, |
| 1049 | int sz, int bits) | 928 | unsigned int sz, unsigned int nbits) |
| 1050 | { | 929 | { |
| 1051 | int oldbit; | 930 | unsigned int oldbit; |
| 1052 | 931 | ||
| 1053 | if (dst == orig) /* following doesn't handle inplace mappings */ | 932 | if (dst == orig) /* following doesn't handle inplace mappings */ |
| 1054 | return; | 933 | return; |
| 1055 | bitmap_zero(dst, bits); | 934 | bitmap_zero(dst, nbits); |
| 1056 | 935 | ||
| 1057 | for_each_set_bit(oldbit, orig, bits) | 936 | for_each_set_bit(oldbit, orig, nbits) |
| 1058 | set_bit(oldbit % sz, dst); | 937 | set_bit(oldbit % sz, dst); |
| 1059 | } | 938 | } |
| 1060 | EXPORT_SYMBOL(bitmap_fold); | 939 | EXPORT_SYMBOL(bitmap_fold); |
| @@ -1207,16 +1086,17 @@ EXPORT_SYMBOL(bitmap_allocate_region); | |||
| 1207 | * | 1086 | * |
| 1208 | * Require nbits % BITS_PER_LONG == 0. | 1087 | * Require nbits % BITS_PER_LONG == 0. |
| 1209 | */ | 1088 | */ |
| 1210 | void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) | 1089 | #ifdef __BIG_ENDIAN |
| 1090 | void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits) | ||
| 1211 | { | 1091 | { |
| 1212 | unsigned long *d = dst; | 1092 | unsigned int i; |
| 1213 | int i; | ||
| 1214 | 1093 | ||
| 1215 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { | 1094 | for (i = 0; i < nbits/BITS_PER_LONG; i++) { |
| 1216 | if (BITS_PER_LONG == 64) | 1095 | if (BITS_PER_LONG == 64) |
| 1217 | d[i] = cpu_to_le64(src[i]); | 1096 | dst[i] = cpu_to_le64(src[i]); |
| 1218 | else | 1097 | else |
| 1219 | d[i] = cpu_to_le32(src[i]); | 1098 | dst[i] = cpu_to_le32(src[i]); |
| 1220 | } | 1099 | } |
| 1221 | } | 1100 | } |
| 1222 | EXPORT_SYMBOL(bitmap_copy_le); | 1101 | EXPORT_SYMBOL(bitmap_copy_le); |
| 1102 | #endif | ||
diff --git a/lib/bitrev.c b/lib/bitrev.c index 3956203456d4..40ffda94cc5d 100644 --- a/lib/bitrev.c +++ b/lib/bitrev.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | #ifndef CONFIG_HAVE_ARCH_BITREVERSE | ||
| 1 | #include <linux/types.h> | 2 | #include <linux/types.h> |
| 2 | #include <linux/module.h> | 3 | #include <linux/module.h> |
| 3 | #include <linux/bitrev.h> | 4 | #include <linux/bitrev.h> |
| @@ -42,18 +43,4 @@ const u8 byte_rev_table[256] = { | |||
| 42 | }; | 43 | }; |
| 43 | EXPORT_SYMBOL_GPL(byte_rev_table); | 44 | EXPORT_SYMBOL_GPL(byte_rev_table); |
| 44 | 45 | ||
| 45 | u16 bitrev16(u16 x) | 46 | #endif /* CONFIG_HAVE_ARCH_BITREVERSE */ |
| 46 | { | ||
| 47 | return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); | ||
| 48 | } | ||
| 49 | EXPORT_SYMBOL(bitrev16); | ||
| 50 | |||
| 51 | /** | ||
| 52 | * bitrev32 - reverse the order of bits in a u32 value | ||
| 53 | * @x: value to be bit-reversed | ||
| 54 | */ | ||
| 55 | u32 bitrev32(u32 x) | ||
| 56 | { | ||
| 57 | return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); | ||
| 58 | } | ||
| 59 | EXPORT_SYMBOL(bitrev32); | ||
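
With CONFIG_HAVE_ARCH_BITREVERSE, bitrev16()/bitrev32() stop being out-of-line table chains and instead defer to an architecture hook. A hedged sketch of what such a hook can look like, modelled on ARM's rbit instruction (the asm is illustrative, not taken from this diff):

#include <linux/compiler.h>
#include <linux/types.h>

/* Illustrative arch hook: ARMv7's rbit reverses all 32 bits in one
 * instruction, replacing the byte_rev_table lookups. */
static __always_inline u32 __arch_bitrev32(u32 x)
{
	__asm__ ("rbit %0, %1" : "=r" (x) : "r" (x));
	return x;
}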
diff --git a/lib/checksum.c b/lib/checksum.c index 129775eb6de6..8b39e86dbab5 100644 --- a/lib/checksum.c +++ b/lib/checksum.c | |||
| @@ -181,6 +181,15 @@ csum_partial_copy(const void *src, void *dst, int len, __wsum sum) | |||
| 181 | EXPORT_SYMBOL(csum_partial_copy); | 181 | EXPORT_SYMBOL(csum_partial_copy); |
| 182 | 182 | ||
| 183 | #ifndef csum_tcpudp_nofold | 183 | #ifndef csum_tcpudp_nofold |
| 184 | static inline u32 from64to32(u64 x) | ||
| 185 | { | ||
| 186 | /* add up 32-bit and 32-bit for 32+c bit */ | ||
| 187 | x = (x & 0xffffffff) + (x >> 32); | ||
| 188 | /* add up carry.. */ | ||
| 189 | x = (x & 0xffffffff) + (x >> 32); | ||
| 190 | return (u32)x; | ||
| 191 | } | ||
| 192 | |||
| 184 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | 193 | __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, |
| 185 | unsigned short len, | 194 | unsigned short len, |
| 186 | unsigned short proto, | 195 | unsigned short proto, |
| @@ -195,8 +204,7 @@ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | |||
| 195 | #else | 204 | #else |
| 196 | s += (proto + len) << 8; | 205 | s += (proto + len) << 8; |
| 197 | #endif | 206 | #endif |
| 198 | s += (s >> 32); | 207 | return (__force __wsum)from64to32(s); |
| 199 | return (__force __wsum)s; | ||
| 200 | } | 208 | } |
| 201 | EXPORT_SYMBOL(csum_tcpudp_nofold); | 209 | EXPORT_SYMBOL(csum_tcpudp_nofold); |
| 202 | #endif | 210 | #endif |
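
The new from64to32() folds with an end-around carry twice because the first addition can itself carry into bit 32. A user-space check of that corner case (values chosen for illustration):

#include <stdint.h>
#include <stdio.h>

static uint32_t from64to32(uint64_t x)
{
	x = (x & 0xffffffff) + (x >> 32);	/* may produce a 33-bit value */
	x = (x & 0xffffffff) + (x >> 32);	/* absorb the carry */
	return (uint32_t)x;
}

int main(void)
{
	/* low word 1 + high word 0xffffffff: the first fold yields
	 * 0x100000000, so the second fold is needed to reach 0x00000001 */
	printf("%08x\n", from64to32(0xffffffff00000001ull));
	return 0;
}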
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 527799d44476..d8f3d3150603 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c | |||
| @@ -641,7 +641,7 @@ static __init int ddebug_setup_query(char *str) | |||
| 641 | __setup("ddebug_query=", ddebug_setup_query); | 641 | __setup("ddebug_query=", ddebug_setup_query); |
| 642 | 642 | ||
| 643 | /* | 643 | /* |
| 644 | * File_ops->write method for <debugfs>/dynamic_debug/conrol. Gathers the | 644 | * File_ops->write method for <debugfs>/dynamic_debug/control. Gathers the |
| 645 | * command text from userspace, parses and executes it. | 645 | * command text from userspace, parses and executes it. |
| 646 | */ | 646 | */ |
| 647 | #define USER_BUF_PAGE 4096 | 647 | #define USER_BUF_PAGE 4096 |
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index 0777c5a45fa0..f346715e2255 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c | |||
| @@ -3,12 +3,12 @@ | |||
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2011, Tom Herbert <therbert@google.com> | 4 | * Copyright (c) 2011, Tom Herbert <therbert@google.com> |
| 5 | */ | 5 | */ |
| 6 | #include <linux/module.h> | ||
| 7 | #include <linux/types.h> | 6 | #include <linux/types.h> |
| 8 | #include <linux/ctype.h> | ||
| 9 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
| 10 | #include <linux/jiffies.h> | 8 | #include <linux/jiffies.h> |
| 11 | #include <linux/dynamic_queue_limits.h> | 9 | #include <linux/dynamic_queue_limits.h> |
| 10 | #include <linux/compiler.h> | ||
| 11 | #include <linux/export.h> | ||
| 12 | 12 | ||
| 13 | #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) | 13 | #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0) |
| 14 | #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) | 14 | #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) |
diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c index 71fcfcd96410..d83a372fa76f 100644 --- a/lib/gen_crc32table.c +++ b/lib/gen_crc32table.c | |||
| @@ -109,7 +109,7 @@ int main(int argc, char** argv) | |||
| 109 | 109 | ||
| 110 | if (CRC_LE_BITS > 1) { | 110 | if (CRC_LE_BITS > 1) { |
| 111 | crc32init_le(); | 111 | crc32init_le(); |
| 112 | printf("static u32 __cacheline_aligned " | 112 | printf("static const u32 ____cacheline_aligned " |
| 113 | "crc32table_le[%d][%d] = {", | 113 | "crc32table_le[%d][%d] = {", |
| 114 | LE_TABLE_ROWS, LE_TABLE_SIZE); | 114 | LE_TABLE_ROWS, LE_TABLE_SIZE); |
| 115 | output_table(crc32table_le, LE_TABLE_ROWS, | 115 | output_table(crc32table_le, LE_TABLE_ROWS, |
| @@ -119,7 +119,7 @@ int main(int argc, char** argv) | |||
| 119 | 119 | ||
| 120 | if (CRC_BE_BITS > 1) { | 120 | if (CRC_BE_BITS > 1) { |
| 121 | crc32init_be(); | 121 | crc32init_be(); |
| 122 | printf("static u32 __cacheline_aligned " | 122 | printf("static const u32 ____cacheline_aligned " |
| 123 | "crc32table_be[%d][%d] = {", | 123 | "crc32table_be[%d][%d] = {", |
| 124 | BE_TABLE_ROWS, BE_TABLE_SIZE); | 124 | BE_TABLE_ROWS, BE_TABLE_SIZE); |
| 125 | output_table(crc32table_be, LE_TABLE_ROWS, | 125 | output_table(crc32table_be, LE_TABLE_ROWS, |
| @@ -128,7 +128,7 @@ int main(int argc, char** argv) | |||
| 128 | } | 128 | } |
| 129 | if (CRC_LE_BITS > 1) { | 129 | if (CRC_LE_BITS > 1) { |
| 130 | crc32cinit_le(); | 130 | crc32cinit_le(); |
| 131 | printf("static u32 __cacheline_aligned " | 131 | printf("static const u32 ____cacheline_aligned " |
| 132 | "crc32ctable_le[%d][%d] = {", | 132 | "crc32ctable_le[%d][%d] = {", |
| 133 | LE_TABLE_ROWS, LE_TABLE_SIZE); | 133 | LE_TABLE_ROWS, LE_TABLE_SIZE); |
| 134 | output_table(crc32ctable_le, LE_TABLE_ROWS, | 134 | output_table(crc32ctable_le, LE_TABLE_ROWS, |
diff --git a/lib/genalloc.c b/lib/genalloc.c index 2e65d206b01c..d214866eeea2 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -34,7 +34,6 @@ | |||
| 34 | #include <linux/rculist.h> | 34 | #include <linux/rculist.h> |
| 35 | #include <linux/interrupt.h> | 35 | #include <linux/interrupt.h> |
| 36 | #include <linux/genalloc.h> | 36 | #include <linux/genalloc.h> |
| 37 | #include <linux/of_address.h> | ||
| 38 | #include <linux/of_device.h> | 37 | #include <linux/of_device.h> |
| 39 | 38 | ||
| 40 | static inline size_t chunk_size(const struct gen_pool_chunk *chunk) | 39 | static inline size_t chunk_size(const struct gen_pool_chunk *chunk) |
| @@ -415,7 +414,7 @@ bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, | |||
| 415 | size_t size) | 414 | size_t size) |
| 416 | { | 415 | { |
| 417 | bool found = false; | 416 | bool found = false; |
| 418 | unsigned long end = start + size; | 417 | unsigned long end = start + size - 1; |
| 419 | struct gen_pool_chunk *chunk; | 418 | struct gen_pool_chunk *chunk; |
| 420 | 419 | ||
| 421 | rcu_read_lock(); | 420 | rcu_read_lock(); |
| @@ -587,6 +586,8 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, | |||
| 587 | struct gen_pool **ptr, *pool; | 586 | struct gen_pool **ptr, *pool; |
| 588 | 587 | ||
| 589 | ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); | 588 | ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL); |
| 589 | if (!ptr) | ||
| 590 | return NULL; | ||
| 590 | 591 | ||
| 591 | pool = gen_pool_create(min_alloc_order, nid); | 592 | pool = gen_pool_create(min_alloc_order, nid); |
| 592 | if (pool) { | 593 | if (pool) { |
diff --git a/lib/halfmd4.c b/lib/halfmd4.c index 66d0ee8b7776..a8fe6274a13c 100644 --- a/lib/halfmd4.c +++ b/lib/halfmd4.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include <linux/kernel.h> | 1 | #include <linux/compiler.h> |
| 2 | #include <linux/export.h> | 2 | #include <linux/export.h> |
| 3 | #include <linux/cryptohash.h> | 3 | #include <linux/cryptohash.h> |
| 4 | 4 | ||
diff --git a/lib/hexdump.c b/lib/hexdump.c index 270773b91923..7ea09699855d 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c | |||
| @@ -97,63 +97,79 @@ EXPORT_SYMBOL(bin2hex); | |||
| 97 | * | 97 | * |
| 98 | * example output buffer: | 98 | * example output buffer: |
| 99 | * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO | 99 | * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
| 100 | * | ||
| 101 | * Return: | ||
| 102 | * The number of bytes placed in the buffer, excluding the terminating NUL. If the | ||
| 103 | * output was truncated, then the return value is the number of bytes | ||
| 104 | * (excluding the terminating NUL) which would have been written to the final | ||
| 105 | * string if enough space had been available. | ||
| 100 | */ | 106 | */ |
| 101 | void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | 107 | int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, |
| 102 | int groupsize, char *linebuf, size_t linebuflen, | 108 | char *linebuf, size_t linebuflen, bool ascii) |
| 103 | bool ascii) | ||
| 104 | { | 109 | { |
| 105 | const u8 *ptr = buf; | 110 | const u8 *ptr = buf; |
| 111 | int ngroups; | ||
| 106 | u8 ch; | 112 | u8 ch; |
| 107 | int j, lx = 0; | 113 | int j, lx = 0; |
| 108 | int ascii_column; | 114 | int ascii_column; |
| 115 | int ret; | ||
| 109 | 116 | ||
| 110 | if (rowsize != 16 && rowsize != 32) | 117 | if (rowsize != 16 && rowsize != 32) |
| 111 | rowsize = 16; | 118 | rowsize = 16; |
| 112 | 119 | ||
| 113 | if (!len) | ||
| 114 | goto nil; | ||
| 115 | if (len > rowsize) /* limit to one line at a time */ | 120 | if (len > rowsize) /* limit to one line at a time */ |
| 116 | len = rowsize; | 121 | len = rowsize; |
| 122 | if (!is_power_of_2(groupsize) || groupsize > 8) | ||
| 123 | groupsize = 1; | ||
| 117 | if ((len % groupsize) != 0) /* no mixed size output */ | 124 | if ((len % groupsize) != 0) /* no mixed size output */ |
| 118 | groupsize = 1; | 125 | groupsize = 1; |
| 119 | 126 | ||
| 120 | switch (groupsize) { | 127 | ngroups = len / groupsize; |
| 121 | case 8: { | 128 | ascii_column = rowsize * 2 + rowsize / groupsize + 1; |
| 122 | const u64 *ptr8 = buf; | ||
| 123 | int ngroups = len / groupsize; | ||
| 124 | 129 | ||
| 125 | for (j = 0; j < ngroups; j++) | 130 | if (!linebuflen) |
| 126 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 131 | goto overflow1; |
| 127 | "%s%16.16llx", j ? " " : "", | ||
| 128 | (unsigned long long)*(ptr8 + j)); | ||
| 129 | ascii_column = 17 * ngroups + 2; | ||
| 130 | break; | ||
| 131 | } | ||
| 132 | 132 | ||
| 133 | case 4: { | 133 | if (!len) |
| 134 | const u32 *ptr4 = buf; | 134 | goto nil; |
| 135 | int ngroups = len / groupsize; | ||
| 136 | 135 | ||
| 137 | for (j = 0; j < ngroups; j++) | 136 | if (groupsize == 8) { |
| 138 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 137 | const u64 *ptr8 = buf; |
| 139 | "%s%8.8x", j ? " " : "", *(ptr4 + j)); | ||
| 140 | ascii_column = 9 * ngroups + 2; | ||
| 141 | break; | ||
| 142 | } | ||
| 143 | 138 | ||
| 144 | case 2: { | 139 | for (j = 0; j < ngroups; j++) { |
| 145 | const u16 *ptr2 = buf; | 140 | ret = snprintf(linebuf + lx, linebuflen - lx, |
| 146 | int ngroups = len / groupsize; | 141 | "%s%16.16llx", j ? " " : "", |
| 142 | (unsigned long long)*(ptr8 + j)); | ||
| 143 | if (ret >= linebuflen - lx) | ||
| 144 | goto overflow1; | ||
| 145 | lx += ret; | ||
| 146 | } | ||
| 147 | } else if (groupsize == 4) { | ||
| 148 | const u32 *ptr4 = buf; | ||
| 147 | 149 | ||
| 148 | for (j = 0; j < ngroups; j++) | 150 | for (j = 0; j < ngroups; j++) { |
| 149 | lx += scnprintf(linebuf + lx, linebuflen - lx, | 151 | ret = snprintf(linebuf + lx, linebuflen - lx, |
| 150 | "%s%4.4x", j ? " " : "", *(ptr2 + j)); | 152 | "%s%8.8x", j ? " " : "", |
| 151 | ascii_column = 5 * ngroups + 2; | 153 | *(ptr4 + j)); |
| 152 | break; | 154 | if (ret >= linebuflen - lx) |
| 153 | } | 155 | goto overflow1; |
| 156 | lx += ret; | ||
| 157 | } | ||
| 158 | } else if (groupsize == 2) { | ||
| 159 | const u16 *ptr2 = buf; | ||
| 154 | 160 | ||
| 155 | default: | 161 | for (j = 0; j < ngroups; j++) { |
| 156 | for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { | 162 | ret = snprintf(linebuf + lx, linebuflen - lx, |
| 163 | "%s%4.4x", j ? " " : "", | ||
| 164 | *(ptr2 + j)); | ||
| 165 | if (ret >= linebuflen - lx) | ||
| 166 | goto overflow1; | ||
| 167 | lx += ret; | ||
| 168 | } | ||
| 169 | } else { | ||
| 170 | for (j = 0; j < len; j++) { | ||
| 171 | if (linebuflen < lx + 3) | ||
| 172 | goto overflow2; | ||
| 157 | ch = ptr[j]; | 173 | ch = ptr[j]; |
| 158 | linebuf[lx++] = hex_asc_hi(ch); | 174 | linebuf[lx++] = hex_asc_hi(ch); |
| 159 | linebuf[lx++] = hex_asc_lo(ch); | 175 | linebuf[lx++] = hex_asc_lo(ch); |
| @@ -161,21 +177,28 @@ void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, | |||
| 161 | } | 177 | } |
| 162 | if (j) | 178 | if (j) |
| 163 | lx--; | 179 | lx--; |
| 164 | |||
| 165 | ascii_column = 3 * rowsize + 2; | ||
| 166 | break; | ||
| 167 | } | 180 | } |
| 168 | if (!ascii) | 181 | if (!ascii) |
| 169 | goto nil; | 182 | goto nil; |
| 170 | 183 | ||
| 171 | while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) | 184 | while (lx < ascii_column) { |
| 185 | if (linebuflen < lx + 2) | ||
| 186 | goto overflow2; | ||
| 172 | linebuf[lx++] = ' '; | 187 | linebuf[lx++] = ' '; |
| 173 | for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { | 188 | } |
| 189 | for (j = 0; j < len; j++) { | ||
| 190 | if (linebuflen < lx + 2) | ||
| 191 | goto overflow2; | ||
| 174 | ch = ptr[j]; | 192 | ch = ptr[j]; |
| 175 | linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; | 193 | linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; |
| 176 | } | 194 | } |
| 177 | nil: | 195 | nil: |
| 196 | linebuf[lx] = '\0'; | ||
| 197 | return lx; | ||
| 198 | overflow2: | ||
| 178 | linebuf[lx++] = '\0'; | 199 | linebuf[lx++] = '\0'; |
| 200 | overflow1: | ||
| 201 | return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1; | ||
| 179 | } | 202 | } |
| 180 | EXPORT_SYMBOL(hex_dump_to_buffer); | 203 | EXPORT_SYMBOL(hex_dump_to_buffer); |
| 181 | 204 | ||
diff --git a/lib/idr.c b/lib/idr.c --- a/lib/idr.c +++ b/lib/idr.c | |||
| @@ -30,7 +30,6 @@ | |||
| 30 | #include <linux/idr.h> | 30 | #include <linux/idr.h> |
| 31 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
| 32 | #include <linux/percpu.h> | 32 | #include <linux/percpu.h> |
| 33 | #include <linux/hardirq.h> | ||
| 34 | 33 | ||
| 35 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) | 34 | #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) |
| 36 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) | 35 | #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) |
diff --git a/lib/interval_tree.c b/lib/interval_tree.c index f367f9ad544c..c85f6600a5f8 100644 --- a/lib/interval_tree.c +++ b/lib/interval_tree.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | #include <linux/init.h> | ||
| 2 | #include <linux/interval_tree.h> | 1 | #include <linux/interval_tree.h> |
| 3 | #include <linux/interval_tree_generic.h> | 2 | #include <linux/interval_tree_generic.h> |
| 4 | #include <linux/module.h> | 3 | #include <linux/compiler.h> |
| 4 | #include <linux/export.h> | ||
| 5 | 5 | ||
| 6 | #define START(node) ((node)->start) | 6 | #define START(node) ((node)->start) |
| 7 | #define LAST(node) ((node)->last) | 7 | #define LAST(node) ((node)->last) |
diff --git a/lib/iovec.c b/lib/iovec.c deleted file mode 100644 index 2d99cb4a5006..000000000000 --- a/lib/iovec.c +++ /dev/null | |||
| @@ -1,87 +0,0 @@ | |||
| 1 | #include <linux/uaccess.h> | ||
| 2 | #include <linux/export.h> | ||
| 3 | #include <linux/uio.h> | ||
| 4 | |||
| 5 | /* | ||
| 6 | * Copy iovec to kernel. Returns -EFAULT on error. | ||
| 7 | * | ||
| 8 | * Note: this modifies the original iovec. | ||
| 9 | */ | ||
| 10 | |||
| 11 | int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len) | ||
| 12 | { | ||
| 13 | while (len > 0) { | ||
| 14 | if (iov->iov_len) { | ||
| 15 | int copy = min_t(unsigned int, len, iov->iov_len); | ||
| 16 | if (copy_from_user(kdata, iov->iov_base, copy)) | ||
| 17 | return -EFAULT; | ||
| 18 | len -= copy; | ||
| 19 | kdata += copy; | ||
| 20 | iov->iov_base += copy; | ||
| 21 | iov->iov_len -= copy; | ||
| 22 | } | ||
| 23 | iov++; | ||
| 24 | } | ||
| 25 | |||
| 26 | return 0; | ||
| 27 | } | ||
| 28 | EXPORT_SYMBOL(memcpy_fromiovec); | ||
| 29 | |||
| 30 | /* | ||
| 31 | * Copy kernel to iovec. Returns -EFAULT on error. | ||
| 32 | */ | ||
| 33 | |||
| 34 | int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, | ||
| 35 | int offset, int len) | ||
| 36 | { | ||
| 37 | int copy; | ||
| 38 | for (; len > 0; ++iov) { | ||
| 39 | /* Skip over the finished iovecs */ | ||
| 40 | if (unlikely(offset >= iov->iov_len)) { | ||
| 41 | offset -= iov->iov_len; | ||
| 42 | continue; | ||
| 43 | } | ||
| 44 | copy = min_t(unsigned int, iov->iov_len - offset, len); | ||
| 45 | if (copy_to_user(iov->iov_base + offset, kdata, copy)) | ||
| 46 | return -EFAULT; | ||
| 47 | offset = 0; | ||
| 48 | kdata += copy; | ||
| 49 | len -= copy; | ||
| 50 | } | ||
| 51 | |||
| 52 | return 0; | ||
| 53 | } | ||
| 54 | EXPORT_SYMBOL(memcpy_toiovecend); | ||
| 55 | |||
| 56 | /* | ||
| 57 | * Copy iovec to kernel. Returns -EFAULT on error. | ||
| 58 | */ | ||
| 59 | |||
| 60 | int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, | ||
| 61 | int offset, int len) | ||
| 62 | { | ||
| 63 | /* No data? Done! */ | ||
| 64 | if (len == 0) | ||
| 65 | return 0; | ||
| 66 | |||
| 67 | /* Skip over the finished iovecs */ | ||
| 68 | while (offset >= iov->iov_len) { | ||
| 69 | offset -= iov->iov_len; | ||
| 70 | iov++; | ||
| 71 | } | ||
| 72 | |||
| 73 | while (len > 0) { | ||
| 74 | u8 __user *base = iov->iov_base + offset; | ||
| 75 | int copy = min_t(unsigned int, len, iov->iov_len - offset); | ||
| 76 | |||
| 77 | offset = 0; | ||
| 78 | if (copy_from_user(kdata, base, copy)) | ||
| 79 | return -EFAULT; | ||
| 80 | len -= copy; | ||
| 81 | kdata += copy; | ||
| 82 | iov++; | ||
| 83 | } | ||
| 84 | |||
| 85 | return 0; | ||
| 86 | } | ||
| 87 | EXPORT_SYMBOL(memcpy_fromiovecend); | ||
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 9ebf9e20de53..f6c2c1e7779c 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c | |||
| @@ -20,7 +20,6 @@ | |||
| 20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
| 21 | #include <linux/kmod.h> | 21 | #include <linux/kmod.h> |
| 22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
| 23 | #include <linux/user_namespace.h> | ||
| 24 | #include <linux/socket.h> | 23 | #include <linux/socket.h> |
| 25 | #include <linux/skbuff.h> | 24 | #include <linux/skbuff.h> |
| 26 | #include <linux/netlink.h> | 25 | #include <linux/netlink.h> |
diff --git a/lib/lcm.c b/lib/lcm.c --- a/lib/lcm.c +++ b/lib/lcm.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include <linux/kernel.h> | 1 | #include <linux/compiler.h> |
| 2 | #include <linux/gcd.h> | 2 | #include <linux/gcd.h> |
| 3 | #include <linux/export.h> | 3 | #include <linux/export.h> |
| 4 | #include <linux/lcm.h> | 4 | #include <linux/lcm.h> |
diff --git a/lib/list_sort.c b/lib/list_sort.c index 12bcba1c8612..b29015102698 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c | |||
| @@ -2,9 +2,11 @@ | |||
| 2 | #define pr_fmt(fmt) "list_sort_test: " fmt | 2 | #define pr_fmt(fmt) "list_sort_test: " fmt |
| 3 | 3 | ||
| 4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 5 | #include <linux/module.h> | 5 | #include <linux/bug.h> |
| 6 | #include <linux/compiler.h> | ||
| 7 | #include <linux/export.h> | ||
| 8 | #include <linux/string.h> | ||
| 6 | #include <linux/list_sort.h> | 9 | #include <linux/list_sort.h> |
| 7 | #include <linux/slab.h> | ||
| 8 | #include <linux/list.h> | 10 | #include <linux/list.h> |
| 9 | 11 | ||
| 10 | #define MAX_LIST_LENGTH_BITS 20 | 12 | #define MAX_LIST_LENGTH_BITS 20 |
| @@ -146,6 +148,7 @@ EXPORT_SYMBOL(list_sort); | |||
| 146 | 148 | ||
| 147 | #ifdef CONFIG_TEST_LIST_SORT | 149 | #ifdef CONFIG_TEST_LIST_SORT |
| 148 | 150 | ||
| 151 | #include <linux/slab.h> | ||
| 149 | #include <linux/random.h> | 152 | #include <linux/random.h> |
| 150 | 153 | ||
| 151 | /* | 154 | /* |
diff --git a/lib/llist.c b/lib/llist.c index f76196d07409..0b0e9779d675 100644 --- a/lib/llist.c +++ b/lib/llist.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | */ | 24 | */ |
| 25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
| 26 | #include <linux/export.h> | 26 | #include <linux/export.h> |
| 27 | #include <linux/interrupt.h> | ||
| 28 | #include <linux/llist.h> | 27 | #include <linux/llist.h> |
| 29 | 28 | ||
| 30 | 29 | ||
diff --git a/lib/md5.c b/lib/md5.c --- a/lib/md5.c +++ b/lib/md5.c | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #include <linux/kernel.h> | 1 | #include <linux/compiler.h> |
| 2 | #include <linux/export.h> | 2 | #include <linux/export.h> |
| 3 | #include <linux/cryptohash.h> | 3 | #include <linux/cryptohash.h> |
| 4 | 4 | ||
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c index 1871e7b61ca0..d25e9e96c310 100644 --- a/lib/mpi/mpi-cmp.c +++ b/lib/mpi/mpi-cmp.c | |||
| @@ -57,14 +57,12 @@ int mpi_cmp(MPI u, MPI v) | |||
| 57 | if (usize != vsize && !u->sign && !v->sign) | 57 | if (usize != vsize && !u->sign && !v->sign) |
| 58 | return usize - vsize; | 58 | return usize - vsize; |
| 59 | if (usize != vsize && u->sign && v->sign) | 59 | if (usize != vsize && u->sign && v->sign) |
| 60 | return vsize + usize; | 60 | return vsize - usize; |
| 61 | if (!usize) | 61 | if (!usize) |
| 62 | return 0; | 62 | return 0; |
| 63 | cmp = mpihelp_cmp(u->d, v->d, usize); | 63 | cmp = mpihelp_cmp(u->d, v->d, usize); |
| 64 | if (!cmp) | 64 | if (u->sign) |
| 65 | return 0; | 65 | return -cmp; |
| 66 | if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0)) | 66 | return cmp; |
| 67 | return 1; | ||
| 68 | return -1; | ||
| 69 | } | 67 | } |
| 70 | EXPORT_SYMBOL_GPL(mpi_cmp); | 68 | EXPORT_SYMBOL_GPL(mpi_cmp); |
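
The rewritten tail of mpi_cmp() compares magnitudes with mpihelp_cmp() and simply negates the result when both operands are negative. A user-space analogue of that sign handling, using plain longs instead of MPIs (illustrative only):

#include <stdio.h>
#include <stdlib.h>

static int cmp_signed(long u, long v)
{
	int cmp;

	if ((u < 0) != (v < 0))			/* differing signs */
		return u < 0 ? -1 : 1;
	cmp = (labs(u) > labs(v)) - (labs(u) < labs(v));
	return (u < 0) ? -cmp : cmp;	/* larger magnitude loses if negative */
}

int main(void)
{
	printf("%d\n", cmp_signed(-5, -3));	/* prints -1: -5 < -3 */
	return 0;
}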
diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index 60cf765628e9..c65dd1bff45a 100644 --- a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h | |||
| @@ -84,7 +84,7 @@ static inline int RESIZE_IF_NEEDED(MPI a, unsigned b) | |||
| 84 | do { \ | 84 | do { \ |
| 85 | mpi_size_t _i; \ | 85 | mpi_size_t _i; \ |
| 86 | for (_i = 0; _i < (n); _i++) \ | 86 | for (_i = 0; _i < (n); _i++) \ |
| 87 | (d)[_i] = (d)[_i]; \ | 87 | (d)[_i] = (s)[_i]; \ |
| 88 | } while (0) | 88 | } while (0) |
| 89 | 89 | ||
| 90 | #define MPN_COPY_DECR(d, s, n) \ | 90 | #define MPN_COPY_DECR(d, s, n) \ |
diff --git a/lib/nlattr.c b/lib/nlattr.c index 9c3e85ff0a6c..76a1b59523ab 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
| @@ -9,7 +9,6 @@ | |||
| 9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
| 10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
| 11 | #include <linux/jiffies.h> | 11 | #include <linux/jiffies.h> |
| 12 | #include <linux/netdevice.h> | ||
| 13 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
| 14 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| 15 | #include <linux/types.h> | 14 | #include <linux/types.h> |
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 93d145e5539c..f75715131f20 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c | |||
| @@ -19,13 +19,10 @@ | |||
| 19 | #include <linux/bug.h> | 19 | #include <linux/bug.h> |
| 20 | #include <linux/err.h> | 20 | #include <linux/err.h> |
| 21 | #include <linux/export.h> | 21 | #include <linux/export.h> |
| 22 | #include <linux/hardirq.h> | ||
| 23 | #include <linux/idr.h> | ||
| 24 | #include <linux/init.h> | 22 | #include <linux/init.h> |
| 25 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
| 26 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
| 27 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
| 28 | #include <linux/slab.h> | ||
| 29 | #include <linux/string.h> | 26 | #include <linux/string.h> |
| 30 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
| 31 | #include <linux/percpu_ida.h> | 28 | #include <linux/percpu_ida.h> |
diff --git a/lib/plist.c b/lib/plist.c index d408e774b746..3a30c53db061 100644 --- a/lib/plist.c +++ b/lib/plist.c | |||
| @@ -25,7 +25,6 @@ | |||
| 25 | 25 | ||
| 26 | #include <linux/bug.h> | 26 | #include <linux/bug.h> |
| 27 | #include <linux/plist.h> | 27 | #include <linux/plist.h> |
| 28 | #include <linux/spinlock.h> | ||
| 29 | 28 | ||
| 30 | #ifdef CONFIG_DEBUG_PI_LIST | 29 | #ifdef CONFIG_DEBUG_PI_LIST |
| 31 | 30 | ||
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3291a8e37490..3d2aa27b845b 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -33,7 +33,7 @@ | |||
| 33 | #include <linux/string.h> | 33 | #include <linux/string.h> |
| 34 | #include <linux/bitops.h> | 34 | #include <linux/bitops.h> |
| 35 | #include <linux/rcupdate.h> | 35 | #include <linux/rcupdate.h> |
| 36 | #include <linux/hardirq.h> /* in_interrupt() */ | 36 | #include <linux/preempt_mask.h> /* in_interrupt() */ |
| 37 | 37 | ||
| 38 | 38 | ||
| 39 | /* | 39 | /* |
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c index 7d0e5cd7b570..dbef2314901e 100644 --- a/lib/raid6/algos.c +++ b/lib/raid6/algos.c | |||
| @@ -89,10 +89,10 @@ void (*raid6_datap_recov)(int, size_t, int, void **); | |||
| 89 | EXPORT_SYMBOL_GPL(raid6_datap_recov); | 89 | EXPORT_SYMBOL_GPL(raid6_datap_recov); |
| 90 | 90 | ||
| 91 | const struct raid6_recov_calls *const raid6_recov_algos[] = { | 91 | const struct raid6_recov_calls *const raid6_recov_algos[] = { |
| 92 | #if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__) | ||
| 93 | #ifdef CONFIG_AS_AVX2 | 92 | #ifdef CONFIG_AS_AVX2 |
| 94 | &raid6_recov_avx2, | 93 | &raid6_recov_avx2, |
| 95 | #endif | 94 | #endif |
| 95 | #ifdef CONFIG_AS_SSSE3 | ||
| 96 | &raid6_recov_ssse3, | 96 | &raid6_recov_ssse3, |
| 97 | #endif | 97 | #endif |
| 98 | &raid6_recov_intx1, | 98 | &raid6_recov_intx1, |
diff --git a/lib/raid6/recov_avx2.c b/lib/raid6/recov_avx2.c index e1eea433a493..53fe3d7bdfb3 100644 --- a/lib/raid6/recov_avx2.c +++ b/lib/raid6/recov_avx2.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * of the License. | 8 | * of the License. |
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #if CONFIG_AS_AVX2 | 11 | #ifdef CONFIG_AS_AVX2 |
| 12 | 12 | ||
| 13 | #include <linux/raid/pq.h> | 13 | #include <linux/raid/pq.h> |
| 14 | #include "x86.h" | 14 | #include "x86.h" |
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c index a9168328f03b..cda33e56a5e3 100644 --- a/lib/raid6/recov_ssse3.c +++ b/lib/raid6/recov_ssse3.c | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | * of the License. | 7 | * of the License. |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #ifdef CONFIG_AS_SSSE3 | ||
| 11 | |||
| 10 | #include <linux/raid/pq.h> | 12 | #include <linux/raid/pq.h> |
| 11 | #include "x86.h" | 13 | #include "x86.h" |
| 12 | 14 | ||
| @@ -330,3 +332,7 @@ const struct raid6_recov_calls raid6_recov_ssse3 = { | |||
| 330 | #endif | 332 | #endif |
| 331 | .priority = 1, | 333 | .priority = 1, |
| 332 | }; | 334 | }; |
| 335 | |||
| 336 | #else | ||
| 337 | #warning "your version of binutils lacks SSSE3 support" | ||
| 338 | #endif | ||
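Both raid6 changes above tighten the preprocessor guards. The recov_avx2.c fix matters because assembler-capability macros such as CONFIG_AS_AVX2 are either defined as 1 or not defined at all: `#if CONFIG_AS_AVX2` only worked because an undefined identifier silently evaluates to 0 inside `#if`, whereas `#ifdef` tests definedness directly and stays quiet under -Wundef. A small illustration with a stand-in macro:

```c
#include <stdio.h>

/* Capability macros are either "#define HAVE_FEATURE 1" or absent.
 * Simulate the "defined" case with a stand-in. */
#define HAVE_FEATURE 1

int main(void)
{
#if HAVE_FEATURE		/* works only because the macro expands to 1 */
	puts("#if: feature compiled in");
#endif

#ifdef HAVE_FEATURE		/* tests definedness, the intended check */
	puts("#ifdef: feature compiled in");
#endif

/* With an undefined macro, "#if NOT_DEFINED" quietly evaluates to 0
 * (-Wundef at least warns); "#ifdef" states the intent explicitly. */
#ifdef NOT_DEFINED
	puts("never printed");
#endif
	return 0;
}
```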
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c3c723e902b..9cc4c4a90d00 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Resizable, Scalable, Concurrent Hash Table | 2 | * Resizable, Scalable, Concurrent Hash Table |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> | 4 | * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> |
| 5 | * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> | 5 | * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> |
| 6 | * | 6 | * |
| 7 | * Based on the following paper: | 7 | * Based on the following paper: |
| @@ -23,94 +23,203 @@ | |||
| 23 | #include <linux/jhash.h> | 23 | #include <linux/jhash.h> |
| 24 | #include <linux/random.h> | 24 | #include <linux/random.h> |
| 25 | #include <linux/rhashtable.h> | 25 | #include <linux/rhashtable.h> |
| 26 | #include <linux/err.h> | ||
| 26 | 27 | ||
| 27 | #define HASH_DEFAULT_SIZE 64UL | 28 | #define HASH_DEFAULT_SIZE 64UL |
| 28 | #define HASH_MIN_SIZE 4UL | 29 | #define HASH_MIN_SIZE 4UL |
| 30 | #define BUCKET_LOCKS_PER_CPU 128UL | ||
| 29 | 31 | ||
| 30 | #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) | 32 | /* Base bits plus 1 bit for nulls marker */ |
| 33 | #define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) | ||
| 31 | 34 | ||
| 32 | #ifdef CONFIG_PROVE_LOCKING | 35 | enum { |
| 33 | int lockdep_rht_mutex_is_held(const struct rhashtable *ht) | 36 | RHT_LOCK_NORMAL, |
| 37 | RHT_LOCK_NESTED, | ||
| 38 | }; | ||
| 39 | |||
| 40 | /* The bucket lock is selected based on the hash and protects mutations | ||
| 41 | * on a group of hash buckets. | ||
| 42 | * | ||
| 43 | * A maximum of tbl->size/2 bucket locks is allocated. This ensures that | ||
| 44 | * a single lock always covers both buckets which may both contain ||
| 45 | * entries which link to the same bucket of the old table during resizing. | ||
| 46 | * This simplifies the locking, as locking the bucket in both ||
| 47 | * tables during a resize always guarantees protection. ||
| 48 | * | ||
| 49 | * IMPORTANT: When holding the bucket lock of both the old and new table | ||
| 50 | * during expansions and shrinking, the old bucket lock must always be | ||
| 51 | * acquired first. | ||
| 52 | */ | ||
| 53 | static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash) | ||
| 34 | { | 54 | { |
| 35 | return ht->p.mutex_is_held(ht->p.parent); | 55 | return &tbl->locks[hash & tbl->locks_mask]; |
| 36 | } | 56 | } |
| 37 | EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); | ||
| 38 | #endif | ||
| 39 | 57 | ||
| 40 | static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) | 58 | static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he) |
| 41 | { | 59 | { |
| 42 | return (void *) he - ht->p.head_offset; | 60 | return (void *) he - ht->p.head_offset; |
| 43 | } | 61 | } |
| 44 | 62 | ||
| 45 | static u32 __hashfn(const struct rhashtable *ht, const void *key, | 63 | static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash) |
| 46 | u32 len, u32 hsize) | 64 | { |
| 65 | return hash & (tbl->size - 1); | ||
| 66 | } | ||
| 67 | |||
| 68 | static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr) | ||
| 47 | { | 69 | { |
| 48 | u32 h; | 70 | u32 hash; |
| 49 | 71 | ||
| 50 | h = ht->p.hashfn(key, len, ht->p.hash_rnd); | 72 | if (unlikely(!ht->p.key_len)) |
| 73 | hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); | ||
| 74 | else | ||
| 75 | hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len, | ||
| 76 | ht->p.hash_rnd); | ||
| 51 | 77 | ||
| 52 | return h & (hsize - 1); | 78 | return hash >> HASH_RESERVED_SPACE; |
| 53 | } | 79 | } |
| 54 | 80 | ||
| 55 | /** | 81 | static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len) |
| 56 | * rhashtable_hashfn - compute hash for key of given length | ||
| 57 | * @ht: hash table to compute for | ||
| 58 | * @key: pointer to key | ||
| 59 | * @len: length of key | ||
| 60 | * | ||
| 61 | * Computes the hash value using the hash function provided in the 'hashfn' | ||
| 62 | * of struct rhashtable_params. The returned value is guaranteed to be | ||
| 63 | * smaller than the number of buckets in the hash table. | ||
| 64 | */ | ||
| 65 | u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len) | ||
| 66 | { | 82 | { |
| 67 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 83 | return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE; |
| 84 | } | ||
| 68 | 85 | ||
| 69 | return __hashfn(ht, key, len, tbl->size); | 86 | static u32 head_hashfn(const struct rhashtable *ht, |
| 87 | const struct bucket_table *tbl, | ||
| 88 | const struct rhash_head *he) | ||
| 89 | { | ||
| 90 | return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he))); | ||
| 70 | } | 91 | } |
| 71 | EXPORT_SYMBOL_GPL(rhashtable_hashfn); | ||
| 72 | 92 | ||
| 73 | static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize) | 93 | #ifdef CONFIG_PROVE_LOCKING |
| 94 | static void debug_dump_buckets(const struct rhashtable *ht, | ||
| 95 | const struct bucket_table *tbl) | ||
| 74 | { | 96 | { |
| 75 | if (unlikely(!ht->p.key_len)) { | 97 | struct rhash_head *he; |
| 76 | u32 h; | 98 | unsigned int i, hash; |
| 77 | 99 | ||
| 78 | h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd); | 100 | for (i = 0; i < tbl->size; i++) { |
| 101 | pr_warn(" [Bucket %d] ", i); | ||
| 102 | rht_for_each_rcu(he, tbl, i) { | ||
| 103 | hash = head_hashfn(ht, tbl, he); | ||
| 104 | pr_cont("[hash = %#x, lock = %p] ", | ||
| 105 | hash, bucket_lock(tbl, hash)); | ||
| 106 | } | ||
| 107 | pr_cont("\n"); | ||
| 108 | } | ||
| 109 | |||
| 110 | } | ||
| 111 | |||
| 112 | static void debug_dump_table(struct rhashtable *ht, | ||
| 113 | const struct bucket_table *tbl, | ||
| 114 | unsigned int hash) | ||
| 115 | { | ||
| 116 | struct bucket_table *old_tbl, *future_tbl; | ||
| 117 | |||
| 118 | pr_emerg("BUG: lock for hash %#x in table %p not held\n", | ||
| 119 | hash, tbl); | ||
| 79 | 120 | ||
| 80 | return h & (hsize - 1); | 121 | rcu_read_lock(); |
| 122 | future_tbl = rht_dereference_rcu(ht->future_tbl, ht); | ||
| 123 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 124 | if (future_tbl != old_tbl) { | ||
| 125 | pr_warn("Future table %p (size: %zd)\n", | ||
| 126 | future_tbl, future_tbl->size); | ||
| 127 | debug_dump_buckets(ht, future_tbl); | ||
| 81 | } | 128 | } |
| 82 | 129 | ||
| 83 | return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize); | 130 | pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size); |
| 131 | debug_dump_buckets(ht, old_tbl); | ||
| 132 | |||
| 133 | rcu_read_unlock(); | ||
| 84 | } | 134 | } |
| 85 | 135 | ||
| 86 | /** | 136 | #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT)) |
| 87 | * rhashtable_obj_hashfn - compute hash for hashed object | 137 | #define ASSERT_BUCKET_LOCK(HT, TBL, HASH) \ |
| 88 | * @ht: hash table to compute for | 138 | do { \ |
| 89 | * @ptr: pointer to hashed object | 139 | if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) { \ |
| 90 | * | 140 | debug_dump_table(HT, TBL, HASH); \ |
| 91 | * Computes the hash value using the hash function `hashfn` respectively | 141 | BUG(); \ |
| 92 | * 'obj_hashfn' depending on whether the hash table is set up to work with | 142 | } \ |
| 93 | * a fixed length key. The returned value is guaranteed to be smaller than | 143 | } while (0) |
| 94 | * the number of buckets in the hash table. | 144 | |
| 95 | */ | 145 | int lockdep_rht_mutex_is_held(struct rhashtable *ht) |
| 96 | u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr) | ||
| 97 | { | 146 | { |
| 98 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 147 | return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1; |
| 148 | } | ||
| 149 | EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held); | ||
| 99 | 150 | ||
| 100 | return obj_hashfn(ht, ptr, tbl->size); | 151 | int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash) |
| 152 | { | ||
| 153 | spinlock_t *lock = bucket_lock(tbl, hash); | ||
| 154 | |||
| 155 | return (debug_locks) ? lockdep_is_held(lock) : 1; | ||
| 101 | } | 156 | } |
| 102 | EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn); | 157 | EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held); |
| 158 | #else | ||
| 159 | #define ASSERT_RHT_MUTEX(HT) | ||
| 160 | #define ASSERT_BUCKET_LOCK(HT, TBL, HASH) | ||
| 161 | #endif | ||
| 103 | 162 | ||
| 104 | static u32 head_hashfn(const struct rhashtable *ht, | 163 | |
| 105 | const struct rhash_head *he, u32 hsize) | 164 | static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n) |
| 106 | { | 165 | { |
| 107 | return obj_hashfn(ht, rht_obj(ht, he), hsize); | 166 | struct rhash_head __rcu **pprev; |
| 167 | |||
| 168 | for (pprev = &tbl->buckets[n]; | ||
| 169 | !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n)); | ||
| 170 | pprev = &rht_dereference_bucket(*pprev, tbl, n)->next) | ||
| 171 | ; | ||
| 172 | |||
| 173 | return pprev; | ||
| 108 | } | 174 | } |
| 109 | 175 | ||
| 110 | static struct bucket_table *bucket_table_alloc(size_t nbuckets) | 176 | static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl) |
| 177 | { | ||
| 178 | unsigned int i, size; | ||
| 179 | #if defined(CONFIG_PROVE_LOCKING) | ||
| 180 | unsigned int nr_pcpus = 2; | ||
| 181 | #else | ||
| 182 | unsigned int nr_pcpus = num_possible_cpus(); | ||
| 183 | #endif | ||
| 184 | |||
| 185 | nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL); | ||
| 186 | size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul); | ||
| 187 | |||
| 188 | /* Never allocate more than 0.5 locks per bucket */ | ||
| 189 | size = min_t(unsigned int, size, tbl->size >> 1); | ||
| 190 | |||
| 191 | if (sizeof(spinlock_t) != 0) { | ||
| 192 | #ifdef CONFIG_NUMA | ||
| 193 | if (size * sizeof(spinlock_t) > PAGE_SIZE) | ||
| 194 | tbl->locks = vmalloc(size * sizeof(spinlock_t)); | ||
| 195 | else | ||
| 196 | #endif | ||
| 197 | tbl->locks = kmalloc_array(size, sizeof(spinlock_t), | ||
| 198 | GFP_KERNEL); | ||
| 199 | if (!tbl->locks) | ||
| 200 | return -ENOMEM; | ||
| 201 | for (i = 0; i < size; i++) | ||
| 202 | spin_lock_init(&tbl->locks[i]); | ||
| 203 | } | ||
| 204 | tbl->locks_mask = size - 1; | ||
| 205 | |||
| 206 | return 0; | ||
| 207 | } | ||
| 208 | |||
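alloc_bucket_locks() above is classic lock striping: a power-of-two array of spinlocks sized from the CPU count times locks_mul, capped at half the bucket count, with bucket_lock() mapping a hash to a lock via `hash & locks_mask`. A hedged userspace sketch of the same sizing policy using pthread mutexes (roundup_pow2() is a stand-in for the kernel's roundup_pow_of_two(); assumes a power-of-two bucket count, as rhashtable guarantees):

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Round up to the next power of two (stand-in for the kernel's
 * roundup_pow_of_two()). */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

struct striped_table {
	pthread_mutex_t *locks;
	unsigned int locks_mask;	/* size - 1, size a power of two */
};

/* Mirror of the sizing policy above: nr_cpus * locks_mul locks,
 * rounded to a power of two, never more than nbuckets / 2. */
static int alloc_striped_locks(struct striped_table *t, unsigned int nr_cpus,
			       unsigned int locks_mul, unsigned int nbuckets)
{
	unsigned int i, size = roundup_pow2(nr_cpus * locks_mul);

	if (size > nbuckets / 2)	/* nbuckets assumed a power of two */
		size = nbuckets / 2;

	t->locks = malloc(size * sizeof(*t->locks));
	if (!t->locks)
		return -1;
	for (i = 0; i < size; i++)
		pthread_mutex_init(&t->locks[i], NULL);
	t->locks_mask = size - 1;
	return 0;
}

/* Same mapping as bucket_lock(): hash & locks_mask. */
static pthread_mutex_t *bucket_lock(struct striped_table *t, unsigned int hash)
{
	return &t->locks[hash & t->locks_mask];
}

int main(void)
{
	struct striped_table t;

	if (alloc_striped_locks(&t, 4, 128, 4096))
		return 1;
	printf("locks: %u, hash 0x1234 -> lock %u (%p)\n",
	       t.locks_mask + 1, 0x1234 & t.locks_mask,
	       (void *)bucket_lock(&t, 0x1234));
	return 0;
}
```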
| 209 | static void bucket_table_free(const struct bucket_table *tbl) | ||
| 210 | { | ||
| 211 | if (tbl) | ||
| 212 | kvfree(tbl->locks); | ||
| 213 | |||
| 214 | kvfree(tbl); | ||
| 215 | } | ||
| 216 | |||
| 217 | static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, | ||
| 218 | size_t nbuckets) | ||
| 111 | { | 219 | { |
| 112 | struct bucket_table *tbl; | 220 | struct bucket_table *tbl; |
| 113 | size_t size; | 221 | size_t size; |
| 222 | int i; | ||
| 114 | 223 | ||
| 115 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); | 224 | size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); |
| 116 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); | 225 | tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); |
| @@ -122,12 +231,15 @@ static struct bucket_table *bucket_table_alloc(size_t nbuckets) | |||
| 122 | 231 | ||
| 123 | tbl->size = nbuckets; | 232 | tbl->size = nbuckets; |
| 124 | 233 | ||
| 125 | return tbl; | 234 | if (alloc_bucket_locks(ht, tbl) < 0) { |
| 126 | } | 235 | bucket_table_free(tbl); |
| 236 | return NULL; | ||
| 237 | } | ||
| 127 | 238 | ||
| 128 | static void bucket_table_free(const struct bucket_table *tbl) | 239 | for (i = 0; i < nbuckets; i++) |
| 129 | { | 240 | INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i); |
| 130 | kvfree(tbl); | 241 | |
| 242 | return tbl; | ||
| 131 | } | 243 | } |
| 132 | 244 | ||
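INIT_RHT_NULLS_HEAD() terminates every chain with a "nulls" marker rather than a plain NULL: an odd pointer-sized value encoding the bucket it belongs to, so an RCU reader that races with a resize can detect it finished on the wrong chain and restart. A sketch of the encoding idea (illustrative shifts, not the kernel's exact RHT_BASE_BITS layout):

```c
#include <stdint.h>
#include <stdio.h>

/* A "nulls" terminator is an odd value, so it can never collide with
 * a real, at-least-2-byte-aligned object pointer. The remaining bits
 * carry an identifier -- here simply the bucket hash. */
static uintptr_t make_nulls(uint32_t hash)
{
	return ((uintptr_t)hash << 1) | 1;
}

static int is_a_nulls(uintptr_t ptr)
{
	return ptr & 1;
}

static uint32_t nulls_hash(uintptr_t ptr)
{
	return (uint32_t)(ptr >> 1);
}

int main(void)
{
	uintptr_t end = make_nulls(42);

	if (is_a_nulls(end))	/* reader can verify it's still on bucket 42 */
		printf("chain ends in nulls marker for bucket %u\n",
		       nulls_hash(end));
	return 0;
}
```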
| 133 | /** | 245 | /** |
| @@ -138,7 +250,8 @@ static void bucket_table_free(const struct bucket_table *tbl) | |||
| 138 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) | 250 | bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) |
| 139 | { | 251 | { |
| 140 | /* Expand table when exceeding 75% load */ | 252 | /* Expand table when exceeding 75% load */ |
| 141 | return ht->nelems > (new_size / 4 * 3); | 253 | return atomic_read(&ht->nelems) > (new_size / 4 * 3) && |
| 254 | (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); | ||
| 142 | } | 255 | } |
| 143 | EXPORT_SYMBOL_GPL(rht_grow_above_75); | 256 | EXPORT_SYMBOL_GPL(rht_grow_above_75); |
| 144 | 257 | ||
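rht_grow_above_75() expands at 75% load, and its counterpart in the next hunk, rht_shrink_below_30(), shrinks under 30%. Worked numbers: with 64 buckets the table grows once nelems exceeds 48, and a 128-bucket table shrinks once nelems drops below 38. The watermark arithmetic in isolation (shift-limit checks omitted):

```c
#include <stdbool.h>
#include <stdio.h>

/* Same arithmetic as rht_grow_above_75()/rht_shrink_below_30(),
 * minus the min/max shift checks. */
static bool grow_above_75(unsigned int nelems, unsigned int size)
{
	return nelems > size / 4 * 3;	/* > 75% load */
}

static bool shrink_below_30(unsigned int nelems, unsigned int size)
{
	return nelems < size * 3 / 10;	/* < 30% load */
}

int main(void)
{
	printf("64 buckets, 49 entries -> grow:    %d\n", grow_above_75(49, 64));
	printf("128 buckets, 37 entries -> shrink: %d\n", shrink_below_30(37, 128));
	return 0;
}
```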
| @@ -150,41 +263,75 @@ EXPORT_SYMBOL_GPL(rht_grow_above_75); | |||
| 150 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) | 263 | bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) |
| 151 | { | 264 | { |
| 152 | /* Shrink table beneath 30% load */ | 265 | /* Shrink table beneath 30% load */ |
| 153 | return ht->nelems < (new_size * 3 / 10); | 266 | return atomic_read(&ht->nelems) < (new_size * 3 / 10) && |
| 267 | (atomic_read(&ht->shift) > ht->p.min_shift); | ||
| 154 | } | 268 | } |
| 155 | EXPORT_SYMBOL_GPL(rht_shrink_below_30); | 269 | EXPORT_SYMBOL_GPL(rht_shrink_below_30); |
| 156 | 270 | ||
| 157 | static void hashtable_chain_unzip(const struct rhashtable *ht, | 271 | static void lock_buckets(struct bucket_table *new_tbl, |
| 272 | struct bucket_table *old_tbl, unsigned int hash) | ||
| 273 | __acquires(old_bucket_lock) | ||
| 274 | { | ||
| 275 | spin_lock_bh(bucket_lock(old_tbl, hash)); | ||
| 276 | if (new_tbl != old_tbl) | ||
| 277 | spin_lock_bh_nested(bucket_lock(new_tbl, hash), | ||
| 278 | RHT_LOCK_NESTED); | ||
| 279 | } | ||
| 280 | |||
| 281 | static void unlock_buckets(struct bucket_table *new_tbl, | ||
| 282 | struct bucket_table *old_tbl, unsigned int hash) | ||
| 283 | __releases(old_bucket_lock) | ||
| 284 | { | ||
| 285 | if (new_tbl != old_tbl) | ||
| 286 | spin_unlock_bh(bucket_lock(new_tbl, hash)); | ||
| 287 | spin_unlock_bh(bucket_lock(old_tbl, hash)); | ||
| 288 | } | ||
| 289 | |||
| 290 | /** | ||
| 291 | * Unlink entries on a bucket which hash to a different bucket. ||
| 292 | * | ||
| 293 | * Returns true if more work remains to be performed on the bucket. ||
| 294 | */ | ||
| 295 | static bool hashtable_chain_unzip(struct rhashtable *ht, | ||
| 158 | const struct bucket_table *new_tbl, | 296 | const struct bucket_table *new_tbl, |
| 159 | struct bucket_table *old_tbl, size_t n) | 297 | struct bucket_table *old_tbl, |
| 298 | size_t old_hash) | ||
| 160 | { | 299 | { |
| 161 | struct rhash_head *he, *p, *next; | 300 | struct rhash_head *he, *p, *next; |
| 162 | unsigned int h; | 301 | unsigned int new_hash, new_hash2; |
| 302 | |||
| 303 | ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash); | ||
| 163 | 304 | ||
| 164 | /* Old bucket empty, no work needed. */ | 305 | /* Old bucket empty, no work needed. */ |
| 165 | p = rht_dereference(old_tbl->buckets[n], ht); | 306 | p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, |
| 166 | if (!p) | 307 | old_hash); |
| 167 | return; | 308 | if (rht_is_a_nulls(p)) |
| 309 | return false; | ||
| 310 | |||
| 311 | new_hash = head_hashfn(ht, new_tbl, p); | ||
| 312 | ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); | ||
| 168 | 313 | ||
| 169 | /* Advance the old bucket pointer one or more times until it | 314 | /* Advance the old bucket pointer one or more times until it |
| 170 | * reaches a node that doesn't hash to the same bucket as the | 315 | * reaches a node that doesn't hash to the same bucket as the |
| 171 | * previous node p. Call the previous node p; | 316 | * previous node p. Call the previous node p; |
| 172 | */ | 317 | */ |
| 173 | h = head_hashfn(ht, p, new_tbl->size); | 318 | rht_for_each_continue(he, p->next, old_tbl, old_hash) { |
| 174 | rht_for_each(he, p->next, ht) { | 319 | new_hash2 = head_hashfn(ht, new_tbl, he); |
| 175 | if (head_hashfn(ht, he, new_tbl->size) != h) | 320 | ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2); |
| 321 | |||
| 322 | if (new_hash != new_hash2) | ||
| 176 | break; | 323 | break; |
| 177 | p = he; | 324 | p = he; |
| 178 | } | 325 | } |
| 179 | RCU_INIT_POINTER(old_tbl->buckets[n], p->next); | 326 | rcu_assign_pointer(old_tbl->buckets[old_hash], p->next); |
| 180 | 327 | ||
| 181 | /* Find the subsequent node which does hash to the same | 328 | /* Find the subsequent node which does hash to the same |
| 182 | * bucket as node P, or NULL if no such node exists. | 329 | * bucket as node P, or NULL if no such node exists. |
| 183 | */ | 330 | */ |
| 184 | next = NULL; | 331 | INIT_RHT_NULLS_HEAD(next, ht, old_hash); |
| 185 | if (he) { | 332 | if (!rht_is_a_nulls(he)) { |
| 186 | rht_for_each(he, he->next, ht) { | 333 | rht_for_each_continue(he, he->next, old_tbl, old_hash) { |
| 187 | if (head_hashfn(ht, he, new_tbl->size) == h) { | 334 | if (head_hashfn(ht, new_tbl, he) == new_hash) { |
| 188 | next = he; | 335 | next = he; |
| 189 | break; | 336 | break; |
| 190 | } | 337 | } |
| @@ -194,7 +341,20 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, | |||
| 194 | /* Set p's next pointer to that subsequent node pointer, | 341 | /* Set p's next pointer to that subsequent node pointer, |
| 195 | * bypassing the nodes which do not hash to p's bucket | 342 | * bypassing the nodes which do not hash to p's bucket |
| 196 | */ | 343 | */ |
| 197 | RCU_INIT_POINTER(p->next, next); | 344 | rcu_assign_pointer(p->next, next); |
| 345 | |||
| 346 | p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl, | ||
| 347 | old_hash); | ||
| 348 | |||
| 349 | return !rht_is_a_nulls(p); | ||
| 350 | } | ||
| 351 | |||
| 352 | static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl, | ||
| 353 | unsigned int new_hash, struct rhash_head *entry) | ||
| 354 | { | ||
| 355 | ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash); | ||
| 356 | |||
| 357 | rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry); | ||
| 198 | } | 358 | } |
| 199 | 359 | ||
| 200 | /** | 360 | /** |
| @@ -207,53 +367,57 @@ static void hashtable_chain_unzip(const struct rhashtable *ht, | |||
| 207 | * This function may only be called in a context where it is safe to call | 367 | * This function may only be called in a context where it is safe to call |
| 208 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | 368 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. |
| 209 | * | 369 | * |
| 210 | * The caller must ensure that no concurrent table mutations take place. | 370 | * The caller must ensure that no concurrent resizing occurs by holding |
| 211 | * It is however valid to have concurrent lookups if they are RCU protected. | 371 | * ht->mutex. |
| 372 | * | ||
| 373 | * It is valid to have concurrent insertions and deletions protected by per | ||
| 374 | * bucket locks or concurrent RCU protected lookups and traversals. | ||
| 212 | */ | 375 | */ |
| 213 | int rhashtable_expand(struct rhashtable *ht) | 376 | int rhashtable_expand(struct rhashtable *ht) |
| 214 | { | 377 | { |
| 215 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); | 378 | struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); |
| 216 | struct rhash_head *he; | 379 | struct rhash_head *he; |
| 217 | unsigned int i, h; | 380 | unsigned int new_hash, old_hash; |
| 218 | bool complete; | 381 | bool complete = false; |
| 219 | 382 | ||
| 220 | ASSERT_RHT_MUTEX(ht); | 383 | ASSERT_RHT_MUTEX(ht); |
| 221 | 384 | ||
| 222 | if (ht->p.max_shift && ht->shift >= ht->p.max_shift) | 385 | new_tbl = bucket_table_alloc(ht, old_tbl->size * 2); |
| 223 | return 0; | ||
| 224 | |||
| 225 | new_tbl = bucket_table_alloc(old_tbl->size * 2); | ||
| 226 | if (new_tbl == NULL) | 386 | if (new_tbl == NULL) |
| 227 | return -ENOMEM; | 387 | return -ENOMEM; |
| 228 | 388 | ||
| 229 | ht->shift++; | 389 | atomic_inc(&ht->shift); |
| 390 | |||
| 391 | /* Make insertions go into the new, empty table right away. Deletions | ||
| 392 | * and lookups will be attempted in both tables until we synchronize. | ||
| 393 | * The synchronize_rcu() guarantees for the new table to be picked up | ||
| 394 | * so no new additions go into the old table while we relink. | ||
| 395 | */ | ||
| 396 | rcu_assign_pointer(ht->future_tbl, new_tbl); | ||
| 397 | synchronize_rcu(); | ||
| 230 | 398 | ||
| 231 | /* For each new bucket, search the corresponding old bucket | 399 | /* For each new bucket, search the corresponding old bucket for the |
| 232 | * for the first entry that hashes to the new bucket, and | 400 | * first entry that hashes to the new bucket, and link the end of |
| 233 | * link the new bucket to that entry. Since all the entries | 401 | * the newly formed bucket chain (containing entries added to the future |
| 234 | * which will end up in the new bucket appear in the same | 402 | * table) to that entry. Since all the entries which will end up in |
| 235 | * old bucket, this constructs an entirely valid new hash | 403 | * the new bucket appear in the same old bucket, this constructs an |
| 236 | * table, but with multiple buckets "zipped" together into a | 404 | * entirely valid new hash table, but with multiple buckets |
| 237 | * single imprecise chain. | 405 | * "zipped" together into a single imprecise chain. |
| 238 | */ | 406 | */ |
| 239 | for (i = 0; i < new_tbl->size; i++) { | 407 | for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { |
| 240 | h = i & (old_tbl->size - 1); | 408 | old_hash = rht_bucket_index(old_tbl, new_hash); |
| 241 | rht_for_each(he, old_tbl->buckets[h], ht) { | 409 | lock_buckets(new_tbl, old_tbl, new_hash); |
| 242 | if (head_hashfn(ht, he, new_tbl->size) == i) { | 410 | rht_for_each(he, old_tbl, old_hash) { |
| 243 | RCU_INIT_POINTER(new_tbl->buckets[i], he); | 411 | if (head_hashfn(ht, new_tbl, he) == new_hash) { |
| 412 | link_old_to_new(ht, new_tbl, new_hash, he); | ||
| 244 | break; | 413 | break; |
| 245 | } | 414 | } |
| 246 | } | 415 | } |
| 416 | unlock_buckets(new_tbl, old_tbl, new_hash); | ||
| 247 | } | 417 | } |
| 248 | 418 | ||
| 249 | /* Publish the new table pointer. Lookups may now traverse | ||
| 250 | * the new table, but they will not benefit from any | ||
| 251 | * additional efficiency until later steps unzip the buckets. | ||
| 252 | */ | ||
| 253 | rcu_assign_pointer(ht->tbl, new_tbl); | ||
| 254 | |||
| 255 | /* Unzip interleaved hash chains */ | 419 | /* Unzip interleaved hash chains */ |
| 256 | do { | 420 | while (!complete && !ht->being_destroyed) { |
| 257 | /* Wait for readers. All new readers will see the new | 421 | /* Wait for readers. All new readers will see the new |
| 258 | * table, and thus no references to the old table will | 422 | * table, and thus no references to the old table will |
| 259 | * remain. | 423 | * remain. |
| @@ -265,12 +429,19 @@ int rhashtable_expand(struct rhashtable *ht) | |||
| 265 | * table): ... | 429 | * table): ... |
| 266 | */ | 430 | */ |
| 267 | complete = true; | 431 | complete = true; |
| 268 | for (i = 0; i < old_tbl->size; i++) { | 432 | for (old_hash = 0; old_hash < old_tbl->size; old_hash++) { |
| 269 | hashtable_chain_unzip(ht, new_tbl, old_tbl, i); | 433 | lock_buckets(new_tbl, old_tbl, old_hash); |
| 270 | if (old_tbl->buckets[i] != NULL) | 434 | |
| 435 | if (hashtable_chain_unzip(ht, new_tbl, old_tbl, | ||
| 436 | old_hash)) | ||
| 271 | complete = false; | 437 | complete = false; |
| 438 | |||
| 439 | unlock_buckets(new_tbl, old_tbl, old_hash); | ||
| 272 | } | 440 | } |
| 273 | } while (!complete); | 441 | } |
| 442 | |||
| 443 | rcu_assign_pointer(ht->tbl, new_tbl); | ||
| 444 | synchronize_rcu(); | ||
| 274 | 445 | ||
| 275 | bucket_table_free(old_tbl); | 446 | bucket_table_free(old_tbl); |
| 276 | return 0; | 447 | return 0; |
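The zip step in rhashtable_expand() above leans on a fixed index relationship: table sizes are powers of two, so every new bucket new_hash drains exactly one old bucket, new_hash & (old_size - 1), and conversely the shrink path in the next hunk concatenates old buckets h and h + new_size into new bucket h. A quick sanity check of both mappings:

```c
#include <stdio.h>

int main(void)
{
	unsigned int h;

	/* Expand 4 -> 8: each of the 8 new buckets pulls its chain out
	 * of old bucket (h & 3); old bucket 1 feeds new buckets 1 and 5. */
	for (h = 0; h < 8; h++)
		printf("expand: new bucket %u <- old bucket %u\n", h, h & 3);

	/* Shrink 8 -> 4: new bucket h is the concatenation of old
	 * buckets h and h + 4, matching the two bucket_tail() links. */
	for (h = 0; h < 4; h++)
		printf("shrink: new bucket %u <- old buckets %u and %u\n",
		       h, h, h + 4);
	return 0;
}
```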
| @@ -284,45 +455,51 @@ EXPORT_SYMBOL_GPL(rhashtable_expand); | |||
| 284 | * This function may only be called in a context where it is safe to call | 455 | * This function may only be called in a context where it is safe to call |
| 285 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. | 456 | * synchronize_rcu(), e.g. not within a rcu_read_lock() section. |
| 286 | * | 457 | * |
| 458 | * The caller must ensure that no concurrent resizing occurs by holding | ||
| 459 | * ht->mutex. | ||
| 460 | * | ||
| 287 | * The caller must ensure that no concurrent table mutations take place. | 461 | * The caller must ensure that no concurrent table mutations take place. |
| 288 | * It is however valid to have concurrent lookups if they are RCU protected. | 462 | * It is however valid to have concurrent lookups if they are RCU protected. |
| 463 | * | ||
| 464 | * It is valid to have concurrent insertions and deletions protected by per | ||
| 465 | * bucket locks or concurrent RCU protected lookups and traversals. | ||
| 289 | */ | 466 | */ |
| 290 | int rhashtable_shrink(struct rhashtable *ht) | 467 | int rhashtable_shrink(struct rhashtable *ht) |
| 291 | { | 468 | { |
| 292 | struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht); | 469 | struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht); |
| 293 | struct rhash_head __rcu **pprev; | 470 | unsigned int new_hash; |
| 294 | unsigned int i; | ||
| 295 | 471 | ||
| 296 | ASSERT_RHT_MUTEX(ht); | 472 | ASSERT_RHT_MUTEX(ht); |
| 297 | 473 | ||
| 298 | if (ht->shift <= ht->p.min_shift) | 474 | new_tbl = bucket_table_alloc(ht, tbl->size / 2); |
| 299 | return 0; | 475 | if (new_tbl == NULL) |
| 300 | |||
| 301 | ntbl = bucket_table_alloc(tbl->size / 2); | ||
| 302 | if (ntbl == NULL) | ||
| 303 | return -ENOMEM; | 476 | return -ENOMEM; |
| 304 | 477 | ||
| 305 | ht->shift--; | 478 | rcu_assign_pointer(ht->future_tbl, new_tbl); |
| 479 | synchronize_rcu(); | ||
| 306 | 480 | ||
| 307 | /* Link each bucket in the new table to the first bucket | 481 | /* Link the first entry in the old bucket to the end of the |
| 308 | * in the old table that contains entries which will hash | 482 | * bucket in the new table. As entries are concurrently being |
| 309 | * to the new bucket. | 483 | * added to the new table, lock down the new bucket. As we |
| 484 | * always divide the size in half when shrinking, each bucket | ||
| 485 | * in the new table maps to exactly two buckets in the old | ||
| 486 | * table. | ||
| 310 | */ | 487 | */ |
| 311 | for (i = 0; i < ntbl->size; i++) { | 488 | for (new_hash = 0; new_hash < new_tbl->size; new_hash++) { |
| 312 | ntbl->buckets[i] = tbl->buckets[i]; | 489 | lock_buckets(new_tbl, tbl, new_hash); |
| 313 | 490 | ||
| 314 | /* Link each bucket in the new table to the first bucket | 491 | rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), |
| 315 | * in the old table that contains entries which will hash | 492 | tbl->buckets[new_hash]); |
| 316 | * to the new bucket. | 493 | ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size); |
| 317 | */ | 494 | rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), |
| 318 | for (pprev = &ntbl->buckets[i]; *pprev != NULL; | 495 | tbl->buckets[new_hash + new_tbl->size]); |
| 319 | pprev = &rht_dereference(*pprev, ht)->next) | 496 | |
| 320 | ; | 497 | unlock_buckets(new_tbl, tbl, new_hash); |
| 321 | RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]); | ||
| 322 | } | 498 | } |
| 323 | 499 | ||
| 324 | /* Publish the new, valid hash table */ | 500 | /* Publish the new, valid hash table */ |
| 325 | rcu_assign_pointer(ht->tbl, ntbl); | 501 | rcu_assign_pointer(ht->tbl, new_tbl); |
| 502 | atomic_dec(&ht->shift); | ||
| 326 | 503 | ||
| 327 | /* Wait for readers. No new readers will have references to the | 504 | /* Wait for readers. No new readers will have references to the |
| 328 | * old hash table. | 505 | * old hash table. |
| @@ -335,59 +512,99 @@ int rhashtable_shrink(struct rhashtable *ht) | |||
| 335 | } | 512 | } |
| 336 | EXPORT_SYMBOL_GPL(rhashtable_shrink); | 513 | EXPORT_SYMBOL_GPL(rhashtable_shrink); |
| 337 | 514 | ||
| 338 | /** | 515 | static void rht_deferred_worker(struct work_struct *work) |
| 339 | * rhashtable_insert - insert object into hash hash table | ||
| 340 | * @ht: hash table | ||
| 341 | * @obj: pointer to hash head inside object | ||
| 342 | * | ||
| 343 | * Will automatically grow the table via rhashtable_expand() if the the | ||
| 344 | * grow_decision function specified at rhashtable_init() returns true. | ||
| 345 | * | ||
| 346 | * The caller must ensure that no concurrent table mutations occur. It is | ||
| 347 | * however valid to have concurrent lookups if they are RCU protected. | ||
| 348 | */ | ||
| 349 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) | ||
| 350 | { | 516 | { |
| 351 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | 517 | struct rhashtable *ht; |
| 352 | u32 hash; | 518 | struct bucket_table *tbl; |
| 519 | struct rhashtable_walker *walker; | ||
| 353 | 520 | ||
| 354 | ASSERT_RHT_MUTEX(ht); | 521 | ht = container_of(work, struct rhashtable, run_work); |
| 522 | mutex_lock(&ht->mutex); | ||
| 523 | if (ht->being_destroyed) | ||
| 524 | goto unlock; | ||
| 355 | 525 | ||
| 356 | hash = head_hashfn(ht, obj, tbl->size); | 526 | tbl = rht_dereference(ht->tbl, ht); |
| 357 | RCU_INIT_POINTER(obj->next, tbl->buckets[hash]); | 527 | |
| 358 | rcu_assign_pointer(tbl->buckets[hash], obj); | 528 | list_for_each_entry(walker, &ht->walkers, list) |
| 359 | ht->nelems++; | 529 | walker->resize = true; |
| 360 | 530 | ||
| 361 | if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) | 531 | if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) |
| 362 | rhashtable_expand(ht); | 532 | rhashtable_expand(ht); |
| 533 | else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) | ||
| 534 | rhashtable_shrink(ht); | ||
| 535 | |||
| 536 | unlock: | ||
| 537 | mutex_unlock(&ht->mutex); | ||
| 538 | } | ||
| 539 | |||
| 540 | static void rhashtable_wakeup_worker(struct rhashtable *ht) | ||
| 541 | { | ||
| 542 | struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 543 | struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); | ||
| 544 | size_t size = tbl->size; | ||
| 545 | |||
| 546 | /* Only adjust the table if no resizing is currently in progress. */ | ||
| 547 | if (tbl == new_tbl && | ||
| 548 | ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || | ||
| 549 | (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) | ||
| 550 | schedule_work(&ht->run_work); | ||
| 551 | } | ||
| 552 | |||
| 553 | static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, | ||
| 554 | struct bucket_table *tbl, u32 hash) | ||
| 555 | { | ||
| 556 | struct rhash_head *head; | ||
| 557 | |||
| 558 | hash = rht_bucket_index(tbl, hash); | ||
| 559 | head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); | ||
| 560 | |||
| 561 | ASSERT_BUCKET_LOCK(ht, tbl, hash); | ||
| 562 | |||
| 563 | if (rht_is_a_nulls(head)) | ||
| 564 | INIT_RHT_NULLS_HEAD(obj->next, ht, hash); | ||
| 565 | else | ||
| 566 | RCU_INIT_POINTER(obj->next, head); | ||
| 567 | |||
| 568 | rcu_assign_pointer(tbl->buckets[hash], obj); | ||
| 569 | |||
| 570 | atomic_inc(&ht->nelems); | ||
| 571 | |||
| 572 | rhashtable_wakeup_worker(ht); | ||
| 363 | } | 573 | } |
| 364 | EXPORT_SYMBOL_GPL(rhashtable_insert); | ||
| 365 | 574 | ||
| 366 | /** | 575 | /** |
| 367 | * rhashtable_remove_pprev - remove object from hash table given previous element | 576 | * rhashtable_insert - insert object into hash table |
| 368 | * @ht: hash table | 577 | * @ht: hash table |
| 369 | * @obj: pointer to hash head inside object | 578 | * @obj: pointer to hash head inside object |
| 370 | * @pprev: pointer to previous element | ||
| 371 | * | 579 | * |
| 372 | * Identical to rhashtable_remove() but caller is alreayd aware of the element | 580 | * Will take a per bucket spinlock to protect against mutual mutations |
| 373 | * in front of the element to be deleted. This is in particular useful for | 581 | * on the same bucket. Multiple insertions may occur in parallel unless |
| 374 | * deletion when combined with walking or lookup. | 582 | * they map to the same bucket lock. |
| 583 | * | ||
| 584 | * It is safe to call this function from atomic context. | ||
| 585 | * | ||
| 586 | * Will trigger an automatic deferred table resizing if the size grows | ||
| 587 | * beyond the watermark indicated by grow_decision() which can be passed | ||
| 588 | * to rhashtable_init(). | ||
| 375 | */ | 589 | */ |
| 376 | void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, | 590 | void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) |
| 377 | struct rhash_head __rcu **pprev) | ||
| 378 | { | 591 | { |
| 379 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | 592 | struct bucket_table *tbl, *old_tbl; |
| 593 | unsigned hash; | ||
| 380 | 594 | ||
| 381 | ASSERT_RHT_MUTEX(ht); | 595 | rcu_read_lock(); |
| 382 | 596 | ||
| 383 | RCU_INIT_POINTER(*pprev, obj->next); | 597 | tbl = rht_dereference_rcu(ht->future_tbl, ht); |
| 384 | ht->nelems--; | 598 | old_tbl = rht_dereference_rcu(ht->tbl, ht); |
| 599 | hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); | ||
| 385 | 600 | ||
| 386 | if (ht->p.shrink_decision && | 601 | lock_buckets(tbl, old_tbl, hash); |
| 387 | ht->p.shrink_decision(ht, tbl->size)) | 602 | __rhashtable_insert(ht, obj, tbl, hash); |
| 388 | rhashtable_shrink(ht); | 603 | unlock_buckets(tbl, old_tbl, hash); |
| 604 | |||
| 605 | rcu_read_unlock(); | ||
| 389 | } | 606 | } |
| 390 | EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); | 607 | EXPORT_SYMBOL_GPL(rhashtable_insert); |
| 391 | 608 | ||
| 392 | /** | 609 | /** |
| 393 | * rhashtable_remove - remove object from hash table | 610 | * rhashtable_remove - remove object from hash table |
| @@ -398,7 +615,7 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); | |||
| 398 | * walk the bucket chain upon removal. The removal operation is thus | 615 | * walk the bucket chain upon removal. The removal operation is thus |
| 399 | * considerably slow if the hash table is not correctly sized. | 616 | * considerably slow if the hash table is not correctly sized. |
| 400 | * | 617 | * |
| 401 | * Will automatically shrink the table via rhashtable_expand() if the the | 618 | * Will automatically shrink the table via rhashtable_expand() if the |
| 402 | * shrink_decision function specified at rhashtable_init() returns true. | 619 | * shrink_decision function specified at rhashtable_init() returns true. |
| 403 | * | 620 | * |
| 404 | * The caller must ensure that no concurrent table mutations occur. It is | 621 | * The caller must ensure that no concurrent table mutations occur. It is |
| @@ -406,30 +623,87 @@ EXPORT_SYMBOL_GPL(rhashtable_remove_pprev); | |||
| 406 | */ | 623 | */ |
| 407 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) | 624 | bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj) |
| 408 | { | 625 | { |
| 409 | struct bucket_table *tbl = rht_dereference(ht->tbl, ht); | 626 | struct bucket_table *tbl, *new_tbl, *old_tbl; |
| 410 | struct rhash_head __rcu **pprev; | 627 | struct rhash_head __rcu **pprev; |
| 411 | struct rhash_head *he; | 628 | struct rhash_head *he, *he2; |
| 412 | u32 h; | 629 | unsigned int hash, new_hash; |
| 630 | bool ret = false; | ||
| 413 | 631 | ||
| 414 | ASSERT_RHT_MUTEX(ht); | 632 | rcu_read_lock(); |
| 415 | 633 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | |
| 416 | h = head_hashfn(ht, obj, tbl->size); | 634 | tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht); |
| 417 | 635 | new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); | |
| 418 | pprev = &tbl->buckets[h]; | 636 | |
| 419 | rht_for_each(he, tbl->buckets[h], ht) { | 637 | lock_buckets(new_tbl, old_tbl, new_hash); |
| 638 | restart: | ||
| 639 | hash = rht_bucket_index(tbl, new_hash); | ||
| 640 | pprev = &tbl->buckets[hash]; | ||
| 641 | rht_for_each(he, tbl, hash) { | ||
| 420 | if (he != obj) { | 642 | if (he != obj) { |
| 421 | pprev = &he->next; | 643 | pprev = &he->next; |
| 422 | continue; | 644 | continue; |
| 423 | } | 645 | } |
| 424 | 646 | ||
| 425 | rhashtable_remove_pprev(ht, he, pprev); | 647 | ASSERT_BUCKET_LOCK(ht, tbl, hash); |
| 426 | return true; | 648 | |
| 649 | if (old_tbl->size > new_tbl->size && tbl == old_tbl && | ||
| 650 | !rht_is_a_nulls(obj->next) && | ||
| 651 | head_hashfn(ht, tbl, obj->next) != hash) { | ||
| 652 | rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); | ||
| 653 | } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) { | ||
| 654 | rht_for_each_continue(he2, obj->next, tbl, hash) { | ||
| 655 | if (head_hashfn(ht, tbl, he2) == hash) { | ||
| 656 | rcu_assign_pointer(*pprev, he2); | ||
| 657 | goto found; | ||
| 658 | } | ||
| 659 | } | ||
| 660 | |||
| 661 | rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash)); | ||
| 662 | } else { | ||
| 663 | rcu_assign_pointer(*pprev, obj->next); | ||
| 664 | } | ||
| 665 | |||
| 666 | found: | ||
| 667 | ret = true; | ||
| 668 | break; | ||
| 669 | } | ||
| 670 | |||
| 671 | /* The entry may be linked in either 'tbl', 'future_tbl', or both. | ||
| 672 | * 'future_tbl' only exists for a short period of time during | ||
| 673 | * resizing. Thus traversing both is fine, and the added cost is ||
| 674 | * incurred only rarely. ||
| 675 | */ | ||
| 676 | if (tbl != old_tbl) { | ||
| 677 | tbl = old_tbl; | ||
| 678 | goto restart; | ||
| 679 | } | ||
| 680 | |||
| 681 | unlock_buckets(new_tbl, old_tbl, new_hash); | ||
| 682 | |||
| 683 | if (ret) { | ||
| 684 | atomic_dec(&ht->nelems); | ||
| 685 | rhashtable_wakeup_worker(ht); | ||
| 427 | } | 686 | } |
| 428 | 687 | ||
| 429 | return false; | 688 | rcu_read_unlock(); |
| 689 | |||
| 690 | return ret; | ||
| 430 | } | 691 | } |
| 431 | EXPORT_SYMBOL_GPL(rhashtable_remove); | 692 | EXPORT_SYMBOL_GPL(rhashtable_remove); |
| 432 | 693 | ||
| 694 | struct rhashtable_compare_arg { | ||
| 695 | struct rhashtable *ht; | ||
| 696 | const void *key; | ||
| 697 | }; | ||
| 698 | |||
| 699 | static bool rhashtable_compare(void *ptr, void *arg) | ||
| 700 | { | ||
| 701 | struct rhashtable_compare_arg *x = arg; | ||
| 702 | struct rhashtable *ht = x->ht; | ||
| 703 | |||
| 704 | return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len); | ||
| 705 | } | ||
| 706 | |||
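rhashtable_compare() is the stock memcmp() matcher that rhashtable_lookup() below feeds into rhashtable_lookup_compare(); callers with composite match criteria can pass their own. A hedged sketch of such a caller (struct my_obj and the netns_id field are hypothetical; only the rhashtable_lookup_compare() signature is taken from this file, and it assumes the table was initialised with key_len == sizeof(u32)):

```c
#include <linux/rhashtable.h>

/* Hypothetical object with a composite identity: a u32 key plus a
 * namespace id. */
struct my_obj {
	struct rhash_head node;
	u32 key;
	int netns_id;
};

struct my_arg {
	u32 key;
	int netns_id;
};

static bool my_compare(void *ptr, void *arg)
{
	struct my_obj *obj = ptr;
	struct my_arg *x = arg;

	return obj->key == x->key && obj->netns_id == x->netns_id;
}

static struct my_obj *my_lookup(struct rhashtable *ht, u32 key, int netns_id)
{
	struct my_arg arg = { .key = key, .netns_id = netns_id };

	/* 'key' (the first member of arg) is what gets hashed; the
	 * compare callback then filters on the full composite. */
	return rhashtable_lookup_compare(ht, &arg.key, my_compare, &arg);
}
```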
| 433 | /** | 707 | /** |
| 434 | * rhashtable_lookup - lookup key in hash table | 708 | * rhashtable_lookup - lookup key in hash table |
| 435 | * @ht: hash table | 709 | * @ht: hash table |
| @@ -439,65 +713,313 @@ EXPORT_SYMBOL_GPL(rhashtable_remove); | |||
| 439 | * for an entry with an identical key. The first matching entry is returned. | 713 | * for an entry with an identical key. The first matching entry is returned. |
| 440 | * | 714 | * |
| 441 | * This lookup function may only be used for fixed-key hash tables (key_len | 715 | * This lookup function may only be used for fixed-key hash tables (key_len |
| 442 | * paramter set). It will BUG() if used inappropriately. | 716 | * parameter set). It will BUG() if used inappropriately. |
| 443 | * | 717 | * |
| 444 | * Lookups may occur in parallel with hash mutations as long as the lookup is | 718 | * Lookups may occur in parallel with hashtable mutations and resizing. |
| 445 | * guarded by rcu_read_lock(). The caller must take care of this. | ||
| 446 | */ | 719 | */ |
| 447 | void *rhashtable_lookup(const struct rhashtable *ht, const void *key) | 720 | void *rhashtable_lookup(struct rhashtable *ht, const void *key) |
| 448 | { | 721 | { |
| 449 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 722 | struct rhashtable_compare_arg arg = { |
| 450 | struct rhash_head *he; | 723 | .ht = ht, |
| 451 | u32 h; | 724 | .key = key, |
| 725 | }; | ||
| 452 | 726 | ||
| 453 | BUG_ON(!ht->p.key_len); | 727 | BUG_ON(!ht->p.key_len); |
| 454 | 728 | ||
| 455 | h = __hashfn(ht, key, ht->p.key_len, tbl->size); | 729 | return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg); |
| 456 | rht_for_each_rcu(he, tbl->buckets[h], ht) { | ||
| 457 | if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key, | ||
| 458 | ht->p.key_len)) | ||
| 459 | continue; | ||
| 460 | return (void *) he - ht->p.head_offset; | ||
| 461 | } | ||
| 462 | |||
| 463 | return NULL; | ||
| 464 | } | 730 | } |
| 465 | EXPORT_SYMBOL_GPL(rhashtable_lookup); | 731 | EXPORT_SYMBOL_GPL(rhashtable_lookup); |
| 466 | 732 | ||
| 467 | /** | 733 | /** |
| 468 | * rhashtable_lookup_compare - search hash table with compare function | 734 | * rhashtable_lookup_compare - search hash table with compare function |
| 469 | * @ht: hash table | 735 | * @ht: hash table |
| 470 | * @hash: hash value of desired entry | 736 | * @key: the pointer to the key |
| 471 | * @compare: compare function, must return true on match | 737 | * @compare: compare function, must return true on match |
| 472 | * @arg: argument passed on to compare function | 738 | * @arg: argument passed on to compare function |
| 473 | * | 739 | * |
| 474 | * Traverses the bucket chain behind the provided hash value and calls the | 740 | * Traverses the bucket chain behind the provided hash value and calls the |
| 475 | * specified compare function for each entry. | 741 | * specified compare function for each entry. |
| 476 | * | 742 | * |
| 477 | * Lookups may occur in parallel with hash mutations as long as the lookup is | 743 | * Lookups may occur in parallel with hashtable mutations and resizing. |
| 478 | * guarded by rcu_read_lock(). The caller must take care of this. | ||
| 479 | * | 744 | * |
| 480 | * Returns the first entry on which the compare function returned true. | 745 | * Returns the first entry on which the compare function returned true. |
| 481 | */ | 746 | */ |
| 482 | void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, | 747 | void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key, |
| 483 | bool (*compare)(void *, void *), void *arg) | 748 | bool (*compare)(void *, void *), void *arg) |
| 484 | { | 749 | { |
| 485 | const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); | 750 | const struct bucket_table *tbl, *old_tbl; |
| 486 | struct rhash_head *he; | 751 | struct rhash_head *he; |
| 752 | u32 hash; | ||
| 487 | 753 | ||
| 488 | if (unlikely(hash >= tbl->size)) | 754 | rcu_read_lock(); |
| 489 | return NULL; | ||
| 490 | 755 | ||
| 491 | rht_for_each_rcu(he, tbl->buckets[hash], ht) { | 756 | old_tbl = rht_dereference_rcu(ht->tbl, ht); |
| 757 | tbl = rht_dereference_rcu(ht->future_tbl, ht); | ||
| 758 | hash = key_hashfn(ht, key, ht->p.key_len); | ||
| 759 | restart: | ||
| 760 | rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) { | ||
| 492 | if (!compare(rht_obj(ht, he), arg)) | 761 | if (!compare(rht_obj(ht, he), arg)) |
| 493 | continue; | 762 | continue; |
| 494 | return (void *) he - ht->p.head_offset; | 763 | rcu_read_unlock(); |
| 764 | return rht_obj(ht, he); | ||
| 765 | } | ||
| 766 | |||
| 767 | if (unlikely(tbl != old_tbl)) { | ||
| 768 | tbl = old_tbl; | ||
| 769 | goto restart; | ||
| 495 | } | 770 | } |
| 771 | rcu_read_unlock(); | ||
| 496 | 772 | ||
| 497 | return NULL; | 773 | return NULL; |
| 498 | } | 774 | } |
| 499 | EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); | 775 | EXPORT_SYMBOL_GPL(rhashtable_lookup_compare); |
| 500 | 776 | ||
| 777 | /** | ||
| 778 | * rhashtable_lookup_insert - lookup and insert object into hash table | ||
| 779 | * @ht: hash table | ||
| 780 | * @obj: pointer to hash head inside object | ||
| 781 | * | ||
| 782 | * Locks down the bucket chain in both the old and new table if a resize | ||
| 783 | * is in progress to ensure that writers can't remove from the old table | ||
| 784 | * and can't insert to the new table during the atomic operation of search | ||
| 785 | * and insertion. Searches for duplicates in both the old and new table if | ||
| 786 | * a resize is in progress. | ||
| 787 | * | ||
| 788 | * This lookup function may only be used for fixed-key hash tables (key_len ||
| 789 | * parameter set). It will BUG() if used inappropriately. | ||
| 790 | * | ||
| 791 | * It is safe to call this function from atomic context. | ||
| 792 | * | ||
| 793 | * Will trigger an automatic deferred table resizing if the size grows | ||
| 794 | * beyond the watermark indicated by grow_decision() which can be passed | ||
| 795 | * to rhashtable_init(). | ||
| 796 | */ | ||
| 797 | bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj) | ||
| 798 | { | ||
| 799 | struct rhashtable_compare_arg arg = { | ||
| 800 | .ht = ht, | ||
| 801 | .key = rht_obj(ht, obj) + ht->p.key_offset, | ||
| 802 | }; | ||
| 803 | |||
| 804 | BUG_ON(!ht->p.key_len); | ||
| 805 | |||
| 806 | return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare, | ||
| 807 | &arg); | ||
| 808 | } | ||
| 809 | EXPORT_SYMBOL_GPL(rhashtable_lookup_insert); | ||
| 810 | |||
| 811 | /** | ||
| 812 | * rhashtable_lookup_compare_insert - search and insert object to hash table | ||
| 813 | * with compare function | ||
| 814 | * @ht: hash table | ||
| 815 | * @obj: pointer to hash head inside object | ||
| 816 | * @compare: compare function, must return true on match | ||
| 817 | * @arg: argument passed on to compare function | ||
| 818 | * | ||
| 819 | * Locks down the bucket chain in both the old and new table if a resize | ||
| 820 | * is in progress to ensure that writers can't remove from the old table | ||
| 821 | * and can't insert to the new table during the atomic operation of search | ||
| 822 | * and insertion. Searches for duplicates in both the old and new table if | ||
| 823 | * a resize is in progress. | ||
| 824 | * | ||
| 825 | * Lookups may occur in parallel with hashtable mutations and resizing. | ||
| 826 | * | ||
| 827 | * Will trigger an automatic deferred table resizing if the size grows | ||
| 828 | * beyond the watermark indicated by grow_decision() which can be passed | ||
| 829 | * to rhashtable_init(). | ||
| 830 | */ | ||
| 831 | bool rhashtable_lookup_compare_insert(struct rhashtable *ht, | ||
| 832 | struct rhash_head *obj, | ||
| 833 | bool (*compare)(void *, void *), | ||
| 834 | void *arg) | ||
| 835 | { | ||
| 836 | struct bucket_table *new_tbl, *old_tbl; | ||
| 837 | u32 new_hash; | ||
| 838 | bool success = true; | ||
| 839 | |||
| 840 | BUG_ON(!ht->p.key_len); | ||
| 841 | |||
| 842 | rcu_read_lock(); | ||
| 843 | old_tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 844 | new_tbl = rht_dereference_rcu(ht->future_tbl, ht); | ||
| 845 | new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); | ||
| 846 | |||
| 847 | lock_buckets(new_tbl, old_tbl, new_hash); | ||
| 848 | |||
| 849 | if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset, | ||
| 850 | compare, arg)) { | ||
| 851 | success = false; | ||
| 852 | goto exit; | ||
| 853 | } | ||
| 854 | |||
| 855 | __rhashtable_insert(ht, obj, new_tbl, new_hash); | ||
| 856 | |||
| 857 | exit: | ||
| 858 | unlock_buckets(new_tbl, old_tbl, new_hash); | ||
| 859 | rcu_read_unlock(); | ||
| 860 | |||
| 861 | return success; | ||
| 862 | } | ||
| 863 | EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert); | ||
| 864 | |||
| 865 | /** | ||
| 866 | * rhashtable_walk_init - Initialise an iterator | ||
| 867 | * @ht: Table to walk over | ||
| 868 | * @iter: Hash table iterator ||
| 869 | * | ||
| 870 | * This function prepares a hash table walk. | ||
| 871 | * | ||
| 872 | * Note that if you restart a walk after rhashtable_walk_stop you | ||
| 873 | * may see the same object twice. Also, you may miss objects if | ||
| 874 | * there are removals in between rhashtable_walk_stop and the next | ||
| 875 | * call to rhashtable_walk_start. | ||
| 876 | * | ||
| 877 | * For a completely stable walk you should construct your own data | ||
| 878 | * structure outside the hash table. | ||
| 879 | * | ||
| 880 | * This function may sleep so you must not call it from interrupt | ||
| 881 | * context or with spin locks held. | ||
| 882 | * | ||
| 883 | * You must call rhashtable_walk_exit if this function returns | ||
| 884 | * successfully. | ||
| 885 | */ | ||
| 886 | int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) | ||
| 887 | { | ||
| 888 | iter->ht = ht; | ||
| 889 | iter->p = NULL; | ||
| 890 | iter->slot = 0; | ||
| 891 | iter->skip = 0; | ||
| 892 | |||
| 893 | iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); | ||
| 894 | if (!iter->walker) | ||
| 895 | return -ENOMEM; | ||
| 896 | |||
| 897 | mutex_lock(&ht->mutex); | ||
| 898 | list_add(&iter->walker->list, &ht->walkers); | ||
| 899 | mutex_unlock(&ht->mutex); | ||
| 900 | |||
| 901 | return 0; | ||
| 902 | } | ||
| 903 | EXPORT_SYMBOL_GPL(rhashtable_walk_init); | ||
| 904 | |||
| 905 | /** | ||
| 906 | * rhashtable_walk_exit - Free an iterator | ||
| 907 | * @iter: Hash table iterator ||
| 908 | * | ||
| 909 | * This function frees resources allocated by rhashtable_walk_init. | ||
| 910 | */ | ||
| 911 | void rhashtable_walk_exit(struct rhashtable_iter *iter) | ||
| 912 | { | ||
| 913 | mutex_lock(&iter->ht->mutex); | ||
| 914 | list_del(&iter->walker->list); | ||
| 915 | mutex_unlock(&iter->ht->mutex); | ||
| 916 | kfree(iter->walker); | ||
| 917 | } | ||
| 918 | EXPORT_SYMBOL_GPL(rhashtable_walk_exit); | ||
| 919 | |||
| 920 | /** | ||
| 921 | * rhashtable_walk_start - Start a hash table walk | ||
| 922 | * @iter: Hash table iterator | ||
| 923 | * | ||
| 924 | * Start a hash table walk. Note that we take the RCU lock in all | ||
| 925 | * cases including when we return an error. So you must always call | ||
| 926 | * rhashtable_walk_stop to clean up. | ||
| 927 | * | ||
| 928 | * Returns zero if successful. | ||
| 929 | * | ||
| 930 | * Returns -EAGAIN if a resize event occurred. Note that the iterator ||
| 931 | * will rewind back to the beginning and you may use it immediately | ||
| 932 | * by calling rhashtable_walk_next. | ||
| 933 | */ | ||
| 934 | int rhashtable_walk_start(struct rhashtable_iter *iter) | ||
| 935 | { | ||
| 936 | rcu_read_lock(); | ||
| 937 | |||
| 938 | if (iter->walker->resize) { | ||
| 939 | iter->slot = 0; | ||
| 940 | iter->skip = 0; | ||
| 941 | iter->walker->resize = false; | ||
| 942 | return -EAGAIN; | ||
| 943 | } | ||
| 944 | |||
| 945 | return 0; | ||
| 946 | } | ||
| 947 | EXPORT_SYMBOL_GPL(rhashtable_walk_start); | ||
| 948 | |||
| 949 | /** | ||
| 950 | * rhashtable_walk_next - Return the next object and advance the iterator | ||
| 951 | * @iter: Hash table iterator | ||
| 952 | * | ||
| 953 | * Note that you must call rhashtable_walk_stop when you are finished | ||
| 954 | * with the walk. | ||
| 955 | * | ||
| 956 | * Returns the next object or NULL when the end of the table is reached. | ||
| 957 | * | ||
| 958 | * Returns -EAGAIN if a resize event occurred. Note that the iterator ||
| 959 | * will rewind back to the beginning and you may continue to use it. | ||
| 960 | */ | ||
| 961 | void *rhashtable_walk_next(struct rhashtable_iter *iter) | ||
| 962 | { | ||
| 963 | const struct bucket_table *tbl; | ||
| 964 | struct rhashtable *ht = iter->ht; | ||
| 965 | struct rhash_head *p = iter->p; | ||
| 966 | void *obj = NULL; | ||
| 967 | |||
| 968 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 969 | |||
| 970 | if (p) { | ||
| 971 | p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); | ||
| 972 | goto next; | ||
| 973 | } | ||
| 974 | |||
| 975 | for (; iter->slot < tbl->size; iter->slot++) { | ||
| 976 | int skip = iter->skip; | ||
| 977 | |||
| 978 | rht_for_each_rcu(p, tbl, iter->slot) { | ||
| 979 | if (!skip) | ||
| 980 | break; | ||
| 981 | skip--; | ||
| 982 | } | ||
| 983 | |||
| 984 | next: | ||
| 985 | if (!rht_is_a_nulls(p)) { | ||
| 986 | iter->skip++; | ||
| 987 | iter->p = p; | ||
| 988 | obj = rht_obj(ht, p); | ||
| 989 | goto out; | ||
| 990 | } | ||
| 991 | |||
| 992 | iter->skip = 0; | ||
| 993 | } | ||
| 994 | |||
| 995 | iter->p = NULL; | ||
| 996 | |||
| 997 | out: | ||
| 998 | if (iter->walker->resize) { | ||
| 999 | iter->p = NULL; | ||
| 1000 | iter->slot = 0; | ||
| 1001 | iter->skip = 0; | ||
| 1002 | iter->walker->resize = false; | ||
| 1003 | return ERR_PTR(-EAGAIN); | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | return obj; | ||
| 1007 | } | ||
| 1008 | EXPORT_SYMBOL_GPL(rhashtable_walk_next); | ||
| 1009 | |||
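Taken together, the four walk functions form a simple iteration protocol: _init registers the iterator on ht->walkers, _start takes the RCU read lock, _next yields objects or ERR_PTR(-EAGAIN) once a resize has rewound the iterator, and _stop drops the lock. A hedged usage sketch (struct test_obj is hypothetical; only the rhashtable_walk_*() calls are taken from this file):

```c
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/rhashtable.h>

/* Hypothetical element type. */
struct test_obj {
	int value;
	struct rhash_head node;
};

static void dump_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct test_obj *obj;

	if (rhashtable_walk_init(ht, &iter))
		return;

	/* -EAGAIN from _start only means the iterator was rewound by a
	 * resize; it is immediately usable, so no special handling. */
	rhashtable_walk_start(&iter);

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj))	/* resized mid-walk: rewound, go on */
			continue;
		pr_info("value=%d\n", obj->value);
	}

	rhashtable_walk_stop(&iter);
	rhashtable_walk_exit(&iter);
}
```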
| 1010 | /** | ||
| 1011 | * rhashtable_walk_stop - Finish a hash table walk | ||
| 1012 | * @iter: Hash table iterator | ||
| 1013 | * | ||
| 1014 | * Finish a hash table walk. | ||
| 1015 | */ | ||
| 1016 | void rhashtable_walk_stop(struct rhashtable_iter *iter) | ||
| 1017 | { | ||
| 1018 | rcu_read_unlock(); | ||
| 1019 | iter->p = NULL; | ||
| 1020 | } | ||
| 1021 | EXPORT_SYMBOL_GPL(rhashtable_walk_stop); | ||
| 1022 | |||
| 501 | static size_t rounded_hashtable_size(struct rhashtable_params *params) | 1023 | static size_t rounded_hashtable_size(struct rhashtable_params *params) |
| 502 | { | 1024 | { |
| 503 | return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), | 1025 | return max(roundup_pow_of_two(params->nelem_hint * 4 / 3), |
| @@ -525,9 +1047,7 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) | |||
| 525 | * .key_offset = offsetof(struct test_obj, key), | 1047 | * .key_offset = offsetof(struct test_obj, key), |
| 526 | * .key_len = sizeof(int), | 1048 | * .key_len = sizeof(int), |
| 527 | * .hashfn = jhash, | 1049 | * .hashfn = jhash, |
| 528 | * #ifdef CONFIG_PROVE_LOCKING | 1050 | * .nulls_base = (1U << RHT_BASE_SHIFT), |
| 529 | * .mutex_is_held = &my_mutex_is_held, | ||
| 530 | * #endif | ||
| 531 | * }; | 1051 | * }; |
| 532 | * | 1052 | * |
| 533 | * Configuration Example 2: Variable length keys | 1053 | * Configuration Example 2: Variable length keys |
| @@ -547,9 +1067,6 @@ static size_t rounded_hashtable_size(struct rhashtable_params *params) | |||
| 547 | * .head_offset = offsetof(struct test_obj, node), | 1067 | * .head_offset = offsetof(struct test_obj, node), |
| 548 | * .hashfn = jhash, | 1068 | * .hashfn = jhash, |
| 549 | * .obj_hashfn = my_hash_fn, | 1069 | * .obj_hashfn = my_hash_fn, |
| 550 | * #ifdef CONFIG_PROVE_LOCKING | ||
| 551 | * .mutex_is_held = &my_mutex_is_held, | ||
| 552 | * #endif | ||
| 553 | * }; | 1070 | * }; |
| 554 | */ | 1071 | */ |
| 555 | int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | 1072 | int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) |
| @@ -563,24 +1080,40 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) | |||
| 563 | (!params->key_len && !params->obj_hashfn)) | 1080 | (!params->key_len && !params->obj_hashfn)) |
| 564 | return -EINVAL; | 1081 | return -EINVAL; |
| 565 | 1082 | ||
| 1083 | if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT)) | ||
| 1084 | return -EINVAL; | ||
| 1085 | |||
| 566 | params->min_shift = max_t(size_t, params->min_shift, | 1086 | params->min_shift = max_t(size_t, params->min_shift, |
| 567 | ilog2(HASH_MIN_SIZE)); | 1087 | ilog2(HASH_MIN_SIZE)); |
| 568 | 1088 | ||
| 569 | if (params->nelem_hint) | 1089 | if (params->nelem_hint) |
| 570 | size = rounded_hashtable_size(params); | 1090 | size = rounded_hashtable_size(params); |
| 571 | 1091 | ||
| 572 | tbl = bucket_table_alloc(size); | 1092 | memset(ht, 0, sizeof(*ht)); |
| 1093 | mutex_init(&ht->mutex); | ||
| 1094 | memcpy(&ht->p, params, sizeof(*params)); | ||
| 1095 | INIT_LIST_HEAD(&ht->walkers); | ||
| 1096 | |||
| 1097 | if (params->locks_mul) | ||
| 1098 | ht->p.locks_mul = roundup_pow_of_two(params->locks_mul); | ||
| 1099 | else | ||
| 1100 | ht->p.locks_mul = BUCKET_LOCKS_PER_CPU; | ||
| 1101 | |||
| 1102 | tbl = bucket_table_alloc(ht, size); | ||
| 573 | if (tbl == NULL) | 1103 | if (tbl == NULL) |
| 574 | return -ENOMEM; | 1104 | return -ENOMEM; |
| 575 | 1105 | ||
| 576 | memset(ht, 0, sizeof(*ht)); | 1106 | atomic_set(&ht->nelems, 0); |
| 577 | ht->shift = ilog2(tbl->size); | 1107 | atomic_set(&ht->shift, ilog2(tbl->size)); |
| 578 | memcpy(&ht->p, params, sizeof(*params)); | ||
| 579 | RCU_INIT_POINTER(ht->tbl, tbl); | 1108 | RCU_INIT_POINTER(ht->tbl, tbl); |
| 1109 | RCU_INIT_POINTER(ht->future_tbl, tbl); | ||
| 580 | 1110 | ||
| 581 | if (!ht->p.hash_rnd) | 1111 | if (!ht->p.hash_rnd) |
| 582 | get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); | 1112 | get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); |
| 583 | 1113 | ||
| 1114 | if (ht->p.grow_decision || ht->p.shrink_decision) | ||
| 1115 | INIT_WORK(&ht->run_work, rht_deferred_worker); | ||
| 1116 | |||
| 584 | return 0; | 1117 | return 0; |
| 585 | } | 1118 | } |
| 586 | EXPORT_SYMBOL_GPL(rhashtable_init); | 1119 | EXPORT_SYMBOL_GPL(rhashtable_init); |
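As a concrete companion to the configuration examples in the comment above, a minimal init/destroy sketch using the Example 1 parameters (struct test_obj with an integer key, as in the examples; error paths trimmed):

	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint	= 64,	/* sizing hint only; the table resizes itself */
		.head_offset	= offsetof(struct test_obj, node),
		.key_offset	= offsetof(struct test_obj, key),
		.key_len	= sizeof(int),
		.hashfn		= jhash,
		.nulls_base	= (1U << RHT_BASE_SHIFT),
	};
	int err;

	err = rhashtable_init(&ht, &params);
	if (err)
		return err;

	/* ... rhashtable_insert() / rhashtable_lookup() / rhashtable_remove() ... */

	rhashtable_destroy(&ht);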
| @@ -593,216 +1126,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init); | |||
| 593 | * has to ensure that no resizing can occur by unpublishing the hashtable | 1126 | * has to ensure that no resizing can occur by unpublishing the hashtable |
| 594 | * and waiting for a quiescent cycle before releasing the bucket array. | 1127 | * and waiting for a quiescent cycle before releasing the bucket array. |
| 595 | */ | 1128 | */ |
| 596 | void rhashtable_destroy(const struct rhashtable *ht) | 1129 | void rhashtable_destroy(struct rhashtable *ht) |
| 597 | { | 1130 | { |
| 598 | bucket_table_free(ht->tbl); | 1131 | ht->being_destroyed = true; |
| 599 | } | ||
| 600 | EXPORT_SYMBOL_GPL(rhashtable_destroy); | ||
| 601 | |||
| 602 | /************************************************************************** | ||
| 603 | * Self Test | ||
| 604 | **************************************************************************/ | ||
| 605 | |||
| 606 | #ifdef CONFIG_TEST_RHASHTABLE | ||
| 607 | 1132 | ||
| 608 | #define TEST_HT_SIZE 8 | 1133 | if (ht->p.grow_decision || ht->p.shrink_decision) |
| 609 | #define TEST_ENTRIES 2048 | 1134 | cancel_work_sync(&ht->run_work); |
| 610 | #define TEST_PTR ((void *) 0xdeadbeef) | ||
| 611 | #define TEST_NEXPANDS 4 | ||
| 612 | 1135 | ||
| 613 | #ifdef CONFIG_PROVE_LOCKING | 1136 | mutex_lock(&ht->mutex); |
| 614 | static int test_mutex_is_held(void *parent) | 1137 | bucket_table_free(rht_dereference(ht->tbl, ht)); |
| 615 | { | 1138 | mutex_unlock(&ht->mutex); |
| 616 | return 1; | ||
| 617 | } | 1139 | } |
| 618 | #endif | 1140 | EXPORT_SYMBOL_GPL(rhashtable_destroy); |
| 619 | |||
| 620 | struct test_obj { | ||
| 621 | void *ptr; | ||
| 622 | int value; | ||
| 623 | struct rhash_head node; | ||
| 624 | }; | ||
| 625 | |||
| 626 | static int __init test_rht_lookup(struct rhashtable *ht) | ||
| 627 | { | ||
| 628 | unsigned int i; | ||
| 629 | |||
| 630 | for (i = 0; i < TEST_ENTRIES * 2; i++) { | ||
| 631 | struct test_obj *obj; | ||
| 632 | bool expected = !(i % 2); | ||
| 633 | u32 key = i; | ||
| 634 | |||
| 635 | obj = rhashtable_lookup(ht, &key); | ||
| 636 | |||
| 637 | if (expected && !obj) { | ||
| 638 | pr_warn("Test failed: Could not find key %u\n", key); | ||
| 639 | return -ENOENT; | ||
| 640 | } else if (!expected && obj) { | ||
| 641 | pr_warn("Test failed: Unexpected entry found for key %u\n", | ||
| 642 | key); | ||
| 643 | return -EEXIST; | ||
| 644 | } else if (expected && obj) { | ||
| 645 | if (obj->ptr != TEST_PTR || obj->value != i) { | ||
| 646 | pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", | ||
| 647 | obj->ptr, TEST_PTR, obj->value, i); | ||
| 648 | return -EINVAL; | ||
| 649 | } | ||
| 650 | } | ||
| 651 | } | ||
| 652 | |||
| 653 | return 0; | ||
| 654 | } | ||
| 655 | |||
| 656 | static void test_bucket_stats(struct rhashtable *ht, bool quiet) | ||
| 657 | { | ||
| 658 | unsigned int cnt, rcu_cnt, i, total = 0; | ||
| 659 | struct test_obj *obj; | ||
| 660 | struct bucket_table *tbl; | ||
| 661 | |||
| 662 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 663 | for (i = 0; i < tbl->size; i++) { | ||
| 664 | rcu_cnt = cnt = 0; | ||
| 665 | |||
| 666 | if (!quiet) | ||
| 667 | pr_info(" [%#4x/%zu]", i, tbl->size); | ||
| 668 | |||
| 669 | rht_for_each_entry_rcu(obj, tbl->buckets[i], node) { | ||
| 670 | cnt++; | ||
| 671 | total++; | ||
| 672 | if (!quiet) | ||
| 673 | pr_cont(" [%p],", obj); | ||
| 674 | } | ||
| 675 | |||
| 676 | rht_for_each_entry_rcu(obj, tbl->buckets[i], node) | ||
| 677 | rcu_cnt++; | ||
| 678 | |||
| 679 | if (rcu_cnt != cnt) | ||
| 680 | pr_warn("Test failed: Chain count mismach %d != %d", | ||
| 681 | cnt, rcu_cnt); | ||
| 682 | |||
| 683 | if (!quiet) | ||
| 684 | pr_cont("\n [%#x] first element: %p, chain length: %u\n", | ||
| 685 | i, tbl->buckets[i], cnt); | ||
| 686 | } | ||
| 687 | |||
| 688 | pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n", | ||
| 689 | total, ht->nelems, TEST_ENTRIES); | ||
| 690 | |||
| 691 | if (total != ht->nelems || total != TEST_ENTRIES) | ||
| 692 | pr_warn("Test failed: Total count mismatch ^^^"); | ||
| 693 | } | ||
| 694 | |||
| 695 | static int __init test_rhashtable(struct rhashtable *ht) | ||
| 696 | { | ||
| 697 | struct bucket_table *tbl; | ||
| 698 | struct test_obj *obj, *next; | ||
| 699 | int err; | ||
| 700 | unsigned int i; | ||
| 701 | |||
| 702 | /* | ||
| 703 | * Insertion Test: | ||
| 704 | * Insert TEST_ENTRIES into table with all keys even numbers | ||
| 705 | */ | ||
| 706 | pr_info(" Adding %d keys\n", TEST_ENTRIES); | ||
| 707 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
| 708 | struct test_obj *obj; | ||
| 709 | |||
| 710 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
| 711 | if (!obj) { | ||
| 712 | err = -ENOMEM; | ||
| 713 | goto error; | ||
| 714 | } | ||
| 715 | |||
| 716 | obj->ptr = TEST_PTR; | ||
| 717 | obj->value = i * 2; | ||
| 718 | |||
| 719 | rhashtable_insert(ht, &obj->node); | ||
| 720 | } | ||
| 721 | |||
| 722 | rcu_read_lock(); | ||
| 723 | test_bucket_stats(ht, true); | ||
| 724 | test_rht_lookup(ht); | ||
| 725 | rcu_read_unlock(); | ||
| 726 | |||
| 727 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
| 728 | pr_info(" Table expansion iteration %u...\n", i); | ||
| 729 | rhashtable_expand(ht); | ||
| 730 | |||
| 731 | rcu_read_lock(); | ||
| 732 | pr_info(" Verifying lookups...\n"); | ||
| 733 | test_rht_lookup(ht); | ||
| 734 | rcu_read_unlock(); | ||
| 735 | } | ||
| 736 | |||
| 737 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
| 738 | pr_info(" Table shrinkage iteration %u...\n", i); | ||
| 739 | rhashtable_shrink(ht); | ||
| 740 | |||
| 741 | rcu_read_lock(); | ||
| 742 | pr_info(" Verifying lookups...\n"); | ||
| 743 | test_rht_lookup(ht); | ||
| 744 | rcu_read_unlock(); | ||
| 745 | } | ||
| 746 | |||
| 747 | rcu_read_lock(); | ||
| 748 | test_bucket_stats(ht, true); | ||
| 749 | rcu_read_unlock(); | ||
| 750 | |||
| 751 | pr_info(" Deleting %d keys\n", TEST_ENTRIES); | ||
| 752 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
| 753 | u32 key = i * 2; | ||
| 754 | |||
| 755 | obj = rhashtable_lookup(ht, &key); | ||
| 756 | BUG_ON(!obj); | ||
| 757 | |||
| 758 | rhashtable_remove(ht, &obj->node); | ||
| 759 | kfree(obj); | ||
| 760 | } | ||
| 761 | |||
| 762 | return 0; | ||
| 763 | |||
| 764 | error: | ||
| 765 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 766 | for (i = 0; i < tbl->size; i++) | ||
| 767 | rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node) | ||
| 768 | kfree(obj); | ||
| 769 | |||
| 770 | return err; | ||
| 771 | } | ||
| 772 | |||
| 773 | static int __init test_rht_init(void) | ||
| 774 | { | ||
| 775 | struct rhashtable ht; | ||
| 776 | struct rhashtable_params params = { | ||
| 777 | .nelem_hint = TEST_HT_SIZE, | ||
| 778 | .head_offset = offsetof(struct test_obj, node), | ||
| 779 | .key_offset = offsetof(struct test_obj, value), | ||
| 780 | .key_len = sizeof(int), | ||
| 781 | .hashfn = jhash, | ||
| 782 | #ifdef CONFIG_PROVE_LOCKING | ||
| 783 | .mutex_is_held = &test_mutex_is_held, | ||
| 784 | #endif | ||
| 785 | .grow_decision = rht_grow_above_75, | ||
| 786 | .shrink_decision = rht_shrink_below_30, | ||
| 787 | }; | ||
| 788 | int err; | ||
| 789 | |||
| 790 | pr_info("Running resizable hashtable tests...\n"); | ||
| 791 | |||
| 792 | err = rhashtable_init(&ht, ¶ms); | ||
| 793 | if (err < 0) { | ||
| 794 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", | ||
| 795 | err); | ||
| 796 | return err; | ||
| 797 | } | ||
| 798 | |||
| 799 | err = test_rhashtable(&ht); | ||
| 800 | |||
| 801 | rhashtable_destroy(&ht); | ||
| 802 | |||
| 803 | return err; | ||
| 804 | } | ||
| 805 | |||
| 806 | subsys_initcall(test_rht_init); | ||
| 807 | |||
| 808 | #endif /* CONFIG_TEST_RHASHTABLE */ | ||
diff --git a/lib/seq_buf.c b/lib/seq_buf.c index 4eedfedb9e31..88c0854bd752 100644 --- a/lib/seq_buf.c +++ b/lib/seq_buf.c | |||
| @@ -91,42 +91,6 @@ int seq_buf_printf(struct seq_buf *s, const char *fmt, ...) | |||
| 91 | return ret; | 91 | return ret; |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | /** | ||
| 95 | * seq_buf_bitmask - write a bitmask array in its ASCII representation | ||
| 96 | * @s: seq_buf descriptor | ||
| 97 | * @maskp: points to an array of unsigned longs that represent a bitmask | ||
| 98 | * @nmaskbits: The number of bits that are valid in @maskp | ||
| 99 | * | ||
| 100 | * Writes an ASCII representation of a bitmask string into @s. | ||
| 101 | * | ||
| 102 | * Returns zero on success, -1 on overflow. | ||
| 103 | */ | ||
| 104 | int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp, | ||
| 105 | int nmaskbits) | ||
| 106 | { | ||
| 107 | unsigned int len = seq_buf_buffer_left(s); | ||
| 108 | int ret; | ||
| 109 | |||
| 110 | WARN_ON(s->size == 0); | ||
| 111 | |||
| 112 | /* | ||
| 113 | * Note, because bitmap_scnprintf() only returns the number of bytes | ||
| 114 | * written and not the number that would be written, we use the last | ||
| 115 | * byte of the buffer to let us know if we overflowed. There's a small | ||
| 116 | * chance that the bitmap could have fit exactly inside the buffer, but | ||
| 117 | * it's not that critical if that does happen. | ||
| 118 | */ | ||
| 119 | if (len > 1) { | ||
| 120 | ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits); | ||
| 121 | if (ret < len) { | ||
| 122 | s->len += ret; | ||
| 123 | return 0; | ||
| 124 | } | ||
| 125 | } | ||
| 126 | seq_buf_set_overflow(s); | ||
| 127 | return -1; | ||
| 128 | } | ||
| 129 | |||
| 130 | #ifdef CONFIG_BINARY_PRINTF | 94 | #ifdef CONFIG_BINARY_PRINTF |
| 131 | /** | 95 | /** |
| 132 | * seq_buf_bprintf - Write the printf string from binary arguments | 96 | * seq_buf_bprintf - Write the printf string from binary arguments |
diff --git a/lib/show_mem.c b/lib/show_mem.c index 7de89f4a36cf..adc98e1825ba 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c | |||
| @@ -6,7 +6,6 @@ | |||
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| 9 | #include <linux/nmi.h> | ||
| 10 | #include <linux/quicklist.h> | 9 | #include <linux/quicklist.h> |
| 11 | #include <linux/cma.h> | 10 | #include <linux/cma.h> |
| 12 | 11 | ||
diff --git a/lib/sort.c b/lib/sort.c index 926d00429ed2..43c9fe73ae2e 100644 --- a/lib/sort.c +++ b/lib/sort.c | |||
| @@ -4,10 +4,9 @@ | |||
| 4 | * Jan 23 2005 Matt Mackall <mpm@selenic.com> | 4 | * Jan 23 2005 Matt Mackall <mpm@selenic.com> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #include <linux/kernel.h> | 7 | #include <linux/types.h> |
| 8 | #include <linux/module.h> | 8 | #include <linux/export.h> |
| 9 | #include <linux/sort.h> | 9 | #include <linux/sort.h> |
| 10 | #include <linux/slab.h> | ||
| 11 | 10 | ||
| 12 | static void u32_swap(void *a, void *b, int size) | 11 | static void u32_swap(void *a, void *b, int size) |
| 13 | { | 12 | { |
| @@ -85,6 +84,7 @@ void sort(void *base, size_t num, size_t size, | |||
| 85 | EXPORT_SYMBOL(sort); | 84 | EXPORT_SYMBOL(sort); |
| 86 | 85 | ||
| 87 | #if 0 | 86 | #if 0 |
| 87 | #include <linux/slab.h> | ||
| 88 | /* a simple boot-time regression test */ | 88 | /* a simple boot-time regression test */ |
| 89 | 89 | ||
| 90 | int cmpint(const void *a, const void *b) | 90 | int cmpint(const void *a, const void *b) |
diff --git a/lib/stmp_device.c b/lib/stmp_device.c index 8ac9bcc4289a..a904656f4fd7 100644 --- a/lib/stmp_device.c +++ b/lib/stmp_device.c | |||
| @@ -15,7 +15,8 @@ | |||
| 15 | #include <linux/io.h> | 15 | #include <linux/io.h> |
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 18 | #include <linux/module.h> | 18 | #include <linux/compiler.h> |
| 19 | #include <linux/export.h> | ||
| 19 | #include <linux/stmp_device.h> | 20 | #include <linux/stmp_device.h> |
| 20 | 21 | ||
| 21 | #define STMP_MODULE_CLKGATE (1 << 30) | 22 | #define STMP_MODULE_CLKGATE (1 << 30) |
diff --git a/lib/string.c b/lib/string.c index 10063300b830..ce81aaec3839 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -58,14 +58,6 @@ int strncasecmp(const char *s1, const char *s2, size_t len) | |||
| 58 | } | 58 | } |
| 59 | EXPORT_SYMBOL(strncasecmp); | 59 | EXPORT_SYMBOL(strncasecmp); |
| 60 | #endif | 60 | #endif |
| 61 | #ifndef __HAVE_ARCH_STRNICMP | ||
| 62 | #undef strnicmp | ||
| 63 | int strnicmp(const char *s1, const char *s2, size_t len) | ||
| 64 | { | ||
| 65 | return strncasecmp(s1, s2, len); | ||
| 66 | } | ||
| 67 | EXPORT_SYMBOL(strnicmp); | ||
| 68 | #endif | ||
| 69 | 61 | ||
| 70 | #ifndef __HAVE_ARCH_STRCASECMP | 62 | #ifndef __HAVE_ARCH_STRCASECMP |
| 71 | int strcasecmp(const char *s1, const char *s2) | 63 | int strcasecmp(const char *s1, const char *s2) |
| @@ -321,12 +313,12 @@ EXPORT_SYMBOL(strchrnul); | |||
| 321 | */ | 313 | */ |
| 322 | char *strrchr(const char *s, int c) | 314 | char *strrchr(const char *s, int c) |
| 323 | { | 315 | { |
| 324 | const char *p = s + strlen(s); | 316 | const char *last = NULL; |
| 325 | do { | 317 | do { |
| 326 | if (*p == (char)c) | 318 | if (*s == (char)c) |
| 327 | return (char *)p; | 319 | last = s; |
| 328 | } while (--p >= s); | 320 | } while (*s++); |
| 329 | return NULL; | 321 | return (char *)last; |
| 330 | } | 322 | } |
| 331 | EXPORT_SYMBOL(strrchr); | 323 | EXPORT_SYMBOL(strrchr); |
| 332 | #endif | 324 | #endif |
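The strrchr() rewrite above replaces the strlen() pass plus backward scan with a single forward pass that remembers the last match; observable behaviour is unchanged:

	const char *s = "lib/string.c";

	strrchr(s, '/');	/* -> "/string.c" */
	strrchr(s, 'x');	/* -> NULL */
	strrchr(s, '\0');	/* -> pointer to the terminating NUL, as before */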
| @@ -604,6 +596,11 @@ EXPORT_SYMBOL(memset); | |||
| 604 | * @s: Pointer to the start of the area. | 596 | * @s: Pointer to the start of the area. |
| 605 | * @count: The size of the area. | 597 | * @count: The size of the area. |
| 606 | * | 598 | * |
| 599 | * Note: usually using memset() is just fine (!), but in cases | ||
| 600 | * where clearing out _local_ data at the end of a scope is | ||
| 601 | * necessary, memzero_explicit() should be used instead in | ||
| 602 | * order to prevent the compiler from optimising away zeroing. | ||
| 603 | * | ||
| 607 | * memzero_explicit() doesn't need an arch-specific version as | 604 | * memzero_explicit() doesn't need an arch-specific version as |
| 608 | * it simply invokes memset() internally. | 605 | * it simply invokes memset() internally. |
| 609 | */ | 606 | */ |
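A sketch of the local-scope case the note refers to, with a hypothetical key buffer and consumer; with plain memset() the compiler may drop the zeroing as a dead store, since the buffer is never read again:

	static int do_key_op(void)
	{
		u8 key[16];
		int err;

		get_random_bytes(key, sizeof(key));
		err = use_key(key, sizeof(key));	/* hypothetical consumer */
		memzero_explicit(key, sizeof(key));	/* survives dead-store elimination */
		return err;
	}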
diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 58b78ba57439..8f8c4417f228 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c | |||
| @@ -20,19 +20,18 @@ | |||
| 20 | * @len: length of buffer | 20 | * @len: length of buffer |
| 21 | * | 21 | * |
| 22 | * This function returns a string formatted to 3 significant figures | 22 | * This function returns a string formatted to 3 significant figures |
| 23 | * giving the size in the required units. Returns 0 on success or | 23 | * giving the size in the required units. @buf should have room for |
| 24 | * error on failure. @buf is always zero terminated. | 24 | * at least 9 bytes and will always be zero terminated. |
| 25 | * | 25 | * |
| 26 | */ | 26 | */ |
| 27 | int string_get_size(u64 size, const enum string_size_units units, | 27 | void string_get_size(u64 size, const enum string_size_units units, |
| 28 | char *buf, int len) | 28 | char *buf, int len) |
| 29 | { | 29 | { |
| 30 | static const char *const units_10[] = { | 30 | static const char *const units_10[] = { |
| 31 | "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", NULL | 31 | "B", "kB", "MB", "GB", "TB", "PB", "EB" |
| 32 | }; | 32 | }; |
| 33 | static const char *const units_2[] = { | 33 | static const char *const units_2[] = { |
| 34 | "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", | 34 | "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB" |
| 35 | NULL | ||
| 36 | }; | 35 | }; |
| 37 | static const char *const *const units_str[] = { | 36 | static const char *const *const units_str[] = { |
| 38 | [STRING_UNITS_10] = units_10, | 37 | [STRING_UNITS_10] = units_10, |
| @@ -43,13 +42,13 @@ int string_get_size(u64 size, const enum string_size_units units, | |||
| 43 | [STRING_UNITS_2] = 1024, | 42 | [STRING_UNITS_2] = 1024, |
| 44 | }; | 43 | }; |
| 45 | int i, j; | 44 | int i, j; |
| 46 | u64 remainder = 0, sf_cap; | 45 | u32 remainder = 0, sf_cap; |
| 47 | char tmp[8]; | 46 | char tmp[8]; |
| 48 | 47 | ||
| 49 | tmp[0] = '\0'; | 48 | tmp[0] = '\0'; |
| 50 | i = 0; | 49 | i = 0; |
| 51 | if (size >= divisor[units]) { | 50 | if (size >= divisor[units]) { |
| 52 | while (size >= divisor[units] && units_str[units][i]) { | 51 | while (size >= divisor[units]) { |
| 53 | remainder = do_div(size, divisor[units]); | 52 | remainder = do_div(size, divisor[units]); |
| 54 | i++; | 53 | i++; |
| 55 | } | 54 | } |
| @@ -60,17 +59,14 @@ int string_get_size(u64 size, const enum string_size_units units, | |||
| 60 | 59 | ||
| 61 | if (j) { | 60 | if (j) { |
| 62 | remainder *= 1000; | 61 | remainder *= 1000; |
| 63 | do_div(remainder, divisor[units]); | 62 | remainder /= divisor[units]; |
| 64 | snprintf(tmp, sizeof(tmp), ".%03lld", | 63 | snprintf(tmp, sizeof(tmp), ".%03u", remainder); |
| 65 | (unsigned long long)remainder); | ||
| 66 | tmp[j+1] = '\0'; | 64 | tmp[j+1] = '\0'; |
| 67 | } | 65 | } |
| 68 | } | 66 | } |
| 69 | 67 | ||
| 70 | snprintf(buf, len, "%lld%s %s", (unsigned long long)size, | 68 | snprintf(buf, len, "%u%s %s", (u32)size, |
| 71 | tmp, units_str[units][i]); | 69 | tmp, units_str[units][i]); |
| 72 | |||
| 73 | return 0; | ||
| 74 | } | 70 | } |
| 75 | EXPORT_SYMBOL(string_get_size); | 71 | EXPORT_SYMBOL(string_get_size); |
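Under the new void contract there is no return value to check; a usage sketch, with the results worked out from the code above:

	char buf[16];

	string_get_size(8192, STRING_UNITS_2, buf, sizeof(buf));
	/* buf == "8.00 KiB" */
	string_get_size(8192, STRING_UNITS_10, buf, sizeof(buf));
	/* buf == "8.19 kB" */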
| 76 | 72 | ||
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c index bb2b201d6ad0..e0af6ff73d14 100644 --- a/lib/strncpy_from_user.c +++ b/lib/strncpy_from_user.c | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | #include <linux/module.h> | 1 | #include <linux/compiler.h> |
| 2 | #include <linux/export.h> | ||
| 2 | #include <linux/uaccess.h> | 3 | #include <linux/uaccess.h> |
| 3 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
| 4 | #include <linux/errno.h> | 5 | #include <linux/errno.h> |
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c new file mode 100644 index 000000000000..daf29a390a89 --- /dev/null +++ b/lib/test-hexdump.c | |||
| @@ -0,0 +1,180 @@ | |||
| 1 | /* | ||
| 2 | * Test cases for lib/hexdump.c module. | ||
| 3 | */ | ||
| 4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
| 5 | |||
| 6 | #include <linux/init.h> | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/module.h> | ||
| 9 | #include <linux/random.h> | ||
| 10 | #include <linux/string.h> | ||
| 11 | |||
| 12 | static const unsigned char data_b[] = { | ||
| 13 | '\xbe', '\x32', '\xdb', '\x7b', '\x0a', '\x18', '\x93', '\xb2', /* 00 - 07 */ | ||
| 14 | '\x70', '\xba', '\xc4', '\x24', '\x7d', '\x83', '\x34', '\x9b', /* 08 - 0f */ | ||
| 15 | '\xa6', '\x9c', '\x31', '\xad', '\x9c', '\x0f', '\xac', '\xe9', /* 10 - 17 */ | ||
| 16 | '\x4c', '\xd1', '\x19', '\x99', '\x43', '\xb1', '\xaf', '\x0c', /* 18 - 1f */ | ||
| 17 | }; | ||
| 18 | |||
| 19 | static const unsigned char data_a[] = ".2.{....p..$}.4...1.....L...C..."; | ||
| 20 | |||
| 21 | static const char *test_data_1_le[] __initconst = { | ||
| 22 | "be", "32", "db", "7b", "0a", "18", "93", "b2", | ||
| 23 | "70", "ba", "c4", "24", "7d", "83", "34", "9b", | ||
| 24 | "a6", "9c", "31", "ad", "9c", "0f", "ac", "e9", | ||
| 25 | "4c", "d1", "19", "99", "43", "b1", "af", "0c", | ||
| 26 | }; | ||
| 27 | |||
| 28 | static const char *test_data_2_le[] __initconst = { | ||
| 29 | "32be", "7bdb", "180a", "b293", | ||
| 30 | "ba70", "24c4", "837d", "9b34", | ||
| 31 | "9ca6", "ad31", "0f9c", "e9ac", | ||
| 32 | "d14c", "9919", "b143", "0caf", | ||
| 33 | }; | ||
| 34 | |||
| 35 | static const char *test_data_4_le[] __initconst = { | ||
| 36 | "7bdb32be", "b293180a", "24c4ba70", "9b34837d", | ||
| 37 | "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143", | ||
| 38 | }; | ||
| 39 | |||
| 40 | static const char *test_data_8_le[] __initconst = { | ||
| 41 | "b293180a7bdb32be", "9b34837d24c4ba70", | ||
| 42 | "e9ac0f9cad319ca6", "0cafb1439919d14c", | ||
| 43 | }; | ||
| 44 | |||
| 45 | static void __init test_hexdump(size_t len, int rowsize, int groupsize, | ||
| 46 | bool ascii) | ||
| 47 | { | ||
| 48 | char test[32 * 3 + 2 + 32 + 1]; | ||
| 49 | char real[32 * 3 + 2 + 32 + 1]; | ||
| 50 | char *p; | ||
| 51 | const char **result; | ||
| 52 | size_t l = len; | ||
| 53 | int gs = groupsize, rs = rowsize; | ||
| 54 | unsigned int i; | ||
| 55 | |||
| 56 | hex_dump_to_buffer(data_b, l, rs, gs, real, sizeof(real), ascii); | ||
| 57 | |||
| 58 | if (rs != 16 && rs != 32) | ||
| 59 | rs = 16; | ||
| 60 | |||
| 61 | if (l > rs) | ||
| 62 | l = rs; | ||
| 63 | |||
| 64 | if (!is_power_of_2(gs) || gs > 8 || (len % gs != 0)) | ||
| 65 | gs = 1; | ||
| 66 | |||
| 67 | if (gs == 8) | ||
| 68 | result = test_data_8_le; | ||
| 69 | else if (gs == 4) | ||
| 70 | result = test_data_4_le; | ||
| 71 | else if (gs == 2) | ||
| 72 | result = test_data_2_le; | ||
| 73 | else | ||
| 74 | result = test_data_1_le; | ||
| 75 | |||
| 76 | memset(test, ' ', sizeof(test)); | ||
| 77 | |||
| 78 | /* hex dump */ | ||
| 79 | p = test; | ||
| 80 | for (i = 0; i < l / gs; i++) { | ||
| 81 | const char *q = *result++; | ||
| 82 | size_t amount = strlen(q); | ||
| 83 | |||
| 84 | strncpy(p, q, amount); | ||
| 85 | p += amount + 1; | ||
| 86 | } | ||
| 87 | if (i) | ||
| 88 | p--; | ||
| 89 | |||
| 90 | /* ASCII part */ | ||
| 91 | if (ascii) { | ||
| 92 | p = test + rs * 2 + rs / gs + 1; | ||
| 93 | strncpy(p, data_a, l); | ||
| 94 | p += l; | ||
| 95 | } | ||
| 96 | |||
| 97 | *p = '\0'; | ||
| 98 | |||
| 99 | if (strcmp(test, real)) { | ||
| 100 | pr_err("Len: %zu row: %d group: %d\n", len, rowsize, groupsize); | ||
| 101 | pr_err("Result: '%s'\n", real); | ||
| 102 | pr_err("Expect: '%s'\n", test); | ||
| 103 | } | ||
| 104 | } | ||
| 105 | |||
| 106 | static void __init test_hexdump_set(int rowsize, bool ascii) | ||
| 107 | { | ||
| 108 | size_t d = min_t(size_t, sizeof(data_b), rowsize); | ||
| 109 | size_t len = get_random_int() % d + 1; | ||
| 110 | |||
| 111 | test_hexdump(len, rowsize, 4, ascii); | ||
| 112 | test_hexdump(len, rowsize, 2, ascii); | ||
| 113 | test_hexdump(len, rowsize, 8, ascii); | ||
| 114 | test_hexdump(len, rowsize, 1, ascii); | ||
| 115 | } | ||
| 116 | |||
| 117 | static void __init test_hexdump_overflow(bool ascii) | ||
| 118 | { | ||
| 119 | char buf[56]; | ||
| 120 | const char *t = test_data_1_le[0]; | ||
| 121 | size_t l = get_random_int() % sizeof(buf); | ||
| 122 | bool a; | ||
| 123 | int e, r; | ||
| 124 | |||
| 125 | memset(buf, ' ', sizeof(buf)); | ||
| 126 | |||
| 127 | r = hex_dump_to_buffer(data_b, 1, 16, 1, buf, l, ascii); | ||
| 128 | |||
| 129 | if (ascii) | ||
| 130 | e = 50; | ||
| 131 | else | ||
| 132 | e = 2; | ||
| 133 | buf[e + 2] = '\0'; | ||
| 134 | |||
| 135 | if (!l) { | ||
| 136 | a = r == e && buf[0] == ' '; | ||
| 137 | } else if (l < 3) { | ||
| 138 | a = r == e && buf[0] == '\0'; | ||
| 139 | } else if (l < 4) { | ||
| 140 | a = r == e && !strcmp(buf, t); | ||
| 141 | } else if (ascii) { | ||
| 142 | if (l < 51) | ||
| 143 | a = r == e && buf[l - 1] == '\0' && buf[l - 2] == ' '; | ||
| 144 | else | ||
| 145 | a = r == e && buf[50] == '\0' && buf[49] == '.'; | ||
| 146 | } else { | ||
| 147 | a = r == e && buf[e] == '\0'; | ||
| 148 | } | ||
| 149 | |||
| 150 | if (!a) { | ||
| 151 | pr_err("Len: %zu rc: %u strlen: %zu\n", l, r, strlen(buf)); | ||
| 152 | pr_err("Result: '%s'\n", buf); | ||
| 153 | } | ||
| 154 | } | ||
| 155 | |||
| 156 | static int __init test_hexdump_init(void) | ||
| 157 | { | ||
| 158 | unsigned int i; | ||
| 159 | int rowsize; | ||
| 160 | |||
| 161 | pr_info("Running tests...\n"); | ||
| 162 | |||
| 163 | rowsize = (get_random_int() % 2 + 1) * 16; | ||
| 164 | for (i = 0; i < 16; i++) | ||
| 165 | test_hexdump_set(rowsize, false); | ||
| 166 | |||
| 167 | rowsize = (get_random_int() % 2 + 1) * 16; | ||
| 168 | for (i = 0; i < 16; i++) | ||
| 169 | test_hexdump_set(rowsize, true); | ||
| 170 | |||
| 171 | for (i = 0; i < 16; i++) | ||
| 172 | test_hexdump_overflow(false); | ||
| 173 | |||
| 174 | for (i = 0; i < 16; i++) | ||
| 175 | test_hexdump_overflow(true); | ||
| 176 | |||
| 177 | return -EINVAL; /* fail init so the module can be re-inserted to re-run the tests */ | ||
| 178 | } | ||
| 179 | module_init(test_hexdump_init); | ||
| 180 | MODULE_LICENSE("Dual BSD/GPL"); | ||
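For reference, the layout the test reconstructs by hand: with rowsize 16, groupsize 1 and ascii enabled, hex_dump_to_buffer() renders the first 16 bytes of data_b as space-separated hex pairs, two padding spaces, then the ASCII column (non-printables as '.'), which for the data above should look like:

	be 32 db 7b 0a 18 93 b2 70 ba c4 24 7d 83 34 9b  .2.{....p..$}.4.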
diff --git a/lib/test_kasan.c b/lib/test_kasan.c new file mode 100644 index 000000000000..098c08eddfab --- /dev/null +++ b/lib/test_kasan.c | |||
| @@ -0,0 +1,277 @@ | |||
| 1 | /* | ||
| 2 | * | ||
| 3 | * Copyright (c) 2014 Samsung Electronics Co., Ltd. | ||
| 4 | * Author: Andrey Ryabinin <a.ryabinin@samsung.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #define pr_fmt(fmt) "kasan test: %s " fmt, __func__ | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/printk.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/string.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | |||
| 20 | static noinline void __init kmalloc_oob_right(void) | ||
| 21 | { | ||
| 22 | char *ptr; | ||
| 23 | size_t size = 123; | ||
| 24 | |||
| 25 | pr_info("out-of-bounds to right\n"); | ||
| 26 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 27 | if (!ptr) { | ||
| 28 | pr_err("Allocation failed\n"); | ||
| 29 | return; | ||
| 30 | } | ||
| 31 | |||
| 32 | ptr[size] = 'x'; | ||
| 33 | kfree(ptr); | ||
| 34 | } | ||
| 35 | |||
| 36 | static noinline void __init kmalloc_oob_left(void) | ||
| 37 | { | ||
| 38 | char *ptr; | ||
| 39 | size_t size = 15; | ||
| 40 | |||
| 41 | pr_info("out-of-bounds to left\n"); | ||
| 42 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 43 | if (!ptr) { | ||
| 44 | pr_err("Allocation failed\n"); | ||
| 45 | return; | ||
| 46 | } | ||
| 47 | |||
| 48 | *ptr = *(ptr - 1); | ||
| 49 | kfree(ptr); | ||
| 50 | } | ||
| 51 | |||
| 52 | static noinline void __init kmalloc_node_oob_right(void) | ||
| 53 | { | ||
| 54 | char *ptr; | ||
| 55 | size_t size = 4096; | ||
| 56 | |||
| 57 | pr_info("kmalloc_node(): out-of-bounds to right\n"); | ||
| 58 | ptr = kmalloc_node(size, GFP_KERNEL, 0); | ||
| 59 | if (!ptr) { | ||
| 60 | pr_err("Allocation failed\n"); | ||
| 61 | return; | ||
| 62 | } | ||
| 63 | |||
| 64 | ptr[size] = 0; | ||
| 65 | kfree(ptr); | ||
| 66 | } | ||
| 67 | |||
| 68 | static noinline void __init kmalloc_large_oob_right(void) | ||
| 69 | { | ||
| 70 | char *ptr; | ||
| 71 | size_t size = KMALLOC_MAX_CACHE_SIZE + 10; | ||
| 72 | |||
| 73 | pr_info("kmalloc large allocation: out-of-bounds to right\n"); | ||
| 74 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 75 | if (!ptr) { | ||
| 76 | pr_err("Allocation failed\n"); | ||
| 77 | return; | ||
| 78 | } | ||
| 79 | |||
| 80 | ptr[size] = 0; | ||
| 81 | kfree(ptr); | ||
| 82 | } | ||
| 83 | |||
| 84 | static noinline void __init kmalloc_oob_krealloc_more(void) | ||
| 85 | { | ||
| 86 | char *ptr1, *ptr2; | ||
| 87 | size_t size1 = 17; | ||
| 88 | size_t size2 = 19; | ||
| 89 | |||
| 90 | pr_info("out-of-bounds after krealloc more\n"); | ||
| 91 | ptr1 = kmalloc(size1, GFP_KERNEL); | ||
| 92 | ptr2 = krealloc(ptr1, size2, GFP_KERNEL); | ||
| 93 | if (!ptr1 || !ptr2) { | ||
| 94 | pr_err("Allocation failed\n"); | ||
| 95 | kfree(ptr1); | ||
| 96 | return; | ||
| 97 | } | ||
| 98 | |||
| 99 | ptr2[size2] = 'x'; | ||
| 100 | kfree(ptr2); | ||
| 101 | } | ||
| 102 | |||
| 103 | static noinline void __init kmalloc_oob_krealloc_less(void) | ||
| 104 | { | ||
| 105 | char *ptr1, *ptr2; | ||
| 106 | size_t size1 = 17; | ||
| 107 | size_t size2 = 15; | ||
| 108 | |||
| 109 | pr_info("out-of-bounds after krealloc less\n"); | ||
| 110 | ptr1 = kmalloc(size1, GFP_KERNEL); | ||
| 111 | ptr2 = krealloc(ptr1, size2, GFP_KERNEL); | ||
| 112 | if (!ptr1 || !ptr2) { | ||
| 113 | pr_err("Allocation failed\n"); | ||
| 114 | kfree(ptr1); | ||
| 115 | return; | ||
| 116 | } | ||
| 117 | ptr2[size1] = 'x'; | ||
| 118 | kfree(ptr2); | ||
| 119 | } | ||
| 120 | |||
| 121 | static noinline void __init kmalloc_oob_16(void) | ||
| 122 | { | ||
| 123 | struct { | ||
| 124 | u64 words[2]; | ||
| 125 | } *ptr1, *ptr2; | ||
| 126 | |||
| 127 | pr_info("kmalloc out-of-bounds for 16-bytes access\n"); | ||
| 128 | ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); | ||
| 129 | ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); | ||
| 130 | if (!ptr1 || !ptr2) { | ||
| 131 | pr_err("Allocation failed\n"); | ||
| 132 | kfree(ptr1); | ||
| 133 | kfree(ptr2); | ||
| 134 | return; | ||
| 135 | } | ||
| 136 | *ptr1 = *ptr2; | ||
| 137 | kfree(ptr1); | ||
| 138 | kfree(ptr2); | ||
| 139 | } | ||
| 140 | |||
| 141 | static noinline void __init kmalloc_oob_in_memset(void) | ||
| 142 | { | ||
| 143 | char *ptr; | ||
| 144 | size_t size = 666; | ||
| 145 | |||
| 146 | pr_info("out-of-bounds in memset\n"); | ||
| 147 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 148 | if (!ptr) { | ||
| 149 | pr_err("Allocation failed\n"); | ||
| 150 | return; | ||
| 151 | } | ||
| 152 | |||
| 153 | memset(ptr, 0, size+5); | ||
| 154 | kfree(ptr); | ||
| 155 | } | ||
| 156 | |||
| 157 | static noinline void __init kmalloc_uaf(void) | ||
| 158 | { | ||
| 159 | char *ptr; | ||
| 160 | size_t size = 10; | ||
| 161 | |||
| 162 | pr_info("use-after-free\n"); | ||
| 163 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 164 | if (!ptr) { | ||
| 165 | pr_err("Allocation failed\n"); | ||
| 166 | return; | ||
| 167 | } | ||
| 168 | |||
| 169 | kfree(ptr); | ||
| 170 | *(ptr + 8) = 'x'; | ||
| 171 | } | ||
| 172 | |||
| 173 | static noinline void __init kmalloc_uaf_memset(void) | ||
| 174 | { | ||
| 175 | char *ptr; | ||
| 176 | size_t size = 33; | ||
| 177 | |||
| 178 | pr_info("use-after-free in memset\n"); | ||
| 179 | ptr = kmalloc(size, GFP_KERNEL); | ||
| 180 | if (!ptr) { | ||
| 181 | pr_err("Allocation failed\n"); | ||
| 182 | return; | ||
| 183 | } | ||
| 184 | |||
| 185 | kfree(ptr); | ||
| 186 | memset(ptr, 0, size); | ||
| 187 | } | ||
| 188 | |||
| 189 | static noinline void __init kmalloc_uaf2(void) | ||
| 190 | { | ||
| 191 | char *ptr1, *ptr2; | ||
| 192 | size_t size = 43; | ||
| 193 | |||
| 194 | pr_info("use-after-free after another kmalloc\n"); | ||
| 195 | ptr1 = kmalloc(size, GFP_KERNEL); | ||
| 196 | if (!ptr1) { | ||
| 197 | pr_err("Allocation failed\n"); | ||
| 198 | return; | ||
| 199 | } | ||
| 200 | |||
| 201 | kfree(ptr1); | ||
| 202 | ptr2 = kmalloc(size, GFP_KERNEL); | ||
| 203 | if (!ptr2) { | ||
| 204 | pr_err("Allocation failed\n"); | ||
| 205 | return; | ||
| 206 | } | ||
| 207 | |||
| 208 | ptr1[40] = 'x'; | ||
| 209 | kfree(ptr2); | ||
| 210 | } | ||
| 211 | |||
| 212 | static noinline void __init kmem_cache_oob(void) | ||
| 213 | { | ||
| 214 | char *p; | ||
| 215 | size_t size = 200; | ||
| 216 | struct kmem_cache *cache = kmem_cache_create("test_cache", | ||
| 217 | size, 0, | ||
| 218 | 0, NULL); | ||
| 219 | if (!cache) { | ||
| 220 | pr_err("Cache allocation failed\n"); | ||
| 221 | return; | ||
| 222 | } | ||
| 223 | pr_info("out-of-bounds in kmem_cache_alloc\n"); | ||
| 224 | p = kmem_cache_alloc(cache, GFP_KERNEL); | ||
| 225 | if (!p) { | ||
| 226 | pr_err("Allocation failed\n"); | ||
| 227 | kmem_cache_destroy(cache); | ||
| 228 | return; | ||
| 229 | } | ||
| 230 | |||
| 231 | *p = p[size]; | ||
| 232 | kmem_cache_free(cache, p); | ||
| 233 | kmem_cache_destroy(cache); | ||
| 234 | } | ||
| 235 | |||
| 236 | static char global_array[10]; | ||
| 237 | |||
| 238 | static noinline void __init kasan_global_oob(void) | ||
| 239 | { | ||
| 240 | volatile int i = 3; | ||
| 241 | char *p = &global_array[ARRAY_SIZE(global_array) + i]; | ||
| 242 | |||
| 243 | pr_info("out-of-bounds global variable\n"); | ||
| 244 | *(volatile char *)p; | ||
| 245 | } | ||
| 246 | |||
| 247 | static noinline void __init kasan_stack_oob(void) | ||
| 248 | { | ||
| 249 | char stack_array[10]; | ||
| 250 | volatile int i = 0; | ||
| 251 | char *p = &stack_array[ARRAY_SIZE(stack_array) + i]; | ||
| 252 | |||
| 253 | pr_info("out-of-bounds on stack\n"); | ||
| 254 | *(volatile char *)p; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int __init kmalloc_tests_init(void) | ||
| 258 | { | ||
| 259 | kmalloc_oob_right(); | ||
| 260 | kmalloc_oob_left(); | ||
| 261 | kmalloc_node_oob_right(); | ||
| 262 | kmalloc_large_oob_right(); | ||
| 263 | kmalloc_oob_krealloc_more(); | ||
| 264 | kmalloc_oob_krealloc_less(); | ||
| 265 | kmalloc_oob_16(); | ||
| 266 | kmalloc_oob_in_memset(); | ||
| 267 | kmalloc_uaf(); | ||
| 268 | kmalloc_uaf_memset(); | ||
| 269 | kmalloc_uaf2(); | ||
| 270 | kmem_cache_oob(); | ||
| 271 | kasan_stack_oob(); | ||
| 272 | kasan_global_oob(); | ||
| 273 | return -EAGAIN; /* fail init so the module can be re-inserted to re-run the tests */ | ||
| 274 | } | ||
| 275 | |||
| 276 | module_init(kmalloc_tests_init); | ||
| 277 | MODULE_LICENSE("GPL"); | ||
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c new file mode 100644 index 000000000000..1dfeba73fc74 --- /dev/null +++ b/lib/test_rhashtable.c | |||
| @@ -0,0 +1,227 @@ | |||
| 1 | /* | ||
| 2 | * Resizable, Scalable, Concurrent Hash Table | ||
| 3 | * | ||
| 4 | * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> | ||
| 5 | * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> | ||
| 6 | * | ||
| 7 | * Based on the following paper: | ||
| 8 | * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf | ||
| 9 | * | ||
| 10 | * Code partially derived from nft_hash | ||
| 11 | * | ||
| 12 | * This program is free software; you can redistribute it and/or modify | ||
| 13 | * it under the terms of the GNU General Public License version 2 as | ||
| 14 | * published by the Free Software Foundation. | ||
| 15 | */ | ||
| 16 | |||
| 17 | /************************************************************************** | ||
| 18 | * Self Test | ||
| 19 | **************************************************************************/ | ||
| 20 | |||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/jhash.h> | ||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/rcupdate.h> | ||
| 26 | #include <linux/rhashtable.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | |||
| 29 | |||
| 30 | #define TEST_HT_SIZE 8 | ||
| 31 | #define TEST_ENTRIES 2048 | ||
| 32 | #define TEST_PTR ((void *) 0xdeadbeef) | ||
| 33 | #define TEST_NEXPANDS 4 | ||
| 34 | |||
| 35 | struct test_obj { | ||
| 36 | void *ptr; | ||
| 37 | int value; | ||
| 38 | struct rhash_head node; | ||
| 39 | }; | ||
| 40 | |||
| 41 | static int __init test_rht_lookup(struct rhashtable *ht) | ||
| 42 | { | ||
| 43 | unsigned int i; | ||
| 44 | |||
| 45 | for (i = 0; i < TEST_ENTRIES * 2; i++) { | ||
| 46 | struct test_obj *obj; | ||
| 47 | bool expected = !(i % 2); | ||
| 48 | u32 key = i; | ||
| 49 | |||
| 50 | obj = rhashtable_lookup(ht, &key); | ||
| 51 | |||
| 52 | if (expected && !obj) { | ||
| 53 | pr_warn("Test failed: Could not find key %u\n", key); | ||
| 54 | return -ENOENT; | ||
| 55 | } else if (!expected && obj) { | ||
| 56 | pr_warn("Test failed: Unexpected entry found for key %u\n", | ||
| 57 | key); | ||
| 58 | return -EEXIST; | ||
| 59 | } else if (expected && obj) { | ||
| 60 | if (obj->ptr != TEST_PTR || obj->value != i) { | ||
| 61 | pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", | ||
| 62 | obj->ptr, TEST_PTR, obj->value, i); | ||
| 63 | return -EINVAL; | ||
| 64 | } | ||
| 65 | } | ||
| 66 | } | ||
| 67 | |||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | static void test_bucket_stats(struct rhashtable *ht, bool quiet) | ||
| 72 | { | ||
| 73 | unsigned int cnt, rcu_cnt, i, total = 0; | ||
| 74 | struct rhash_head *pos; | ||
| 75 | struct test_obj *obj; | ||
| 76 | struct bucket_table *tbl; | ||
| 77 | |||
| 78 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 79 | for (i = 0; i < tbl->size; i++) { | ||
| 80 | rcu_cnt = cnt = 0; | ||
| 81 | |||
| 82 | if (!quiet) | ||
| 83 | pr_info(" [%#4x/%zu]", i, tbl->size); | ||
| 84 | |||
| 85 | rht_for_each_entry_rcu(obj, pos, tbl, i, node) { | ||
| 86 | cnt++; | ||
| 87 | total++; | ||
| 88 | if (!quiet) | ||
| 89 | pr_cont(" [%p],", obj); | ||
| 90 | } | ||
| 91 | |||
| 92 | rht_for_each_entry_rcu(obj, pos, tbl, i, node) | ||
| 93 | rcu_cnt++; | ||
| 94 | |||
| 95 | if (rcu_cnt != cnt) | ||
| 96 | pr_warn("Test failed: Chain count mismach %d != %d", | ||
| 97 | cnt, rcu_cnt); | ||
| 98 | |||
| 99 | if (!quiet) | ||
| 100 | pr_cont("\n [%#x] first element: %p, chain length: %u\n", | ||
| 101 | i, tbl->buckets[i], cnt); | ||
| 102 | } | ||
| 103 | |||
| 104 | pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", | ||
| 105 | total, atomic_read(&ht->nelems), TEST_ENTRIES); | ||
| 106 | |||
| 107 | if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) | ||
| 108 | pr_warn("Test failed: Total count mismatch ^^^"); | ||
| 109 | } | ||
| 110 | |||
| 111 | static int __init test_rhashtable(struct rhashtable *ht) | ||
| 112 | { | ||
| 113 | struct bucket_table *tbl; | ||
| 114 | struct test_obj *obj; | ||
| 115 | struct rhash_head *pos, *next; | ||
| 116 | int err; | ||
| 117 | unsigned int i; | ||
| 118 | |||
| 119 | /* | ||
| 120 | * Insertion Test: | ||
| 121 | * Insert TEST_ENTRIES into table with all keys even numbers | ||
| 122 | */ | ||
| 123 | pr_info(" Adding %d keys\n", TEST_ENTRIES); | ||
| 124 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
| 125 | struct test_obj *obj; | ||
| 126 | |||
| 127 | obj = kzalloc(sizeof(*obj), GFP_KERNEL); | ||
| 128 | if (!obj) { | ||
| 129 | err = -ENOMEM; | ||
| 130 | goto error; | ||
| 131 | } | ||
| 132 | |||
| 133 | obj->ptr = TEST_PTR; | ||
| 134 | obj->value = i * 2; | ||
| 135 | |||
| 136 | rhashtable_insert(ht, &obj->node); | ||
| 137 | } | ||
| 138 | |||
| 139 | rcu_read_lock(); | ||
| 140 | test_bucket_stats(ht, true); | ||
| 141 | test_rht_lookup(ht); | ||
| 142 | rcu_read_unlock(); | ||
| 143 | |||
| 144 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
| 145 | pr_info(" Table expansion iteration %u...\n", i); | ||
| 146 | mutex_lock(&ht->mutex); | ||
| 147 | rhashtable_expand(ht); | ||
| 148 | mutex_unlock(&ht->mutex); | ||
| 149 | |||
| 150 | rcu_read_lock(); | ||
| 151 | pr_info(" Verifying lookups...\n"); | ||
| 152 | test_rht_lookup(ht); | ||
| 153 | rcu_read_unlock(); | ||
| 154 | } | ||
| 155 | |||
| 156 | for (i = 0; i < TEST_NEXPANDS; i++) { | ||
| 157 | pr_info(" Table shrinkage iteration %u...\n", i); | ||
| 158 | mutex_lock(&ht->mutex); | ||
| 159 | rhashtable_shrink(ht); | ||
| 160 | mutex_unlock(&ht->mutex); | ||
| 161 | |||
| 162 | rcu_read_lock(); | ||
| 163 | pr_info(" Verifying lookups...\n"); | ||
| 164 | test_rht_lookup(ht); | ||
| 165 | rcu_read_unlock(); | ||
| 166 | } | ||
| 167 | |||
| 168 | rcu_read_lock(); | ||
| 169 | test_bucket_stats(ht, true); | ||
| 170 | rcu_read_unlock(); | ||
| 171 | |||
| 172 | pr_info(" Deleting %d keys\n", TEST_ENTRIES); | ||
| 173 | for (i = 0; i < TEST_ENTRIES; i++) { | ||
| 174 | u32 key = i * 2; | ||
| 175 | |||
| 176 | obj = rhashtable_lookup(ht, &key); | ||
| 177 | BUG_ON(!obj); | ||
| 178 | |||
| 179 | rhashtable_remove(ht, &obj->node); | ||
| 180 | kfree(obj); | ||
| 181 | } | ||
| 182 | |||
| 183 | return 0; | ||
| 184 | |||
| 185 | error: | ||
| 186 | tbl = rht_dereference_rcu(ht->tbl, ht); | ||
| 187 | for (i = 0; i < tbl->size; i++) | ||
| 188 | rht_for_each_entry_safe(obj, pos, next, tbl, i, node) | ||
| 189 | kfree(obj); | ||
| 190 | |||
| 191 | return err; | ||
| 192 | } | ||
| 193 | |||
| 194 | static int __init test_rht_init(void) | ||
| 195 | { | ||
| 196 | struct rhashtable ht; | ||
| 197 | struct rhashtable_params params = { | ||
| 198 | .nelem_hint = TEST_HT_SIZE, | ||
| 199 | .head_offset = offsetof(struct test_obj, node), | ||
| 200 | .key_offset = offsetof(struct test_obj, value), | ||
| 201 | .key_len = sizeof(int), | ||
| 202 | .hashfn = jhash, | ||
| 203 | .nulls_base = (3U << RHT_BASE_SHIFT), | ||
| 204 | .grow_decision = rht_grow_above_75, | ||
| 205 | .shrink_decision = rht_shrink_below_30, | ||
| 206 | }; | ||
| 207 | int err; | ||
| 208 | |||
| 209 | pr_info("Running resizable hashtable tests...\n"); | ||
| 210 | |||
| 211 | err = rhashtable_init(&ht, ¶ms); | ||
| 212 | if (err < 0) { | ||
| 213 | pr_warn("Test failed: Unable to initialize hashtable: %d\n", | ||
| 214 | err); | ||
| 215 | return err; | ||
| 216 | } | ||
| 217 | |||
| 218 | err = test_rhashtable(&ht); | ||
| 219 | |||
| 220 | rhashtable_destroy(&ht); | ||
| 221 | |||
| 222 | return err; | ||
| 223 | } | ||
| 224 | |||
| 225 | module_init(test_rht_init); | ||
| 226 | |||
| 227 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/lib/vsprintf.c b/lib/vsprintf.c index ec337f64f52d..b235c96167d3 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c | |||
| @@ -114,8 +114,9 @@ int skip_atoi(const char **s) | |||
| 114 | { | 114 | { |
| 115 | int i = 0; | 115 | int i = 0; |
| 116 | 116 | ||
| 117 | while (isdigit(**s)) | 117 | do { |
| 118 | i = i*10 + *((*s)++) - '0'; | 118 | i = i*10 + *((*s)++) - '0'; |
| 119 | } while (isdigit(**s)); | ||
| 119 | 120 | ||
| 120 | return i; | 121 | return i; |
| 121 | } | 122 | } |
| @@ -793,6 +794,87 @@ char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec, | |||
| 793 | } | 794 | } |
| 794 | 795 | ||
| 795 | static noinline_for_stack | 796 | static noinline_for_stack |
| 797 | char *bitmap_string(char *buf, char *end, unsigned long *bitmap, | ||
| 798 | struct printf_spec spec, const char *fmt) | ||
| 799 | { | ||
| 800 | const int CHUNKSZ = 32; | ||
| 801 | int nr_bits = max_t(int, spec.field_width, 0); | ||
| 802 | int i, chunksz; | ||
| 803 | bool first = true; | ||
| 804 | |||
| 805 | /* reused to print numbers */ | ||
| 806 | spec = (struct printf_spec){ .flags = SMALL | ZEROPAD, .base = 16 }; | ||
| 807 | |||
| 808 | chunksz = nr_bits & (CHUNKSZ - 1); | ||
| 809 | if (chunksz == 0) | ||
| 810 | chunksz = CHUNKSZ; | ||
| 811 | |||
| 812 | i = ALIGN(nr_bits, CHUNKSZ) - CHUNKSZ; | ||
| 813 | for (; i >= 0; i -= CHUNKSZ) { | ||
| 814 | u32 chunkmask, val; | ||
| 815 | int word, bit; | ||
| 816 | |||
| 817 | chunkmask = ((1ULL << chunksz) - 1); | ||
| 818 | word = i / BITS_PER_LONG; | ||
| 819 | bit = i % BITS_PER_LONG; | ||
| 820 | val = (bitmap[word] >> bit) & chunkmask; | ||
| 821 | |||
| 822 | if (!first) { | ||
| 823 | if (buf < end) | ||
| 824 | *buf = ','; | ||
| 825 | buf++; | ||
| 826 | } | ||
| 827 | first = false; | ||
| 828 | |||
| 829 | spec.field_width = DIV_ROUND_UP(chunksz, 4); | ||
| 830 | buf = number(buf, end, val, spec); | ||
| 831 | |||
| 832 | chunksz = CHUNKSZ; | ||
| 833 | } | ||
| 834 | return buf; | ||
| 835 | } | ||
| 836 | |||
| 837 | static noinline_for_stack | ||
| 838 | char *bitmap_list_string(char *buf, char *end, unsigned long *bitmap, | ||
| 839 | struct printf_spec spec, const char *fmt) | ||
| 840 | { | ||
| 841 | int nr_bits = max_t(int, spec.field_width, 0); | ||
| 842 | /* current bit is 'cur', most recently seen range is [rbot, rtop] */ | ||
| 843 | int cur, rbot, rtop; | ||
| 844 | bool first = true; | ||
| 845 | |||
| 846 | /* reused to print numbers */ | ||
| 847 | spec = (struct printf_spec){ .base = 10 }; | ||
| 848 | |||
| 849 | rbot = cur = find_first_bit(bitmap, nr_bits); | ||
| 850 | while (cur < nr_bits) { | ||
| 851 | rtop = cur; | ||
| 852 | cur = find_next_bit(bitmap, nr_bits, cur + 1); | ||
| 853 | if (cur < nr_bits && cur <= rtop + 1) | ||
| 854 | continue; | ||
| 855 | |||
| 856 | if (!first) { | ||
| 857 | if (buf < end) | ||
| 858 | *buf = ','; | ||
| 859 | buf++; | ||
| 860 | } | ||
| 861 | first = false; | ||
| 862 | |||
| 863 | buf = number(buf, end, rbot, spec); | ||
| 864 | if (rbot < rtop) { | ||
| 865 | if (buf < end) | ||
| 866 | *buf = '-'; | ||
| 867 | buf++; | ||
| 868 | |||
| 869 | buf = number(buf, end, rtop, spec); | ||
| 870 | } | ||
| 871 | |||
| 872 | rbot = cur; | ||
| 873 | } | ||
| 874 | return buf; | ||
| 875 | } | ||
| 876 | |||
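A caller-side sketch of the two new conversions (not part of the patch; the hex form prints 32-bit chunks, most significant first, separated by commas):

	DECLARE_BITMAP(mask, 64);

	bitmap_zero(mask, 64);
	bitmap_set(mask, 0, 4);		/* bits 0-3 */
	bitmap_set(mask, 48, 8);	/* bits 48-55 */

	printk("%*pb\n", 64, mask);	/* "00ff0000,0000000f" */
	printk("%*pbl\n", 64, mask);	/* "0-3,48-55" */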
| 877 | static noinline_for_stack | ||
| 796 | char *mac_address_string(char *buf, char *end, u8 *addr, | 878 | char *mac_address_string(char *buf, char *end, u8 *addr, |
| 797 | struct printf_spec spec, const char *fmt) | 879 | struct printf_spec spec, const char *fmt) |
| 798 | { | 880 | { |
| @@ -1257,6 +1339,10 @@ int kptr_restrict __read_mostly; | |||
| 1257 | * - 'B' For backtraced symbolic direct pointers with offset | 1339 | * - 'B' For backtraced symbolic direct pointers with offset |
| 1258 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] | 1340 | * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref] |
| 1259 | * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] | 1341 | * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201] |
| 1342 | * - 'b[l]' For a bitmap, the number of bits is determined by the field | ||
| 1343 | * width which must be explicitly specified either as part of the | ||
| 1344 | * format string '%32pb[l]' or through '%*pb[l]', [l] selects | ||
| 1345 | * range-list format instead of hex format | ||
| 1260 | * - 'M' For a 6-byte MAC address, it prints the address in the | 1346 | * - 'M' For a 6-byte MAC address, it prints the address in the |
| 1261 | * usual colon-separated hex notation | 1347 | * usual colon-separated hex notation |
| 1262 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons | 1348 | * - 'm' For a 6-byte MAC address, it prints the hex address without colons |
| @@ -1353,6 +1439,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, | |||
| 1353 | return resource_string(buf, end, ptr, spec, fmt); | 1439 | return resource_string(buf, end, ptr, spec, fmt); |
| 1354 | case 'h': | 1440 | case 'h': |
| 1355 | return hex_string(buf, end, ptr, spec, fmt); | 1441 | return hex_string(buf, end, ptr, spec, fmt); |
| 1442 | case 'b': | ||
| 1443 | switch (fmt[1]) { | ||
| 1444 | case 'l': | ||
| 1445 | return bitmap_list_string(buf, end, ptr, spec, fmt); | ||
| 1446 | default: | ||
| 1447 | return bitmap_string(buf, end, ptr, spec, fmt); | ||
| 1448 | } | ||
| 1356 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ | 1449 | case 'M': /* Colon separated: 00:01:02:03:04:05 */ |
| 1357 | case 'm': /* Contiguous: 000102030405 */ | 1450 | case 'm': /* Contiguous: 000102030405 */ |
| 1358 | /* [mM]F (FDDI) */ | 1451 | /* [mM]F (FDDI) */ |
| @@ -1604,8 +1697,7 @@ qualifier: | |||
| 1604 | 1697 | ||
| 1605 | case 'p': | 1698 | case 'p': |
| 1606 | spec->type = FORMAT_TYPE_PTR; | 1699 | spec->type = FORMAT_TYPE_PTR; |
| 1607 | return fmt - start; | 1700 | return ++fmt - start; |
| 1608 | /* skip alnum */ | ||
| 1609 | 1701 | ||
| 1610 | case '%': | 1702 | case '%': |
| 1611 | spec->type = FORMAT_TYPE_PERCENT_CHAR; | 1703 | spec->type = FORMAT_TYPE_PERCENT_CHAR; |
| @@ -1689,6 +1781,8 @@ qualifier: | |||
| 1689 | * %pB output the name of a backtrace symbol with its offset | 1781 | * %pB output the name of a backtrace symbol with its offset |
| 1690 | * %pR output the address range in a struct resource with decoded flags | 1782 | * %pR output the address range in a struct resource with decoded flags |
| 1691 | * %pr output the address range in a struct resource with raw flags | 1783 | * %pr output the address range in a struct resource with raw flags |
| 1784 | * %pb output the bitmap with field width as the number of bits | ||
| 1785 | * %pbl output the bitmap as range list with field width as the number of bits | ||
| 1692 | * %pM output a 6-byte MAC address with colons | 1786 | * %pM output a 6-byte MAC address with colons |
| 1693 | * %pMR output a 6-byte MAC address with colons in reversed order | 1787 | * %pMR output a 6-byte MAC address with colons in reversed order |
| 1694 | * %pMF output a 6-byte MAC address with dashes | 1788 | * %pMF output a 6-byte MAC address with dashes |
| @@ -1728,7 +1822,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1728 | 1822 | ||
| 1729 | /* Reject out-of-range values early. Large positive sizes are | 1823 | /* Reject out-of-range values early. Large positive sizes are |
| 1730 | used for unknown buffer sizes. */ | 1824 | used for unknown buffer sizes. */ |
| 1731 | if (WARN_ON_ONCE((int) size < 0)) | 1825 | if (WARN_ON_ONCE(size > INT_MAX)) |
| 1732 | return 0; | 1826 | return 0; |
| 1733 | 1827 | ||
| 1734 | str = buf; | 1828 | str = buf; |
| @@ -1794,7 +1888,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) | |||
| 1794 | break; | 1888 | break; |
| 1795 | 1889 | ||
| 1796 | case FORMAT_TYPE_PTR: | 1890 | case FORMAT_TYPE_PTR: |
| 1797 | str = pointer(fmt+1, str, end, va_arg(args, void *), | 1891 | str = pointer(fmt, str, end, va_arg(args, void *), |
| 1798 | spec); | 1892 | spec); |
| 1799 | while (isalnum(*fmt)) | 1893 | while (isalnum(*fmt)) |
| 1800 | fmt++; | 1894 | fmt++; |
| @@ -2232,7 +2326,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf) | |||
| 2232 | } | 2326 | } |
| 2233 | 2327 | ||
| 2234 | case FORMAT_TYPE_PTR: | 2328 | case FORMAT_TYPE_PTR: |
| 2235 | str = pointer(fmt+1, str, end, get_arg(void *), spec); | 2329 | str = pointer(fmt, str, end, get_arg(void *), spec); |
| 2236 | while (isalnum(*fmt)) | 2330 | while (isalnum(*fmt)) |
| 2237 | fmt++; | 2331 | fmt++; |
| 2238 | break; | 2332 | break; |
