author     Jiri Kosina <jkosina@suse.cz>	2017-05-02 05:02:41 -0400
committer  Jiri Kosina <jkosina@suse.cz>	2017-05-02 05:02:41 -0400
commit     4d6ca227c768b50b05cf183974b40abe444e9d0c (patch)
tree       bf953d8e895281053548b9967a2c4b58d641df00 /lib
parent     800f3eef8ebc1264e9c135bfa892c8ae41fa4792 (diff)
parent     af22a610bc38508d5ea760507d31be6b6983dfa8 (diff)
Merge branch 'for-4.12/asus' into for-linus
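
Among the lib/ changes pulled in here, several boot-time self-tests
(CRC32, glob, atomic64) are converted from bool to tristate Kconfig
options so they can also be built and run as loadable modules. Below is a
minimal sketch of that boot-or-module selftest shape; it is illustrative
only, and the example_selftest_* names are hypothetical (the real
conversions are in the atomic64_test.c and crc32test.c hunks that follow):

	/*
	 * Illustrative sketch, not part of this merge. Built in (=y), the
	 * init function runs at boot; built as a module (=m), it runs at
	 * module load time.
	 */
	#include <linux/module.h>
	#include <linux/printk.h>

	static int __init example_selftest_init(void)
	{
		pr_info("example selftest: running\n");
		return 0;
	}

	static void __exit example_selftest_exit(void)
	{
	}

	module_init(example_selftest_init);
	module_exit(example_selftest_exit);

	MODULE_LICENSE("GPL");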
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                |   22
-rw-r--r--  lib/Kconfig.debug          |   57
-rw-r--r--  lib/Makefile               |   21
-rw-r--r--  lib/atomic64_test.c        |   10
-rw-r--r--  lib/bug.c                  |    1
-rw-r--r--  lib/crc32.c                |  824
-rw-r--r--  lib/crc32test.c            |  856
-rw-r--r--  lib/debugobjects.c         |    1
-rw-r--r--  lib/decompress_unlz4.c     |   13
-rw-r--r--  lib/digsig.c               |    2
-rw-r--r--  lib/dma-debug.c            |    7
-rw-r--r--  lib/dma-noop.c             |    4
-rw-r--r--  lib/dma-virt.c             |   72
-rw-r--r--  lib/dump_stack.c           |    1
-rw-r--r--  lib/find_bit.c             |    4
-rw-r--r--  lib/fonts/Kconfig          |   16
-rw-r--r--  lib/glob.c                 |  164
-rw-r--r--  lib/globtest.c             |  167
-rw-r--r--  lib/idr.c                  | 1242
-rw-r--r--  lib/ioremap.c              |   39
-rw-r--r--  lib/is_single_threaded.c   |    5
-rw-r--r--  lib/list_debug.c           |   45
-rw-r--r--  lib/lz4/Makefile           |    2
-rw-r--r--  lib/lz4/lz4_compress.c     | 1141
-rw-r--r--  lib/lz4/lz4_decompress.c   |  665
-rw-r--r--  lib/lz4/lz4defs.h          |  338
-rw-r--r--  lib/lz4/lz4hc_compress.c   |  846
-rw-r--r--  lib/nmi_backtrace.c        |    3
-rw-r--r--  lib/parman.c               |  376
-rw-r--r--  lib/percpu_counter.c       |    5
-rw-r--r--  lib/percpu_ida.c           |    3
-rw-r--r--  lib/plist.c                |    1
-rw-r--r--  lib/prime_numbers.c        |  315
-rw-r--r--  lib/radix-tree.c           |  762
-rw-r--r--  lib/rbtree.c               |    4
-rw-r--r--  lib/refcount.c             |  267
-rw-r--r--  lib/rhashtable.c           |  274
-rw-r--r--  lib/sbitmap.c              |    1
-rw-r--r--  lib/scatterlist.c          |    6
-rw-r--r--  lib/show_mem.c             |    4
-rw-r--r--  lib/siphash.c              |  551
-rw-r--r--  lib/smp_processor_id.c     |    2
-rw-r--r--  lib/sort.c                 |   41
-rw-r--r--  lib/syscall.c              |    1
-rw-r--r--  lib/test_firmware.c        |   92
-rw-r--r--  lib/test_kasan.c           |   34
-rw-r--r--  lib/test_parman.c          |  395
-rw-r--r--  lib/test_siphash.c         |  223
-rw-r--r--  lib/test_sort.c            |   44
-rw-r--r--  lib/test_user_copy.c       |  118
-rw-r--r--  lib/vsprintf.c             |    8
51 files changed, 6680 insertions(+), 3415 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 260a80e313b9..0c8b78a9ae2e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -103,8 +103,7 @@ config CRC32
 	  functions require M here.
 
 config CRC32_SELFTEST
-	bool "CRC32 perform self test on init"
-	default n
+	tristate "CRC32 perform self test on init"
 	depends on CRC32
 	help
 	  This option enables the CRC32 library functions to perform a
@@ -395,6 +394,16 @@ config HAS_DMA
 	depends on !NO_DMA
 	default y
 
+config DMA_NOOP_OPS
+	bool
+	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
+	default n
+
+config DMA_VIRT_OPS
+	bool
+	depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
+	default n
+
 config CHECK_SIGNATURE
 	bool
 
@@ -432,8 +441,7 @@ config GLOB
 	  depends on this.
 
 config GLOB_SELFTEST
-	bool "glob self-test on init"
-	default n
+	tristate "glob self-test on init"
 	depends on GLOB
 	help
 	  This option enables a simple self-test of the glob_match
@@ -550,4 +558,10 @@ config STACKDEPOT
 config SBITMAP
 	bool
 
+config PARMAN
+	tristate "parman" if COMPILE_TEST
+
+config PRIME_NUMBERS
+	tristate
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index acedbe626d47..97d62c2da6c2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -416,6 +416,16 @@ config MAGIC_SYSRQ_DEFAULT_ENABLE
 	  This may be set to 1 or 0 to enable or disable them all, or
 	  to a bitmask as described in Documentation/sysrq.txt.
 
+config MAGIC_SYSRQ_SERIAL
+	bool "Enable magic SysRq key over serial"
+	depends on MAGIC_SYSRQ
+	default y
+	help
+	  Many embedded boards have a disconnected TTL level serial which can
+	  generate some garbage that can lead to spurious false sysrq detects.
+	  This option allows you to decide whether you want to enable the
+	  magic SysRq key.
+
 config DEBUG_KERNEL
 	bool "Kernel debugging"
 	help
@@ -622,9 +632,12 @@ config DEBUG_VM_PGFLAGS
 
 	  If unsure, say N.
 
+config ARCH_HAS_DEBUG_VIRTUAL
+	bool
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
-	depends on DEBUG_KERNEL && X86
+	depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
 	help
 	  Enable some costly sanity checks in virtual to page code. This can
 	  catch mistakes with virt_to_page() and friends.
@@ -716,19 +729,6 @@ source "lib/Kconfig.kmemcheck"
 
 source "lib/Kconfig.kasan"
 
-config DEBUG_REFCOUNT
-	bool "Verbose refcount checks"
-	help
-	  Say Y here if you want reference counters (refcount_t and kref) to
-	  generate WARNs on dubious usage. Without this refcount_t will still
-	  be a saturating counter and avoid Use-After-Free by turning it into
-	  a resource leak Denial-Of-Service.
-
-	  Use of this option will increase kernel text size but will alert the
-	  admin of potential abuse.
-
-	  If in doubt, say "N".
-
 endmenu # "Memory Debugging"
 
 config ARCH_HAS_KCOV
@@ -1726,6 +1726,14 @@ config TEST_LIST_SORT
 
 	  If unsure, say N.
 
+config TEST_SORT
+	bool "Array-based sort test"
+	depends on DEBUG_KERNEL
+	help
+	  This option enables the self-test function of 'sort()' at boot.
+
+	  If unsure, say N.
+
 config KPROBES_SANITY_TEST
 	bool "Kprobes sanity tests"
 	depends on DEBUG_KERNEL
@@ -1777,9 +1785,10 @@ config PERCPU_TEST
 	  If unsure, say N.
 
 config ATOMIC64_SELFTEST
-	bool "Perform an atomic64_t self-test at boot"
+	tristate "Perform an atomic64_t self-test"
 	help
-	  Enable this option to test the atomic64_t functions at boot.
+	  Enable this option to test the atomic64_t functions at boot or
+	  at module load time.
 
 	  If unsure, say N.
 
@@ -1831,13 +1840,23 @@ config TEST_HASH
 	tristate "Perform selftest on hash functions"
 	default n
 	help
-	  Enable this option to test the kernel's integer (<linux/hash,h>)
-	  and string (<linux/stringhash.h>) hash functions on boot
-	  (or module load).
+	  Enable this option to test the kernel's integer (<linux/hash.h>),
+	  string (<linux/stringhash.h>), and siphash (<linux/siphash.h>)
+	  hash functions on boot (or module load).
 
 	  This is intended to help people writing architecture-specific
 	  optimized versions. If unsure, say N.
 
+config TEST_PARMAN
+	tristate "Perform selftest on priority array manager"
+	default n
+	depends on PARMAN
+	help
+	  Enable this option to test priority array manager on boot
+	  (or module load).
+
+	  If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index 19ea76149a37..320ac46a8725 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -22,11 +22,16 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
-	 earlycpio.o seq_buf.o nmi_backtrace.o nodemask.o win_minmax.o
+	 earlycpio.o seq_buf.o siphash.o \
+	 nmi_backtrace.o nodemask.o win_minmax.o
+
+CFLAGS_radix-tree.o += -DCONFIG_SPARSE_RCU_POINTER
+CFLAGS_idr.o += -DCONFIG_SPARSE_RCU_POINTER
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_HAS_DMA) += dma-noop.o
+lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
 
 lib-y += kobject.o klist.o
 obj-y += lockref.o
@@ -36,7 +41,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
 	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
-	 once.o
+	 once.o refcount.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
@@ -44,17 +49,19 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_BPF) += test_bpf.o
 obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
-obj-$(CONFIG_TEST_HASH) += test_hash.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
 obj-$(CONFIG_TEST_KASAN) += test_kasan.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 obj-$(CONFIG_TEST_LKM) += test_module.o
 obj-$(CONFIG_TEST_RHASHTABLE) += test_rhashtable.o
+obj-$(CONFIG_TEST_SORT) += test_sort.o
 obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
 obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
 obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
@@ -90,6 +97,7 @@ obj-$(CONFIG_CRC16) += crc16.o
 obj-$(CONFIG_CRC_T10DIF)+= crc-t10dif.o
 obj-$(CONFIG_CRC_ITU_T) += crc-itu-t.o
 obj-$(CONFIG_CRC32)	+= crc32.o
+obj-$(CONFIG_CRC32_SELFTEST)	+= crc32test.o
 obj-$(CONFIG_CRC7)	+= crc7.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
 obj-$(CONFIG_CRC8)	+= crc8.o
@@ -159,6 +167,7 @@ obj-$(CONFIG_CORDIC) += cordic.o
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_GLOB) += glob.o
+obj-$(CONFIG_GLOB_SELFTEST) += globtest.o
 
 obj-$(CONFIG_MPILIB) += mpi/
 obj-$(CONFIG_SIGNATURE) += digsig.o
@@ -196,6 +205,8 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o
 
 obj-$(CONFIG_FONT_SUPPORT) += fonts/
 
+obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o
+
 hostprogs-y := gen_crc32table
 clean-files := crc32table.h
 
@@ -229,3 +240,5 @@ obj-$(CONFIG_UBSAN) += ubsan.o
 UBSAN_SANITIZE_ubsan.o := n
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
+
+obj-$(CONFIG_PARMAN) += parman.o
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 46042901130f..fd70c0e0e673 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -15,6 +15,7 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/atomic.h>
+#include <linux/module.h>
 
 #ifdef CONFIG_X86
 #include <asm/cpufeature.h>	/* for boot_cpu_has below */
@@ -241,7 +242,7 @@ static __init void test_atomic64(void)
 	BUG_ON(v.counter != r);
 }
 
-static __init int test_atomics(void)
+static __init int test_atomics_init(void)
 {
 	test_atomic();
 	test_atomic64();
@@ -264,4 +265,9 @@ static __init int test_atomics(void)
 	return 0;
 }
 
-core_initcall(test_atomics);
+static __exit void test_atomics_exit(void) {}
+
+module_init(test_atomics_init);
+module_exit(test_atomics_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/bug.c b/lib/bug.c
index bc3656e944d2..06edbbef0623 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -45,6 +45,7 @@
 #include <linux/kernel.h>
 #include <linux/bug.h>
 #include <linux/sched.h>
+#include <linux/rculist.h>
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
diff --git a/lib/crc32.c b/lib/crc32.c
index 7fbd1a112b9d..6ddc92bc1460 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -340,827 +340,3 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
 }
 #endif
 EXPORT_SYMBOL(crc32_be);
343
344#ifdef CONFIG_CRC32_SELFTEST
345
346/* 4096 random bytes */
347static u8 const __aligned(8) test_buf[] __initconst =
348{
349 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
350 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
351 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
352 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
353 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
354 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
355 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
356 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
357 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
358 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
359 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
360 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
361 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
362 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
363 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
364 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
365 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
366 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
367 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
368 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
369 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
370 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
371 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
372 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
373 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
374 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
375 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
376 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
377 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
378 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
379 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
380 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
381 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
382 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
383 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
384 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
385 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
386 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
387 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
388 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
389 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
390 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
391 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
392 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
393 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
394 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
395 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
396 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
397 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
398 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
399 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
400 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
401 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
402 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
403 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
404 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
405 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
406 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
407 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
408 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
409 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
410 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
411 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
412 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
413 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
414 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
415 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
416 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
417 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
418 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
419 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
420 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
421 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
422 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
423 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
424 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
425 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
426 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
427 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
428 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
429 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
430 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
431 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
432 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
433 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
434 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
435 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
436 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
437 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
438 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
439 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
440 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
441 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
442 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
443 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
444 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
445 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
446 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
447 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
448 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
449 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
450 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
451 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
452 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
453 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
454 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
455 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
456 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
457 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
458 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
459 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
460 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
461 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
462 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
463 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
464 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
465 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
466 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
467 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
468 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
469 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
470 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
471 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
472 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
473 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
474 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
475 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
476 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
477 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
478 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
479 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
480 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
481 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
482 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
483 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
484 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
485 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
486 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
487 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
488 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
489 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
490 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
491 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
492 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
493 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
494 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
495 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
496 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
497 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
498 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
499 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
500 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
501 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
502 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
503 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
504 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
505 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
506 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
507 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
508 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
509 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
510 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
511 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
512 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
513 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
514 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
515 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
516 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
517 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
518 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
519 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
520 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
521 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
522 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
523 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
524 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
525 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
526 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
527 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
528 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
529 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
530 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
531 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
532 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
533 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
534 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
535 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
536 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
537 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
538 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
539 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
540 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
541 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
542 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
543 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
544 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
545 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
546 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
547 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
548 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
549 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
550 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
551 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
552 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
553 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
554 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
555 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
556 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
557 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
558 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
559 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
560 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
561 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
562 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
563 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
564 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
565 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
566 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
567 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
568 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
569 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
570 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
571 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
572 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
573 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
574 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
575 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
576 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
577 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
578 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
579 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
580 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
581 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
582 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
583 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
584 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
585 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
586 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
587 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
588 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
589 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
590 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
591 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
592 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
593 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
594 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
595 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
596 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
597 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
598 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
599 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
600 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
601 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
602 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
603 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
604 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
605 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
606 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
607 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
608 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
609 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
610 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
611 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
612 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
613 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
614 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
615 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
616 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
617 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
618 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
619 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
620 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
621 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
622 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
623 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
624 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
625 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
626 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
627 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
628 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
629 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
630 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
631 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
632 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
633 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
634 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
635 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
636 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
637 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
638 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
639 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
640 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
641 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
642 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
643 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
644 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
645 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
646 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
647 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
648 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
649 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
650 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
651 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
652 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
653 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
654 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
655 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
656 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
657 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
658 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
659 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
660 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
661 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
662 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
663 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
664 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
665 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
666 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
667 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
668 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
669 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
670 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
671 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
672 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
673 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
674 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
675 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
676 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
677 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
678 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
679 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
680 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
681 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
682 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
683 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
684 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
685 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
686 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
687 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
688 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
689 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
690 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
691 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
692 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
693 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
694 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
695 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
696 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
697 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
698 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
699 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
700 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
701 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
702 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
703 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
704 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
705 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
706 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
707 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
708 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
709 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
710 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
711 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
712 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
713 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
714 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
715 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
716 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
717 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
718 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
719 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
720 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
721 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
722 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
723 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
724 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
725 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
726 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
727 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
728 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
729 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
730 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
731 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
732 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
733 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
734 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
735 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
736 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
737 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
738 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
739 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
740 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
741 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
742 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
743 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
744 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
745 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
746 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
747 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
748 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
749 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
750 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
751 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
752 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
753 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
754 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
755 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
756 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
757 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
758 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
759 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
760 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
761 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
762 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
763 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
764 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
765 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
766 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
767 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
768 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
769 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
770 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
771 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
772 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
773 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
774 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
775 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
776 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
777 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
778 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
779 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
780 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
781 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
782 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
783 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
784 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
785 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
786 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
787 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
788 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
789 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
790 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
791 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
792 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
793 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
794 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
795 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
796 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
797 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
798 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
799 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
800 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
801 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
802 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
803 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
804 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
805 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
806 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
807 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
808 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
809 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
810 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
811 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
812 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
813 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
814 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
815 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
816 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
817 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
818 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
819 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
820 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
821 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
822 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
823 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
824 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
825 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
826 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
827 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
828 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
829 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
830 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
831 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
832 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
833 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
834 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
835 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
836 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
837 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
838 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
839 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
840 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
841 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
842 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
843 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
844 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
845 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
846 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
847 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
848 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
849 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
850 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
851 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
852 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
853 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
854 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
855 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
856 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
857 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
858 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
859 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
860 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
861};
862
863/* 100 test cases */
864static struct crc_test {
865 u32 crc; /* random starting crc */
866 u32 start; /* random 6 bit offset in buf */
867 u32 length; /* random 11 bit length of test */
868 u32 crc_le; /* expected crc32_le result */
869 u32 crc_be; /* expected crc32_be result */
870 u32 crc32c_le; /* expected crc32c_le result */
871} const test[] __initconst =
872{
873 {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
874 {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
875 {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
876 {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
877 {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
878 {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
879 {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
880 {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
881 {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
882 {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
883 {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
884 {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
885 {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
886 {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
887 {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
888 {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
889 {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
890 {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
891 {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
892 {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
893 {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
894 {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
895 {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
896 {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
897 {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
898 {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
899 {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
900 {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
901 {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
902 {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
903 {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
904 {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
905 {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
906 {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
907 {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
908 {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
909 {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
910 {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
911 {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
912 {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
913 {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
914 {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
915 {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
916 {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
917 {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
918 {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
919 {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
920 {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
921 {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
922 {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
923 {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
924 {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
925 {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
926 {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
927 {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
928 {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
929 {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
930 {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
931 {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
932 {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
933 {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
934 {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
935 {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
936 {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
937 {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
938 {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
939 {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
940 {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
941 {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
942 {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
943 {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
944 {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
945 {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
946 {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
947 {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
948 {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
949 {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
950 {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
951 {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
952 {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
953 {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
954 {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
955 {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
956 {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
957 {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
958 {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
959 {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
960 {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
961 {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
962 {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
963 {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
964 {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
965 {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
966 {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
967 {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
968 {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
969 {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
970 {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
971 {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
972 {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
973};
974
975#include <linux/time.h>
976
977static int __init crc32c_test(void)
978{
979 int i;
980 int errors = 0;
981 int bytes = 0;
982 u64 nsec;
983 unsigned long flags;
984
985 /* keep static to prevent cache warming code from
986 * getting eliminated by the compiler */
987 static u32 crc;
988
989 /* pre-warm the cache */
990 for (i = 0; i < 100; i++) {
991 bytes += 2*test[i].length;
992
993 crc ^= __crc32c_le(test[i].crc, test_buf +
994 test[i].start, test[i].length);
995 }
996
997 /* reduce OS noise */
998 local_irq_save(flags);
999 local_irq_disable();
1000
1001 nsec = ktime_get_ns();
1002 for (i = 0; i < 100; i++) {
1003 if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
1004 test[i].start, test[i].length))
1005 errors++;
1006 }
1007 nsec = ktime_get_ns() - nsec;
1008
1009 local_irq_restore(flags);
1010 local_irq_enable();
1011
1012 pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
1013
1014 if (errors)
1015 pr_warn("crc32c: %d self tests failed\n", errors);
1016 else {
1017 pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
1018 bytes, nsec);
1019 }
1020
1021 return 0;
1022}
1023
1024static int __init crc32c_combine_test(void)
1025{
1026 int i, j;
1027 int errors = 0, runs = 0;
1028
1029 for (i = 0; i < 10; i++) {
1030 u32 crc_full;
1031
1032 crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
1033 test[i].length);
1034 for (j = 0; j <= test[i].length; ++j) {
1035 u32 crc1, crc2;
1036 u32 len1 = j, len2 = test[i].length - j;
1037
1038 crc1 = __crc32c_le(test[i].crc, test_buf +
1039 test[i].start, len1);
1040 crc2 = __crc32c_le(0, test_buf + test[i].start +
1041 len1, len2);
1042
1043 if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
1044 crc_full == test[i].crc32c_le))
1045 errors++;
1046 runs++;
1047 cond_resched();
1048 }
1049 }
1050
1051 if (errors)
1052 pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
1053 else
1054 pr_info("crc32c_combine: %d self tests passed\n", runs);
1055
1056 return 0;
1057}
1058
1059static int __init crc32_test(void)
1060{
1061 int i;
1062 int errors = 0;
1063 int bytes = 0;
1064 u64 nsec;
1065 unsigned long flags;
1066
1067 /* keep static to prevent cache warming code from
1068 * getting eliminated by the compiler */
1069 static u32 crc;
1070
1071 /* pre-warm the cache */
1072 for (i = 0; i < 100; i++) {
1073 bytes += 2*test[i].length;
1074
1075 crc ^= crc32_le(test[i].crc, test_buf +
1076 test[i].start, test[i].length);
1077
1078 crc ^= crc32_be(test[i].crc, test_buf +
1079 test[i].start, test[i].length);
1080 }
1081
1082 /* reduce OS noise */
1083 local_irq_save(flags);
1084 local_irq_disable();
1085
1086 nsec = ktime_get_ns();
1087 for (i = 0; i < 100; i++) {
1088 if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
1089 test[i].start, test[i].length))
1090 errors++;
1091
1092 if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
1093 test[i].start, test[i].length))
1094 errors++;
1095 }
1096 nsec = ktime_get_ns() - nsec;
1097
1098 local_irq_restore(flags);
1099 local_irq_enable();
1100
1101 pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n",
1102 CRC_LE_BITS, CRC_BE_BITS);
1103
1104 if (errors)
1105 pr_warn("crc32: %d self tests failed\n", errors);
1106 else {
1107 pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
1108 bytes, nsec);
1109 }
1110
1111 return 0;
1112}
1113
1114static int __init crc32_combine_test(void)
1115{
1116 int i, j;
1117 int errors = 0, runs = 0;
1118
1119 for (i = 0; i < 10; i++) {
1120 u32 crc_full;
1121
1122 crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
1123 test[i].length);
1124 for (j = 0; j <= test[i].length; ++j) {
1125 u32 crc1, crc2;
1126 u32 len1 = j, len2 = test[i].length - j;
1127
1128 crc1 = crc32_le(test[i].crc, test_buf +
1129 test[i].start, len1);
1130 crc2 = crc32_le(0, test_buf + test[i].start +
1131 len1, len2);
1132
1133 if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
1134 crc_full == test[i].crc_le))
1135 errors++;
1136 runs++;
1137 cond_resched();
1138 }
1139 }
1140
1141 if (errors)
1142 pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
1143 else
1144 pr_info("crc32_combine: %d self tests passed\n", runs);
1145
1146 return 0;
1147}
1148
1149static int __init crc32test_init(void)
1150{
1151 crc32_test();
1152 crc32c_test();
1153
1154 crc32_combine_test();
1155 crc32c_combine_test();
1156
1157 return 0;
1158}
1159
1160static void __exit crc32_exit(void)
1161{
1162}
1163
1164module_init(crc32test_init);
1165module_exit(crc32_exit);
1166#endif /* CONFIG_CRC32_SELFTEST */
diff --git a/lib/crc32test.c b/lib/crc32test.c
new file mode 100644
index 000000000000..97d6a57cefcc
--- /dev/null
+++ b/lib/crc32test.c
@@ -0,0 +1,856 @@
1/*
2 * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin
3 * cleaned up code to current version of sparse and added the slicing-by-8
4 * algorithm to the closely similar existing slicing-by-4 algorithm.
5 *
6 * Oct 15, 2000 Matt Domsch <Matt_Domsch@dell.com>
7 * Nicer crc32 functions/docs submitted by linux@horizon.com. Thanks!
8 * Code was from the public domain, copyright abandoned. Code was
9 * subsequently included in the kernel, thus was re-licensed under the
10 * GNU GPL v2.
11 *
12 * Oct 12, 2000 Matt Domsch <Matt_Domsch@dell.com>
13 * Same crc32 function was used in 5 other places in the kernel.
14 * I made one version, and deleted the others.
15 * There are various incantations of crc32(). Some use a seed of 0 or ~0.
16 * Some xor at the end with ~0. The generic crc32() function takes
17 * seed as an argument, and doesn't xor at the end. Then individual
18 * users can do whatever they need.
19 * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0.
20 * fs/jffs2 uses seed 0, doesn't xor with ~0.
21 * fs/partitions/efi.c uses seed ~0, xor's with ~0.
22 *
23 * This source code is licensed under the GNU General Public License,
24 * Version 2. See the file COPYING for more details.
25 */
26
27#include <linux/crc32.h>
28#include <linux/module.h>
29#include <linux/sched.h>
30
31#include "crc32defs.h"
32
33/* 4096 random bytes */
34static u8 const __aligned(8) test_buf[] __initconst =
35{
36 0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
37 0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
38 0xc9, 0x6e, 0x8b, 0xdb, 0x98, 0x6b, 0xaa, 0x60,
39 0xa8, 0xb5, 0xbc, 0x6c, 0xa9, 0xb1, 0x5b, 0x2c,
40 0xea, 0xb4, 0x92, 0x6a, 0x3f, 0x79, 0x91, 0xe4,
41 0xe9, 0x70, 0x51, 0x8c, 0x7f, 0x95, 0x6f, 0x1a,
42 0x56, 0xa1, 0x5c, 0x27, 0x03, 0x67, 0x9f, 0x3a,
43 0xe2, 0x31, 0x11, 0x29, 0x6b, 0x98, 0xfc, 0xc4,
44 0x53, 0x24, 0xc5, 0x8b, 0xce, 0x47, 0xb2, 0xb9,
45 0x32, 0xcb, 0xc1, 0xd0, 0x03, 0x57, 0x4e, 0xd4,
46 0xe9, 0x3c, 0xa1, 0x63, 0xcf, 0x12, 0x0e, 0xca,
47 0xe1, 0x13, 0xd1, 0x93, 0xa6, 0x88, 0x5c, 0x61,
48 0x5b, 0xbb, 0xf0, 0x19, 0x46, 0xb4, 0xcf, 0x9e,
49 0xb6, 0x6b, 0x4c, 0x3a, 0xcf, 0x60, 0xf9, 0x7a,
50 0x8d, 0x07, 0x63, 0xdb, 0x40, 0xe9, 0x0b, 0x6f,
51 0xad, 0x97, 0xf1, 0xed, 0xd0, 0x1e, 0x26, 0xfd,
52 0xbf, 0xb7, 0xc8, 0x04, 0x94, 0xf8, 0x8b, 0x8c,
53 0xf1, 0xab, 0x7a, 0xd4, 0xdd, 0xf3, 0xe8, 0x88,
54 0xc3, 0xed, 0x17, 0x8a, 0x9b, 0x40, 0x0d, 0x53,
55 0x62, 0x12, 0x03, 0x5f, 0x1b, 0x35, 0x32, 0x1f,
56 0xb4, 0x7b, 0x93, 0x78, 0x0d, 0xdb, 0xce, 0xa4,
57 0xc0, 0x47, 0xd5, 0xbf, 0x68, 0xe8, 0x5d, 0x74,
58 0x8f, 0x8e, 0x75, 0x1c, 0xb2, 0x4f, 0x9a, 0x60,
59 0xd1, 0xbe, 0x10, 0xf4, 0x5c, 0xa1, 0x53, 0x09,
60 0xa5, 0xe0, 0x09, 0x54, 0x85, 0x5c, 0xdc, 0x07,
61 0xe7, 0x21, 0x69, 0x7b, 0x8a, 0xfd, 0x90, 0xf1,
62 0x22, 0xd0, 0xb4, 0x36, 0x28, 0xe6, 0xb8, 0x0f,
63 0x39, 0xde, 0xc8, 0xf3, 0x86, 0x60, 0x34, 0xd2,
64 0x5e, 0xdf, 0xfd, 0xcf, 0x0f, 0xa9, 0x65, 0xf0,
65 0xd5, 0x4d, 0x96, 0x40, 0xe3, 0xdf, 0x3f, 0x95,
66 0x5a, 0x39, 0x19, 0x93, 0xf4, 0x75, 0xce, 0x22,
67 0x00, 0x1c, 0x93, 0xe2, 0x03, 0x66, 0xf4, 0x93,
68 0x73, 0x86, 0x81, 0x8e, 0x29, 0x44, 0x48, 0x86,
69 0x61, 0x7c, 0x48, 0xa3, 0x43, 0xd2, 0x9c, 0x8d,
70 0xd4, 0x95, 0xdd, 0xe1, 0x22, 0x89, 0x3a, 0x40,
71 0x4c, 0x1b, 0x8a, 0x04, 0xa8, 0x09, 0x69, 0x8b,
72 0xea, 0xc6, 0x55, 0x8e, 0x57, 0xe6, 0x64, 0x35,
73 0xf0, 0xc7, 0x16, 0x9f, 0x5d, 0x5e, 0x86, 0x40,
74 0x46, 0xbb, 0xe5, 0x45, 0x88, 0xfe, 0xc9, 0x63,
75 0x15, 0xfb, 0xf5, 0xbd, 0x71, 0x61, 0xeb, 0x7b,
76 0x78, 0x70, 0x07, 0x31, 0x03, 0x9f, 0xb2, 0xc8,
77 0xa7, 0xab, 0x47, 0xfd, 0xdf, 0xa0, 0x78, 0x72,
78 0xa4, 0x2a, 0xe4, 0xb6, 0xba, 0xc0, 0x1e, 0x86,
79 0x71, 0xe6, 0x3d, 0x18, 0x37, 0x70, 0xe6, 0xff,
80 0xe0, 0xbc, 0x0b, 0x22, 0xa0, 0x1f, 0xd3, 0xed,
81 0xa2, 0x55, 0x39, 0xab, 0xa8, 0x13, 0x73, 0x7c,
82 0x3f, 0xb2, 0xd6, 0x19, 0xac, 0xff, 0x99, 0xed,
83 0xe8, 0xe6, 0xa6, 0x22, 0xe3, 0x9c, 0xf1, 0x30,
84 0xdc, 0x01, 0x0a, 0x56, 0xfa, 0xe4, 0xc9, 0x99,
85 0xdd, 0xa8, 0xd8, 0xda, 0x35, 0x51, 0x73, 0xb4,
86 0x40, 0x86, 0x85, 0xdb, 0x5c, 0xd5, 0x85, 0x80,
87 0x14, 0x9c, 0xfd, 0x98, 0xa9, 0x82, 0xc5, 0x37,
88 0xff, 0x32, 0x5d, 0xd0, 0x0b, 0xfa, 0xdc, 0x04,
89 0x5e, 0x09, 0xd2, 0xca, 0x17, 0x4b, 0x1a, 0x8e,
90 0x15, 0xe1, 0xcc, 0x4e, 0x52, 0x88, 0x35, 0xbd,
91 0x48, 0xfe, 0x15, 0xa0, 0x91, 0xfd, 0x7e, 0x6c,
92 0x0e, 0x5d, 0x79, 0x1b, 0x81, 0x79, 0xd2, 0x09,
93 0x34, 0x70, 0x3d, 0x81, 0xec, 0xf6, 0x24, 0xbb,
94 0xfb, 0xf1, 0x7b, 0xdf, 0x54, 0xea, 0x80, 0x9b,
95 0xc7, 0x99, 0x9e, 0xbd, 0x16, 0x78, 0x12, 0x53,
96 0x5e, 0x01, 0xa7, 0x4e, 0xbd, 0x67, 0xe1, 0x9b,
97 0x4c, 0x0e, 0x61, 0x45, 0x97, 0xd2, 0xf0, 0x0f,
98 0xfe, 0x15, 0x08, 0xb7, 0x11, 0x4c, 0xe7, 0xff,
99 0x81, 0x53, 0xff, 0x91, 0x25, 0x38, 0x7e, 0x40,
100 0x94, 0xe5, 0xe0, 0xad, 0xe6, 0xd9, 0x79, 0xb6,
101 0x92, 0xc9, 0xfc, 0xde, 0xc3, 0x1a, 0x23, 0xbb,
102 0xdd, 0xc8, 0x51, 0x0c, 0x3a, 0x72, 0xfa, 0x73,
103 0x6f, 0xb7, 0xee, 0x61, 0x39, 0x03, 0x01, 0x3f,
104 0x7f, 0x94, 0x2e, 0x2e, 0xba, 0x3a, 0xbb, 0xb4,
105 0xfa, 0x6a, 0x17, 0xfe, 0xea, 0xef, 0x5e, 0x66,
106 0x97, 0x3f, 0x32, 0x3d, 0xd7, 0x3e, 0xb1, 0xf1,
107 0x6c, 0x14, 0x4c, 0xfd, 0x37, 0xd3, 0x38, 0x80,
108 0xfb, 0xde, 0xa6, 0x24, 0x1e, 0xc8, 0xca, 0x7f,
109 0x3a, 0x93, 0xd8, 0x8b, 0x18, 0x13, 0xb2, 0xe5,
110 0xe4, 0x93, 0x05, 0x53, 0x4f, 0x84, 0x66, 0xa7,
111 0x58, 0x5c, 0x7b, 0x86, 0x52, 0x6d, 0x0d, 0xce,
112 0xa4, 0x30, 0x7d, 0xb6, 0x18, 0x9f, 0xeb, 0xff,
113 0x22, 0xbb, 0x72, 0x29, 0xb9, 0x44, 0x0b, 0x48,
114 0x1e, 0x84, 0x71, 0x81, 0xe3, 0x6d, 0x73, 0x26,
115 0x92, 0xb4, 0x4d, 0x2a, 0x29, 0xb8, 0x1f, 0x72,
116 0xed, 0xd0, 0xe1, 0x64, 0x77, 0xea, 0x8e, 0x88,
117 0x0f, 0xef, 0x3f, 0xb1, 0x3b, 0xad, 0xf9, 0xc9,
118 0x8b, 0xd0, 0xac, 0xc6, 0xcc, 0xa9, 0x40, 0xcc,
119 0x76, 0xf6, 0x3b, 0x53, 0xb5, 0x88, 0xcb, 0xc8,
120 0x37, 0xf1, 0xa2, 0xba, 0x23, 0x15, 0x99, 0x09,
121 0xcc, 0xe7, 0x7a, 0x3b, 0x37, 0xf7, 0x58, 0xc8,
122 0x46, 0x8c, 0x2b, 0x2f, 0x4e, 0x0e, 0xa6, 0x5c,
123 0xea, 0x85, 0x55, 0xba, 0x02, 0x0e, 0x0e, 0x48,
124 0xbc, 0xe1, 0xb1, 0x01, 0x35, 0x79, 0x13, 0x3d,
125 0x1b, 0xc0, 0x53, 0x68, 0x11, 0xe7, 0x95, 0x0f,
126 0x9d, 0x3f, 0x4c, 0x47, 0x7b, 0x4d, 0x1c, 0xae,
127 0x50, 0x9b, 0xcb, 0xdd, 0x05, 0x8d, 0x9a, 0x97,
128 0xfd, 0x8c, 0xef, 0x0c, 0x1d, 0x67, 0x73, 0xa8,
129 0x28, 0x36, 0xd5, 0xb6, 0x92, 0x33, 0x40, 0x75,
130 0x0b, 0x51, 0xc3, 0x64, 0xba, 0x1d, 0xc2, 0xcc,
131 0xee, 0x7d, 0x54, 0x0f, 0x27, 0x69, 0xa7, 0x27,
132 0x63, 0x30, 0x29, 0xd9, 0xc8, 0x84, 0xd8, 0xdf,
133 0x9f, 0x68, 0x8d, 0x04, 0xca, 0xa6, 0xc5, 0xc7,
134 0x7a, 0x5c, 0xc8, 0xd1, 0xcb, 0x4a, 0xec, 0xd0,
135 0xd8, 0x20, 0x69, 0xc5, 0x17, 0xcd, 0x78, 0xc8,
136 0x75, 0x23, 0x30, 0x69, 0xc9, 0xd4, 0xea, 0x5c,
137 0x4f, 0x6b, 0x86, 0x3f, 0x8b, 0xfe, 0xee, 0x44,
138 0xc9, 0x7c, 0xb7, 0xdd, 0x3e, 0xe5, 0xec, 0x54,
139 0x03, 0x3e, 0xaa, 0x82, 0xc6, 0xdf, 0xb2, 0x38,
140 0x0e, 0x5d, 0xb3, 0x88, 0xd9, 0xd3, 0x69, 0x5f,
141 0x8f, 0x70, 0x8a, 0x7e, 0x11, 0xd9, 0x1e, 0x7b,
142 0x38, 0xf1, 0x42, 0x1a, 0xc0, 0x35, 0xf5, 0xc7,
143 0x36, 0x85, 0xf5, 0xf7, 0xb8, 0x7e, 0xc7, 0xef,
144 0x18, 0xf1, 0x63, 0xd6, 0x7a, 0xc6, 0xc9, 0x0e,
145 0x4d, 0x69, 0x4f, 0x84, 0xef, 0x26, 0x41, 0x0c,
146 0xec, 0xc7, 0xe0, 0x7e, 0x3c, 0x67, 0x01, 0x4c,
147 0x62, 0x1a, 0x20, 0x6f, 0xee, 0x47, 0x4d, 0xc0,
148 0x99, 0x13, 0x8d, 0x91, 0x4a, 0x26, 0xd4, 0x37,
149 0x28, 0x90, 0x58, 0x75, 0x66, 0x2b, 0x0a, 0xdf,
150 0xda, 0xee, 0x92, 0x25, 0x90, 0x62, 0x39, 0x9e,
151 0x44, 0x98, 0xad, 0xc1, 0x88, 0xed, 0xe4, 0xb4,
152 0xaf, 0xf5, 0x8c, 0x9b, 0x48, 0x4d, 0x56, 0x60,
153 0x97, 0x0f, 0x61, 0x59, 0x9e, 0xa6, 0x27, 0xfe,
154 0xc1, 0x91, 0x15, 0x38, 0xb8, 0x0f, 0xae, 0x61,
155 0x7d, 0x26, 0x13, 0x5a, 0x73, 0xff, 0x1c, 0xa3,
156 0x61, 0x04, 0x58, 0x48, 0x55, 0x44, 0x11, 0xfe,
157 0x15, 0xca, 0xc3, 0xbd, 0xca, 0xc5, 0xb4, 0x40,
158 0x5d, 0x1b, 0x7f, 0x39, 0xb5, 0x9c, 0x35, 0xec,
159 0x61, 0x15, 0x32, 0x32, 0xb8, 0x4e, 0x40, 0x9f,
160 0x17, 0x1f, 0x0a, 0x4d, 0xa9, 0x91, 0xef, 0xb7,
161 0xb0, 0xeb, 0xc2, 0x83, 0x9a, 0x6c, 0xd2, 0x79,
162 0x43, 0x78, 0x5e, 0x2f, 0xe5, 0xdd, 0x1a, 0x3c,
163 0x45, 0xab, 0x29, 0x40, 0x3a, 0x37, 0x5b, 0x6f,
164 0xd7, 0xfc, 0x48, 0x64, 0x3c, 0x49, 0xfb, 0x21,
165 0xbe, 0xc3, 0xff, 0x07, 0xfb, 0x17, 0xe9, 0xc9,
166 0x0c, 0x4c, 0x5c, 0x15, 0x9e, 0x8e, 0x22, 0x30,
167 0x0a, 0xde, 0x48, 0x7f, 0xdb, 0x0d, 0xd1, 0x2b,
168 0x87, 0x38, 0x9e, 0xcc, 0x5a, 0x01, 0x16, 0xee,
169 0x75, 0x49, 0x0d, 0x30, 0x01, 0x34, 0x6a, 0xb6,
170 0x9a, 0x5a, 0x2a, 0xec, 0xbb, 0x48, 0xac, 0xd3,
171 0x77, 0x83, 0xd8, 0x08, 0x86, 0x4f, 0x48, 0x09,
172 0x29, 0x41, 0x79, 0xa1, 0x03, 0x12, 0xc4, 0xcd,
173 0x90, 0x55, 0x47, 0x66, 0x74, 0x9a, 0xcc, 0x4f,
174 0x35, 0x8c, 0xd6, 0x98, 0xef, 0xeb, 0x45, 0xb9,
175 0x9a, 0x26, 0x2f, 0x39, 0xa5, 0x70, 0x6d, 0xfc,
176 0xb4, 0x51, 0xee, 0xf4, 0x9c, 0xe7, 0x38, 0x59,
177 0xad, 0xf4, 0xbc, 0x46, 0xff, 0x46, 0x8e, 0x60,
178 0x9c, 0xa3, 0x60, 0x1d, 0xf8, 0x26, 0x72, 0xf5,
179 0x72, 0x9d, 0x68, 0x80, 0x04, 0xf6, 0x0b, 0xa1,
180 0x0a, 0xd5, 0xa7, 0x82, 0x3a, 0x3e, 0x47, 0xa8,
181 0x5a, 0xde, 0x59, 0x4f, 0x7b, 0x07, 0xb3, 0xe9,
182 0x24, 0x19, 0x3d, 0x34, 0x05, 0xec, 0xf1, 0xab,
183 0x6e, 0x64, 0x8f, 0xd3, 0xe6, 0x41, 0x86, 0x80,
184 0x70, 0xe3, 0x8d, 0x60, 0x9c, 0x34, 0x25, 0x01,
185 0x07, 0x4d, 0x19, 0x41, 0x4e, 0x3d, 0x5c, 0x7e,
186 0xa8, 0xf5, 0xcc, 0xd5, 0x7b, 0xe2, 0x7d, 0x3d,
187 0x49, 0x86, 0x7d, 0x07, 0xb7, 0x10, 0xe3, 0x35,
188 0xb8, 0x84, 0x6d, 0x76, 0xab, 0x17, 0xc6, 0x38,
189 0xb4, 0xd3, 0x28, 0x57, 0xad, 0xd3, 0x88, 0x5a,
190 0xda, 0xea, 0xc8, 0x94, 0xcc, 0x37, 0x19, 0xac,
191 0x9c, 0x9f, 0x4b, 0x00, 0x15, 0xc0, 0xc8, 0xca,
192 0x1f, 0x15, 0xaa, 0xe0, 0xdb, 0xf9, 0x2f, 0x57,
193 0x1b, 0x24, 0xc7, 0x6f, 0x76, 0x29, 0xfb, 0xed,
194 0x25, 0x0d, 0xc0, 0xfe, 0xbd, 0x5a, 0xbf, 0x20,
195 0x08, 0x51, 0x05, 0xec, 0x71, 0xa3, 0xbf, 0xef,
196 0x5e, 0x99, 0x75, 0xdb, 0x3c, 0x5f, 0x9a, 0x8c,
197 0xbb, 0x19, 0x5c, 0x0e, 0x93, 0x19, 0xf8, 0x6a,
198 0xbc, 0xf2, 0x12, 0x54, 0x2f, 0xcb, 0x28, 0x64,
199 0x88, 0xb3, 0x92, 0x0d, 0x96, 0xd1, 0xa6, 0xe4,
200 0x1f, 0xf1, 0x4d, 0xa4, 0xab, 0x1c, 0xee, 0x54,
201 0xf2, 0xad, 0x29, 0x6d, 0x32, 0x37, 0xb2, 0x16,
202 0x77, 0x5c, 0xdc, 0x2e, 0x54, 0xec, 0x75, 0x26,
203 0xc6, 0x36, 0xd9, 0x17, 0x2c, 0xf1, 0x7a, 0xdc,
204 0x4b, 0xf1, 0xe2, 0xd9, 0x95, 0xba, 0xac, 0x87,
205 0xc1, 0xf3, 0x8e, 0x58, 0x08, 0xd8, 0x87, 0x60,
206 0xc9, 0xee, 0x6a, 0xde, 0xa4, 0xd2, 0xfc, 0x0d,
207 0xe5, 0x36, 0xc4, 0x5c, 0x52, 0xb3, 0x07, 0x54,
208 0x65, 0x24, 0xc1, 0xb1, 0xd1, 0xb1, 0x53, 0x13,
209 0x31, 0x79, 0x7f, 0x05, 0x76, 0xeb, 0x37, 0x59,
210 0x15, 0x2b, 0xd1, 0x3f, 0xac, 0x08, 0x97, 0xeb,
211 0x91, 0x98, 0xdf, 0x6c, 0x09, 0x0d, 0x04, 0x9f,
212 0xdc, 0x3b, 0x0e, 0x60, 0x68, 0x47, 0x23, 0x15,
213 0x16, 0xc6, 0x0b, 0x35, 0xf8, 0x77, 0xa2, 0x78,
214 0x50, 0xd4, 0x64, 0x22, 0x33, 0xff, 0xfb, 0x93,
215 0x71, 0x46, 0x50, 0x39, 0x1b, 0x9c, 0xea, 0x4e,
216 0x8d, 0x0c, 0x37, 0xe5, 0x5c, 0x51, 0x3a, 0x31,
217 0xb2, 0x85, 0x84, 0x3f, 0x41, 0xee, 0xa2, 0xc1,
218 0xc6, 0x13, 0x3b, 0x54, 0x28, 0xd2, 0x18, 0x37,
219 0xcc, 0x46, 0x9f, 0x6a, 0x91, 0x3d, 0x5a, 0x15,
220 0x3c, 0x89, 0xa3, 0x61, 0x06, 0x7d, 0x2e, 0x78,
221 0xbe, 0x7d, 0x40, 0xba, 0x2f, 0x95, 0xb1, 0x2f,
222 0x87, 0x3b, 0x8a, 0xbe, 0x6a, 0xf4, 0xc2, 0x31,
223 0x74, 0xee, 0x91, 0xe0, 0x23, 0xaa, 0x5d, 0x7f,
224 0xdd, 0xf0, 0x44, 0x8c, 0x0b, 0x59, 0x2b, 0xfc,
225 0x48, 0x3a, 0xdf, 0x07, 0x05, 0x38, 0x6c, 0xc9,
226 0xeb, 0x18, 0x24, 0x68, 0x8d, 0x58, 0x98, 0xd3,
227 0x31, 0xa3, 0xe4, 0x70, 0x59, 0xb1, 0x21, 0xbe,
228 0x7e, 0x65, 0x7d, 0xb8, 0x04, 0xab, 0xf6, 0xe4,
229 0xd7, 0xda, 0xec, 0x09, 0x8f, 0xda, 0x6d, 0x24,
230 0x07, 0xcc, 0x29, 0x17, 0x05, 0x78, 0x1a, 0xc1,
231 0xb1, 0xce, 0xfc, 0xaa, 0x2d, 0xe7, 0xcc, 0x85,
232 0x84, 0x84, 0x03, 0x2a, 0x0c, 0x3f, 0xa9, 0xf8,
233 0xfd, 0x84, 0x53, 0x59, 0x5c, 0xf0, 0xd4, 0x09,
234 0xf0, 0xd2, 0x6c, 0x32, 0x03, 0xb0, 0xa0, 0x8c,
235 0x52, 0xeb, 0x23, 0x91, 0x88, 0x43, 0x13, 0x46,
236 0xf6, 0x1e, 0xb4, 0x1b, 0xf5, 0x8e, 0x3a, 0xb5,
237 0x3d, 0x00, 0xf6, 0xe5, 0x08, 0x3d, 0x5f, 0x39,
238 0xd3, 0x21, 0x69, 0xbc, 0x03, 0x22, 0x3a, 0xd2,
239 0x5c, 0x84, 0xf8, 0x15, 0xc4, 0x80, 0x0b, 0xbc,
240 0x29, 0x3c, 0xf3, 0x95, 0x98, 0xcd, 0x8f, 0x35,
241 0xbc, 0xa5, 0x3e, 0xfc, 0xd4, 0x13, 0x9e, 0xde,
242 0x4f, 0xce, 0x71, 0x9d, 0x09, 0xad, 0xf2, 0x80,
243 0x6b, 0x65, 0x7f, 0x03, 0x00, 0x14, 0x7c, 0x15,
244 0x85, 0x40, 0x6d, 0x70, 0xea, 0xdc, 0xb3, 0x63,
245 0x35, 0x4f, 0x4d, 0xe0, 0xd9, 0xd5, 0x3c, 0x58,
246 0x56, 0x23, 0x80, 0xe2, 0x36, 0xdd, 0x75, 0x1d,
247 0x94, 0x11, 0x41, 0x8e, 0xe0, 0x81, 0x8e, 0xcf,
248 0xe0, 0xe5, 0xf6, 0xde, 0xd1, 0xe7, 0x04, 0x12,
249 0x79, 0x92, 0x2b, 0x71, 0x2a, 0x79, 0x8b, 0x7c,
250 0x44, 0x79, 0x16, 0x30, 0x4e, 0xf4, 0xf6, 0x9b,
251 0xb7, 0x40, 0xa3, 0x5a, 0xa7, 0x69, 0x3e, 0xc1,
252 0x3a, 0x04, 0xd0, 0x88, 0xa0, 0x3b, 0xdd, 0xc6,
253 0x9e, 0x7e, 0x1e, 0x1e, 0x8f, 0x44, 0xf7, 0x73,
254 0x67, 0x1e, 0x1a, 0x78, 0xfa, 0x62, 0xf4, 0xa9,
255 0xa8, 0xc6, 0x5b, 0xb8, 0xfa, 0x06, 0x7d, 0x5e,
256 0x38, 0x1c, 0x9a, 0x39, 0xe9, 0x39, 0x98, 0x22,
257 0x0b, 0xa7, 0xac, 0x0b, 0xf3, 0xbc, 0xf1, 0xeb,
258 0x8c, 0x81, 0xe3, 0x48, 0x8a, 0xed, 0x42, 0xc2,
259 0x38, 0xcf, 0x3e, 0xda, 0xd2, 0x89, 0x8d, 0x9c,
260 0x53, 0xb5, 0x2f, 0x41, 0x01, 0x26, 0x84, 0x9c,
261 0xa3, 0x56, 0xf6, 0x49, 0xc7, 0xd4, 0x9f, 0x93,
262 0x1b, 0x96, 0x49, 0x5e, 0xad, 0xb3, 0x84, 0x1f,
263 0x3c, 0xa4, 0xe0, 0x9b, 0xd1, 0x90, 0xbc, 0x38,
264 0x6c, 0xdd, 0x95, 0x4d, 0x9d, 0xb1, 0x71, 0x57,
265 0x2d, 0x34, 0xe8, 0xb8, 0x42, 0xc7, 0x99, 0x03,
266 0xc7, 0x07, 0x30, 0x65, 0x91, 0x55, 0xd5, 0x90,
267 0x70, 0x97, 0x37, 0x68, 0xd4, 0x11, 0xf9, 0xe8,
268 0xce, 0xec, 0xdc, 0x34, 0xd5, 0xd3, 0xb7, 0xc4,
269 0xb8, 0x97, 0x05, 0x92, 0xad, 0xf8, 0xe2, 0x36,
270 0x64, 0x41, 0xc9, 0xc5, 0x41, 0x77, 0x52, 0xd7,
271 0x2c, 0xa5, 0x24, 0x2f, 0xd9, 0x34, 0x0b, 0x47,
272 0x35, 0xa7, 0x28, 0x8b, 0xc5, 0xcd, 0xe9, 0x46,
273 0xac, 0x39, 0x94, 0x3c, 0x10, 0xc6, 0x29, 0x73,
274 0x0e, 0x0e, 0x5d, 0xe0, 0x71, 0x03, 0x8a, 0x72,
275 0x0e, 0x26, 0xb0, 0x7d, 0x84, 0xed, 0x95, 0x23,
276 0x49, 0x5a, 0x45, 0x83, 0x45, 0x60, 0x11, 0x4a,
277 0x46, 0x31, 0xd4, 0xd8, 0x16, 0x54, 0x98, 0x58,
278 0xed, 0x6d, 0xcc, 0x5d, 0xd6, 0x50, 0x61, 0x9f,
279 0x9d, 0xc5, 0x3e, 0x9d, 0x32, 0x47, 0xde, 0x96,
280 0xe1, 0x5d, 0xd8, 0xf8, 0xb4, 0x69, 0x6f, 0xb9,
281 0x15, 0x90, 0x57, 0x7a, 0xf6, 0xad, 0xb0, 0x5b,
282 0xf5, 0xa6, 0x36, 0x94, 0xfd, 0x84, 0xce, 0x1c,
283 0x0f, 0x4b, 0xd0, 0xc2, 0x5b, 0x6b, 0x56, 0xef,
284 0x73, 0x93, 0x0b, 0xc3, 0xee, 0xd9, 0xcf, 0xd3,
285 0xa4, 0x22, 0x58, 0xcd, 0x50, 0x6e, 0x65, 0xf4,
286 0xe9, 0xb7, 0x71, 0xaf, 0x4b, 0xb3, 0xb6, 0x2f,
287 0x0f, 0x0e, 0x3b, 0xc9, 0x85, 0x14, 0xf5, 0x17,
288 0xe8, 0x7a, 0x3a, 0xbf, 0x5f, 0x5e, 0xf8, 0x18,
289 0x48, 0xa6, 0x72, 0xab, 0x06, 0x95, 0xe9, 0xc8,
290 0xa7, 0xf4, 0x32, 0x44, 0x04, 0x0c, 0x84, 0x98,
291 0x73, 0xe3, 0x89, 0x8d, 0x5f, 0x7e, 0x4a, 0x42,
292 0x8f, 0xc5, 0x28, 0xb1, 0x82, 0xef, 0x1c, 0x97,
293 0x31, 0x3b, 0x4d, 0xe0, 0x0e, 0x10, 0x10, 0x97,
294 0x93, 0x49, 0x78, 0x2f, 0x0d, 0x86, 0x8b, 0xa1,
295 0x53, 0xa9, 0x81, 0x20, 0x79, 0xe7, 0x07, 0x77,
296 0xb6, 0xac, 0x5e, 0xd2, 0x05, 0xcd, 0xe9, 0xdb,
297 0x8a, 0x94, 0x82, 0x8a, 0x23, 0xb9, 0x3d, 0x1c,
298 0xa9, 0x7d, 0x72, 0x4a, 0xed, 0x33, 0xa3, 0xdb,
299 0x21, 0xa7, 0x86, 0x33, 0x45, 0xa5, 0xaa, 0x56,
300 0x45, 0xb5, 0x83, 0x29, 0x40, 0x47, 0x79, 0x04,
301 0x6e, 0xb9, 0x95, 0xd0, 0x81, 0x77, 0x2d, 0x48,
302 0x1e, 0xfe, 0xc3, 0xc2, 0x1e, 0xe5, 0xf2, 0xbe,
303 0xfd, 0x3b, 0x94, 0x9f, 0xc4, 0xc4, 0x26, 0x9d,
304 0xe4, 0x66, 0x1e, 0x19, 0xee, 0x6c, 0x79, 0x97,
305 0x11, 0x31, 0x4b, 0x0d, 0x01, 0xcb, 0xde, 0xa8,
306 0xf6, 0x6d, 0x7c, 0x39, 0x46, 0x4e, 0x7e, 0x3f,
307 0x94, 0x17, 0xdf, 0xa1, 0x7d, 0xd9, 0x1c, 0x8e,
308 0xbc, 0x7d, 0x33, 0x7d, 0xe3, 0x12, 0x40, 0xca,
309 0xab, 0x37, 0x11, 0x46, 0xd4, 0xae, 0xef, 0x44,
310 0xa2, 0xb3, 0x6a, 0x66, 0x0e, 0x0c, 0x90, 0x7f,
311 0xdf, 0x5c, 0x66, 0x5f, 0xf2, 0x94, 0x9f, 0xa6,
312 0x73, 0x4f, 0xeb, 0x0d, 0xad, 0xbf, 0xc0, 0x63,
313 0x5c, 0xdc, 0x46, 0x51, 0xe8, 0x8e, 0x90, 0x19,
314 0xa8, 0xa4, 0x3c, 0x91, 0x79, 0xfa, 0x7e, 0x58,
315 0x85, 0x13, 0x55, 0xc5, 0x19, 0x82, 0x37, 0x1b,
316 0x0a, 0x02, 0x1f, 0x99, 0x6b, 0x18, 0xf1, 0x28,
317 0x08, 0xa2, 0x73, 0xb8, 0x0f, 0x2e, 0xcd, 0xbf,
318 0xf3, 0x86, 0x7f, 0xea, 0xef, 0xd0, 0xbb, 0xa6,
319 0x21, 0xdf, 0x49, 0x73, 0x51, 0xcc, 0x36, 0xd3,
320 0x3e, 0xa0, 0xf8, 0x44, 0xdf, 0xd3, 0xa6, 0xbe,
321 0x8a, 0xd4, 0x57, 0xdd, 0x72, 0x94, 0x61, 0x0f,
322 0x82, 0xd1, 0x07, 0xb8, 0x7c, 0x18, 0x83, 0xdf,
323 0x3a, 0xe5, 0x50, 0x6a, 0x82, 0x20, 0xac, 0xa9,
324 0xa8, 0xff, 0xd9, 0xf3, 0x77, 0x33, 0x5a, 0x9e,
325 0x7f, 0x6d, 0xfe, 0x5d, 0x33, 0x41, 0x42, 0xe7,
326 0x6c, 0x19, 0xe0, 0x44, 0x8a, 0x15, 0xf6, 0x70,
327 0x98, 0xb7, 0x68, 0x4d, 0xfa, 0x97, 0x39, 0xb0,
328 0x8e, 0xe8, 0x84, 0x8b, 0x75, 0x30, 0xb7, 0x7d,
329 0x92, 0x69, 0x20, 0x9c, 0x81, 0xfb, 0x4b, 0xf4,
330 0x01, 0x50, 0xeb, 0xce, 0x0c, 0x1c, 0x6c, 0xb5,
331 0x4a, 0xd7, 0x27, 0x0c, 0xce, 0xbb, 0xe5, 0x85,
332 0xf0, 0xb6, 0xee, 0xd5, 0x70, 0xdd, 0x3b, 0xfc,
333 0xd4, 0x99, 0xf1, 0x33, 0xdd, 0x8b, 0xc4, 0x2f,
334 0xae, 0xab, 0x74, 0x96, 0x32, 0xc7, 0x4c, 0x56,
335 0x3c, 0x89, 0x0f, 0x96, 0x0b, 0x42, 0xc0, 0xcb,
336 0xee, 0x0f, 0x0b, 0x8c, 0xfb, 0x7e, 0x47, 0x7b,
337 0x64, 0x48, 0xfd, 0xb2, 0x00, 0x80, 0x89, 0xa5,
338 0x13, 0x55, 0x62, 0xfc, 0x8f, 0xe2, 0x42, 0x03,
339 0xb7, 0x4e, 0x2a, 0x79, 0xb4, 0x82, 0xea, 0x23,
340 0x49, 0xda, 0xaf, 0x52, 0x63, 0x1e, 0x60, 0x03,
341 0x89, 0x06, 0x44, 0x46, 0x08, 0xc3, 0xc4, 0x87,
342 0x70, 0x2e, 0xda, 0x94, 0xad, 0x6b, 0xe0, 0xe4,
343 0xd1, 0x8a, 0x06, 0xc2, 0xa8, 0xc0, 0xa7, 0x43,
344 0x3c, 0x47, 0x52, 0x0e, 0xc3, 0x77, 0x81, 0x11,
345 0x67, 0x0e, 0xa0, 0x70, 0x04, 0x47, 0x29, 0x40,
346 0x86, 0x0d, 0x34, 0x56, 0xa7, 0xc9, 0x35, 0x59,
347 0x68, 0xdc, 0x93, 0x81, 0x70, 0xee, 0x86, 0xd9,
348 0x80, 0x06, 0x40, 0x4f, 0x1a, 0x0d, 0x40, 0x30,
349 0x0b, 0xcb, 0x96, 0x47, 0xc1, 0xb7, 0x52, 0xfd,
350 0x56, 0xe0, 0x72, 0x4b, 0xfb, 0xbd, 0x92, 0x45,
351 0x61, 0x71, 0xc2, 0x33, 0x11, 0xbf, 0x52, 0x83,
352 0x79, 0x26, 0xe0, 0x49, 0x6b, 0xb7, 0x05, 0x8b,
353 0xe8, 0x0e, 0x87, 0x31, 0xd7, 0x9d, 0x8a, 0xf5,
354 0xc0, 0x5f, 0x2e, 0x58, 0x4a, 0xdb, 0x11, 0xb3,
355 0x6c, 0x30, 0x2a, 0x46, 0x19, 0xe3, 0x27, 0x84,
356 0x1f, 0x63, 0x6e, 0xf6, 0x57, 0xc7, 0xc9, 0xd8,
357 0x5e, 0xba, 0xb3, 0x87, 0xd5, 0x83, 0x26, 0x34,
358 0x21, 0x9e, 0x65, 0xde, 0x42, 0xd3, 0xbe, 0x7b,
359 0xbc, 0x91, 0x71, 0x44, 0x4d, 0x99, 0x3b, 0x31,
360 0xe5, 0x3f, 0x11, 0x4e, 0x7f, 0x13, 0x51, 0x3b,
361 0xae, 0x79, 0xc9, 0xd3, 0x81, 0x8e, 0x25, 0x40,
362 0x10, 0xfc, 0x07, 0x1e, 0xf9, 0x7b, 0x9a, 0x4b,
363 0x6c, 0xe3, 0xb3, 0xad, 0x1a, 0x0a, 0xdd, 0x9e,
364 0x59, 0x0c, 0xa2, 0xcd, 0xae, 0x48, 0x4a, 0x38,
365 0x5b, 0x47, 0x41, 0x94, 0x65, 0x6b, 0xbb, 0xeb,
366 0x5b, 0xe3, 0xaf, 0x07, 0x5b, 0xd4, 0x4a, 0xa2,
367 0xc9, 0x5d, 0x2f, 0x64, 0x03, 0xd7, 0x3a, 0x2c,
368 0x6e, 0xce, 0x76, 0x95, 0xb4, 0xb3, 0xc0, 0xf1,
369 0xe2, 0x45, 0x73, 0x7a, 0x5c, 0xab, 0xc1, 0xfc,
370 0x02, 0x8d, 0x81, 0x29, 0xb3, 0xac, 0x07, 0xec,
371 0x40, 0x7d, 0x45, 0xd9, 0x7a, 0x59, 0xee, 0x34,
372 0xf0, 0xe9, 0xd5, 0x7b, 0x96, 0xb1, 0x3d, 0x95,
373 0xcc, 0x86, 0xb5, 0xb6, 0x04, 0x2d, 0xb5, 0x92,
374 0x7e, 0x76, 0xf4, 0x06, 0xa9, 0xa3, 0x12, 0x0f,
375 0xb1, 0xaf, 0x26, 0xba, 0x7c, 0xfc, 0x7e, 0x1c,
376 0xbc, 0x2c, 0x49, 0x97, 0x53, 0x60, 0x13, 0x0b,
377 0xa6, 0x61, 0x83, 0x89, 0x42, 0xd4, 0x17, 0x0c,
378 0x6c, 0x26, 0x52, 0xc3, 0xb3, 0xd4, 0x67, 0xf5,
379 0xe3, 0x04, 0xb7, 0xf4, 0xcb, 0x80, 0xb8, 0xcb,
380 0x77, 0x56, 0x3e, 0xaa, 0x57, 0x54, 0xee, 0xb4,
381 0x2c, 0x67, 0xcf, 0xf2, 0xdc, 0xbe, 0x55, 0xf9,
382 0x43, 0x1f, 0x6e, 0x22, 0x97, 0x67, 0x7f, 0xc4,
383 0xef, 0xb1, 0x26, 0x31, 0x1e, 0x27, 0xdf, 0x41,
384 0x80, 0x47, 0x6c, 0xe2, 0xfa, 0xa9, 0x8c, 0x2a,
385 0xf6, 0xf2, 0xab, 0xf0, 0x15, 0xda, 0x6c, 0xc8,
386 0xfe, 0xb5, 0x23, 0xde, 0xa9, 0x05, 0x3f, 0x06,
387 0x54, 0x4c, 0xcd, 0xe1, 0xab, 0xfc, 0x0e, 0x62,
388 0x33, 0x31, 0x73, 0x2c, 0x76, 0xcb, 0xb4, 0x47,
389 0x1e, 0x20, 0xad, 0xd8, 0xf2, 0x31, 0xdd, 0xc4,
390 0x8b, 0x0c, 0x77, 0xbe, 0xe1, 0x8b, 0x26, 0x00,
391 0x02, 0x58, 0xd6, 0x8d, 0xef, 0xad, 0x74, 0x67,
392 0xab, 0x3f, 0xef, 0xcb, 0x6f, 0xb0, 0xcc, 0x81,
393 0x44, 0x4c, 0xaf, 0xe9, 0x49, 0x4f, 0xdb, 0xa0,
394 0x25, 0xa4, 0xf0, 0x89, 0xf1, 0xbe, 0xd8, 0x10,
395 0xff, 0xb1, 0x3b, 0x4b, 0xfa, 0x98, 0xf5, 0x79,
396 0x6d, 0x1e, 0x69, 0x4d, 0x57, 0xb1, 0xc8, 0x19,
397 0x1b, 0xbd, 0x1e, 0x8c, 0x84, 0xb7, 0x7b, 0xe8,
398 0xd2, 0x2d, 0x09, 0x41, 0x41, 0x37, 0x3d, 0xb1,
399 0x6f, 0x26, 0x5d, 0x71, 0x16, 0x3d, 0xb7, 0x83,
400 0x27, 0x2c, 0xa7, 0xb6, 0x50, 0xbd, 0x91, 0x86,
401 0xab, 0x24, 0xa1, 0x38, 0xfd, 0xea, 0x71, 0x55,
402 0x7e, 0x9a, 0x07, 0x77, 0x4b, 0xfa, 0x61, 0x66,
403 0x20, 0x1e, 0x28, 0x95, 0x18, 0x1b, 0xa4, 0xa0,
404 0xfd, 0xc0, 0x89, 0x72, 0x43, 0xd9, 0x3b, 0x49,
405 0x5a, 0x3f, 0x9d, 0xbf, 0xdb, 0xb4, 0x46, 0xea,
406 0x42, 0x01, 0x77, 0x23, 0x68, 0x95, 0xb6, 0x24,
407 0xb3, 0xa8, 0x6c, 0x28, 0x3b, 0x11, 0x40, 0x7e,
408 0x18, 0x65, 0x6d, 0xd8, 0x24, 0x42, 0x7d, 0x88,
409 0xc0, 0x52, 0xd9, 0x05, 0xe4, 0x95, 0x90, 0x87,
410 0x8c, 0xf4, 0xd0, 0x6b, 0xb9, 0x83, 0x99, 0x34,
411 0x6d, 0xfe, 0x54, 0x40, 0x94, 0x52, 0x21, 0x4f,
412 0x14, 0x25, 0xc5, 0xd6, 0x5e, 0x95, 0xdc, 0x0a,
413 0x2b, 0x89, 0x20, 0x11, 0x84, 0x48, 0xd6, 0x3a,
414 0xcd, 0x5c, 0x24, 0xad, 0x62, 0xe3, 0xb1, 0x93,
415 0x25, 0x8d, 0xcd, 0x7e, 0xfc, 0x27, 0xa3, 0x37,
416 0xfd, 0x84, 0xfc, 0x1b, 0xb2, 0xf1, 0x27, 0x38,
417 0x5a, 0xb7, 0xfc, 0xf2, 0xfa, 0x95, 0x66, 0xd4,
418 0xfb, 0xba, 0xa7, 0xd7, 0xa3, 0x72, 0x69, 0x48,
419 0x48, 0x8c, 0xeb, 0x28, 0x89, 0xfe, 0x33, 0x65,
420 0x5a, 0x36, 0x01, 0x7e, 0x06, 0x79, 0x0a, 0x09,
421 0x3b, 0x74, 0x11, 0x9a, 0x6e, 0xbf, 0xd4, 0x9e,
422 0x58, 0x90, 0x49, 0x4f, 0x4d, 0x08, 0xd4, 0xe5,
423 0x4a, 0x09, 0x21, 0xef, 0x8b, 0xb8, 0x74, 0x3b,
424 0x91, 0xdd, 0x36, 0x85, 0x60, 0x2d, 0xfa, 0xd4,
425 0x45, 0x7b, 0x45, 0x53, 0xf5, 0x47, 0x87, 0x7e,
426 0xa6, 0x37, 0xc8, 0x78, 0x7a, 0x68, 0x9d, 0x8d,
427 0x65, 0x2c, 0x0e, 0x91, 0x5c, 0xa2, 0x60, 0xf0,
428 0x8e, 0x3f, 0xe9, 0x1a, 0xcd, 0xaa, 0xe7, 0xd5,
429 0x77, 0x18, 0xaf, 0xc9, 0xbc, 0x18, 0xea, 0x48,
430 0x1b, 0xfb, 0x22, 0x48, 0x70, 0x16, 0x29, 0x9e,
431 0x5b, 0xc1, 0x2c, 0x66, 0x23, 0xbc, 0xf0, 0x1f,
432 0xef, 0xaf, 0xe4, 0xd6, 0x04, 0x19, 0x82, 0x7a,
433 0x0b, 0xba, 0x4b, 0x46, 0xb1, 0x6a, 0x85, 0x5d,
434 0xb4, 0x73, 0xd6, 0x21, 0xa1, 0x71, 0x60, 0x14,
435 0xee, 0x0a, 0x77, 0xc4, 0x66, 0x2e, 0xf9, 0x69,
436 0x30, 0xaf, 0x41, 0x0b, 0xc8, 0x83, 0x3c, 0x53,
437 0x99, 0x19, 0x27, 0x46, 0xf7, 0x41, 0x6e, 0x56,
438 0xdc, 0x94, 0x28, 0x67, 0x4e, 0xb7, 0x25, 0x48,
439 0x8a, 0xc2, 0xe0, 0x60, 0x96, 0xcc, 0x18, 0xf4,
440 0x84, 0xdd, 0xa7, 0x5e, 0x3e, 0x05, 0x0b, 0x26,
441 0x26, 0xb2, 0x5c, 0x1f, 0x57, 0x1a, 0x04, 0x7e,
442 0x6a, 0xe3, 0x2f, 0xb4, 0x35, 0xb6, 0x38, 0x40,
443 0x40, 0xcd, 0x6f, 0x87, 0x2e, 0xef, 0xa3, 0xd7,
444 0xa9, 0xc2, 0xe8, 0x0d, 0x27, 0xdf, 0x44, 0x62,
445 0x99, 0xa0, 0xfc, 0xcf, 0x81, 0x78, 0xcb, 0xfe,
446 0xe5, 0xa0, 0x03, 0x4e, 0x6c, 0xd7, 0xf4, 0xaf,
447 0x7a, 0xbb, 0x61, 0x82, 0xfe, 0x71, 0x89, 0xb2,
448 0x22, 0x7c, 0x8e, 0x83, 0x04, 0xce, 0xf6, 0x5d,
449 0x84, 0x8f, 0x95, 0x6a, 0x7f, 0xad, 0xfd, 0x32,
450 0x9c, 0x5e, 0xe4, 0x9c, 0x89, 0x60, 0x54, 0xaa,
451 0x96, 0x72, 0xd2, 0xd7, 0x36, 0x85, 0xa9, 0x45,
452 0xd2, 0x2a, 0xa1, 0x81, 0x49, 0x6f, 0x7e, 0x04,
453 0xfa, 0xe2, 0xfe, 0x90, 0x26, 0x77, 0x5a, 0x33,
454 0xb8, 0x04, 0x9a, 0x7a, 0xe6, 0x4c, 0x4f, 0xad,
455 0x72, 0x96, 0x08, 0x28, 0x58, 0x13, 0xf8, 0xc4,
456 0x1c, 0xf0, 0xc3, 0x45, 0x95, 0x49, 0x20, 0x8c,
457 0x9f, 0x39, 0x70, 0xe1, 0x77, 0xfe, 0xd5, 0x4b,
458 0xaf, 0x86, 0xda, 0xef, 0x22, 0x06, 0x83, 0x36,
459 0x29, 0x12, 0x11, 0x40, 0xbc, 0x3b, 0x86, 0xaa,
460 0xaa, 0x65, 0x60, 0xc3, 0x80, 0xca, 0xed, 0xa9,
461 0xf3, 0xb0, 0x79, 0x96, 0xa2, 0x55, 0x27, 0x28,
462 0x55, 0x73, 0x26, 0xa5, 0x50, 0xea, 0x92, 0x4b,
463 0x3c, 0x5c, 0x82, 0x33, 0xf0, 0x01, 0x3f, 0x03,
464 0xc1, 0x08, 0x05, 0xbf, 0x98, 0xf4, 0x9b, 0x6d,
465 0xa5, 0xa8, 0xb4, 0x82, 0x0c, 0x06, 0xfa, 0xff,
466 0x2d, 0x08, 0xf3, 0x05, 0x4f, 0x57, 0x2a, 0x39,
467 0xd4, 0x83, 0x0d, 0x75, 0x51, 0xd8, 0x5b, 0x1b,
468 0xd3, 0x51, 0x5a, 0x32, 0x2a, 0x9b, 0x32, 0xb2,
469 0xf2, 0xa4, 0x96, 0x12, 0xf2, 0xae, 0x40, 0x34,
470 0x67, 0xa8, 0xf5, 0x44, 0xd5, 0x35, 0x53, 0xfe,
471 0xa3, 0x60, 0x96, 0x63, 0x0f, 0x1f, 0x6e, 0xb0,
472 0x5a, 0x42, 0xa6, 0xfc, 0x51, 0x0b, 0x60, 0x27,
473 0xbc, 0x06, 0x71, 0xed, 0x65, 0x5b, 0x23, 0x86,
474 0x4a, 0x07, 0x3b, 0x22, 0x07, 0x46, 0xe6, 0x90,
475 0x3e, 0xf3, 0x25, 0x50, 0x1b, 0x4c, 0x7f, 0x03,
476 0x08, 0xa8, 0x36, 0x6b, 0x87, 0xe5, 0xe3, 0xdb,
477 0x9a, 0x38, 0x83, 0xff, 0x9f, 0x1a, 0x9f, 0x57,
478 0xa4, 0x2a, 0xf6, 0x37, 0xbc, 0x1a, 0xff, 0xc9,
479 0x1e, 0x35, 0x0c, 0xc3, 0x7c, 0xa3, 0xb2, 0xe5,
480 0xd2, 0xc6, 0xb4, 0x57, 0x47, 0xe4, 0x32, 0x16,
481 0x6d, 0xa9, 0xae, 0x64, 0xe6, 0x2d, 0x8d, 0xc5,
482 0x8d, 0x50, 0x8e, 0xe8, 0x1a, 0x22, 0x34, 0x2a,
483 0xd9, 0xeb, 0x51, 0x90, 0x4a, 0xb1, 0x41, 0x7d,
484 0x64, 0xf9, 0xb9, 0x0d, 0xf6, 0x23, 0x33, 0xb0,
485 0x33, 0xf4, 0xf7, 0x3f, 0x27, 0x84, 0xc6, 0x0f,
486 0x54, 0xa5, 0xc0, 0x2e, 0xec, 0x0b, 0x3a, 0x48,
487 0x6e, 0x80, 0x35, 0x81, 0x43, 0x9b, 0x90, 0xb1,
488 0xd0, 0x2b, 0xea, 0x21, 0xdc, 0xda, 0x5b, 0x09,
489 0xf4, 0xcc, 0x10, 0xb4, 0xc7, 0xfe, 0x79, 0x51,
490 0xc3, 0xc5, 0xac, 0x88, 0x74, 0x84, 0x0b, 0x4b,
491 0xca, 0x79, 0x16, 0x29, 0xfb, 0x69, 0x54, 0xdf,
492 0x41, 0x7e, 0xe9, 0xc7, 0x8e, 0xea, 0xa5, 0xfe,
493 0xfc, 0x76, 0x0e, 0x90, 0xc4, 0x92, 0x38, 0xad,
494 0x7b, 0x48, 0xe6, 0x6e, 0xf7, 0x21, 0xfd, 0x4e,
495 0x93, 0x0a, 0x7b, 0x41, 0x83, 0x68, 0xfb, 0x57,
496 0x51, 0x76, 0x34, 0xa9, 0x6c, 0x00, 0xaa, 0x4f,
497 0x66, 0x65, 0x98, 0x4a, 0x4f, 0xa3, 0xa0, 0xef,
498 0x69, 0x3f, 0xe3, 0x1c, 0x92, 0x8c, 0xfd, 0xd8,
499 0xe8, 0xde, 0x7c, 0x7f, 0x3e, 0x84, 0x8e, 0x69,
500 0x3c, 0xf1, 0xf2, 0x05, 0x46, 0xdc, 0x2f, 0x9d,
501 0x5e, 0x6e, 0x4c, 0xfb, 0xb5, 0x99, 0x2a, 0x59,
502 0x63, 0xc1, 0x34, 0xbc, 0x57, 0xc0, 0x0d, 0xb9,
503 0x61, 0x25, 0xf3, 0x33, 0x23, 0x51, 0xb6, 0x0d,
504 0x07, 0xa6, 0xab, 0x94, 0x4a, 0xb7, 0x2a, 0xea,
505 0xee, 0xac, 0xa3, 0xc3, 0x04, 0x8b, 0x0e, 0x56,
506 0xfe, 0x44, 0xa7, 0x39, 0xe2, 0xed, 0xed, 0xb4,
507 0x22, 0x2b, 0xac, 0x12, 0x32, 0x28, 0x91, 0xd8,
508 0xa5, 0xab, 0xff, 0x5f, 0xe0, 0x4b, 0xda, 0x78,
509 0x17, 0xda, 0xf1, 0x01, 0x5b, 0xcd, 0xe2, 0x5f,
510 0x50, 0x45, 0x73, 0x2b, 0xe4, 0x76, 0x77, 0xf4,
511 0x64, 0x1d, 0x43, 0xfb, 0x84, 0x7a, 0xea, 0x91,
512 0xae, 0xf9, 0x9e, 0xb7, 0xb4, 0xb0, 0x91, 0x5f,
513 0x16, 0x35, 0x9a, 0x11, 0xb8, 0xc7, 0xc1, 0x8c,
514 0xc6, 0x10, 0x8d, 0x2f, 0x63, 0x4a, 0xa7, 0x57,
515 0x3a, 0x51, 0xd6, 0x32, 0x2d, 0x64, 0x72, 0xd4,
516 0x66, 0xdc, 0x10, 0xa6, 0x67, 0xd6, 0x04, 0x23,
517 0x9d, 0x0a, 0x11, 0x77, 0xdd, 0x37, 0x94, 0x17,
518 0x3c, 0xbf, 0x8b, 0x65, 0xb0, 0x2e, 0x5e, 0x66,
519 0x47, 0x64, 0xac, 0xdd, 0xf0, 0x84, 0xfd, 0x39,
520 0xfa, 0x15, 0x5d, 0xef, 0xae, 0xca, 0xc1, 0x36,
521 0xa7, 0x5c, 0xbf, 0xc7, 0x08, 0xc2, 0x66, 0x00,
522 0x74, 0x74, 0x4e, 0x27, 0x3f, 0x55, 0x8a, 0xb7,
523 0x38, 0x66, 0x83, 0x6d, 0xcf, 0x99, 0x9e, 0x60,
524 0x8f, 0xdd, 0x2e, 0x62, 0x22, 0x0e, 0xef, 0x0c,
525 0x98, 0xa7, 0x85, 0x74, 0x3b, 0x9d, 0xec, 0x9e,
526 0xa9, 0x19, 0x72, 0xa5, 0x7f, 0x2c, 0x39, 0xb7,
527 0x7d, 0xb7, 0xf1, 0x12, 0x65, 0x27, 0x4b, 0x5a,
528 0xde, 0x17, 0xfe, 0xad, 0x44, 0xf3, 0x20, 0x4d,
529 0xfd, 0xe4, 0x1f, 0xb5, 0x81, 0xb0, 0x36, 0x37,
530 0x08, 0x6f, 0xc3, 0x0c, 0xe9, 0x85, 0x98, 0x82,
531 0xa9, 0x62, 0x0c, 0xc4, 0x97, 0xc0, 0x50, 0xc8,
532 0xa7, 0x3c, 0x50, 0x9f, 0x43, 0xb9, 0xcd, 0x5e,
533 0x4d, 0xfa, 0x1c, 0x4b, 0x0b, 0xa9, 0x98, 0x85,
534 0x38, 0x92, 0xac, 0x8d, 0xe4, 0xad, 0x9b, 0x98,
535 0xab, 0xd9, 0x38, 0xac, 0x62, 0x52, 0xa3, 0x22,
536 0x63, 0x0f, 0xbf, 0x95, 0x48, 0xdf, 0x69, 0xe7,
537 0x8b, 0x33, 0xd5, 0xb2, 0xbd, 0x05, 0x49, 0x49,
538 0x9d, 0x57, 0x73, 0x19, 0x33, 0xae, 0xfa, 0x33,
539 0xf1, 0x19, 0xa8, 0x80, 0xce, 0x04, 0x9f, 0xbc,
540 0x1d, 0x65, 0x82, 0x1b, 0xe5, 0x3a, 0x51, 0xc8,
541 0x1c, 0x21, 0xe3, 0x5d, 0xf3, 0x7d, 0x9b, 0x2f,
542 0x2c, 0x1d, 0x4a, 0x7f, 0x9b, 0x68, 0x35, 0xa3,
543 0xb2, 0x50, 0xf7, 0x62, 0x79, 0xcd, 0xf4, 0x98,
544 0x4f, 0xe5, 0x63, 0x7c, 0x3e, 0x45, 0x31, 0x8c,
545 0x16, 0xa0, 0x12, 0xc8, 0x58, 0xce, 0x39, 0xa6,
546 0xbc, 0x54, 0xdb, 0xc5, 0xe0, 0xd5, 0xba, 0xbc,
547 0xb9, 0x04, 0xf4, 0x8d, 0xe8, 0x2f, 0x15, 0x9d,
548};
549
550/* 100 test cases */
551static struct crc_test {
552 u32 crc; /* random starting crc */
553 u32 start; /* random 6 bit offset in buf */
554 u32 length; /* random 11 bit length of test */
555 u32 crc_le; /* expected crc32_le result */
556 u32 crc_be; /* expected crc32_be result */
557 u32 crc32c_le; /* expected crc32c_le result */
558} const test[] __initconst =
559{
560 {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
561 {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
562 {0x496da28e, 0x00000039, 0x000005af, 0xd933660f, 0x5d57e81f, 0x52e1ebb8},
563 {0x09a9b90e, 0x00000027, 0x000001f8, 0xb45fe007, 0xf45fca9a, 0x0798af9a},
564 {0xdc97e5a9, 0x00000025, 0x000003b6, 0xf81a3562, 0xe0126ba2, 0x18eb3152},
565 {0x47c58900, 0x0000000a, 0x000000b9, 0x8e58eccf, 0xf3afc793, 0xd00d08c7},
566 {0x292561e8, 0x0000000c, 0x00000403, 0xa2ba8aaf, 0x0b797aed, 0x8ba966bc},
567 {0x415037f6, 0x00000003, 0x00000676, 0xa17d52e8, 0x7f0fdf35, 0x11d694a2},
568 {0x3466e707, 0x00000026, 0x00000042, 0x258319be, 0x75c484a2, 0x6ab3208d},
569 {0xafd1281b, 0x00000023, 0x000002ee, 0x4428eaf8, 0x06c7ad10, 0xba4603c5},
570 {0xd3857b18, 0x00000028, 0x000004a2, 0x5c430821, 0xb062b7cb, 0xe6071c6f},
571 {0x1d825a8f, 0x0000002b, 0x0000050b, 0xd2c45f0c, 0xd68634e0, 0x179ec30a},
572 {0x5033e3bc, 0x0000000b, 0x00000078, 0xa3ea4113, 0xac6d31fb, 0x0903beb8},
573 {0x94f1fb5e, 0x0000000f, 0x000003a2, 0xfbfc50b1, 0x3cfe50ed, 0x6a7cb4fa},
574 {0xc9a0fe14, 0x00000009, 0x00000473, 0x5fb61894, 0x87070591, 0xdb535801},
575 {0x88a034b1, 0x0000001c, 0x000005ad, 0xc1b16053, 0x46f95c67, 0x92bed597},
576 {0xf0f72239, 0x00000020, 0x0000026d, 0xa6fa58f3, 0xf8c2c1dd, 0x192a3f1b},
577 {0xcc20a5e3, 0x0000003b, 0x0000067a, 0x7740185a, 0x308b979a, 0xccbaec1a},
578 {0xce589c95, 0x0000002b, 0x00000641, 0xd055e987, 0x40aae25b, 0x7eabae4d},
579 {0x78edc885, 0x00000035, 0x000005be, 0xa39cb14b, 0x035b0d1f, 0x28c72982},
580 {0x9d40a377, 0x0000003b, 0x00000038, 0x1f47ccd2, 0x197fbc9d, 0xc3cd4d18},
581 {0x703d0e01, 0x0000003c, 0x000006f1, 0x88735e7c, 0xfed57c5a, 0xbca8f0e7},
582 {0x776bf505, 0x0000000f, 0x000005b2, 0x5cc4fc01, 0xf32efb97, 0x713f60b3},
583 {0x4a3e7854, 0x00000027, 0x000004b8, 0x8d923c82, 0x0cbfb4a2, 0xebd08fd5},
584 {0x209172dd, 0x0000003b, 0x00000356, 0xb89e9c2b, 0xd7868138, 0x64406c59},
585 {0x3ba4cc5b, 0x0000002f, 0x00000203, 0xe51601a9, 0x5b2a1032, 0x7421890e},
586 {0xfc62f297, 0x00000000, 0x00000079, 0x71a8e1a2, 0x5d88685f, 0xe9347603},
587 {0x64280b8b, 0x00000016, 0x000007ab, 0x0fa7a30c, 0xda3a455f, 0x1bef9060},
588 {0x97dd724b, 0x00000033, 0x000007ad, 0x5788b2f4, 0xd7326d32, 0x34720072},
589 {0x61394b52, 0x00000035, 0x00000571, 0xc66525f1, 0xcabe7fef, 0x48310f59},
590 {0x29b4faff, 0x00000024, 0x0000006e, 0xca13751e, 0x993648e0, 0x783a4213},
591 {0x29bfb1dc, 0x0000000b, 0x00000244, 0x436c43f7, 0x429f7a59, 0x9e8efd41},
592 {0x86ae934b, 0x00000035, 0x00000104, 0x0760ec93, 0x9cf7d0f4, 0xfc3d34a5},
593 {0xc4c1024e, 0x0000002e, 0x000006b1, 0x6516a3ec, 0x19321f9c, 0x17a52ae2},
594 {0x3287a80a, 0x00000026, 0x00000496, 0x0b257eb1, 0x754ebd51, 0x886d935a},
595 {0xa4db423e, 0x00000023, 0x0000045d, 0x9b3a66dc, 0x873e9f11, 0xeaaeaeb2},
596 {0x7a1078df, 0x00000015, 0x0000014a, 0x8c2484c5, 0x6a628659, 0x8e900a4b},
597 {0x6048bd5b, 0x00000006, 0x0000006a, 0x897e3559, 0xac9961af, 0xd74662b1},
598 {0xd8f9ea20, 0x0000003d, 0x00000277, 0x60eb905b, 0xed2aaf99, 0xd26752ba},
599 {0xea5ec3b4, 0x0000002a, 0x000004fe, 0x869965dc, 0x6c1f833b, 0x8b1fcd62},
600 {0x2dfb005d, 0x00000016, 0x00000345, 0x6a3b117e, 0xf05e8521, 0xf54342fe},
601 {0x5a214ade, 0x00000020, 0x000005b6, 0x467f70be, 0xcb22ccd3, 0x5b95b988},
602 {0xf0ab9cca, 0x00000032, 0x00000515, 0xed223df3, 0x7f3ef01d, 0x2e1176be},
603 {0x91b444f9, 0x0000002e, 0x000007f8, 0x84e9a983, 0x5676756f, 0x66120546},
604 {0x1b5d2ddb, 0x0000002e, 0x0000012c, 0xba638c4c, 0x3f42047b, 0xf256a5cc},
605 {0xd824d1bb, 0x0000003a, 0x000007b5, 0x6288653b, 0x3a3ebea0, 0x4af1dd69},
606 {0x0470180c, 0x00000034, 0x000001f0, 0x9d5b80d6, 0x3de08195, 0x56f0a04a},
607 {0xffaa3a3f, 0x00000036, 0x00000299, 0xf3a82ab8, 0x53e0c13d, 0x74f6b6b2},
608 {0x6406cfeb, 0x00000023, 0x00000600, 0xa920b8e8, 0xe4e2acf4, 0x085951fd},
609 {0xb24aaa38, 0x0000003e, 0x000004a1, 0x657cc328, 0x5077b2c3, 0xc65387eb},
610 {0x58b2ab7c, 0x00000039, 0x000002b4, 0x3a17ee7e, 0x9dcb3643, 0x1ca9257b},
611 {0x3db85970, 0x00000006, 0x000002b6, 0x95268b59, 0xb9812c10, 0xfd196d76},
612 {0x857830c5, 0x00000003, 0x00000590, 0x4ef439d5, 0xf042161d, 0x5ef88339},
613 {0xe1fcd978, 0x0000003e, 0x000007d8, 0xae8d8699, 0xce0a1ef5, 0x2c3714d9},
614 {0xb982a768, 0x00000016, 0x000006e0, 0x62fad3df, 0x5f8a067b, 0x58576548},
615 {0x1d581ce8, 0x0000001e, 0x0000058b, 0xf0f5da53, 0x26e39eee, 0xfd7c57de},
616 {0x2456719b, 0x00000025, 0x00000503, 0x4296ac64, 0xd50e4c14, 0xd5fedd59},
617 {0xfae6d8f2, 0x00000000, 0x0000055d, 0x057fdf2e, 0x2a31391a, 0x1cc3b17b},
618 {0xcba828e3, 0x00000039, 0x000002ce, 0xe3f22351, 0x8f00877b, 0x270eed73},
619 {0x13d25952, 0x0000000a, 0x0000072d, 0x76d4b4cc, 0x5eb67ec3, 0x91ecbb11},
620 {0x0342be3f, 0x00000015, 0x00000599, 0xec75d9f1, 0x9d4d2826, 0x05ed8d0c},
621 {0xeaa344e0, 0x00000014, 0x000004d8, 0x72a4c981, 0x2064ea06, 0x0b09ad5b},
622 {0xbbb52021, 0x0000003b, 0x00000272, 0x04af99fc, 0xaf042d35, 0xf8d511fb},
623 {0xb66384dc, 0x0000001d, 0x000007fc, 0xd7629116, 0x782bd801, 0x5ad832cc},
624 {0x616c01b6, 0x00000022, 0x000002c8, 0x5b1dab30, 0x783ce7d2, 0x1214d196},
625 {0xce2bdaad, 0x00000016, 0x0000062a, 0x932535c8, 0x3f02926d, 0x5747218a},
626 {0x00fe84d7, 0x00000005, 0x00000205, 0x850e50aa, 0x753d649c, 0xde8f14de},
627 {0xbebdcb4c, 0x00000006, 0x0000055d, 0xbeaa37a2, 0x2d8c9eba, 0x3563b7b9},
628 {0xd8b1a02a, 0x00000010, 0x00000387, 0x5017d2fc, 0x503541a5, 0x071475d0},
629 {0x3b96cad2, 0x00000036, 0x00000347, 0x1d2372ae, 0x926cd90b, 0x54c79d60},
630 {0xc94c1ed7, 0x00000005, 0x0000038b, 0x9e9fdb22, 0x144a9178, 0x4c53eee6},
631 {0x1aad454e, 0x00000025, 0x000002b2, 0xc3f6315c, 0x5c7a35b3, 0x10137a3c},
632 {0xa4fec9a6, 0x00000000, 0x000006d6, 0x90be5080, 0xa4107605, 0xaa9d6c73},
633 {0x1bbe71e2, 0x0000001f, 0x000002fd, 0x4e504c3b, 0x284ccaf1, 0xb63d23e7},
634 {0x4201c7e4, 0x00000002, 0x000002b7, 0x7822e3f9, 0x0cc912a9, 0x7f53e9cf},
635 {0x23fddc96, 0x00000003, 0x00000627, 0x8a385125, 0x07767e78, 0x13c1cd83},
636 {0xd82ba25c, 0x00000016, 0x0000063e, 0x98e4148a, 0x283330c9, 0x49ff5867},
637 {0x786f2032, 0x0000002d, 0x0000060f, 0xf201600a, 0xf561bfcd, 0x8467f211},
638 {0xfebe4e1f, 0x0000002a, 0x000004f2, 0x95e51961, 0xfd80dcab, 0x3f9683b2},
639 {0x1a6e0a39, 0x00000008, 0x00000672, 0x8af6c2a5, 0x78dd84cb, 0x76a3f874},
640 {0x56000ab8, 0x0000000e, 0x000000e5, 0x36bacb8f, 0x22ee1f77, 0x863b702f},
641 {0x4717fe0c, 0x00000000, 0x000006ec, 0x8439f342, 0x5c8e03da, 0xdc6c58ff},
642 {0xd5d5d68e, 0x0000003c, 0x000003a3, 0x46fff083, 0x177d1b39, 0x0622cc95},
643 {0xc25dd6c6, 0x00000024, 0x000006c0, 0x5ceb8eb4, 0x892b0d16, 0xe85605cd},
644 {0xe9b11300, 0x00000023, 0x00000683, 0x07a5d59a, 0x6c6a3208, 0x31da5f06},
645 {0x95cd285e, 0x00000001, 0x00000047, 0x7b3a4368, 0x0202c07e, 0xa1f2e784},
646 {0xd9245a25, 0x0000001e, 0x000003a6, 0xd33c1841, 0x1936c0d5, 0xb07cc616},
647 {0x103279db, 0x00000006, 0x0000039b, 0xca09b8a0, 0x77d62892, 0xbf943b6c},
648 {0x1cba3172, 0x00000027, 0x000001c8, 0xcb377194, 0xebe682db, 0x2c01af1c},
649 {0x8f613739, 0x0000000c, 0x000001df, 0xb4b0bc87, 0x7710bd43, 0x0fe5f56d},
650 {0x1c6aa90d, 0x0000001b, 0x0000053c, 0x70559245, 0xda7894ac, 0xf8943b2d},
651 {0xaabe5b93, 0x0000003d, 0x00000715, 0xcdbf42fa, 0x0c3b99e7, 0xe4d89272},
652 {0xf15dd038, 0x00000006, 0x000006db, 0x6e104aea, 0x8d5967f2, 0x7c2f6bbb},
653 {0x584dd49c, 0x00000020, 0x000007bc, 0x36b6cfd6, 0xad4e23b2, 0xabbf388b},
654 {0x5d8c9506, 0x00000020, 0x00000470, 0x4c62378e, 0x31d92640, 0x1dca1f4e},
655 {0xb80d17b0, 0x00000032, 0x00000346, 0x22a5bb88, 0x9a7ec89f, 0x5c170e23},
656 {0xdaf0592e, 0x00000023, 0x000007b0, 0x3cab3f99, 0x9b1fdd99, 0xc0e9d672},
657 {0x4793cc85, 0x0000000d, 0x00000706, 0xe82e04f6, 0xed3db6b7, 0xc18bdc86},
658 {0x82ebf64e, 0x00000009, 0x000007c3, 0x69d590a9, 0x9efa8499, 0xa874fcdd},
659 {0xb18a0319, 0x00000026, 0x000007db, 0x1cf98dcc, 0x8fa9ad6a, 0x9dc0bb48},
660};
661
662#include <linux/time.h>
663
664static int __init crc32c_test(void)
665{
666 int i;
667 int errors = 0;
668 int bytes = 0;
669 u64 nsec;
670 unsigned long flags;
671
672 /* keep static to prevent cache warming code from
673 * getting eliminated by the compiler */
674 static u32 crc;
675
676 /* pre-warm the cache */
677 for (i = 0; i < 100; i++) {
678 bytes += 2*test[i].length;
679
680 crc ^= __crc32c_le(test[i].crc, test_buf +
681 test[i].start, test[i].length);
682 }
683
684 /* reduce OS noise */
685 local_irq_save(flags);
686 local_irq_disable();
687
688 nsec = ktime_get_ns();
689 for (i = 0; i < 100; i++) {
690 if (test[i].crc32c_le != __crc32c_le(test[i].crc, test_buf +
691 test[i].start, test[i].length))
692 errors++;
693 }
694 nsec = ktime_get_ns() - nsec;
695
696 local_irq_restore(flags);
697 local_irq_enable();
698
699 pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS);
700
701 if (errors)
702 pr_warn("crc32c: %d self tests failed\n", errors);
703 else {
704 pr_info("crc32c: self tests passed, processed %d bytes in %lld nsec\n",
705 bytes, nsec);
706 }
707
708 return 0;
709}
710
711static int __init crc32c_combine_test(void)
712{
713 int i, j;
714 int errors = 0, runs = 0;
715
716 for (i = 0; i < 10; i++) {
717 u32 crc_full;
718
719 crc_full = __crc32c_le(test[i].crc, test_buf + test[i].start,
720 test[i].length);
721 for (j = 0; j <= test[i].length; ++j) {
722 u32 crc1, crc2;
723 u32 len1 = j, len2 = test[i].length - j;
724
725 crc1 = __crc32c_le(test[i].crc, test_buf +
726 test[i].start, len1);
727 crc2 = __crc32c_le(0, test_buf + test[i].start +
728 len1, len2);
729
730 if (!(crc_full == __crc32c_le_combine(crc1, crc2, len2) &&
731 crc_full == test[i].crc32c_le))
732 errors++;
733 runs++;
734 cond_resched();
735 }
736 }
737
738 if (errors)
739 pr_warn("crc32c_combine: %d/%d self tests failed\n", errors, runs);
740 else
741 pr_info("crc32c_combine: %d self tests passed\n", runs);
742
743 return 0;
744}
745
746static int __init crc32_test(void)
747{
748 int i;
749 int errors = 0;
750 int bytes = 0;
751 u64 nsec;
752 unsigned long flags;
753
754 /* keep static to prevent cache warming code from
755 * getting eliminated by the compiler */
756 static u32 crc;
757
758 /* pre-warm the cache */
759 for (i = 0; i < 100; i++) {
760 bytes += 2*test[i].length;
761
762 crc ^= crc32_le(test[i].crc, test_buf +
763 test[i].start, test[i].length);
764
765 crc ^= crc32_be(test[i].crc, test_buf +
766 test[i].start, test[i].length);
767 }
768
769 /* reduce OS noise */
770 local_irq_save(flags);
771 local_irq_disable();
772
773 nsec = ktime_get_ns();
774 for (i = 0; i < 100; i++) {
775 if (test[i].crc_le != crc32_le(test[i].crc, test_buf +
776 test[i].start, test[i].length))
777 errors++;
778
779 if (test[i].crc_be != crc32_be(test[i].crc, test_buf +
780 test[i].start, test[i].length))
781 errors++;
782 }
783 nsec = ktime_get_ns() - nsec;
784
785 local_irq_restore(flags);
786 local_irq_enable();
787
788	pr_info("crc32: CRC_LE_BITS = %d, CRC_BE_BITS = %d\n",
789 CRC_LE_BITS, CRC_BE_BITS);
790
791 if (errors)
792 pr_warn("crc32: %d self tests failed\n", errors);
793 else {
794 pr_info("crc32: self tests passed, processed %d bytes in %lld nsec\n",
795 bytes, nsec);
796 }
797
798 return 0;
799}
800
801static int __init crc32_combine_test(void)
802{
803 int i, j;
804 int errors = 0, runs = 0;
805
806 for (i = 0; i < 10; i++) {
807 u32 crc_full;
808
809 crc_full = crc32_le(test[i].crc, test_buf + test[i].start,
810 test[i].length);
811 for (j = 0; j <= test[i].length; ++j) {
812 u32 crc1, crc2;
813 u32 len1 = j, len2 = test[i].length - j;
814
815 crc1 = crc32_le(test[i].crc, test_buf +
816 test[i].start, len1);
817 crc2 = crc32_le(0, test_buf + test[i].start +
818 len1, len2);
819
820 if (!(crc_full == crc32_le_combine(crc1, crc2, len2) &&
821 crc_full == test[i].crc_le))
822 errors++;
823 runs++;
824 cond_resched();
825 }
826 }
827
828 if (errors)
829 pr_warn("crc32_combine: %d/%d self tests failed\n", errors, runs);
830 else
831 pr_info("crc32_combine: %d self tests passed\n", runs);
832
833 return 0;
834}
835
836static int __init crc32test_init(void)
837{
838 crc32_test();
839 crc32c_test();
840
841 crc32_combine_test();
842 crc32c_combine_test();
843
844 return 0;
845}
846
847static void __exit crc32_exit(void)
848{
849}
850
851module_init(crc32test_init);
852module_exit(crc32_exit);
853
854MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
855MODULE_DESCRIPTION("CRC32 selftest");
856MODULE_LICENSE("GPL");
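The combine tests above verify, for every split point j of each buffer, that the CRC of the whole range equals the combination of the CRCs of its two halves. The same identity can be tried outside the kernel; a minimal userspace sketch, assuming zlib is available, since zlib exposes the analogous crc32_combine() primitive (buffer and split point are arbitrary):

	#include <assert.h>
	#include <zlib.h>

	int main(void)
	{
		static const unsigned char buf[] =
			"the quick brown fox jumps over the lazy dog";
		const size_t len = sizeof(buf) - 1, len1 = 20, len2 = len - len1;

		uLong full = crc32(crc32(0L, Z_NULL, 0), buf, len);
		uLong crc1 = crc32(crc32(0L, Z_NULL, 0), buf, len1);
		uLong crc2 = crc32(crc32(0L, Z_NULL, 0), buf + len1, len2);

		/* crc(A || B) == combine(crc(A), crc(B), len(B)) */
		assert(full == crc32_combine(crc1, crc2, len2));
		return 0;
	}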
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 8c28cbd7e104..17afb0430161 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -13,6 +13,7 @@
13#include <linux/debugobjects.h> 13#include <linux/debugobjects.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/sched/task_stack.h>
16#include <linux/seq_file.h> 17#include <linux/seq_file.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 036fc882cd72..1b0baf3008ea 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -72,7 +72,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
72 error("NULL input pointer and missing fill function"); 72 error("NULL input pointer and missing fill function");
73 goto exit_1; 73 goto exit_1;
74 } else { 74 } else {
75 inp = large_malloc(lz4_compressbound(uncomp_chunksize)); 75 inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
76 if (!inp) { 76 if (!inp) {
77 error("Could not allocate input buffer"); 77 error("Could not allocate input buffer");
78 goto exit_1; 78 goto exit_1;
@@ -136,7 +136,7 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
136 inp += 4; 136 inp += 4;
137 size -= 4; 137 size -= 4;
138 } else { 138 } else {
139 if (chunksize > lz4_compressbound(uncomp_chunksize)) { 139 if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
140 error("chunk length is longer than allocated"); 140 error("chunk length is longer than allocated");
141 goto exit_2; 141 goto exit_2;
142 } 142 }
@@ -152,11 +152,14 @@ STATIC inline int INIT unlz4(u8 *input, long in_len,
152 out_len -= dest_len; 152 out_len -= dest_len;
153 } else 153 } else
154 dest_len = out_len; 154 dest_len = out_len;
155 ret = lz4_decompress(inp, &chunksize, outp, dest_len); 155
156 ret = LZ4_decompress_fast(inp, outp, dest_len);
157 chunksize = ret;
156#else 158#else
157 dest_len = uncomp_chunksize; 159 dest_len = uncomp_chunksize;
158 ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp, 160
159 &dest_len); 161 ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
162 dest_len = ret;
160#endif 163#endif
161 if (ret < 0) { 164 if (ret < 0) {
162 error("Decoding failed"); 165 error("Decoding failed");
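The two replacement calls follow the upstream LZ4 naming and differ in which size is known up front. A hedged sketch of the two calling conventions (in, out and the size variables are placeholders for the buffers set up above):

	/* Decompressed size known in advance: returns the number of
	 * compressed bytes consumed, or a negative value on error. */
	ret = LZ4_decompress_fast(in, out, dest_len);

	/* Compressed size known instead: returns the number of bytes
	 * written to out, at most out_capacity, negative on error. */
	ret = LZ4_decompress_safe(in, out, comp_len, out_capacity);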
diff --git a/lib/digsig.c b/lib/digsig.c
index 55b8b2f41a9e..03d7c63837ae 100644
--- a/lib/digsig.c
+++ b/lib/digsig.c
@@ -85,7 +85,7 @@ static int digsig_verify_rsa(struct key *key,
85 struct pubkey_hdr *pkh; 85 struct pubkey_hdr *pkh;
86 86
87 down_read(&key->sem); 87 down_read(&key->sem);
88 ukp = user_key_payload(key); 88 ukp = user_key_payload_locked(key);
89 89
90 if (ukp->datalen < sizeof(*pkh)) 90 if (ukp->datalen < sizeof(*pkh))
91 goto err1; 91 goto err1;
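The renamed accessor makes the locking rule explicit: the payload may only be dereferenced while the key semaphore is held, which digsig_verify_rsa() already ensures with the down_read() just above. A sketch of the contract, with the actual use elided:

	down_read(&key->sem);
	ukp = user_key_payload_locked(key);	/* valid only while key->sem is held */
	/* ... inspect ukp ... */
	up_read(&key->sem);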
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 8971370bfb16..b157b46cc9a6 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -17,8 +17,10 @@
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 18 */
19 19
20#include <linux/sched/task_stack.h>
20#include <linux/scatterlist.h> 21#include <linux/scatterlist.h>
21#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/sched/task.h>
22#include <linux/stacktrace.h> 24#include <linux/stacktrace.h>
23#include <linux/dma-debug.h> 25#include <linux/dma-debug.h>
24#include <linux/spinlock.h> 26#include <linux/spinlock.h>
@@ -1155,6 +1157,11 @@ static void check_unmap(struct dma_debug_entry *ref)
1155 dir2name[ref->direction]); 1157 dir2name[ref->direction]);
1156 } 1158 }
1157 1159
1160 /*
1161 * Drivers should use dma_mapping_error() to check the returned
1162 * addresses of dma_map_single() and dma_map_page().
1163 * If not, print this warning message. See Documentation/DMA-API.txt.
1164 */
1158 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) { 1165 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1159 err_printk(ref->dev, entry, 1166 err_printk(ref->dev, entry,
1160 "DMA-API: device driver failed to check map error" 1167 "DMA-API: device driver failed to check map error"
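The pattern the new comment asks for looks roughly like this at a driver's map site; a minimal sketch where dev, buf and size stand in for the driver's own state:

	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* never hand a failed mapping to the hardware */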
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
index 3d766e78fbe2..de26c8b68f34 100644
--- a/lib/dma-noop.c
+++ b/lib/dma-noop.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * lib/dma-noop.c 2 * lib/dma-noop.c
3 * 3 *
4 * Simple DMA noop-ops that map 1:1 with memory 4 * DMA operations that map to physical addresses without flushing memory.
5 */ 5 */
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/mm.h> 7#include <linux/mm.h>
@@ -64,7 +64,7 @@ static int dma_noop_supported(struct device *dev, u64 mask)
64 return 1; 64 return 1;
65} 65}
66 66
67struct dma_map_ops dma_noop_ops = { 67const struct dma_map_ops dma_noop_ops = {
68 .alloc = dma_noop_alloc, 68 .alloc = dma_noop_alloc,
69 .free = dma_noop_free, 69 .free = dma_noop_free,
70 .map_page = dma_noop_map_page, 70 .map_page = dma_noop_map_page,
diff --git a/lib/dma-virt.c b/lib/dma-virt.c
new file mode 100644
index 000000000000..dcd4df1f7174
--- /dev/null
+++ b/lib/dma-virt.c
@@ -0,0 +1,72 @@
1/*
2 * lib/dma-virt.c
3 *
4 * DMA operations that map to virtual addresses without flushing memory.
5 */
6#include <linux/export.h>
7#include <linux/mm.h>
8#include <linux/dma-mapping.h>
9#include <linux/scatterlist.h>
10
11static void *dma_virt_alloc(struct device *dev, size_t size,
12 dma_addr_t *dma_handle, gfp_t gfp,
13 unsigned long attrs)
14{
15 void *ret;
16
17 ret = (void *)__get_free_pages(gfp, get_order(size));
18 if (ret)
19 *dma_handle = (uintptr_t)ret;
20 return ret;
21}
22
23static void dma_virt_free(struct device *dev, size_t size,
24 void *cpu_addr, dma_addr_t dma_addr,
25 unsigned long attrs)
26{
27 free_pages((unsigned long)cpu_addr, get_order(size));
28}
29
30static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,
31 unsigned long offset, size_t size,
32 enum dma_data_direction dir,
33 unsigned long attrs)
34{
35 return (uintptr_t)(page_address(page) + offset);
36}
37
38static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
39 int nents, enum dma_data_direction dir,
40 unsigned long attrs)
41{
42 int i;
43 struct scatterlist *sg;
44
45 for_each_sg(sgl, sg, nents, i) {
46 BUG_ON(!sg_page(sg));
47 sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
48 sg_dma_len(sg) = sg->length;
49 }
50
51 return nents;
52}
53
54static int dma_virt_mapping_error(struct device *dev, dma_addr_t dma_addr)
55{
56 return false;
57}
58
59static int dma_virt_supported(struct device *dev, u64 mask)
60{
61 return true;
62}
63
64const struct dma_map_ops dma_virt_ops = {
65 .alloc = dma_virt_alloc,
66 .free = dma_virt_free,
67 .map_page = dma_virt_map_page,
68 .map_sg = dma_virt_map_sg,
69 .mapping_error = dma_virt_mapping_error,
70 .dma_supported = dma_virt_supported,
71};
72EXPORT_SYMBOL(dma_virt_ops);
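A consumer of these ops, typically a software-only device whose "DMA" is really just the CPU copying memory, points its struct device at them before any mapping call. A sketch, assuming the dev->dma_ops pointer from the same kernel series:

	/* Addresses handed out by dma_virt_ops are plain kernel virtual
	 * addresses, so only devices that never reach an IOMMU or do real
	 * bus-mastering DMA may use them. */
	dev->dma_ops = &dma_virt_ops;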
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c30d07e99dba..625375e7f11f 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -6,6 +6,7 @@
6#include <linux/kernel.h> 6#include <linux/kernel.h>
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/sched.h> 8#include <linux/sched.h>
9#include <linux/sched/debug.h>
9#include <linux/smp.h> 10#include <linux/smp.h>
10#include <linux/atomic.h> 11#include <linux/atomic.h>
11 12
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 18072ea9c20e..6ed74f78380c 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -33,7 +33,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
33{ 33{
34 unsigned long tmp; 34 unsigned long tmp;
35 35
36 if (!nbits || start >= nbits) 36 if (unlikely(start >= nbits))
37 return nbits; 37 return nbits;
38 38
39 tmp = addr[start / BITS_PER_LONG] ^ invert; 39 tmp = addr[start / BITS_PER_LONG] ^ invert;
@@ -151,7 +151,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
151{ 151{
152 unsigned long tmp; 152 unsigned long tmp;
153 153
154 if (!nbits || start >= nbits) 154 if (unlikely(start >= nbits))
155 return nbits; 155 return nbits;
156 156
157 tmp = addr[start / BITS_PER_LONG] ^ invert; 157 tmp = addr[start / BITS_PER_LONG] ^ invert;
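Both hunks drop the explicit !nbits test: start and nbits are unsigned, so nbits == 0 already makes start >= nbits true for every possible start, and the single unlikely() comparison covers both cases. A worked check of the edge case:

	/* nbits == 0: any unsigned start satisfies start >= 0 == nbits,
	 * so the function returns nbits, i.e. "no bit found". */
	find_next_bit(addr, 0, 0);	/* returns 0 */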
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index e77dfe00de36..8fa0791e8a1e 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -87,6 +87,14 @@ config FONT_6x10
87 embedded devices with a 320x240 screen, to get a reasonable number 87 embedded devices with a 320x240 screen, to get a reasonable number
88 of characters (53x24) that are still at a readable size. 88 of characters (53x24) that are still at a readable size.
89 89
90config FONT_10x18
91 bool "console 10x18 font (not supported by all drivers)" if FONTS
92 depends on FRAMEBUFFER_CONSOLE
93 help
94 This is a high resolution console font for machines with very
95 big letters. It fits between the sun 12x22 and the normal 8x16 font.
96 If other fonts are too big or too small for you, say Y, otherwise say N.
97
90config FONT_SUN8x16 98config FONT_SUN8x16
91 bool "Sparc console 8x16 font" 99 bool "Sparc console 8x16 font"
92 depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC) 100 depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
@@ -101,14 +109,6 @@ config FONT_SUN12x22
101 big letters (like the letters used in the SPARC PROM). If the 109 big letters (like the letters used in the SPARC PROM). If the
102 standard font is unreadable for you, say Y, otherwise say N. 110 standard font is unreadable for you, say Y, otherwise say N.
103 111
104config FONT_10x18
105 bool "console 10x18 font (not supported by all drivers)" if FONTS
106 depends on FRAMEBUFFER_CONSOLE
107 help
108 This is a high resolution console font for machines with very
109 big letters. It fits between the sun 12x22 and the normal 8x16 font.
110 If other fonts are too big or too small for you, say Y, otherwise say N.
111
112config FONT_AUTOSELECT 112config FONT_AUTOSELECT
113 def_bool y 113 def_bool y
114 depends on !FONT_8x8 114 depends on !FONT_8x8
diff --git a/lib/glob.c b/lib/glob.c
index 500fc80d23e1..0ba3ea86b546 100644
--- a/lib/glob.c
+++ b/lib/glob.c
@@ -121,167 +121,3 @@ backtrack:
121 } 121 }
122} 122}
123EXPORT_SYMBOL(glob_match); 123EXPORT_SYMBOL(glob_match);
124
125
126#ifdef CONFIG_GLOB_SELFTEST
127
128#include <linux/printk.h>
129#include <linux/moduleparam.h>
130
131/* Boot with "glob.verbose=1" to show successful tests, too */
132static bool verbose = false;
133module_param(verbose, bool, 0);
134
135struct glob_test {
136 char const *pat, *str;
137 bool expected;
138};
139
140static bool __pure __init test(char const *pat, char const *str, bool expected)
141{
142 bool match = glob_match(pat, str);
143 bool success = match == expected;
144
145 /* Can't get string literals into a particular section, so... */
146 static char const msg_error[] __initconst =
147 KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
148 static char const msg_ok[] __initconst =
149 KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
150 static char const mismatch[] __initconst = "mismatch";
151 char const *message;
152
153 if (!success)
154 message = msg_error;
155 else if (verbose)
156 message = msg_ok;
157 else
158 return success;
159
160 printk(message, pat, str, mismatch + 3*match);
161 return success;
162}
163
164/*
165 * The tests are all jammed together in one array to make it simpler
166 * to place that array in the .init.rodata section. The obvious
167 * "array of structures containing char *" has no way to force the
168 * pointed-to strings to be in a particular section.
169 *
170 * Anyway, a test consists of:
171 * 1. Expected glob_match result: '1' or '0'.
172 * 2. Pattern to match: null-terminated string
173 * 3. String to match against: null-terminated string
174 *
175 * The list of tests is terminated with a final '\0' instead of
176 * a glob_match result character.
177 */
178static char const glob_tests[] __initconst =
179 /* Some basic tests */
180 "1" "a\0" "a\0"
181 "0" "a\0" "b\0"
182 "0" "a\0" "aa\0"
183 "0" "a\0" "\0"
184 "1" "\0" "\0"
185 "0" "\0" "a\0"
186 /* Simple character class tests */
187 "1" "[a]\0" "a\0"
188 "0" "[a]\0" "b\0"
189 "0" "[!a]\0" "a\0"
190 "1" "[!a]\0" "b\0"
191 "1" "[ab]\0" "a\0"
192 "1" "[ab]\0" "b\0"
193 "0" "[ab]\0" "c\0"
194 "1" "[!ab]\0" "c\0"
195 "1" "[a-c]\0" "b\0"
196 "0" "[a-c]\0" "d\0"
197 /* Corner cases in character class parsing */
198 "1" "[a-c-e-g]\0" "-\0"
199 "0" "[a-c-e-g]\0" "d\0"
200 "1" "[a-c-e-g]\0" "f\0"
201 "1" "[]a-ceg-ik[]\0" "a\0"
202 "1" "[]a-ceg-ik[]\0" "]\0"
203 "1" "[]a-ceg-ik[]\0" "[\0"
204 "1" "[]a-ceg-ik[]\0" "h\0"
205 "0" "[]a-ceg-ik[]\0" "f\0"
206 "0" "[!]a-ceg-ik[]\0" "h\0"
207 "0" "[!]a-ceg-ik[]\0" "]\0"
208 "1" "[!]a-ceg-ik[]\0" "f\0"
209 /* Simple wild cards */
210 "1" "?\0" "a\0"
211 "0" "?\0" "aa\0"
212 "0" "??\0" "a\0"
213 "1" "?x?\0" "axb\0"
214 "0" "?x?\0" "abx\0"
215 "0" "?x?\0" "xab\0"
216 /* Asterisk wild cards (backtracking) */
217 "0" "*??\0" "a\0"
218 "1" "*??\0" "ab\0"
219 "1" "*??\0" "abc\0"
220 "1" "*??\0" "abcd\0"
221 "0" "??*\0" "a\0"
222 "1" "??*\0" "ab\0"
223 "1" "??*\0" "abc\0"
224 "1" "??*\0" "abcd\0"
225 "0" "?*?\0" "a\0"
226 "1" "?*?\0" "ab\0"
227 "1" "?*?\0" "abc\0"
228 "1" "?*?\0" "abcd\0"
229 "1" "*b\0" "b\0"
230 "1" "*b\0" "ab\0"
231 "0" "*b\0" "ba\0"
232 "1" "*b\0" "bb\0"
233 "1" "*b\0" "abb\0"
234 "1" "*b\0" "bab\0"
235 "1" "*bc\0" "abbc\0"
236 "1" "*bc\0" "bc\0"
237 "1" "*bc\0" "bbc\0"
238 "1" "*bc\0" "bcbc\0"
239 /* Multiple asterisks (complex backtracking) */
240 "1" "*ac*\0" "abacadaeafag\0"
241 "1" "*ac*ae*ag*\0" "abacadaeafag\0"
242 "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
243 "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
244 "1" "*abcd*\0" "abcabcabcabcdefg\0"
245 "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
246 "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
247 "0" "*abcd*\0" "abcabcabcabcefg\0"
248 "0" "*ab*cd*\0" "abcabcabcabcefg\0";
249
250static int __init glob_init(void)
251{
252 unsigned successes = 0;
253 unsigned n = 0;
254 char const *p = glob_tests;
255 static char const message[] __initconst =
256 KERN_INFO "glob: %u self-tests passed, %u failed\n";
257
258 /*
259 * Tests are jammed together in a string. The first byte is '1'
260 * or '0' to indicate the expected outcome, or '\0' to indicate the
261 * end of the tests. Then come two null-terminated strings: the
262 * pattern and the string to match it against.
263 */
264 while (*p) {
265 bool expected = *p++ & 1;
266 char const *pat = p;
267
268 p += strlen(p) + 1;
269 successes += test(pat, p, expected);
270 p += strlen(p) + 1;
271 n++;
272 }
273
274 n -= successes;
275 printk(message, successes, n);
276
277 /* What's the errno for "kernel bug detected"? Guess... */
278 return n ? -ECANCELED : 0;
279}
280
281/* We need a dummy exit function to allow unload */
282static void __exit glob_fini(void) { }
283
284module_init(glob_init);
285module_exit(glob_fini);
286
287#endif /* CONFIG_GLOB_SELFTEST */
diff --git a/lib/globtest.c b/lib/globtest.c
new file mode 100644
index 000000000000..d8e97d43b905
--- /dev/null
+++ b/lib/globtest.c
@@ -0,0 +1,167 @@
1/*
2 * Extracted from glob.c
3 */
4
5#include <linux/module.h>
6#include <linux/moduleparam.h>
7#include <linux/glob.h>
8#include <linux/printk.h>
9
10/* Boot with "glob.verbose=1" to show successful tests, too */
11static bool verbose = false;
12module_param(verbose, bool, 0);
13
14struct glob_test {
15 char const *pat, *str;
16 bool expected;
17};
18
19static bool __pure __init test(char const *pat, char const *str, bool expected)
20{
21 bool match = glob_match(pat, str);
22 bool success = match == expected;
23
24 /* Can't get string literals into a particular section, so... */
25 static char const msg_error[] __initconst =
26 KERN_ERR "glob: \"%s\" vs. \"%s\": %s *** ERROR ***\n";
27 static char const msg_ok[] __initconst =
28 KERN_DEBUG "glob: \"%s\" vs. \"%s\": %s OK\n";
29 static char const mismatch[] __initconst = "mismatch";
30 char const *message;
31
32 if (!success)
33 message = msg_error;
34 else if (verbose)
35 message = msg_ok;
36 else
37 return success;
38
39 printk(message, pat, str, mismatch + 3*match);
40 return success;
41}
42
43/*
44 * The tests are all jammed together in one array to make it simpler
45 * to place that array in the .init.rodata section. The obvious
46 * "array of structures containing char *" has no way to force the
47 * pointed-to strings to be in a particular section.
48 *
49 * Anyway, a test consists of:
50 * 1. Expected glob_match result: '1' or '0'.
51 * 2. Pattern to match: null-terminated string
52 * 3. String to match against: null-terminated string
53 *
54 * The list of tests is terminated with a final '\0' instead of
55 * a glob_match result character.
56 */
57static char const glob_tests[] __initconst =
58 /* Some basic tests */
59 "1" "a\0" "a\0"
60 "0" "a\0" "b\0"
61 "0" "a\0" "aa\0"
62 "0" "a\0" "\0"
63 "1" "\0" "\0"
64 "0" "\0" "a\0"
65 /* Simple character class tests */
66 "1" "[a]\0" "a\0"
67 "0" "[a]\0" "b\0"
68 "0" "[!a]\0" "a\0"
69 "1" "[!a]\0" "b\0"
70 "1" "[ab]\0" "a\0"
71 "1" "[ab]\0" "b\0"
72 "0" "[ab]\0" "c\0"
73 "1" "[!ab]\0" "c\0"
74 "1" "[a-c]\0" "b\0"
75 "0" "[a-c]\0" "d\0"
76 /* Corner cases in character class parsing */
77 "1" "[a-c-e-g]\0" "-\0"
78 "0" "[a-c-e-g]\0" "d\0"
79 "1" "[a-c-e-g]\0" "f\0"
80 "1" "[]a-ceg-ik[]\0" "a\0"
81 "1" "[]a-ceg-ik[]\0" "]\0"
82 "1" "[]a-ceg-ik[]\0" "[\0"
83 "1" "[]a-ceg-ik[]\0" "h\0"
84 "0" "[]a-ceg-ik[]\0" "f\0"
85 "0" "[!]a-ceg-ik[]\0" "h\0"
86 "0" "[!]a-ceg-ik[]\0" "]\0"
87 "1" "[!]a-ceg-ik[]\0" "f\0"
88 /* Simple wild cards */
89 "1" "?\0" "a\0"
90 "0" "?\0" "aa\0"
91 "0" "??\0" "a\0"
92 "1" "?x?\0" "axb\0"
93 "0" "?x?\0" "abx\0"
94 "0" "?x?\0" "xab\0"
95 /* Asterisk wild cards (backtracking) */
96 "0" "*??\0" "a\0"
97 "1" "*??\0" "ab\0"
98 "1" "*??\0" "abc\0"
99 "1" "*??\0" "abcd\0"
100 "0" "??*\0" "a\0"
101 "1" "??*\0" "ab\0"
102 "1" "??*\0" "abc\0"
103 "1" "??*\0" "abcd\0"
104 "0" "?*?\0" "a\0"
105 "1" "?*?\0" "ab\0"
106 "1" "?*?\0" "abc\0"
107 "1" "?*?\0" "abcd\0"
108 "1" "*b\0" "b\0"
109 "1" "*b\0" "ab\0"
110 "0" "*b\0" "ba\0"
111 "1" "*b\0" "bb\0"
112 "1" "*b\0" "abb\0"
113 "1" "*b\0" "bab\0"
114 "1" "*bc\0" "abbc\0"
115 "1" "*bc\0" "bc\0"
116 "1" "*bc\0" "bbc\0"
117 "1" "*bc\0" "bcbc\0"
118 /* Multiple asterisks (complex backtracking) */
119 "1" "*ac*\0" "abacadaeafag\0"
120 "1" "*ac*ae*ag*\0" "abacadaeafag\0"
121 "1" "*a*b*[bc]*[ef]*g*\0" "abacadaeafag\0"
122 "0" "*a*b*[ef]*[cd]*g*\0" "abacadaeafag\0"
123 "1" "*abcd*\0" "abcabcabcabcdefg\0"
124 "1" "*ab*cd*\0" "abcabcabcabcdefg\0"
125 "1" "*abcd*abcdef*\0" "abcabcdabcdeabcdefg\0"
126 "0" "*abcd*\0" "abcabcabcabcefg\0"
127 "0" "*ab*cd*\0" "abcabcabcabcefg\0";
128
129static int __init glob_init(void)
130{
131 unsigned successes = 0;
132 unsigned n = 0;
133 char const *p = glob_tests;
134 static char const message[] __initconst =
135 KERN_INFO "glob: %u self-tests passed, %u failed\n";
136
137 /*
138 * Tests are jammed together in a string. The first byte is '1'
139 * or '0' to indicate the expected outcome, or '\0' to indicate the
140 * end of the tests. Then come two null-terminated strings: the
141 * pattern and the string to match it against.
142 */
143 while (*p) {
144 bool expected = *p++ & 1;
145 char const *pat = p;
146
147 p += strlen(p) + 1;
148 successes += test(pat, p, expected);
149 p += strlen(p) + 1;
150 n++;
151 }
152
153 n -= successes;
154 printk(message, successes, n);
155
156 /* What's the errno for "kernel bug detected"? Guess... */
157 return n ? -ECANCELED : 0;
158}
159
160/* We need a dummy exit function to allow unload */
161static void __exit glob_fini(void) { }
162
163module_init(glob_init);
164module_exit(glob_fini);
165
166MODULE_DESCRIPTION("glob(7) matching tests");
167MODULE_LICENSE("Dual MIT/GPL");
diff --git a/lib/idr.c b/lib/idr.c
index 52d2979a05e8..b13682bb0a1c 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -1,1068 +1,409 @@
1/* 1#include <linux/bitmap.h>
2 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3 * Copyright (C) 2002 by Concurrent Computer Corporation
4 * Distributed under the GNU GPL license version 2.
5 *
6 * Modified by George Anzinger to reuse immediately and to use
7 * find bit instructions. Also removed _irq on spinlocks.
8 *
9 * Modified by Nadia Derbey to make it RCU safe.
10 *
11 * Small id to pointer translation service.
12 *
13 * It uses a radix tree like structure as a sparse array indexed
14 * by the id to obtain the pointer. The bitmap makes allocating
15 * a new id quick.
16 *
 17 * You call it to allocate an id (an int) and associate with that id a
 18 * pointer or whatever; we treat it as a (void *). You can pass this
 19 * id to a user to pass back at a later time. You then pass
20 * that id to this code and it returns your pointer.
21 */
22
23#ifndef TEST // to test in user space...
24#include <linux/slab.h>
25#include <linux/init.h>
26#include <linux/export.h> 2#include <linux/export.h>
27#endif
28#include <linux/err.h>
29#include <linux/string.h>
30#include <linux/idr.h> 3#include <linux/idr.h>
4#include <linux/slab.h>
31#include <linux/spinlock.h> 5#include <linux/spinlock.h>
32#include <linux/percpu.h>
33
34#define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
35#define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
36
37/* Leave the possibility of an incomplete final layer */
38#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
39 6
40/* Number of id_layer structs to leave in free list */ 7DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
41#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
42
43static struct kmem_cache *idr_layer_cache;
44static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
45static DEFINE_PER_CPU(int, idr_preload_cnt);
46static DEFINE_SPINLOCK(simple_ida_lock); 8static DEFINE_SPINLOCK(simple_ida_lock);
47 9
48/* the maximum ID which can be allocated given idr->layers */
49static int idr_max(int layers)
50{
51 int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
52
53 return (1 << bits) - 1;
54}
55
56/*
57 * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
58 * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
59 * so on.
60 */
61static int idr_layer_prefix_mask(int layer)
62{
63 return ~idr_max(layer + 1);
64}
65
66static struct idr_layer *get_from_free_list(struct idr *idp)
67{
68 struct idr_layer *p;
69 unsigned long flags;
70
71 spin_lock_irqsave(&idp->lock, flags);
72 if ((p = idp->id_free)) {
73 idp->id_free = p->ary[0];
74 idp->id_free_cnt--;
75 p->ary[0] = NULL;
76 }
77 spin_unlock_irqrestore(&idp->lock, flags);
78 return(p);
79}
80
81/** 10/**
82 * idr_layer_alloc - allocate a new idr_layer 11 * idr_alloc - allocate an id
83 * @gfp_mask: allocation mask 12 * @idr: idr handle
84 * @layer_idr: optional idr to allocate from
85 *
86 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
87 * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
88 * an idr_layer from @idr->id_free.
89 *
90 * @layer_idr is to maintain backward compatibility with the old alloc
91 * interface - idr_pre_get() and idr_get_new*() - and will be removed
92 * together with per-pool preload buffer.
93 */
94static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
95{
96 struct idr_layer *new;
97
98 /* this is the old path, bypass to get_from_free_list() */
99 if (layer_idr)
100 return get_from_free_list(layer_idr);
101
102 /*
103 * Try to allocate directly from kmem_cache. We want to try this
104 * before preload buffer; otherwise, non-preloading idr_alloc()
105 * users will end up taking advantage of preloading ones. As the
106 * following is allowed to fail for preloaded cases, suppress
107 * warning this time.
108 */
109 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
110 if (new)
111 return new;
112
113 /*
114 * Try to fetch one from the per-cpu preload buffer if in process
115 * context. See idr_preload() for details.
116 */
117 if (!in_interrupt()) {
118 preempt_disable();
119 new = __this_cpu_read(idr_preload_head);
120 if (new) {
121 __this_cpu_write(idr_preload_head, new->ary[0]);
122 __this_cpu_dec(idr_preload_cnt);
123 new->ary[0] = NULL;
124 }
125 preempt_enable();
126 if (new)
127 return new;
128 }
129
130 /*
131 * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
132 * that memory allocation failure warning is printed as intended.
133 */
134 return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
135}
136
137static void idr_layer_rcu_free(struct rcu_head *head)
138{
139 struct idr_layer *layer;
140
141 layer = container_of(head, struct idr_layer, rcu_head);
142 kmem_cache_free(idr_layer_cache, layer);
143}
144
145static inline void free_layer(struct idr *idr, struct idr_layer *p)
146{
147 if (idr->hint == p)
148 RCU_INIT_POINTER(idr->hint, NULL);
149 call_rcu(&p->rcu_head, idr_layer_rcu_free);
150}
151
152/* only called when idp->lock is held */
153static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
154{
155 p->ary[0] = idp->id_free;
156 idp->id_free = p;
157 idp->id_free_cnt++;
158}
159
160static void move_to_free_list(struct idr *idp, struct idr_layer *p)
161{
162 unsigned long flags;
163
164 /*
165 * Depends on the return element being zeroed.
166 */
167 spin_lock_irqsave(&idp->lock, flags);
168 __move_to_free_list(idp, p);
169 spin_unlock_irqrestore(&idp->lock, flags);
170}
171
172static void idr_mark_full(struct idr_layer **pa, int id)
173{
174 struct idr_layer *p = pa[0];
175 int l = 0;
176
177 __set_bit(id & IDR_MASK, p->bitmap);
178 /*
179 * If this layer is full mark the bit in the layer above to
180 * show that this part of the radix tree is full. This may
181 * complete the layer above and require walking up the radix
182 * tree.
183 */
184 while (bitmap_full(p->bitmap, IDR_SIZE)) {
185 if (!(p = pa[++l]))
186 break;
187 id = id >> IDR_BITS;
188 __set_bit((id & IDR_MASK), p->bitmap);
189 }
190}
191
192static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
193{
194 while (idp->id_free_cnt < MAX_IDR_FREE) {
195 struct idr_layer *new;
196 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
197 if (new == NULL)
198 return (0);
199 move_to_free_list(idp, new);
200 }
201 return 1;
202}
203
204/**
205 * sub_alloc - try to allocate an id without growing the tree depth
206 * @idp: idr handle
207 * @starting_id: id to start search at
208 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
209 * @gfp_mask: allocation mask for idr_layer_alloc()
210 * @layer_idr: optional idr passed to idr_layer_alloc()
211 *
212 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
213 * growing its depth. Returns
214 *
215 * the allocated id >= 0 if successful,
216 * -EAGAIN if the tree needs to grow for allocation to succeed,
217 * -ENOSPC if the id space is exhausted,
218 * -ENOMEM if more idr_layers need to be allocated.
219 */
220static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
221 gfp_t gfp_mask, struct idr *layer_idr)
222{
223 int n, m, sh;
224 struct idr_layer *p, *new;
225 int l, id, oid;
226
227 id = *starting_id;
228 restart:
229 p = idp->top;
230 l = idp->layers;
231 pa[l--] = NULL;
232 while (1) {
233 /*
 234 * We run around this while loop until we reach the leaf node...
235 */
236 n = (id >> (IDR_BITS*l)) & IDR_MASK;
237 m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
238 if (m == IDR_SIZE) {
239 /* no space available go back to previous layer. */
240 l++;
241 oid = id;
242 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
243
244 /* if already at the top layer, we need to grow */
245 if (id > idr_max(idp->layers)) {
246 *starting_id = id;
247 return -EAGAIN;
248 }
249 p = pa[l];
250 BUG_ON(!p);
251
252 /* If we need to go up one layer, continue the
253 * loop; otherwise, restart from the top.
254 */
255 sh = IDR_BITS * (l + 1);
256 if (oid >> sh == id >> sh)
257 continue;
258 else
259 goto restart;
260 }
261 if (m != n) {
262 sh = IDR_BITS*l;
263 id = ((id >> sh) ^ n ^ m) << sh;
264 }
265 if ((id >= MAX_IDR_BIT) || (id < 0))
266 return -ENOSPC;
267 if (l == 0)
268 break;
269 /*
270 * Create the layer below if it is missing.
271 */
272 if (!p->ary[m]) {
273 new = idr_layer_alloc(gfp_mask, layer_idr);
274 if (!new)
275 return -ENOMEM;
276 new->layer = l-1;
277 new->prefix = id & idr_layer_prefix_mask(new->layer);
278 rcu_assign_pointer(p->ary[m], new);
279 p->count++;
280 }
281 pa[l--] = p;
282 p = p->ary[m];
283 }
284
285 pa[l] = p;
286 return id;
287}
288
289static int idr_get_empty_slot(struct idr *idp, int starting_id,
290 struct idr_layer **pa, gfp_t gfp_mask,
291 struct idr *layer_idr)
292{
293 struct idr_layer *p, *new;
294 int layers, v, id;
295 unsigned long flags;
296
297 id = starting_id;
298build_up:
299 p = idp->top;
300 layers = idp->layers;
301 if (unlikely(!p)) {
302 if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
303 return -ENOMEM;
304 p->layer = 0;
305 layers = 1;
306 }
307 /*
308 * Add a new layer to the top of the tree if the requested
309 * id is larger than the currently allocated space.
310 */
311 while (id > idr_max(layers)) {
312 layers++;
313 if (!p->count) {
314 /* special case: if the tree is currently empty,
315 * then we grow the tree by moving the top node
316 * upwards.
317 */
318 p->layer++;
319 WARN_ON_ONCE(p->prefix);
320 continue;
321 }
322 if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
323 /*
324 * The allocation failed. If we built part of
325 * the structure tear it down.
326 */
327 spin_lock_irqsave(&idp->lock, flags);
328 for (new = p; p && p != idp->top; new = p) {
329 p = p->ary[0];
330 new->ary[0] = NULL;
331 new->count = 0;
332 bitmap_clear(new->bitmap, 0, IDR_SIZE);
333 __move_to_free_list(idp, new);
334 }
335 spin_unlock_irqrestore(&idp->lock, flags);
336 return -ENOMEM;
337 }
338 new->ary[0] = p;
339 new->count = 1;
340 new->layer = layers-1;
341 new->prefix = id & idr_layer_prefix_mask(new->layer);
342 if (bitmap_full(p->bitmap, IDR_SIZE))
343 __set_bit(0, new->bitmap);
344 p = new;
345 }
346 rcu_assign_pointer(idp->top, p);
347 idp->layers = layers;
348 v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
349 if (v == -EAGAIN)
350 goto build_up;
351 return(v);
352}
353
354/*
355 * @id and @pa are from a successful allocation from idr_get_empty_slot().
356 * Install the user pointer @ptr and mark the slot full.
357 */
358static void idr_fill_slot(struct idr *idr, void *ptr, int id,
359 struct idr_layer **pa)
360{
361 /* update hint used for lookup, cleared from free_layer() */
362 rcu_assign_pointer(idr->hint, pa[0]);
363
364 rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
365 pa[0]->count++;
366 idr_mark_full(pa, id);
367}
368
369
370/**
371 * idr_preload - preload for idr_alloc()
372 * @gfp_mask: allocation mask to use for preloading
373 *
374 * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
375 * process context and each idr_preload() invocation should be matched with
376 * idr_preload_end(). Note that preemption is disabled while preloaded.
377 *
378 * The first idr_alloc() in the preloaded section can be treated as if it
379 * were invoked with @gfp_mask used for preloading. This allows using more
380 * permissive allocation masks for idrs protected by spinlocks.
381 *
382 * For example, if idr_alloc() below fails, the failure can be treated as
383 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
384 *
385 * idr_preload(GFP_KERNEL);
386 * spin_lock(lock);
387 *
388 * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
389 *
390 * spin_unlock(lock);
391 * idr_preload_end();
392 * if (id < 0)
393 * error;
394 */
395void idr_preload(gfp_t gfp_mask)
396{
397 /*
398 * Consuming preload buffer from non-process context breaks preload
399 * allocation guarantee. Disallow usage from those contexts.
400 */
401 WARN_ON_ONCE(in_interrupt());
402 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
403
404 preempt_disable();
405
406 /*
407 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
408 * return value from idr_alloc() needs to be checked for failure
409 * anyway. Silently give up if allocation fails. The caller can
410 * treat failures from idr_alloc() as if idr_alloc() were called
411 * with @gfp_mask which should be enough.
412 */
413 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
414 struct idr_layer *new;
415
416 preempt_enable();
417 new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
418 preempt_disable();
419 if (!new)
420 break;
421
422 /* link the new one to per-cpu preload list */
423 new->ary[0] = __this_cpu_read(idr_preload_head);
424 __this_cpu_write(idr_preload_head, new);
425 __this_cpu_inc(idr_preload_cnt);
426 }
427}
428EXPORT_SYMBOL(idr_preload);
429
430/**
431 * idr_alloc - allocate new idr entry
432 * @idr: the (initialized) idr
433 * @ptr: pointer to be associated with the new id 13 * @ptr: pointer to be associated with the new id
434 * @start: the minimum id (inclusive) 14 * @start: the minimum id (inclusive)
435 * @end: the maximum id (exclusive, <= 0 for max) 15 * @end: the maximum id (exclusive)
436 * @gfp_mask: memory allocation flags 16 * @gfp: memory allocation flags
437 * 17 *
438 * Allocate an id in [start, end) and associate it with @ptr. If no ID is 18 * Allocates an unused ID in the range [start, end). Returns -ENOSPC
439 * available in the specified range, returns -ENOSPC. On memory allocation 19 * if there are no unused IDs in that range.
440 * failure, returns -ENOMEM.
441 * 20 *
442 * Note that @end is treated as max when <= 0. This is to always allow 21 * Note that @end is treated as max when <= 0. This is to always allow
443 * using @start + N as @end as long as N is inside integer range. 22 * using @start + N as @end as long as N is inside integer range.
444 * 23 *
445 * The user is responsible for exclusively synchronizing all operations 24 * Simultaneous modifications to the @idr are not allowed and should be
446 * which may modify @idr. However, read-only accesses such as idr_find() 25 * prevented by the user, usually with a lock. idr_alloc() may be called
447 * or iteration can be performed under RCU read lock provided the user 26 * concurrently with read-only accesses to the @idr, such as idr_find() and
448 * destroys @ptr in RCU-safe way after removal from idr. 27 * idr_for_each_entry().
449 */ 28 */
450int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask) 29int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
451{ 30{
452 int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */ 31 void __rcu **slot;
453 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 32 struct radix_tree_iter iter;
454 int id;
455 33
456 might_sleep_if(gfpflags_allow_blocking(gfp_mask));
457
458 /* sanity checks */
459 if (WARN_ON_ONCE(start < 0)) 34 if (WARN_ON_ONCE(start < 0))
460 return -EINVAL; 35 return -EINVAL;
461 if (unlikely(max < start)) 36 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
462 return -ENOSPC; 37 return -EINVAL;
463 38
464 /* allocate id */ 39 radix_tree_iter_init(&iter, start);
465 id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL); 40 slot = idr_get_free(&idr->idr_rt, &iter, gfp, end);
466 if (unlikely(id < 0)) 41 if (IS_ERR(slot))
467 return id; 42 return PTR_ERR(slot);
468 if (unlikely(id > max))
469 return -ENOSPC;
470 43
471 idr_fill_slot(idr, ptr, id, pa); 44 radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
472 return id; 45 radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
46 return iter.index;
473} 47}
474EXPORT_SYMBOL_GPL(idr_alloc); 48EXPORT_SYMBOL_GPL(idr_alloc);
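A minimal caller sketch for this API (hypothetical names; assumes <linux/idr.h> and <linux/spinlock.h>, process context, and that the caller serializes modifications as the kernel-doc above requires):

	static DEFINE_IDR(obj_idr);		/* hypothetical IDR instance */
	static DEFINE_SPINLOCK(obj_lock);	/* serializes modifications */

	static int obj_register(void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate outside the lock */
		spin_lock(&obj_lock);
		/* end <= 0 means "no upper bound" per the kernel-doc above */
		id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(&obj_lock);
		idr_preload_end();

		return id;	/* >= 0 on success, -ENOSPC or -ENOMEM on failure */
	}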
475 49
476/** 50/**
477 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion 51 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
478 * @idr: the (initialized) idr 52 * @idr: idr handle
479 * @ptr: pointer to be associated with the new id 53 * @ptr: pointer to be associated with the new id
480 * @start: the minimum id (inclusive) 54 * @start: the minimum id (inclusive)
481 * @end: the maximum id (exclusive, <= 0 for max) 55 * @end: the maximum id (exclusive)
482 * @gfp_mask: memory allocation flags 56 * @gfp: memory allocation flags
483 *
484 * Essentially the same as idr_alloc, but prefers to allocate progressively
485 * higher ids if it can. If the "cur" counter wraps, then it will start again
486 * at the "start" end of the range and allocate one that has already been used.
487 */
488int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
489 gfp_t gfp_mask)
490{
491 int id;
492
493 id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
494 if (id == -ENOSPC)
495 id = idr_alloc(idr, ptr, start, end, gfp_mask);
496
497 if (likely(id >= 0))
498 idr->cur = id + 1;
499 return id;
500}
501EXPORT_SYMBOL(idr_alloc_cyclic);
502
503static void idr_remove_warning(int id)
504{
505 WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
506}
507
508static void sub_remove(struct idr *idp, int shift, int id)
509{
510 struct idr_layer *p = idp->top;
511 struct idr_layer **pa[MAX_IDR_LEVEL + 1];
512 struct idr_layer ***paa = &pa[0];
513 struct idr_layer *to_free;
514 int n;
515
516 *paa = NULL;
517 *++paa = &idp->top;
518
519 while ((shift > 0) && p) {
520 n = (id >> shift) & IDR_MASK;
521 __clear_bit(n, p->bitmap);
522 *++paa = &p->ary[n];
523 p = p->ary[n];
524 shift -= IDR_BITS;
525 }
526 n = id & IDR_MASK;
527 if (likely(p != NULL && test_bit(n, p->bitmap))) {
528 __clear_bit(n, p->bitmap);
529 RCU_INIT_POINTER(p->ary[n], NULL);
530 to_free = NULL;
531 while(*paa && ! --((**paa)->count)){
532 if (to_free)
533 free_layer(idp, to_free);
534 to_free = **paa;
535 **paa-- = NULL;
536 }
537 if (!*paa)
538 idp->layers = 0;
539 if (to_free)
540 free_layer(idp, to_free);
541 } else
542 idr_remove_warning(id);
543}
544
545/**
546 * idr_remove - remove the given id and free its slot
547 * @idp: idr handle
548 * @id: unique key
549 */
550void idr_remove(struct idr *idp, int id)
551{
552 struct idr_layer *p;
553 struct idr_layer *to_free;
554
555 if (id < 0)
556 return;
557
558 if (id > idr_max(idp->layers)) {
559 idr_remove_warning(id);
560 return;
561 }
562
563 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
564 if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
565 idp->top->ary[0]) {
566 /*
567 * Single child at leftmost slot: we can shrink the tree.
568 * This level is not needed anymore since when layers are
569 * inserted, they are inserted at the top of the existing
570 * tree.
571 */
572 to_free = idp->top;
573 p = idp->top->ary[0];
574 rcu_assign_pointer(idp->top, p);
575 --idp->layers;
576 to_free->count = 0;
577 bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
578 free_layer(idp, to_free);
579 }
580}
581EXPORT_SYMBOL(idr_remove);
582
583static void __idr_remove_all(struct idr *idp)
584{
585 int n, id, max;
586 int bt_mask;
587 struct idr_layer *p;
588 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
589 struct idr_layer **paa = &pa[0];
590
591 n = idp->layers * IDR_BITS;
592 *paa = idp->top;
593 RCU_INIT_POINTER(idp->top, NULL);
594 max = idr_max(idp->layers);
595
596 id = 0;
597 while (id >= 0 && id <= max) {
598 p = *paa;
599 while (n > IDR_BITS && p) {
600 n -= IDR_BITS;
601 p = p->ary[(id >> n) & IDR_MASK];
602 *++paa = p;
603 }
604
605 bt_mask = id;
606 id += 1 << n;
607 /* Get the highest bit that the above add changed from 0->1. */
608 while (n < fls(id ^ bt_mask)) {
609 if (*paa)
610 free_layer(idp, *paa);
611 n += IDR_BITS;
612 --paa;
613 }
614 }
615 idp->layers = 0;
616}
617
618/**
619 * idr_destroy - release all cached layers within an idr tree
620 * @idp: idr handle
621 *
622 * Free all id mappings and all idp_layers. After this function, @idp is
623 * completely unused and can be freed / recycled. The caller is
624 * responsible for ensuring that no one else accesses @idp during or after
625 * idr_destroy().
626 * 57 *
627 * A typical clean-up sequence for objects stored in an idr tree will use 58 * Allocates an ID larger than the last ID allocated if one is available.
628 * idr_for_each() to free all objects, if necessary, then idr_destroy() to 59 * If not, it will attempt to allocate the smallest ID that is larger or
629 * free up the id mappings and cached idr_layers. 60 * equal to @start.
630 */ 61 */
631void idr_destroy(struct idr *idp) 62int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
632{ 63{
633 __idr_remove_all(idp); 64 int id, curr = idr->idr_next;
634 65
635 while (idp->id_free_cnt) { 66 if (curr < start)
636 struct idr_layer *p = get_from_free_list(idp); 67 curr = start;
637 kmem_cache_free(idr_layer_cache, p);
638 }
639}
640EXPORT_SYMBOL(idr_destroy);
641 68
642void *idr_find_slowpath(struct idr *idp, int id) 69 id = idr_alloc(idr, ptr, curr, end, gfp);
643{ 70 if ((id == -ENOSPC) && (curr > start))
644 int n; 71 id = idr_alloc(idr, ptr, start, curr, gfp);
645 struct idr_layer *p;
646
647 if (id < 0)
648 return NULL;
649
650 p = rcu_dereference_raw(idp->top);
651 if (!p)
652 return NULL;
653 n = (p->layer+1) * IDR_BITS;
654 72
655 if (id > idr_max(p->layer + 1)) 73 if (id >= 0)
656 return NULL; 74 idr->idr_next = id + 1U;
657 BUG_ON(n == 0);
658 75
659 while (n > 0 && p) { 76 return id;
660 n -= IDR_BITS;
661 BUG_ON(n != p->layer*IDR_BITS);
662 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
663 }
664 return((void *)p);
665} 77}
666EXPORT_SYMBOL(idr_find_slowpath); 78EXPORT_SYMBOL(idr_alloc_cyclic);
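The cyclic variant is the one to reach for when immediately recycling a just-freed ID could confuse consumers, e.g. handles exposed to userspace. A hypothetical call, reusing the obj_idr sketch above:

	/* hand out IDs that keep increasing until the range wraps */
	id = idr_alloc_cyclic(&obj_idr, obj, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;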
667 79
668/** 80/**
669 * idr_for_each - iterate through all stored pointers 81 * idr_for_each - iterate through all stored pointers
670 * @idp: idr handle 82 * @idr: idr handle
671 * @fn: function to be called for each pointer 83 * @fn: function to be called for each pointer
672 * @data: data passed back to callback function 84 * @data: data passed to callback function
673 * 85 *
674 * Iterate over the pointers registered with the given idr. The 86 * The callback function will be called for each entry in @idr, passing
675 * callback function will be called for each pointer currently 87 * the id, the pointer and the data pointer passed to this function.
676 * registered, passing the id, the pointer and the data pointer passed
677 * to this function. It is not safe to modify the idr tree while in
678 * the callback, so functions such as idr_get_new and idr_remove are
679 * not allowed.
680 * 88 *
681 * We check the return of @fn each time. If it returns anything other 89 * If @fn returns anything other than %0, the iteration stops and that
682 * than %0, we break out and return that value. 90 * value is returned from this function.
683 * 91 *
684 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove(). 92 * idr_for_each() can be called concurrently with idr_alloc() and
93 * idr_remove() if protected by RCU. Newly added entries may not be
94 * seen and deleted entries may be seen, but adding and removing entries
95 * will not cause other entries to be skipped, nor spurious ones to be seen.
685 */ 96 */
686int idr_for_each(struct idr *idp, 97int idr_for_each(const struct idr *idr,
687 int (*fn)(int id, void *p, void *data), void *data) 98 int (*fn)(int id, void *p, void *data), void *data)
688{ 99{
689 int n, id, max, error = 0; 100 struct radix_tree_iter iter;
690 struct idr_layer *p; 101 void __rcu **slot;
691 struct idr_layer *pa[MAX_IDR_LEVEL + 1];
692 struct idr_layer **paa = &pa[0];
693
694 n = idp->layers * IDR_BITS;
695 *paa = rcu_dereference_raw(idp->top);
696 max = idr_max(idp->layers);
697 102
698 id = 0; 103 radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
699 while (id >= 0 && id <= max) { 104 int ret = fn(iter.index, rcu_dereference_raw(*slot), data);
700 p = *paa; 105 if (ret)
701 while (n > 0 && p) { 106 return ret;
702 n -= IDR_BITS;
703 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
704 *++paa = p;
705 }
706
707 if (p) {
708 error = fn(id, (void *)p, data);
709 if (error)
710 break;
711 }
712
713 id += 1 << n;
714 while (n < fls(id)) {
715 n += IDR_BITS;
716 --paa;
717 }
718 } 107 }
719 108
720 return error; 109 return 0;
721} 110}
722EXPORT_SYMBOL(idr_for_each); 111EXPORT_SYMBOL(idr_for_each);
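A sketch of the callback pattern (hypothetical names; assumes a seq_file consumer for illustration):

	static int obj_show(int id, void *p, void *data)
	{
		struct seq_file *s = data;

		seq_printf(s, "id %d -> %p\n", id, p);
		return 0;	/* any non-zero value stops the iteration */
	}

	/* ... */
	idr_for_each(&obj_idr, obj_show, s);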
723 112
724/** 113/**
725 * idr_get_next - lookup next object of id to given id. 114 * idr_get_next - Find next populated entry
726 * @idp: idr handle 115 * @idr: idr handle
727 * @nextidp: pointer to lookup key 116 * @nextid: Pointer to lowest possible ID to return
728 * 117 *
729 * Returns pointer to registered object with id, which is next number to 118 * Returns the next populated entry in the tree with an ID greater than
730 * given id. After being looked up, *@nextidp will be updated for the next 119 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
731 * iteration. 120 * to the ID of the found value. To use in a loop, the value pointed to by
732 * 121 * nextid must be incremented by the user.
733 * This function can be called under rcu_read_lock(), given that the leaf
734 * pointers lifetimes are correctly managed.
735 */ 122 */
736void *idr_get_next(struct idr *idp, int *nextidp) 123void *idr_get_next(struct idr *idr, int *nextid)
737{ 124{
738 struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1]; 125 struct radix_tree_iter iter;
739 struct idr_layer **paa = &pa[0]; 126 void __rcu **slot;
740 int id = *nextidp;
741 int n, max;
742 127
743 /* find first ent */ 128 slot = radix_tree_iter_find(&idr->idr_rt, &iter, *nextid);
744 p = *paa = rcu_dereference_raw(idp->top); 129 if (!slot)
745 if (!p)
746 return NULL; 130 return NULL;
747 n = (p->layer + 1) * IDR_BITS;
748 max = idr_max(p->layer + 1);
749
750 while (id >= 0 && id <= max) {
751 p = *paa;
752 while (n > 0 && p) {
753 n -= IDR_BITS;
754 p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
755 *++paa = p;
756 }
757
758 if (p) {
759 *nextidp = id;
760 return p;
761 }
762 131
763 /* 132 *nextid = iter.index;
764 * Proceed to the next layer at the current level. Unlike 133 return rcu_dereference_raw(*slot);
765 * idr_for_each(), @id isn't guaranteed to be aligned to
766 * layer boundary at this point and adding 1 << n may
767 * incorrectly skip IDs. Make sure we jump to the
768 * beginning of the next layer using round_up().
769 */
770 id = round_up(id + 1, 1 << n);
771 while (n < fls(id)) {
772 n += IDR_BITS;
773 --paa;
774 }
775 }
776 return NULL;
777} 134}
778EXPORT_SYMBOL(idr_get_next); 135EXPORT_SYMBOL(idr_get_next);
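The increment-by-caller contract gives the usual open-coded loop; the idr_for_each_entry() helper in <linux/idr.h> wraps this same pattern:

	int id = 0;
	void *entry;

	while ((entry = idr_get_next(&obj_idr, &id)) != NULL) {
		/* ... use entry, whose ID is id ... */
		id++;	/* step past the found ID, or the loop never advances */
	}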
779 136
780
781/** 137/**
782 * idr_replace - replace pointer for given id 138 * idr_replace - replace pointer for given id
783 * @idp: idr handle 139 * @idr: idr handle
784 * @ptr: pointer you want associated with the id 140 * @ptr: New pointer to associate with the ID
785 * @id: lookup key 141 * @id: Lookup key
786 * 142 *
787 * Replace the pointer registered with an id and return the old value. 143 * Replace the pointer registered with an ID and return the old value.
788 * A %-ENOENT return indicates that @id was not found. 144 * This function can be called under the RCU read lock concurrently with
789 * A %-EINVAL return indicates that @id was not within valid constraints. 145 * idr_alloc() and idr_remove() (as long as the ID being removed is not
146 * the one being replaced!).
790 * 147 *
 791 * The caller must serialize with writers. 148 * Returns: the old value on success. %-ENOENT indicates that @id was not found.
149 * %-EINVAL indicates that @id or @ptr were not valid.
792 */ 150 */
793void *idr_replace(struct idr *idp, void *ptr, int id) 151void *idr_replace(struct idr *idr, void *ptr, int id)
794{ 152{
795 int n; 153 struct radix_tree_node *node;
796 struct idr_layer *p, *old_p; 154 void __rcu **slot = NULL;
155 void *entry;
797 156
798 if (id < 0) 157 if (WARN_ON_ONCE(id < 0))
158 return ERR_PTR(-EINVAL);
159 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
799 return ERR_PTR(-EINVAL); 160 return ERR_PTR(-EINVAL);
800 161
801 p = idp->top; 162 entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
802 if (!p) 163 if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
803 return ERR_PTR(-ENOENT);
804
805 if (id > idr_max(p->layer + 1))
806 return ERR_PTR(-ENOENT);
807
808 n = p->layer * IDR_BITS;
809 while ((n > 0) && p) {
810 p = p->ary[(id >> n) & IDR_MASK];
811 n -= IDR_BITS;
812 }
813
814 n = id & IDR_MASK;
815 if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
816 return ERR_PTR(-ENOENT); 164 return ERR_PTR(-ENOENT);
817 165
818 old_p = p->ary[n]; 166 __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL, NULL);
819 rcu_assign_pointer(p->ary[n], ptr);
820 167
821 return old_p; 168 return entry;
822} 169}
823EXPORT_SYMBOL(idr_replace); 170EXPORT_SYMBOL(idr_replace);
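Because the return value multiplexes the old pointer with an ERR_PTR, callers should test it with IS_ERR() (hypothetical names):

	old = idr_replace(&obj_idr, new_obj, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */
	kfree(old);	/* hypothetical: dispose of the displaced object */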
824 171
825void __init idr_init_cache(void)
826{
827 idr_layer_cache = kmem_cache_create("idr_layer_cache",
828 sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
829}
830
831/**
832 * idr_init - initialize idr handle
833 * @idp: idr handle
834 *
 835 * This function is used to set up the handle (@idp) that you will pass
836 * to the rest of the functions.
837 */
838void idr_init(struct idr *idp)
839{
840 memset(idp, 0, sizeof(struct idr));
841 spin_lock_init(&idp->lock);
842}
843EXPORT_SYMBOL(idr_init);
844
845static int idr_has_entry(int id, void *p, void *data)
846{
847 return 1;
848}
849
850bool idr_is_empty(struct idr *idp)
851{
852 return !idr_for_each(idp, idr_has_entry, NULL);
853}
854EXPORT_SYMBOL(idr_is_empty);
855
856/** 172/**
857 * DOC: IDA description 173 * DOC: IDA description
858 * IDA - IDR based ID allocator
859 * 174 *
860 * This is id allocator without id -> pointer translation. Memory 175 * The IDA is an ID allocator which does not provide the ability to
861 * usage is much lower than full blown idr because each id only 176 * associate an ID with a pointer. As such, it only needs to store one
862 * occupies a bit. ida uses a custom leaf node which contains 177 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
863 * IDA_BITMAP_BITS slots. 178 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
864 * 179 * then initialise it using ida_init()). To allocate a new ID, call
865 * 2007-04-25 written by Tejun Heo <htejun@gmail.com> 180 * ida_simple_get(). To free an ID, call ida_simple_remove().
181 *
182 * If you have more complex locking requirements, use a loop around
183 * ida_pre_get() and ida_get_new() to allocate a new ID. Then use
184 * ida_remove() to free an ID. You must make sure that ida_get_new() and
185 * ida_remove() cannot be called at the same time as each other for the
186 * same IDA.
187 *
188 * You can also use ida_get_new_above() if you need an ID to be allocated
189 * above a particular number. ida_destroy() can be used to dispose of an
190 * IDA without needing to free the individual IDs in it. You can use
191 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
192 *
193 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
194 * limitation, it should be quite straightforward to raise the maximum.
866 */ 195 */
867 196
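The ida_pre_get()/ida_get_new() loop described above looks roughly like this (hypothetical lock and wrapper; ida_get_new() allocates the lowest free ID):

	static DEFINE_IDA(obj_ida);
	static DEFINE_SPINLOCK(obj_ida_lock);

	static int obj_get_id(void)
	{
		int id, ret;

	again:
		if (!ida_pre_get(&obj_ida, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&obj_ida_lock);
		ret = ida_get_new(&obj_ida, &id);
		spin_unlock(&obj_ida_lock);
		if (ret == -EAGAIN)
			goto again;	/* preallocation consumed; refill and retry */
		return ret ? ret : id;
	}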
868static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap) 197/*
869{ 198 * Developer's notes:
870 unsigned long flags; 199 *
871 200 * The IDA uses the functionality provided by the IDR & radix tree to store
872 if (!ida->free_bitmap) { 201 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit
873 spin_lock_irqsave(&ida->idr.lock, flags); 202 * free, unlike the IDR where it means at least one entry is free.
874 if (!ida->free_bitmap) { 203 *
875 ida->free_bitmap = bitmap; 204 * I considered telling the radix tree that each slot is an order-10 node
876 bitmap = NULL; 205 * and storing the bit numbers in the radix tree, but the radix tree can't
877 } 206 * allow a single multiorder entry at index 0, which would significantly
878 spin_unlock_irqrestore(&ida->idr.lock, flags); 207 * increase memory consumption for the IDA. So instead we divide the index
879 } 208 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
880 209 *
881 kfree(bitmap); 210 * As an optimisation, if there are only a few low bits set in any given
882} 211 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
883 212 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
884/** 213 * directly in the entry. By being really tricksy, we could store
 885 * ida_pre_get - reserve resources for ida allocation 214 * BITS_PER_LONG - 1 bits, but there are diminishing returns after optimising
886 * @ida: ida handle 215 * for 0-3 allocated IDs.
887 * @gfp_mask: memory allocation flag 216 *
888 * 217 * We allow the radix tree 'exceptional' count to get out of date. Nothing
889 * This function should be called prior to locking and calling the 218 * in the IDA nor the radix tree code checks it. If it becomes important
890 * following function. It preallocates enough memory to satisfy the 219 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
891 * worst possible allocation. 220 * calls to radix_tree_iter_replace() which will correct the exceptional
892 * 221 * count.
893 * If the system is REALLY out of memory this function returns %0, 222 *
894 * otherwise %1. 223 * The IDA always requires a lock to alloc/free. If we add a 'test_bit'
224 * equivalent, it will still need locking. Going to RCU lookup would require
225 * using RCU to free bitmaps, and that's not trivial without embedding an
226 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
227 * bitmap, which is excessive.
895 */ 228 */
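As a worked illustration of the exceptional-entry packing (hypothetical, assuming a 64-bit build where RADIX_TREE_EXCEPTIONAL_SHIFT is 2): bit N of a leaf's ID space maps to bit N + 2 of the entry word, so IDs 0 .. BITS_PER_LONG - 3 fit inline and anything above forces allocation of the 128-byte bitmap:

	/* IDs 0 and 3 of this leaf held inline; low bits 0b10 tag the entry */
	unsigned long entry = RADIX_TREE_EXCEPTIONAL_ENTRY
			| (1UL << (0 + RADIX_TREE_EXCEPTIONAL_SHIFT))
			| (1UL << (3 + RADIX_TREE_EXCEPTIONAL_SHIFT));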
896int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
897{
898 /* allocate idr_layers */
899 if (!__idr_pre_get(&ida->idr, gfp_mask))
900 return 0;
901 229
902 /* allocate free_bitmap */ 230#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS)
903 if (!ida->free_bitmap) {
904 struct ida_bitmap *bitmap;
905
906 bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
907 if (!bitmap)
908 return 0;
909
910 free_bitmap(ida, bitmap);
911 }
912
913 return 1;
914}
915EXPORT_SYMBOL(ida_pre_get);
916 231
917/** 232/**
918 * ida_get_new_above - allocate new ID above or equal to a start id 233 * ida_get_new_above - allocate new ID above or equal to a start id
919 * @ida: ida handle 234 * @ida: ida handle
920 * @starting_id: id to start search at 235 * @start: id to start search at
921 * @p_id: pointer to the allocated handle 236 * @id: pointer to the allocated handle
922 * 237 *
923 * Allocate new ID above or equal to @starting_id. It should be called 238 * Allocate new ID above or equal to @start. It should be called
924 * with any required locks. 239 * with any required locks to ensure that concurrent calls to
240 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
241 * Consider using ida_simple_get() if you do not have complex locking
242 * requirements.
925 * 243 *
926 * If memory is required, it will return %-EAGAIN, you should unlock 244 * If memory is required, it will return %-EAGAIN, you should unlock
927 * and go back to the ida_pre_get() call. If the ida is full, it will 245 * and go back to the ida_pre_get() call. If the ida is full, it will
928 * return %-ENOSPC. 246 * return %-ENOSPC. On success, it will return 0.
929 *
930 * Note that callers must ensure that concurrent access to @ida is not possible.
 931 * See ida_simple_get() for a variant which takes care of locking.
932 * 247 *
933 * @p_id returns a value in the range @starting_id ... %0x7fffffff. 248 * @id returns a value in the range @start ... %0x7fffffff.
934 */ 249 */
935int ida_get_new_above(struct ida *ida, int starting_id, int *p_id) 250int ida_get_new_above(struct ida *ida, int start, int *id)
936{ 251{
937 struct idr_layer *pa[MAX_IDR_LEVEL + 1]; 252 struct radix_tree_root *root = &ida->ida_rt;
253 void __rcu **slot;
254 struct radix_tree_iter iter;
938 struct ida_bitmap *bitmap; 255 struct ida_bitmap *bitmap;
939 unsigned long flags; 256 unsigned long index;
940 int idr_id = starting_id / IDA_BITMAP_BITS; 257 unsigned bit, ebit;
941 int offset = starting_id % IDA_BITMAP_BITS; 258 int new;
942 int t, id; 259
943 260 index = start / IDA_BITMAP_BITS;
944 restart: 261 bit = start % IDA_BITMAP_BITS;
945 /* get vacant slot */ 262 ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
946 t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr); 263
947 if (t < 0) 264 slot = radix_tree_iter_init(&iter, index);
948 return t == -ENOMEM ? -EAGAIN : t; 265 for (;;) {
949 266 if (slot)
950 if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT) 267 slot = radix_tree_next_slot(slot, &iter,
951 return -ENOSPC; 268 RADIX_TREE_ITER_TAGGED);
952 269 if (!slot) {
953 if (t != idr_id) 270 slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
954 offset = 0; 271 if (IS_ERR(slot)) {
955 idr_id = t; 272 if (slot == ERR_PTR(-ENOMEM))
956 273 return -EAGAIN;
957 /* if bitmap isn't there, create a new one */ 274 return PTR_ERR(slot);
958 bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK]; 275 }
959 if (!bitmap) { 276 }
960 spin_lock_irqsave(&ida->idr.lock, flags); 277 if (iter.index > index) {
961 bitmap = ida->free_bitmap; 278 bit = 0;
962 ida->free_bitmap = NULL; 279 ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
963 spin_unlock_irqrestore(&ida->idr.lock, flags); 280 }
964 281 new = iter.index * IDA_BITMAP_BITS;
965 if (!bitmap) 282 bitmap = rcu_dereference_raw(*slot);
966 return -EAGAIN; 283 if (radix_tree_exception(bitmap)) {
967 284 unsigned long tmp = (unsigned long)bitmap;
968 memset(bitmap, 0, sizeof(struct ida_bitmap)); 285 ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
969 rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK], 286 if (ebit < BITS_PER_LONG) {
970 (void *)bitmap); 287 tmp |= 1UL << ebit;
971 pa[0]->count++; 288 rcu_assign_pointer(*slot, (void *)tmp);
972 } 289 *id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
973 290 return 0;
974 /* lookup for empty slot */ 291 }
975 t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset); 292 bitmap = this_cpu_xchg(ida_bitmap, NULL);
976 if (t == IDA_BITMAP_BITS) { 293 if (!bitmap)
977 /* no empty slot after offset, continue to the next chunk */ 294 return -EAGAIN;
978 idr_id++; 295 memset(bitmap, 0, sizeof(*bitmap));
979 offset = 0; 296 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
980 goto restart; 297 rcu_assign_pointer(*slot, bitmap);
981 } 298 }
982
983 id = idr_id * IDA_BITMAP_BITS + t;
984 if (id >= MAX_IDR_BIT)
985 return -ENOSPC;
986 299
987 __set_bit(t, bitmap->bitmap); 300 if (bitmap) {
988 if (++bitmap->nr_busy == IDA_BITMAP_BITS) 301 bit = find_next_zero_bit(bitmap->bitmap,
989 idr_mark_full(pa, idr_id); 302 IDA_BITMAP_BITS, bit);
303 new += bit;
304 if (new < 0)
305 return -ENOSPC;
306 if (bit == IDA_BITMAP_BITS)
307 continue;
990 308
991 *p_id = id; 309 __set_bit(bit, bitmap->bitmap);
310 if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
311 radix_tree_iter_tag_clear(root, &iter,
312 IDR_FREE);
313 } else {
314 new += bit;
315 if (new < 0)
316 return -ENOSPC;
317 if (ebit < BITS_PER_LONG) {
318 bitmap = (void *)((1UL << ebit) |
319 RADIX_TREE_EXCEPTIONAL_ENTRY);
320 radix_tree_iter_replace(root, &iter, slot,
321 bitmap);
322 *id = new;
323 return 0;
324 }
325 bitmap = this_cpu_xchg(ida_bitmap, NULL);
326 if (!bitmap)
327 return -EAGAIN;
328 memset(bitmap, 0, sizeof(*bitmap));
329 __set_bit(bit, bitmap->bitmap);
330 radix_tree_iter_replace(root, &iter, slot, bitmap);
331 }
992 332
993 /* Each leaf node can handle nearly a thousand slots and the 333 *id = new;
994 * whole idea of ida is to have small memory foot print. 334 return 0;
995 * Throw away extra resources one by one after each successful
996 * allocation.
997 */
998 if (ida->idr.id_free_cnt || ida->free_bitmap) {
999 struct idr_layer *p = get_from_free_list(&ida->idr);
1000 if (p)
1001 kmem_cache_free(idr_layer_cache, p);
1002 } 335 }
1003
1004 return 0;
1005} 336}
1006EXPORT_SYMBOL(ida_get_new_above); 337EXPORT_SYMBOL(ida_get_new_above);
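Most users can skip the retry dance via the simple API, which serializes internally on simple_ida_lock (defined near the top of this file):

	id = ida_simple_get(&obj_ida, 0, 0, GFP_KERNEL);	/* @end == 0: no upper bound */
	if (id < 0)
		return id;
	/* ... */
	ida_simple_remove(&obj_ida, id);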
1007 338
1008/** 339/**
1009 * ida_remove - remove the given ID 340 * ida_remove - Free the given ID
1010 * @ida: ida handle 341 * @ida: ida handle
1011 * @id: ID to free 342 * @id: ID to free
343 *
344 * This function should not be called at the same time as ida_get_new_above().
1012 */ 345 */
1013void ida_remove(struct ida *ida, int id) 346void ida_remove(struct ida *ida, int id)
1014{ 347{
1015 struct idr_layer *p = ida->idr.top; 348 unsigned long index = id / IDA_BITMAP_BITS;
1016 int shift = (ida->idr.layers - 1) * IDR_BITS; 349 unsigned offset = id % IDA_BITMAP_BITS;
1017 int idr_id = id / IDA_BITMAP_BITS;
1018 int offset = id % IDA_BITMAP_BITS;
1019 int n;
1020 struct ida_bitmap *bitmap; 350 struct ida_bitmap *bitmap;
351 unsigned long *btmp;
352 struct radix_tree_iter iter;
353 void __rcu **slot;
1021 354
1022 if (idr_id > idr_max(ida->idr.layers)) 355 slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
356 if (!slot)
1023 goto err; 357 goto err;
1024 358
1025 /* clear full bits while looking up the leaf idr_layer */ 359 bitmap = rcu_dereference_raw(*slot);
1026 while ((shift > 0) && p) { 360 if (radix_tree_exception(bitmap)) {
1027 n = (idr_id >> shift) & IDR_MASK; 361 btmp = (unsigned long *)slot;
1028 __clear_bit(n, p->bitmap); 362 offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
1029 p = p->ary[n]; 363 if (offset >= BITS_PER_LONG)
1030 shift -= IDR_BITS; 364 goto err;
365 } else {
366 btmp = bitmap->bitmap;
1031 } 367 }
1032 368 if (!test_bit(offset, btmp))
1033 if (p == NULL)
1034 goto err;
1035
1036 n = idr_id & IDR_MASK;
1037 __clear_bit(n, p->bitmap);
1038
1039 bitmap = (void *)p->ary[n];
1040 if (!bitmap || !test_bit(offset, bitmap->bitmap))
1041 goto err; 369 goto err;
1042 370
1043 /* update bitmap and remove it if empty */ 371 __clear_bit(offset, btmp);
1044 __clear_bit(offset, bitmap->bitmap); 372 radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
1045 if (--bitmap->nr_busy == 0) { 373 if (radix_tree_exception(bitmap)) {
1046 __set_bit(n, p->bitmap); /* to please idr_remove() */ 374 if (rcu_dereference_raw(*slot) ==
1047 idr_remove(&ida->idr, idr_id); 375 (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
1048 free_bitmap(ida, bitmap); 376 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
377 } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
378 kfree(bitmap);
379 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
1049 } 380 }
1050
1051 return; 381 return;
1052
1053 err: 382 err:
1054 WARN(1, "ida_remove called for id=%d which is not allocated.\n", id); 383 WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1055} 384}
1056EXPORT_SYMBOL(ida_remove); 385EXPORT_SYMBOL(ida_remove);
1057 386
1058/** 387/**
1059 * ida_destroy - release all cached layers within an ida tree 388 * ida_destroy - Free the contents of an ida
1060 * @ida: ida handle 389 * @ida: ida handle
390 *
391 * Calling this function releases all resources associated with an IDA. When
392 * this call returns, the IDA is empty and can be reused or freed. The caller
393 * should not allow ida_remove() or ida_get_new_above() to be called at the
394 * same time.
1061 */ 395 */
1062void ida_destroy(struct ida *ida) 396void ida_destroy(struct ida *ida)
1063{ 397{
1064 idr_destroy(&ida->idr); 398 struct radix_tree_iter iter;
1065 kfree(ida->free_bitmap); 399 void __rcu **slot;
400
401 radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
402 struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
403 if (!radix_tree_exception(bitmap))
404 kfree(bitmap);
405 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
406 }
1066} 407}
1067EXPORT_SYMBOL(ida_destroy); 408EXPORT_SYMBOL(ida_destroy);
1068 409
@@ -1141,18 +482,3 @@ void ida_simple_remove(struct ida *ida, unsigned int id)
1141 spin_unlock_irqrestore(&simple_ida_lock, flags); 482 spin_unlock_irqrestore(&simple_ida_lock, flags);
1142} 483}
1143EXPORT_SYMBOL(ida_simple_remove); 484EXPORT_SYMBOL(ida_simple_remove);
1144
1145/**
1146 * ida_init - initialize ida handle
1147 * @ida: ida handle
1148 *
 1149 * This function is used to set up the handle (@ida) that you will pass
1150 * to the rest of the functions.
1151 */
1152void ida_init(struct ida *ida)
1153{
1154 memset(ida, 0, sizeof(struct ida));
1155 idr_init(&ida->idr);
1156
1157}
1158EXPORT_SYMBOL(ida_init);
diff --git a/lib/ioremap.c b/lib/ioremap.c
index a3e14ce92a56..4bb30206b942 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -14,6 +14,7 @@
14#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15 15
16#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP 16#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
17static int __read_mostly ioremap_p4d_capable;
17static int __read_mostly ioremap_pud_capable; 18static int __read_mostly ioremap_pud_capable;
18static int __read_mostly ioremap_pmd_capable; 19static int __read_mostly ioremap_pmd_capable;
19static int __read_mostly ioremap_huge_disabled; 20static int __read_mostly ioremap_huge_disabled;
@@ -35,6 +36,11 @@ void __init ioremap_huge_init(void)
35 } 36 }
36} 37}
37 38
39static inline int ioremap_p4d_enabled(void)
40{
41 return ioremap_p4d_capable;
42}
43
38static inline int ioremap_pud_enabled(void) 44static inline int ioremap_pud_enabled(void)
39{ 45{
40 return ioremap_pud_capable; 46 return ioremap_pud_capable;
@@ -46,6 +52,7 @@ static inline int ioremap_pmd_enabled(void)
46} 52}
47 53
48#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 54#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
55static inline int ioremap_p4d_enabled(void) { return 0; }
49static inline int ioremap_pud_enabled(void) { return 0; } 56static inline int ioremap_pud_enabled(void) { return 0; }
50static inline int ioremap_pmd_enabled(void) { return 0; } 57static inline int ioremap_pmd_enabled(void) { return 0; }
51#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ 58#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -94,14 +101,14 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
94 return 0; 101 return 0;
95} 102}
96 103
97static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr, 104static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
98 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 105 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
99{ 106{
100 pud_t *pud; 107 pud_t *pud;
101 unsigned long next; 108 unsigned long next;
102 109
103 phys_addr -= addr; 110 phys_addr -= addr;
104 pud = pud_alloc(&init_mm, pgd, addr); 111 pud = pud_alloc(&init_mm, p4d, addr);
105 if (!pud) 112 if (!pud)
106 return -ENOMEM; 113 return -ENOMEM;
107 do { 114 do {
@@ -120,6 +127,32 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
120 return 0; 127 return 0;
121} 128}
122 129
130static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
131 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
132{
133 p4d_t *p4d;
134 unsigned long next;
135
136 phys_addr -= addr;
137 p4d = p4d_alloc(&init_mm, pgd, addr);
138 if (!p4d)
139 return -ENOMEM;
140 do {
141 next = p4d_addr_end(addr, end);
142
143 if (ioremap_p4d_enabled() &&
144 ((next - addr) == P4D_SIZE) &&
145 IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
146 if (p4d_set_huge(p4d, phys_addr + addr, prot))
147 continue;
148 }
149
150 if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot))
151 return -ENOMEM;
152 } while (p4d++, addr = next, addr != end);
153 return 0;
154}
155
123int ioremap_page_range(unsigned long addr, 156int ioremap_page_range(unsigned long addr,
124 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 157 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
125{ 158{
@@ -135,7 +168,7 @@ int ioremap_page_range(unsigned long addr,
135 pgd = pgd_offset_k(addr); 168 pgd = pgd_offset_k(addr);
136 do { 169 do {
137 next = pgd_addr_end(addr, end); 170 next = pgd_addr_end(addr, end);
138 err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, prot); 171 err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot);
139 if (err) 172 if (err)
140 break; 173 break;
141 } while (pgd++, addr = next, addr != end); 174 } while (pgd++, addr = next, addr != end);
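For context, a hedged sketch of how an architecture's ioremap() implementation typically drives this helper (names illustrative; the pgprot value is arch-specific):

	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
	unsigned long vaddr;

	if (!area)
		return NULL;
	vaddr = (unsigned long)area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
			       pgprot_noncached(PAGE_KERNEL))) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *)vaddr;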
diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
index 391fd23976a2..9c7d89df40ed 100644
--- a/lib/is_single_threaded.c
+++ b/lib/is_single_threaded.c
@@ -9,8 +9,9 @@
9 * as published by the Free Software Foundation; either version 9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version. 10 * 2 of the Licence, or (at your option) any later version.
11 */ 11 */
12 12#include <linux/sched/signal.h>
13#include <linux/sched.h> 13#include <linux/sched/task.h>
14#include <linux/sched/mm.h>
14 15
15/* 16/*
16 * Returns true if the task does not share ->mm with another thread/process. 17 * Returns true if the task does not share ->mm with another thread/process.
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 7f7bfa55eb6d..a34db8d27667 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,15 +20,16 @@
20bool __list_add_valid(struct list_head *new, struct list_head *prev, 20bool __list_add_valid(struct list_head *new, struct list_head *prev,
21 struct list_head *next) 21 struct list_head *next)
22{ 22{
23 CHECK_DATA_CORRUPTION(next->prev != prev, 23 if (CHECK_DATA_CORRUPTION(next->prev != prev,
24 "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n", 24 "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
25 prev, next->prev, next); 25 prev, next->prev, next) ||
26 CHECK_DATA_CORRUPTION(prev->next != next, 26 CHECK_DATA_CORRUPTION(prev->next != next,
27 "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n", 27 "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
28 next, prev->next, prev); 28 next, prev->next, prev) ||
29 CHECK_DATA_CORRUPTION(new == prev || new == next, 29 CHECK_DATA_CORRUPTION(new == prev || new == next,
30 "list_add double add: new=%p, prev=%p, next=%p.\n", 30 "list_add double add: new=%p, prev=%p, next=%p.\n",
31 new, prev, next); 31 new, prev, next))
32 return false;
32 33
33 return true; 34 return true;
34} 35}
@@ -41,18 +42,20 @@ bool __list_del_entry_valid(struct list_head *entry)
41 prev = entry->prev; 42 prev = entry->prev;
42 next = entry->next; 43 next = entry->next;
43 44
44 CHECK_DATA_CORRUPTION(next == LIST_POISON1, 45 if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
45 "list_del corruption, %p->next is LIST_POISON1 (%p)\n", 46 "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
46 entry, LIST_POISON1); 47 entry, LIST_POISON1) ||
47 CHECK_DATA_CORRUPTION(prev == LIST_POISON2, 48 CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
48 "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", 49 "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
49 entry, LIST_POISON2); 50 entry, LIST_POISON2) ||
50 CHECK_DATA_CORRUPTION(prev->next != entry, 51 CHECK_DATA_CORRUPTION(prev->next != entry,
51 "list_del corruption. prev->next should be %p, but was %p\n", 52 "list_del corruption. prev->next should be %p, but was %p\n",
52 entry, prev->next); 53 entry, prev->next) ||
53 CHECK_DATA_CORRUPTION(next->prev != entry, 54 CHECK_DATA_CORRUPTION(next->prev != entry,
54 "list_del corruption. next->prev should be %p, but was %p\n", 55 "list_del corruption. next->prev should be %p, but was %p\n",
55 entry, next->prev); 56 entry, next->prev))
57 return false;
58
56 return true; 59 return true;
57 60
58} 61}
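Returning false now lets the inline callers in <linux/list.h> bail out instead of completing a corrupting link; the add-side caller, roughly:

	static inline void __list_add(struct list_head *new,
				      struct list_head *prev,
				      struct list_head *next)
	{
		if (!__list_add_valid(new, prev, next))
			return;		/* leave the list untouched */

		next->prev = new;
		new->next = next;
		new->prev = prev;
		WRITE_ONCE(prev->next, new);
	}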
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
index 8085d04e9309..f7b113271d13 100644
--- a/lib/lz4/Makefile
+++ b/lib/lz4/Makefile
@@ -1,3 +1,5 @@
1ccflags-y += -O3
2
1obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o 3obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
2obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o 4obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
3obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o 5obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index 28321d8f75ef..cc7b6d4cc7c7 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -1,19 +1,16 @@
1/* 1/*
2 * LZ4 - Fast LZ compression algorithm 2 * LZ4 - Fast LZ compression algorithm
3 * Copyright (C) 2011-2012, Yann Collet. 3 * Copyright (C) 2011 - 2016, Yann Collet.
4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 4 * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
5
6 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are 6 * modification, are permitted provided that the following conditions are
8 * met: 7 * met:
9 * 8 * * Redistributions of source code must retain the above copyright
10 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer.
11 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer 11 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the 12 * in the documentation and/or other materials provided with the
15 * distribution. 13 * distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,419 +22,919 @@
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You can contact the author at : 25 * You can contact the author at :
30 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 26 * - LZ4 homepage : http://www.lz4.org
31 * - LZ4 source repository : http://code.google.com/p/lz4/ 27 * - LZ4 source repository : https://github.com/lz4/lz4
32 * 28 *
33 * Changed for kernel use by: 29 * Changed for kernel usage by:
34 * Chanho Min <chanho.min@lge.com> 30 * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
35 */ 31 */
36 32
33/*-************************************
34 * Dependencies
35 **************************************/
36#include <linux/lz4.h>
37#include "lz4defs.h"
37#include <linux/module.h> 38#include <linux/module.h>
38#include <linux/kernel.h> 39#include <linux/kernel.h>
39#include <linux/lz4.h>
40#include <asm/unaligned.h> 40#include <asm/unaligned.h>
41#include "lz4defs.h"
42 41
43/* 42static const int LZ4_minLength = (MFLIMIT + 1);
44 * LZ4_compressCtx : 43static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
45 * ----------------- 44
46 * Compress 'isize' bytes from 'source' into an output buffer 'dest' of 45/*-******************************
47 * maximum size 'maxOutputSize'. * If it cannot achieve it, compression 46 * Compression functions
48 * will stop, and result of the function will be zero. 47 ********************************/
49 * return : the number of bytes written in buffer 'dest', or 0 if the 48static FORCE_INLINE U32 LZ4_hash4(
50 * compression fails 49 U32 sequence,
51 */ 50 tableType_t const tableType)
52static inline int lz4_compressctx(void *ctx,
53 const char *source,
54 char *dest,
55 int isize,
56 int maxoutputsize)
57{ 51{
58 HTYPE *hashtable = (HTYPE *)ctx; 52 if (tableType == byU16)
59 const u8 *ip = (u8 *)source; 53 return ((sequence * 2654435761U)
60#if LZ4_ARCH64 54 >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
61 const BYTE * const base = ip; 55 else
56 return ((sequence * 2654435761U)
57 >> ((MINMATCH * 8) - LZ4_HASHLOG));
58}
59
60static FORCE_INLINE U32 LZ4_hash5(
61 U64 sequence,
62 tableType_t const tableType)
63{
64 const U32 hashLog = (tableType == byU16)
65 ? LZ4_HASHLOG + 1
66 : LZ4_HASHLOG;
67
68#if LZ4_LITTLE_ENDIAN
69 static const U64 prime5bytes = 889523592379ULL;
70
71 return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
62#else 72#else
63 const int base = 0; 73 static const U64 prime8bytes = 11400714785074694791ULL;
74
75 return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
64#endif 76#endif
65 const u8 *anchor = ip; 77}
66 const u8 *const iend = ip + isize; 78
67 const u8 *const mflimit = iend - MFLIMIT; 79static FORCE_INLINE U32 LZ4_hashPosition(
68 #define MATCHLIMIT (iend - LASTLITERALS) 80 const void *p,
69 81 tableType_t const tableType)
70 u8 *op = (u8 *) dest; 82{
71 u8 *const oend = op + maxoutputsize; 83#if LZ4_ARCH64
72 int length; 84 if (tableType == byU32)
73 const int skipstrength = SKIPSTRENGTH; 85 return LZ4_hash5(LZ4_read_ARCH(p), tableType);
74 u32 forwardh; 86#endif
75 int lastrun; 87
76 88 return LZ4_hash4(LZ4_read32(p), tableType);
77 /* Init */ 89}
78 if (isize < MINLENGTH) 90
79 goto _last_literals; 91static void LZ4_putPositionOnHash(
92 const BYTE *p,
93 U32 h,
94 void *tableBase,
95 tableType_t const tableType,
96 const BYTE *srcBase)
97{
98 switch (tableType) {
99 case byPtr:
100 {
101 const BYTE **hashTable = (const BYTE **)tableBase;
102
103 hashTable[h] = p;
104 return;
105 }
106 case byU32:
107 {
108 U32 *hashTable = (U32 *) tableBase;
109
110 hashTable[h] = (U32)(p - srcBase);
111 return;
112 }
113 case byU16:
114 {
115 U16 *hashTable = (U16 *) tableBase;
116
117 hashTable[h] = (U16)(p - srcBase);
118 return;
119 }
120 }
121}
122
123static FORCE_INLINE void LZ4_putPosition(
124 const BYTE *p,
125 void *tableBase,
126 tableType_t tableType,
127 const BYTE *srcBase)
128{
129 U32 const h = LZ4_hashPosition(p, tableType);
130
131 LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
132}
133
134static const BYTE *LZ4_getPositionOnHash(
135 U32 h,
136 void *tableBase,
137 tableType_t tableType,
138 const BYTE *srcBase)
139{
140 if (tableType == byPtr) {
141 const BYTE **hashTable = (const BYTE **) tableBase;
142
143 return hashTable[h];
144 }
145
146 if (tableType == byU32) {
147 const U32 * const hashTable = (U32 *) tableBase;
148
149 return hashTable[h] + srcBase;
150 }
151
152 {
153 /* default, to ensure a return */
154 const U16 * const hashTable = (U16 *) tableBase;
155
156 return hashTable[h] + srcBase;
157 }
158}
159
160static FORCE_INLINE const BYTE *LZ4_getPosition(
161 const BYTE *p,
162 void *tableBase,
163 tableType_t tableType,
164 const BYTE *srcBase)
165{
166 U32 const h = LZ4_hashPosition(p, tableType);
167
168 return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
169}
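A quick aside on the hashing above (editorial arithmetic, assuming the usual LZ4_MEMORY_USAGE of 14, so LZ4_HASHLOG == 12): LZ4_hash4() is Knuth-style multiplicative hashing, 2654435761 being close to 2^32 divided by the golden ratio, which spreads input entropy into the top bits of the product:

	/* byU32/byPtr: h = (seq * 2654435761U) >> (32 - 12),
	 *   a 12-bit index into 4096 slots;
	 * byU16: one extra hash bit, 8192 U16 slots -- both layouts
	 *   cost the same 16 KiB of hash table.
	 */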
80 170
81 memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); 171
172/*
173 * LZ4_compress_generic() :
174 * inlined, to ensure branches are decided at compilation time
175 */
176static FORCE_INLINE int LZ4_compress_generic(
177 LZ4_stream_t_internal * const dictPtr,
178 const char * const source,
179 char * const dest,
180 const int inputSize,
181 const int maxOutputSize,
182 const limitedOutput_directive outputLimited,
183 const tableType_t tableType,
184 const dict_directive dict,
185 const dictIssue_directive dictIssue,
186 const U32 acceleration)
187{
188 const BYTE *ip = (const BYTE *) source;
189 const BYTE *base;
190 const BYTE *lowLimit;
191 const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
192 const BYTE * const dictionary = dictPtr->dictionary;
193 const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
194 const size_t dictDelta = dictEnd - (const BYTE *)source;
195 const BYTE *anchor = (const BYTE *) source;
196 const BYTE * const iend = ip + inputSize;
197 const BYTE * const mflimit = iend - MFLIMIT;
198 const BYTE * const matchlimit = iend - LASTLITERALS;
199
200 BYTE *op = (BYTE *) dest;
201 BYTE * const olimit = op + maxOutputSize;
202
203 U32 forwardH;
204 size_t refDelta = 0;
205
206 /* Init conditions */
207 if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
208 /* Unsupported inputSize, too large (or negative) */
209 return 0;
210 }
211
212 switch (dict) {
213 case noDict:
214 default:
215 base = (const BYTE *)source;
216 lowLimit = (const BYTE *)source;
217 break;
218 case withPrefix64k:
219 base = (const BYTE *)source - dictPtr->currentOffset;
220 lowLimit = (const BYTE *)source - dictPtr->dictSize;
221 break;
222 case usingExtDict:
223 base = (const BYTE *)source - dictPtr->currentOffset;
224 lowLimit = (const BYTE *)source;
225 break;
226 }
227
228 if ((tableType == byU16)
229 && (inputSize >= LZ4_64Klimit)) {
230 /* Size too large (not within 64K limit) */
231 return 0;
232 }
233
234 if (inputSize < LZ4_minLength) {
235 /* Input too small, no compression (all literals) */
236 goto _last_literals;
237 }
82 238
83 /* First Byte */ 239 /* First Byte */
84 hashtable[LZ4_HASH_VALUE(ip)] = ip - base; 240 LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
85 ip++; 241 ip++;
86 forwardh = LZ4_HASH_VALUE(ip); 242 forwardH = LZ4_hashPosition(ip, tableType);
87 243
88 /* Main Loop */ 244 /* Main Loop */
89 for (;;) { 245 for ( ; ; ) {
90 int findmatchattempts = (1U << skipstrength) + 3; 246 const BYTE *match;
91 const u8 *forwardip = ip; 247 BYTE *token;
92 const u8 *ref;
93 u8 *token;
94 248
95 /* Find a match */ 249 /* Find a match */
96 do { 250 {
97 u32 h = forwardh; 251 const BYTE *forwardIp = ip;
98 int step = findmatchattempts++ >> skipstrength; 252 unsigned int step = 1;
99 ip = forwardip; 253 unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
100 forwardip = ip + step; 254
101 255 do {
102 if (unlikely(forwardip > mflimit)) 256 U32 const h = forwardH;
103 goto _last_literals; 257
104 258 ip = forwardIp;
105 forwardh = LZ4_HASH_VALUE(forwardip); 259 forwardIp += step;
106 ref = base + hashtable[h]; 260 step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
107 hashtable[h] = ip - base; 261
108 } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip))); 262 if (unlikely(forwardIp > mflimit))
263 goto _last_literals;
264
265 match = LZ4_getPositionOnHash(h,
266 dictPtr->hashTable,
267 tableType, base);
268
269 if (dict == usingExtDict) {
270 if (match < (const BYTE *)source) {
271 refDelta = dictDelta;
272 lowLimit = dictionary;
273 } else {
274 refDelta = 0;
275 lowLimit = (const BYTE *)source;
276 } }
277
278 forwardH = LZ4_hashPosition(forwardIp,
279 tableType);
280
281 LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
282 tableType, base);
283 } while (((dictIssue == dictSmall)
284 ? (match < lowRefLimit)
285 : 0)
286 || ((tableType == byU16)
287 ? 0
288 : (match + MAX_DISTANCE < ip))
289 || (LZ4_read32(match + refDelta)
290 != LZ4_read32(ip)));
291 }
109 292
110 /* Catch up */ 293 /* Catch up */
111 while ((ip > anchor) && (ref > (u8 *)source) && 294 while (((ip > anchor) & (match + refDelta > lowLimit))
112 unlikely(ip[-1] == ref[-1])) { 295 && (unlikely(ip[-1] == match[refDelta - 1]))) {
113 ip--; 296 ip--;
114 ref--; 297 match--;
115 } 298 }
116 299
117 /* Encode Literal length */ 300 /* Encode Literals */
118 length = (int)(ip - anchor); 301 {
119 token = op++; 302 unsigned const int litLength = (unsigned int)(ip - anchor);
120 /* check output limit */
121 if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
122 (length >> 8) > oend))
123 return 0;
124 303
125 if (length >= (int)RUN_MASK) { 304 token = op++;
126 int len; 305
127 *token = (RUN_MASK << ML_BITS); 306 if ((outputLimited) &&
128 len = length - RUN_MASK; 307 /* Check output buffer overflow */
129 for (; len > 254 ; len -= 255) 308 (unlikely(op + litLength +
130 *op++ = 255; 309 (2 + 1 + LASTLITERALS) +
131 *op++ = (u8)len; 310 (litLength / 255) > olimit)))
132 } else 311 return 0;
133 *token = (length << ML_BITS); 312
313 if (litLength >= RUN_MASK) {
314 int len = (int)litLength - RUN_MASK;
315
316 *token = (RUN_MASK << ML_BITS);
317
318 for (; len >= 255; len -= 255)
319 *op++ = 255;
320 *op++ = (BYTE)len;
321 } else
322 *token = (BYTE)(litLength << ML_BITS);
323
324 /* Copy Literals */
325 LZ4_wildCopy(op, anchor, op + litLength);
326 op += litLength;
327 }
134 328
135 /* Copy Literals */
136 LZ4_BLINDCOPY(anchor, op, length);
137_next_match: 329_next_match:
138 /* Encode Offset */ 330 /* Encode Offset */
139 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); 331 LZ4_writeLE16(op, (U16)(ip - match));
332 op += 2;
140 333
141 /* Start Counting */
142 ip += MINMATCH;
143 /* MinMatch verified */
144 ref += MINMATCH;
145 anchor = ip;
146 while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
147 #if LZ4_ARCH64
148 u64 diff = A64(ref) ^ A64(ip);
149 #else
150 u32 diff = A32(ref) ^ A32(ip);
151 #endif
152 if (!diff) {
153 ip += STEPSIZE;
154 ref += STEPSIZE;
155 continue;
156 }
157 ip += LZ4_NBCOMMONBYTES(diff);
158 goto _endcount;
159 }
160 #if LZ4_ARCH64
161 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
162 ip += 4;
163 ref += 4;
164 }
165 #endif
166 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
167 ip += 2;
168 ref += 2;
169 }
170 if ((ip < MATCHLIMIT) && (*ref == *ip))
171 ip++;
172_endcount:
173 /* Encode MatchLength */ 334 /* Encode MatchLength */
174 length = (int)(ip - anchor); 335 {
175 /* Check output limit */ 336 unsigned int matchCode;
176 if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend)) 337
177 return 0; 338 if ((dict == usingExtDict)
178 if (length >= (int)ML_MASK) { 339 && (lowLimit == dictionary)) {
179 *token += ML_MASK; 340 const BYTE *limit;
180 length -= ML_MASK; 341
181 for (; length > 509 ; length -= 510) { 342 match += refDelta;
182 *op++ = 255; 343 limit = ip + (dictEnd - match);
183 *op++ = 255; 344
184 } 345 if (limit > matchlimit)
185 if (length > 254) { 346 limit = matchlimit;
186 length -= 255; 347
187 *op++ = 255; 348 matchCode = LZ4_count(ip + MINMATCH,
349 match + MINMATCH, limit);
350
351 ip += MINMATCH + matchCode;
352
353 if (ip == limit) {
354 unsigned const int more = LZ4_count(ip,
355 (const BYTE *)source,
356 matchlimit);
357
358 matchCode += more;
359 ip += more;
360 }
361 } else {
362 matchCode = LZ4_count(ip + MINMATCH,
363 match + MINMATCH, matchlimit);
364 ip += MINMATCH + matchCode;
188 } 365 }
189 *op++ = (u8)length; 366
190 } else 367 if (outputLimited &&
191 *token += length; 368 /* Check output buffer overflow */
369 (unlikely(op +
370 (1 + LASTLITERALS) +
371 (matchCode >> 8) > olimit)))
372 return 0;
373
374 if (matchCode >= ML_MASK) {
375 *token += ML_MASK;
376 matchCode -= ML_MASK;
377 LZ4_write32(op, 0xFFFFFFFF);
378
379 while (matchCode >= 4 * 255) {
380 op += 4;
381 LZ4_write32(op, 0xFFFFFFFF);
382 matchCode -= 4 * 255;
383 }
384
385 op += matchCode / 255;
386 *op++ = (BYTE)(matchCode % 255);
387 } else
388 *token += (BYTE)(matchCode);
389 }
390
391 anchor = ip;
192 392
193 /* Test end of chunk */ 393 /* Test end of chunk */
194 if (ip > mflimit) { 394 if (ip > mflimit)
195 anchor = ip;
196 break; 395 break;
197 }
198 396
199 /* Fill table */ 397 /* Fill table */
200 hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base; 398 LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
201 399
202 /* Test next position */ 400 /* Test next position */
203 ref = base + hashtable[LZ4_HASH_VALUE(ip)]; 401 match = LZ4_getPosition(ip, dictPtr->hashTable,
204 hashtable[LZ4_HASH_VALUE(ip)] = ip - base; 402 tableType, base);
205 if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) { 403
404 if (dict == usingExtDict) {
405 if (match < (const BYTE *)source) {
406 refDelta = dictDelta;
407 lowLimit = dictionary;
408 } else {
409 refDelta = 0;
410 lowLimit = (const BYTE *)source;
411 }
412 }
413
414 LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
415
416 if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
417 && (match + MAX_DISTANCE >= ip)
418 && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
206 token = op++; 419 token = op++;
207 *token = 0; 420 *token = 0;
208 goto _next_match; 421 goto _next_match;
209 } 422 }
210 423
211 /* Prepare next loop */ 424 /* Prepare next loop */
212 anchor = ip++; 425 forwardH = LZ4_hashPosition(++ip, tableType);
213 forwardh = LZ4_HASH_VALUE(ip);
214 } 426 }
215 427
216_last_literals: 428_last_literals:
217 /* Encode Last Literals */ 429 /* Encode Last Literals */
218 lastrun = (int)(iend - anchor); 430 {
219 if (((char *)op - dest) + lastrun + 1 431 size_t const lastRun = (size_t)(iend - anchor);
220 + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize) 432
221 return 0; 433 if ((outputLimited) &&
434 /* Check output buffer overflow */
435 ((op - (BYTE *)dest) + lastRun + 1 +
436 ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
437 return 0;
438
439 if (lastRun >= RUN_MASK) {
440 size_t accumulator = lastRun - RUN_MASK;
441 *op++ = RUN_MASK << ML_BITS;
442 for (; accumulator >= 255; accumulator -= 255)
443 *op++ = 255;
444 *op++ = (BYTE) accumulator;
445 } else {
446 *op++ = (BYTE)(lastRun << ML_BITS);
447 }
222 448
223 if (lastrun >= (int)RUN_MASK) { 449 memcpy(op, anchor, lastRun);
224 *op++ = (RUN_MASK << ML_BITS); 450
225 lastrun -= RUN_MASK; 451 op += lastRun;
226 for (; lastrun > 254 ; lastrun -= 255) 452 }
227 *op++ = 255;
228 *op++ = (u8)lastrun;
229 } else
230 *op++ = (lastrun << ML_BITS);
231 memcpy(op, anchor, iend - anchor);
232 op += iend - anchor;
233 453
234 /* End */ 454 /* End */
235 return (int)(((char *)op) - dest); 455 return (int) (((char *)op) - dest);
236} 456}
237 457
238static inline int lz4_compress64kctx(void *ctx, 458static int LZ4_compress_fast_extState(
239 const char *source, 459 void *state,
240 char *dest, 460 const char *source,
241 int isize, 461 char *dest,
242 int maxoutputsize) 462 int inputSize,
463 int maxOutputSize,
464 int acceleration)
243{ 465{
244 u16 *hashtable = (u16 *)ctx; 466 LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
245 const u8 *ip = (u8 *) source; 467#if LZ4_ARCH64
246 const u8 *anchor = ip; 468 const tableType_t tableType = byU32;
247 const u8 *const base = ip; 469#else
248 const u8 *const iend = ip + isize; 470 const tableType_t tableType = byPtr;
249 const u8 *const mflimit = iend - MFLIMIT; 471#endif
250 #define MATCHLIMIT (iend - LASTLITERALS) 472
251 473 LZ4_resetStream((LZ4_stream_t *)state);
252 u8 *op = (u8 *) dest; 474
253 u8 *const oend = op + maxoutputsize; 475 if (acceleration < 1)
254 int len, length; 476 acceleration = LZ4_ACCELERATION_DEFAULT;
255 const int skipstrength = SKIPSTRENGTH; 477
256 u32 forwardh; 478 if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
257 int lastrun; 479 if (inputSize < LZ4_64Klimit)
258 480 return LZ4_compress_generic(ctx, source,
259 /* Init */ 481 dest, inputSize, 0,
260 if (isize < MINLENGTH) 482 noLimit, byU16, noDict,
261 goto _last_literals; 483 noDictIssue, acceleration);
484 else
485 return LZ4_compress_generic(ctx, source,
486 dest, inputSize, 0,
487 noLimit, tableType, noDict,
488 noDictIssue, acceleration);
489 } else {
490 if (inputSize < LZ4_64Klimit)
491 return LZ4_compress_generic(ctx, source,
492 dest, inputSize,
493 maxOutputSize, limitedOutput, byU16, noDict,
494 noDictIssue, acceleration);
495 else
496 return LZ4_compress_generic(ctx, source,
497 dest, inputSize,
498 maxOutputSize, limitedOutput, tableType, noDict,
499 noDictIssue, acceleration);
500 }
501}
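The noLimit fast path above fires when the destination is provably large enough. As a worked example (assuming the usual bound LZ4_COMPRESSBOUND(n) == (n) + (n)/255 + 16):

	/* n = 4096: 4096 + 4096/255 + 16 = 4096 + 16 + 16 = 4128,
	 * so a dst of at least 4128 bytes lets the compressor skip
	 * all output-overflow checks for a 4096-byte input.
	 */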
502
503int LZ4_compress_fast(const char *source, char *dest, int inputSize,
504 int maxOutputSize, int acceleration, void *wrkmem)
505{
506 return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
507 maxOutputSize, acceleration);
508}
509EXPORT_SYMBOL(LZ4_compress_fast);
262 510
263 memset((void *)hashtable, 0, LZ4_MEM_COMPRESS); 511int LZ4_compress_default(const char *source, char *dest, int inputSize,
512 int maxOutputSize, void *wrkmem)
513{
514 return LZ4_compress_fast(source, dest, inputSize,
515 maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
516}
517EXPORT_SYMBOL(LZ4_compress_default);
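For orientation, a minimal one-shot caller might look like the sketch below. It is hypothetical (not part of this patch) and assumes only the signatures above plus LZ4_MEM_COMPRESS and LZ4_compressBound() from <linux/lz4.h>:

	#include <linux/errno.h>
	#include <linux/lz4.h>
	#include <linux/vmalloc.h>

	/* Returns the compressed size, or a negative errno. 'dst' is
	 * assumed to hold at least LZ4_compressBound(src_len) bytes. */
	static int example_compress(const char *src, int src_len, char *dst)
	{
		void *wrkmem = vmalloc(LZ4_MEM_COMPRESS);
		int out_len;

		if (!wrkmem)
			return -ENOMEM;

		out_len = LZ4_compress_default(src, dst, src_len,
					       LZ4_compressBound(src_len),
					       wrkmem);
		vfree(wrkmem);

		return out_len ? out_len : -E2BIG;	/* 0 == dst too small */
	}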
518
519/*-******************************
520 * *_destSize() variant
521 ********************************/
522static int LZ4_compress_destSize_generic(
523 LZ4_stream_t_internal * const ctx,
524 const char * const src,
525 char * const dst,
526 int * const srcSizePtr,
527 const int targetDstSize,
528 const tableType_t tableType)
529{
530 const BYTE *ip = (const BYTE *) src;
531 const BYTE *base = (const BYTE *) src;
532 const BYTE *lowLimit = (const BYTE *) src;
533 const BYTE *anchor = ip;
534 const BYTE * const iend = ip + *srcSizePtr;
535 const BYTE * const mflimit = iend - MFLIMIT;
536 const BYTE * const matchlimit = iend - LASTLITERALS;
537
538 BYTE *op = (BYTE *) dst;
539 BYTE * const oend = op + targetDstSize;
540 BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
541 - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
542 BYTE * const oMaxMatch = op + targetDstSize
543 - (LASTLITERALS + 1 /* token */);
544 BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
545
546 U32 forwardH;
547
548 /* Init conditions */
549 /* Impossible to store anything */
550 if (targetDstSize < 1)
551 return 0;
552 /* Unsupported input size, too large (or negative) */
553 if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
554 return 0;
555 /* Size too large (not within 64K limit) */
556 if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
557 return 0;
558 /* Input too small, no compression (all literals) */
559 if (*srcSizePtr < LZ4_minLength)
560 goto _last_literals;
264 561
265 /* First Byte */ 562 /* First Byte */
266 ip++; 563 *srcSizePtr = 0;
267 forwardh = LZ4_HASH64K_VALUE(ip); 564 LZ4_putPosition(ip, ctx->hashTable, tableType, base);
565 ip++; forwardH = LZ4_hashPosition(ip, tableType);
268 566
269 /* Main Loop */ 567 /* Main Loop */
270 for (;;) { 568 for ( ; ; ) {
271 int findmatchattempts = (1U << skipstrength) + 3; 569 const BYTE *match;
272 const u8 *forwardip = ip; 570 BYTE *token;
273 const u8 *ref;
274 u8 *token;
275 571
276 /* Find a match */ 572 /* Find a match */
277 do { 573 {
278 u32 h = forwardh; 574 const BYTE *forwardIp = ip;
279 int step = findmatchattempts++ >> skipstrength; 575 unsigned int step = 1;
280 ip = forwardip; 576 unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
281 forwardip = ip + step; 577
282 578 do {
283 if (forwardip > mflimit) 579 U32 h = forwardH;
284 goto _last_literals; 580
285 581 ip = forwardIp;
286 forwardh = LZ4_HASH64K_VALUE(forwardip); 582 forwardIp += step;
287 ref = base + hashtable[h]; 583 step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
288 hashtable[h] = (u16)(ip - base); 584
289 } while (A32(ref) != A32(ip)); 585 if (unlikely(forwardIp > mflimit))
586 goto _last_literals;
587
588 match = LZ4_getPositionOnHash(h, ctx->hashTable,
589 tableType, base);
590 forwardH = LZ4_hashPosition(forwardIp,
591 tableType);
592 LZ4_putPositionOnHash(ip, h,
593 ctx->hashTable, tableType,
594 base);
595
596 } while (((tableType == byU16)
597 ? 0
598 : (match + MAX_DISTANCE < ip))
599 || (LZ4_read32(match) != LZ4_read32(ip)));
600 }
290 601
291 /* Catch up */ 602 /* Catch up */
292 while ((ip > anchor) && (ref > (u8 *)source) 603 while ((ip > anchor)
293 && (ip[-1] == ref[-1])) { 604 && (match > lowLimit)
605 && (unlikely(ip[-1] == match[-1]))) {
294 ip--; 606 ip--;
295 ref--; 607 match--;
296 } 608 }
297 609
298 /* Encode Literal length */ 610 /* Encode Literal length */
299 length = (int)(ip - anchor); 611 {
300 token = op++; 612 unsigned int litLength = (unsigned int)(ip - anchor);
301 /* Check output limit */
302 if (unlikely(op + length + (2 + 1 + LASTLITERALS)
303 + (length >> 8) > oend))
304 return 0;
305 if (length >= (int)RUN_MASK) {
306 *token = (RUN_MASK << ML_BITS);
307 len = length - RUN_MASK;
308 for (; len > 254 ; len -= 255)
309 *op++ = 255;
310 *op++ = (u8)len;
311 } else
312 *token = (length << ML_BITS);
313 613
314 /* Copy Literals */ 614 token = op++;
315 LZ4_BLINDCOPY(anchor, op, length); 615 if (op + ((litLength + 240) / 255)
616 + litLength > oMaxLit) {
617 /* Not enough space for a last match */
618 op--;
619 goto _last_literals;
620 }
621 if (litLength >= RUN_MASK) {
622 unsigned int len = litLength - RUN_MASK;
623 *token = (RUN_MASK<<ML_BITS);
624 for (; len >= 255; len -= 255)
625 *op++ = 255;
626 *op++ = (BYTE)len;
627 } else
628 *token = (BYTE)(litLength << ML_BITS);
629
630 /* Copy Literals */
631 LZ4_wildCopy(op, anchor, op + litLength);
632 op += litLength;
633 }
316 634
317_next_match: 635_next_match:
318 /* Encode Offset */ 636 /* Encode Offset */
319 LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref)); 637 LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
320 638
321 /* Start Counting */ 639 /* Encode MatchLength */
322 ip += MINMATCH; 640 {
323 /* MinMatch verified */ 641 size_t matchLength = LZ4_count(ip + MINMATCH,
324 ref += MINMATCH; 642 match + MINMATCH, matchlimit);
325 anchor = ip;
326 643
327 while (ip < MATCHLIMIT - (STEPSIZE - 1)) { 644 if (op + ((matchLength + 240)/255) > oMaxMatch) {
328 #if LZ4_ARCH64 645 /* Match description too long : reduce it */
329 u64 diff = A64(ref) ^ A64(ip); 646 matchLength = (15 - 1) + (oMaxMatch - op) * 255;
330 #else
331 u32 diff = A32(ref) ^ A32(ip);
332 #endif
333
334 if (!diff) {
335 ip += STEPSIZE;
336 ref += STEPSIZE;
337 continue;
338 } 647 }
339 ip += LZ4_NBCOMMONBYTES(diff); 648 ip += MINMATCH + matchLength;
340 goto _endcount; 649
341 } 650 if (matchLength >= ML_MASK) {
342 #if LZ4_ARCH64 651 *token += ML_MASK;
343 if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) { 652 matchLength -= ML_MASK;
344 ip += 4; 653 while (matchLength >= 255) {
345 ref += 4; 654 matchLength -= 255;
655 *op++ = 255;
656 }
657 *op++ = (BYTE)matchLength;
658 } else
659 *token += (BYTE)(matchLength);
346 } 660 }
347 #endif
348 if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
349 ip += 2;
350 ref += 2;
351 }
352 if ((ip < MATCHLIMIT) && (*ref == *ip))
353 ip++;
354_endcount:
355 661
356 /* Encode MatchLength */ 662 anchor = ip;
357 len = (int)(ip - anchor);
358 /* Check output limit */
359 if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
360 return 0;
361 if (len >= (int)ML_MASK) {
362 *token += ML_MASK;
363 len -= ML_MASK;
364 for (; len > 509 ; len -= 510) {
365 *op++ = 255;
366 *op++ = 255;
367 }
368 if (len > 254) {
369 len -= 255;
370 *op++ = 255;
371 }
372 *op++ = (u8)len;
373 } else
374 *token += len;
375 663
376 /* Test end of chunk */ 664 /* Test end of block */
377 if (ip > mflimit) { 665 if (ip > mflimit)
378 anchor = ip; 666 break;
667 if (op > oMaxSeq)
379 break; 668 break;
380 }
381 669
382 /* Fill table */ 670 /* Fill table */
383 hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base); 671 LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
384 672
385 /* Test next position */ 673 /* Test next position */
386 ref = base + hashtable[LZ4_HASH64K_VALUE(ip)]; 674 match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
387 hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base); 675 LZ4_putPosition(ip, ctx->hashTable, tableType, base);
388 if (A32(ref) == A32(ip)) { 676
389 token = op++; 677 if ((match + MAX_DISTANCE >= ip)
390 *token = 0; 678 && (LZ4_read32(match) == LZ4_read32(ip))) {
679 token = op++; *token = 0;
391 goto _next_match; 680 goto _next_match;
392 } 681 }
393 682
394 /* Prepare next loop */ 683 /* Prepare next loop */
395 anchor = ip++; 684 forwardH = LZ4_hashPosition(++ip, tableType);
396 forwardh = LZ4_HASH64K_VALUE(ip);
397 } 685 }
398 686
399_last_literals: 687_last_literals:
400 /* Encode Last Literals */ 688 /* Encode Last Literals */
401 lastrun = (int)(iend - anchor); 689 {
402 if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend) 690 size_t lastRunSize = (size_t)(iend - anchor);
403 return 0; 691
404 if (lastrun >= (int)RUN_MASK) { 692 if (op + 1 /* token */
405 *op++ = (RUN_MASK << ML_BITS); 693 + ((lastRunSize + 240) / 255) /* litLength */
406 lastrun -= RUN_MASK; 694 + lastRunSize /* literals */ > oend) {
407 for (; lastrun > 254 ; lastrun -= 255) 695 /* adapt lastRunSize to fill 'dst' */
408 *op++ = 255; 696 lastRunSize = (oend - op) - 1;
409 *op++ = (u8)lastrun; 697 lastRunSize -= (lastRunSize + 240) / 255;
410 } else 698 }
411 *op++ = (lastrun << ML_BITS); 699 ip = anchor + lastRunSize;
412 memcpy(op, anchor, iend - anchor); 700
413 op += iend - anchor; 701 if (lastRunSize >= RUN_MASK) {
702 size_t accumulator = lastRunSize - RUN_MASK;
703
704 *op++ = RUN_MASK << ML_BITS;
705 for (; accumulator >= 255; accumulator -= 255)
706 *op++ = 255;
707 *op++ = (BYTE) accumulator;
708 } else {
709 *op++ = (BYTE)(lastRunSize<<ML_BITS);
710 }
711 memcpy(op, anchor, lastRunSize);
712 op += lastRunSize;
713 }
714
414 /* End */ 715 /* End */
415 return (int)(((char *)op) - dest); 716 *srcSizePtr = (int) (((const char *)ip) - src);
717 return (int) (((char *)op) - dst);
416} 718}
417 719
418int lz4_compress(const unsigned char *src, size_t src_len, 720static int LZ4_compress_destSize_extState(
419 unsigned char *dst, size_t *dst_len, void *wrkmem) 721 LZ4_stream_t *state,
722 const char *src,
723 char *dst,
724 int *srcSizePtr,
725 int targetDstSize)
420{ 726{
421 int ret = -1; 727#if LZ4_ARCH64
422 int out_len = 0; 728 const tableType_t tableType = byU32;
729#else
730 const tableType_t tableType = byPtr;
731#endif
423 732
424 if (src_len < LZ4_64KLIMIT) 733 LZ4_resetStream(state);
425 out_len = lz4_compress64kctx(wrkmem, src, dst, src_len, 734
426 lz4_compressbound(src_len)); 735 if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
427 else 736 /* compression success is guaranteed */
428 out_len = lz4_compressctx(wrkmem, src, dst, src_len, 737 return LZ4_compress_fast_extState(
429 lz4_compressbound(src_len)); 738 state, src, dst, *srcSizePtr,
739 targetDstSize, 1);
740 } else {
741 if (*srcSizePtr < LZ4_64Klimit)
742 return LZ4_compress_destSize_generic(
743 &state->internal_donotuse,
744 src, dst, srcSizePtr,
745 targetDstSize, byU16);
746 else
747 return LZ4_compress_destSize_generic(
748 &state->internal_donotuse,
749 src, dst, srcSizePtr,
750 targetDstSize, tableType);
751 }
752}
753
754
755int LZ4_compress_destSize(
756 const char *src,
757 char *dst,
758 int *srcSizePtr,
759 int targetDstSize,
760 void *wrkmem)
761{
762 return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
763 targetDstSize);
764}
765EXPORT_SYMBOL(LZ4_compress_destSize);
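A hypothetical caller sketch for this reverse-direction variant: the output size is fixed up front, and *srcSizePtr reports back how much input was actually consumed:

	/* Fill one fixed-size buffer with compressed data; '*consumed'
	 * receives the number of input bytes encoded into 'page'. */
	static int example_fill_page(const char *src, int avail, char *page,
				     int page_size, void *wrkmem,
				     int *consumed)
	{
		int src_len = avail;
		int written = LZ4_compress_destSize(src, page, &src_len,
						    page_size, wrkmem);

		*consumed = src_len;	/* carry the rest to the next page */
		return written;		/* compressed bytes, 0 on failure */
	}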
766
767/*-******************************
768 * Streaming functions
769 ********************************/
770void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
771{
772 memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
773}
774
775int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
776 const char *dictionary, int dictSize)
777{
778 LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
779 const BYTE *p = (const BYTE *)dictionary;
780 const BYTE * const dictEnd = p + dictSize;
781 const BYTE *base;
782
783 if ((dict->initCheck)
784 || (dict->currentOffset > 1 * GB)) {
785 /* Uninitialized structure, or reuse overflow */
786 LZ4_resetStream(LZ4_dict);
787 }
788
789 if (dictSize < (int)HASH_UNIT) {
790 dict->dictionary = NULL;
791 dict->dictSize = 0;
792 return 0;
793 }
794
795 if ((dictEnd - p) > 64 * KB)
796 p = dictEnd - 64 * KB;
797 dict->currentOffset += 64 * KB;
798 base = p - dict->currentOffset;
799 dict->dictionary = p;
800 dict->dictSize = (U32)(dictEnd - p);
801 dict->currentOffset += dict->dictSize;
802
803 while (p <= dictEnd - HASH_UNIT) {
804 LZ4_putPosition(p, dict->hashTable, byU32, base);
805 p += 3;
806 }
807
808 return dict->dictSize;
809}
810EXPORT_SYMBOL(LZ4_loadDict);
811
812static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
813 const BYTE *src)
814{
815 if ((LZ4_dict->currentOffset > 0x80000000) ||
816 ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
817 /* address space overflow */
818 /* rescale hash table */
819 U32 const delta = LZ4_dict->currentOffset - 64 * KB;
820 const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
821 int i;
822
823 for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
824 if (LZ4_dict->hashTable[i] < delta)
825 LZ4_dict->hashTable[i] = 0;
826 else
827 LZ4_dict->hashTable[i] -= delta;
828 }
829 LZ4_dict->currentOffset = 64 * KB;
830 if (LZ4_dict->dictSize > 64 * KB)
831 LZ4_dict->dictSize = 64 * KB;
832 LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
833 }
834}
835
836int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
837{
838 LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
839 const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
840
841 if ((U32)dictSize > 64 * KB) {
842 /* useless to define a dictionary > 64 * KB */
843 dictSize = 64 * KB;
844 }
845 if ((U32)dictSize > dict->dictSize)
846 dictSize = dict->dictSize;
847
848 memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
849
850 dict->dictionary = (const BYTE *)safeBuffer;
851 dict->dictSize = (U32)dictSize;
852
853 return dictSize;
854}
855EXPORT_SYMBOL(LZ4_saveDict);
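Taken together, a hypothetical preset-dictionary round could look like this (names are illustrative; 'safe_buf' is assumed to hold 64 KiB):

	static int example_with_dict(LZ4_stream_t *s, const char *dict,
				     int dict_len, const char *src,
				     int src_len, char *dst, int dst_cap,
				     char *safe_buf)
	{
		int n;

		LZ4_resetStream(s);
		LZ4_loadDict(s, dict, dict_len); /* keeps at most 64 KiB */
		n = LZ4_compress_fast_continue(s, src, dst, src_len,
					       dst_cap, 1);
		/* park the history before its backing buffer is reused */
		LZ4_saveDict(s, safe_buf, 64 * 1024);
		return n;	/* compressed size, 0 if dst_cap too small */
	}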
856
857int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
858 char *dest, int inputSize, int maxOutputSize, int acceleration)
859{
860 LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
861 const BYTE * const dictEnd = streamPtr->dictionary
862 + streamPtr->dictSize;
430 863
431 if (out_len < 0) 864 const BYTE *smallest = (const BYTE *) source;
432 goto exit;
433 865
434 *dst_len = out_len; 866 if (streamPtr->initCheck) {
867 /* Uninitialized structure detected */
868 return 0;
869 }
870
871 if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
872 smallest = dictEnd;
873
874 LZ4_renormDictT(streamPtr, smallest);
875
876 if (acceleration < 1)
877 acceleration = LZ4_ACCELERATION_DEFAULT;
878
879 /* Check overlapping input/dictionary space */
880 {
881 const BYTE *sourceEnd = (const BYTE *) source + inputSize;
882
883 if ((sourceEnd > streamPtr->dictionary)
884 && (sourceEnd < dictEnd)) {
885 streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
886 if (streamPtr->dictSize > 64 * KB)
887 streamPtr->dictSize = 64 * KB;
888 if (streamPtr->dictSize < 4)
889 streamPtr->dictSize = 0;
890 streamPtr->dictionary = dictEnd - streamPtr->dictSize;
891 }
892 }
435 893
436 return 0; 894 /* prefix mode : source data follows dictionary */
437exit: 895 if (dictEnd == (const BYTE *)source) {
438 return ret; 896 int result;
897
898 if ((streamPtr->dictSize < 64 * KB) &&
899 (streamPtr->dictSize < streamPtr->currentOffset)) {
900 result = LZ4_compress_generic(
901 streamPtr, source, dest, inputSize,
902 maxOutputSize, limitedOutput, byU32,
903 withPrefix64k, dictSmall, acceleration);
904 } else {
905 result = LZ4_compress_generic(
906 streamPtr, source, dest, inputSize,
907 maxOutputSize, limitedOutput, byU32,
908 withPrefix64k, noDictIssue, acceleration);
909 }
910 streamPtr->dictSize += (U32)inputSize;
911 streamPtr->currentOffset += (U32)inputSize;
912 return result;
913 }
914
915 /* external dictionary mode */
916 {
917 int result;
918
919 if ((streamPtr->dictSize < 64 * KB) &&
920 (streamPtr->dictSize < streamPtr->currentOffset)) {
921 result = LZ4_compress_generic(
922 streamPtr, source, dest, inputSize,
923 maxOutputSize, limitedOutput, byU32,
924 usingExtDict, dictSmall, acceleration);
925 } else {
926 result = LZ4_compress_generic(
927 streamPtr, source, dest, inputSize,
928 maxOutputSize, limitedOutput, byU32,
929 usingExtDict, noDictIssue, acceleration);
930 }
931 streamPtr->dictionary = (const BYTE *)source;
932 streamPtr->dictSize = (U32)inputSize;
933 streamPtr->currentOffset += (U32)inputSize;
934 return result;
935 }
439} 936}
440EXPORT_SYMBOL(lz4_compress); 937EXPORT_SYMBOL(LZ4_compress_fast_continue);
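For plain block streaming without a preset dictionary, a hypothetical loop could be the following (more_input() and emit_block() are placeholders; each block may reference earlier ones, so blocks must later be decoded in the same order):

	LZ4_resetStream(stream);
	while (more_input(&in, &in_len)) {
		int n = LZ4_compress_fast_continue(stream, in, out,
						   in_len, out_cap, 1);

		if (n == 0)
			break;		/* out_cap too small for this block */
		emit_block(out, n);	/* placeholder sink */
	}

Note that LZ4_compress_fast_continue() expects the previous input to remain addressable, or to have been preserved via LZ4_saveDict() as shown earlier.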
441 938
442MODULE_LICENSE("Dual BSD/GPL"); 939MODULE_LICENSE("Dual BSD/GPL");
443MODULE_DESCRIPTION("LZ4 compressor"); 940MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 6d940c72b5fc..bd3574312b82 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -1,25 +1,16 @@
1/* 1/*
2 * LZ4 Decompressor for Linux kernel
3 *
4 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
5 *
6 * Based on LZ4 implementation by Yann Collet.
7 *
8 * LZ4 - Fast LZ compression algorithm 2 * LZ4 - Fast LZ compression algorithm
9 * Copyright (C) 2011-2012, Yann Collet. 3 * Copyright (C) 2011-2016, Yann Collet.
10 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) 4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
11 *
12 * Redistribution and use in source and binary forms, with or without 5 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are 6 * modification, are permitted provided that the following conditions are
14 * met: 7 * met:
15 * 8 * * Redistributions of source code must retain the above copyright
16 * * Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer.
17 * notice, this list of conditions and the following disclaimer. 10 * * Redistributions in binary form must reproduce the above
18 * * Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following disclaimer 11 * copyright notice, this list of conditions and the following disclaimer
20 * in the documentation and/or other materials provided with the 12 * in the documentation and/or other materials provided with the
21 * distribution. 13 * distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 14 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 15 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 16 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -31,313 +22,487 @@
31 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
32 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
33 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 * You can contact the author at :
26 * - LZ4 homepage : http://www.lz4.org
27 * - LZ4 source repository : https://github.com/lz4/lz4
34 * 28 *
35 * You can contact the author at : 29 * Changed for kernel usage by:
36 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 30 * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
37 * - LZ4 source repository : http://code.google.com/p/lz4/
38 */ 31 */
39 32
40#ifndef STATIC 33/*-************************************
34 * Dependencies
35 **************************************/
36#include <linux/lz4.h>
37#include "lz4defs.h"
38#include <linux/init.h>
41#include <linux/module.h> 39#include <linux/module.h>
42#include <linux/kernel.h> 40#include <linux/kernel.h>
43#endif
44#include <linux/lz4.h>
45
46#include <asm/unaligned.h> 41#include <asm/unaligned.h>
47 42
48#include "lz4defs.h" 43/*-*****************************
49 44 * Decompression functions
50static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0}; 45 *******************************/
51#if LZ4_ARCH64 46/* LZ4_decompress_generic() :
52static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3}; 47 * This generic decompression function covers all use cases.
53#endif 48 * It shall be instantiated several times, using different sets of directives
54 49 * Note that it is important this generic function is really inlined,
55static int lz4_uncompress(const char *source, char *dest, int osize) 50 * in order to remove useless branches during compilation optimization.
51 */
52static FORCE_INLINE int LZ4_decompress_generic(
53 const char * const source,
54 char * const dest,
55 int inputSize,
56 /*
57 * If endOnInput == endOnInputSize,
58 * this value is the max size of Output Buffer.
59 */
60 int outputSize,
61 /* endOnOutputSize, endOnInputSize */
62 int endOnInput,
63 /* full, partial */
64 int partialDecoding,
65 /* only used if partialDecoding == partial */
66 int targetOutputSize,
67 /* noDict, withPrefix64k, usingExtDict */
68 int dict,
69 /* == dest when no prefix */
70 const BYTE * const lowPrefix,
71 /* only if dict == usingExtDict */
72 const BYTE * const dictStart,
73 /* note : = 0 if noDict */
74 const size_t dictSize
75 )
56{ 76{
77 /* Local Variables */
57 const BYTE *ip = (const BYTE *) source; 78 const BYTE *ip = (const BYTE *) source;
58 const BYTE *ref; 79 const BYTE * const iend = ip + inputSize;
80
59 BYTE *op = (BYTE *) dest; 81 BYTE *op = (BYTE *) dest;
60 BYTE * const oend = op + osize; 82 BYTE * const oend = op + outputSize;
61 BYTE *cpy; 83 BYTE *cpy;
62 unsigned token; 84 BYTE *oexit = op + targetOutputSize;
63 size_t length; 85 const BYTE * const lowLimit = lowPrefix - dictSize;
64 86
87 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
88 const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
89 const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
90
91 const int safeDecode = (endOnInput == endOnInputSize);
92 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
93
94 /* Special cases */
95 /* targetOutputSize too high => decode everything */
96 if ((partialDecoding) && (oexit > oend - MFLIMIT))
97 oexit = oend - MFLIMIT;
98
99 /* Empty output buffer */
100 if ((endOnInput) && (unlikely(outputSize == 0)))
101 return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
102
103 if ((!endOnInput) && (unlikely(outputSize == 0)))
104 return (*ip == 0 ? 1 : -1);
105
106 /* Main Loop : decode sequences */
65 while (1) { 107 while (1) {
108 size_t length;
109 const BYTE *match;
110 size_t offset;
111
112 /* get literal length */
113 unsigned int const token = *ip++;
114
115 length = token>>ML_BITS;
66 116
67 /* get runlength */
68 token = *ip++;
69 length = (token >> ML_BITS);
70 if (length == RUN_MASK) { 117 if (length == RUN_MASK) {
71 size_t len; 118 unsigned int s;
72 119
73 len = *ip++; 120 do {
74 for (; len == 255; length += 255) 121 s = *ip++;
75 len = *ip++; 122 length += s;
76 if (unlikely(length > (size_t)(length + len))) 123 } while (likely(endOnInput
124 ? ip < iend - RUN_MASK
125 : 1) & (s == 255));
126
127 if ((safeDecode)
128 && unlikely(
129 (size_t)(op + length) < (size_t)(op))) {
130 /* overflow detection */
131 goto _output_error;
132 }
133 if ((safeDecode)
134 && unlikely(
135 (size_t)(ip + length) < (size_t)(ip))) {
136 /* overflow detection */
77 goto _output_error; 137 goto _output_error;
78 length += len; 138 }
79 } 139 }
80 140
81 /* copy literals */ 141 /* copy literals */
82 cpy = op + length; 142 cpy = op + length;
83 if (unlikely(cpy > oend - COPYLENGTH)) { 143 if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
84 /* 144 || (ip + length > iend - (2 + 1 + LASTLITERALS))))
85 * Error: not enough place for another match 145 || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
86 * (min 4) + 5 literals 146 if (partialDecoding) {
87 */ 147 if (cpy > oend) {
88 if (cpy != oend) 148 /*
89 goto _output_error; 149 * Error :
150 * write attempt beyond end of output buffer
151 */
152 goto _output_error;
153 }
154 if ((endOnInput)
155 && (ip + length > iend)) {
156 /*
157 * Error :
158 * read attempt beyond
159 * end of input buffer
160 */
161 goto _output_error;
162 }
163 } else {
164 if ((!endOnInput)
165 && (cpy != oend)) {
166 /*
167 * Error :
168 * block decoding must
169 * stop exactly there
170 */
171 goto _output_error;
172 }
173 if ((endOnInput)
174 && ((ip + length != iend)
175 || (cpy > oend))) {
176 /*
177 * Error :
178 * input must be consumed
179 */
180 goto _output_error;
181 }
182 }
90 183
91 memcpy(op, ip, length); 184 memcpy(op, ip, length);
92 ip += length; 185 ip += length;
93 break; /* EOF */ 186 op += length;
187 /* Necessarily EOF, due to parsing restrictions */
188 break;
94 } 189 }
95 LZ4_WILDCOPY(ip, op, cpy); 190
96 ip -= (op - cpy); 191 LZ4_wildCopy(op, ip, cpy);
192 ip += length;
97 op = cpy; 193 op = cpy;
98 194
99 /* get offset */ 195 /* get offset */
100 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); 196 offset = LZ4_readLE16(ip);
101 ip += 2; 197 ip += 2;
198 match = op - offset;
102 199
103 /* Error: offset create reference outside destination buffer */ 200 if ((checkOffset) && (unlikely(match < lowLimit))) {
104 if (unlikely(ref < (BYTE *const) dest)) 201 /* Error : offset outside buffers */
105 goto _output_error; 202 goto _output_error;
203 }
204
205 /* costs ~1%; silence an msan warning when offset == 0 */
206 LZ4_write32(op, (U32)offset);
106 207
107 /* get matchlength */ 208 /* get matchlength */
108 length = token & ML_MASK; 209 length = token & ML_MASK;
109 if (length == ML_MASK) { 210 if (length == ML_MASK) {
110 for (; *ip == 255; length += 255) 211 unsigned int s;
111 ip++; 212
112 if (unlikely(length > (size_t)(length + *ip))) 213 do {
214 s = *ip++;
215
216 if ((endOnInput) && (ip > iend - LASTLITERALS))
217 goto _output_error;
218
219 length += s;
220 } while (s == 255);
221
222 if ((safeDecode)
223 && unlikely(
224 (size_t)(op + length) < (size_t)op)) {
225 /* overflow detection */
113 goto _output_error; 226 goto _output_error;
114 length += *ip++; 227 }
115 } 228 }
116 229
117 /* copy repeated sequence */ 230 length += MINMATCH;
118 if (unlikely((op - ref) < STEPSIZE)) { 231
119#if LZ4_ARCH64 232 /* check external dictionary */
120 int dec64 = dec64table[op - ref]; 233 if ((dict == usingExtDict) && (match < lowPrefix)) {
121#else 234 if (unlikely(op + length > oend - LASTLITERALS)) {
122 const int dec64 = 0; 235 /* doesn't respect parsing restriction */
123#endif 236 goto _output_error;
124 op[0] = ref[0]; 237 }
125 op[1] = ref[1]; 238
126 op[2] = ref[2]; 239 if (length <= (size_t)(lowPrefix - match)) {
127 op[3] = ref[3]; 240 /*
128 op += 4; 241 * match can be copied as a single segment
129 ref += 4; 242 * from external dictionary
130 ref -= dec32table[op-ref]; 243 */
131 PUT4(ref, op); 244 memmove(op, dictEnd - (lowPrefix - match),
132 op += STEPSIZE - 4; 245 length);
133 ref -= dec64; 246 op += length;
247 } else {
248 /*
249 * match encompasses the external
250 * dictionary and the current block
251 */
252 size_t const copySize = (size_t)(lowPrefix - match);
253 size_t const restSize = length - copySize;
254
255 memcpy(op, dictEnd - copySize, copySize);
256 op += copySize;
257
258 if (restSize > (size_t)(op - lowPrefix)) {
259 /* overlap copy */
260 BYTE * const endOfMatch = op + restSize;
261 const BYTE *copyFrom = lowPrefix;
262
263 while (op < endOfMatch)
264 *op++ = *copyFrom++;
265 } else {
266 memcpy(op, lowPrefix, restSize);
267 op += restSize;
268 }
269 }
270
271 continue;
272 }
273
274 /* copy match within block */
275 cpy = op + length;
276
277 if (unlikely(offset < 8)) {
278 const int dec64 = dec64table[offset];
279
280 op[0] = match[0];
281 op[1] = match[1];
282 op[2] = match[2];
283 op[3] = match[3];
284 match += dec32table[offset];
285 memcpy(op + 4, match, 4);
286 match -= dec64;
134 } else { 287 } else {
135 LZ4_COPYSTEP(ref, op); 288 LZ4_copy8(op, match);
289 match += 8;
136 } 290 }
137 cpy = op + length - (STEPSIZE - 4);
138 if (cpy > (oend - COPYLENGTH)) {
139 291
140 /* Error: request to write beyond destination buffer */ 292 op += 8;
141 if (cpy > oend) 293
142 goto _output_error; 294 if (unlikely(cpy > oend - 12)) {
143#if LZ4_ARCH64 295 BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
144 if ((ref + COPYLENGTH) > oend) 296
145#else 297 if (cpy > oend - LASTLITERALS) {
146 if ((ref + COPYLENGTH) > oend || 298 /*
147 (op + COPYLENGTH) > oend) 299 * Error : last LASTLITERALS bytes
148#endif 300 * must be literals (uncompressed)
301 */
149 goto _output_error; 302 goto _output_error;
150 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); 303 }
304
305 if (op < oCopyLimit) {
306 LZ4_wildCopy(op, match, oCopyLimit);
307 match += oCopyLimit - op;
308 op = oCopyLimit;
309 }
310
151 while (op < cpy) 311 while (op < cpy)
152 *op++ = *ref++; 312 *op++ = *match++;
153 op = cpy; 313 } else {
154 /* 314 LZ4_copy8(op, match);
155 * Check EOF (should never happen, since last 5 bytes 315
156 * are supposed to be literals) 316 if (length > 16)
157 */ 317 LZ4_wildCopy(op + 8, match + 8, cpy);
158 if (op == oend)
159 goto _output_error;
160 continue;
161 } 318 }
162 LZ4_SECURECOPY(ref, op, cpy); 319
163 op = cpy; /* correction */ 320 op = cpy; /* correction */
164 } 321 }
322
165 /* end of decoding */ 323 /* end of decoding */
166 return (int) (((char *)ip) - source); 324 if (endOnInput) {
325 /* Nb of output bytes decoded */
326 return (int) (((char *)op) - dest);
327 } else {
328 /* Nb of input bytes read */
329 return (int) (((const char *)ip) - source);
330 }
167 331
168 /* write overflow error detected */ 332 /* Overflow error detected */
169_output_error: 333_output_error:
170 return -1; 334 return -1;
171} 335}
172 336
173static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, 337int LZ4_decompress_safe(const char *source, char *dest,
174 int isize, size_t maxoutputsize) 338 int compressedSize, int maxDecompressedSize)
175{ 339{
176 const BYTE *ip = (const BYTE *) source; 340 return LZ4_decompress_generic(source, dest, compressedSize,
177 const BYTE *const iend = ip + isize; 341 maxDecompressedSize, endOnInputSize, full, 0,
178 const BYTE *ref; 342 noDict, (BYTE *)dest, NULL, 0);
179 343}
180 344
181 BYTE *op = (BYTE *) dest; 345int LZ4_decompress_safe_partial(const char *source, char *dest,
182 BYTE * const oend = op + maxoutputsize; 346 int compressedSize, int targetOutputSize, int maxDecompressedSize)
183 BYTE *cpy; 347{
348 return LZ4_decompress_generic(source, dest, compressedSize,
349 maxDecompressedSize, endOnInputSize, partial,
350 targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
351}
184 352
185 /* Main Loop */ 353int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
186 while (ip < iend) { 354{
355 return LZ4_decompress_generic(source, dest, 0, originalSize,
356 endOnOutputSize, full, 0, withPrefix64k,
357 (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
358}
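On the decoding side, LZ4_decompress_safe() is the entry point for untrusted input; LZ4_decompress_fast() trusts the caller-supplied originalSize and does not bounds-check its reads, so it is only suitable for trusted data. A minimal hypothetical caller:

	int n = LZ4_decompress_safe(comp, plain, comp_len, plain_cap);

	if (n < 0)
		return -EINVAL;	/* corrupt input, or plain_cap too small */
	/* n decoded bytes are now in 'plain' */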
187 359
188 unsigned token; 360int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
189 size_t length; 361 const char *dictionary, int dictSize)
362{
363 LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
190 364
191 /* get runlength */ 365 lz4sd->prefixSize = (size_t) dictSize;
192 token = *ip++; 366 lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
193 length = (token >> ML_BITS); 367 lz4sd->externalDict = NULL;
194 if (length == RUN_MASK) { 368 lz4sd->extDictSize = 0;
195 int s = 255; 369 return 1;
196 while ((ip < iend) && (s == 255)) { 370}
197 s = *ip++;
198 if (unlikely(length > (size_t)(length + s)))
199 goto _output_error;
200 length += s;
201 }
202 }
203 /* copy literals */
204 cpy = op + length;
205 if ((cpy > oend - COPYLENGTH) ||
206 (ip + length > iend - COPYLENGTH)) {
207
208 if (cpy > oend)
209 goto _output_error;/* writes beyond buffer */
210
211 if (ip + length != iend)
212 goto _output_error;/*
213 * Error: LZ4 format requires
214 * to consume all input
215 * at this stage
216 */
217 memcpy(op, ip, length);
218 op += length;
219 break;/* Necessarily EOF, due to parsing restrictions */
220 }
221 LZ4_WILDCOPY(ip, op, cpy);
222 ip -= (op - cpy);
223 op = cpy;
224 371
225 /* get offset */ 372/*
226 LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip); 373 * *_continue() :
227 ip += 2; 374 * These decoding functions allow decompression of multiple blocks
228 if (ref < (BYTE * const) dest) 375 * in "streaming" mode.
229 goto _output_error; 376 * Previously decoded blocks must still be available at the memory
230 /* 377 * position where they were decoded.
231 * Error : offset creates reference 378 * If that is not possible, save the relevant part of
232 * outside of destination buffer 379 * decoded data into a safe buffer,
233 */ 380 * and indicate where it stands using LZ4_setStreamDecode()
381 */
382int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
383 const char *source, char *dest, int compressedSize, int maxOutputSize)
384{
385 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
386 int result;
387
388 if (lz4sd->prefixEnd == (BYTE *)dest) {
389 result = LZ4_decompress_generic(source, dest,
390 compressedSize,
391 maxOutputSize,
392 endOnInputSize, full, 0,
393 usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
394 lz4sd->externalDict,
395 lz4sd->extDictSize);
396
397 if (result <= 0)
398 return result;
399
400 lz4sd->prefixSize += result;
401 lz4sd->prefixEnd += result;
402 } else {
403 lz4sd->extDictSize = lz4sd->prefixSize;
404 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
405 result = LZ4_decompress_generic(source, dest,
406 compressedSize, maxOutputSize,
407 endOnInputSize, full, 0,
408 usingExtDict, (BYTE *)dest,
409 lz4sd->externalDict, lz4sd->extDictSize);
410 if (result <= 0)
411 return result;
412 lz4sd->prefixSize = result;
413 lz4sd->prefixEnd = (BYTE *)dest + result;
414 }
234 415
235 /* get matchlength */ 416 return result;
236 length = (token & ML_MASK); 417}
237 if (length == ML_MASK) {
238 while (ip < iend) {
239 int s = *ip++;
240 if (unlikely(length > (size_t)(length + s)))
241 goto _output_error;
242 length += s;
243 if (s == 255)
244 continue;
245 break;
246 }
247 }
248 418
249 /* copy repeated sequence */ 419int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
250 if (unlikely((op - ref) < STEPSIZE)) { 420 const char *source, char *dest, int originalSize)
251#if LZ4_ARCH64 421{
252 int dec64 = dec64table[op - ref]; 422 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
253#else 423 int result;
254 const int dec64 = 0; 424
255#endif 425 if (lz4sd->prefixEnd == (BYTE *)dest) {
256 op[0] = ref[0]; 426 result = LZ4_decompress_generic(source, dest, 0, originalSize,
257 op[1] = ref[1]; 427 endOnOutputSize, full, 0,
258 op[2] = ref[2]; 428 usingExtDict,
259 op[3] = ref[3]; 429 lz4sd->prefixEnd - lz4sd->prefixSize,
260 op += 4; 430 lz4sd->externalDict, lz4sd->extDictSize);
261 ref += 4; 431
262 ref -= dec32table[op - ref]; 432 if (result <= 0)
263 PUT4(ref, op); 433 return result;
264 op += STEPSIZE - 4; 434
265 ref -= dec64; 435 lz4sd->prefixSize += originalSize;
266 } else { 436 lz4sd->prefixEnd += originalSize;
267 LZ4_COPYSTEP(ref, op); 437 } else {
268 } 438 lz4sd->extDictSize = lz4sd->prefixSize;
269 cpy = op + length - (STEPSIZE-4); 439 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
270 if (cpy > oend - COPYLENGTH) { 440 result = LZ4_decompress_generic(source, dest, 0, originalSize,
271 if (cpy > oend) 441 endOnOutputSize, full, 0,
272 goto _output_error; /* write outside of buf */ 442 usingExtDict, (BYTE *)dest,
273#if LZ4_ARCH64 443 lz4sd->externalDict, lz4sd->extDictSize);
274 if ((ref + COPYLENGTH) > oend) 444 if (result <= 0)
275#else 445 return result;
276 if ((ref + COPYLENGTH) > oend || 446 lz4sd->prefixSize = originalSize;
277 (op + COPYLENGTH) > oend) 447 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
278#endif
279 goto _output_error;
280 LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
281 while (op < cpy)
282 *op++ = *ref++;
283 op = cpy;
284 /*
285 * Check EOF (should never happen, since last 5 bytes
286 * are supposed to be literals)
287 */
288 if (op == oend)
289 goto _output_error;
290 continue;
291 }
292 LZ4_SECURECOPY(ref, op, cpy);
293 op = cpy; /* correction */
294 } 448 }
295 /* end of decoding */
296 return (int) (((char *) op) - dest);
297 449
298 /* write overflow error detected */ 450 return result;
299_output_error:
300 return -1;
301} 451}
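A hypothetical streaming-decode loop matching the comment above; output blocks are kept contiguous so earlier plaintext stays addressable as the implicit dictionary (next_block() is a placeholder):

	LZ4_streamDecode_t sd;
	char *pos = out_buf;

	LZ4_setStreamDecode(&sd, NULL, 0);
	while (next_block(&blk, &blk_len)) {
		int n = LZ4_decompress_safe_continue(&sd, blk, pos, blk_len,
						     (int)(out_end - pos));

		if (n < 0)
			break;	/* corrupt block */
		pos += n;	/* prior output must remain valid */
	}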
302 452
303int lz4_decompress(const unsigned char *src, size_t *src_len, 453/*
304 unsigned char *dest, size_t actual_dest_len) 454 * Advanced decoding functions :
455 * *_usingDict() :
456 * These decoding functions work the same as the "_continue" ones,
457 * but the dictionary must be explicitly provided within the parameters
458 */
459static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
460 char *dest, int compressedSize, int maxOutputSize, int safe,
461 const char *dictStart, int dictSize)
305{ 462{
306 int ret = -1; 463 if (dictSize == 0)
307 int input_len = 0; 464 return LZ4_decompress_generic(source, dest,
308 465 compressedSize, maxOutputSize, safe, full, 0,
309 input_len = lz4_uncompress(src, dest, actual_dest_len); 466 noDict, (BYTE *)dest, NULL, 0);
310 if (input_len < 0) 467 if (dictStart + dictSize == dest) {
311 goto exit_0; 468 if (dictSize >= (int)(64 * KB - 1))
312 *src_len = input_len; 469 return LZ4_decompress_generic(source, dest,
470 compressedSize, maxOutputSize, safe, full, 0,
471 withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
472 return LZ4_decompress_generic(source, dest, compressedSize,
473 maxOutputSize, safe, full, 0, noDict,
474 (BYTE *)dest - dictSize, NULL, 0);
475 }
476 return LZ4_decompress_generic(source, dest, compressedSize,
477 maxOutputSize, safe, full, 0, usingExtDict,
478 (BYTE *)dest, (const BYTE *)dictStart, dictSize);
479}
313 480
314 return 0; 481int LZ4_decompress_safe_usingDict(const char *source, char *dest,
315exit_0: 482 int compressedSize, int maxOutputSize,
316 return ret; 483 const char *dictStart, int dictSize)
484{
485 return LZ4_decompress_usingDict_generic(source, dest,
486 compressedSize, maxOutputSize, 1, dictStart, dictSize);
317} 487}
318#ifndef STATIC
319EXPORT_SYMBOL(lz4_decompress);
320#endif
321 488
322int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len, 489int LZ4_decompress_fast_usingDict(const char *source, char *dest,
323 unsigned char *dest, size_t *dest_len) 490 int originalSize, const char *dictStart, int dictSize)
324{ 491{
325 int ret = -1; 492 return LZ4_decompress_usingDict_generic(source, dest, 0,
326 int out_len = 0; 493 originalSize, 0, dictStart, dictSize);
327
328 out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
329 *dest_len);
330 if (out_len < 0)
331 goto exit_0;
332 *dest_len = out_len;
333
334 return 0;
335exit_0:
336 return ret;
337} 494}
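And the stateless counterpart, as a short hypothetical sketch: decoding a block that was compressed against a separate dictionary needs no stream object:

	int n = LZ4_decompress_safe_usingDict(comp, plain, comp_len,
					      plain_cap, dict, dict_len);
	/* same convention: decoded size, or < 0 on error */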
495
338#ifndef STATIC 496#ifndef STATIC
339EXPORT_SYMBOL(lz4_decompress_unknownoutputsize); 497EXPORT_SYMBOL(LZ4_decompress_safe);
498EXPORT_SYMBOL(LZ4_decompress_safe_partial);
499EXPORT_SYMBOL(LZ4_decompress_fast);
500EXPORT_SYMBOL(LZ4_setStreamDecode);
501EXPORT_SYMBOL(LZ4_decompress_safe_continue);
502EXPORT_SYMBOL(LZ4_decompress_fast_continue);
503EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
504EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
340 505
341MODULE_LICENSE("Dual BSD/GPL"); 506MODULE_LICENSE("Dual BSD/GPL");
342MODULE_DESCRIPTION("LZ4 Decompressor"); 507MODULE_DESCRIPTION("LZ4 decompressor");
343#endif 508#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index c79d7ea8a38e..00a0b58a0871 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -1,157 +1,227 @@
1#ifndef __LZ4DEFS_H__
2#define __LZ4DEFS_H__
3
1/* 4/*
2 * lz4defs.h -- architecture specific defines 5 * lz4defs.h -- common and architecture specific defines for the kernel usage
3 * 6
4 * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com> 7 * LZ4 - Fast LZ compression algorithm
8 * Copyright (C) 2011-2016, Yann Collet.
9 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met:
13 * * Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * * Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following disclaimer
17 * in the documentation and/or other materials provided with the
18 * distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * You can contact the author at :
31 * - LZ4 homepage : http://www.lz4.org
32 * - LZ4 source repository : https://github.com/lz4/lz4
5 * 33 *
6 * This program is free software; you can redistribute it and/or modify 34 * Changed for kernel usage by:
7 * it under the terms of the GNU General Public License version 2 as 35 * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
8 * published by the Free Software Foundation.
9 */ 36 */
10 37
11/* 38#include <asm/unaligned.h>
12 * Detects 64 bits mode 39#include <linux/string.h> /* memset, memcpy */
13 */ 40
41#define FORCE_INLINE __always_inline
42
43/*-************************************
44 * Basic Types
45 **************************************/
46#include <linux/types.h>
47
48typedef uint8_t BYTE;
49typedef uint16_t U16;
50typedef uint32_t U32;
51typedef int32_t S32;
52typedef uint64_t U64;
53typedef uintptr_t uptrval;
54
55/*-************************************
56 * Architecture specifics
57 **************************************/
14#if defined(CONFIG_64BIT) 58#if defined(CONFIG_64BIT)
15#define LZ4_ARCH64 1 59#define LZ4_ARCH64 1
16#else 60#else
17#define LZ4_ARCH64 0 61#define LZ4_ARCH64 0
18#endif 62#endif
19 63
20/* 64#if defined(__LITTLE_ENDIAN)
21 * Architecture-specific macros 65#define LZ4_LITTLE_ENDIAN 1
22 */ 66#else
23#define BYTE u8 67#define LZ4_LITTLE_ENDIAN 0
24typedef struct _U16_S { u16 v; } U16_S;
25typedef struct _U32_S { u32 v; } U32_S;
26typedef struct _U64_S { u64 v; } U64_S;
27#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
28
29#define A16(x) (((U16_S *)(x))->v)
30#define A32(x) (((U32_S *)(x))->v)
31#define A64(x) (((U64_S *)(x))->v)
32
33#define PUT4(s, d) (A32(d) = A32(s))
34#define PUT8(s, d) (A64(d) = A64(s))
35
36#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
37 (d = s - A16(p))
38
39#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
40 do { \
41 A16(p) = v; \
42 p += 2; \
43 } while (0)
44#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
45
46#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
47#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
48#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
49
50#define PUT4(s, d) \
51 put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
52#define PUT8(s, d) \
53 put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
54
55#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
56 (d = s - get_unaligned_le16(p))
57
58#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
59 do { \
60 put_unaligned_le16(v, (u16 *)(p)); \
61 p += 2; \
62 } while (0)
63#endif 68#endif
64 69
65#define COPYLENGTH 8 70/*-************************************
66#define ML_BITS 4 71 * Constants
67#define ML_MASK ((1U << ML_BITS) - 1) 72 **************************************/
73#define MINMATCH 4
74
75#define WILDCOPYLENGTH 8
76#define LASTLITERALS 5
77#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
78
 79/* Increase this value ==> compression runs slower on incompressible data */
80#define LZ4_SKIPTRIGGER 6
81
82#define HASH_UNIT sizeof(size_t)
83
84#define KB (1 << 10)
85#define MB (1 << 20)
86#define GB (1U << 30)
87
88#define MAXD_LOG 16
89#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
90#define STEPSIZE sizeof(size_t)
91
92#define ML_BITS 4
93#define ML_MASK ((1U << ML_BITS) - 1)
68#define RUN_BITS (8 - ML_BITS) 94#define RUN_BITS (8 - ML_BITS)
69#define RUN_MASK ((1U << RUN_BITS) - 1) 95#define RUN_MASK ((1U << RUN_BITS) - 1)
70#define MEMORY_USAGE 14 96
71#define MINMATCH 4 97/*-************************************
72#define SKIPSTRENGTH 6 98 * Reading and writing into memory
73#define LASTLITERALS 5 99 **************************************/
74#define MFLIMIT (COPYLENGTH + MINMATCH) 100static FORCE_INLINE U16 LZ4_read16(const void *ptr)
75#define MINLENGTH (MFLIMIT + 1) 101{
76#define MAXD_LOG 16 102 return get_unaligned((const U16 *)ptr);
77#define MAXD (1 << MAXD_LOG) 103}
78#define MAXD_MASK (u32)(MAXD - 1) 104
79#define MAX_DISTANCE (MAXD - 1) 105static FORCE_INLINE U32 LZ4_read32(const void *ptr)
80#define HASH_LOG (MAXD_LOG - 1) 106{
81#define HASHTABLESIZE (1 << HASH_LOG) 107 return get_unaligned((const U32 *)ptr);
82#define MAX_NB_ATTEMPTS 256 108}
83#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH) 109
84#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1)) 110static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
85#define HASHLOG64K ((MEMORY_USAGE - 2) + 1) 111{
86#define HASH64KTABLESIZE (1U << HASHLOG64K) 112 return get_unaligned((const size_t *)ptr);
87#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ 113}
88 ((MINMATCH * 8) - (MEMORY_USAGE-2))) 114
89#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \ 115static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
90 ((MINMATCH * 8) - HASHLOG64K)) 116{
91#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \ 117 put_unaligned(value, (U16 *)memPtr);
92 ((MINMATCH * 8) - HASH_LOG)) 118}
93 119
94#if LZ4_ARCH64/* 64-bit */ 120static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
95#define STEPSIZE 8 121{
96 122 put_unaligned(value, (U32 *)memPtr);
97#define LZ4_COPYSTEP(s, d) \ 123}
98 do { \ 124
99 PUT8(s, d); \ 125static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
100 d += 8; \ 126{
101 s += 8; \ 127 return get_unaligned_le16(memPtr);
102 } while (0) 128}
103 129
104#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d) 130static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
105 131{
106#define LZ4_SECURECOPY(s, d, e) \ 132 return put_unaligned_le16(value, memPtr);
107 do { \ 133}
108 if (d < e) { \ 134
109 LZ4_WILDCOPY(s, d, e); \ 135static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
110 } \ 136{
111 } while (0) 137#if LZ4_ARCH64
112#define HTYPE u32 138 U64 a = get_unaligned((const U64 *)src);
113 139
114#ifdef __BIG_ENDIAN 140 put_unaligned(a, (U64 *)dst);
115#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3) 141#else
142 U32 a = get_unaligned((const U32 *)src);
143 U32 b = get_unaligned((const U32 *)src + 1);
144
145 put_unaligned(a, (U32 *)dst);
146 put_unaligned(b, (U32 *)dst + 1);
147#endif
148}
149
150/*
151 * customized variant of memcpy,
152 * which can overwrite up to 7 bytes beyond dstEnd
153 */
154static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
155 const void *srcPtr, void *dstEnd)
156{
157 BYTE *d = (BYTE *)dstPtr;
158 const BYTE *s = (const BYTE *)srcPtr;
159 BYTE *const e = (BYTE *)dstEnd;
160
161 do {
162 LZ4_copy8(d, s);
163 d += 8;
164 s += 8;
165 } while (d < e);
166}
167
168static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
169{
170#if LZ4_LITTLE_ENDIAN
171 return __ffs(val) >> 3;
116#else 172#else
117#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3) 173 return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
174#endif
175}
176
177static FORCE_INLINE unsigned int LZ4_count(
178 const BYTE *pIn,
179 const BYTE *pMatch,
180 const BYTE *pInLimit)
181{
182 const BYTE *const pStart = pIn;
183
184 while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
185 size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
186
187 if (!diff) {
188 pIn += STEPSIZE;
189 pMatch += STEPSIZE;
190 continue;
191 }
192
193 pIn += LZ4_NbCommonBytes(diff);
194
195 return (unsigned int)(pIn - pStart);
196 }
197
198#if LZ4_ARCH64
199 if ((pIn < (pInLimit - 3))
200 && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
201 pIn += 4;
202 pMatch += 4;
203 }
118#endif 204#endif
119 205
120#else /* 32-bit */ 206 if ((pIn < (pInLimit - 1))
121#define STEPSIZE 4 207 && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
208 pIn += 2;
209 pMatch += 2;
210 }
122 211
123#define LZ4_COPYSTEP(s, d) \ 212 if ((pIn < pInLimit) && (*pMatch == *pIn))
124 do { \ 213 pIn++;
125 PUT4(s, d); \
126 d += 4; \
127 s += 4; \
128 } while (0)
129 214
130#define LZ4_COPYPACKET(s, d) \ 215 return (unsigned int)(pIn - pStart);
131 do { \ 216}
132 LZ4_COPYSTEP(s, d); \
133 LZ4_COPYSTEP(s, d); \
134 } while (0)
135 217
136#define LZ4_SECURECOPY LZ4_WILDCOPY 218typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
137#define HTYPE const u8* 219typedef enum { byPtr, byU32, byU16 } tableType_t;
138 220
139#ifdef __BIG_ENDIAN 221typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
140#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3) 222typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
141#else
142#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
143#endif
144 223
145#endif 224typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
225typedef enum { full = 0, partial = 1 } earlyEnd_directive;
146 226
147#define LZ4_WILDCOPY(s, d, e) \ 227#endif
148 do { \
149 LZ4_COPYPACKET(s, d); \
150 } while (d < e)
151
152#define LZ4_BLINDCOPY(s, d, l) \
153 do { \
154 u8 *e = (d) + l; \
155 LZ4_WILDCOPY(s, d, e); \
156 d = e; \
157 } while (0)
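
The word-at-a-time match scan in LZ4_count() above relies on LZ4_NbCommonBytes(): the XOR of two words is zero while they agree, and on a little-endian machine the index of the lowest set bit of the first non-zero XOR, divided by eight, is the number of leading bytes that still matched. A standalone sketch of the same trick, illustrative only, using compiler builtins in place of the kernel's __ffs()/__fls():

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Count leading bytes on which 'a' and 'b' agree; little-endian only. */
static size_t common_bytes(const uint8_t *a, const uint8_t *b, size_t len)
{
	size_t n = 0;

	while (n + sizeof(uint64_t) <= len) {
		uint64_t wa, wb;

		memcpy(&wa, a + n, sizeof(wa));
		memcpy(&wb, b + n, sizeof(wb));
		if (wa != wb)
			/* lowest set XOR bit >> 3 == first differing byte */
			return n + (__builtin_ctzll(wa ^ wb) >> 3);
		n += sizeof(uint64_t);
	}
	while (n < len && a[n] == b[n])
		n++;
	return n;
}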
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index f344f76b6559..176f03b83e56 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -1,19 +1,17 @@
1/* 1/*
2 * LZ4 HC - High Compression Mode of LZ4 2 * LZ4 HC - High Compression Mode of LZ4
3 * Copyright (C) 2011-2012, Yann Collet. 3 * Copyright (C) 2011-2015, Yann Collet.
4 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
5 * 4 *
  5 * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are 7 * modification, are permitted provided that the following conditions are
8 * met: 8 * met:
9 * 9 * * Redistributions of source code must retain the above copyright
10 * * Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer.
11 * notice, this list of conditions and the following disclaimer. 11 * * Redistributions in binary form must reproduce the above
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer 12 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the 13 * in the documentation and/or other materials provided with the
15 * distribution. 14 * distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,323 +23,361 @@
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * You can contact the author at : 26 * You can contact the author at :
30 * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html 27 * - LZ4 homepage : http://www.lz4.org
31 * - LZ4 source repository : http://code.google.com/p/lz4/ 28 * - LZ4 source repository : https://github.com/lz4/lz4
32 * 29 *
33 * Changed for kernel use by: 30 * Changed for kernel usage by:
34 * Chanho Min <chanho.min@lge.com> 31 * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
35 */ 32 */
36 33
37#include <linux/module.h> 34/*-************************************
38#include <linux/kernel.h> 35 * Dependencies
36 **************************************/
39#include <linux/lz4.h> 37#include <linux/lz4.h>
40#include <asm/unaligned.h>
41#include "lz4defs.h" 38#include "lz4defs.h"
39#include <linux/module.h>
40#include <linux/kernel.h>
41#include <linux/string.h> /* memset */
42 42
43struct lz4hc_data { 43/* *************************************
44 const u8 *base; 44 * Local Constants and types
45 HTYPE hashtable[HASHTABLESIZE]; 45 ***************************************/
46 u16 chaintable[MAXD];
47 const u8 *nexttoupdate;
48} __attribute__((__packed__));
49 46
50static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base) 47#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
48
49#define HASH_FUNCTION(i) (((i) * 2654435761U) \
50 >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
51#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
52
53static U32 LZ4HC_hashPtr(const void *ptr)
51{ 54{
52 memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable)); 55 return HASH_FUNCTION(LZ4_read32(ptr));
53 memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable)); 56}
54 57
55#if LZ4_ARCH64 58/**************************************
56 hc4->nexttoupdate = base + 1; 59 * HC Compression
57#else 60 **************************************/
58 hc4->nexttoupdate = base; 61static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
59#endif 62{
60 hc4->base = base; 63 memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
61 return 1; 64 memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
65 hc4->nextToUpdate = 64 * KB;
66 hc4->base = start - 64 * KB;
67 hc4->end = start;
68 hc4->dictBase = start - 64 * KB;
69 hc4->dictLimit = 64 * KB;
70 hc4->lowLimit = 64 * KB;
62} 71}
63 72
64/* Update chains up to ip (excluded) */ 73/* Update chains up to ip (excluded) */
65static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip) 74static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
75 const BYTE *ip)
66{ 76{
67 u16 *chaintable = hc4->chaintable; 77 U16 * const chainTable = hc4->chainTable;
68 HTYPE *hashtable = hc4->hashtable; 78 U32 * const hashTable = hc4->hashTable;
69#if LZ4_ARCH64
70 const BYTE * const base = hc4->base; 79 const BYTE * const base = hc4->base;
71#else 80 U32 const target = (U32)(ip - base);
72 const int base = 0; 81 U32 idx = hc4->nextToUpdate;
73#endif 82
83 while (idx < target) {
84 U32 const h = LZ4HC_hashPtr(base + idx);
85 size_t delta = idx - hashTable[h];
74 86
75 while (hc4->nexttoupdate < ip) {
76 const u8 *p = hc4->nexttoupdate;
77 size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
78 if (delta > MAX_DISTANCE) 87 if (delta > MAX_DISTANCE)
79 delta = MAX_DISTANCE; 88 delta = MAX_DISTANCE;
80 chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
81 hashtable[HASH_VALUE(p)] = (p) - base;
82 hc4->nexttoupdate++;
83 }
84}
85 89
86static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2, 90 DELTANEXTU16(idx) = (U16)delta;
87 const u8 *const matchlimit)
88{
89 const u8 *p1t = p1;
90
91 while (p1t < matchlimit - (STEPSIZE - 1)) {
92#if LZ4_ARCH64
93 u64 diff = A64(p2) ^ A64(p1t);
94#else
95 u32 diff = A32(p2) ^ A32(p1t);
96#endif
97 if (!diff) {
98 p1t += STEPSIZE;
99 p2 += STEPSIZE;
100 continue;
101 }
102 p1t += LZ4_NBCOMMONBYTES(diff);
103 return p1t - p1;
104 }
105#if LZ4_ARCH64
106 if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
107 p1t += 4;
108 p2 += 4;
109 }
110#endif
111 91
112 if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) { 92 hashTable[h] = idx;
113 p1t += 2; 93 idx++;
114 p2 += 2;
115 } 94 }
116 if ((p1t < matchlimit) && (*p2 == *p1t)) 95
117 p1t++; 96 hc4->nextToUpdate = target;
118 return p1t - p1;
119} 97}
120 98
121static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4, 99static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
122 const u8 *ip, const u8 *const matchlimit, const u8 **matchpos) 100 LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
101 const BYTE *ip,
102 const BYTE * const iLimit,
103 const BYTE **matchpos,
104 const int maxNbAttempts)
123{ 105{
124 u16 *const chaintable = hc4->chaintable; 106 U16 * const chainTable = hc4->chainTable;
125 HTYPE *const hashtable = hc4->hashtable; 107 U32 * const HashTable = hc4->hashTable;
126 const u8 *ref;
127#if LZ4_ARCH64
128 const BYTE * const base = hc4->base; 108 const BYTE * const base = hc4->base;
129#else 109 const BYTE * const dictBase = hc4->dictBase;
130 const int base = 0; 110 const U32 dictLimit = hc4->dictLimit;
131#endif 111 const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
132 int nbattempts = MAX_NB_ATTEMPTS; 112 ? hc4->lowLimit
133 size_t repl = 0, ml = 0; 113 : (U32)(ip - base) - (64 * KB - 1);
134 u16 delta; 114 U32 matchIndex;
115 int nbAttempts = maxNbAttempts;
116 size_t ml = 0;
135 117
136 /* HC4 match finder */ 118 /* HC4 match finder */
137 lz4hc_insert(hc4, ip); 119 LZ4HC_Insert(hc4, ip);
138 ref = hashtable[HASH_VALUE(ip)] + base; 120 matchIndex = HashTable[LZ4HC_hashPtr(ip)];
139 121
140 /* potential repetition */ 122 while ((matchIndex >= lowLimit)
141 if (ref >= ip-4) { 123 && (nbAttempts)) {
142 /* confirmed */ 124 nbAttempts--;
143 if (A32(ref) == A32(ip)) { 125 if (matchIndex >= dictLimit) {
144 delta = (u16)(ip-ref); 126 const BYTE * const match = base + matchIndex;
145 repl = ml = lz4hc_commonlength(ip + MINMATCH, 127
146 ref + MINMATCH, matchlimit) + MINMATCH; 128 if (*(match + ml) == *(ip + ml)
147 *matchpos = ref; 129 && (LZ4_read32(match) == LZ4_read32(ip))) {
148 } 130 size_t const mlt = LZ4_count(ip + MINMATCH,
149 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; 131 match + MINMATCH, iLimit) + MINMATCH;
150 }
151 132
152 while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
153 nbattempts--;
154 if (*(ref + ml) == *(ip + ml)) {
155 if (A32(ref) == A32(ip)) {
156 size_t mlt =
157 lz4hc_commonlength(ip + MINMATCH,
158 ref + MINMATCH, matchlimit) + MINMATCH;
159 if (mlt > ml) { 133 if (mlt > ml) {
160 ml = mlt; 134 ml = mlt;
161 *matchpos = ref; 135 *matchpos = match;
136 }
137 }
138 } else {
139 const BYTE * const match = dictBase + matchIndex;
140
141 if (LZ4_read32(match) == LZ4_read32(ip)) {
142 size_t mlt;
143 const BYTE *vLimit = ip
144 + (dictLimit - matchIndex);
145
146 if (vLimit > iLimit)
147 vLimit = iLimit;
148 mlt = LZ4_count(ip + MINMATCH,
149 match + MINMATCH, vLimit) + MINMATCH;
150 if ((ip + mlt == vLimit)
151 && (vLimit < iLimit))
152 mlt += LZ4_count(ip + mlt,
153 base + dictLimit,
154 iLimit);
155 if (mlt > ml) {
156 /* virtual matchpos */
157 ml = mlt;
158 *matchpos = base + matchIndex;
162 } 159 }
163 } 160 }
164 } 161 }
165 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; 162 matchIndex -= DELTANEXTU16(matchIndex);
166 }
167
168 /* Complete table */
169 if (repl) {
170 const BYTE *ptr = ip;
171 const BYTE *end;
172 end = ip + repl - (MINMATCH-1);
173 /* Pre-Load */
174 while (ptr < end - delta) {
175 chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
176 ptr++;
177 }
178 do {
179 chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
180 /* Head of chain */
181 hashtable[HASH_VALUE(ptr)] = (ptr) - base;
182 ptr++;
183 } while (ptr < end);
184 hc4->nexttoupdate = end;
185 } 163 }
186 164
187 return (int)ml; 165 return (int)ml;
188} 166}
189 167
190static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4, 168static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
191 const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest, 169 LZ4HC_CCtx_internal *hc4,
192 const u8 **matchpos, const u8 **startpos) 170 const BYTE * const ip,
171 const BYTE * const iLowLimit,
172 const BYTE * const iHighLimit,
173 int longest,
174 const BYTE **matchpos,
175 const BYTE **startpos,
176 const int maxNbAttempts)
193{ 177{
194 u16 *const chaintable = hc4->chaintable; 178 U16 * const chainTable = hc4->chainTable;
195 HTYPE *const hashtable = hc4->hashtable; 179 U32 * const HashTable = hc4->hashTable;
196#if LZ4_ARCH64
197 const BYTE * const base = hc4->base; 180 const BYTE * const base = hc4->base;
198#else 181 const U32 dictLimit = hc4->dictLimit;
199 const int base = 0; 182 const BYTE * const lowPrefixPtr = base + dictLimit;
200#endif 183 const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
201 const u8 *ref; 184 ? hc4->lowLimit
202 int nbattempts = MAX_NB_ATTEMPTS; 185 : (U32)(ip - base) - (64 * KB - 1);
203 int delta = (int)(ip - startlimit); 186 const BYTE * const dictBase = hc4->dictBase;
187 U32 matchIndex;
188 int nbAttempts = maxNbAttempts;
189 int delta = (int)(ip - iLowLimit);
204 190
205 /* First Match */ 191 /* First Match */
206 lz4hc_insert(hc4, ip); 192 LZ4HC_Insert(hc4, ip);
207 ref = hashtable[HASH_VALUE(ip)] + base; 193 matchIndex = HashTable[LZ4HC_hashPtr(ip)];
208 194
209 while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base) 195 while ((matchIndex >= lowLimit)
210 && (nbattempts)) { 196 && (nbAttempts)) {
211 nbattempts--; 197 nbAttempts--;
212 if (*(startlimit + longest) == *(ref - delta + longest)) { 198 if (matchIndex >= dictLimit) {
213 if (A32(ref) == A32(ip)) { 199 const BYTE *matchPtr = base + matchIndex;
214 const u8 *reft = ref + MINMATCH; 200
215 const u8 *ipt = ip + MINMATCH; 201 if (*(iLowLimit + longest)
216 const u8 *startt = ip; 202 == *(matchPtr - delta + longest)) {
217 203 if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
218 while (ipt < matchlimit-(STEPSIZE - 1)) { 204 int mlt = MINMATCH + LZ4_count(
219 #if LZ4_ARCH64 205 ip + MINMATCH,
220 u64 diff = A64(reft) ^ A64(ipt); 206 matchPtr + MINMATCH,
221 #else 207 iHighLimit);
222 u32 diff = A32(reft) ^ A32(ipt); 208 int back = 0;
223 #endif 209
224 210 while ((ip + back > iLowLimit)
225 if (!diff) { 211 && (matchPtr + back > lowPrefixPtr)
226 ipt += STEPSIZE; 212 && (ip[back - 1] == matchPtr[back - 1]))
227 reft += STEPSIZE; 213 back--;
228 continue; 214
215 mlt -= back;
216
217 if (mlt > longest) {
218 longest = (int)mlt;
219 *matchpos = matchPtr + back;
220 *startpos = ip + back;
229 } 221 }
230 ipt += LZ4_NBCOMMONBYTES(diff);
231 goto _endcount;
232 }
233 #if LZ4_ARCH64
234 if ((ipt < (matchlimit - 3))
235 && (A32(reft) == A32(ipt))) {
236 ipt += 4;
237 reft += 4;
238 }
239 ipt += 2;
240 #endif
241 if ((ipt < (matchlimit - 1))
242 && (A16(reft) == A16(ipt))) {
243 reft += 2;
244 } 222 }
245 if ((ipt < matchlimit) && (*reft == *ipt)) 223 }
246 ipt++; 224 } else {
247_endcount: 225 const BYTE * const matchPtr = dictBase + matchIndex;
248 reft = ref; 226
249 227 if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
250 while ((startt > startlimit) 228 size_t mlt;
251 && (reft > hc4->base) 229 int back = 0;
252 && (startt[-1] == reft[-1])) { 230 const BYTE *vLimit = ip + (dictLimit - matchIndex);
253 startt--; 231
254 reft--; 232 if (vLimit > iHighLimit)
255 } 233 vLimit = iHighLimit;
256 234
257 if ((ipt - startt) > longest) { 235 mlt = LZ4_count(ip + MINMATCH,
258 longest = (int)(ipt - startt); 236 matchPtr + MINMATCH, vLimit) + MINMATCH;
259 *matchpos = reft; 237
260 *startpos = startt; 238 if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
239 mlt += LZ4_count(ip + mlt, base + dictLimit,
240 iHighLimit);
241 while ((ip + back > iLowLimit)
242 && (matchIndex + back > lowLimit)
243 && (ip[back - 1] == matchPtr[back - 1]))
244 back--;
245
246 mlt -= back;
247
248 if ((int)mlt > longest) {
249 longest = (int)mlt;
250 *matchpos = base + matchIndex + back;
251 *startpos = ip + back;
261 } 252 }
262 } 253 }
263 } 254 }
264 ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK]; 255
256 matchIndex -= DELTANEXTU16(matchIndex);
265 } 257 }
258
266 return longest; 259 return longest;
267} 260}
268 261
269static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor, 262static FORCE_INLINE int LZ4HC_encodeSequence(
270 int ml, const u8 *ref) 263 const BYTE **ip,
264 BYTE **op,
265 const BYTE **anchor,
266 int matchLength,
267 const BYTE * const match,
268 limitedOutput_directive limitedOutputBuffer,
269 BYTE *oend)
271{ 270{
272 int length, len; 271 int length;
273 u8 *token; 272 BYTE *token;
274 273
275 /* Encode Literal length */ 274 /* Encode Literal length */
276 length = (int)(*ip - *anchor); 275 length = (int)(*ip - *anchor);
277 token = (*op)++; 276 token = (*op)++;
277
278 if ((limitedOutputBuffer)
279 && ((*op + (length>>8)
280 + length + (2 + 1 + LASTLITERALS)) > oend)) {
281 /* Check output limit */
282 return 1;
283 }
278 if (length >= (int)RUN_MASK) { 284 if (length >= (int)RUN_MASK) {
279 *token = (RUN_MASK << ML_BITS); 285 int len;
286
287 *token = (RUN_MASK<<ML_BITS);
280 len = length - RUN_MASK; 288 len = length - RUN_MASK;
281 for (; len > 254 ; len -= 255) 289 for (; len > 254 ; len -= 255)
282 *(*op)++ = 255; 290 *(*op)++ = 255;
283 *(*op)++ = (u8)len; 291 *(*op)++ = (BYTE)len;
284 } else 292 } else
285 *token = (length << ML_BITS); 293 *token = (BYTE)(length<<ML_BITS);
286 294
287 /* Copy Literals */ 295 /* Copy Literals */
288 LZ4_BLINDCOPY(*anchor, *op, length); 296 LZ4_wildCopy(*op, *anchor, (*op) + length);
297 *op += length;
289 298
290 /* Encode Offset */ 299 /* Encode Offset */
291 LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref)); 300 LZ4_writeLE16(*op, (U16)(*ip - match));
301 *op += 2;
292 302
293 /* Encode MatchLength */ 303 /* Encode MatchLength */
294 len = (int)(ml - MINMATCH); 304 length = (int)(matchLength - MINMATCH);
295 if (len >= (int)ML_MASK) { 305
306 if ((limitedOutputBuffer)
307 && (*op + (length>>8)
308 + (1 + LASTLITERALS) > oend)) {
309 /* Check output limit */
310 return 1;
311 }
312
313 if (length >= (int)ML_MASK) {
296 *token += ML_MASK; 314 *token += ML_MASK;
297 len -= ML_MASK; 315 length -= ML_MASK;
298 for (; len > 509 ; len -= 510) { 316
317 for (; length > 509 ; length -= 510) {
299 *(*op)++ = 255; 318 *(*op)++ = 255;
300 *(*op)++ = 255; 319 *(*op)++ = 255;
301 } 320 }
302 if (len > 254) { 321
303 len -= 255; 322 if (length > 254) {
323 length -= 255;
304 *(*op)++ = 255; 324 *(*op)++ = 255;
305 } 325 }
306 *(*op)++ = (u8)len; 326
327 *(*op)++ = (BYTE)length;
307 } else 328 } else
308 *token += len; 329 *token += (BYTE)(length);
309 330
310 /* Prepare next loop */ 331 /* Prepare next loop */
311 *ip += ml; 332 *ip += matchLength;
312 *anchor = *ip; 333 *anchor = *ip;
313 334
314 return 0; 335 return 0;
315} 336}
316 337
317static int lz4_compresshcctx(struct lz4hc_data *ctx, 338static int LZ4HC_compress_generic(
318 const char *source, 339 LZ4HC_CCtx_internal *const ctx,
319 char *dest, 340 const char * const source,
320 int isize) 341 char * const dest,
342 int const inputSize,
343 int const maxOutputSize,
344 int compressionLevel,
345 limitedOutput_directive limit
346 )
321{ 347{
322 const u8 *ip = (const u8 *)source; 348 const BYTE *ip = (const BYTE *) source;
323 const u8 *anchor = ip; 349 const BYTE *anchor = ip;
324 const u8 *const iend = ip + isize; 350 const BYTE * const iend = ip + inputSize;
325 const u8 *const mflimit = iend - MFLIMIT; 351 const BYTE * const mflimit = iend - MFLIMIT;
326 const u8 *const matchlimit = (iend - LASTLITERALS); 352 const BYTE * const matchlimit = (iend - LASTLITERALS);
327 353
328 u8 *op = (u8 *)dest; 354 BYTE *op = (BYTE *) dest;
355 BYTE * const oend = op + maxOutputSize;
329 356
357 unsigned int maxNbAttempts;
330 int ml, ml2, ml3, ml0; 358 int ml, ml2, ml3, ml0;
331 const u8 *ref = NULL; 359 const BYTE *ref = NULL;
332 const u8 *start2 = NULL; 360 const BYTE *start2 = NULL;
333 const u8 *ref2 = NULL; 361 const BYTE *ref2 = NULL;
334 const u8 *start3 = NULL; 362 const BYTE *start3 = NULL;
335 const u8 *ref3 = NULL; 363 const BYTE *ref3 = NULL;
336 const u8 *start0; 364 const BYTE *start0;
337 const u8 *ref0; 365 const BYTE *ref0;
338 int lastrun; 366
367 /* init */
368 if (compressionLevel > LZ4HC_MAX_CLEVEL)
369 compressionLevel = LZ4HC_MAX_CLEVEL;
370 if (compressionLevel < 1)
371 compressionLevel = LZ4HC_DEFAULT_CLEVEL;
372 maxNbAttempts = 1 << (compressionLevel - 1);
373 ctx->end += inputSize;
339 374
340 ip++; 375 ip++;
341 376
342 /* Main Loop */ 377 /* Main Loop */
343 while (ip < mflimit) { 378 while (ip < mflimit) {
344 ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref)); 379 ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
380 matchlimit, (&ref), maxNbAttempts);
345 if (!ml) { 381 if (!ml) {
346 ip++; 382 ip++;
347 continue; 383 continue;
@@ -351,51 +387,59 @@ static int lz4_compresshcctx(struct lz4hc_data *ctx,
351 start0 = ip; 387 start0 = ip;
352 ref0 = ref; 388 ref0 = ref;
353 ml0 = ml; 389 ml0 = ml;
354_search2: 390
355 if (ip+ml < mflimit) 391_Search2:
356 ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2, 392 if (ip + ml < mflimit)
357 ip + 1, matchlimit, ml, &ref2, &start2); 393 ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
394 ip + ml - 2, ip + 0,
395 matchlimit, ml, &ref2,
396 &start2, maxNbAttempts);
358 else 397 else
359 ml2 = ml; 398 ml2 = ml;
360 /* No better match */ 399
361 if (ml2 == ml) { 400 if (ml2 == ml) {
362 lz4_encodesequence(&ip, &op, &anchor, ml, ref); 401 /* No better match */
402 if (LZ4HC_encodeSequence(&ip, &op,
403 &anchor, ml, ref, limit, oend))
404 return 0;
363 continue; 405 continue;
364 } 406 }
365 407
366 if (start0 < ip) { 408 if (start0 < ip) {
367 /* empirical */
368 if (start2 < ip + ml0) { 409 if (start2 < ip + ml0) {
410 /* empirical */
369 ip = start0; 411 ip = start0;
370 ref = ref0; 412 ref = ref0;
371 ml = ml0; 413 ml = ml0;
372 } 414 }
373 } 415 }
374 /* 416
375 * Here, start0==ip 417 /* Here, start0 == ip */
376 * First Match too small : removed
377 */
378 if ((start2 - ip) < 3) { 418 if ((start2 - ip) < 3) {
419 /* First Match too small : removed */
379 ml = ml2; 420 ml = ml2;
380 ip = start2; 421 ip = start2;
381 ref = ref2; 422 ref = ref2;
382 goto _search2; 423 goto _Search2;
383 } 424 }
384 425
385_search3: 426_Search3:
386 /* 427 /*
387 * Currently we have : 428 * Currently we have :
388 * ml2 > ml1, and 429 * ml2 > ml1, and
389 * ip1+3 <= ip2 (usually < ip1+ml1) 430 * ip1 + 3 <= ip2 (usually < ip1 + ml1)
390 */ 431 */
391 if ((start2 - ip) < OPTIMAL_ML) { 432 if ((start2 - ip) < OPTIMAL_ML) {
392 int correction; 433 int correction;
393 int new_ml = ml; 434 int new_ml = ml;
435
394 if (new_ml > OPTIMAL_ML) 436 if (new_ml > OPTIMAL_ML)
395 new_ml = OPTIMAL_ML; 437 new_ml = OPTIMAL_ML;
396 if (ip + new_ml > start2 + ml2 - MINMATCH) 438 if (ip + new_ml > start2 + ml2 - MINMATCH)
397 new_ml = (int)(start2 - ip) + ml2 - MINMATCH; 439 new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
440
398 correction = new_ml - (int)(start2 - ip); 441 correction = new_ml - (int)(start2 - ip);
442
399 if (correction > 0) { 443 if (correction > 0) {
400 start2 += correction; 444 start2 += correction;
401 ref2 += correction; 445 ref2 += correction;
@@ -403,39 +447,44 @@ _search3:
403 } 447 }
404 } 448 }
405 /* 449 /*
406 * Now, we have start2 = ip+new_ml, 450 * Now, we have start2 = ip + new_ml,
407 * with new_ml=min(ml, OPTIMAL_ML=18) 451 * with new_ml = min(ml, OPTIMAL_ML = 18)
408 */ 452 */
453
409 if (start2 + ml2 < mflimit) 454 if (start2 + ml2 < mflimit)
410 ml3 = lz4hc_insertandgetwidermatch(ctx, 455 ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
411 start2 + ml2 - 3, start2, matchlimit, 456 start2 + ml2 - 3, start2,
412 ml2, &ref3, &start3); 457 matchlimit, ml2, &ref3, &start3,
458 maxNbAttempts);
413 else 459 else
414 ml3 = ml2; 460 ml3 = ml2;
415 461
416 /* No better match : 2 sequences to encode */
417 if (ml3 == ml2) { 462 if (ml3 == ml2) {
463 /* No better match : 2 sequences to encode */
418 /* ip & ref are known; Now for ml */ 464 /* ip & ref are known; Now for ml */
419 if (start2 < ip+ml) 465 if (start2 < ip + ml)
420 ml = (int)(start2 - ip); 466 ml = (int)(start2 - ip);
421
422 /* Now, encode 2 sequences */ 467 /* Now, encode 2 sequences */
423 lz4_encodesequence(&ip, &op, &anchor, ml, ref); 468 if (LZ4HC_encodeSequence(&ip, &op, &anchor,
469 ml, ref, limit, oend))
470 return 0;
424 ip = start2; 471 ip = start2;
425 lz4_encodesequence(&ip, &op, &anchor, ml2, ref2); 472 if (LZ4HC_encodeSequence(&ip, &op, &anchor,
473 ml2, ref2, limit, oend))
474 return 0;
426 continue; 475 continue;
427 } 476 }
428 477
429 /* Not enough space for match 2 : remove it */
430 if (start3 < ip + ml + 3) { 478 if (start3 < ip + ml + 3) {
431 /* 479 /* Not enough space for match 2 : remove it */
432 * can write Seq1 immediately ==> Seq2 is removed,
433 * so Seq3 becomes Seq1
434 */
435 if (start3 >= (ip + ml)) { 480 if (start3 >= (ip + ml)) {
481 /* can write Seq1 immediately
482 * ==> Seq2 is removed,
483 * so Seq3 becomes Seq1
484 */
436 if (start2 < ip + ml) { 485 if (start2 < ip + ml) {
437 int correction = 486 int correction = (int)(ip + ml - start2);
438 (int)(ip + ml - start2); 487
439 start2 += correction; 488 start2 += correction;
440 ref2 += correction; 489 ref2 += correction;
441 ml2 -= correction; 490 ml2 -= correction;
@@ -446,35 +495,38 @@ _search3:
446 } 495 }
447 } 496 }
448 497
449 lz4_encodesequence(&ip, &op, &anchor, ml, ref); 498 if (LZ4HC_encodeSequence(&ip, &op, &anchor,
450 ip = start3; 499 ml, ref, limit, oend))
500 return 0;
501 ip = start3;
451 ref = ref3; 502 ref = ref3;
452 ml = ml3; 503 ml = ml3;
453 504
454 start0 = start2; 505 start0 = start2;
455 ref0 = ref2; 506 ref0 = ref2;
456 ml0 = ml2; 507 ml0 = ml2;
457 goto _search2; 508 goto _Search2;
458 } 509 }
459 510
460 start2 = start3; 511 start2 = start3;
461 ref2 = ref3; 512 ref2 = ref3;
462 ml2 = ml3; 513 ml2 = ml3;
463 goto _search3; 514 goto _Search3;
464 } 515 }
465 516
466 /* 517 /*
467 * OK, now we have 3 ascending matches; let's write at least 518 * OK, now we have 3 ascending matches;
468 * the first one ip & ref are known; Now for ml 519 * let's write at least the first one
469 */ 520 * ip & ref are known; Now for ml
521 */
470 if (start2 < ip + ml) { 522 if (start2 < ip + ml) {
471 if ((start2 - ip) < (int)ML_MASK) { 523 if ((start2 - ip) < (int)ML_MASK) {
472 int correction; 524 int correction;
525
473 if (ml > OPTIMAL_ML) 526 if (ml > OPTIMAL_ML)
474 ml = OPTIMAL_ML; 527 ml = OPTIMAL_ML;
475 if (ip + ml > start2 + ml2 - MINMATCH) 528 if (ip + ml > start2 + ml2 - MINMATCH)
476 ml = (int)(start2 - ip) + ml2 529 ml = (int)(start2 - ip) + ml2 - MINMATCH;
477 - MINMATCH;
478 correction = ml - (int)(start2 - ip); 530 correction = ml - (int)(start2 - ip);
479 if (correction > 0) { 531 if (correction > 0) {
480 start2 += correction; 532 start2 += correction;
@@ -484,7 +536,9 @@ _search3:
484 } else 536 } else
485 ml = (int)(start2 - ip); 537 ml = (int)(start2 - ip);
486 } 538 }
487 lz4_encodesequence(&ip, &op, &anchor, ml, ref); 539 if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
540 ref, limit, oend))
541 return 0;
488 542
489 ip = start2; 543 ip = start2;
490 ref = ref2; 544 ref = ref2;
@@ -494,46 +548,222 @@ _search3:
494 ref2 = ref3; 548 ref2 = ref3;
495 ml2 = ml3; 549 ml2 = ml3;
496 550
497 goto _search3; 551 goto _Search3;
498 } 552 }
499 553
500 /* Encode Last Literals */ 554 /* Encode Last Literals */
501 lastrun = (int)(iend - anchor); 555 {
502 if (lastrun >= (int)RUN_MASK) { 556 int lastRun = (int)(iend - anchor);
503 *op++ = (RUN_MASK << ML_BITS); 557
504 lastrun -= RUN_MASK; 558 if ((limit)
505 for (; lastrun > 254 ; lastrun -= 255) 559 && (((char *)op - dest) + lastRun + 1
506 *op++ = 255; 560 + ((lastRun + 255 - RUN_MASK)/255)
507 *op++ = (u8) lastrun; 561 > (U32)maxOutputSize)) {
508 } else 562 /* Check output limit */
509 *op++ = (lastrun << ML_BITS); 563 return 0;
510 memcpy(op, anchor, iend - anchor); 564 }
511 op += iend - anchor; 565 if (lastRun >= (int)RUN_MASK) {
566 *op++ = (RUN_MASK<<ML_BITS);
567 lastRun -= RUN_MASK;
568 for (; lastRun > 254 ; lastRun -= 255)
569 *op++ = 255;
570 *op++ = (BYTE) lastRun;
571 } else
572 *op++ = (BYTE)(lastRun<<ML_BITS);
573 memcpy(op, anchor, iend - anchor);
574 op += iend - anchor;
575 }
576
512 /* End */ 577 /* End */
513 return (int) (((char *)op) - dest); 578 return (int) (((char *)op) - dest);
514} 579}
515 580
516int lz4hc_compress(const unsigned char *src, size_t src_len, 581static int LZ4_compress_HC_extStateHC(
517 unsigned char *dst, size_t *dst_len, void *wrkmem) 582 void *state,
583 const char *src,
584 char *dst,
585 int srcSize,
586 int maxDstSize,
587 int compressionLevel)
518{ 588{
519 int ret = -1; 589 LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
520 int out_len = 0;
521 590
522 struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem; 591 if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
523 lz4hc_init(hc4, (const u8 *)src); 592 /* Error : state is not aligned
524 out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src, 593 * for pointers (32 or 64 bits)
525 (char *)dst, (int)src_len); 594 */
595 return 0;
596 }
526 597
527 if (out_len < 0) 598 LZ4HC_init(ctx, (const BYTE *)src);
528 goto exit;
529 599
530 *dst_len = out_len; 600 if (maxDstSize < LZ4_compressBound(srcSize))
531 return 0; 601 return LZ4HC_compress_generic(ctx, src, dst,
602 srcSize, maxDstSize, compressionLevel, limitedOutput);
603 else
604 return LZ4HC_compress_generic(ctx, src, dst,
605 srcSize, maxDstSize, compressionLevel, noLimit);
606}
607
608int LZ4_compress_HC(const char *src, char *dst, int srcSize,
609 int maxDstSize, int compressionLevel, void *wrkmem)
610{
611 return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
612 srcSize, maxDstSize, compressionLevel);
613}
614EXPORT_SYMBOL(LZ4_compress_HC);
615
616/**************************************
617 * Streaming Functions
618 **************************************/
619void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
620{
621 LZ4_streamHCPtr->internal_donotuse.base = NULL;
622 LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
623}
624
625int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
626 const char *dictionary,
627 int dictSize)
628{
629 LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
630
631 if (dictSize > 64 * KB) {
632 dictionary += dictSize - 64 * KB;
633 dictSize = 64 * KB;
634 }
635 LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
636 if (dictSize >= 4)
637 LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
638 ctxPtr->end = (const BYTE *)dictionary + dictSize;
639 return dictSize;
640}
641EXPORT_SYMBOL(LZ4_loadDictHC);
532 642
533exit: 643/* compression */
534 return ret; 644
645static void LZ4HC_setExternalDict(
646 LZ4HC_CCtx_internal *ctxPtr,
647 const BYTE *newBlock)
648{
649 if (ctxPtr->end >= ctxPtr->base + 4) {
650 /* Referencing remaining dictionary content */
651 LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
652 }
653
654 /*
655 * Only one memory segment for extDict,
656 * so any previous extDict is lost at this stage
657 */
658 ctxPtr->lowLimit = ctxPtr->dictLimit;
659 ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
660 ctxPtr->dictBase = ctxPtr->base;
661 ctxPtr->base = newBlock - ctxPtr->dictLimit;
662 ctxPtr->end = newBlock;
663 /* match referencing will resume from there */
664 ctxPtr->nextToUpdate = ctxPtr->dictLimit;
665}
666EXPORT_SYMBOL(LZ4HC_setExternalDict);
667
668static int LZ4_compressHC_continue_generic(
669 LZ4_streamHC_t *LZ4_streamHCPtr,
670 const char *source,
671 char *dest,
672 int inputSize,
673 int maxOutputSize,
674 limitedOutput_directive limit)
675{
676 LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
677
 678	/* auto-init if forgotten */
679 if (ctxPtr->base == NULL)
680 LZ4HC_init(ctxPtr, (const BYTE *) source);
681
682 /* Check overflow */
683 if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
684 size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
685 - ctxPtr->dictLimit;
686 if (dictSize > 64 * KB)
687 dictSize = 64 * KB;
688 LZ4_loadDictHC(LZ4_streamHCPtr,
689 (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
690 }
691
692 /* Check if blocks follow each other */
693 if ((const BYTE *)source != ctxPtr->end)
694 LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
695
696 /* Check overlapping input/dictionary space */
697 {
698 const BYTE *sourceEnd = (const BYTE *) source + inputSize;
699 const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
700 const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
701
702 if ((sourceEnd > dictBegin)
703 && ((const BYTE *)source < dictEnd)) {
704 if (sourceEnd > dictEnd)
705 sourceEnd = dictEnd;
706 ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
707
708 if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
709 ctxPtr->lowLimit = ctxPtr->dictLimit;
710 }
711 }
712
713 return LZ4HC_compress_generic(ctxPtr, source, dest,
714 inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
715}
716
717int LZ4_compress_HC_continue(
718 LZ4_streamHC_t *LZ4_streamHCPtr,
719 const char *source,
720 char *dest,
721 int inputSize,
722 int maxOutputSize)
723{
724 if (maxOutputSize < LZ4_compressBound(inputSize))
725 return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
726 source, dest, inputSize, maxOutputSize, limitedOutput);
727 else
728 return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
729 source, dest, inputSize, maxOutputSize, noLimit);
730}
731EXPORT_SYMBOL(LZ4_compress_HC_continue);
732
733/* dictionary saving */
734
735int LZ4_saveDictHC(
736 LZ4_streamHC_t *LZ4_streamHCPtr,
737 char *safeBuffer,
738 int dictSize)
739{
740 LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
741 int const prefixSize = (int)(streamPtr->end
742 - (streamPtr->base + streamPtr->dictLimit));
743
744 if (dictSize > 64 * KB)
745 dictSize = 64 * KB;
746 if (dictSize < 4)
747 dictSize = 0;
748 if (dictSize > prefixSize)
749 dictSize = prefixSize;
750
751 memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
752
753 {
754 U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
755
756 streamPtr->end = (const BYTE *)safeBuffer + dictSize;
757 streamPtr->base = streamPtr->end - endIndex;
758 streamPtr->dictLimit = endIndex - dictSize;
759 streamPtr->lowLimit = endIndex - dictSize;
760
761 if (streamPtr->nextToUpdate < streamPtr->dictLimit)
762 streamPtr->nextToUpdate = streamPtr->dictLimit;
763 }
764 return dictSize;
535} 765}
536EXPORT_SYMBOL(lz4hc_compress); 766EXPORT_SYMBOL(LZ4_saveDictHC);
537 767
538MODULE_LICENSE("Dual BSD/GPL"); 768MODULE_LICENSE("Dual BSD/GPL");
539MODULE_DESCRIPTION("LZ4HC compressor"); 769MODULE_DESCRIPTION("LZ4 HC compressor");
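
The compressor entry point is now LZ4_compress_HC(), which takes an explicit output bound and a clamped compression level (1..LZ4HC_MAX_CLEVEL, see LZ4HC_compress_generic() above) and returns 0 when the destination is too small. A hedged one-shot usage sketch; the workspace-size constant LZ4HC_MEM_COMPRESS is assumed to come from <linux/lz4.h>, so check the header for the exact name:

#include <linux/errno.h>
#include <linux/lz4.h>
#include <linux/vmalloc.h>

static int example_compress_hc(const char *src, int src_len,
			       char *dst, int dst_max, int level)
{
	void *wrkmem = vmalloc(LZ4HC_MEM_COMPRESS);	/* assumed constant */
	int out_len;

	if (!wrkmem)
		return -ENOMEM;
	/* Returns the compressed size, or 0 if 'dst' is too small. */
	out_len = LZ4_compress_HC(src, dst, src_len, dst_max, level, wrkmem);
	vfree(wrkmem);
	return out_len;
}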
diff --git a/lib/nmi_backtrace.c b/lib/nmi_backtrace.c
index 75554754eadf..4e8a30d1c22f 100644
--- a/lib/nmi_backtrace.c
+++ b/lib/nmi_backtrace.c
@@ -17,6 +17,7 @@
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/nmi.h> 18#include <linux/nmi.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/sched/debug.h>
20 21
21#ifdef arch_trigger_cpumask_backtrace 22#ifdef arch_trigger_cpumask_backtrace
22/* For reliability, we're prepared to waste bits here. */ 23/* For reliability, we're prepared to waste bits here. */
@@ -77,7 +78,7 @@ void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
77 * Force flush any remote buffers that might be stuck in IRQ context 78 * Force flush any remote buffers that might be stuck in IRQ context
78 * and therefore could not run their irq_work. 79 * and therefore could not run their irq_work.
79 */ 80 */
80 printk_nmi_flush(); 81 printk_safe_flush();
81 82
82 clear_bit_unlock(0, &backtrace_flag); 83 clear_bit_unlock(0, &backtrace_flag);
83 put_cpu(); 84 put_cpu();
diff --git a/lib/parman.c b/lib/parman.c
new file mode 100644
index 000000000000..c6e42a8db824
--- /dev/null
+++ b/lib/parman.c
@@ -0,0 +1,376 @@
1/*
2 * lib/parman.c - Manager for linear priority array areas
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/slab.h>
38#include <linux/export.h>
39#include <linux/list.h>
40#include <linux/err.h>
41#include <linux/parman.h>
42
43struct parman_algo {
44 int (*item_add)(struct parman *parman, struct parman_prio *prio,
45 struct parman_item *item);
46 void (*item_remove)(struct parman *parman, struct parman_prio *prio,
47 struct parman_item *item);
48};
49
50struct parman {
51 const struct parman_ops *ops;
52 void *priv;
53 const struct parman_algo *algo;
54 unsigned long count;
55 unsigned long limit_count;
56 struct list_head prio_list;
57};
58
59static int parman_enlarge(struct parman *parman)
60{
61 unsigned long new_count = parman->limit_count +
62 parman->ops->resize_step;
63 int err;
64
65 err = parman->ops->resize(parman->priv, new_count);
66 if (err)
67 return err;
68 parman->limit_count = new_count;
69 return 0;
70}
71
72static int parman_shrink(struct parman *parman)
73{
74 unsigned long new_count = parman->limit_count -
75 parman->ops->resize_step;
76 int err;
77
78 if (new_count < parman->ops->base_count)
79 return 0;
80 err = parman->ops->resize(parman->priv, new_count);
81 if (err)
82 return err;
83 parman->limit_count = new_count;
84 return 0;
85}
86
87static bool parman_prio_used(struct parman_prio *prio)
88
89{
90 return !list_empty(&prio->item_list);
91}
92
93static struct parman_item *parman_prio_first_item(struct parman_prio *prio)
94{
95 return list_first_entry(&prio->item_list,
96 typeof(struct parman_item), list);
97}
98
99static unsigned long parman_prio_first_index(struct parman_prio *prio)
100{
101 return parman_prio_first_item(prio)->index;
102}
103
104static struct parman_item *parman_prio_last_item(struct parman_prio *prio)
105{
106 return list_last_entry(&prio->item_list,
107 typeof(struct parman_item), list);
108}
109
110static unsigned long parman_prio_last_index(struct parman_prio *prio)
111{
112 return parman_prio_last_item(prio)->index;
113}
114
115static unsigned long parman_lsort_new_index_find(struct parman *parman,
116 struct parman_prio *prio)
117{
118 list_for_each_entry_from_reverse(prio, &parman->prio_list, list) {
119 if (!parman_prio_used(prio))
120 continue;
121 return parman_prio_last_index(prio) + 1;
122 }
123 return 0;
124}
125
126static void __parman_prio_move(struct parman *parman, struct parman_prio *prio,
127 struct parman_item *item, unsigned long to_index,
128 unsigned long count)
129{
130 parman->ops->move(parman->priv, item->index, to_index, count);
131}
132
133static void parman_prio_shift_down(struct parman *parman,
134 struct parman_prio *prio)
135{
136 struct parman_item *item;
137 unsigned long to_index;
138
139 if (!parman_prio_used(prio))
140 return;
141 item = parman_prio_first_item(prio);
142 to_index = parman_prio_last_index(prio) + 1;
143 __parman_prio_move(parman, prio, item, to_index, 1);
144 list_move_tail(&item->list, &prio->item_list);
145 item->index = to_index;
146}
147
148static void parman_prio_shift_up(struct parman *parman,
149 struct parman_prio *prio)
150{
151 struct parman_item *item;
152 unsigned long to_index;
153
154 if (!parman_prio_used(prio))
155 return;
156 item = parman_prio_last_item(prio);
157 to_index = parman_prio_first_index(prio) - 1;
158 __parman_prio_move(parman, prio, item, to_index, 1);
159 list_move(&item->list, &prio->item_list);
160 item->index = to_index;
161}
162
163static void parman_prio_item_remove(struct parman *parman,
164 struct parman_prio *prio,
165 struct parman_item *item)
166{
167 struct parman_item *last_item;
168 unsigned long to_index;
169
170 last_item = parman_prio_last_item(prio);
171 if (last_item == item) {
172 list_del(&item->list);
173 return;
174 }
175 to_index = item->index;
176 __parman_prio_move(parman, prio, last_item, to_index, 1);
177 list_del(&last_item->list);
178 list_replace(&item->list, &last_item->list);
179 last_item->index = to_index;
180}
181
182static int parman_lsort_item_add(struct parman *parman,
183 struct parman_prio *prio,
184 struct parman_item *item)
185{
186 struct parman_prio *prio2;
187 unsigned long new_index;
188 int err;
189
190 if (parman->count + 1 > parman->limit_count) {
191 err = parman_enlarge(parman);
192 if (err)
193 return err;
194 }
195
196 new_index = parman_lsort_new_index_find(parman, prio);
197 list_for_each_entry_reverse(prio2, &parman->prio_list, list) {
198 if (prio2 == prio)
199 break;
200 parman_prio_shift_down(parman, prio2);
201 }
202 item->index = new_index;
203 list_add_tail(&item->list, &prio->item_list);
204 parman->count++;
205 return 0;
206}
207
208static void parman_lsort_item_remove(struct parman *parman,
209 struct parman_prio *prio,
210 struct parman_item *item)
211{
212 parman_prio_item_remove(parman, prio, item);
213 list_for_each_entry_continue(prio, &parman->prio_list, list)
214 parman_prio_shift_up(parman, prio);
215 parman->count--;
216 if (parman->limit_count - parman->count >= parman->ops->resize_step)
217 parman_shrink(parman);
218}
219
220static const struct parman_algo parman_lsort = {
221 .item_add = parman_lsort_item_add,
222 .item_remove = parman_lsort_item_remove,
223};
224
225static const struct parman_algo *parman_algos[] = {
226 &parman_lsort,
227};
228
229/**
230 * parman_create - creates a new parman instance
231 * @ops: caller-specific callbacks
 232 * @priv: pointer to private data passed to the ops
233 *
234 * Note: all locking must be provided by the caller.
235 *
236 * Each parman instance manages an array area with chunks of entries
 237 * with the same priority. Consider the following example:
238 *
239 * item 1 with prio 10
240 * item 2 with prio 10
241 * item 3 with prio 10
242 * item 4 with prio 20
243 * item 5 with prio 20
244 * item 6 with prio 30
245 * item 7 with prio 30
246 * item 8 with prio 30
247 *
 248 * In this example, there are 3 priority chunks. The order of the priorities
 249 * matters; however, the order of items within a single priority chunk does
 250 * not matter. So the same array could be ordered as follows:
251 *
252 * item 2 with prio 10
253 * item 3 with prio 10
254 * item 1 with prio 10
255 * item 5 with prio 20
256 * item 4 with prio 20
257 * item 7 with prio 30
258 * item 8 with prio 30
259 * item 6 with prio 30
260 *
261 * The goal of parman is to maintain the priority ordering. The caller
262 * provides @ops with callbacks parman uses to move the items
263 * and resize the array area.
264 *
 265 * Returns a pointer to the newly created parman instance on success,
266 * otherwise it returns NULL.
267 */
268struct parman *parman_create(const struct parman_ops *ops, void *priv)
269{
270 struct parman *parman;
271
272 parman = kzalloc(sizeof(*parman), GFP_KERNEL);
273 if (!parman)
274 return NULL;
275 INIT_LIST_HEAD(&parman->prio_list);
276 parman->ops = ops;
277 parman->priv = priv;
278 parman->limit_count = ops->base_count;
279 parman->algo = parman_algos[ops->algo];
280 return parman;
281}
282EXPORT_SYMBOL(parman_create);
283
284/**
285 * parman_destroy - destroys existing parman instance
286 * @parman: parman instance
287 *
288 * Note: all locking must be provided by the caller.
289 */
290void parman_destroy(struct parman *parman)
291{
292 WARN_ON(!list_empty(&parman->prio_list));
293 kfree(parman);
294}
295EXPORT_SYMBOL(parman_destroy);
296
297/**
298 * parman_prio_init - initializes a parman priority chunk
299 * @parman: parman instance
300 * @prio: parman prio structure to be initialized
 301 * @priority: desired priority of the chunk
302 *
303 * Note: all locking must be provided by the caller.
304 *
 305 * Before the caller can add an item with a certain priority, it has to
306 * initialize a priority chunk for it using this function.
307 */
308void parman_prio_init(struct parman *parman, struct parman_prio *prio,
309 unsigned long priority)
310{
311 struct parman_prio *prio2;
312 struct list_head *pos;
313
314 INIT_LIST_HEAD(&prio->item_list);
315 prio->priority = priority;
316
317 /* Position inside the list according to priority */
318 list_for_each(pos, &parman->prio_list) {
319 prio2 = list_entry(pos, typeof(*prio2), list);
320 if (prio2->priority > prio->priority)
321 break;
322 }
323 list_add_tail(&prio->list, pos);
324}
325EXPORT_SYMBOL(parman_prio_init);
326
327/**
328 * parman_prio_fini - finalizes use of parman priority chunk
329 * @prio: parman prio structure
330 *
331 * Note: all locking must be provided by the caller.
332 */
333void parman_prio_fini(struct parman_prio *prio)
334{
335 WARN_ON(parman_prio_used(prio));
336 list_del(&prio->list);
337}
338EXPORT_SYMBOL(parman_prio_fini);
339
340/**
341 * parman_item_add - adds a parman item under defined priority
342 * @parman: parman instance
343 * @prio: parman prio instance to add the item to
344 * @item: parman item instance
345 *
346 * Note: all locking must be provided by the caller.
347 *
 348 * Adds an item to the array managed by the parman instance under the specified priority.
349 *
350 * Returns 0 in case of success, negative number to indicate an error.
351 */
352int parman_item_add(struct parman *parman, struct parman_prio *prio,
353 struct parman_item *item)
354{
355 return parman->algo->item_add(parman, prio, item);
356}
357EXPORT_SYMBOL(parman_item_add);
358
359/**
 360 * parman_item_remove - removes a parman item
361 * @parman: parman instance
362 * @prio: parman prio instance to delete the item from
363 * @item: parman item instance
364 *
365 * Note: all locking must be provided by the caller.
366 */
367void parman_item_remove(struct parman *parman, struct parman_prio *prio,
368 struct parman_item *item)
369{
370 parman->algo->item_remove(parman, prio, item);
371}
372EXPORT_SYMBOL(parman_item_remove);
373
374MODULE_LICENSE("Dual BSD/GPL");
375MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
376MODULE_DESCRIPTION("Priority-based array manager");
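
parman itself only tracks indices; the ops callbacks do the real work against the caller's backing store. A minimal consumer sketch under stated assumptions: the struct parman_ops layout matches the callbacks exercised above (base_count, resize_step, resize, move, algo), and PARMAN_ALGO_TYPE_LSORT is assumed to be the <linux/parman.h> enum value selecting the lsort algorithm:

#include <linux/errno.h>
#include <linux/parman.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_table {
	struct parman *parman;
	u32 *entries;		/* backing array managed via parman */
};

static int my_resize(void *priv, unsigned long new_count)
{
	struct my_table *t = priv;
	u32 *e = krealloc(t->entries, new_count * sizeof(u32), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	t->entries = e;
	return 0;
}

static void my_move(void *priv, unsigned long from, unsigned long to,
		    unsigned long count)
{
	struct my_table *t = priv;

	memmove(&t->entries[to], &t->entries[from], count * sizeof(u32));
}

static const struct parman_ops my_ops = {
	.base_count	= 16,
	.resize_step	= 16,
	.resize		= my_resize,
	.move		= my_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,	/* assumed enum name */
};

After parman_create(&my_ops, t), each priority gets one parman_prio_init() call; parman_item_add() then assigns item->index, which is where the entry lives in t->entries until parman_item_remove() pulls it back out.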
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c8cebb137076..9c21000df0b5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu)
176 spin_lock_irq(&percpu_counters_lock); 176 spin_lock_irq(&percpu_counters_lock);
177 list_for_each_entry(fbc, &percpu_counters, list) { 177 list_for_each_entry(fbc, &percpu_counters, list) {
178 s32 *pcount; 178 s32 *pcount;
179 unsigned long flags;
180 179
181 raw_spin_lock_irqsave(&fbc->lock, flags); 180 raw_spin_lock(&fbc->lock);
182 pcount = per_cpu_ptr(fbc->counters, cpu); 181 pcount = per_cpu_ptr(fbc->counters, cpu);
183 fbc->count += *pcount; 182 fbc->count += *pcount;
184 *pcount = 0; 183 *pcount = 0;
185 raw_spin_unlock_irqrestore(&fbc->lock, flags); 184 raw_spin_unlock(&fbc->lock);
186 } 185 }
187 spin_unlock_irq(&percpu_counters_lock); 186 spin_unlock_irq(&percpu_counters_lock);
188#endif 187#endif
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 6d40944960de..6016f1deb1f5 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -14,6 +14,7 @@
14 * General Public License for more details. 14 * General Public License for more details.
15 */ 15 */
16 16
17#include <linux/mm.h>
17#include <linux/bitmap.h> 18#include <linux/bitmap.h>
18#include <linux/bitops.h> 19#include <linux/bitops.h>
19#include <linux/bug.h> 20#include <linux/bug.h>
@@ -22,7 +23,7 @@
22#include <linux/init.h> 23#include <linux/init.h>
23#include <linux/kernel.h> 24#include <linux/kernel.h>
24#include <linux/percpu.h> 25#include <linux/percpu.h>
25#include <linux/sched.h> 26#include <linux/sched/signal.h>
26#include <linux/string.h> 27#include <linux/string.h>
27#include <linux/spinlock.h> 28#include <linux/spinlock.h>
28#include <linux/percpu_ida.h> 29#include <linux/percpu_ida.h>
diff --git a/lib/plist.c b/lib/plist.c
index 3a30c53db061..199408f91057 100644
--- a/lib/plist.c
+++ b/lib/plist.c
@@ -175,6 +175,7 @@ void plist_requeue(struct plist_node *node, struct plist_head *head)
175 175
176#ifdef CONFIG_DEBUG_PI_LIST 176#ifdef CONFIG_DEBUG_PI_LIST
177#include <linux/sched.h> 177#include <linux/sched.h>
178#include <linux/sched/clock.h>
178#include <linux/module.h> 179#include <linux/module.h>
179#include <linux/init.h> 180#include <linux/init.h>
180 181
diff --git a/lib/prime_numbers.c b/lib/prime_numbers.c
new file mode 100644
index 000000000000..550eec457c2e
--- /dev/null
+++ b/lib/prime_numbers.c
@@ -0,0 +1,315 @@
1#define pr_fmt(fmt) "prime numbers: " fmt "\n"
2
3#include <linux/module.h>
4#include <linux/mutex.h>
5#include <linux/prime_numbers.h>
6#include <linux/slab.h>
7
8#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
9
10struct primes {
11 struct rcu_head rcu;
12 unsigned long last, sz;
13 unsigned long primes[];
14};
15
16#if BITS_PER_LONG == 64
17static const struct primes small_primes = {
18 .last = 61,
19 .sz = 64,
20 .primes = {
21 BIT(2) |
22 BIT(3) |
23 BIT(5) |
24 BIT(7) |
25 BIT(11) |
26 BIT(13) |
27 BIT(17) |
28 BIT(19) |
29 BIT(23) |
30 BIT(29) |
31 BIT(31) |
32 BIT(37) |
33 BIT(41) |
34 BIT(43) |
35 BIT(47) |
36 BIT(53) |
37 BIT(59) |
38 BIT(61)
39 }
40};
41#elif BITS_PER_LONG == 32
42static const struct primes small_primes = {
43 .last = 31,
44 .sz = 32,
45 .primes = {
46 BIT(2) |
47 BIT(3) |
48 BIT(5) |
49 BIT(7) |
50 BIT(11) |
51 BIT(13) |
52 BIT(17) |
53 BIT(19) |
54 BIT(23) |
55 BIT(29) |
56 BIT(31)
57 }
58};
59#else
60#error "unhandled BITS_PER_LONG"
61#endif
62
63static DEFINE_MUTEX(lock);
64static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes);
65
66static unsigned long selftest_max;
67
68static bool slow_is_prime_number(unsigned long x)
69{
70 unsigned long y = int_sqrt(x);
71
72 while (y > 1) {
73 if ((x % y) == 0)
74 break;
75 y--;
76 }
77
78 return y == 1;
79}
80
81static unsigned long slow_next_prime_number(unsigned long x)
82{
83 while (x < ULONG_MAX && !slow_is_prime_number(++x))
84 ;
85
86 return x;
87}
88
89static unsigned long clear_multiples(unsigned long x,
90 unsigned long *p,
91 unsigned long start,
92 unsigned long end)
93{
94 unsigned long m;
95
96 m = 2 * x;
97 if (m < start)
98 m = roundup(start, x);
99
100 while (m < end) {
101 __clear_bit(m, p);
102 m += x;
103 }
104
105 return x;
106}
107
108static bool expand_to_next_prime(unsigned long x)
109{
110 const struct primes *p;
111 struct primes *new;
112 unsigned long sz, y;
113
114 /* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3,
115 * there is always at least one prime p between n and 2n - 2.
116 * Equivalently, if n > 1, then there is always at least one prime p
117 * such that n < p < 2n.
118 *
119 * http://mathworld.wolfram.com/BertrandsPostulate.html
120 * https://en.wikipedia.org/wiki/Bertrand's_postulate
121 */
122 sz = 2 * x;
123 if (sz < x)
124 return false;
125
126 sz = round_up(sz, BITS_PER_LONG);
127 new = kmalloc(sizeof(*new) + bitmap_size(sz),
128 GFP_KERNEL | __GFP_NOWARN);
129 if (!new)
130 return false;
131
132 mutex_lock(&lock);
133 p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
134 if (x < p->last) {
135 kfree(new);
136 goto unlock;
137 }
138
139 /* Where memory permits, track the primes using the
140 * Sieve of Eratosthenes. The sieve works by removing all multiples of
141 * known primes from the set; whatever remains in the set is therefore prime.
142 */
143 bitmap_fill(new->primes, sz);
144 bitmap_copy(new->primes, p->primes, p->sz);
145 for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1))
146 new->last = clear_multiples(y, new->primes, p->sz, sz);
147 new->sz = sz;
148
149 BUG_ON(new->last <= x);
150
151 rcu_assign_pointer(primes, new);
152 if (p != &small_primes)
153 kfree_rcu((struct primes *)p, rcu);
154
155unlock:
156 mutex_unlock(&lock);
157 return true;
158}
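To illustrate the expansion step in isolation, a standalone sketch (names
invented here, and the bit arithmetic assumes 64-bit longs): it sieves only
the freshly added range [old_sz, sz) with every prime already known, which is
exactly what the clear_multiples() loop above does over the new bitmap.

static void sieve_extend(unsigned long *bits, unsigned long old_sz,
			 unsigned long sz)
{
	unsigned long p, m;

	for (p = 2; p < sz; p++) {
		if (!(bits[p / 64] & (1UL << (p % 64))))
			continue;		/* p was already sieved out */
		/* first multiple of p inside [old_sz, sz), but at least 2p */
		m = 2 * p;
		if (m < old_sz)
			m = ((old_sz + p - 1) / p) * p;	/* roundup(old_sz, p) */
		for (; m < sz; m += p)
			bits[m / 64] &= ~(1UL << (m % 64));
	}
}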
159
160static void free_primes(void)
161{
162 const struct primes *p;
163
164 mutex_lock(&lock);
165 p = rcu_dereference_protected(primes, lockdep_is_held(&lock));
166 if (p != &small_primes) {
167 rcu_assign_pointer(primes, &small_primes);
168 kfree_rcu((struct primes *)p, rcu);
169 }
170 mutex_unlock(&lock);
171}
172
173/**
174 * next_prime_number - return the next prime number
175 * @x: the value above which to search for the next prime
176 *
177 * A prime number is an integer greater than 1 that is only divisible by
178 * itself and 1. The set of prime numbers is computed using the Sieve of
179 * Eratosthenes (on finding a prime, all multiples of that prime are removed
180 * from the set) enabling a fast lookup of the next prime number larger than
181 * @x. If the sieve fails (memory limitation), the search falls back to using
182 * slow trial-division, up to the value of ULONG_MAX (which is reported as the
183 * final prime as a sentinel).
184 *
185 * Returns: the next prime number larger than @x
186 */
187unsigned long next_prime_number(unsigned long x)
188{
189 const struct primes *p;
190
191 rcu_read_lock();
192 p = rcu_dereference(primes);
193 while (x >= p->last) {
194 rcu_read_unlock();
195
196 if (!expand_to_next_prime(x))
197 return slow_next_prime_number(x);
198
199 rcu_read_lock();
200 p = rcu_dereference(primes);
201 }
202 x = find_next_bit(p->primes, p->last, x + 1);
203 rcu_read_unlock();
204
205 return x;
206}
207EXPORT_SYMBOL(next_prime_number);
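A caller-side sketch of the interface exported here (the printout is
illustrative only):

	unsigned long p;

	/* Enumerate the primes below 100: 2, 3, 5, ..., 97. */
	for (p = 2; p < 100; p = next_prime_number(p))
		pr_info("%lu is prime\n", p);

Note that because ULONG_MAX is reported as a sentinel "prime", a loop like
this always terminates even when the bound approaches the largest value the
machine can represent.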
208
209/**
210 * is_prime_number - test whether the given number is prime
211 * @x: the number to test
212 *
213 * A prime number is an integer greater than 1 that is only divisible by
214 * itself and 1. Internally a cache of prime numbers is kept (to speed up
215 * searching for sequential primes, see next_prime_number()), but if the number
216 * falls outside that cache, its primality is tested using trial-division.
217 *
218 * Returns: true if @x is prime, false for composite numbers.
219 */
220bool is_prime_number(unsigned long x)
221{
222 const struct primes *p;
223 bool result;
224
225 rcu_read_lock();
226 p = rcu_dereference(primes);
227 while (x >= p->sz) {
228 rcu_read_unlock();
229
230 if (!expand_to_next_prime(x))
231 return slow_is_prime_number(x);
232
233 rcu_read_lock();
234 p = rcu_dereference(primes);
235 }
236 result = test_bit(x, p->primes);
237 rcu_read_unlock();
238
239 return result;
240}
241EXPORT_SYMBOL(is_prime_number);
242
243static void dump_primes(void)
244{
245 const struct primes *p;
246 char *buf;
247
248 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
249
250 rcu_read_lock();
251 p = rcu_dereference(primes);
252
253 if (buf)
254 bitmap_print_to_pagebuf(true, buf, p->primes, p->sz);
255 pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s",
256 p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf);
257
258 rcu_read_unlock();
259
260 kfree(buf);
261}
262
263static int selftest(unsigned long max)
264{
265 unsigned long x, last;
266
267 if (!max)
268 return 0;
269
270 for (last = 0, x = 2; x < max; x++) {
271 bool slow = slow_is_prime_number(x);
272 bool fast = is_prime_number(x);
273
274 if (slow != fast) {
275 pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!",
276 x, slow ? "yes" : "no", fast ? "yes" : "no");
277 goto err;
278 }
279
280 if (!slow)
281 continue;
282
283 if (next_prime_number(last) != x) {
284 pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu",
285 last, x, next_prime_number(last));
286 goto err;
287 }
288 last = x;
289 }
290
291 pr_info("selftest(%lu) passed, last prime was %lu", x, last);
292 return 0;
293
294err:
295 dump_primes();
296 return -EINVAL;
297}
298
299static int __init primes_init(void)
300{
301 return selftest(selftest_max);
302}
303
304static void __exit primes_exit(void)
305{
306 free_primes();
307}
308
309module_init(primes_init);
310module_exit(primes_exit);
311
312module_param_named(selftest, selftest_max, ulong, 0400);
313
314MODULE_AUTHOR("Intel Corporation");
315MODULE_LICENSE("GPL");
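The self-test can be exercised at load time; assuming a modular build,
something like "insmod lib/prime_numbers.ko selftest=65536" cross-checks
is_prime_number() and next_prime_number() against trial division for every
value below the given bound, dumping the cached bitmap on any mismatch.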
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 84812a9fb16f..691a9ad48497 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -22,20 +22,21 @@
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#include <linux/bitmap.h>
26#include <linux/bitops.h>
25#include <linux/cpu.h> 27#include <linux/cpu.h>
26#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/export.h>
30#include <linux/idr.h>
27#include <linux/init.h> 31#include <linux/init.h>
28#include <linux/kernel.h> 32#include <linux/kernel.h>
29#include <linux/export.h> 33#include <linux/kmemleak.h>
30#include <linux/radix-tree.h>
31#include <linux/percpu.h> 34#include <linux/percpu.h>
35#include <linux/preempt.h> /* in_interrupt() */
36#include <linux/radix-tree.h>
37#include <linux/rcupdate.h>
32#include <linux/slab.h> 38#include <linux/slab.h>
33#include <linux/kmemleak.h>
34#include <linux/cpu.h>
35#include <linux/string.h> 39#include <linux/string.h>
36#include <linux/bitops.h>
37#include <linux/rcupdate.h>
38#include <linux/preempt.h> /* in_interrupt() */
39 40
40 41
41/* Number of nodes in fully populated tree of given height */ 42/* Number of nodes in fully populated tree of given height */
@@ -60,11 +61,28 @@ static struct kmem_cache *radix_tree_node_cachep;
60#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1) 61#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
61 62
62/* 63/*
64 * The IDR does not have to be as high as the radix tree since it uses
65 * signed integers, not unsigned longs.
66 */
67#define IDR_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(int) - 1)
68#define IDR_MAX_PATH (DIV_ROUND_UP(IDR_INDEX_BITS, \
69 RADIX_TREE_MAP_SHIFT))
70#define IDR_PRELOAD_SIZE (IDR_MAX_PATH * 2 - 1)
71
72/*
73 * The IDA is even shorter since it uses a bitmap at the last level.
74 */
75#define IDA_INDEX_BITS (8 * sizeof(int) - 1 - ilog2(IDA_BITMAP_BITS))
76#define IDA_MAX_PATH (DIV_ROUND_UP(IDA_INDEX_BITS, \
77 RADIX_TREE_MAP_SHIFT))
78#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
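For concreteness (on a 64-bit build with the default RADIX_TREE_MAP_SHIFT of
6): IDR_INDEX_BITS is 31, so IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6 and
IDR_PRELOAD_SIZE = 2 * 6 - 1 = 11 nodes, versus RADIX_TREE_MAX_PATH = 11 and
RADIX_TREE_PRELOAD_SIZE = 21 for the full unsigned long index space.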
79
80/*
63 * Per-cpu pool of preloaded nodes 81 * Per-cpu pool of preloaded nodes
64 */ 82 */
65struct radix_tree_preload { 83struct radix_tree_preload {
66 unsigned nr; 84 unsigned nr;
67 /* nodes->private_data points to next preallocated node */ 85 /* nodes->parent points to next preallocated node */
68 struct radix_tree_node *nodes; 86 struct radix_tree_node *nodes;
69}; 87};
70static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 88static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
@@ -83,35 +101,38 @@ static inline void *node_to_entry(void *ptr)
83 101
84#ifdef CONFIG_RADIX_TREE_MULTIORDER 102#ifdef CONFIG_RADIX_TREE_MULTIORDER
85/* Sibling slots point directly to another slot in the same node */ 103/* Sibling slots point directly to another slot in the same node */
86static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) 104static inline
105bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
87{ 106{
88 void **ptr = node; 107 void __rcu **ptr = node;
89 return (parent->slots <= ptr) && 108 return (parent->slots <= ptr) &&
90 (ptr < parent->slots + RADIX_TREE_MAP_SIZE); 109 (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
91} 110}
92#else 111#else
93static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node) 112static inline
113bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
94{ 114{
95 return false; 115 return false;
96} 116}
97#endif 117#endif
98 118
99static inline unsigned long get_slot_offset(struct radix_tree_node *parent, 119static inline unsigned long
100 void **slot) 120get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
101{ 121{
102 return slot - parent->slots; 122 return slot - parent->slots;
103} 123}
104 124
105static unsigned int radix_tree_descend(struct radix_tree_node *parent, 125static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
106 struct radix_tree_node **nodep, unsigned long index) 126 struct radix_tree_node **nodep, unsigned long index)
107{ 127{
108 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; 128 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
109 void **entry = rcu_dereference_raw(parent->slots[offset]); 129 void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
110 130
111#ifdef CONFIG_RADIX_TREE_MULTIORDER 131#ifdef CONFIG_RADIX_TREE_MULTIORDER
112 if (radix_tree_is_internal_node(entry)) { 132 if (radix_tree_is_internal_node(entry)) {
113 if (is_sibling_entry(parent, entry)) { 133 if (is_sibling_entry(parent, entry)) {
114 void **sibentry = (void **) entry_to_node(entry); 134 void __rcu **sibentry;
135 sibentry = (void __rcu **) entry_to_node(entry);
115 offset = get_slot_offset(parent, sibentry); 136 offset = get_slot_offset(parent, sibentry);
116 entry = rcu_dereference_raw(*sibentry); 137 entry = rcu_dereference_raw(*sibentry);
117 } 138 }
@@ -122,7 +143,7 @@ static unsigned int radix_tree_descend(struct radix_tree_node *parent,
122 return offset; 143 return offset;
123} 144}
124 145
125static inline gfp_t root_gfp_mask(struct radix_tree_root *root) 146static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
126{ 147{
127 return root->gfp_mask & __GFP_BITS_MASK; 148 return root->gfp_mask & __GFP_BITS_MASK;
128} 149}
@@ -139,42 +160,48 @@ static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
139 __clear_bit(offset, node->tags[tag]); 160 __clear_bit(offset, node->tags[tag]);
140} 161}
141 162
142static inline int tag_get(struct radix_tree_node *node, unsigned int tag, 163static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
143 int offset) 164 int offset)
144{ 165{
145 return test_bit(offset, node->tags[tag]); 166 return test_bit(offset, node->tags[tag]);
146} 167}
147 168
148static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag) 169static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
149{ 170{
150 root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT)); 171 root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
151} 172}
152 173
153static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) 174static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
154{ 175{
155 root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT)); 176 root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
156} 177}
157 178
158static inline void root_tag_clear_all(struct radix_tree_root *root) 179static inline void root_tag_clear_all(struct radix_tree_root *root)
159{ 180{
160 root->gfp_mask &= __GFP_BITS_MASK; 181 root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1;
182}
183
184static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
185{
186 return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT));
161} 187}
162 188
163static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag) 189static inline unsigned root_tags_get(const struct radix_tree_root *root)
164{ 190{
165 return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT)); 191 return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT;
166} 192}
167 193
168static inline unsigned root_tags_get(struct radix_tree_root *root) 194static inline bool is_idr(const struct radix_tree_root *root)
169{ 195{
170 return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT; 196 return !!(root->gfp_mask & ROOT_IS_IDR);
171} 197}
172 198
173/* 199/*
174 * Returns 1 if any slot in the node has this tag set. 200 * Returns 1 if any slot in the node has this tag set.
175 * Otherwise returns 0. 201 * Otherwise returns 0.
176 */ 202 */
177static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag) 203static inline int any_tag_set(const struct radix_tree_node *node,
204 unsigned int tag)
178{ 205{
179 unsigned idx; 206 unsigned idx;
180 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) { 207 for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
@@ -184,6 +211,11 @@ static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
184 return 0; 211 return 0;
185} 212}
186 213
214static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
215{
216 bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
217}
218
187/** 219/**
188 * radix_tree_find_next_bit - find the next set bit in a memory region 220 * radix_tree_find_next_bit - find the next set bit in a memory region
189 * 221 *
@@ -232,11 +264,18 @@ static inline unsigned long shift_maxindex(unsigned int shift)
232 return (RADIX_TREE_MAP_SIZE << shift) - 1; 264 return (RADIX_TREE_MAP_SIZE << shift) - 1;
233} 265}
234 266
235static inline unsigned long node_maxindex(struct radix_tree_node *node) 267static inline unsigned long node_maxindex(const struct radix_tree_node *node)
236{ 268{
237 return shift_maxindex(node->shift); 269 return shift_maxindex(node->shift);
238} 270}
239 271
272static unsigned long next_index(unsigned long index,
273 const struct radix_tree_node *node,
274 unsigned long offset)
275{
276 return (index & ~node_maxindex(node)) + (offset << node->shift);
277}
278
240#ifndef __KERNEL__ 279#ifndef __KERNEL__
241static void dump_node(struct radix_tree_node *node, unsigned long index) 280static void dump_node(struct radix_tree_node *node, unsigned long index)
242{ 281{
@@ -275,11 +314,59 @@ static void radix_tree_dump(struct radix_tree_root *root)
275{ 314{
276 pr_debug("radix root: %p rnode %p tags %x\n", 315 pr_debug("radix root: %p rnode %p tags %x\n",
277 root, root->rnode, 316 root, root->rnode,
278 root->gfp_mask >> __GFP_BITS_SHIFT); 317 root->gfp_mask >> ROOT_TAG_SHIFT);
279 if (!radix_tree_is_internal_node(root->rnode)) 318 if (!radix_tree_is_internal_node(root->rnode))
280 return; 319 return;
281 dump_node(entry_to_node(root->rnode), 0); 320 dump_node(entry_to_node(root->rnode), 0);
282} 321}
322
323static void dump_ida_node(void *entry, unsigned long index)
324{
325 unsigned long i;
326
327 if (!entry)
328 return;
329
330 if (radix_tree_is_internal_node(entry)) {
331 struct radix_tree_node *node = entry_to_node(entry);
332
333 pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
334 node, node->offset, index * IDA_BITMAP_BITS,
335 ((index | node_maxindex(node)) + 1) *
336 IDA_BITMAP_BITS - 1,
337 node->parent, node->tags[0][0], node->shift,
338 node->count);
339 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
340 dump_ida_node(node->slots[i],
341 index | (i << node->shift));
342 } else if (radix_tree_exceptional_entry(entry)) {
343 pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
344 entry, (int)(index & RADIX_TREE_MAP_MASK),
345 index * IDA_BITMAP_BITS,
346 index * IDA_BITMAP_BITS + BITS_PER_LONG -
347 RADIX_TREE_EXCEPTIONAL_SHIFT,
348 (unsigned long)entry >>
349 RADIX_TREE_EXCEPTIONAL_SHIFT);
350 } else {
351 struct ida_bitmap *bitmap = entry;
352
353 pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
354 (int)(index & RADIX_TREE_MAP_MASK),
355 index * IDA_BITMAP_BITS,
356 (index + 1) * IDA_BITMAP_BITS - 1);
357 for (i = 0; i < IDA_BITMAP_LONGS; i++)
358 pr_cont(" %lx", bitmap->bitmap[i]);
359 pr_cont("\n");
360 }
361}
362
363static void ida_dump(struct ida *ida)
364{
365 struct radix_tree_root *root = &ida->ida_rt;
366 pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
367 root->gfp_mask >> ROOT_TAG_SHIFT);
368 dump_ida_node(root->rnode, 0);
369}
283#endif 370#endif
284 371
285/* 372/*
@@ -287,13 +374,12 @@ static void radix_tree_dump(struct radix_tree_root *root)
287 * that the caller has pinned this thread of control to the current CPU. 374 * that the caller has pinned this thread of control to the current CPU.
288 */ 375 */
289static struct radix_tree_node * 376static struct radix_tree_node *
290radix_tree_node_alloc(struct radix_tree_root *root, 377radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
291 struct radix_tree_node *parent, 378 struct radix_tree_root *root,
292 unsigned int shift, unsigned int offset, 379 unsigned int shift, unsigned int offset,
293 unsigned int count, unsigned int exceptional) 380 unsigned int count, unsigned int exceptional)
294{ 381{
295 struct radix_tree_node *ret = NULL; 382 struct radix_tree_node *ret = NULL;
296 gfp_t gfp_mask = root_gfp_mask(root);
297 383
298 /* 384 /*
299 * Preload code isn't irq safe and it doesn't make sense to use 385 * Preload code isn't irq safe and it doesn't make sense to use
@@ -321,8 +407,7 @@ radix_tree_node_alloc(struct radix_tree_root *root,
321 rtp = this_cpu_ptr(&radix_tree_preloads); 407 rtp = this_cpu_ptr(&radix_tree_preloads);
322 if (rtp->nr) { 408 if (rtp->nr) {
323 ret = rtp->nodes; 409 ret = rtp->nodes;
324 rtp->nodes = ret->private_data; 410 rtp->nodes = ret->parent;
325 ret->private_data = NULL;
326 rtp->nr--; 411 rtp->nr--;
327 } 412 }
328 /* 413 /*
@@ -336,11 +421,12 @@ radix_tree_node_alloc(struct radix_tree_root *root,
336out: 421out:
337 BUG_ON(radix_tree_is_internal_node(ret)); 422 BUG_ON(radix_tree_is_internal_node(ret));
338 if (ret) { 423 if (ret) {
339 ret->parent = parent;
340 ret->shift = shift; 424 ret->shift = shift;
341 ret->offset = offset; 425 ret->offset = offset;
342 ret->count = count; 426 ret->count = count;
343 ret->exceptional = exceptional; 427 ret->exceptional = exceptional;
428 ret->parent = parent;
429 ret->root = root;
344 } 430 }
345 return ret; 431 return ret;
346} 432}
@@ -399,7 +485,7 @@ static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
399 preempt_disable(); 485 preempt_disable();
400 rtp = this_cpu_ptr(&radix_tree_preloads); 486 rtp = this_cpu_ptr(&radix_tree_preloads);
401 if (rtp->nr < nr) { 487 if (rtp->nr < nr) {
402 node->private_data = rtp->nodes; 488 node->parent = rtp->nodes;
403 rtp->nodes = node; 489 rtp->nodes = node;
404 rtp->nr++; 490 rtp->nr++;
405 } else { 491 } else {
@@ -510,7 +596,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
510 return __radix_tree_preload(gfp_mask, nr_nodes); 596 return __radix_tree_preload(gfp_mask, nr_nodes);
511} 597}
512 598
513static unsigned radix_tree_load_root(struct radix_tree_root *root, 599static unsigned radix_tree_load_root(const struct radix_tree_root *root,
514 struct radix_tree_node **nodep, unsigned long *maxindex) 600 struct radix_tree_node **nodep, unsigned long *maxindex)
515{ 601{
516 struct radix_tree_node *node = rcu_dereference_raw(root->rnode); 602 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
@@ -530,10 +616,10 @@ static unsigned radix_tree_load_root(struct radix_tree_root *root,
530/* 616/*
531 * Extend a radix tree so it can store key @index. 617 * Extend a radix tree so it can store key @index.
532 */ 618 */
533static int radix_tree_extend(struct radix_tree_root *root, 619static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
534 unsigned long index, unsigned int shift) 620 unsigned long index, unsigned int shift)
535{ 621{
536 struct radix_tree_node *slot; 622 void *entry;
537 unsigned int maxshift; 623 unsigned int maxshift;
538 int tag; 624 int tag;
539 625
@@ -542,32 +628,44 @@ static int radix_tree_extend(struct radix_tree_root *root,
542 while (index > shift_maxindex(maxshift)) 628 while (index > shift_maxindex(maxshift))
543 maxshift += RADIX_TREE_MAP_SHIFT; 629 maxshift += RADIX_TREE_MAP_SHIFT;
544 630
545 slot = root->rnode; 631 entry = rcu_dereference_raw(root->rnode);
546 if (!slot) 632 if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
547 goto out; 633 goto out;
548 634
549 do { 635 do {
550 struct radix_tree_node *node = radix_tree_node_alloc(root, 636 struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
551 NULL, shift, 0, 1, 0); 637 root, shift, 0, 1, 0);
552 if (!node) 638 if (!node)
553 return -ENOMEM; 639 return -ENOMEM;
554 640
555 /* Propagate the aggregated tag info into the new root */ 641 if (is_idr(root)) {
556 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) { 642 all_tag_set(node, IDR_FREE);
557 if (root_tag_get(root, tag)) 643 if (!root_tag_get(root, IDR_FREE)) {
558 tag_set(node, tag, 0); 644 tag_clear(node, IDR_FREE, 0);
645 root_tag_set(root, IDR_FREE);
646 }
647 } else {
648 /* Propagate the aggregated tag info to the new child */
649 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
650 if (root_tag_get(root, tag))
651 tag_set(node, tag, 0);
652 }
559 } 653 }
560 654
561 BUG_ON(shift > BITS_PER_LONG); 655 BUG_ON(shift > BITS_PER_LONG);
562 if (radix_tree_is_internal_node(slot)) { 656 if (radix_tree_is_internal_node(entry)) {
563 entry_to_node(slot)->parent = node; 657 entry_to_node(entry)->parent = node;
564 } else if (radix_tree_exceptional_entry(slot)) { 658 } else if (radix_tree_exceptional_entry(entry)) {
565 /* Moving an exceptional root->rnode to a node */ 659 /* Moving an exceptional root->rnode to a node */
566 node->exceptional = 1; 660 node->exceptional = 1;
567 } 661 }
568 node->slots[0] = slot; 662 /*
569 slot = node_to_entry(node); 663 * entry was already in the radix tree, so we do not need
570 rcu_assign_pointer(root->rnode, slot); 664 * rcu_assign_pointer here
665 */
666 node->slots[0] = (void __rcu *)entry;
667 entry = node_to_entry(node);
668 rcu_assign_pointer(root->rnode, entry);
571 shift += RADIX_TREE_MAP_SHIFT; 669 shift += RADIX_TREE_MAP_SHIFT;
572 } while (shift <= maxshift); 670 } while (shift <= maxshift);
573out: 671out:
@@ -578,12 +676,14 @@ out:
578 * radix_tree_shrink - shrink radix tree to minimum height 676 * radix_tree_shrink - shrink radix tree to minimum height
579 * @root radix tree root 677 * @root radix tree root
580 */ 678 */
581static inline void radix_tree_shrink(struct radix_tree_root *root, 679static inline bool radix_tree_shrink(struct radix_tree_root *root,
582 radix_tree_update_node_t update_node, 680 radix_tree_update_node_t update_node,
583 void *private) 681 void *private)
584{ 682{
683 bool shrunk = false;
684
585 for (;;) { 685 for (;;) {
586 struct radix_tree_node *node = root->rnode; 686 struct radix_tree_node *node = rcu_dereference_raw(root->rnode);
587 struct radix_tree_node *child; 687 struct radix_tree_node *child;
588 688
589 if (!radix_tree_is_internal_node(node)) 689 if (!radix_tree_is_internal_node(node))
@@ -597,7 +697,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
597 */ 697 */
598 if (node->count != 1) 698 if (node->count != 1)
599 break; 699 break;
600 child = node->slots[0]; 700 child = rcu_dereference_raw(node->slots[0]);
601 if (!child) 701 if (!child)
602 break; 702 break;
603 if (!radix_tree_is_internal_node(child) && node->shift) 703 if (!radix_tree_is_internal_node(child) && node->shift)
@@ -613,7 +713,9 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
613 * (node->slots[0]), it will be safe to dereference the new 713 * (node->slots[0]), it will be safe to dereference the new
614 * one (root->rnode) as far as dependent read barriers go. 714 * one (root->rnode) as far as dependent read barriers go.
615 */ 715 */
616 root->rnode = child; 716 root->rnode = (void __rcu *)child;
717 if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
718 root_tag_clear(root, IDR_FREE);
617 719
618 /* 720 /*
619 * We have a dilemma here. The node's slot[0] must not be 721 * We have a dilemma here. The node's slot[0] must not be
@@ -635,27 +737,34 @@ static inline void radix_tree_shrink(struct radix_tree_root *root,
635 */ 737 */
636 node->count = 0; 738 node->count = 0;
637 if (!radix_tree_is_internal_node(child)) { 739 if (!radix_tree_is_internal_node(child)) {
638 node->slots[0] = RADIX_TREE_RETRY; 740 node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
639 if (update_node) 741 if (update_node)
640 update_node(node, private); 742 update_node(node, private);
641 } 743 }
642 744
643 WARN_ON_ONCE(!list_empty(&node->private_list)); 745 WARN_ON_ONCE(!list_empty(&node->private_list));
644 radix_tree_node_free(node); 746 radix_tree_node_free(node);
747 shrunk = true;
645 } 748 }
749
750 return shrunk;
646} 751}
647 752
648static void delete_node(struct radix_tree_root *root, 753static bool delete_node(struct radix_tree_root *root,
649 struct radix_tree_node *node, 754 struct radix_tree_node *node,
650 radix_tree_update_node_t update_node, void *private) 755 radix_tree_update_node_t update_node, void *private)
651{ 756{
757 bool deleted = false;
758
652 do { 759 do {
653 struct radix_tree_node *parent; 760 struct radix_tree_node *parent;
654 761
655 if (node->count) { 762 if (node->count) {
656 if (node == entry_to_node(root->rnode)) 763 if (node_to_entry(node) ==
657 radix_tree_shrink(root, update_node, private); 764 rcu_dereference_raw(root->rnode))
658 return; 765 deleted |= radix_tree_shrink(root, update_node,
766 private);
767 return deleted;
659 } 768 }
660 769
661 parent = node->parent; 770 parent = node->parent;
@@ -663,15 +772,23 @@ static void delete_node(struct radix_tree_root *root,
663 parent->slots[node->offset] = NULL; 772 parent->slots[node->offset] = NULL;
664 parent->count--; 773 parent->count--;
665 } else { 774 } else {
666 root_tag_clear_all(root); 775 /*
776 * Shouldn't the tags already have all been cleared
777 * by the caller?
778 */
779 if (!is_idr(root))
780 root_tag_clear_all(root);
667 root->rnode = NULL; 781 root->rnode = NULL;
668 } 782 }
669 783
670 WARN_ON_ONCE(!list_empty(&node->private_list)); 784 WARN_ON_ONCE(!list_empty(&node->private_list));
671 radix_tree_node_free(node); 785 radix_tree_node_free(node);
786 deleted = true;
672 787
673 node = parent; 788 node = parent;
674 } while (node); 789 } while (node);
790
791 return deleted;
675} 792}
676 793
677/** 794/**
@@ -693,13 +810,14 @@ static void delete_node(struct radix_tree_root *root,
693 */ 810 */
694int __radix_tree_create(struct radix_tree_root *root, unsigned long index, 811int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
695 unsigned order, struct radix_tree_node **nodep, 812 unsigned order, struct radix_tree_node **nodep,
696 void ***slotp) 813 void __rcu ***slotp)
697{ 814{
698 struct radix_tree_node *node = NULL, *child; 815 struct radix_tree_node *node = NULL, *child;
699 void **slot = (void **)&root->rnode; 816 void __rcu **slot = (void __rcu **)&root->rnode;
700 unsigned long maxindex; 817 unsigned long maxindex;
701 unsigned int shift, offset = 0; 818 unsigned int shift, offset = 0;
702 unsigned long max = index | ((1UL << order) - 1); 819 unsigned long max = index | ((1UL << order) - 1);
820 gfp_t gfp = root_gfp_mask(root);
703 821
704 shift = radix_tree_load_root(root, &child, &maxindex); 822 shift = radix_tree_load_root(root, &child, &maxindex);
705 823
@@ -707,18 +825,18 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
707 if (order > 0 && max == ((1UL << order) - 1)) 825 if (order > 0 && max == ((1UL << order) - 1))
708 max++; 826 max++;
709 if (max > maxindex) { 827 if (max > maxindex) {
710 int error = radix_tree_extend(root, max, shift); 828 int error = radix_tree_extend(root, gfp, max, shift);
711 if (error < 0) 829 if (error < 0)
712 return error; 830 return error;
713 shift = error; 831 shift = error;
714 child = root->rnode; 832 child = rcu_dereference_raw(root->rnode);
715 } 833 }
716 834
717 while (shift > order) { 835 while (shift > order) {
718 shift -= RADIX_TREE_MAP_SHIFT; 836 shift -= RADIX_TREE_MAP_SHIFT;
719 if (child == NULL) { 837 if (child == NULL) {
720 /* Have to add a child node. */ 838 /* Have to add a child node. */
721 child = radix_tree_node_alloc(root, node, shift, 839 child = radix_tree_node_alloc(gfp, node, root, shift,
722 offset, 0, 0); 840 offset, 0, 0);
723 if (!child) 841 if (!child)
724 return -ENOMEM; 842 return -ENOMEM;
@@ -741,7 +859,6 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
741 return 0; 859 return 0;
742} 860}
743 861
744#ifdef CONFIG_RADIX_TREE_MULTIORDER
745/* 862/*
746 * Free any nodes below this node. The tree is presumed to not need 863 * Free any nodes below this node. The tree is presumed to not need
747 * shrinking, and any user data in the tree is presumed to not need a 864 * shrinking, and any user data in the tree is presumed to not need a
@@ -757,7 +874,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
757 struct radix_tree_node *child = entry_to_node(node); 874 struct radix_tree_node *child = entry_to_node(node);
758 875
759 for (;;) { 876 for (;;) {
760 void *entry = child->slots[offset]; 877 void *entry = rcu_dereference_raw(child->slots[offset]);
761 if (radix_tree_is_internal_node(entry) && 878 if (radix_tree_is_internal_node(entry) &&
762 !is_sibling_entry(child, entry)) { 879 !is_sibling_entry(child, entry)) {
763 child = entry_to_node(entry); 880 child = entry_to_node(entry);
@@ -777,8 +894,9 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
777 } 894 }
778} 895}
779 896
780static inline int insert_entries(struct radix_tree_node *node, void **slot, 897#ifdef CONFIG_RADIX_TREE_MULTIORDER
781 void *item, unsigned order, bool replace) 898static inline int insert_entries(struct radix_tree_node *node,
899 void __rcu **slot, void *item, unsigned order, bool replace)
782{ 900{
783 struct radix_tree_node *child; 901 struct radix_tree_node *child;
784 unsigned i, n, tag, offset, tags = 0; 902 unsigned i, n, tag, offset, tags = 0;
@@ -813,7 +931,7 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
813 } 931 }
814 932
815 for (i = 0; i < n; i++) { 933 for (i = 0; i < n; i++) {
816 struct radix_tree_node *old = slot[i]; 934 struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
817 if (i) { 935 if (i) {
818 rcu_assign_pointer(slot[i], child); 936 rcu_assign_pointer(slot[i], child);
819 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 937 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
@@ -840,8 +958,8 @@ static inline int insert_entries(struct radix_tree_node *node, void **slot,
840 return n; 958 return n;
841} 959}
842#else 960#else
843static inline int insert_entries(struct radix_tree_node *node, void **slot, 961static inline int insert_entries(struct radix_tree_node *node,
844 void *item, unsigned order, bool replace) 962 void __rcu **slot, void *item, unsigned order, bool replace)
845{ 963{
846 if (*slot) 964 if (*slot)
847 return -EEXIST; 965 return -EEXIST;
@@ -868,7 +986,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
868 unsigned order, void *item) 986 unsigned order, void *item)
869{ 987{
870 struct radix_tree_node *node; 988 struct radix_tree_node *node;
871 void **slot; 989 void __rcu **slot;
872 int error; 990 int error;
873 991
874 BUG_ON(radix_tree_is_internal_node(item)); 992 BUG_ON(radix_tree_is_internal_node(item));
@@ -908,16 +1026,17 @@ EXPORT_SYMBOL(__radix_tree_insert);
908 * allocated and @root->rnode is used as a direct slot instead of 1026 * allocated and @root->rnode is used as a direct slot instead of
909 * pointing to a node, in which case *@nodep will be NULL. 1027 * pointing to a node, in which case *@nodep will be NULL.
910 */ 1028 */
911void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, 1029void *__radix_tree_lookup(const struct radix_tree_root *root,
912 struct radix_tree_node **nodep, void ***slotp) 1030 unsigned long index, struct radix_tree_node **nodep,
1031 void __rcu ***slotp)
913{ 1032{
914 struct radix_tree_node *node, *parent; 1033 struct radix_tree_node *node, *parent;
915 unsigned long maxindex; 1034 unsigned long maxindex;
916 void **slot; 1035 void __rcu **slot;
917 1036
918 restart: 1037 restart:
919 parent = NULL; 1038 parent = NULL;
920 slot = (void **)&root->rnode; 1039 slot = (void __rcu **)&root->rnode;
921 radix_tree_load_root(root, &node, &maxindex); 1040 radix_tree_load_root(root, &node, &maxindex);
922 if (index > maxindex) 1041 if (index > maxindex)
923 return NULL; 1042 return NULL;
@@ -952,9 +1071,10 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
952 * exclusive from other writers. Any dereference of the slot must be done 1071 * exclusive from other writers. Any dereference of the slot must be done
953 * using radix_tree_deref_slot. 1072 * using radix_tree_deref_slot.
954 */ 1073 */
955void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index) 1074void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
1075 unsigned long index)
956{ 1076{
957 void **slot; 1077 void __rcu **slot;
958 1078
959 if (!__radix_tree_lookup(root, index, NULL, &slot)) 1079 if (!__radix_tree_lookup(root, index, NULL, &slot))
960 return NULL; 1080 return NULL;
@@ -974,75 +1094,76 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
974 * them safely). No RCU barriers are required to access or modify the 1094 * them safely). No RCU barriers are required to access or modify the
975 * returned item, however. 1095 * returned item, however.
976 */ 1096 */
977void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index) 1097void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
978{ 1098{
979 return __radix_tree_lookup(root, index, NULL, NULL); 1099 return __radix_tree_lookup(root, index, NULL, NULL);
980} 1100}
981EXPORT_SYMBOL(radix_tree_lookup); 1101EXPORT_SYMBOL(radix_tree_lookup);
982 1102
983static inline int slot_count(struct radix_tree_node *node, 1103static inline void replace_sibling_entries(struct radix_tree_node *node,
984 void **slot) 1104 void __rcu **slot, int count, int exceptional)
985{ 1105{
986 int n = 1;
987#ifdef CONFIG_RADIX_TREE_MULTIORDER 1106#ifdef CONFIG_RADIX_TREE_MULTIORDER
988 void *ptr = node_to_entry(slot); 1107 void *ptr = node_to_entry(slot);
989 unsigned offset = get_slot_offset(node, slot); 1108 unsigned offset = get_slot_offset(node, slot) + 1;
990 int i;
991 1109
992 for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { 1110 while (offset < RADIX_TREE_MAP_SIZE) {
993 if (node->slots[offset + i] != ptr) 1111 if (rcu_dereference_raw(node->slots[offset]) != ptr)
994 break; 1112 break;
995 n++; 1113 if (count < 0) {
1114 node->slots[offset] = NULL;
1115 node->count--;
1116 }
1117 node->exceptional += exceptional;
1118 offset++;
996 } 1119 }
997#endif 1120#endif
998 return n;
999} 1121}
1000 1122
1001static void replace_slot(struct radix_tree_root *root, 1123static void replace_slot(void __rcu **slot, void *item,
1002 struct radix_tree_node *node, 1124 struct radix_tree_node *node, int count, int exceptional)
1003 void **slot, void *item,
1004 bool warn_typeswitch)
1005{ 1125{
1006 void *old = rcu_dereference_raw(*slot); 1126 if (WARN_ON_ONCE(radix_tree_is_internal_node(item)))
1007 int count, exceptional; 1127 return;
1008
1009 WARN_ON_ONCE(radix_tree_is_internal_node(item));
1010
1011 count = !!item - !!old;
1012 exceptional = !!radix_tree_exceptional_entry(item) -
1013 !!radix_tree_exceptional_entry(old);
1014
1015 WARN_ON_ONCE(warn_typeswitch && (count || exceptional));
1016 1128
1017 if (node) { 1129 if (node && (count || exceptional)) {
1018 node->count += count; 1130 node->count += count;
1019 if (exceptional) { 1131 node->exceptional += exceptional;
1020 exceptional *= slot_count(node, slot); 1132 replace_sibling_entries(node, slot, count, exceptional);
1021 node->exceptional += exceptional;
1022 }
1023 } 1133 }
1024 1134
1025 rcu_assign_pointer(*slot, item); 1135 rcu_assign_pointer(*slot, item);
1026} 1136}
1027 1137
1028static inline void delete_sibling_entries(struct radix_tree_node *node, 1138static bool node_tag_get(const struct radix_tree_root *root,
1029 void **slot) 1139 const struct radix_tree_node *node,
1140 unsigned int tag, unsigned int offset)
1030{ 1141{
1031#ifdef CONFIG_RADIX_TREE_MULTIORDER 1142 if (node)
1032 bool exceptional = radix_tree_exceptional_entry(*slot); 1143 return tag_get(node, tag, offset);
1033 void *ptr = node_to_entry(slot); 1144 return root_tag_get(root, tag);
1034 unsigned offset = get_slot_offset(node, slot); 1145}
1035 int i;
1036 1146
1037 for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { 1147/*
1038 if (node->slots[offset + i] != ptr) 1148 * IDR users want to be able to store NULL in the tree, so if the slot isn't
1039 break; 1149 * free, don't adjust the count, even if it's transitioning between NULL and
1040 node->slots[offset + i] = NULL; 1150 * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
1041 node->count--; 1151 * have empty bits, but it only stores NULL in slots when they're being
1042 if (exceptional) 1152 * deleted.
1043 node->exceptional--; 1153 */
1154static int calculate_count(struct radix_tree_root *root,
1155 struct radix_tree_node *node, void __rcu **slot,
1156 void *item, void *old)
1157{
1158 if (is_idr(root)) {
1159 unsigned offset = get_slot_offset(node, slot);
1160 bool free = node_tag_get(root, node, IDR_FREE, offset);
1161 if (!free)
1162 return 0;
1163 if (!old)
1164 return 1;
1044 } 1165 }
1045#endif 1166 return !!item - !!old;
1046} 1167}
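A worked example of the rule above (not in the patch): in an IDR, storing
NULL over an allocated slot leaves IDR_FREE clear, so calculate_count()
returns 0 and node->count is untouched; storing the first entry into a slot
still tagged IDR_FREE returns 1; and for a plain radix tree the result is
simply !!item - !!old, i.e. +1 on insert, -1 on delete, 0 on replace.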
1047 1168
1048/** 1169/**
@@ -1059,18 +1180,22 @@ static inline void delete_sibling_entries(struct radix_tree_node *node,
1059 */ 1180 */
1060void __radix_tree_replace(struct radix_tree_root *root, 1181void __radix_tree_replace(struct radix_tree_root *root,
1061 struct radix_tree_node *node, 1182 struct radix_tree_node *node,
1062 void **slot, void *item, 1183 void __rcu **slot, void *item,
1063 radix_tree_update_node_t update_node, void *private) 1184 radix_tree_update_node_t update_node, void *private)
1064{ 1185{
1065 if (!item) 1186 void *old = rcu_dereference_raw(*slot);
1066 delete_sibling_entries(node, slot); 1187 int exceptional = !!radix_tree_exceptional_entry(item) -
1188 !!radix_tree_exceptional_entry(old);
1189 int count = calculate_count(root, node, slot, item, old);
1190
1067 /* 1191 /*
1068 * This function supports replacing exceptional entries and 1192 * This function supports replacing exceptional entries and
1069 * deleting entries, but that needs accounting against the 1193 * deleting entries, but that needs accounting against the
1070 * node unless the slot is root->rnode. 1194 * node unless the slot is root->rnode.
1071 */ 1195 */
1072 replace_slot(root, node, slot, item, 1196 WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) &&
1073 !node && slot != (void **)&root->rnode); 1197 (count || exceptional));
1198 replace_slot(slot, item, node, count, exceptional);
1074 1199
1075 if (!node) 1200 if (!node)
1076 return; 1201 return;
@@ -1098,10 +1223,11 @@ void __radix_tree_replace(struct radix_tree_root *root,
1098 * radix_tree_iter_replace(). 1223 * radix_tree_iter_replace().
1099 */ 1224 */
1100void radix_tree_replace_slot(struct radix_tree_root *root, 1225void radix_tree_replace_slot(struct radix_tree_root *root,
1101 void **slot, void *item) 1226 void __rcu **slot, void *item)
1102{ 1227{
1103 replace_slot(root, NULL, slot, item, true); 1228 __radix_tree_replace(root, NULL, slot, item, NULL, NULL);
1104} 1229}
1230EXPORT_SYMBOL(radix_tree_replace_slot);
1105 1231
1106/** 1232/**
1107 * radix_tree_iter_replace - replace item in a slot 1233 * radix_tree_iter_replace - replace item in a slot
@@ -1113,7 +1239,8 @@ void radix_tree_replace_slot(struct radix_tree_root *root,
1113 * Caller must hold tree write locked across split and replacement. 1239 * Caller must hold tree write locked across split and replacement.
1114 */ 1240 */
1115void radix_tree_iter_replace(struct radix_tree_root *root, 1241void radix_tree_iter_replace(struct radix_tree_root *root,
1116 const struct radix_tree_iter *iter, void **slot, void *item) 1242 const struct radix_tree_iter *iter,
1243 void __rcu **slot, void *item)
1117{ 1244{
1118 __radix_tree_replace(root, iter->node, slot, item, NULL, NULL); 1245 __radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
1119} 1246}
@@ -1137,7 +1264,7 @@ int radix_tree_join(struct radix_tree_root *root, unsigned long index,
1137 unsigned order, void *item) 1264 unsigned order, void *item)
1138{ 1265{
1139 struct radix_tree_node *node; 1266 struct radix_tree_node *node;
1140 void **slot; 1267 void __rcu **slot;
1141 int error; 1268 int error;
1142 1269
1143 BUG_ON(radix_tree_is_internal_node(item)); 1270 BUG_ON(radix_tree_is_internal_node(item));
@@ -1172,9 +1299,10 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1172 unsigned order) 1299 unsigned order)
1173{ 1300{
1174 struct radix_tree_node *parent, *node, *child; 1301 struct radix_tree_node *parent, *node, *child;
1175 void **slot; 1302 void __rcu **slot;
1176 unsigned int offset, end; 1303 unsigned int offset, end;
1177 unsigned n, tag, tags = 0; 1304 unsigned n, tag, tags = 0;
1305 gfp_t gfp = root_gfp_mask(root);
1178 1306
1179 if (!__radix_tree_lookup(root, index, &parent, &slot)) 1307 if (!__radix_tree_lookup(root, index, &parent, &slot))
1180 return -ENOENT; 1308 return -ENOENT;
@@ -1188,7 +1316,8 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1188 tags |= 1 << tag; 1316 tags |= 1 << tag;
1189 1317
1190 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) { 1318 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
1191 if (!is_sibling_entry(parent, parent->slots[end])) 1319 if (!is_sibling_entry(parent,
1320 rcu_dereference_raw(parent->slots[end])))
1192 break; 1321 break;
1193 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1322 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1194 if (tags & (1 << tag)) 1323 if (tags & (1 << tag))
@@ -1212,14 +1341,15 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1212 1341
1213 for (;;) { 1342 for (;;) {
1214 if (node->shift > order) { 1343 if (node->shift > order) {
1215 child = radix_tree_node_alloc(root, node, 1344 child = radix_tree_node_alloc(gfp, node, root,
1216 node->shift - RADIX_TREE_MAP_SHIFT, 1345 node->shift - RADIX_TREE_MAP_SHIFT,
1217 offset, 0, 0); 1346 offset, 0, 0);
1218 if (!child) 1347 if (!child)
1219 goto nomem; 1348 goto nomem;
1220 if (node != parent) { 1349 if (node != parent) {
1221 node->count++; 1350 node->count++;
1222 node->slots[offset] = node_to_entry(child); 1351 rcu_assign_pointer(node->slots[offset],
1352 node_to_entry(child));
1223 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1353 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1224 if (tags & (1 << tag)) 1354 if (tags & (1 << tag))
1225 tag_set(node, tag, offset); 1355 tag_set(node, tag, offset);
@@ -1261,6 +1391,22 @@ int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1261} 1391}
1262#endif 1392#endif
1263 1393
1394static void node_tag_set(struct radix_tree_root *root,
1395 struct radix_tree_node *node,
1396 unsigned int tag, unsigned int offset)
1397{
1398 while (node) {
1399 if (tag_get(node, tag, offset))
1400 return;
1401 tag_set(node, tag, offset);
1402 offset = node->offset;
1403 node = node->parent;
1404 }
1405
1406 if (!root_tag_get(root, tag))
1407 root_tag_set(root, tag);
1408}
1409
1264/** 1410/**
1265 * radix_tree_tag_set - set a tag on a radix tree node 1411 * radix_tree_tag_set - set a tag on a radix tree node
1266 * @root: radix tree root 1412 * @root: radix tree root
@@ -1302,6 +1448,18 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
1302} 1448}
1303EXPORT_SYMBOL(radix_tree_tag_set); 1449EXPORT_SYMBOL(radix_tree_tag_set);
1304 1450
1451/**
1452 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1453 * @root: radix tree root
1454 * @iter: iterator state
1455 * @tag: tag to set
1456 */
1457void radix_tree_iter_tag_set(struct radix_tree_root *root,
1458 const struct radix_tree_iter *iter, unsigned int tag)
1459{
1460 node_tag_set(root, iter->node, tag, iter_offset(iter));
1461}
1462
1305static void node_tag_clear(struct radix_tree_root *root, 1463static void node_tag_clear(struct radix_tree_root *root,
1306 struct radix_tree_node *node, 1464 struct radix_tree_node *node,
1307 unsigned int tag, unsigned int offset) 1465 unsigned int tag, unsigned int offset)
@@ -1322,34 +1480,6 @@ static void node_tag_clear(struct radix_tree_root *root,
1322 root_tag_clear(root, tag); 1480 root_tag_clear(root, tag);
1323} 1481}
1324 1482
1325static void node_tag_set(struct radix_tree_root *root,
1326 struct radix_tree_node *node,
1327 unsigned int tag, unsigned int offset)
1328{
1329 while (node) {
1330 if (tag_get(node, tag, offset))
1331 return;
1332 tag_set(node, tag, offset);
1333 offset = node->offset;
1334 node = node->parent;
1335 }
1336
1337 if (!root_tag_get(root, tag))
1338 root_tag_set(root, tag);
1339}
1340
1341/**
1342 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1343 * @root: radix tree root
1344 * @iter: iterator state
1345 * @tag: tag to set
1346 */
1347void radix_tree_iter_tag_set(struct radix_tree_root *root,
1348 const struct radix_tree_iter *iter, unsigned int tag)
1349{
1350 node_tag_set(root, iter->node, tag, iter_offset(iter));
1351}
1352
1353/** 1483/**
1354 * radix_tree_tag_clear - clear a tag on a radix tree node 1484 * radix_tree_tag_clear - clear a tag on a radix tree node
1355 * @root: radix tree root 1485 * @root: radix tree root
@@ -1390,6 +1520,18 @@ void *radix_tree_tag_clear(struct radix_tree_root *root,
1390EXPORT_SYMBOL(radix_tree_tag_clear); 1520EXPORT_SYMBOL(radix_tree_tag_clear);
1391 1521
1392/** 1522/**
1523 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
1524 * @root: radix tree root
1525 * @iter: iterator state
1526 * @tag: tag to clear
1527 */
1528void radix_tree_iter_tag_clear(struct radix_tree_root *root,
1529 const struct radix_tree_iter *iter, unsigned int tag)
1530{
1531 node_tag_clear(root, iter->node, tag, iter_offset(iter));
1532}
1533
1534/**
1393 * radix_tree_tag_get - get a tag on a radix tree node 1535 * radix_tree_tag_get - get a tag on a radix tree node
1394 * @root: radix tree root 1536 * @root: radix tree root
1395 * @index: index key 1537 * @index: index key
@@ -1404,7 +1546,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
1404 * the RCU lock is held, unless tag modification and node deletion are excluded 1546 * the RCU lock is held, unless tag modification and node deletion are excluded
1405 * from concurrency. 1547 * from concurrency.
1406 */ 1548 */
1407int radix_tree_tag_get(struct radix_tree_root *root, 1549int radix_tree_tag_get(const struct radix_tree_root *root,
1408 unsigned long index, unsigned int tag) 1550 unsigned long index, unsigned int tag)
1409{ 1551{
1410 struct radix_tree_node *node, *parent; 1552 struct radix_tree_node *node, *parent;
@@ -1416,8 +1558,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
1416 radix_tree_load_root(root, &node, &maxindex); 1558 radix_tree_load_root(root, &node, &maxindex);
1417 if (index > maxindex) 1559 if (index > maxindex)
1418 return 0; 1560 return 0;
1419 if (node == NULL)
1420 return 0;
1421 1561
1422 while (radix_tree_is_internal_node(node)) { 1562 while (radix_tree_is_internal_node(node)) {
1423 unsigned offset; 1563 unsigned offset;
@@ -1425,8 +1565,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
1425 parent = entry_to_node(node); 1565 parent = entry_to_node(node);
1426 offset = radix_tree_descend(parent, &node, index); 1566 offset = radix_tree_descend(parent, &node, index);
1427 1567
1428 if (!node)
1429 return 0;
1430 if (!tag_get(parent, tag, offset)) 1568 if (!tag_get(parent, tag, offset))
1431 return 0; 1569 return 0;
1432 if (node == RADIX_TREE_RETRY) 1570 if (node == RADIX_TREE_RETRY)
@@ -1453,6 +1591,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1453 unsigned tag_long = offset / BITS_PER_LONG; 1591 unsigned tag_long = offset / BITS_PER_LONG;
1454 unsigned tag_bit = offset % BITS_PER_LONG; 1592 unsigned tag_bit = offset % BITS_PER_LONG;
1455 1593
1594 if (!node) {
1595 iter->tags = 1;
1596 return;
1597 }
1598
1456 iter->tags = node->tags[tag][tag_long] >> tag_bit; 1599 iter->tags = node->tags[tag][tag_long] >> tag_bit;
1457 1600
1458 /* This never happens if RADIX_TREE_TAG_LONGS == 1 */ 1601 /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
@@ -1467,8 +1610,8 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1467} 1610}
1468 1611
1469#ifdef CONFIG_RADIX_TREE_MULTIORDER 1612#ifdef CONFIG_RADIX_TREE_MULTIORDER
1470static void **skip_siblings(struct radix_tree_node **nodep, 1613static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1471 void **slot, struct radix_tree_iter *iter) 1614 void __rcu **slot, struct radix_tree_iter *iter)
1472{ 1615{
1473 void *sib = node_to_entry(slot - 1); 1616 void *sib = node_to_entry(slot - 1);
1474 1617
@@ -1485,8 +1628,8 @@ static void **skip_siblings(struct radix_tree_node **nodep,
1485 return NULL; 1628 return NULL;
1486} 1629}
1487 1630
1488void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, 1631void __rcu **__radix_tree_next_slot(void __rcu **slot,
1489 unsigned flags) 1632 struct radix_tree_iter *iter, unsigned flags)
1490{ 1633{
1491 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; 1634 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1492 struct radix_tree_node *node = rcu_dereference_raw(*slot); 1635 struct radix_tree_node *node = rcu_dereference_raw(*slot);
@@ -1539,20 +1682,20 @@ void ** __radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
1539} 1682}
1540EXPORT_SYMBOL(__radix_tree_next_slot); 1683EXPORT_SYMBOL(__radix_tree_next_slot);
1541#else 1684#else
1542static void **skip_siblings(struct radix_tree_node **nodep, 1685static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1543 void **slot, struct radix_tree_iter *iter) 1686 void __rcu **slot, struct radix_tree_iter *iter)
1544{ 1687{
1545 return slot; 1688 return slot;
1546} 1689}
1547#endif 1690#endif
1548 1691
1549void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter) 1692void __rcu **radix_tree_iter_resume(void __rcu **slot,
1693 struct radix_tree_iter *iter)
1550{ 1694{
1551 struct radix_tree_node *node; 1695 struct radix_tree_node *node;
1552 1696
1553 slot++; 1697 slot++;
1554 iter->index = __radix_tree_iter_add(iter, 1); 1698 iter->index = __radix_tree_iter_add(iter, 1);
1555 node = rcu_dereference_raw(*slot);
1556 skip_siblings(&node, slot, iter); 1699 skip_siblings(&node, slot, iter);
1557 iter->next_index = iter->index; 1700 iter->next_index = iter->index;
1558 iter->tags = 0; 1701 iter->tags = 0;
@@ -1568,7 +1711,7 @@ EXPORT_SYMBOL(radix_tree_iter_resume);
1568 * @flags: RADIX_TREE_ITER_* flags and tag index 1711 * @flags: RADIX_TREE_ITER_* flags and tag index
1569 * Returns: pointer to chunk first slot, or NULL if iteration is over 1712 * Returns: pointer to chunk first slot, or NULL if iteration is over
1570 */ 1713 */
1571void **radix_tree_next_chunk(struct radix_tree_root *root, 1714void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1572 struct radix_tree_iter *iter, unsigned flags) 1715 struct radix_tree_iter *iter, unsigned flags)
1573{ 1716{
1574 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK; 1717 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
@@ -1605,7 +1748,7 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
1605 iter->tags = 1; 1748 iter->tags = 1;
1606 iter->node = NULL; 1749 iter->node = NULL;
1607 __set_iter_shift(iter, 0); 1750 __set_iter_shift(iter, 0);
1608 return (void **)&root->rnode; 1751 return (void __rcu **)&root->rnode;
1609 } 1752 }
1610 1753
1611 do { 1754 do {
@@ -1623,7 +1766,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
1623 offset + 1); 1766 offset + 1);
1624 else 1767 else
1625 while (++offset < RADIX_TREE_MAP_SIZE) { 1768 while (++offset < RADIX_TREE_MAP_SIZE) {
1626 void *slot = node->slots[offset]; 1769 void *slot = rcu_dereference_raw(
1770 node->slots[offset]);
1627 if (is_sibling_entry(node, slot)) 1771 if (is_sibling_entry(node, slot))
1628 continue; 1772 continue;
1629 if (slot) 1773 if (slot)
@@ -1679,11 +1823,11 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
1679 * stored in 'results'. 1823 * stored in 'results'.
1680 */ 1824 */
1681unsigned int 1825unsigned int
1682radix_tree_gang_lookup(struct radix_tree_root *root, void **results, 1826radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
1683 unsigned long first_index, unsigned int max_items) 1827 unsigned long first_index, unsigned int max_items)
1684{ 1828{
1685 struct radix_tree_iter iter; 1829 struct radix_tree_iter iter;
1686 void **slot; 1830 void __rcu **slot;
1687 unsigned int ret = 0; 1831 unsigned int ret = 0;
1688 1832
1689 if (unlikely(!max_items)) 1833 if (unlikely(!max_items))
@@ -1724,12 +1868,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
1724 * protection, radix_tree_deref_slot may fail requiring a retry. 1868 * protection, radix_tree_deref_slot may fail requiring a retry.
1725 */ 1869 */
1726unsigned int 1870unsigned int
1727radix_tree_gang_lookup_slot(struct radix_tree_root *root, 1871radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
1728 void ***results, unsigned long *indices, 1872 void __rcu ***results, unsigned long *indices,
1729 unsigned long first_index, unsigned int max_items) 1873 unsigned long first_index, unsigned int max_items)
1730{ 1874{
1731 struct radix_tree_iter iter; 1875 struct radix_tree_iter iter;
1732 void **slot; 1876 void __rcu **slot;
1733 unsigned int ret = 0; 1877 unsigned int ret = 0;
1734 1878
1735 if (unlikely(!max_items)) 1879 if (unlikely(!max_items))
@@ -1761,12 +1905,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
1761 * returns the number of items which were placed at *@results. 1905 * returns the number of items which were placed at *@results.
1762 */ 1906 */
1763unsigned int 1907unsigned int
1764radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, 1908radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
1765 unsigned long first_index, unsigned int max_items, 1909 unsigned long first_index, unsigned int max_items,
1766 unsigned int tag) 1910 unsigned int tag)
1767{ 1911{
1768 struct radix_tree_iter iter; 1912 struct radix_tree_iter iter;
1769 void **slot; 1913 void __rcu **slot;
1770 unsigned int ret = 0; 1914 unsigned int ret = 0;
1771 1915
1772 if (unlikely(!max_items)) 1916 if (unlikely(!max_items))
@@ -1802,12 +1946,12 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
1802 * returns the number of slots which were placed at *@results. 1946 * returns the number of slots which were placed at *@results.
1803 */ 1947 */
1804unsigned int 1948unsigned int
1805radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, 1949radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
1806 unsigned long first_index, unsigned int max_items, 1950 void __rcu ***results, unsigned long first_index,
1807 unsigned int tag) 1951 unsigned int max_items, unsigned int tag)
1808{ 1952{
1809 struct radix_tree_iter iter; 1953 struct radix_tree_iter iter;
1810 void **slot; 1954 void __rcu **slot;
1811 unsigned int ret = 0; 1955 unsigned int ret = 0;
1812 1956
1813 if (unlikely(!max_items)) 1957 if (unlikely(!max_items))
@@ -1842,59 +1986,83 @@ void __radix_tree_delete_node(struct radix_tree_root *root,
1842 delete_node(root, node, update_node, private); 1986 delete_node(root, node, update_node, private);
1843} 1987}
1844 1988
1989static bool __radix_tree_delete(struct radix_tree_root *root,
1990 struct radix_tree_node *node, void __rcu **slot)
1991{
1992 void *old = rcu_dereference_raw(*slot);
1993 int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0;
1994 unsigned offset = get_slot_offset(node, slot);
1995 int tag;
1996
1997 if (is_idr(root))
1998 node_tag_set(root, node, IDR_FREE, offset);
1999 else
2000 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
2001 node_tag_clear(root, node, tag, offset);
2002
2003 replace_slot(slot, NULL, node, -1, exceptional);
2004 return node && delete_node(root, node, NULL, NULL);
2005}
2006
1845/** 2007/**
1846 * radix_tree_delete_item - delete an item from a radix tree 2008 * radix_tree_iter_delete - delete the entry at this iterator position
1847 * @root: radix tree root 2009 * @root: radix tree root
1848 * @index: index key 2010 * @iter: iterator state
1849 * @item: expected item 2011 * @slot: pointer to slot
1850 * 2012 *
1851 * Remove @item at @index from the radix tree rooted at @root. 2013 * Delete the entry at the position currently pointed to by the iterator.
2014 * This may result in the current node being freed; if it is, the iterator
2015 * is advanced so that it will not reference the freed memory. This
2016 * function may be called without any locking if there are no other threads
2017 * which can access this tree.
2018 */
2019void radix_tree_iter_delete(struct radix_tree_root *root,
2020 struct radix_tree_iter *iter, void __rcu **slot)
2021{
2022 if (__radix_tree_delete(root, iter->node, slot))
2023 iter->index = iter->next_index;
2024}
2025
2026/**
2027 * radix_tree_delete_item - delete an item from a radix tree
2028 * @root: radix tree root
2029 * @index: index key
2030 * @item: expected item
1852 * 2031 *
1853 * Returns the address of the deleted item, or NULL if it was not present 2032 * Remove @item at @index from the radix tree rooted at @root.
1854 * or the entry at the given @index was not @item. 2033 *
2034 * Return: the deleted entry, or %NULL if it was not present
2035 * or the entry at the given @index was not @item.
1855 */ 2036 */
1856void *radix_tree_delete_item(struct radix_tree_root *root, 2037void *radix_tree_delete_item(struct radix_tree_root *root,
1857 unsigned long index, void *item) 2038 unsigned long index, void *item)
1858{ 2039{
1859 struct radix_tree_node *node; 2040 struct radix_tree_node *node = NULL;
1860 unsigned int offset; 2041 void __rcu **slot;
1861 void **slot;
1862 void *entry; 2042 void *entry;
1863 int tag;
1864 2043
1865 entry = __radix_tree_lookup(root, index, &node, &slot); 2044 entry = __radix_tree_lookup(root, index, &node, &slot);
1866 if (!entry) 2045 if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
2046 get_slot_offset(node, slot))))
1867 return NULL; 2047 return NULL;
1868 2048
1869 if (item && entry != item) 2049 if (item && entry != item)
1870 return NULL; 2050 return NULL;
1871 2051
1872 if (!node) { 2052 __radix_tree_delete(root, node, slot);
1873 root_tag_clear_all(root);
1874 root->rnode = NULL;
1875 return entry;
1876 }
1877
1878 offset = get_slot_offset(node, slot);
1879
1880 /* Clear all tags associated with the item to be deleted. */
1881 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1882 node_tag_clear(root, node, tag, offset);
1883
1884 __radix_tree_replace(root, node, slot, NULL, NULL, NULL);
1885 2053
1886 return entry; 2054 return entry;
1887} 2055}
1888EXPORT_SYMBOL(radix_tree_delete_item); 2056EXPORT_SYMBOL(radix_tree_delete_item);
1889 2057
1890/** 2058/**
1891 * radix_tree_delete - delete an item from a radix tree 2059 * radix_tree_delete - delete an entry from a radix tree
1892 * @root: radix tree root 2060 * @root: radix tree root
1893 * @index: index key 2061 * @index: index key
1894 * 2062 *
1895 * Remove the item at @index from the radix tree rooted at @root. 2063 * Remove the entry at @index from the radix tree rooted at @root.
1896 * 2064 *
1897 * Returns the address of the deleted item, or NULL if it was not present. 2065 * Return: The deleted entry, or %NULL if it was not present.
1898 */ 2066 */
1899void *radix_tree_delete(struct radix_tree_root *root, unsigned long index) 2067void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1900{ 2068{
@@ -1904,15 +2072,14 @@ EXPORT_SYMBOL(radix_tree_delete);
1904 2072
1905void radix_tree_clear_tags(struct radix_tree_root *root, 2073void radix_tree_clear_tags(struct radix_tree_root *root,
1906 struct radix_tree_node *node, 2074 struct radix_tree_node *node,
1907 void **slot) 2075 void __rcu **slot)
1908{ 2076{
1909 if (node) { 2077 if (node) {
1910 unsigned int tag, offset = get_slot_offset(node, slot); 2078 unsigned int tag, offset = get_slot_offset(node, slot);
1911 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 2079 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1912 node_tag_clear(root, node, tag, offset); 2080 node_tag_clear(root, node, tag, offset);
1913 } else { 2081 } else {
1914 /* Clear root node tags */ 2082 root_tag_clear_all(root);
1915 root->gfp_mask &= __GFP_BITS_MASK;
1916 } 2083 }
1917} 2084}
1918 2085
@@ -1921,12 +2088,147 @@ void radix_tree_clear_tags(struct radix_tree_root *root,
1921 * @root: radix tree root 2088 * @root: radix tree root
1922 * @tag: tag to test 2089 * @tag: tag to test
1923 */ 2090 */
1924int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag) 2091int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
1925{ 2092{
1926 return root_tag_get(root, tag); 2093 return root_tag_get(root, tag);
1927} 2094}
1928EXPORT_SYMBOL(radix_tree_tagged); 2095EXPORT_SYMBOL(radix_tree_tagged);
1929 2096
2097/**
2098 * idr_preload - preload for idr_alloc()
2099 * @gfp_mask: allocation mask to use for preloading
2100 *
2101 * Preallocate memory to use for the next call to idr_alloc(). This function
2102 * returns with preemption disabled. It will be enabled by idr_preload_end().
2103 */
2104void idr_preload(gfp_t gfp_mask)
2105{
2106 __radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE);
2107}
2108EXPORT_SYMBOL(idr_preload);
2109
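A sketch of the canonical pattern idr_preload() exists for (illustrative idr, lock, and function names; idr_alloc() lives in lib/idr.c): preallocate with a sleeping GFP, then allocate atomically under the caller's lock:

static int install_object(struct idr *idr, spinlock_t *lock, void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* returns with preemption disabled */
	spin_lock(lock);
	id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();		/* re-enables preemption */

	return id;			/* new ID, or a negative errno */
}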
2110/**
2111 * ida_pre_get - reserve resources for ida allocation
2112 * @ida: ida handle
2113 * @gfp: memory allocation flags
2114 *
2115 * This function should be called before calling ida_get_new_above(). If it
2116 * is unable to allocate memory, it will return %0. On success, it returns %1.
2117 */
2118int ida_pre_get(struct ida *ida, gfp_t gfp)
2119{
2120 __radix_tree_preload(gfp, IDA_PRELOAD_SIZE);
2121 /*
2122 * The IDA API has no preload_end() equivalent. Instead,
2123 * ida_get_new() can return -EAGAIN, prompting the caller
2124 * to return to the ida_pre_get() step.
2125 */
2126 preempt_enable();
2127
2128 if (!this_cpu_read(ida_bitmap)) {
2129 struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
2130 if (!bitmap)
2131 return 0;
2132 if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
2133 kfree(bitmap);
2134 }
2135
2136 return 1;
2137}
2138EXPORT_SYMBOL(ida_pre_get);
2139
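The retry loop the comment above refers to, as a sketch (illustrative function name; the caller's own locking around ida_get_new() is omitted for brevity):

static int get_id(struct ida *ida, int *id)
{
	int err;

again:
	if (!ida_pre_get(ida, GFP_KERNEL))
		return -ENOMEM;
	err = ida_get_new(ida, id);
	if (err == -EAGAIN)
		goto again;	/* preloaded memory was consumed; retry */

	return err;
}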
2140void __rcu **idr_get_free(struct radix_tree_root *root,
2141 struct radix_tree_iter *iter, gfp_t gfp, int end)
2142{
2143 struct radix_tree_node *node = NULL, *child;
2144 void __rcu **slot = (void __rcu **)&root->rnode;
2145 unsigned long maxindex, start = iter->next_index;
2146 unsigned long max = end > 0 ? end - 1 : INT_MAX;
2147 unsigned int shift, offset = 0;
2148
2149 grow:
2150 shift = radix_tree_load_root(root, &child, &maxindex);
2151 if (!radix_tree_tagged(root, IDR_FREE))
2152 start = max(start, maxindex + 1);
2153 if (start > max)
2154 return ERR_PTR(-ENOSPC);
2155
2156 if (start > maxindex) {
2157 int error = radix_tree_extend(root, gfp, start, shift);
2158 if (error < 0)
2159 return ERR_PTR(error);
2160 shift = error;
2161 child = rcu_dereference_raw(root->rnode);
2162 }
2163
2164 while (shift) {
2165 shift -= RADIX_TREE_MAP_SHIFT;
2166 if (child == NULL) {
2167 /* Have to add a child node. */
2168 child = radix_tree_node_alloc(gfp, node, root, shift,
2169 offset, 0, 0);
2170 if (!child)
2171 return ERR_PTR(-ENOMEM);
2172 all_tag_set(child, IDR_FREE);
2173 rcu_assign_pointer(*slot, node_to_entry(child));
2174 if (node)
2175 node->count++;
2176 } else if (!radix_tree_is_internal_node(child))
2177 break;
2178
2179 node = entry_to_node(child);
2180 offset = radix_tree_descend(node, &child, start);
2181 if (!tag_get(node, IDR_FREE, offset)) {
2182 offset = radix_tree_find_next_bit(node, IDR_FREE,
2183 offset + 1);
2184 start = next_index(start, node, offset);
2185 if (start > max)
2186 return ERR_PTR(-ENOSPC);
2187 while (offset == RADIX_TREE_MAP_SIZE) {
2188 offset = node->offset + 1;
2189 node = node->parent;
2190 if (!node)
2191 goto grow;
2192 shift = node->shift;
2193 }
2194 child = rcu_dereference_raw(node->slots[offset]);
2195 }
2196 slot = &node->slots[offset];
2197 }
2198
2199 iter->index = start;
2200 if (node)
2201 iter->next_index = 1 + min(max, (start | node_maxindex(node)));
2202 else
2203 iter->next_index = 1;
2204 iter->node = node;
2205 __set_iter_shift(iter, shift);
2206 set_iter_tags(iter, node, offset, IDR_FREE);
2207
2208 return slot;
2209}
2210
2211/**
2212 * idr_destroy - release all internal memory from an IDR
2213 * @idr: idr handle
2214 *
2215 * After this function is called, the IDR is empty, and may be reused or
2216 * the data structure containing it may be freed.
2217 *
2218 * A typical clean-up sequence for objects stored in an idr tree will use
2219 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
2220 * free the memory used to keep track of those objects.
2221 */
2222void idr_destroy(struct idr *idr)
2223{
2224 struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode);
2225 if (radix_tree_is_internal_node(node))
2226 radix_tree_free_nodes(node);
2227 idr->idr_rt.rnode = NULL;
2228 root_tag_set(&idr->idr_rt, IDR_FREE);
2229}
2230EXPORT_SYMBOL(idr_destroy);
2231
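The clean-up sequence the kernel-doc above describes, sketched with illustrative names and assuming each stored pointer was kmalloc'ed:

static int free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void teardown(struct idr *idr)
{
	idr_for_each(idr, free_one, NULL);
	idr_destroy(idr);	/* releases the internal radix tree nodes */
}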
1930static void 2232static void
1931radix_tree_node_ctor(void *arg) 2233radix_tree_node_ctor(void *arg)
1932{ 2234{
@@ -1970,10 +2272,12 @@ static int radix_tree_cpu_dead(unsigned int cpu)
1970 rtp = &per_cpu(radix_tree_preloads, cpu); 2272 rtp = &per_cpu(radix_tree_preloads, cpu);
1971 while (rtp->nr) { 2273 while (rtp->nr) {
1972 node = rtp->nodes; 2274 node = rtp->nodes;
1973 rtp->nodes = node->private_data; 2275 rtp->nodes = node->parent;
1974 kmem_cache_free(radix_tree_node_cachep, node); 2276 kmem_cache_free(radix_tree_node_cachep, node);
1975 rtp->nr--; 2277 rtp->nr--;
1976 } 2278 }
2279 kfree(per_cpu(ida_bitmap, cpu));
2280 per_cpu(ida_bitmap, cpu) = NULL;
1977 return 0; 2281 return 0;
1978} 2282}
1979 2283
diff --git a/lib/rbtree.c b/lib/rbtree.c
index 1f8b112a7c35..4ba2828a67c0 100644
--- a/lib/rbtree.c
+++ b/lib/rbtree.c
@@ -427,7 +427,9 @@ static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
427static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {} 427static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
428 428
429static const struct rb_augment_callbacks dummy_callbacks = { 429static const struct rb_augment_callbacks dummy_callbacks = {
430 dummy_propagate, dummy_copy, dummy_rotate 430 .propagate = dummy_propagate,
431 .copy = dummy_copy,
432 .rotate = dummy_rotate
431}; 433};
432 434
433void rb_insert_color(struct rb_node *node, struct rb_root *root) 435void rb_insert_color(struct rb_node *node, struct rb_root *root)
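The hunk above swaps positional initializers for designated ones: each callback now binds by field name, so reordering struct rb_augment_callbacks can no longer silently swap function pointers. A generic sketch of the idiom (illustrative types and names):

struct ops {
	void (*start)(void);
	void (*stop)(void);
};

static void my_start(void) { }
static void my_stop(void) { }

/* Field names, not field order, determine the binding. */
static const struct ops my_ops = {
	.start = my_start,
	.stop  = my_stop,
};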
diff --git a/lib/refcount.c b/lib/refcount.c
new file mode 100644
index 000000000000..aa09ad3c30b0
--- /dev/null
+++ b/lib/refcount.c
@@ -0,0 +1,267 @@
1/*
2 * Variant of atomic_t specialized for reference counts.
3 *
4 * The interface matches the atomic_t interface (to aid in porting) but only
5 * provides the few functions one should use for reference counting.
6 *
7 * It differs in that the counter saturates at UINT_MAX and will not move once
8 * there. This avoids wrapping the counter and causing 'spurious'
9 * use-after-free issues.
10 *
11 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
12 * and provide only what is strictly required for refcounts.
13 *
14 * The increments are fully relaxed; these will not provide ordering. The
15 * rationale is that whatever is used to obtain the object we're increasing the
16 * reference count on will provide the ordering. For locked data structures,
17 * it's the lock acquire, for RCU/lockless data structures it's the dependent
18 * load.
19 *
20 * Do note that inc_not_zero() provides a control dependency which will order
21 * future stores against the inc; this ensures we'll never modify the object
22 * if we did not in fact acquire a reference.
23 *
24 * The decrements will provide release order, such that all the prior loads and
25 * stores will be issued before; they also provide a control dependency, which
26 * will order us against the subsequent free().
27 *
28 * The control dependency is against the load of the cmpxchg (ll/sc) that
29 * succeeded. This means the stores aren't fully ordered, but this is fine
30 * because the 1->0 transition indicates no concurrency.
31 *
32 * Note that the allocator is responsible for ordering things between free()
33 * and alloc().
34 *
35 */
36
37#include <linux/refcount.h>
38#include <linux/bug.h>
39
40bool refcount_add_not_zero(unsigned int i, refcount_t *r)
41{
42 unsigned int old, new, val = atomic_read(&r->refs);
43
44 for (;;) {
45 if (!val)
46 return false;
47
48 if (unlikely(val == UINT_MAX))
49 return true;
50
51 new = val + i;
52 if (new < val)
53 new = UINT_MAX;
54 old = atomic_cmpxchg_relaxed(&r->refs, val, new);
55 if (old == val)
56 break;
57
58 val = old;
59 }
60
61 WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
62
63 return true;
64}
65EXPORT_SYMBOL_GPL(refcount_add_not_zero);
66
67void refcount_add(unsigned int i, refcount_t *r)
68{
69 WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
70}
71EXPORT_SYMBOL_GPL(refcount_add);
72
73/*
74 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
75 *
76 * Provides no memory ordering, it is assumed the caller has guaranteed the
77 * object memory to be stable (RCU, etc.). It does provide a control dependency
78 * and thereby orders future stores. See the comment on top.
79 */
80bool refcount_inc_not_zero(refcount_t *r)
81{
82 unsigned int old, new, val = atomic_read(&r->refs);
83
84 for (;;) {
85 new = val + 1;
86
87 if (!val)
88 return false;
89
90 if (unlikely(!new))
91 return true;
92
93 old = atomic_cmpxchg_relaxed(&r->refs, val, new);
94 if (old == val)
95 break;
96
97 val = old;
98 }
99
100 WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
101
102 return true;
103}
104EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
105
106/*
107 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
108 *
109 * Provides no memory ordering, it is assumed the caller already has a
110 * reference on the object, will WARN when this is not so.
111 */
112void refcount_inc(refcount_t *r)
113{
114 WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
115}
116EXPORT_SYMBOL_GPL(refcount_inc);
117
118bool refcount_sub_and_test(unsigned int i, refcount_t *r)
119{
120 unsigned int old, new, val = atomic_read(&r->refs);
121
122 for (;;) {
123 if (unlikely(val == UINT_MAX))
124 return false;
125
126 new = val - i;
127 if (new > val) {
128 WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
129 return false;
130 }
131
132 old = atomic_cmpxchg_release(&r->refs, val, new);
133 if (old == val)
134 break;
135
136 val = old;
137 }
138
139 return !new;
140}
141EXPORT_SYMBOL_GPL(refcount_sub_and_test);
142
143/*
144 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
145 * decrement when saturated at UINT_MAX.
146 *
147 * Provides release memory ordering, such that prior loads and stores are done
148 * before, and provides a control dependency such that free() must come after.
149 * See the comment on top.
150 */
151bool refcount_dec_and_test(refcount_t *r)
152{
153 return refcount_sub_and_test(1, r);
154}
155EXPORT_SYMBOL_GPL(refcount_dec_and_test);
156
157/*
158 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
159 * when saturated at UINT_MAX.
160 *
161 * Provides release memory ordering, such that prior loads and stores are done
162 * before.
163 */
164
165void refcount_dec(refcount_t *r)
166{
167 WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
168}
169EXPORT_SYMBOL_GPL(refcount_dec);
170
171/*
172 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
173 * success thereof.
174 *
175 * Like all decrement operations, it provides release memory order and provides
176 * a control dependency.
177 *
178 * It can be used like a try-delete operator; this explicit case is provided
179 * rather than a generic cmpxchg, because that would allow implementing unsafe
180 * operations.
181 */
182bool refcount_dec_if_one(refcount_t *r)
183{
184 return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
185}
186EXPORT_SYMBOL_GPL(refcount_dec_if_one);
187
188/*
189 * No atomic_t counterpart; it decrements unless the value is 1, in which case
190 * it will return false.
191 *
192 * Was often done like: atomic_add_unless(&var, -1, 1)
193 */
194bool refcount_dec_not_one(refcount_t *r)
195{
196 unsigned int old, new, val = atomic_read(&r->refs);
197
198 for (;;) {
199 if (unlikely(val == UINT_MAX))
200 return true;
201
202 if (val == 1)
203 return false;
204
205 new = val - 1;
206 if (new > val) {
207 WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
208 return true;
209 }
210
211 old = atomic_cmpxchg_release(&r->refs, val, new);
212 if (old == val)
213 break;
214
215 val = old;
216 }
217
218 return true;
219}
220EXPORT_SYMBOL_GPL(refcount_dec_not_one);
221
222/*
223 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
224 * to decrement when saturated at UINT_MAX.
225 *
226 * Provides release memory ordering, such that prior loads and stores are done
227 * before, and provides a control dependency such that free() must come after.
228 * See the comment on top.
229 */
230bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
231{
232 if (refcount_dec_not_one(r))
233 return false;
234
235 mutex_lock(lock);
236 if (!refcount_dec_and_test(r)) {
237 mutex_unlock(lock);
238 return false;
239 }
240
241 return true;
242}
243EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);
244
245/*
246 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
247 * decrement when saturated at UINT_MAX.
248 *
249 * Provides release memory ordering, such that prior loads and stores are done
250 * before, and provides a control dependency such that free() must come after.
251 * See the comment on top.
252 */
253bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
254{
255 if (refcount_dec_not_one(r))
256 return false;
257
258 spin_lock(lock);
259 if (!refcount_dec_and_test(r)) {
260 spin_unlock(lock);
261 return false;
262 }
263
264 return true;
265}
266EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
267
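A sketch of the consumer pattern refcount_t targets (illustrative object name; kernel context assumed). refcount_inc() WARNs instead of silently wrapping, and refcount_dec_and_test() carries the release ordering required before the free:

struct foo {
	refcount_t ref;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->ref, 1);	/* caller holds the first reference */
	return f;
}

static void foo_get(struct foo *f)
{
	refcount_inc(&f->ref);		/* WARNs on 0 and on saturation */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->ref))
		kfree(f);		/* ordered after all prior accesses */
}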
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 32d0ad058380..f8635fd57442 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/log2.h> 20#include <linux/log2.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/rculist.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -32,6 +33,11 @@
32#define HASH_MIN_SIZE 4U 33#define HASH_MIN_SIZE 4U
33#define BUCKET_LOCKS_PER_CPU 32UL 34#define BUCKET_LOCKS_PER_CPU 32UL
34 35
36union nested_table {
37 union nested_table __rcu *table;
38 struct rhash_head __rcu *bucket;
39};
40
35static u32 head_hashfn(struct rhashtable *ht, 41static u32 head_hashfn(struct rhashtable *ht,
36 const struct bucket_table *tbl, 42 const struct bucket_table *tbl,
37 const struct rhash_head *he) 43 const struct rhash_head *he)
@@ -76,6 +82,9 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
76 /* Never allocate more than 0.5 locks per bucket */ 82 /* Never allocate more than 0.5 locks per bucket */
77 size = min_t(unsigned int, size, tbl->size >> 1); 83 size = min_t(unsigned int, size, tbl->size >> 1);
78 84
85 if (tbl->nest)
86 size = min(size, 1U << tbl->nest);
87
79 if (sizeof(spinlock_t) != 0) { 88 if (sizeof(spinlock_t) != 0) {
80 tbl->locks = NULL; 89 tbl->locks = NULL;
81#ifdef CONFIG_NUMA 90#ifdef CONFIG_NUMA
@@ -99,11 +108,46 @@ static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
99 return 0; 108 return 0;
100} 109}
101 110
111static void nested_table_free(union nested_table *ntbl, unsigned int size)
112{
113 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
114 const unsigned int len = 1 << shift;
115 unsigned int i;
116
117 ntbl = rcu_dereference_raw(ntbl->table);
118 if (!ntbl)
119 return;
120
121 if (size > len) {
122 size >>= shift;
123 for (i = 0; i < len; i++)
124 nested_table_free(ntbl + i, size);
125 }
126
127 kfree(ntbl);
128}
129
130static void nested_bucket_table_free(const struct bucket_table *tbl)
131{
132 unsigned int size = tbl->size >> tbl->nest;
133 unsigned int len = 1 << tbl->nest;
134 union nested_table *ntbl;
135 unsigned int i;
136
137 ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
138
139 for (i = 0; i < len; i++)
140 nested_table_free(ntbl + i, size);
141
142 kfree(ntbl);
143}
144
102static void bucket_table_free(const struct bucket_table *tbl) 145static void bucket_table_free(const struct bucket_table *tbl)
103{ 146{
104 if (tbl) 147 if (tbl->nest)
105 kvfree(tbl->locks); 148 nested_bucket_table_free(tbl);
106 149
150 kvfree(tbl->locks);
107 kvfree(tbl); 151 kvfree(tbl);
108} 152}
109 153
@@ -112,6 +156,59 @@ static void bucket_table_free_rcu(struct rcu_head *head)
112 bucket_table_free(container_of(head, struct bucket_table, rcu)); 156 bucket_table_free(container_of(head, struct bucket_table, rcu));
113} 157}
114 158
159static union nested_table *nested_table_alloc(struct rhashtable *ht,
160 union nested_table __rcu **prev,
161 unsigned int shifted,
162 unsigned int nhash)
163{
164 union nested_table *ntbl;
165 int i;
166
167 ntbl = rcu_dereference(*prev);
168 if (ntbl)
169 return ntbl;
170
171 ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
172
173 if (ntbl && shifted) {
174 for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0].bucket); i++)
175 INIT_RHT_NULLS_HEAD(ntbl[i].bucket, ht,
176 (i << shifted) | nhash);
177 }
178
179 rcu_assign_pointer(*prev, ntbl);
180
181 return ntbl;
182}
183
184static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
185 size_t nbuckets,
186 gfp_t gfp)
187{
188 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
189 struct bucket_table *tbl;
190 size_t size;
191
192 if (nbuckets < (1 << (shift + 1)))
193 return NULL;
194
195 size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
196
197 tbl = kzalloc(size, gfp);
198 if (!tbl)
199 return NULL;
200
201 if (!nested_table_alloc(ht, (union nested_table __rcu **)tbl->buckets,
202 0, 0)) {
203 kfree(tbl);
204 return NULL;
205 }
206
207 tbl->nest = (ilog2(nbuckets) - 1) % shift + 1;
208
209 return tbl;
210}
211
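The fan-out of these nested tables is fixed by the page size: each level is one page of pointers, so shift = PAGE_SHIFT - ilog2(sizeof(void *)). A standalone sketch of the arithmetic under assumed values (4 KiB pages, 8-byte pointers):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assumption: 4 KiB pages */
	const unsigned int ptr_shift = 3;	/* assumption: 8-byte pointers */
	const unsigned int shift = page_shift - ptr_shift;

	printf("slots per nested page: %u\n", 1u << shift);		/* 512 */
	printf("buckets over two levels: %u\n", 1u << (2 * shift));	/* 262144 */
	return 0;
}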
115static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, 212static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
116 size_t nbuckets, 213 size_t nbuckets,
117 gfp_t gfp) 214 gfp_t gfp)
@@ -126,10 +223,17 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
126 tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY); 223 tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
127 if (tbl == NULL && gfp == GFP_KERNEL) 224 if (tbl == NULL && gfp == GFP_KERNEL)
128 tbl = vzalloc(size); 225 tbl = vzalloc(size);
226
227 size = nbuckets;
228
229 if (tbl == NULL && gfp != GFP_KERNEL) {
230 tbl = nested_bucket_table_alloc(ht, nbuckets, gfp);
231 nbuckets = 0;
232 }
129 if (tbl == NULL) 233 if (tbl == NULL)
130 return NULL; 234 return NULL;
131 235
132 tbl->size = nbuckets; 236 tbl->size = size;
133 237
134 if (alloc_bucket_locks(ht, tbl, gfp) < 0) { 238 if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
135 bucket_table_free(tbl); 239 bucket_table_free(tbl);
@@ -164,12 +268,17 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
164 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 268 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
165 struct bucket_table *new_tbl = rhashtable_last_table(ht, 269 struct bucket_table *new_tbl = rhashtable_last_table(ht,
166 rht_dereference_rcu(old_tbl->future_tbl, ht)); 270 rht_dereference_rcu(old_tbl->future_tbl, ht));
167 struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash]; 271 struct rhash_head __rcu **pprev = rht_bucket_var(old_tbl, old_hash);
168 int err = -ENOENT; 272 int err = -EAGAIN;
169 struct rhash_head *head, *next, *entry; 273 struct rhash_head *head, *next, *entry;
170 spinlock_t *new_bucket_lock; 274 spinlock_t *new_bucket_lock;
171 unsigned int new_hash; 275 unsigned int new_hash;
172 276
277 if (new_tbl->nest)
278 goto out;
279
280 err = -ENOENT;
281
173 rht_for_each(entry, old_tbl, old_hash) { 282 rht_for_each(entry, old_tbl, old_hash) {
174 err = 0; 283 err = 0;
175 next = rht_dereference_bucket(entry->next, old_tbl, old_hash); 284 next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -202,19 +311,26 @@ out:
202 return err; 311 return err;
203} 312}
204 313
205static void rhashtable_rehash_chain(struct rhashtable *ht, 314static int rhashtable_rehash_chain(struct rhashtable *ht,
206 unsigned int old_hash) 315 unsigned int old_hash)
207{ 316{
208 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 317 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
209 spinlock_t *old_bucket_lock; 318 spinlock_t *old_bucket_lock;
319 int err;
210 320
211 old_bucket_lock = rht_bucket_lock(old_tbl, old_hash); 321 old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);
212 322
213 spin_lock_bh(old_bucket_lock); 323 spin_lock_bh(old_bucket_lock);
214 while (!rhashtable_rehash_one(ht, old_hash)) 324 while (!(err = rhashtable_rehash_one(ht, old_hash)))
215 ; 325 ;
216 old_tbl->rehash++; 326
327 if (err == -ENOENT) {
328 old_tbl->rehash++;
329 err = 0;
330 }
217 spin_unlock_bh(old_bucket_lock); 331 spin_unlock_bh(old_bucket_lock);
332
333 return err;
218} 334}
219 335
220static int rhashtable_rehash_attach(struct rhashtable *ht, 336static int rhashtable_rehash_attach(struct rhashtable *ht,
@@ -246,13 +362,17 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
246 struct bucket_table *new_tbl; 362 struct bucket_table *new_tbl;
247 struct rhashtable_walker *walker; 363 struct rhashtable_walker *walker;
248 unsigned int old_hash; 364 unsigned int old_hash;
365 int err;
249 366
250 new_tbl = rht_dereference(old_tbl->future_tbl, ht); 367 new_tbl = rht_dereference(old_tbl->future_tbl, ht);
251 if (!new_tbl) 368 if (!new_tbl)
252 return 0; 369 return 0;
253 370
254 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) 371 for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
255 rhashtable_rehash_chain(ht, old_hash); 372 err = rhashtable_rehash_chain(ht, old_hash);
373 if (err)
374 return err;
375 }
256 376
257 /* Publish the new table pointer. */ 377 /* Publish the new table pointer. */
258 rcu_assign_pointer(ht->tbl, new_tbl); 378 rcu_assign_pointer(ht->tbl, new_tbl);
@@ -271,31 +391,16 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
271 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0; 391 return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
272} 392}
273 393
274/** 394static int rhashtable_rehash_alloc(struct rhashtable *ht,
275 * rhashtable_expand - Expand hash table while allowing concurrent lookups 395 struct bucket_table *old_tbl,
276 * @ht: the hash table to expand 396 unsigned int size)
277 *
278 * A secondary bucket array is allocated and the hash entries are migrated.
279 *
280 * This function may only be called in a context where it is safe to call
281 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
282 *
283 * The caller must ensure that no concurrent resizing occurs by holding
284 * ht->mutex.
285 *
286 * It is valid to have concurrent insertions and deletions protected by per
287 * bucket locks or concurrent RCU protected lookups and traversals.
288 */
289static int rhashtable_expand(struct rhashtable *ht)
290{ 397{
291 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 398 struct bucket_table *new_tbl;
292 int err; 399 int err;
293 400
294 ASSERT_RHT_MUTEX(ht); 401 ASSERT_RHT_MUTEX(ht);
295 402
296 old_tbl = rhashtable_last_table(ht, old_tbl); 403 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
297
298 new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
299 if (new_tbl == NULL) 404 if (new_tbl == NULL)
300 return -ENOMEM; 405 return -ENOMEM;
301 406
@@ -324,12 +429,9 @@ static int rhashtable_expand(struct rhashtable *ht)
324 */ 429 */
325static int rhashtable_shrink(struct rhashtable *ht) 430static int rhashtable_shrink(struct rhashtable *ht)
326{ 431{
327 struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht); 432 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
328 unsigned int nelems = atomic_read(&ht->nelems); 433 unsigned int nelems = atomic_read(&ht->nelems);
329 unsigned int size = 0; 434 unsigned int size = 0;
330 int err;
331
332 ASSERT_RHT_MUTEX(ht);
333 435
334 if (nelems) 436 if (nelems)
335 size = roundup_pow_of_two(nelems * 3 / 2); 437 size = roundup_pow_of_two(nelems * 3 / 2);
@@ -342,15 +444,7 @@ static int rhashtable_shrink(struct rhashtable *ht)
342 if (rht_dereference(old_tbl->future_tbl, ht)) 444 if (rht_dereference(old_tbl->future_tbl, ht))
343 return -EEXIST; 445 return -EEXIST;
344 446
345 new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL); 447 return rhashtable_rehash_alloc(ht, old_tbl, size);
346 if (new_tbl == NULL)
347 return -ENOMEM;
348
349 err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
350 if (err)
351 bucket_table_free(new_tbl);
352
353 return err;
354} 448}
355 449
356static void rht_deferred_worker(struct work_struct *work) 450static void rht_deferred_worker(struct work_struct *work)
@@ -366,11 +460,14 @@ static void rht_deferred_worker(struct work_struct *work)
366 tbl = rhashtable_last_table(ht, tbl); 460 tbl = rhashtable_last_table(ht, tbl);
367 461
368 if (rht_grow_above_75(ht, tbl)) 462 if (rht_grow_above_75(ht, tbl))
369 rhashtable_expand(ht); 463 err = rhashtable_rehash_alloc(ht, tbl, tbl->size * 2);
370 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl)) 464 else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
371 rhashtable_shrink(ht); 465 err = rhashtable_shrink(ht);
466 else if (tbl->nest)
467 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
372 468
373 err = rhashtable_rehash_table(ht); 469 if (!err)
470 err = rhashtable_rehash_table(ht);
374 471
375 mutex_unlock(&ht->mutex); 472 mutex_unlock(&ht->mutex);
376 473
@@ -439,8 +536,8 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
439 int elasticity; 536 int elasticity;
440 537
441 elasticity = ht->elasticity; 538 elasticity = ht->elasticity;
442 pprev = &tbl->buckets[hash]; 539 pprev = rht_bucket_var(tbl, hash);
443 rht_for_each(head, tbl, hash) { 540 rht_for_each_continue(head, *pprev, tbl, hash) {
444 struct rhlist_head *list; 541 struct rhlist_head *list;
445 struct rhlist_head *plist; 542 struct rhlist_head *plist;
446 543
@@ -477,6 +574,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
477 struct rhash_head *obj, 574 struct rhash_head *obj,
478 void *data) 575 void *data)
479{ 576{
577 struct rhash_head __rcu **pprev;
480 struct bucket_table *new_tbl; 578 struct bucket_table *new_tbl;
481 struct rhash_head *head; 579 struct rhash_head *head;
482 580
@@ -499,7 +597,11 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
499 if (unlikely(rht_grow_above_100(ht, tbl))) 597 if (unlikely(rht_grow_above_100(ht, tbl)))
500 return ERR_PTR(-EAGAIN); 598 return ERR_PTR(-EAGAIN);
501 599
502 head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash); 600 pprev = rht_bucket_insert(ht, tbl, hash);
601 if (!pprev)
602 return ERR_PTR(-ENOMEM);
603
604 head = rht_dereference_bucket(*pprev, tbl, hash);
503 605
504 RCU_INIT_POINTER(obj->next, head); 606 RCU_INIT_POINTER(obj->next, head);
505 if (ht->rhlist) { 607 if (ht->rhlist) {
@@ -509,7 +611,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
509 RCU_INIT_POINTER(list->next, NULL); 611 RCU_INIT_POINTER(list->next, NULL);
510 } 612 }
511 613
512 rcu_assign_pointer(tbl->buckets[hash], obj); 614 rcu_assign_pointer(*pprev, obj);
513 615
514 atomic_inc(&ht->nelems); 616 atomic_inc(&ht->nelems);
515 if (rht_grow_above_75(ht, tbl)) 617 if (rht_grow_above_75(ht, tbl))
@@ -975,7 +1077,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
975 void (*free_fn)(void *ptr, void *arg), 1077 void (*free_fn)(void *ptr, void *arg),
976 void *arg) 1078 void *arg)
977{ 1079{
978 const struct bucket_table *tbl; 1080 struct bucket_table *tbl;
979 unsigned int i; 1081 unsigned int i;
980 1082
981 cancel_work_sync(&ht->run_work); 1083 cancel_work_sync(&ht->run_work);
@@ -986,7 +1088,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
986 for (i = 0; i < tbl->size; i++) { 1088 for (i = 0; i < tbl->size; i++) {
987 struct rhash_head *pos, *next; 1089 struct rhash_head *pos, *next;
988 1090
989 for (pos = rht_dereference(tbl->buckets[i], ht), 1091 for (pos = rht_dereference(*rht_bucket(tbl, i), ht),
990 next = !rht_is_a_nulls(pos) ? 1092 next = !rht_is_a_nulls(pos) ?
991 rht_dereference(pos->next, ht) : NULL; 1093 rht_dereference(pos->next, ht) : NULL;
992 !rht_is_a_nulls(pos); 1094 !rht_is_a_nulls(pos);
@@ -1007,3 +1109,71 @@ void rhashtable_destroy(struct rhashtable *ht)
1007 return rhashtable_free_and_destroy(ht, NULL, NULL); 1109 return rhashtable_free_and_destroy(ht, NULL, NULL);
1008} 1110}
1009EXPORT_SYMBOL_GPL(rhashtable_destroy); 1111EXPORT_SYMBOL_GPL(rhashtable_destroy);
1112
1113struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1114 unsigned int hash)
1115{
1116 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1117 static struct rhash_head __rcu *rhnull =
1118 (struct rhash_head __rcu *)NULLS_MARKER(0);
1119 unsigned int index = hash & ((1 << tbl->nest) - 1);
1120 unsigned int size = tbl->size >> tbl->nest;
1121 unsigned int subhash = hash;
1122 union nested_table *ntbl;
1123
1124 ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1125 ntbl = rht_dereference_bucket_rcu(ntbl[index].table, tbl, hash);
1126 subhash >>= tbl->nest;
1127
1128 while (ntbl && size > (1 << shift)) {
1129 index = subhash & ((1 << shift) - 1);
1130 ntbl = rht_dereference_bucket_rcu(ntbl[index].table,
1131 tbl, hash);
1132 size >>= shift;
1133 subhash >>= shift;
1134 }
1135
1136 if (!ntbl)
1137 return &rhnull;
1138
1139 return &ntbl[subhash].bucket;
1140
1141}
1142EXPORT_SYMBOL_GPL(rht_bucket_nested);
1143
1144struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
1145 struct bucket_table *tbl,
1146 unsigned int hash)
1147{
1148 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1149 unsigned int index = hash & ((1 << tbl->nest) - 1);
1150 unsigned int size = tbl->size >> tbl->nest;
1151 union nested_table *ntbl;
1152 unsigned int shifted;
1153 unsigned int nhash;
1154
1155 ntbl = (union nested_table *)rcu_dereference_raw(tbl->buckets[0]);
1156 hash >>= tbl->nest;
1157 nhash = index;
1158 shifted = tbl->nest;
1159 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1160 size <= (1 << shift) ? shifted : 0, nhash);
1161
1162 while (ntbl && size > (1 << shift)) {
1163 index = hash & ((1 << shift) - 1);
1164 size >>= shift;
1165 hash >>= shift;
1166 nhash |= index << shifted;
1167 shifted += shift;
1168 ntbl = nested_table_alloc(ht, &ntbl[index].table,
1169 size <= (1 << shift) ? shifted : 0,
1170 nhash);
1171 }
1172
1173 if (!ntbl)
1174 return NULL;
1175
1176 return &ntbl[hash].bucket;
1177
1178}
1179EXPORT_SYMBOL_GPL(rht_bucket_nested_insert);
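For context, a sketch of the table set-up these helpers serve (illustrative struct, field, and function names). The nested-bucket path is transparent: callers keep using the ordinary fast-path API, and GFP_ATOMIC allocations that cannot get a flat bucket array fall back to nested tables internally:

struct obj {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params obj_params = {
	.key_len	     = sizeof(u32),
	.key_offset	     = offsetof(struct obj, key),
	.head_offset	     = offsetof(struct obj, node),
	.automatic_shrinking = true,
};

/* After rhashtable_init(ht, &obj_params): */
static int add_obj(struct rhashtable *ht, struct obj *obj)
{
	return rhashtable_insert_fast(ht, &obj->node, obj_params);
}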
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 55e11c4b2f3b..60e800e0b5a0 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -15,6 +15,7 @@
15 * along with this program. If not, see <https://www.gnu.org/licenses/>. 15 * along with this program. If not, see <https://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/sched.h>
18#include <linux/random.h> 19#include <linux/random.h>
19#include <linux/sbitmap.h> 20#include <linux/sbitmap.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 004fc70fc56a..c6cf82242d65 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -651,7 +651,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
651{ 651{
652 unsigned int offset = 0; 652 unsigned int offset = 0;
653 struct sg_mapping_iter miter; 653 struct sg_mapping_iter miter;
654 unsigned long flags;
655 unsigned int sg_flags = SG_MITER_ATOMIC; 654 unsigned int sg_flags = SG_MITER_ATOMIC;
656 655
657 if (to_buffer) 656 if (to_buffer)
@@ -664,9 +663,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
664 if (!sg_miter_skip(&miter, skip)) 663 if (!sg_miter_skip(&miter, skip))
665 return false; 664 return false;
666 665
667 local_irq_save(flags); 666 while ((offset < buflen) && sg_miter_next(&miter)) {
668
669 while (sg_miter_next(&miter) && offset < buflen) {
670 unsigned int len; 667 unsigned int len;
671 668
672 len = min(miter.length, buflen - offset); 669 len = min(miter.length, buflen - offset);
@@ -681,7 +678,6 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
681 678
682 sg_miter_stop(&miter); 679 sg_miter_stop(&miter);
683 680
684 local_irq_restore(flags);
685 return offset; 681 return offset;
686} 682}
687EXPORT_SYMBOL(sg_copy_buffer); 683EXPORT_SYMBOL(sg_copy_buffer);
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 1feed6a2b12a..0beaa1d899aa 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -9,13 +9,13 @@
9#include <linux/quicklist.h> 9#include <linux/quicklist.h>
10#include <linux/cma.h> 10#include <linux/cma.h>
11 11
12void show_mem(unsigned int filter) 12void show_mem(unsigned int filter, nodemask_t *nodemask)
13{ 13{
14 pg_data_t *pgdat; 14 pg_data_t *pgdat;
15 unsigned long total = 0, reserved = 0, highmem = 0; 15 unsigned long total = 0, reserved = 0, highmem = 0;
16 16
17 printk("Mem-Info:\n"); 17 printk("Mem-Info:\n");
18 show_free_areas(filter); 18 show_free_areas(filter, nodemask);
19 19
20 for_each_online_pgdat(pgdat) { 20 for_each_online_pgdat(pgdat) {
21 unsigned long flags; 21 unsigned long flags;
diff --git a/lib/siphash.c b/lib/siphash.c
new file mode 100644
index 000000000000..3ae58b4edad6
--- /dev/null
+++ b/lib/siphash.c
@@ -0,0 +1,551 @@
1/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
2 *
3 * This file is provided under a dual BSD/GPLv2 license.
4 *
5 * SipHash: a fast short-input PRF
6 * https://131002.net/siphash/
7 *
8 * This implementation is specifically for SipHash2-4 for a secure PRF
9 * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
10 * hashtables.
11 */
12
13#include <linux/siphash.h>
14#include <asm/unaligned.h>
15
16#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
17#include <linux/dcache.h>
18#include <asm/word-at-a-time.h>
19#endif
20
21#define SIPROUND \
22 do { \
23 v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
24 v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
25 v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
26 v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
27 } while (0)
28
29#define PREAMBLE(len) \
30 u64 v0 = 0x736f6d6570736575ULL; \
31 u64 v1 = 0x646f72616e646f6dULL; \
32 u64 v2 = 0x6c7967656e657261ULL; \
33 u64 v3 = 0x7465646279746573ULL; \
34 u64 b = ((u64)(len)) << 56; \
35 v3 ^= key->key[1]; \
36 v2 ^= key->key[0]; \
37 v1 ^= key->key[1]; \
38 v0 ^= key->key[0];
39
40#define POSTAMBLE \
41 v3 ^= b; \
42 SIPROUND; \
43 SIPROUND; \
44 v0 ^= b; \
45 v2 ^= 0xff; \
46 SIPROUND; \
47 SIPROUND; \
48 SIPROUND; \
49 SIPROUND; \
50 return (v0 ^ v1) ^ (v2 ^ v3);
51
52u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
53{
54 const u8 *end = data + len - (len % sizeof(u64));
55 const u8 left = len & (sizeof(u64) - 1);
56 u64 m;
57 PREAMBLE(len)
58 for (; data != end; data += sizeof(u64)) {
59 m = le64_to_cpup(data);
60 v3 ^= m;
61 SIPROUND;
62 SIPROUND;
63 v0 ^= m;
64 }
65#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
66 if (left)
67 b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
68 bytemask_from_count(left)));
69#else
70 switch (left) {
71 case 7: b |= ((u64)end[6]) << 48;
72 case 6: b |= ((u64)end[5]) << 40;
73 case 5: b |= ((u64)end[4]) << 32;
74 case 4: b |= le32_to_cpup(data); break;
75 case 3: b |= ((u64)end[2]) << 16;
76 case 2: b |= le16_to_cpup(data); break;
77 case 1: b |= end[0];
78 }
79#endif
80 POSTAMBLE
81}
82EXPORT_SYMBOL(__siphash_aligned);
83
84#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
85u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
86{
87 const u8 *end = data + len - (len % sizeof(u64));
88 const u8 left = len & (sizeof(u64) - 1);
89 u64 m;
90 PREAMBLE(len)
91 for (; data != end; data += sizeof(u64)) {
92 m = get_unaligned_le64(data);
93 v3 ^= m;
94 SIPROUND;
95 SIPROUND;
96 v0 ^= m;
97 }
98#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
99 if (left)
100 b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
101 bytemask_from_count(left)));
102#else
103 switch (left) {
104 case 7: b |= ((u64)end[6]) << 48;
105 case 6: b |= ((u64)end[5]) << 40;
106 case 5: b |= ((u64)end[4]) << 32;
107 case 4: b |= get_unaligned_le32(end); break;
108 case 3: b |= ((u64)end[2]) << 16;
109 case 2: b |= get_unaligned_le16(end); break;
110 case 1: b |= end[0];
111 }
112#endif
113 POSTAMBLE
114}
115EXPORT_SYMBOL(__siphash_unaligned);
116#endif
117
118/**
119 * siphash_1u64 - compute 64-bit siphash PRF value of a u64
120 * @first: first u64
121 * @key: the siphash key
122 */
123u64 siphash_1u64(const u64 first, const siphash_key_t *key)
124{
125 PREAMBLE(8)
126 v3 ^= first;
127 SIPROUND;
128 SIPROUND;
129 v0 ^= first;
130 POSTAMBLE
131}
132EXPORT_SYMBOL(siphash_1u64);
133
134/**
135 * siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
136 * @first: first u64
137 * @second: second u64
138 * @key: the siphash key
139 */
140u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
141{
142 PREAMBLE(16)
143 v3 ^= first;
144 SIPROUND;
145 SIPROUND;
146 v0 ^= first;
147 v3 ^= second;
148 SIPROUND;
149 SIPROUND;
150 v0 ^= second;
151 POSTAMBLE
152}
153EXPORT_SYMBOL(siphash_2u64);
154
155/**
156 * siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
157 * @first: first u64
158 * @second: second u64
159 * @third: third u64
160 * @key: the siphash key
161 */
162u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
163 const siphash_key_t *key)
164{
165 PREAMBLE(24)
166 v3 ^= first;
167 SIPROUND;
168 SIPROUND;
169 v0 ^= first;
170 v3 ^= second;
171 SIPROUND;
172 SIPROUND;
173 v0 ^= second;
174 v3 ^= third;
175 SIPROUND;
176 SIPROUND;
177 v0 ^= third;
178 POSTAMBLE
179}
180EXPORT_SYMBOL(siphash_3u64);
181
182/**
183 * siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
184 * @first: first u64
185 * @second: second u64
186 * @third: third u64
187 * @forth: fourth u64
188 * @key: the siphash key
189 */
190u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
191 const u64 forth, const siphash_key_t *key)
192{
193 PREAMBLE(32)
194 v3 ^= first;
195 SIPROUND;
196 SIPROUND;
197 v0 ^= first;
198 v3 ^= second;
199 SIPROUND;
200 SIPROUND;
201 v0 ^= second;
202 v3 ^= third;
203 SIPROUND;
204 SIPROUND;
205 v0 ^= third;
206 v3 ^= forth;
207 SIPROUND;
208 SIPROUND;
209 v0 ^= forth;
210 POSTAMBLE
211}
212EXPORT_SYMBOL(siphash_4u64);
213
214u64 siphash_1u32(const u32 first, const siphash_key_t *key)
215{
216 PREAMBLE(4)
217 b |= first;
218 POSTAMBLE
219}
220EXPORT_SYMBOL(siphash_1u32);
221
222u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
223 const siphash_key_t *key)
224{
225 u64 combined = (u64)second << 32 | first;
226 PREAMBLE(12)
227 v3 ^= combined;
228 SIPROUND;
229 SIPROUND;
230 v0 ^= combined;
231 b |= third;
232 POSTAMBLE
233}
234EXPORT_SYMBOL(siphash_3u32);
235
236#if BITS_PER_LONG == 64
237/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
238 * performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
239 */
240
241#define HSIPROUND SIPROUND
242#define HPREAMBLE(len) PREAMBLE(len)
243#define HPOSTAMBLE \
244 v3 ^= b; \
245 HSIPROUND; \
246 v0 ^= b; \
247 v2 ^= 0xff; \
248 HSIPROUND; \
249 HSIPROUND; \
250 HSIPROUND; \
251 return (v0 ^ v1) ^ (v2 ^ v3);
252
253u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
254{
255 const u8 *end = data + len - (len % sizeof(u64));
256 const u8 left = len & (sizeof(u64) - 1);
257 u64 m;
258 HPREAMBLE(len)
259 for (; data != end; data += sizeof(u64)) {
260 m = le64_to_cpup(data);
261 v3 ^= m;
262 HSIPROUND;
263 v0 ^= m;
264 }
265#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
266 if (left)
267 b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
268 bytemask_from_count(left)));
269#else
270 switch (left) {
271 case 7: b |= ((u64)end[6]) << 48;
272 case 6: b |= ((u64)end[5]) << 40;
273 case 5: b |= ((u64)end[4]) << 32;
274 case 4: b |= le32_to_cpup(data); break;
275 case 3: b |= ((u64)end[2]) << 16;
276 case 2: b |= le16_to_cpup(data); break;
277 case 1: b |= end[0];
278 }
279#endif
280 HPOSTAMBLE
281}
282EXPORT_SYMBOL(__hsiphash_aligned);
283
284#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
285u32 __hsiphash_unaligned(const void *data, size_t len,
286 const hsiphash_key_t *key)
287{
288 const u8 *end = data + len - (len % sizeof(u64));
289 const u8 left = len & (sizeof(u64) - 1);
290 u64 m;
291 HPREAMBLE(len)
292 for (; data != end; data += sizeof(u64)) {
293 m = get_unaligned_le64(data);
294 v3 ^= m;
295 HSIPROUND;
296 v0 ^= m;
297 }
298#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
299 if (left)
300 b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
301 bytemask_from_count(left)));
302#else
303 switch (left) {
304 case 7: b |= ((u64)end[6]) << 48;
305 case 6: b |= ((u64)end[5]) << 40;
306 case 5: b |= ((u64)end[4]) << 32;
307 case 4: b |= get_unaligned_le32(end); break;
308 case 3: b |= ((u64)end[2]) << 16;
309 case 2: b |= get_unaligned_le16(end); break;
310 case 1: b |= end[0];
311 }
312#endif
313 HPOSTAMBLE
314}
315EXPORT_SYMBOL(__hsiphash_unaligned);
316#endif
317
318/**
319 * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
320 * @first: first u32
321 * @key: the hsiphash key
322 */
323u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
324{
325 HPREAMBLE(4)
326 b |= first;
327 HPOSTAMBLE
328}
329EXPORT_SYMBOL(hsiphash_1u32);
330
331/**
332 * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
333 * @first: first u32
334 * @second: second u32
335 * @key: the hsiphash key
336 */
337u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
338{
339 u64 combined = (u64)second << 32 | first;
340 HPREAMBLE(8)
341 v3 ^= combined;
342 HSIPROUND;
343 v0 ^= combined;
344 HPOSTAMBLE
345}
346EXPORT_SYMBOL(hsiphash_2u32);
347
348/**
349 * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
350 * @first: first u32
351 * @second: second u32
352 * @third: third u32
353 * @key: the hsiphash key
354 */
355u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
356 const hsiphash_key_t *key)
357{
358 u64 combined = (u64)second << 32 | first;
359 HPREAMBLE(12)
360 v3 ^= combined;
361 HSIPROUND;
362 v0 ^= combined;
363 b |= third;
364 HPOSTAMBLE
365}
366EXPORT_SYMBOL(hsiphash_3u32);
367
368/**
369 * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
370 * @first: first u32
371 * @second: second u32
372 * @third: third u32
373 * @forth: fourth u32
374 * @key: the hsiphash key
375 */
376u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
377 const u32 forth, const hsiphash_key_t *key)
378{
379 u64 combined = (u64)second << 32 | first;
380 HPREAMBLE(16)
381 v3 ^= combined;
382 HSIPROUND;
383 v0 ^= combined;
384 combined = (u64)forth << 32 | third;
385 v3 ^= combined;
386 HSIPROUND;
387 v0 ^= combined;
388 HPOSTAMBLE
389}
390EXPORT_SYMBOL(hsiphash_4u32);
391#else
392#define HSIPROUND \
393 do { \
394 v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
395 v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
396 v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
397 v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
398 } while (0)
399
400#define HPREAMBLE(len) \
401 u32 v0 = 0; \
402 u32 v1 = 0; \
403 u32 v2 = 0x6c796765U; \
404 u32 v3 = 0x74656462U; \
405 u32 b = ((u32)(len)) << 24; \
406 v3 ^= key->key[1]; \
407 v2 ^= key->key[0]; \
408 v1 ^= key->key[1]; \
409 v0 ^= key->key[0];
410
411#define HPOSTAMBLE \
412 v3 ^= b; \
413 HSIPROUND; \
414 v0 ^= b; \
415 v2 ^= 0xff; \
416 HSIPROUND; \
417 HSIPROUND; \
418 HSIPROUND; \
419 return v1 ^ v3;
420
421u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
422{
423 const u8 *end = data + len - (len % sizeof(u32));
424 const u8 left = len & (sizeof(u32) - 1);
425 u32 m;
426 HPREAMBLE(len)
427 for (; data != end; data += sizeof(u32)) {
428 m = le32_to_cpup(data);
429 v3 ^= m;
430 HSIPROUND;
431 v0 ^= m;
432 }
433 switch (left) {
434 case 3: b |= ((u32)end[2]) << 16;
435 case 2: b |= le16_to_cpup(data); break;
436 case 1: b |= end[0];
437 }
438 HPOSTAMBLE
439}
440EXPORT_SYMBOL(__hsiphash_aligned);
441
442#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
443u32 __hsiphash_unaligned(const void *data, size_t len,
444 const hsiphash_key_t *key)
445{
446 const u8 *end = data + len - (len % sizeof(u32));
447 const u8 left = len & (sizeof(u32) - 1);
448 u32 m;
449 HPREAMBLE(len)
450 for (; data != end; data += sizeof(u32)) {
451 m = get_unaligned_le32(data);
452 v3 ^= m;
453 HSIPROUND;
454 v0 ^= m;
455 }
456 switch (left) {
457 case 3: b |= ((u32)end[2]) << 16;
458 case 2: b |= get_unaligned_le16(end); break;
459 case 1: b |= end[0];
460 }
461 HPOSTAMBLE
462}
463EXPORT_SYMBOL(__hsiphash_unaligned);
464#endif
465
466/**
467 * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
468 * @first: first u32
469 * @key: the hsiphash key
470 */
471u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
472{
473 HPREAMBLE(4)
474 v3 ^= first;
475 HSIPROUND;
476 v0 ^= first;
477 HPOSTAMBLE
478}
479EXPORT_SYMBOL(hsiphash_1u32);
480
481/**
482 * hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
483 * @first: first u32
484 * @second: second u32
485 * @key: the hsiphash key
486 */
487u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
488{
489 HPREAMBLE(8)
490 v3 ^= first;
491 HSIPROUND;
492 v0 ^= first;
493 v3 ^= second;
494 HSIPROUND;
495 v0 ^= second;
496 HPOSTAMBLE
497}
498EXPORT_SYMBOL(hsiphash_2u32);
499
500/**
501 * hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
502 * @first: first u32
503 * @second: second u32
504 * @third: third u32
505 * @key: the hsiphash key
506 */
507u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
508 const hsiphash_key_t *key)
509{
510 HPREAMBLE(12)
511 v3 ^= first;
512 HSIPROUND;
513 v0 ^= first;
514 v3 ^= second;
515 HSIPROUND;
516 v0 ^= second;
517 v3 ^= third;
518 HSIPROUND;
519 v0 ^= third;
520 HPOSTAMBLE
521}
522EXPORT_SYMBOL(hsiphash_3u32);
523
524/**
525 * hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
526 * @first: first u32
527 * @second: second u32
528 * @third: third u32
529 * @forth: fourth u32
530 * @key: the hsiphash key
531 */
532u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
533 const u32 forth, const hsiphash_key_t *key)
534{
535 HPREAMBLE(16)
536 v3 ^= first;
537 HSIPROUND;
538 v0 ^= first;
539 v3 ^= second;
540 HSIPROUND;
541 v0 ^= second;
542 v3 ^= third;
543 HSIPROUND;
544 v0 ^= third;
545 v3 ^= forth;
546 HSIPROUND;
547 v0 ^= forth;
548 HPOSTAMBLE
549}
550EXPORT_SYMBOL(hsiphash_4u32);
551#endif
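A usage sketch (kernel context assumed; the key and function names are illustrative). The key must be random and kept secret for the PRF guarantee to hold; the fixed-width helpers above skip the generic byte loop:

static siphash_key_t my_hash_key __read_mostly;

static void my_hash_init(void)
{
	get_random_bytes(&my_hash_key, sizeof(my_hash_key));
}

static u64 hash_buf(const void *buf, size_t len)
{
	return siphash(buf, len, &my_hash_key);		/* arbitrary length */
}

static u64 hash_pair(u64 a, u64 b)
{
	return siphash_2u64(a, b, &my_hash_key);	/* fixed-width fast path */
}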
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 1afec32de6f2..690d75b132fa 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -22,7 +22,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
22 * Kernel threads bound to a single CPU can safely use 22 * Kernel threads bound to a single CPU can safely use
23 * smp_processor_id(): 23 * smp_processor_id():
24 */ 24 */
25 if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) 25 if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
26 goto out; 26 goto out;
27 27
28 /* 28 /*
diff --git a/lib/sort.c b/lib/sort.c
index fc20df42aa6f..975c6ef6fec7 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -4,6 +4,8 @@
4 * Jan 23 2005 Matt Mackall <mpm@selenic.com> 4 * Jan 23 2005 Matt Mackall <mpm@selenic.com>
5 */ 5 */
6 6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
7#include <linux/types.h> 9#include <linux/types.h>
8#include <linux/export.h> 10#include <linux/export.h>
9#include <linux/sort.h> 11#include <linux/sort.h>
@@ -101,42 +103,3 @@ void sort(void *base, size_t num, size_t size,
101} 103}
102 104
103EXPORT_SYMBOL(sort); 105EXPORT_SYMBOL(sort);
104
105#if 0
106#include <linux/slab.h>
107/* a simple boot-time regression test */
108
109int cmpint(const void *a, const void *b)
110{
111 return *(int *)a - *(int *)b;
112}
113
114static int sort_test(void)
115{
116 int *a, i, r = 1;
117
118 a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
119 BUG_ON(!a);
120
121 printk("testing sort()\n");
122
123 for (i = 0; i < 1000; i++) {
124 r = (r * 725861) % 6599;
125 a[i] = r;
126 }
127
128 sort(a, 1000, sizeof(int), cmpint, NULL);
129
130 for (i = 0; i < 999; i++)
131 if (a[i] > a[i+1]) {
132 printk("sort() failed!\n");
133 break;
134 }
135
136 kfree(a);
137
138 return 0;
139}
140
141module_init(sort_test);
142#endif
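The removed #if 0 block was an ad-hoc boot-time check. The comparator contract it relied on is the same as qsort(3): return negative, zero, or positive for less, equal, or greater. A standalone userspace sketch of that contract, using an overflow-safe comparison rather than the plain subtraction above (which can wrap for extreme values):

#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
	int x = *(const int *)a, y = *(const int *)b;

	return (x > y) - (x < y);	/* avoids signed overflow */
}

int main(void)
{
	int v[] = { 3, 1, 2 };

	qsort(v, 3, sizeof(*v), cmp_int);
	printf("%d %d %d\n", v[0], v[1], v[2]);		/* 1 2 3 */
	return 0;
}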
diff --git a/lib/syscall.c b/lib/syscall.c
index 63239e097b13..17d5ff5fa6a3 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -1,5 +1,6 @@
1#include <linux/ptrace.h> 1#include <linux/ptrace.h>
2#include <linux/sched.h> 2#include <linux/sched.h>
3#include <linux/sched/task_stack.h>
3#include <linux/export.h> 4#include <linux/export.h>
4#include <asm/syscall.h> 5#include <asm/syscall.h>
5 6
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index a3e8ec3fb1c5..09371b0a9baf 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -42,12 +42,6 @@ static const struct file_operations test_fw_fops = {
42 .read = test_fw_misc_read, 42 .read = test_fw_misc_read,
43}; 43};
44 44
45static struct miscdevice test_fw_misc_device = {
46 .minor = MISC_DYNAMIC_MINOR,
47 .name = "test_firmware",
48 .fops = &test_fw_fops,
49};
50
51static ssize_t trigger_request_store(struct device *dev, 45static ssize_t trigger_request_store(struct device *dev,
52 struct device_attribute *attr, 46 struct device_attribute *attr,
53 const char *buf, size_t count) 47 const char *buf, size_t count)
@@ -132,39 +126,81 @@ out:
132} 126}
133static DEVICE_ATTR_WO(trigger_async_request); 127static DEVICE_ATTR_WO(trigger_async_request);
134 128
135static int __init test_firmware_init(void) 129static ssize_t trigger_custom_fallback_store(struct device *dev,
130 struct device_attribute *attr,
131 const char *buf, size_t count)
136{ 132{
137 int rc; 133 int rc;
134 char *name;
138 135
139 rc = misc_register(&test_fw_misc_device); 136 name = kstrndup(buf, count, GFP_KERNEL);
137 if (!name)
138 return -ENOSPC;
139
140 pr_info("loading '%s' using custom fallback mechanism\n", name);
141
142 mutex_lock(&test_fw_mutex);
143 release_firmware(test_firmware);
144 test_firmware = NULL;
145 rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
146 dev, GFP_KERNEL, NULL,
147 trigger_async_request_cb);
140 if (rc) { 148 if (rc) {
141 pr_err("could not register misc device: %d\n", rc); 149 pr_info("async load of '%s' failed: %d\n", name, rc);
142 return rc; 150 kfree(name);
151 goto out;
143 } 152 }
144 rc = device_create_file(test_fw_misc_device.this_device, 153 /* Free 'name' ASAP, to test for race conditions */
145 &dev_attr_trigger_request); 154 kfree(name);
146 if (rc) { 155
147 pr_err("could not create sysfs interface: %d\n", rc); 156 wait_for_completion(&async_fw_done);
148 goto dereg; 157
158 if (test_firmware) {
159 pr_info("loaded: %zu\n", test_firmware->size);
160 rc = count;
161 } else {
162 pr_err("failed to async load firmware\n");
163 rc = -ENODEV;
149 } 164 }
150 165
151 rc = device_create_file(test_fw_misc_device.this_device, 166out:
152 &dev_attr_trigger_async_request); 167 mutex_unlock(&test_fw_mutex);
168
169 return rc;
170}
171static DEVICE_ATTR_WO(trigger_custom_fallback);
172
173#define TEST_FW_DEV_ATTR(name) &dev_attr_##name.attr
174
175static struct attribute *test_dev_attrs[] = {
176 TEST_FW_DEV_ATTR(trigger_request),
177 TEST_FW_DEV_ATTR(trigger_async_request),
178 TEST_FW_DEV_ATTR(trigger_custom_fallback),
179 NULL,
180};
181
182ATTRIBUTE_GROUPS(test_dev);
183
184static struct miscdevice test_fw_misc_device = {
185 .minor = MISC_DYNAMIC_MINOR,
186 .name = "test_firmware",
187 .fops = &test_fw_fops,
188 .groups = test_dev_groups,
189};
190
191static int __init test_firmware_init(void)
192{
193 int rc;
194
195 rc = misc_register(&test_fw_misc_device);
153 if (rc) { 196 if (rc) {
154 pr_err("could not create async sysfs interface: %d\n", rc); 197 pr_err("could not register misc device: %d\n", rc);
155 goto remove_file; 198 return rc;
156 } 199 }
157 200
158 pr_warn("interface ready\n"); 201 pr_warn("interface ready\n");
159 202
160 return 0; 203 return 0;
161
162remove_file:
163 device_remove_file(test_fw_misc_device.this_device,
164 &dev_attr_trigger_async_request);
165dereg:
166 misc_deregister(&test_fw_misc_device);
167 return rc;
168} 204}
169 205
170module_init(test_firmware_init); 206module_init(test_firmware_init);
@@ -172,10 +208,6 @@ module_init(test_firmware_init);
172static void __exit test_firmware_exit(void) 208static void __exit test_firmware_exit(void)
173{ 209{
174 release_firmware(test_firmware); 210 release_firmware(test_firmware);
175 device_remove_file(test_fw_misc_device.this_device,
176 &dev_attr_trigger_async_request);
177 device_remove_file(test_fw_misc_device.this_device,
178 &dev_attr_trigger_request);
179 misc_deregister(&test_fw_misc_device); 211 misc_deregister(&test_fw_misc_device);
180 pr_warn("removed interface\n"); 212 pr_warn("removed interface\n");
181} 213}
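
test_firmware now attaches its sysfs attributes through miscdevice.groups
rather than explicit device_create_file()/device_remove_file() pairs, so the
driver core handles creation and teardown. A stripped-down sketch of the same
pattern, with all demo_* names hypothetical:

	#include <linux/device.h>
	#include <linux/fs.h>
	#include <linux/miscdevice.h>

	static ssize_t ping_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "pong\n");
	}
	static DEVICE_ATTR_RO(ping);

	static struct attribute *demo_attrs[] = {
		&dev_attr_ping.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(demo);		/* generates demo_groups[] */

	static const struct file_operations demo_fops; /* placeholder */

	static struct miscdevice demo_misc = {
		.minor	= MISC_DYNAMIC_MINOR,
		.name	= "demo",
		.fops	= &demo_fops,
		.groups	= demo_groups,	/* created/removed by the core */
	};

This also removes the error-unwind labels the old code needed, as seen in the
simplified test_firmware_init() above.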
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index fbdf87920093..0b1d3140fbb8 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -11,6 +11,7 @@
11 11
12#define pr_fmt(fmt) "kasan test: %s " fmt, __func__ 12#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
13 13
14#include <linux/delay.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/mman.h> 16#include <linux/mman.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
@@ -331,6 +332,38 @@ static noinline void __init kmem_cache_oob(void)
331 kmem_cache_destroy(cache); 332 kmem_cache_destroy(cache);
332} 333}
333 334
335static noinline void __init memcg_accounted_kmem_cache(void)
336{
337 int i;
338 char *p;
339 size_t size = 200;
340 struct kmem_cache *cache;
341
342 cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
343 if (!cache) {
344 pr_err("Cache allocation failed\n");
345 return;
346 }
347
348 pr_info("allocate memcg accounted object\n");
349 /*
350 * Several allocations with a delay to allow for lazy per memcg kmem
351 * cache creation.
352 */
353 for (i = 0; i < 5; i++) {
354 p = kmem_cache_alloc(cache, GFP_KERNEL);
355 if (!p) {
356 pr_err("Allocation failed\n");
357 goto free_cache;
358 }
359 kmem_cache_free(cache, p);
360 msleep(100);
361 }
362
363free_cache:
364 kmem_cache_destroy(cache);
365}
366
334static char global_array[10]; 367static char global_array[10];
335 368
336static noinline void __init kasan_global_oob(void) 369static noinline void __init kasan_global_oob(void)
@@ -460,6 +493,7 @@ static int __init kmalloc_tests_init(void)
460 kmalloc_uaf_memset(); 493 kmalloc_uaf_memset();
461 kmalloc_uaf2(); 494 kmalloc_uaf2();
462 kmem_cache_oob(); 495 kmem_cache_oob();
496 memcg_accounted_kmem_cache();
463 kasan_stack_oob(); 497 kasan_stack_oob();
464 kasan_global_oob(); 498 kasan_global_oob();
465 ksize_unpoisons_memory(); 499 ksize_unpoisons_memory();
diff --git a/lib/test_parman.c b/lib/test_parman.c
new file mode 100644
index 000000000000..35e32243693c
--- /dev/null
+++ b/lib/test_parman.c
@@ -0,0 +1,395 @@
1/*
2 * lib/test_parman.c - Test module for parman
3 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/kernel.h>
38#include <linux/module.h>
39#include <linux/slab.h>
40#include <linux/bitops.h>
41#include <linux/err.h>
42#include <linux/random.h>
43#include <linux/parman.h>
44
45#define TEST_PARMAN_PRIO_SHIFT 7 /* defines number of prios for testing */
46#define TEST_PARMAN_PRIO_COUNT BIT(TEST_PARMAN_PRIO_SHIFT)
47#define TEST_PARMAN_PRIO_MASK (TEST_PARMAN_PRIO_COUNT - 1)
48
 49#define TEST_PARMAN_ITEM_SHIFT 13	/* defines the total number
50 * of items for testing
51 */
52#define TEST_PARMAN_ITEM_COUNT BIT(TEST_PARMAN_ITEM_SHIFT)
53#define TEST_PARMAN_ITEM_MASK (TEST_PARMAN_ITEM_COUNT - 1)
54
55#define TEST_PARMAN_BASE_SHIFT 8
56#define TEST_PARMAN_BASE_COUNT BIT(TEST_PARMAN_BASE_SHIFT)
57#define TEST_PARMAN_RESIZE_STEP_SHIFT 7
58#define TEST_PARMAN_RESIZE_STEP_COUNT BIT(TEST_PARMAN_RESIZE_STEP_SHIFT)
59
60#define TEST_PARMAN_BULK_MAX_SHIFT (2 + TEST_PARMAN_RESIZE_STEP_SHIFT)
61#define TEST_PARMAN_BULK_MAX_COUNT BIT(TEST_PARMAN_BULK_MAX_SHIFT)
62#define TEST_PARMAN_BULK_MAX_MASK (TEST_PARMAN_BULK_MAX_COUNT - 1)
63
64#define TEST_PARMAN_RUN_BUDGET (TEST_PARMAN_ITEM_COUNT * 256)
65
66struct test_parman_prio {
67 struct parman_prio parman_prio;
68 unsigned long priority;
69};
70
71struct test_parman_item {
72 struct parman_item parman_item;
73 struct test_parman_prio *prio;
74 bool used;
75};
76
77struct test_parman {
78 struct parman *parman;
79 struct test_parman_item **prio_array;
80 unsigned long prio_array_limit;
81 struct test_parman_prio prios[TEST_PARMAN_PRIO_COUNT];
82 struct test_parman_item items[TEST_PARMAN_ITEM_COUNT];
83 struct rnd_state rnd;
84 unsigned long run_budget;
85 unsigned long bulk_budget;
86 bool bulk_noop;
87 unsigned int used_items;
88};
89
90#define ITEM_PTRS_SIZE(count) (sizeof(struct test_parman_item *) * (count))
91
92static int test_parman_resize(void *priv, unsigned long new_count)
93{
94 struct test_parman *test_parman = priv;
95 struct test_parman_item **prio_array;
96 unsigned long old_count;
97
98 prio_array = krealloc(test_parman->prio_array,
99 ITEM_PTRS_SIZE(new_count), GFP_KERNEL);
100 if (new_count == 0)
101 return 0;
102 if (!prio_array)
103 return -ENOMEM;
104 old_count = test_parman->prio_array_limit;
105 if (new_count > old_count)
106 memset(&prio_array[old_count], 0,
107 ITEM_PTRS_SIZE(new_count - old_count));
108 test_parman->prio_array = prio_array;
109 test_parman->prio_array_limit = new_count;
110 return 0;
111}
112
113static void test_parman_move(void *priv, unsigned long from_index,
114 unsigned long to_index, unsigned long count)
115{
116 struct test_parman *test_parman = priv;
117 struct test_parman_item **prio_array = test_parman->prio_array;
118
119 memmove(&prio_array[to_index], &prio_array[from_index],
120 ITEM_PTRS_SIZE(count));
121 memset(&prio_array[from_index], 0, ITEM_PTRS_SIZE(count));
122}
123
124static const struct parman_ops test_parman_lsort_ops = {
125 .base_count = TEST_PARMAN_BASE_COUNT,
126 .resize_step = TEST_PARMAN_RESIZE_STEP_COUNT,
127 .resize = test_parman_resize,
128 .move = test_parman_move,
129 .algo = PARMAN_ALGO_TYPE_LSORT,
130};
131
132static void test_parman_rnd_init(struct test_parman *test_parman)
133{
134 prandom_seed_state(&test_parman->rnd, 3141592653589793238ULL);
135}
136
137static u32 test_parman_rnd_get(struct test_parman *test_parman)
138{
139 return prandom_u32_state(&test_parman->rnd);
140}
141
142static unsigned long test_parman_priority_gen(struct test_parman *test_parman)
143{
144 unsigned long priority;
145 int i;
146
147again:
148 priority = test_parman_rnd_get(test_parman);
149 if (priority == 0)
150 goto again;
151
152 for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
153 struct test_parman_prio *prio = &test_parman->prios[i];
154
155 if (prio->priority == 0)
156 break;
157 if (prio->priority == priority)
158 goto again;
159 }
160 return priority;
161}
162
163static void test_parman_prios_init(struct test_parman *test_parman)
164{
165 int i;
166
167 for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
168 struct test_parman_prio *prio = &test_parman->prios[i];
169
170		/* Assign a random unique priority to each prio structure */
171 prio->priority = test_parman_priority_gen(test_parman);
172 parman_prio_init(test_parman->parman, &prio->parman_prio,
173 prio->priority);
174 }
175}
176
177static void test_parman_prios_fini(struct test_parman *test_parman)
178{
179 int i;
180
181 for (i = 0; i < TEST_PARMAN_PRIO_COUNT; i++) {
182 struct test_parman_prio *prio = &test_parman->prios[i];
183
184 parman_prio_fini(&prio->parman_prio);
185 }
186}
187
188static void test_parman_items_init(struct test_parman *test_parman)
189{
190 int i;
191
192 for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
193 struct test_parman_item *item = &test_parman->items[i];
194 unsigned int prio_index = test_parman_rnd_get(test_parman) &
195 TEST_PARMAN_PRIO_MASK;
196
197 /* Assign random prio to each item structure */
198 item->prio = &test_parman->prios[prio_index];
199 }
200}
201
202static void test_parman_items_fini(struct test_parman *test_parman)
203{
204 int i;
205
206 for (i = 0; i < TEST_PARMAN_ITEM_COUNT; i++) {
207 struct test_parman_item *item = &test_parman->items[i];
208
209 if (!item->used)
210 continue;
211 parman_item_remove(test_parman->parman,
212 &item->prio->parman_prio,
213 &item->parman_item);
214 }
215}
216
217static struct test_parman *test_parman_create(const struct parman_ops *ops)
218{
219 struct test_parman *test_parman;
220 int err;
221
222 test_parman = kzalloc(sizeof(*test_parman), GFP_KERNEL);
223 if (!test_parman)
224 return ERR_PTR(-ENOMEM);
225 err = test_parman_resize(test_parman, TEST_PARMAN_BASE_COUNT);
226 if (err)
227 goto err_resize;
228 test_parman->parman = parman_create(ops, test_parman);
229 if (!test_parman->parman) {
230 err = -ENOMEM;
231 goto err_parman_create;
232 }
233 test_parman_rnd_init(test_parman);
234 test_parman_prios_init(test_parman);
235 test_parman_items_init(test_parman);
236 test_parman->run_budget = TEST_PARMAN_RUN_BUDGET;
237 return test_parman;
238
239err_parman_create:
240 test_parman_resize(test_parman, 0);
241err_resize:
242 kfree(test_parman);
243 return ERR_PTR(err);
244}
245
246static void test_parman_destroy(struct test_parman *test_parman)
247{
248 test_parman_items_fini(test_parman);
249 test_parman_prios_fini(test_parman);
250 parman_destroy(test_parman->parman);
251 test_parman_resize(test_parman, 0);
252 kfree(test_parman);
253}
254
255static bool test_parman_run_check_budgets(struct test_parman *test_parman)
256{
257 if (test_parman->run_budget-- == 0)
258 return false;
259 if (test_parman->bulk_budget-- != 0)
260 return true;
261
262 test_parman->bulk_budget = test_parman_rnd_get(test_parman) &
263 TEST_PARMAN_BULK_MAX_MASK;
264 test_parman->bulk_noop = test_parman_rnd_get(test_parman) & 1;
265 return true;
266}
267
268static int test_parman_run(struct test_parman *test_parman)
269{
270 unsigned int i = test_parman_rnd_get(test_parman);
271 int err;
272
273 while (test_parman_run_check_budgets(test_parman)) {
274 unsigned int item_index = i++ & TEST_PARMAN_ITEM_MASK;
275 struct test_parman_item *item = &test_parman->items[item_index];
276
277 if (test_parman->bulk_noop)
278 continue;
279
280 if (!item->used) {
281 err = parman_item_add(test_parman->parman,
282 &item->prio->parman_prio,
283 &item->parman_item);
284 if (err)
285 return err;
286 test_parman->prio_array[item->parman_item.index] = item;
287 test_parman->used_items++;
288 } else {
289 test_parman->prio_array[item->parman_item.index] = NULL;
290 parman_item_remove(test_parman->parman,
291 &item->prio->parman_prio,
292 &item->parman_item);
293 test_parman->used_items--;
294 }
295 item->used = !item->used;
296 }
297 return 0;
298}
299
300static int test_parman_check_array(struct test_parman *test_parman,
301 bool gaps_allowed)
302{
303 unsigned int last_unused_items = 0;
304 unsigned long last_priority = 0;
305 unsigned int used_items = 0;
306 int i;
307
308 if (test_parman->prio_array_limit < TEST_PARMAN_BASE_COUNT) {
309 pr_err("Array limit is lower than the base count (%lu < %lu)\n",
310 test_parman->prio_array_limit, TEST_PARMAN_BASE_COUNT);
311 return -EINVAL;
312 }
313
314 for (i = 0; i < test_parman->prio_array_limit; i++) {
315 struct test_parman_item *item = test_parman->prio_array[i];
316
317 if (!item) {
318 last_unused_items++;
319 continue;
320 }
321 if (last_unused_items && !gaps_allowed) {
322 pr_err("Gap found in array even though they are forbidden\n");
323 return -EINVAL;
324 }
325
326 last_unused_items = 0;
327 used_items++;
328
329 if (item->prio->priority < last_priority) {
330 pr_err("Item belongs under higher priority then the last one (current: %lu, previous: %lu)\n",
331 item->prio->priority, last_priority);
332 return -EINVAL;
333 }
334 last_priority = item->prio->priority;
335
336 if (item->parman_item.index != i) {
337 pr_err("Item has different index in compare to where it actually is (%lu != %d)\n",
338 item->parman_item.index, i);
339 return -EINVAL;
340 }
341 }
342
343 if (used_items != test_parman->used_items) {
344 pr_err("Number of used items in array does not match (%u != %u)\n",
345 used_items, test_parman->used_items);
346 return -EINVAL;
347 }
348
349 if (last_unused_items >= TEST_PARMAN_RESIZE_STEP_COUNT) {
350 pr_err("Number of unused item at the end of array is bigger than resize step (%u >= %lu)\n",
351 last_unused_items, TEST_PARMAN_RESIZE_STEP_COUNT);
352 return -EINVAL;
353 }
354
355 pr_info("Priority array check successful\n");
356
357 return 0;
358}
359
360static int test_parman_lsort(void)
361{
362 struct test_parman *test_parman;
363 int err;
364
365 test_parman = test_parman_create(&test_parman_lsort_ops);
366 if (IS_ERR(test_parman))
367 return PTR_ERR(test_parman);
368
369 err = test_parman_run(test_parman);
370 if (err)
371 goto out;
372
373 err = test_parman_check_array(test_parman, false);
374 if (err)
375 goto out;
376out:
377 test_parman_destroy(test_parman);
378 return err;
379}
380
381static int __init test_parman_init(void)
382{
383 return test_parman_lsort();
384}
385
386static void __exit test_parman_exit(void)
387{
388}
389
390module_init(test_parman_init);
391module_exit(test_parman_exit);
392
393MODULE_LICENSE("Dual BSD/GPL");
394MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
395MODULE_DESCRIPTION("Test module for parman");
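
Distilled from the test module above, the parman consumer contract is small:
supply resize/move callbacks via parman_ops, then declare priorities and add
items. A minimal sketch (the demo_* names are hypothetical; the parman_*
calls are the API exercised by the test):

	#include <linux/parman.h>

	static int demo_resize(void *priv, unsigned long new_count)
	{
		/* Grow or shrink the caller's shadow array to new_count. */
		return 0;
	}

	static void demo_move(void *priv, unsigned long from_index,
			      unsigned long to_index, unsigned long count)
	{
		/* Mirror the block move in the caller's shadow array. */
	}

	static const struct parman_ops demo_ops = {
		.base_count	= 16,
		.resize_step	= 16,
		.resize		= demo_resize,
		.move		= demo_move,
		.algo		= PARMAN_ALGO_TYPE_LSORT,
	};

	static int demo_use(void *priv)
	{
		struct parman_prio prio;
		struct parman_item item;
		struct parman *p;
		int err;

		p = parman_create(&demo_ops, priv);
		if (!p)
			return -ENOMEM;
		/* Lower priority values sort earlier in the array. */
		parman_prio_init(p, &prio, 10);
		err = parman_item_add(p, &prio, &item);
		if (!err) {
			pr_info("placed at index %lu\n", item.index);
			parman_item_remove(p, &prio, &item);
		}
		parman_prio_fini(&prio);
		parman_destroy(p);
		return err;
	}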
diff --git a/lib/test_siphash.c b/lib/test_siphash.c
new file mode 100644
index 000000000000..a6d854d933bf
--- /dev/null
+++ b/lib/test_siphash.c
@@ -0,0 +1,223 @@
1/* Test cases for siphash.c
2 *
3 * Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
4 *
5 * This file is provided under a dual BSD/GPLv2 license.
6 *
7 * SipHash: a fast short-input PRF
8 * https://131002.net/siphash/
9 *
10 * This implementation is specifically for SipHash2-4 for a secure PRF
11 * and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
12 * hashtables.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/siphash.h>
18#include <linux/kernel.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/module.h>
22
23/* Test vectors taken from reference source available at:
24 * https://github.com/veorq/SipHash
25 */
26
27static const siphash_key_t test_key_siphash =
28 {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
29
30static const u64 test_vectors_siphash[64] = {
31 0x726fdb47dd0e0e31ULL, 0x74f839c593dc67fdULL, 0x0d6c8009d9a94f5aULL,
32 0x85676696d7fb7e2dULL, 0xcf2794e0277187b7ULL, 0x18765564cd99a68dULL,
33 0xcbc9466e58fee3ceULL, 0xab0200f58b01d137ULL, 0x93f5f5799a932462ULL,
34 0x9e0082df0ba9e4b0ULL, 0x7a5dbbc594ddb9f3ULL, 0xf4b32f46226bada7ULL,
35 0x751e8fbc860ee5fbULL, 0x14ea5627c0843d90ULL, 0xf723ca908e7af2eeULL,
36 0xa129ca6149be45e5ULL, 0x3f2acc7f57c29bdbULL, 0x699ae9f52cbe4794ULL,
37 0x4bc1b3f0968dd39cULL, 0xbb6dc91da77961bdULL, 0xbed65cf21aa2ee98ULL,
38 0xd0f2cbb02e3b67c7ULL, 0x93536795e3a33e88ULL, 0xa80c038ccd5ccec8ULL,
39 0xb8ad50c6f649af94ULL, 0xbce192de8a85b8eaULL, 0x17d835b85bbb15f3ULL,
40 0x2f2e6163076bcfadULL, 0xde4daaaca71dc9a5ULL, 0xa6a2506687956571ULL,
41 0xad87a3535c49ef28ULL, 0x32d892fad841c342ULL, 0x7127512f72f27cceULL,
42 0xa7f32346f95978e3ULL, 0x12e0b01abb051238ULL, 0x15e034d40fa197aeULL,
43 0x314dffbe0815a3b4ULL, 0x027990f029623981ULL, 0xcadcd4e59ef40c4dULL,
44 0x9abfd8766a33735cULL, 0x0e3ea96b5304a7d0ULL, 0xad0c42d6fc585992ULL,
45 0x187306c89bc215a9ULL, 0xd4a60abcf3792b95ULL, 0xf935451de4f21df2ULL,
46 0xa9538f0419755787ULL, 0xdb9acddff56ca510ULL, 0xd06c98cd5c0975ebULL,
47 0xe612a3cb9ecba951ULL, 0xc766e62cfcadaf96ULL, 0xee64435a9752fe72ULL,
48 0xa192d576b245165aULL, 0x0a8787bf8ecb74b2ULL, 0x81b3e73d20b49b6fULL,
49 0x7fa8220ba3b2eceaULL, 0x245731c13ca42499ULL, 0xb78dbfaf3a8d83bdULL,
50 0xea1ad565322a1a0bULL, 0x60e61c23a3795013ULL, 0x6606d7e446282b93ULL,
51 0x6ca4ecb15c5f91e1ULL, 0x9f626da15c9625f3ULL, 0xe51b38608ef25f57ULL,
52 0x958a324ceb064572ULL
53};
54
55#if BITS_PER_LONG == 64
56static const hsiphash_key_t test_key_hsiphash =
57 {{ 0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL }};
58
59static const u32 test_vectors_hsiphash[64] = {
60 0x050fc4dcU, 0x7d57ca93U, 0x4dc7d44dU,
61 0xe7ddf7fbU, 0x88d38328U, 0x49533b67U,
62 0xc59f22a7U, 0x9bb11140U, 0x8d299a8eU,
63 0x6c063de4U, 0x92ff097fU, 0xf94dc352U,
64 0x57b4d9a2U, 0x1229ffa7U, 0xc0f95d34U,
65 0x2a519956U, 0x7d908b66U, 0x63dbd80cU,
66 0xb473e63eU, 0x8d297d1cU, 0xa6cce040U,
67 0x2b45f844U, 0xa320872eU, 0xdae6c123U,
68 0x67349c8cU, 0x705b0979U, 0xca9913a5U,
69 0x4ade3b35U, 0xef6cd00dU, 0x4ab1e1f4U,
70 0x43c5e663U, 0x8c21d1bcU, 0x16a7b60dU,
71 0x7a8ff9bfU, 0x1f2a753eU, 0xbf186b91U,
72 0xada26206U, 0xa3c33057U, 0xae3a36a1U,
73 0x7b108392U, 0x99e41531U, 0x3f1ad944U,
74 0xc8138825U, 0xc28949a6U, 0xfaf8876bU,
75 0x9f042196U, 0x68b1d623U, 0x8b5114fdU,
76 0xdf074c46U, 0x12cc86b3U, 0x0a52098fU,
77 0x9d292f9aU, 0xa2f41f12U, 0x43a71ed0U,
78 0x73f0bce6U, 0x70a7e980U, 0x243c6d75U,
79 0xfdb71513U, 0xa67d8a08U, 0xb7e8f148U,
80 0xf7a644eeU, 0x0f1837f2U, 0x4b6694e0U,
81 0xb7bbb3a8U
82};
83#else
84static const hsiphash_key_t test_key_hsiphash =
85 {{ 0x03020100U, 0x07060504U }};
86
87static const u32 test_vectors_hsiphash[64] = {
88 0x5814c896U, 0xe7e864caU, 0xbc4b0e30U,
89 0x01539939U, 0x7e059ea6U, 0x88e3d89bU,
90 0xa0080b65U, 0x9d38d9d6U, 0x577999b1U,
91 0xc839caedU, 0xe4fa32cfU, 0x959246eeU,
92 0x6b28096cU, 0x66dd9cd6U, 0x16658a7cU,
93 0xd0257b04U, 0x8b31d501U, 0x2b1cd04bU,
94 0x06712339U, 0x522aca67U, 0x911bb605U,
95 0x90a65f0eU, 0xf826ef7bU, 0x62512debU,
96 0x57150ad7U, 0x5d473507U, 0x1ec47442U,
97 0xab64afd3U, 0x0a4100d0U, 0x6d2ce652U,
98 0x2331b6a3U, 0x08d8791aU, 0xbc6dda8dU,
99 0xe0f6c934U, 0xb0652033U, 0x9b9851ccU,
100 0x7c46fb7fU, 0x732ba8cbU, 0xf142997aU,
101 0xfcc9aa1bU, 0x05327eb2U, 0xe110131cU,
102 0xf9e5e7c0U, 0xa7d708a6U, 0x11795ab1U,
103 0x65671619U, 0x9f5fff91U, 0xd89c5267U,
104 0x007783ebU, 0x95766243U, 0xab639262U,
105 0x9c7e1390U, 0xc368dda6U, 0x38ddc455U,
106 0xfa13d379U, 0x979ea4e8U, 0x53ecd77eU,
107 0x2ee80657U, 0x33dbb66aU, 0xae3f0577U,
108 0x88b4c4ccU, 0x3e7f480bU, 0x74c1ebf8U,
109 0x87178304U
110};
111#endif
112
113static int __init siphash_test_init(void)
114{
115 u8 in[64] __aligned(SIPHASH_ALIGNMENT);
116 u8 in_unaligned[65] __aligned(SIPHASH_ALIGNMENT);
117 u8 i;
118 int ret = 0;
119
120 for (i = 0; i < 64; ++i) {
121 in[i] = i;
122 in_unaligned[i + 1] = i;
123 if (siphash(in, i, &test_key_siphash) !=
124 test_vectors_siphash[i]) {
125 pr_info("siphash self-test aligned %u: FAIL\n", i + 1);
126 ret = -EINVAL;
127 }
128 if (siphash(in_unaligned + 1, i, &test_key_siphash) !=
129 test_vectors_siphash[i]) {
130 pr_info("siphash self-test unaligned %u: FAIL\n", i + 1);
131 ret = -EINVAL;
132 }
133 if (hsiphash(in, i, &test_key_hsiphash) !=
134 test_vectors_hsiphash[i]) {
135 pr_info("hsiphash self-test aligned %u: FAIL\n", i + 1);
136 ret = -EINVAL;
137 }
138 if (hsiphash(in_unaligned + 1, i, &test_key_hsiphash) !=
139 test_vectors_hsiphash[i]) {
140 pr_info("hsiphash self-test unaligned %u: FAIL\n", i + 1);
141 ret = -EINVAL;
142 }
143 }
144 if (siphash_1u64(0x0706050403020100ULL, &test_key_siphash) !=
145 test_vectors_siphash[8]) {
146 pr_info("siphash self-test 1u64: FAIL\n");
147 ret = -EINVAL;
148 }
149 if (siphash_2u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
150 &test_key_siphash) != test_vectors_siphash[16]) {
151 pr_info("siphash self-test 2u64: FAIL\n");
152 ret = -EINVAL;
153 }
154 if (siphash_3u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
155 0x1716151413121110ULL, &test_key_siphash) !=
156 test_vectors_siphash[24]) {
157 pr_info("siphash self-test 3u64: FAIL\n");
158 ret = -EINVAL;
159 }
160 if (siphash_4u64(0x0706050403020100ULL, 0x0f0e0d0c0b0a0908ULL,
161 0x1716151413121110ULL, 0x1f1e1d1c1b1a1918ULL,
162 &test_key_siphash) != test_vectors_siphash[32]) {
163 pr_info("siphash self-test 4u64: FAIL\n");
164 ret = -EINVAL;
165 }
166 if (siphash_1u32(0x03020100U, &test_key_siphash) !=
167 test_vectors_siphash[4]) {
168 pr_info("siphash self-test 1u32: FAIL\n");
169 ret = -EINVAL;
170 }
171 if (siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
172 test_vectors_siphash[8]) {
173 pr_info("siphash self-test 2u32: FAIL\n");
174 ret = -EINVAL;
175 }
176 if (siphash_3u32(0x03020100U, 0x07060504U,
177 0x0b0a0908U, &test_key_siphash) !=
178 test_vectors_siphash[12]) {
179 pr_info("siphash self-test 3u32: FAIL\n");
180 ret = -EINVAL;
181 }
182 if (siphash_4u32(0x03020100U, 0x07060504U,
183 0x0b0a0908U, 0x0f0e0d0cU, &test_key_siphash) !=
184 test_vectors_siphash[16]) {
185 pr_info("siphash self-test 4u32: FAIL\n");
186 ret = -EINVAL;
187 }
188 if (hsiphash_1u32(0x03020100U, &test_key_hsiphash) !=
189 test_vectors_hsiphash[4]) {
190 pr_info("hsiphash self-test 1u32: FAIL\n");
191 ret = -EINVAL;
192 }
193 if (hsiphash_2u32(0x03020100U, 0x07060504U, &test_key_hsiphash) !=
194 test_vectors_hsiphash[8]) {
195 pr_info("hsiphash self-test 2u32: FAIL\n");
196 ret = -EINVAL;
197 }
198 if (hsiphash_3u32(0x03020100U, 0x07060504U,
199 0x0b0a0908U, &test_key_hsiphash) !=
200 test_vectors_hsiphash[12]) {
201 pr_info("hsiphash self-test 3u32: FAIL\n");
202 ret = -EINVAL;
203 }
204 if (hsiphash_4u32(0x03020100U, 0x07060504U,
205 0x0b0a0908U, 0x0f0e0d0cU, &test_key_hsiphash) !=
206 test_vectors_hsiphash[16]) {
207 pr_info("hsiphash self-test 4u32: FAIL\n");
208 ret = -EINVAL;
209 }
210 if (!ret)
211 pr_info("self-tests: pass\n");
212 return ret;
213}
214
215static void __exit siphash_test_exit(void)
216{
217}
218
219module_init(siphash_test_init);
220module_exit(siphash_test_exit);
221
222MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
223MODULE_LICENSE("Dual BSD/GPL");
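
One non-obvious detail in the test above: the word-at-a-time checks reuse the
byte-pattern vectors. Because in[] holds the bytes 0x00..0x3f, hashing the
little-endian words 0x03020100 and 0x07060504 must equal hashing the first 8
bytes through the generic path, hence the comparison against
test_vectors_siphash[8]. A sketch of the identity being relied on:

	u8 buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	/* The u32 fast path must agree with the generic byte path. */
	WARN_ON(siphash_2u32(0x03020100U, 0x07060504U, &test_key_siphash) !=
		siphash(buf, sizeof(buf), &test_key_siphash));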
diff --git a/lib/test_sort.c b/lib/test_sort.c
new file mode 100644
index 000000000000..4db3911db50a
--- /dev/null
+++ b/lib/test_sort.c
@@ -0,0 +1,44 @@
1#include <linux/sort.h>
2#include <linux/slab.h>
3#include <linux/init.h>
4
5/*
6 * A simple boot-time regression test
7 * License: GPL
8 */
9
10#define TEST_LEN 1000
11
12static int __init cmpint(const void *a, const void *b)
13{
14 return *(int *)a - *(int *)b;
15}
16
17static int __init test_sort_init(void)
18{
19 int *a, i, r = 1, err = -ENOMEM;
20
21 a = kmalloc_array(TEST_LEN, sizeof(*a), GFP_KERNEL);
22 if (!a)
23 return err;
24
25 for (i = 0; i < TEST_LEN; i++) {
26 r = (r * 725861) % 6599;
27 a[i] = r;
28 }
29
30 sort(a, TEST_LEN, sizeof(*a), cmpint, NULL);
31
32 err = -EINVAL;
33 for (i = 0; i < TEST_LEN-1; i++)
34 if (a[i] > a[i+1]) {
35 pr_err("test has failed\n");
36 goto exit;
37 }
38 err = 0;
39 pr_info("test passed\n");
40exit:
41 kfree(a);
42 return err;
43}
44subsys_initcall(test_sort_init);
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 0ecef3e4690e..1a8d71a68531 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -25,6 +25,24 @@
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27 27
28/*
29 * Several 32-bit architectures support 64-bit {get,put}_user() calls.
30 * As there doesn't appear to be anything that can safely determine
31 * their capability at compile time, we just have to opt out certain archs.
32 */
33#if BITS_PER_LONG == 64 || (!(defined(CONFIG_ARM) && !defined(MMU)) && \
34 !defined(CONFIG_AVR32) && \
35 !defined(CONFIG_BLACKFIN) && \
36 !defined(CONFIG_M32R) && \
37 !defined(CONFIG_M68K) && \
38 !defined(CONFIG_MICROBLAZE) && \
39 !defined(CONFIG_MN10300) && \
40 !defined(CONFIG_NIOS2) && \
41 !defined(CONFIG_PPC32) && \
42 !defined(CONFIG_SUPERH))
43# define TEST_U64
44#endif
45
28#define test(condition, msg) \ 46#define test(condition, msg) \
29({ \ 47({ \
30 int cond = (condition); \ 48 int cond = (condition); \
@@ -40,7 +58,12 @@ static int __init test_user_copy_init(void)
40 char __user *usermem; 58 char __user *usermem;
41 char *bad_usermem; 59 char *bad_usermem;
42 unsigned long user_addr; 60 unsigned long user_addr;
43 unsigned long value = 0x5A; 61 u8 val_u8;
62 u16 val_u16;
63 u32 val_u32;
64#ifdef TEST_U64
65 u64 val_u64;
66#endif
44 67
45 kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); 68 kmem = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
46 if (!kmem) 69 if (!kmem)
@@ -58,33 +81,100 @@ static int __init test_user_copy_init(void)
58 usermem = (char __user *)user_addr; 81 usermem = (char __user *)user_addr;
59 bad_usermem = (char *)user_addr; 82 bad_usermem = (char *)user_addr;
60 83
61 /* Legitimate usage: none of these should fail. */ 84 /*
62 ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE), 85 * Legitimate usage: none of these copies should fail.
63 "legitimate copy_from_user failed"); 86 */
87 memset(kmem, 0x3a, PAGE_SIZE * 2);
64 ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE), 88 ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
65 "legitimate copy_to_user failed"); 89 "legitimate copy_to_user failed");
66 ret |= test(get_user(value, (unsigned long __user *)usermem), 90 memset(kmem, 0x0, PAGE_SIZE);
67 "legitimate get_user failed"); 91 ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
68 ret |= test(put_user(value, (unsigned long __user *)usermem), 92 "legitimate copy_from_user failed");
69 "legitimate put_user failed"); 93 ret |= test(memcmp(kmem, kmem + PAGE_SIZE, PAGE_SIZE),
70 94 "legitimate usercopy failed to copy data");
71 /* Invalid usage: none of these should succeed. */ 95
96#define test_legit(size, check) \
97 do { \
98 val_##size = check; \
99 ret |= test(put_user(val_##size, (size __user *)usermem), \
100 "legitimate put_user (" #size ") failed"); \
101 val_##size = 0; \
102 ret |= test(get_user(val_##size, (size __user *)usermem), \
103 "legitimate get_user (" #size ") failed"); \
104 ret |= test(val_##size != check, \
105 "legitimate get_user (" #size ") failed to do copy"); \
106 if (val_##size != check) { \
107 pr_info("0x%llx != 0x%llx\n", \
108 (unsigned long long)val_##size, \
109 (unsigned long long)check); \
110 } \
111 } while (0)
112
113 test_legit(u8, 0x5a);
114 test_legit(u16, 0x5a5b);
115 test_legit(u32, 0x5a5b5c5d);
116#ifdef TEST_U64
117 test_legit(u64, 0x5a5b5c5d6a6b6c6d);
118#endif
119#undef test_legit
120
121 /*
122 * Invalid usage: none of these copies should succeed.
123 */
124
125 /* Prepare kernel memory with check values. */
126 memset(kmem, 0x5a, PAGE_SIZE);
127 memset(kmem + PAGE_SIZE, 0, PAGE_SIZE);
128
129 /* Reject kernel-to-kernel copies through copy_from_user(). */
72 ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE), 130 ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
73 PAGE_SIZE), 131 PAGE_SIZE),
74 "illegal all-kernel copy_from_user passed"); 132 "illegal all-kernel copy_from_user passed");
133
134 /* Destination half of buffer should have been zeroed. */
135 ret |= test(memcmp(kmem + PAGE_SIZE, kmem, PAGE_SIZE),
136 "zeroing failure for illegal all-kernel copy_from_user");
137
138#if 0
139 /*
140 * When running with SMAP/PAN/etc, this will Oops the kernel
141 * due to the zeroing of userspace memory on failure. This needs
142 * to be tested in LKDTM instead, since this test module does not
143 * expect to explode.
144 */
75 ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem, 145 ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
76 PAGE_SIZE), 146 PAGE_SIZE),
77 "illegal reversed copy_from_user passed"); 147 "illegal reversed copy_from_user passed");
148#endif
78 ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE, 149 ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
79 PAGE_SIZE), 150 PAGE_SIZE),
80 "illegal all-kernel copy_to_user passed"); 151 "illegal all-kernel copy_to_user passed");
81 ret |= test(!copy_to_user((char __user *)kmem, bad_usermem, 152 ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
82 PAGE_SIZE), 153 PAGE_SIZE),
83 "illegal reversed copy_to_user passed"); 154 "illegal reversed copy_to_user passed");
84 ret |= test(!get_user(value, (unsigned long __user *)kmem), 155
85 "illegal get_user passed"); 156#define test_illegal(size, check) \
86 ret |= test(!put_user(value, (unsigned long __user *)kmem), 157 do { \
87 "illegal put_user passed"); 158 val_##size = (check); \
159 ret |= test(!get_user(val_##size, (size __user *)kmem), \
160 "illegal get_user (" #size ") passed"); \
161 ret |= test(val_##size != (size)0, \
162 "zeroing failure for illegal get_user (" #size ")"); \
163 if (val_##size != (size)0) { \
164 pr_info("0x%llx != 0\n", \
165 (unsigned long long)val_##size); \
166 } \
167 ret |= test(!put_user(val_##size, (size __user *)kmem), \
168 "illegal put_user (" #size ") passed"); \
169 } while (0)
170
171 test_illegal(u8, 0x5a);
172 test_illegal(u16, 0x5a5b);
173 test_illegal(u32, 0x5a5b5c5d);
174#ifdef TEST_U64
175 test_illegal(u64, 0x5a5b5c5d6a6b6c6d);
176#endif
177#undef test_illegal
88 178
89 vm_munmap(user_addr, PAGE_SIZE * 2); 179 vm_munmap(user_addr, PAGE_SIZE * 2);
90 kfree(kmem); 180 kfree(kmem);
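
For readability, the token-pasting test macros above expand once per integer
width; test_legit(u16, 0x5a5b), for example, becomes roughly the following
(the diagnostic pr_info() on mismatch is omitted here):

	val_u16 = 0x5a5b;
	ret |= test(put_user(val_u16, (u16 __user *)usermem),
		    "legitimate put_user (u16) failed");
	val_u16 = 0;
	ret |= test(get_user(val_u16, (u16 __user *)usermem),
		    "legitimate get_user (u16) failed");
	ret |= test(val_u16 != 0x5a5b,
		    "legitimate get_user (u16) failed to do copy");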
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 0967771d8f7f..e3bf4e0f10b5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1739,6 +1739,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1739 * 'h', 'l', or 'L' for integer fields 1739 * 'h', 'l', or 'L' for integer fields
1740 * 'z' support added 23/7/1999 S.H. 1740 * 'z' support added 23/7/1999 S.H.
1741 * 'z' changed to 'Z' --davidm 1/25/99 1741 * 'z' changed to 'Z' --davidm 1/25/99
1742 * 'Z' changed to 'z' --adobriyan 2017-01-25
1742 * 't' added for ptrdiff_t 1743 * 't' added for ptrdiff_t
1743 * 1744 *
1744 * @fmt: the format string 1745 * @fmt: the format string
@@ -1838,7 +1839,7 @@ qualifier:
1838 /* get the conversion qualifier */ 1839 /* get the conversion qualifier */
1839 qualifier = 0; 1840 qualifier = 0;
1840 if (*fmt == 'h' || _tolower(*fmt) == 'l' || 1841 if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
1841 _tolower(*fmt) == 'z' || *fmt == 't') { 1842 *fmt == 'z' || *fmt == 't') {
1842 qualifier = *fmt++; 1843 qualifier = *fmt++;
1843 if (unlikely(qualifier == *fmt)) { 1844 if (unlikely(qualifier == *fmt)) {
1844 if (qualifier == 'l') { 1845 if (qualifier == 'l') {
@@ -1907,7 +1908,7 @@ qualifier:
1907 else if (qualifier == 'l') { 1908 else if (qualifier == 'l') {
1908 BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG); 1909 BUILD_BUG_ON(FORMAT_TYPE_ULONG + SIGN != FORMAT_TYPE_LONG);
1909 spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN); 1910 spec->type = FORMAT_TYPE_ULONG + (spec->flags & SIGN);
1910 } else if (_tolower(qualifier) == 'z') { 1911 } else if (qualifier == 'z') {
1911 spec->type = FORMAT_TYPE_SIZE_T; 1912 spec->type = FORMAT_TYPE_SIZE_T;
1912 } else if (qualifier == 't') { 1913 } else if (qualifier == 't') {
1913 spec->type = FORMAT_TYPE_PTRDIFF; 1914 spec->type = FORMAT_TYPE_PTRDIFF;
@@ -2657,7 +2658,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2657 /* get conversion qualifier */ 2658 /* get conversion qualifier */
2658 qualifier = -1; 2659 qualifier = -1;
2659 if (*fmt == 'h' || _tolower(*fmt) == 'l' || 2660 if (*fmt == 'h' || _tolower(*fmt) == 'l' ||
2660 _tolower(*fmt) == 'z') { 2661 *fmt == 'z') {
2661 qualifier = *fmt++; 2662 qualifier = *fmt++;
2662 if (unlikely(qualifier == *fmt)) { 2663 if (unlikely(qualifier == *fmt)) {
2663 if (qualifier == 'h') { 2664 if (qualifier == 'h') {
@@ -2851,7 +2852,6 @@ int vsscanf(const char *buf, const char *fmt, va_list args)
2851 else 2852 else
2852 *va_arg(args, unsigned long long *) = val.u; 2853 *va_arg(args, unsigned long long *) = val.u;
2853 break; 2854 break;
2854 case 'Z':
2855 case 'z': 2855 case 'z':
2856 *va_arg(args, size_t *) = val.u; 2856 *va_arg(args, size_t *) = val.u;
2857 break; 2857 break;
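
The net effect of the vsprintf changes is that only the standard C99 'z'
length modifier is recognized for size_t; the old nonstandard 'Z' alias is
dropped from both the printf and scanf paths. A short sketch (hypothetical
values):

	size_t n = sizeof(long);

	pr_info("n = %zu\n", n);	/* OK: C99 size_t modifier */
	pr_info("n = %Zu\n", n);	/* 'Z' is no longer parsed as size_t */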