aboutsummaryrefslogtreecommitdiffstats
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--lib/Kconfig18
-rw-r--r--lib/Kconfig.debug49
-rw-r--r--lib/Kconfig.kasan107
-rw-r--r--lib/Makefile12
-rw-r--r--lib/bitmap.c26
-rw-r--r--lib/bust_spinlocks.c6
-rw-r--r--lib/chacha.c (renamed from lib/chacha20.c)61
-rw-r--r--lib/cordic.c23
-rw-r--r--lib/cpumask.c4
-rw-r--r--lib/crc-t10dif.c57
-rw-r--r--lib/crc32.c11
-rw-r--r--lib/debug_locks.c6
-rw-r--r--lib/debugobjects.c13
-rw-r--r--lib/dynamic_debug.c2
-rw-r--r--lib/find_bit_benchmark.c11
-rw-r--r--lib/fonts/Kconfig10
-rw-r--r--lib/fonts/Makefile1
-rw-r--r--lib/fonts/font_ter16x32.c2072
-rw-r--r--lib/fonts/fonts.c4
-rw-r--r--lib/gcd.c2
-rw-r--r--lib/gen_crc64table.c2
-rw-r--r--lib/genalloc.c25
-rw-r--r--lib/idr.c401
-rw-r--r--lib/ioremap.c103
-rw-r--r--lib/iov_iter.c236
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/kobject_uevent.c4
-rw-r--r--lib/kstrtox.c16
-rw-r--r--lib/lz4/lz4_decompress.c481
-rw-r--r--lib/lz4/lz4defs.h9
-rw-r--r--lib/memcat_p.c34
-rw-r--r--lib/nlattr.c269
-rw-r--r--lib/objagg.c501
-rw-r--r--lib/parser.c16
-rw-r--r--lib/percpu-refcount.c30
-rw-r--r--lib/radix-tree.c838
-rw-r--r--lib/raid6/Makefile20
-rw-r--r--lib/raid6/algos.c81
-rw-r--r--lib/raid6/test/Makefile7
-rw-r--r--lib/rhashtable.c8
-rw-r--r--lib/sbitmap.c170
-rw-r--r--lib/scatterlist.c2
-rw-r--r--lib/seq_buf.c8
-rw-r--r--lib/sg_pool.c7
-rw-r--r--lib/show_mem.c5
-rw-r--r--lib/string.c1
-rw-r--r--lib/strncpy_from_user.c9
-rw-r--r--lib/strnlen_user.c9
-rw-r--r--lib/test_bpf.c15
-rw-r--r--lib/test_debug_virtual.c1
-rw-r--r--lib/test_firmware.c1
-rw-r--r--lib/test_hexdump.c2
-rw-r--r--lib/test_kasan.c70
-rw-r--r--lib/test_kmod.c1
-rw-r--r--lib/test_memcat_p.c115
-rw-r--r--lib/test_objagg.c836
-rw-r--r--lib/test_printf.c61
-rw-r--r--lib/test_rhashtable.c32
-rw-r--r--lib/test_xarray.c1351
-rw-r--r--lib/ubsan.c3
-rw-r--r--lib/usercopy.c4
-rw-r--r--lib/vsprintf.c343
-rw-r--r--lib/xarray.c2015
-rw-r--r--lib/zlib_inflate/inflate.c12
64 files changed, 9046 insertions, 1605 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index a3928d4438b5..a9e56539bd11 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -10,6 +10,14 @@ menu "Library routines"
10config RAID6_PQ 10config RAID6_PQ
11 tristate 11 tristate
12 12
13config RAID6_PQ_BENCHMARK
14 bool "Automatically choose fastest RAID6 PQ functions"
15 depends on RAID6_PQ
16 default y
17 help
18 Benchmark all available RAID6 PQ functions on init and choose the
19 fastest one.
20
13config BITREVERSE 21config BITREVERSE
14 tristate 22 tristate
15 23
@@ -399,8 +407,11 @@ config INTERVAL_TREE
399 407
400 for more information. 408 for more information.
401 409
402config RADIX_TREE_MULTIORDER 410config XARRAY_MULTI
403 bool 411 bool
412 help
413 Support entries which occupy multiple consecutive indices in the
414 XArray.
404 415
405config ASSOCIATIVE_ARRAY 416config ASSOCIATIVE_ARRAY
406 bool 417 bool
@@ -574,7 +585,7 @@ config SG_POOL
574# sg chaining option 585# sg chaining option
575# 586#
576 587
577config ARCH_HAS_SG_CHAIN 588config ARCH_NO_SG_CHAIN
578 def_bool n 589 def_bool n
579 590
580config ARCH_HAS_PMEM_API 591config ARCH_HAS_PMEM_API
@@ -621,3 +632,6 @@ config GENERIC_LIB_CMPDI2
621 632
622config GENERIC_LIB_UCMPDI2 633config GENERIC_LIB_UCMPDI2
623 bool 634 bool
635
636config OBJAGG
637 tristate "objagg" if COMPILE_TEST
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4966c4fbe7f7..d4df5b24d75e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -439,7 +439,7 @@ config DEBUG_KERNEL
439 439
440menu "Memory Debugging" 440menu "Memory Debugging"
441 441
442source mm/Kconfig.debug 442source "mm/Kconfig.debug"
443 443
444config DEBUG_OBJECTS 444config DEBUG_OBJECTS
445 bool "Debug object operations" 445 bool "Debug object operations"
@@ -593,6 +593,21 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF
593 Say Y here to disable kmemleak by default. It can then be enabled 593 Say Y here to disable kmemleak by default. It can then be enabled
594 on the command line via kmemleak=on. 594 on the command line via kmemleak=on.
595 595
596config DEBUG_KMEMLEAK_AUTO_SCAN
597 bool "Enable kmemleak auto scan thread on boot up"
598 default y
599 depends on DEBUG_KMEMLEAK
600 help
601 Depending on the cpu, kmemleak scan may be cpu intensive and can
602 stall user tasks at times. This option enables/disables automatic
603 kmemleak scan at boot up.
604
605 Say N here to disable kmemleak auto scan thread to stop automatic
606 scanning. Disabling this option disables automatic reporting of
607 memory leaks.
608
609 If unsure, say Y.
610
596config DEBUG_STACK_USAGE 611config DEBUG_STACK_USAGE
597 bool "Stack utilization instrumentation" 612 bool "Stack utilization instrumentation"
598 depends on DEBUG_KERNEL && !IA64 613 depends on DEBUG_KERNEL && !IA64
@@ -1179,7 +1194,7 @@ config LOCKDEP
1179 bool 1194 bool
1180 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT 1195 depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
1181 select STACKTRACE 1196 select STACKTRACE
1182 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !X86 1197 select FRAME_POINTER if !MIPS && !PPC && !ARM && !S390 && !MICROBLAZE && !ARC && !X86
1183 select KALLSYMS 1198 select KALLSYMS
1184 select KALLSYMS_ALL 1199 select KALLSYMS_ALL
1185 1200
@@ -1292,7 +1307,7 @@ config DEBUG_KOBJECT
1292 depends on DEBUG_KERNEL 1307 depends on DEBUG_KERNEL
1293 help 1308 help
1294 If you say Y here, some extra kobject debugging messages will be sent 1309 If you say Y here, some extra kobject debugging messages will be sent
1295 to the syslog. 1310 to the syslog.
1296 1311
1297config DEBUG_KOBJECT_RELEASE 1312config DEBUG_KOBJECT_RELEASE
1298 bool "kobject release debugging" 1313 bool "kobject release debugging"
@@ -1590,7 +1605,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1590 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1605 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1591 depends on !X86_64 1606 depends on !X86_64
1592 select STACKTRACE 1607 select STACKTRACE
1593 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86 1608 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
1594 help 1609 help
1595 Provide stacktrace filter for fault-injection capabilities 1610 Provide stacktrace filter for fault-injection capabilities
1596 1611
@@ -1599,7 +1614,7 @@ config LATENCYTOP
1599 depends on DEBUG_KERNEL 1614 depends on DEBUG_KERNEL
1600 depends on STACKTRACE_SUPPORT 1615 depends on STACKTRACE_SUPPORT
1601 depends on PROC_FS 1616 depends on PROC_FS
1602 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !X86 1617 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM && !ARC && !X86
1603 select KALLSYMS 1618 select KALLSYMS
1604 select KALLSYMS_ALL 1619 select KALLSYMS_ALL
1605 select STACKTRACE 1620 select STACKTRACE
@@ -1609,7 +1624,7 @@ config LATENCYTOP
1609 Enable this option if you want to use the LatencyTOP tool 1624 Enable this option if you want to use the LatencyTOP tool
1610 to find out which userspace is blocking on what kernel operations. 1625 to find out which userspace is blocking on what kernel operations.
1611 1626
1612source kernel/trace/Kconfig 1627source "kernel/trace/Kconfig"
1613 1628
1614config PROVIDE_OHCI1394_DMA_INIT 1629config PROVIDE_OHCI1394_DMA_INIT
1615 bool "Remote debugging over FireWire early on boot" 1630 bool "Remote debugging over FireWire early on boot"
@@ -1813,6 +1828,9 @@ config TEST_BITFIELD
1813config TEST_UUID 1828config TEST_UUID
1814 tristate "Test functions located in the uuid module at runtime" 1829 tristate "Test functions located in the uuid module at runtime"
1815 1830
1831config TEST_XARRAY
1832 tristate "Test the XArray code at runtime"
1833
1816config TEST_OVERFLOW 1834config TEST_OVERFLOW
1817 tristate "Test check_*_overflow() functions at runtime" 1835 tristate "Test check_*_overflow() functions at runtime"
1818 1836
@@ -1965,11 +1983,28 @@ config TEST_DEBUG_VIRTUAL
1965 1983
1966 If unsure, say N. 1984 If unsure, say N.
1967 1985
1986config TEST_MEMCAT_P
1987 tristate "Test memcat_p() helper function"
1988 help
1989 Test the memcat_p() helper for correctly merging two
1990 pointer arrays together.
1991
1992 If unsure, say N.
1993
1994config TEST_OBJAGG
1995 tristate "Perform selftest on object aggreration manager"
1996 default n
1997 depends on OBJAGG
1998 help
1999 Enable this option to test object aggregation manager on boot
2000 (or module load).
2001
2002 If unsure, say N.
2003
1968endif # RUNTIME_TESTING_MENU 2004endif # RUNTIME_TESTING_MENU
1969 2005
1970config MEMTEST 2006config MEMTEST
1971 bool "Memtest" 2007 bool "Memtest"
1972 depends on HAVE_MEMBLOCK
1973 ---help--- 2008 ---help---
1974 This option adds a kernel parameter 'memtest', which allows memtest 2009 This option adds a kernel parameter 'memtest', which allows memtest
1975 to be set. 2010 to be set.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index befb127507c0..d8c474b6691e 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -1,36 +1,92 @@
1# This config refers to the generic KASAN mode.
1config HAVE_ARCH_KASAN 2config HAVE_ARCH_KASAN
2 bool 3 bool
3 4
4if HAVE_ARCH_KASAN 5config HAVE_ARCH_KASAN_SW_TAGS
6 bool
7
8config CC_HAS_KASAN_GENERIC
9 def_bool $(cc-option, -fsanitize=kernel-address)
10
11config CC_HAS_KASAN_SW_TAGS
12 def_bool $(cc-option, -fsanitize=kernel-hwaddress)
5 13
6config KASAN 14config KASAN
7 bool "KASan: runtime memory debugger" 15 bool "KASAN: runtime memory debugger"
16 depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC) || \
17 (HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)
18 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
19 help
20 Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
21 designed to find out-of-bounds accesses and use-after-free bugs.
22 See Documentation/dev-tools/kasan.rst for details.
23
24choice
25 prompt "KASAN mode"
26 depends on KASAN
27 default KASAN_GENERIC
28 help
29 KASAN has two modes: generic KASAN (similar to userspace ASan,
30 x86_64/arm64/xtensa, enabled with CONFIG_KASAN_GENERIC) and
31 software tag-based KASAN (a version based on software memory
32 tagging, arm64 only, similar to userspace HWASan, enabled with
33 CONFIG_KASAN_SW_TAGS).
34 Both generic and tag-based KASAN are strictly debugging features.
35
36config KASAN_GENERIC
37 bool "Generic mode"
38 depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
8 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB) 39 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
9 select SLUB_DEBUG if SLUB 40 select SLUB_DEBUG if SLUB
10 select CONSTRUCTORS 41 select CONSTRUCTORS
11 select STACKDEPOT 42 select STACKDEPOT
12 help 43 help
13 Enables kernel address sanitizer - runtime memory debugger, 44 Enables generic KASAN mode.
14 designed to find out-of-bounds accesses and use-after-free bugs. 45 Supported in both GCC and Clang. With GCC it requires version 4.9.2
15 This is strictly a debugging feature and it requires a gcc version 46 or later for basic support and version 5.0 or later for detection of
16 of 4.9.2 or later. Detection of out of bounds accesses to stack or 47 out-of-bounds accesses for stack and global variables and for inline
17 global variables requires gcc 5.0 or later. 48 instrumentation mode (CONFIG_KASAN_INLINE). With Clang it requires
18 This feature consumes about 1/8 of available memory and brings about 49 version 3.7.0 or later and it doesn't support detection of
19 ~x3 performance slowdown. 50 out-of-bounds accesses for global variables yet.
51 This mode consumes about 1/8th of available memory at kernel start
52 and introduces an overhead of ~x1.5 for the rest of the allocations.
53 The performance slowdown is ~x3.
54 For better error detection enable CONFIG_STACKTRACE.
55 Currently CONFIG_KASAN_GENERIC doesn't work with CONFIG_DEBUG_SLAB
56 (the resulting kernel does not boot).
57
58config KASAN_SW_TAGS
59 bool "Software tag-based mode"
60 depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
61 depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
62 select SLUB_DEBUG if SLUB
63 select CONSTRUCTORS
64 select STACKDEPOT
65 help
66 Enables software tag-based KASAN mode.
67 This mode requires Top Byte Ignore support by the CPU and therefore
68 is only supported for arm64.
69 This mode requires Clang version 7.0.0 or later.
70 This mode consumes about 1/16th of available memory at kernel start
71 and introduces an overhead of ~20% for the rest of the allocations.
72 This mode may potentially introduce problems relating to pointer
73 casting and comparison, as it embeds tags into the top byte of each
74 pointer.
20 For better error detection enable CONFIG_STACKTRACE. 75 For better error detection enable CONFIG_STACKTRACE.
21 Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB 76 Currently CONFIG_KASAN_SW_TAGS doesn't work with CONFIG_DEBUG_SLAB
22 (the resulting kernel does not boot). 77 (the resulting kernel does not boot).
23 78
79endchoice
80
24config KASAN_EXTRA 81config KASAN_EXTRA
25 bool "KAsan: extra checks" 82 bool "KASAN: extra checks"
26 depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST 83 depends on KASAN_GENERIC && DEBUG_KERNEL && !COMPILE_TEST
27 help 84 help
28 This enables further checks in the kernel address sanitizer, for now 85 This enables further checks in generic KASAN, for now it only
29 it only includes the address-use-after-scope check that can lead 86 includes the address-use-after-scope check that can lead to
30 to excessive kernel stack usage, frame size warnings and longer 87 excessive kernel stack usage, frame size warnings and longer
31 compile time. 88 compile time.
32 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more 89 See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715
33
34 90
35choice 91choice
36 prompt "Instrumentation type" 92 prompt "Instrumentation type"
@@ -53,16 +109,23 @@ config KASAN_INLINE
53 memory accesses. This is faster than outline (in some workloads 109 memory accesses. This is faster than outline (in some workloads
54 it gives about x2 boost over outline instrumentation), but 110 it gives about x2 boost over outline instrumentation), but
55 make kernel's .text size much bigger. 111 make kernel's .text size much bigger.
56 This requires a gcc version of 5.0 or later. 112 For CONFIG_KASAN_GENERIC this requires GCC 5.0 or later.
57 113
58endchoice 114endchoice
59 115
116config KASAN_S390_4_LEVEL_PAGING
117 bool "KASan: use 4-level paging"
118 depends on KASAN && S390
119 help
120 Compiling the kernel with KASan disables automatic 3-level vs
121 4-level paging selection. 3-level paging is used by default (up
122 to 3TB of RAM with KASan enabled). This options allows to force
123 4-level paging instead.
124
60config TEST_KASAN 125config TEST_KASAN
61 tristate "Module for testing kasan for bug detection" 126 tristate "Module for testing KASAN for bug detection"
62 depends on m && KASAN 127 depends on m && KASAN
63 help 128 help
64 This is a test module doing various nasty things like 129 This is a test module doing various nasty things like
65 out of bounds accesses, use after free. It is useful for testing 130 out of bounds accesses, use after free. It is useful for testing
66 kernel debugging features like kernel address sanitizer. 131 kernel debugging features like KASAN.
67
68endif
diff --git a/lib/Makefile b/lib/Makefile
index 423876446810..e1b59da71418 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -18,13 +18,13 @@ KCOV_INSTRUMENT_debugobjects.o := n
18KCOV_INSTRUMENT_dynamic_debug.o := n 18KCOV_INSTRUMENT_dynamic_debug.o := n
19 19
20lib-y := ctype.o string.o vsprintf.o cmdline.o \ 20lib-y := ctype.o string.o vsprintf.o cmdline.o \
21 rbtree.o radix-tree.o timerqueue.o\ 21 rbtree.o radix-tree.o timerqueue.o xarray.o \
22 idr.o int_sqrt.o extable.o \ 22 idr.o int_sqrt.o extable.o \
23 sha1.o chacha20.o irq_regs.o argv_split.o \ 23 sha1.o chacha.o irq_regs.o argv_split.o \
24 flex_proportions.o ratelimit.o show_mem.o \ 24 flex_proportions.o ratelimit.o show_mem.o \
25 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 25 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
26 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ 26 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
27 nmi_backtrace.o nodemask.o win_minmax.o 27 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
28 28
29lib-$(CONFIG_PRINTK) += dump_stack.o 29lib-$(CONFIG_PRINTK) += dump_stack.o
30lib-$(CONFIG_MMU) += ioremap.o 30lib-$(CONFIG_MMU) += ioremap.o
@@ -53,7 +53,9 @@ obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
53obj-$(CONFIG_TEST_IDA) += test_ida.o 53obj-$(CONFIG_TEST_IDA) += test_ida.o
54obj-$(CONFIG_TEST_KASAN) += test_kasan.o 54obj-$(CONFIG_TEST_KASAN) += test_kasan.o
55CFLAGS_test_kasan.o += -fno-builtin 55CFLAGS_test_kasan.o += -fno-builtin
56CFLAGS_test_kasan.o += $(call cc-disable-warning, vla)
56obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o 57obj-$(CONFIG_TEST_UBSAN) += test_ubsan.o
58CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla)
57UBSAN_SANITIZE_test_ubsan.o := y 59UBSAN_SANITIZE_test_ubsan.o := y
58obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o 60obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
59obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o 61obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
@@ -68,9 +70,12 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
68obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o 70obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
69obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o 71obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
70obj-$(CONFIG_TEST_UUID) += test_uuid.o 72obj-$(CONFIG_TEST_UUID) += test_uuid.o
73obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
71obj-$(CONFIG_TEST_PARMAN) += test_parman.o 74obj-$(CONFIG_TEST_PARMAN) += test_parman.o
72obj-$(CONFIG_TEST_KMOD) += test_kmod.o 75obj-$(CONFIG_TEST_KMOD) += test_kmod.o
73obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o 76obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
77obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
78obj-$(CONFIG_TEST_OBJAGG) += test_objagg.o
74 79
75ifeq ($(CONFIG_DEBUG_KOBJECT),y) 80ifeq ($(CONFIG_DEBUG_KOBJECT),y)
76CFLAGS_kobject.o += -DDEBUG 81CFLAGS_kobject.o += -DDEBUG
@@ -270,3 +275,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
270obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o 275obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
271obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o 276obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
272obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o 277obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
278obj-$(CONFIG_OBJAGG) += objagg.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 2fd07f6df0b8..98872e9025da 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -13,6 +13,7 @@
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/bug.h> 14#include <linux/bug.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/mm.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/uaccess.h> 19#include <linux/uaccess.h>
@@ -36,11 +37,6 @@
36 * carefully filter out these unused bits from impacting their 37 * carefully filter out these unused bits from impacting their
37 * results. 38 * results.
38 * 39 *
39 * These operations actually hold to a slightly stronger rule:
40 * if you don't input any bitmaps to these ops that have some
41 * unused bits set, then they won't output any set unused bits
42 * in output bitmaps.
43 *
44 * The byte ordering of bitmaps is more natural on little 40 * The byte ordering of bitmaps is more natural on little
45 * endian architectures. See the big-endian headers 41 * endian architectures. See the big-endian headers
46 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h 42 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
@@ -447,7 +443,7 @@ int bitmap_parse_user(const char __user *ubuf,
447 unsigned int ulen, unsigned long *maskp, 443 unsigned int ulen, unsigned long *maskp,
448 int nmaskbits) 444 int nmaskbits)
449{ 445{
450 if (!access_ok(VERIFY_READ, ubuf, ulen)) 446 if (!access_ok(ubuf, ulen))
451 return -EFAULT; 447 return -EFAULT;
452 return __bitmap_parse((const char __force *)ubuf, 448 return __bitmap_parse((const char __force *)ubuf,
453 ulen, 1, maskp, nmaskbits); 449 ulen, 1, maskp, nmaskbits);
@@ -466,20 +462,18 @@ EXPORT_SYMBOL(bitmap_parse_user);
466 * ranges if list is specified or hex digits grouped into comma-separated 462 * ranges if list is specified or hex digits grouped into comma-separated
467 * sets of 8 digits/set. Returns the number of characters written to buf. 463 * sets of 8 digits/set. Returns the number of characters written to buf.
468 * 464 *
469 * It is assumed that @buf is a pointer into a PAGE_SIZE area and that 465 * It is assumed that @buf is a pointer into a PAGE_SIZE, page-aligned
470 * sufficient storage remains at @buf to accommodate the 466 * area and that sufficient storage remains at @buf to accommodate the
471 * bitmap_print_to_pagebuf() output. 467 * bitmap_print_to_pagebuf() output. Returns the number of characters
468 * actually printed to @buf, excluding terminating '\0'.
472 */ 469 */
473int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, 470int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp,
474 int nmaskbits) 471 int nmaskbits)
475{ 472{
476 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; 473 ptrdiff_t len = PAGE_SIZE - offset_in_page(buf);
477 int n = 0;
478 474
479 if (len > 1) 475 return list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) :
480 n = list ? scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : 476 scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
481 scnprintf(buf, len, "%*pb\n", nmaskbits, maskp);
482 return n;
483} 477}
484EXPORT_SYMBOL(bitmap_print_to_pagebuf); 478EXPORT_SYMBOL(bitmap_print_to_pagebuf);
485 479
@@ -647,7 +641,7 @@ int bitmap_parselist_user(const char __user *ubuf,
647 unsigned int ulen, unsigned long *maskp, 641 unsigned int ulen, unsigned long *maskp,
648 int nmaskbits) 642 int nmaskbits)
649{ 643{
650 if (!access_ok(VERIFY_READ, ubuf, ulen)) 644 if (!access_ok(ubuf, ulen))
651 return -EFAULT; 645 return -EFAULT;
652 return __bitmap_parselist((const char __force *)ubuf, 646 return __bitmap_parselist((const char __force *)ubuf,
653 ulen, 1, maskp, nmaskbits); 647 ulen, 1, maskp, nmaskbits);
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index ab719495e2cb..8be59f84eaea 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -2,7 +2,8 @@
2/* 2/*
3 * lib/bust_spinlocks.c 3 * lib/bust_spinlocks.c
4 * 4 *
5 * Provides a minimal bust_spinlocks for architectures which don't have one of their own. 5 * Provides a minimal bust_spinlocks for architectures which don't
6 * have one of their own.
6 * 7 *
7 * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG() 8 * bust_spinlocks() clears any spinlocks which would prevent oops, die(), BUG()
8 * and panic() information from reaching the user. 9 * and panic() information from reaching the user.
@@ -16,8 +17,7 @@
16#include <linux/vt_kern.h> 17#include <linux/vt_kern.h>
17#include <linux/console.h> 18#include <linux/console.h>
18 19
19 20void bust_spinlocks(int yes)
20void __attribute__((weak)) bust_spinlocks(int yes)
21{ 21{
22 if (yes) { 22 if (yes) {
23 ++oops_in_progress; 23 ++oops_in_progress;
diff --git a/lib/chacha20.c b/lib/chacha.c
index c1cc50fb68c9..a46d2832dbab 100644
--- a/lib/chacha20.c
+++ b/lib/chacha.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * ChaCha20 256-bit cipher algorithm, RFC7539 2 * The "hash function" used as the core of the ChaCha stream cipher (RFC7539)
3 * 3 *
4 * Copyright (C) 2015 Martin Willi 4 * Copyright (C) 2015 Martin Willi
5 * 5 *
@@ -14,17 +14,16 @@
14#include <linux/bitops.h> 14#include <linux/bitops.h>
15#include <linux/cryptohash.h> 15#include <linux/cryptohash.h>
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <crypto/chacha20.h> 17#include <crypto/chacha.h>
18 18
19void chacha20_block(u32 *state, u32 *stream) 19static void chacha_permute(u32 *x, int nrounds)
20{ 20{
21 u32 x[16], *out = stream;
22 int i; 21 int i;
23 22
24 for (i = 0; i < ARRAY_SIZE(x); i++) 23 /* whitelist the allowed round counts */
25 x[i] = state[i]; 24 WARN_ON_ONCE(nrounds != 20 && nrounds != 12);
26 25
27 for (i = 0; i < 20; i += 2) { 26 for (i = 0; i < nrounds; i += 2) {
28 x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); 27 x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
29 x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); 28 x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
30 x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); 29 x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
@@ -65,10 +64,54 @@ void chacha20_block(u32 *state, u32 *stream)
65 x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); 64 x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
66 x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); 65 x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
67 } 66 }
67}
68
69/**
70 * chacha_block - generate one keystream block and increment block counter
71 * @state: input state matrix (16 32-bit words)
72 * @stream: output keystream block (64 bytes)
73 * @nrounds: number of rounds (20 or 12; 20 is recommended)
74 *
75 * This is the ChaCha core, a function from 64-byte strings to 64-byte strings.
76 * The caller has already converted the endianness of the input. This function
77 * also handles incrementing the block counter in the input matrix.
78 */
79void chacha_block(u32 *state, u8 *stream, int nrounds)
80{
81 u32 x[16];
82 int i;
83
84 memcpy(x, state, 64);
85
86 chacha_permute(x, nrounds);
68 87
69 for (i = 0; i < ARRAY_SIZE(x); i++) 88 for (i = 0; i < ARRAY_SIZE(x); i++)
70 out[i] = cpu_to_le32(x[i] + state[i]); 89 put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
71 90
72 state[12]++; 91 state[12]++;
73} 92}
74EXPORT_SYMBOL(chacha20_block); 93EXPORT_SYMBOL(chacha_block);
94
95/**
96 * hchacha_block - abbreviated ChaCha core, for XChaCha
97 * @in: input state matrix (16 32-bit words)
98 * @out: output (8 32-bit words)
99 * @nrounds: number of rounds (20 or 12; 20 is recommended)
100 *
101 * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step
102 * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha
103 * skips the final addition of the initial state, and outputs only certain words
104 * of the state. It should not be used for streaming directly.
105 */
106void hchacha_block(const u32 *in, u32 *out, int nrounds)
107{
108 u32 x[16];
109
110 memcpy(x, in, 64);
111
112 chacha_permute(x, nrounds);
113
114 memcpy(&out[0], &x[0], 16);
115 memcpy(&out[4], &x[12], 16);
116}
117EXPORT_SYMBOL(hchacha_block);
diff --git a/lib/cordic.c b/lib/cordic.c
index 6cf477839ebd..8ef27c12956f 100644
--- a/lib/cordic.c
+++ b/lib/cordic.c
@@ -16,15 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/cordic.h> 17#include <linux/cordic.h>
18 18
19#define CORDIC_ANGLE_GEN 39797
20#define CORDIC_PRECISION_SHIFT 16
21#define CORDIC_NUM_ITER (CORDIC_PRECISION_SHIFT + 2)
22
23#define FIXED(X) ((s32)((X) << CORDIC_PRECISION_SHIFT))
24#define FLOAT(X) (((X) >= 0) \
25 ? ((((X) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1) \
26 : -((((-(X)) >> (CORDIC_PRECISION_SHIFT - 1)) + 1) >> 1))
27
28static const s32 arctan_table[] = { 19static const s32 arctan_table[] = {
29 2949120, 20 2949120,
30 1740967, 21 1740967,
@@ -64,16 +55,16 @@ struct cordic_iq cordic_calc_iq(s32 theta)
64 coord.q = 0; 55 coord.q = 0;
65 angle = 0; 56 angle = 0;
66 57
67 theta = FIXED(theta); 58 theta = CORDIC_FIXED(theta);
68 signtheta = (theta < 0) ? -1 : 1; 59 signtheta = (theta < 0) ? -1 : 1;
69 theta = ((theta + FIXED(180) * signtheta) % FIXED(360)) - 60 theta = ((theta + CORDIC_FIXED(180) * signtheta) % CORDIC_FIXED(360)) -
70 FIXED(180) * signtheta; 61 CORDIC_FIXED(180) * signtheta;
71 62
72 if (FLOAT(theta) > 90) { 63 if (CORDIC_FLOAT(theta) > 90) {
73 theta -= FIXED(180); 64 theta -= CORDIC_FIXED(180);
74 signx = -1; 65 signx = -1;
75 } else if (FLOAT(theta) < -90) { 66 } else if (CORDIC_FLOAT(theta) < -90) {
76 theta += FIXED(180); 67 theta += CORDIC_FIXED(180);
77 signx = -1; 68 signx = -1;
78 } 69 }
79 70
diff --git a/lib/cpumask.c b/lib/cpumask.c
index beca6244671a..8d666ab84b5c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -4,7 +4,7 @@
4#include <linux/bitops.h> 4#include <linux/bitops.h>
5#include <linux/cpumask.h> 5#include <linux/cpumask.h>
6#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/bootmem.h> 7#include <linux/memblock.h>
8 8
9/** 9/**
10 * cpumask_next - get the next cpu in a cpumask 10 * cpumask_next - get the next cpu in a cpumask
@@ -163,7 +163,7 @@ EXPORT_SYMBOL(zalloc_cpumask_var);
163 */ 163 */
164void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) 164void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
165{ 165{
166 *mask = memblock_virt_alloc(cpumask_size(), 0); 166 *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
167} 167}
168 168
169/** 169/**
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <crypto/hash.h> 16#include <crypto/hash.h>
17#include <crypto/algapi.h>
17#include <linux/static_key.h> 18#include <linux/static_key.h>
19#include <linux/notifier.h>
18 20
19static struct crypto_shash *crct10dif_tfm; 21static struct crypto_shash __rcu *crct10dif_tfm;
20static struct static_key crct10dif_fallback __read_mostly; 22static struct static_key crct10dif_fallback __read_mostly;
23static DEFINE_MUTEX(crc_t10dif_mutex);
24
25static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
26{
27 struct crypto_alg *alg = data;
28 struct crypto_shash *new, *old;
29
30 if (val != CRYPTO_MSG_ALG_LOADED ||
31 static_key_false(&crct10dif_fallback) ||
32 strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
33 return 0;
34
35 mutex_lock(&crc_t10dif_mutex);
36 old = rcu_dereference_protected(crct10dif_tfm,
37 lockdep_is_held(&crc_t10dif_mutex));
38 if (!old) {
39 mutex_unlock(&crc_t10dif_mutex);
40 return 0;
41 }
42 new = crypto_alloc_shash("crct10dif", 0, 0);
43 if (IS_ERR(new)) {
44 mutex_unlock(&crc_t10dif_mutex);
45 return 0;
46 }
47 rcu_assign_pointer(crct10dif_tfm, new);
48 mutex_unlock(&crc_t10dif_mutex);
49
50 synchronize_rcu();
51 crypto_free_shash(old);
52 return 0;
53}
54
55static struct notifier_block crc_t10dif_nb = {
56 .notifier_call = crc_t10dif_rehash,
57};
21 58
22__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) 59__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
23{ 60{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
30 if (static_key_false(&crct10dif_fallback)) 67 if (static_key_false(&crct10dif_fallback))
31 return crc_t10dif_generic(crc, buffer, len); 68 return crc_t10dif_generic(crc, buffer, len);
32 69
33 desc.shash.tfm = crct10dif_tfm; 70 rcu_read_lock();
71 desc.shash.tfm = rcu_dereference(crct10dif_tfm);
34 desc.shash.flags = 0; 72 desc.shash.flags = 0;
35 *(__u16 *)desc.ctx = crc; 73 *(__u16 *)desc.ctx = crc;
36 74
37 err = crypto_shash_update(&desc.shash, buffer, len); 75 err = crypto_shash_update(&desc.shash, buffer, len);
76 rcu_read_unlock();
77
38 BUG_ON(err); 78 BUG_ON(err);
39 79
40 return *(__u16 *)desc.ctx; 80 return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
49 89
50static int __init crc_t10dif_mod_init(void) 90static int __init crc_t10dif_mod_init(void)
51{ 91{
92 crypto_register_notifier(&crc_t10dif_nb);
52 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0); 93 crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
53 if (IS_ERR(crct10dif_tfm)) { 94 if (IS_ERR(crct10dif_tfm)) {
54 static_key_slow_inc(&crct10dif_fallback); 95 static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
59 100
60static void __exit crc_t10dif_mod_fini(void) 101static void __exit crc_t10dif_mod_fini(void)
61{ 102{
103 crypto_unregister_notifier(&crc_t10dif_nb);
62 crypto_free_shash(crct10dif_tfm); 104 crypto_free_shash(crct10dif_tfm);
63} 105}
64 106
65module_init(crc_t10dif_mod_init); 107module_init(crc_t10dif_mod_init);
66module_exit(crc_t10dif_mod_fini); 108module_exit(crc_t10dif_mod_fini);
67 109
110static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
111{
112 if (static_key_false(&crct10dif_fallback))
113 return sprintf(buffer, "fallback\n");
114
115 return sprintf(buffer, "%s\n",
116 crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
117}
118
119module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
120
68MODULE_DESCRIPTION("T10 DIF CRC calculation"); 121MODULE_DESCRIPTION("T10 DIF CRC calculation");
69MODULE_LICENSE("GPL"); 122MODULE_LICENSE("GPL");
70MODULE_SOFTDEP("pre: crct10dif"); 123MODULE_SOFTDEP("pre: crct10dif");
diff --git a/lib/crc32.c b/lib/crc32.c
index a6c9afafc8c8..45b1d67a1767 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
183} 183}
184 184
185#if CRC_LE_BITS == 1 185#if CRC_LE_BITS == 1
186u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 186u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
187{ 187{
188 return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE); 188 return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
189} 189}
190u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) 190u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
191{ 191{
192 return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE); 192 return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
193} 193}
194#else 194#else
195u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) 195u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
196{ 196{
197 return crc32_le_generic(crc, p, len, 197 return crc32_le_generic(crc, p, len,
198 (const u32 (*)[256])crc32table_le, CRC32_POLY_LE); 198 (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
199} 199}
200u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len) 200u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
201{ 201{
202 return crc32_le_generic(crc, p, len, 202 return crc32_le_generic(crc, p, len,
203 (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE); 203 (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
@@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
206EXPORT_SYMBOL(crc32_le); 206EXPORT_SYMBOL(crc32_le);
207EXPORT_SYMBOL(__crc32c_le); 207EXPORT_SYMBOL(__crc32c_le);
208 208
209u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
210u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
211
209/* 212/*
210 * This multiplies the polynomials x and y modulo the given modulus. 213 * This multiplies the polynomials x and y modulo the given modulus.
211 * This follows the "little-endian" CRC convention that the lsbit 214 * This follows the "little-endian" CRC convention that the lsbit
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e..ce51749cc145 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -21,7 +21,7 @@
21 * that would just muddy the log. So we report the first one and 21 * that would just muddy the log. So we report the first one and
22 * shut up after that. 22 * shut up after that.
23 */ 23 */
24int debug_locks = 1; 24int debug_locks __read_mostly = 1;
25EXPORT_SYMBOL_GPL(debug_locks); 25EXPORT_SYMBOL_GPL(debug_locks);
26 26
27/* 27/*
@@ -29,7 +29,7 @@ EXPORT_SYMBOL_GPL(debug_locks);
29 * 'silent failure': nothing is printed to the console when 29 * 'silent failure': nothing is printed to the console when
30 * a locking bug is detected. 30 * a locking bug is detected.
31 */ 31 */
32int debug_locks_silent; 32int debug_locks_silent __read_mostly;
33EXPORT_SYMBOL_GPL(debug_locks_silent); 33EXPORT_SYMBOL_GPL(debug_locks_silent);
34 34
35/* 35/*
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
37 */ 37 */
38int debug_locks_off(void) 38int debug_locks_off(void)
39{ 39{
40 if (__debug_locks_off()) { 40 if (debug_locks && __debug_locks_off()) {
41 if (!debug_locks_silent) { 41 if (!debug_locks_silent) {
42 console_verbose(); 42 console_verbose();
43 return 1; 43 return 1;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 70935ed91125..55437fd5128b 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -135,7 +135,6 @@ static void fill_pool(void)
135 if (!new) 135 if (!new)
136 return; 136 return;
137 137
138 kmemleak_ignore(new);
139 raw_spin_lock_irqsave(&pool_lock, flags); 138 raw_spin_lock_irqsave(&pool_lock, flags);
140 hlist_add_head(&new->node, &obj_pool); 139 hlist_add_head(&new->node, &obj_pool);
141 debug_objects_allocated++; 140 debug_objects_allocated++;
@@ -1128,16 +1127,14 @@ static int __init debug_objects_replace_static_objects(void)
1128 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL); 1127 obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1129 if (!obj) 1128 if (!obj)
1130 goto free; 1129 goto free;
1131 kmemleak_ignore(obj);
1132 hlist_add_head(&obj->node, &objects); 1130 hlist_add_head(&obj->node, &objects);
1133 } 1131 }
1134 1132
1135 /* 1133 /*
1136 * When debug_objects_mem_init() is called we know that only 1134 * debug_objects_mem_init() is now called early that only one CPU is up
1137 * one CPU is up, so disabling interrupts is enough 1135 * and interrupts have been disabled, so it is safe to replace the
1138 * protection. This avoids the lockdep hell of lock ordering. 1136 * active object references.
1139 */ 1137 */
1140 local_irq_disable();
1141 1138
1142 /* Remove the statically allocated objects from the pool */ 1139 /* Remove the statically allocated objects from the pool */
1143 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node) 1140 hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
@@ -1158,7 +1155,6 @@ static int __init debug_objects_replace_static_objects(void)
1158 cnt++; 1155 cnt++;
1159 } 1156 }
1160 } 1157 }
1161 local_irq_enable();
1162 1158
1163 pr_debug("%d of %d active objects replaced\n", 1159 pr_debug("%d of %d active objects replaced\n",
1164 cnt, obj_pool_used); 1160 cnt, obj_pool_used);
@@ -1184,7 +1180,8 @@ void __init debug_objects_mem_init(void)
1184 1180
1185 obj_cache = kmem_cache_create("debug_objects_cache", 1181 obj_cache = kmem_cache_create("debug_objects_cache",
1186 sizeof (struct debug_obj), 0, 1182 sizeof (struct debug_obj), 0,
1187 SLAB_DEBUG_OBJECTS, NULL); 1183 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1184 NULL);
1188 1185
1189 if (!obj_cache || debug_objects_replace_static_objects()) { 1186 if (!obj_cache || debug_objects_replace_static_objects()) {
1190 debug_objects_enabled = 0; 1187 debug_objects_enabled = 0;
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c7c96bc7654a..dbf2b457e47e 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -188,7 +188,7 @@ static int ddebug_change(const struct ddebug_query *query,
188 newflags = (dp->flags & mask) | flags; 188 newflags = (dp->flags & mask) | flags;
189 if (newflags == dp->flags) 189 if (newflags == dp->flags)
190 continue; 190 continue;
191#ifdef HAVE_JUMP_LABEL 191#ifdef CONFIG_JUMP_LABEL
192 if (dp->flags & _DPRINTK_FLAGS_PRINT) { 192 if (dp->flags & _DPRINTK_FLAGS_PRINT) {
193 if (!(flags & _DPRINTK_FLAGS_PRINT)) 193 if (!(flags & _DPRINTK_FLAGS_PRINT))
194 static_branch_disable(&dp->key.dd_key_true); 194 static_branch_disable(&dp->key.dd_key_true);
diff --git a/lib/find_bit_benchmark.c b/lib/find_bit_benchmark.c
index 5367ffa5c18f..f0e394dd2beb 100644
--- a/lib/find_bit_benchmark.c
+++ b/lib/find_bit_benchmark.c
@@ -108,14 +108,13 @@ static int __init test_find_next_and_bit(const void *bitmap,
108 const void *bitmap2, unsigned long len) 108 const void *bitmap2, unsigned long len)
109{ 109{
110 unsigned long i, cnt; 110 unsigned long i, cnt;
111 cycles_t cycles; 111 ktime_t time;
112 112
113 cycles = get_cycles(); 113 time = ktime_get();
114 for (cnt = i = 0; i < BITMAP_LEN; cnt++) 114 for (cnt = i = 0; i < BITMAP_LEN; cnt++)
115 i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1); 115 i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i + 1);
116 cycles = get_cycles() - cycles; 116 time = ktime_get() - time;
117 pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n", 117 pr_err("find_next_and_bit: %18llu ns, %6ld iterations\n", time, cnt);
118 (u64)cycles, cnt);
119 118
120 return 0; 119 return 0;
121} 120}
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 8fa0791e8a1e..3ecdd5204ec5 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -109,6 +109,15 @@ config FONT_SUN12x22
109 big letters (like the letters used in the SPARC PROM). If the 109 big letters (like the letters used in the SPARC PROM). If the
110 standard font is unreadable for you, say Y, otherwise say N. 110 standard font is unreadable for you, say Y, otherwise say N.
111 111
112config FONT_TER16x32
113 bool "Terminus 16x32 font (not supported by all drivers)"
114 depends on FRAMEBUFFER_CONSOLE && (!SPARC && FONTS || SPARC)
115 help
116 Terminus Font is a clean, fixed width bitmap font, designed
117 for long (8 and more hours per day) work with computers.
118 This is the high resolution, large version for use with HiDPI screens.
119 If the standard font is unreadable for you, say Y, otherwise say N.
120
112config FONT_AUTOSELECT 121config FONT_AUTOSELECT
113 def_bool y 122 def_bool y
114 depends on !FONT_8x8 123 depends on !FONT_8x8
@@ -121,6 +130,7 @@ config FONT_AUTOSELECT
121 depends on !FONT_SUN8x16 130 depends on !FONT_SUN8x16
122 depends on !FONT_SUN12x22 131 depends on !FONT_SUN12x22
123 depends on !FONT_10x18 132 depends on !FONT_10x18
133 depends on !FONT_TER16x32
124 select FONT_8x16 134 select FONT_8x16
125 135
126endif # FONT_SUPPORT 136endif # FONT_SUPPORT
diff --git a/lib/fonts/Makefile b/lib/fonts/Makefile
index d56f02dea83a..ed95070860de 100644
--- a/lib/fonts/Makefile
+++ b/lib/fonts/Makefile
@@ -14,6 +14,7 @@ font-objs-$(CONFIG_FONT_PEARL_8x8) += font_pearl_8x8.o
14font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o 14font-objs-$(CONFIG_FONT_ACORN_8x8) += font_acorn_8x8.o
15font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o 15font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
16font-objs-$(CONFIG_FONT_6x10) += font_6x10.o 16font-objs-$(CONFIG_FONT_6x10) += font_6x10.o
17font-objs-$(CONFIG_FONT_TER16x32) += font_ter16x32.o
17 18
18font-objs += $(font-objs-y) 19font-objs += $(font-objs-y)
19 20
diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c
new file mode 100644
index 000000000000..3f0cf1ccdf3a
--- /dev/null
+++ b/lib/fonts/font_ter16x32.c
@@ -0,0 +1,2072 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/font.h>
3#include <linux/module.h>
4
5#define FONTDATAMAX 16384
6
7static const unsigned char fontdata_ter16x32[FONTDATAMAX] = {
8
9 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
10 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
11 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
12 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
13 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
14 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
15 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
16 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0 */
17 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
18 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
19 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
20 0xee, 0xee, 0xee, 0xee, 0xe0, 0x0e, 0xe0, 0x0e,
21 0xe0, 0x0e, 0xe0, 0x0e, 0xef, 0xee, 0xe7, 0xce,
22 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xf0, 0x1e,
23 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
24 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1 */
25 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
26 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8, 0x7f, 0xfc,
27 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
28 0xe3, 0x8e, 0xe3, 0x8e, 0xff, 0xfe, 0xff, 0xfe,
29 0xff, 0xfe, 0xff, 0xfe, 0xe0, 0x0e, 0xf0, 0x1e,
30 0xf8, 0x3e, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
31 0x7f, 0xfc, 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00,
32 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */
33 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
34 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
35 0x78, 0x3c, 0xfc, 0x7e, 0xfe, 0xfe, 0xff, 0xfe,
36 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
37 0x7f, 0xfc, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
38 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
39 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
40 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 3 */
41 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
42 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
43 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0, 0x0f, 0xe0,
44 0x1f, 0xf0, 0x3f, 0xf8, 0x7f, 0xfc, 0xff, 0xfe,
45 0xff, 0xfe, 0x7f, 0xfc, 0x3f, 0xf8, 0x1f, 0xf0,
46 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00,
47 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
48 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 4 */
49 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
50 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0f, 0xe0,
51 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0, 0x0f, 0xe0,
52 0x07, 0xc0, 0x03, 0x80, 0x3b, 0xb8, 0x7f, 0xfc,
53 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe,
54 0x7f, 0xfc, 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80,
55 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
56 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 5 */
57 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
58 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
59 0x07, 0xc0, 0x0f, 0xe0, 0x1f, 0xf0, 0x3f, 0xf8,
60 0x7f, 0xfc, 0x7f, 0xfc, 0xff, 0xfe, 0xff, 0xfe,
61 0xff, 0xfe, 0xff, 0xfe, 0xff, 0xfe, 0x7b, 0xbc,
62 0x3b, 0xb8, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
63 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
64 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 */
65 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
66 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
67 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
68 0x03, 0xc0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
69 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x03, 0xc0,
70 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
71 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
72 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
73 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
74 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
75 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
76 0xfc, 0x3f, 0xf8, 0x1f, 0xf0, 0x0f, 0xf0, 0x0f,
77 0xf0, 0x0f, 0xf0, 0x0f, 0xf8, 0x1f, 0xfc, 0x3f,
78 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
79 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
80 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 8 */
81 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
82 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
84 0x03, 0xc0, 0x07, 0xe0, 0x0e, 0x70, 0x0c, 0x30,
85 0x0c, 0x30, 0x0e, 0x70, 0x07, 0xe0, 0x03, 0xc0,
86 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
88 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 9 */
89 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
90 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
91 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
92 0xfc, 0x3f, 0xf8, 0x1f, 0xf1, 0x8f, 0xf3, 0xcf,
93 0xf3, 0xcf, 0xf1, 0x8f, 0xf8, 0x1f, 0xfc, 0x3f,
94 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
95 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
96 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 10 */
97 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
98 0x00, 0x00, 0x00, 0x00, 0x03, 0xfe, 0x03, 0xfe,
99 0x00, 0x1e, 0x00, 0x3e, 0x00, 0x76, 0x00, 0xe6,
100 0x01, 0xc6, 0x03, 0x86, 0x3f, 0xe0, 0x7f, 0xf0,
101 0xf0, 0x78, 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38,
102 0xe0, 0x38, 0xe0, 0x38, 0xe0, 0x38, 0xf0, 0x78,
103 0x7f, 0xf0, 0x3f, 0xe0, 0x00, 0x00, 0x00, 0x00,
104 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 11 */
105 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
106 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
107 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
108 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
109 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
110 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
111 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
112 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 12 */
113 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
114 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfc, 0x3f, 0xfc,
115 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c,
116 0x3f, 0xfc, 0x3f, 0xfc, 0x38, 0x00, 0x38, 0x00,
117 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
118 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
119 0xf8, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
120 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 13 */
121 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
122 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
123 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
124 0x7f, 0xfe, 0x7f, 0xfe, 0x70, 0x0e, 0x70, 0x0e,
125 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
126 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x3e,
127 0xf0, 0x3c, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00,
128 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 14 */
129 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
130 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
131 0x03, 0x80, 0x03, 0x80, 0x73, 0x9c, 0x73, 0x9c,
132 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0, 0x7c, 0x7c,
133 0x7c, 0x7c, 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8,
134 0x73, 0x9c, 0x73, 0x9c, 0x03, 0x80, 0x03, 0x80,
135 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
137 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
139 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00, 0xff, 0x00,
140 0xff, 0xc0, 0xff, 0xf0, 0xff, 0xfc, 0xff, 0xff,
141 0xff, 0xff, 0xff, 0xfc, 0xff, 0xf0, 0xff, 0xc0,
142 0xff, 0x00, 0xfc, 0x00, 0xf0, 0x00, 0xc0, 0x00,
143 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
144 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 16 */
145 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
146 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
147 0x00, 0x03, 0x00, 0x0f, 0x00, 0x3f, 0x00, 0xff,
148 0x03, 0xff, 0x0f, 0xff, 0x3f, 0xff, 0xff, 0xff,
149 0xff, 0xff, 0x3f, 0xff, 0x0f, 0xff, 0x03, 0xff,
150 0x00, 0xff, 0x00, 0x3f, 0x00, 0x0f, 0x00, 0x03,
151 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 17 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
154 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
155 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
156 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
157 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
158 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
159 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 18 */
161 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
162 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
163 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
164 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
165 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
167 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 19 */
169 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
170 0x00, 0x00, 0x00, 0x00, 0x1f, 0xfe, 0x3f, 0xfe,
171 0x79, 0xce, 0x71, 0xce, 0x71, 0xce, 0x71, 0xce,
172 0x71, 0xce, 0x71, 0xce, 0x79, 0xce, 0x3f, 0xce,
173 0x1f, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
174 0x01, 0xce, 0x01, 0xce, 0x01, 0xce, 0x01, 0xce,
175 0x01, 0xce, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 20 */
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
178 0x07, 0xe0, 0x0f, 0xf0, 0x1e, 0x78, 0x1c, 0x38,
179 0x1c, 0x00, 0x1e, 0x00, 0x0f, 0xc0, 0x0f, 0xe0,
180 0x1c, 0xf0, 0x1c, 0x78, 0x1c, 0x38, 0x1c, 0x38,
181 0x1c, 0x38, 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xf0,
182 0x03, 0xf0, 0x00, 0x78, 0x00, 0x38, 0x1c, 0x38,
183 0x1e, 0x78, 0x0f, 0xf0, 0x07, 0xe0, 0x00, 0x00,
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 21 */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
189 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
190 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe,
191 0x7f, 0xfe, 0x7f, 0xfe, 0x00, 0x00, 0x00, 0x00,
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 22 */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
194 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
195 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
196 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
197 0x03, 0x80, 0x63, 0x8c, 0x73, 0x9c, 0x3b, 0xb8,
198 0x1f, 0xf0, 0x0f, 0xe0, 0x07, 0xc0, 0x03, 0x80,
199 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
202 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
203 0x0f, 0xe0, 0x1f, 0xf0, 0x3b, 0xb8, 0x73, 0x9c,
204 0x63, 0x8c, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
205 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
206 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
207 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
211 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
212 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
213 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x63, 0x8c,
214 0x73, 0x9c, 0x3b, 0xb8, 0x1f, 0xf0, 0x0f, 0xe0,
215 0x07, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 25 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
219 0x00, 0x00, 0x00, 0xc0, 0x00, 0xe0, 0x00, 0x70,
220 0x00, 0x38, 0x00, 0x1c, 0x7f, 0xfe, 0x7f, 0xfe,
221 0x7f, 0xfe, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
222 0x00, 0xe0, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00,
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 26 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
227 0x00, 0x00, 0x03, 0x00, 0x07, 0x00, 0x0e, 0x00,
228 0x1c, 0x00, 0x38, 0x00, 0x7f, 0xfe, 0x7f, 0xfe,
229 0x7f, 0xfe, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
230 0x07, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 27 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
236 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
237 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
238 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
240 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 28 */
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, 0x06, 0x60, 0x0e, 0x70, 0x1c, 0x38,
244 0x38, 0x1c, 0x70, 0x0e, 0xff, 0xff, 0xff, 0xff,
245 0xff, 0xff, 0x70, 0x0e, 0x38, 0x1c, 0x1c, 0x38,
246 0x0e, 0x70, 0x06, 0x60, 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 29 */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
251 0x01, 0x80, 0x01, 0x80, 0x03, 0xc0, 0x03, 0xc0,
252 0x07, 0xe0, 0x07, 0xe0, 0x0f, 0xf0, 0x0f, 0xf0,
253 0x1f, 0xf8, 0x1f, 0xf8, 0x3f, 0xfc, 0x3f, 0xfc,
254 0x7f, 0xfe, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff,
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
259 0xff, 0xff, 0xff, 0xff, 0x7f, 0xfe, 0x7f, 0xfe,
260 0x3f, 0xfc, 0x3f, 0xfc, 0x1f, 0xf8, 0x1f, 0xf8,
261 0x0f, 0xf0, 0x0f, 0xf0, 0x07, 0xe0, 0x07, 0xe0,
262 0x03, 0xc0, 0x03, 0xc0, 0x01, 0x80, 0x01, 0x80,
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 31 */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 32 */
273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
274 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
275 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
276 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
277 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
279 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 33 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
283 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
290 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
291 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
292 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
293 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x7f, 0xfc,
294 0x7f, 0xfc, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
295 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 35 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0,
299 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c, 0x73, 0x80,
300 0x73, 0x80, 0x73, 0x80, 0x7b, 0x80, 0x3f, 0xf0,
301 0x1f, 0xf8, 0x03, 0xbc, 0x03, 0x9c, 0x03, 0x9c,
302 0x03, 0x9c, 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8,
303 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
306 0x00, 0x00, 0x00, 0x00, 0x1f, 0x1c, 0x3f, 0x9c,
307 0x3b, 0xb8, 0x3b, 0xb8, 0x3f, 0xf0, 0x1f, 0x70,
308 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
309 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
310 0x0e, 0xf8, 0x0f, 0xfc, 0x1d, 0xdc, 0x1d, 0xdc,
311 0x39, 0xfc, 0x38, 0xf8, 0x00, 0x00, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 37 */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc0, 0x1f, 0xe0,
315 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70,
316 0x38, 0x70, 0x1c, 0xe0, 0x0f, 0xc0, 0x0f, 0x80,
317 0x1f, 0xce, 0x38, 0xee, 0x70, 0x7c, 0x70, 0x38,
318 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x7c,
319 0x3f, 0xee, 0x1f, 0xce, 0x00, 0x00, 0x00, 0x00,
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 38 */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
322 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
323 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 39 */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x01, 0xc0,
331 0x03, 0x80, 0x07, 0x00, 0x07, 0x00, 0x0e, 0x00,
332 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
333 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
334 0x0e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x03, 0x80,
335 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00,
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 40 */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
338 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x07, 0x00,
339 0x03, 0x80, 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0,
340 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
341 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0, 0x00, 0xe0,
342 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
343 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 41 */
345 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
347 0x00, 0x00, 0x00, 0x00, 0x38, 0x38, 0x1c, 0x70,
348 0x0e, 0xe0, 0x07, 0xc0, 0x03, 0x80, 0x7f, 0xfc,
349 0x7f, 0xfc, 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0,
350 0x1c, 0x70, 0x38, 0x38, 0x00, 0x00, 0x00, 0x00,
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
356 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
357 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
358 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 43 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
366 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
367 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 44 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
373 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 45 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
378 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
379 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
380 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
382 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
383 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 46 */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
387 0x00, 0x38, 0x00, 0x38, 0x00, 0x70, 0x00, 0x70,
388 0x00, 0xe0, 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0,
389 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x07, 0x00,
390 0x0e, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00,
391 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 47 */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
394 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
395 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
396 0x70, 0x7c, 0x70, 0xfc, 0x71, 0xdc, 0x73, 0x9c,
397 0x77, 0x1c, 0x7e, 0x1c, 0x7c, 0x1c, 0x78, 0x1c,
398 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
399 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
402 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0x80,
403 0x0f, 0x80, 0x1f, 0x80, 0x1f, 0x80, 0x03, 0x80,
404 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
405 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
406 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
407 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 49 */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
410 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
411 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
412 0x70, 0x1c, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70,
413 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
414 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
415 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 50 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
418 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
419 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x1c,
420 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c, 0x0f, 0xf8,
421 0x0f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
422 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
423 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 51 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c,
427 0x00, 0x7c, 0x00, 0xfc, 0x01, 0xdc, 0x03, 0x9c,
428 0x07, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38, 0x1c,
429 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
430 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
431 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 52 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
434 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
435 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
436 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
437 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
438 0x00, 0x1c, 0x00, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
439 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 53 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
442 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xf8,
443 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
444 0x70, 0x00, 0x70, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
445 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
446 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
447 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
448 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 */
449 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
450 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
451 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38,
452 0x00, 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0xe0,
453 0x00, 0xe0, 0x01, 0xc0, 0x01, 0xc0, 0x03, 0x80,
454 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
455 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
456 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 55 */
457 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
458 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
459 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
460 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c, 0x3f, 0xf8,
461 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c,
462 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
463 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
464 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */
465 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
466 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
467 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
468 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
469 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
470 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x3c,
471 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, 0x00, 0x00,
472 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 57 */
473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
476 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
477 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
478 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
479 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
480 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 58 */
481 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
482 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
483 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
484 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
485 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
486 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
487 0x03, 0x80, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
488 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 59 */
489 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
490 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x38,
491 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
492 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
493 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
494 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0x70,
495 0x00, 0x38, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
496 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
497 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
498 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
499 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
500 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
501 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
502 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
503 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
504 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 61 */
505 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
506 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x1c, 0x00,
507 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
508 0x00, 0xe0, 0x00, 0x70, 0x00, 0x38, 0x00, 0x1c,
509 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
510 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
511 0x1c, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
512 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 62 */
513 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
514 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
515 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
516 0x70, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00, 0xe0,
517 0x01, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
518 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
519 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
520 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 63 */
521 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
522 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x3f, 0xfc,
523 0x78, 0x0e, 0x70, 0x06, 0x71, 0xfe, 0x73, 0xfe,
524 0x77, 0x8e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e,
525 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x0e, 0x77, 0x9e,
526 0x73, 0xfe, 0x71, 0xf6, 0x70, 0x00, 0x78, 0x00,
527 0x3f, 0xfe, 0x1f, 0xfe, 0x00, 0x00, 0x00, 0x00,
528 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */
529 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
530 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
531 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
532 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
533 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
534 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
535 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
536 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 65 */
537 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
538 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
539 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
540 0x70, 0x1c, 0x70, 0x38, 0x7f, 0xf0, 0x7f, 0xf0,
541 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
542 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
543 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
544 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 66 */
545 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
546 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
547 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
548 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
549 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
550 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
551 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
552 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 67 */
553 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
554 0x00, 0x00, 0x00, 0x00, 0x7f, 0xc0, 0x7f, 0xf0,
555 0x70, 0x78, 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c,
556 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
557 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
558 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x70, 0x78,
559 0x7f, 0xf0, 0x7f, 0xc0, 0x00, 0x00, 0x00, 0x00,
560 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 68 */
561 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
562 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
563 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
564 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
565 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
566 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
567 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
568 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 69 */
569 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
570 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
571 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
572 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
573 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
574 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
575 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
576 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
577 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
578 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
579 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
580 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x71, 0xfc,
581 0x71, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
582 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
583 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
584 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 71 */
585 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
586 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
587 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
588 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc,
589 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
590 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
591 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
592 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */
593 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
594 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x0f, 0xe0,
595 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
596 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
597 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
598 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
599 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
600 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 73 */
601 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
602 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0xfe,
603 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
604 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
605 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
606 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x78,
607 0x3f, 0xf0, 0x1f, 0xe0, 0x00, 0x00, 0x00, 0x00,
608 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 74 */
609 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
610 0x00, 0x00, 0x00, 0x00, 0x70, 0x0c, 0x70, 0x1c,
611 0x70, 0x38, 0x70, 0x70, 0x70, 0xe0, 0x71, 0xc0,
612 0x73, 0x80, 0x77, 0x00, 0x7e, 0x00, 0x7c, 0x00,
613 0x7c, 0x00, 0x7e, 0x00, 0x77, 0x00, 0x73, 0x80,
614 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70, 0x70, 0x38,
615 0x70, 0x1c, 0x70, 0x0c, 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 75 */
617 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
618 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
619 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
620 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
621 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
622 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
623 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 76 */
625 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
626 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
627 0x78, 0x1e, 0x7c, 0x3e, 0x7e, 0x7e, 0x7e, 0x7e,
628 0x77, 0xee, 0x73, 0xce, 0x73, 0xce, 0x71, 0x8e,
629 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
630 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
631 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 77 */
633 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
635 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
636 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
637 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
638 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
639 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
640 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 78 */
641 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
642 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
643 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
644 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
645 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
646 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
647 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 79 */
649 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
651 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
652 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
653 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
654 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
655 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
657 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
658 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
659 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
660 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
661 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
662 0x70, 0x1c, 0x70, 0x1c, 0x73, 0x9c, 0x79, 0xfc,
663 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x38, 0x00, 0x1c,
664 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 81 */
665 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00, 0x7f, 0xf0, 0x7f, 0xf8,
667 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
668 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
669 0x7f, 0xf8, 0x7f, 0xf0, 0x7e, 0x00, 0x77, 0x00,
670 0x73, 0x80, 0x71, 0xc0, 0x70, 0xe0, 0x70, 0x70,
671 0x70, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 82 */
673 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
675 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
676 0x70, 0x00, 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0,
677 0x1f, 0xf8, 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c,
678 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
679 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
680 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 83 */
681 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
682 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
683 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
684 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
685 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
686 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
687 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 84 */
689 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
690 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
691 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
692 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
693 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
694 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
695 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 85 */
697 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
699 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
700 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
701 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
702 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
703 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
704 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 86 */
705 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x00, 0x00, 0x70, 0x0e, 0x70, 0x0e,
707 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
708 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e,
709 0x71, 0x8e, 0x73, 0xce, 0x73, 0xce, 0x77, 0xee,
710 0x7e, 0x7e, 0x7e, 0x7e, 0x7c, 0x3e, 0x78, 0x1e,
711 0x70, 0x0e, 0x70, 0x0e, 0x00, 0x00, 0x00, 0x00,
712 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 87 */
713 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
714 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
715 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
716 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
717 0x07, 0xc0, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
718 0x1c, 0x70, 0x1c, 0x70, 0x38, 0x38, 0x38, 0x38,
719 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
720 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 88 */
721 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
722 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
723 0x70, 0x1c, 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70,
724 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
725 0x07, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
726 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
727 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
728 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 89 */
729 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
730 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
731 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x38,
732 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80,
733 0x07, 0x00, 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00,
734 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
735 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
736 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 90 */
737 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
738 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
739 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
740 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
741 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
742 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x0e, 0x00,
743 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
744 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 91 */
745 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
746 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
747 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x0e, 0x00,
748 0x07, 0x00, 0x07, 0x00, 0x03, 0x80, 0x03, 0x80,
749 0x01, 0xc0, 0x01, 0xc0, 0x00, 0xe0, 0x00, 0xe0,
750 0x00, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00, 0x38,
751 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00,
752 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 92 */
753 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
754 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x0f, 0xf0,
755 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
756 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
757 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
758 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70,
759 0x0f, 0xf0, 0x0f, 0xf0, 0x00, 0x00, 0x00, 0x00,
760 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 93 */
761 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
762 0x03, 0x80, 0x07, 0xc0, 0x0e, 0xe0, 0x1c, 0x70,
763 0x38, 0x38, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
764 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
765 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
766 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
767 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
768 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 94 */
769 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
770 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
771 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
772 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
773 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
774 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
775 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
776 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 95 */
777 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00, 0x07, 0x00,
778 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
779 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
780 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
781 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
782 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
783 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
784 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 96 */
785 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
786 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
787 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
788 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
789 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
790 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
791 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
792 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 97 */
793 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
794 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
795 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
796 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
797 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
798 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
799 0x7f, 0xf8, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00,
800 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 98 */
801 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
802 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
803 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
804 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
805 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
806 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
807 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
808 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 99 */
809 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
810 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
811 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
812 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
813 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
814 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
815 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
816 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 100 */
817 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
818 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
819 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
820 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
821 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
822 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
823 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
824 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 101 */
825 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
826 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x01, 0xfe,
827 0x03, 0xc0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
828 0x3f, 0xf8, 0x3f, 0xf8, 0x03, 0x80, 0x03, 0x80,
829 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
830 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
831 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
832 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 102 */
833 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
834 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
835 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
836 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
837 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
838 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
839 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
840 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 103 */
841 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
842 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x70, 0x00,
843 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
844 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
845 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
846 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
847 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
848 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 104 */
849 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
850 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
851 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
852 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
853 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
854 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
855 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
856 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 105 */
857 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
858 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38,
859 0x00, 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
860 0x00, 0xf8, 0x00, 0xf8, 0x00, 0x38, 0x00, 0x38,
861 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
862 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
863 0x00, 0x38, 0x00, 0x38, 0x38, 0x38, 0x38, 0x38,
864 0x3c, 0x78, 0x1f, 0xf0, 0x0f, 0xe0, 0x00, 0x00, /* 106 */
865 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
866 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x38, 0x00,
867 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00,
868 0x38, 0x1c, 0x38, 0x38, 0x38, 0x70, 0x38, 0xe0,
869 0x39, 0xc0, 0x3b, 0x80, 0x3f, 0x00, 0x3f, 0x00,
870 0x3b, 0x80, 0x39, 0xc0, 0x38, 0xe0, 0x38, 0x70,
871 0x38, 0x38, 0x38, 0x1c, 0x00, 0x00, 0x00, 0x00,
872 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 107 */
873 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
874 0x00, 0x00, 0x00, 0x00, 0x0f, 0x80, 0x0f, 0x80,
875 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
876 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
877 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
878 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
879 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
880 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 108 */
881 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
882 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
883 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
884 0x7f, 0xf0, 0x7f, 0xf8, 0x73, 0xbc, 0x73, 0x9c,
885 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
886 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
887 0x73, 0x9c, 0x73, 0x9c, 0x00, 0x00, 0x00, 0x00,
888 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 109 */
889 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
890 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
891 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
892 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
893 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
894 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
895 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
896 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 110 */
897 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
898 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
899 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
900 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
901 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
902 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
903 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
904 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 111 */
905 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
906 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
907 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
908 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
909 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
910 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
911 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
912 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 112 */
913 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
914 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
915 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
916 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c, 0x70, 0x1c,
917 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
918 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
919 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
920 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x00, /* 113 */
921 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
922 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
923 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
924 0x73, 0xfc, 0x77, 0xfc, 0x7e, 0x00, 0x7c, 0x00,
925 0x78, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
926 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
927 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
928 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 114 */
929 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
930 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
931 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
932 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x00,
933 0x70, 0x00, 0x78, 0x00, 0x3f, 0xf0, 0x1f, 0xf8,
934 0x00, 0x3c, 0x00, 0x1c, 0x00, 0x1c, 0x78, 0x3c,
935 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
936 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 115 */
937 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
938 0x00, 0x00, 0x00, 0x00, 0x07, 0x00, 0x07, 0x00,
939 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
940 0x7f, 0xf0, 0x7f, 0xf0, 0x07, 0x00, 0x07, 0x00,
941 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
942 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
943 0x03, 0xfc, 0x01, 0xfc, 0x00, 0x00, 0x00, 0x00,
944 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 116 */
945 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
946 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
947 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
948 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
949 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
950 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
951 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
952 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 117 */
953 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
954 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
955 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
956 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
957 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
958 0x1c, 0x70, 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0,
959 0x07, 0xc0, 0x07, 0xc0, 0x00, 0x00, 0x00, 0x00,
960 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 118 */
961 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
962 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
963 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
964 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
965 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
966 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x7b, 0xbc,
967 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
968 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 119 */
969 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
970 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
971 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
972 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x38, 0x38,
973 0x1c, 0x70, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
974 0x0e, 0xe0, 0x1c, 0x70, 0x38, 0x38, 0x70, 0x1c,
975 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
976 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 120 */
977 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
978 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
979 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
980 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
981 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
982 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
983 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
984 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 121 */
985 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
986 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
987 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
988 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x38, 0x00, 0x70,
989 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
990 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
991 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
992 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 122 */
993 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
994 0x00, 0x00, 0x00, 0x00, 0x01, 0xf0, 0x03, 0xf0,
995 0x07, 0x80, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
996 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x3e, 0x00,
997 0x3e, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x00,
998 0x07, 0x00, 0x07, 0x00, 0x07, 0x00, 0x07, 0x80,
999 0x03, 0xf0, 0x01, 0xf0, 0x00, 0x00, 0x00, 0x00,
1000 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 123 */
1001 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1002 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
1003 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1004 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1005 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1006 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1007 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1008 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 124 */
1009 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1010 0x00, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x3f, 0x00,
1011 0x07, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1012 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x01, 0xf0,
1013 0x01, 0xf0, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1014 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x80,
1015 0x3f, 0x00, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x00,
1016 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 125 */
1017 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1018 0x1e, 0x1c, 0x3f, 0x1c, 0x77, 0x9c, 0x73, 0xdc,
1019 0x71, 0xf8, 0x70, 0xf0, 0x00, 0x00, 0x00, 0x00,
1020 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1021 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1022 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1023 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1024 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 126 */
1025 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1026 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1027 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1028 0x0f, 0xe0, 0x1e, 0xf0, 0x3c, 0x78, 0x78, 0x3c,
1029 0xf0, 0x1e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
1030 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e, 0xe0, 0x0e,
1031 0xff, 0xfe, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00,
1032 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 127 */
1033 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1034 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
1035 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x00,
1036 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1037 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1038 0x70, 0x00, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1039 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
1040 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 128 */
1041 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1043 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1044 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1045 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1046 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1047 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1048 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 129 */
1049 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1050 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
1051 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1052 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1053 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
1054 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
1055 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
1056 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 130 */
1057 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1058 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1059 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1060 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
1061 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
1062 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1063 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1064 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 131 */
1065 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1066 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1067 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1068 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
1069 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
1070 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1071 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1072 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 132 */
1073 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1074 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1075 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1076 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
1077 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
1078 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1079 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1080 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 133 */
1081 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1082 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0,
1083 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x00, 0x00,
1084 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
1085 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
1086 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1087 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1088 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 134 */
1089 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1090 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1091 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1092 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1093 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1094 0x70, 0x00, 0x70, 0x00, 0x70, 0x1c, 0x78, 0x3c,
1095 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
1096 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00, 0x00, 0x00, /* 135 */
1097 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1098 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1099 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1100 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1101 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
1102 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
1103 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
1104 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 136 */
1105 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1106 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1107 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1108 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1109 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
1110 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
1111 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
1112 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 137 */
1113 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1114 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1115 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1116 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1117 0x70, 0x1c, 0x70, 0x1c, 0x7f, 0xfc, 0x7f, 0xfc,
1118 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x78, 0x1c,
1119 0x3f, 0xfc, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
1120 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 138 */
1121 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1122 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1123 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1124 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
1125 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1126 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1127 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
1128 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 139 */
1129 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1130 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1131 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1132 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
1133 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1134 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1135 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
1136 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 140 */
1137 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1138 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1139 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1140 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
1141 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1142 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1143 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
1144 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 141 */
1145 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
1146 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
1147 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1148 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1149 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
1150 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1151 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 142 */
1153 0x00, 0x00, 0x07, 0xc0, 0x0e, 0xe0, 0x0e, 0xe0,
1154 0x0e, 0xe0, 0x07, 0xc0, 0x1f, 0xf0, 0x3f, 0xf8,
1155 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1156 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1157 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
1158 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1159 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 143 */
1161 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0, 0x01, 0xc0,
1162 0x03, 0x80, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
1163 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1164 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xe0,
1165 0x7f, 0xe0, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1166 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1167 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1168 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 144 */
1169 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1170 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1172 0x7f, 0xf8, 0x7f, 0xfc, 0x03, 0x9e, 0x03, 0x8e,
1173 0x03, 0x8e, 0x3f, 0x8e, 0x7f, 0xfe, 0xf3, 0xfe,
1174 0xe3, 0x80, 0xe3, 0x80, 0xe3, 0x80, 0xf3, 0xce,
1175 0x7f, 0xfe, 0x3e, 0xfc, 0x00, 0x00, 0x00, 0x00,
1176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 145 */
1177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1178 0x00, 0x00, 0x00, 0x00, 0x3f, 0xfe, 0x7f, 0xfe,
1179 0xf1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
1180 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xff, 0xfe,
1181 0xff, 0xfe, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
1182 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
1183 0xe1, 0xfe, 0xe1, 0xfe, 0x00, 0x00, 0x00, 0x00,
1184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 146 */
1185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1186 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1187 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1188 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1189 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1190 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1191 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 147 */
1193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1194 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1195 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1196 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1197 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1198 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1199 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 148 */
1201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1202 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1203 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1204 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1205 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1206 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1207 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 149 */
1209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1210 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x07, 0xc0,
1211 0x0e, 0xe0, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1212 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1213 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1214 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1215 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 150 */
1217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1218 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1219 0x07, 0x00, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1220 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1221 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1222 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1223 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 151 */
1225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1226 0x00, 0x00, 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70,
1227 0x1c, 0x70, 0x1c, 0x70, 0x00, 0x00, 0x00, 0x00,
1228 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1229 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1230 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1231 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
1232 0x00, 0x3c, 0x3f, 0xf8, 0x3f, 0xf0, 0x00, 0x00, /* 152 */
1233 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
1234 0x1c, 0x70, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
1235 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1236 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1237 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1238 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1239 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1240 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 153 */
1241 0x00, 0x00, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
1242 0x1c, 0x70, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
1243 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1244 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1245 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1246 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1247 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 154 */
1249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1251 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1252 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
1253 0x73, 0x80, 0x73, 0x80, 0x73, 0x80, 0x73, 0x80,
1254 0x73, 0x80, 0x73, 0x80, 0x73, 0x9c, 0x7b, 0xbc,
1255 0x3f, 0xf8, 0x1f, 0xf0, 0x03, 0x80, 0x03, 0x80,
1256 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 155 */
1257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1258 0x00, 0x00, 0x00, 0x00, 0x07, 0xe0, 0x0f, 0xf0,
1259 0x1e, 0x78, 0x1c, 0x38, 0x1c, 0x00, 0x1c, 0x00,
1260 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x7f, 0xe0,
1261 0x7f, 0xe0, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00,
1262 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x1c, 0x1c, 0x1c,
1263 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 156 */
1265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1266 0x00, 0x00, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
1267 0x38, 0x38, 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70,
1268 0x0e, 0xe0, 0x0e, 0xe0, 0x07, 0xc0, 0x07, 0xc0,
1269 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
1270 0x03, 0x80, 0x03, 0x80, 0x3f, 0xf8, 0x3f, 0xf8,
1271 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 157 */
1273 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1274 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff, 0x80,
1275 0xe3, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0, 0xe1, 0xc0,
1276 0xe1, 0xc0, 0xe1, 0xc0, 0xe3, 0xc0, 0xff, 0xf0,
1277 0xff, 0x70, 0xe0, 0x70, 0xe3, 0xfe, 0xe3, 0xfe,
1278 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70, 0xe0, 0x70,
1279 0xe0, 0x7e, 0xe0, 0x3e, 0x00, 0x00, 0x00, 0x00,
1280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 158 */
1281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1282 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
1283 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
1284 0x03, 0x80, 0x03, 0x80, 0x1f, 0xf0, 0x1f, 0xf0,
1285 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1286 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1287 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
1288 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, /* 159 */
1289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1290 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
1291 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1292 0x3f, 0xf0, 0x3f, 0xf8, 0x00, 0x3c, 0x00, 0x1c,
1293 0x00, 0x1c, 0x1f, 0xfc, 0x3f, 0xfc, 0x78, 0x1c,
1294 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1295 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 160 */
1297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1298 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
1299 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1300 0x0f, 0x80, 0x0f, 0x80, 0x03, 0x80, 0x03, 0x80,
1301 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1302 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1303 0x0f, 0xe0, 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00,
1304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 161 */
1305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1306 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
1307 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1308 0x1f, 0xf0, 0x3f, 0xf8, 0x78, 0x3c, 0x70, 0x1c,
1309 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1310 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1311 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 162 */
1313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1314 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0xe0,
1315 0x01, 0xc0, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1316 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1317 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1318 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1319 0x3f, 0xfc, 0x1f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 163 */
1321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1322 0x00, 0x00, 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8,
1323 0x3b, 0xb8, 0x39, 0xf0, 0x00, 0x00, 0x00, 0x00,
1324 0x7f, 0xf0, 0x7f, 0xf8, 0x70, 0x3c, 0x70, 0x1c,
1325 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1326 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1327 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 164 */
1329 0x00, 0x00, 0x1f, 0x38, 0x3b, 0xb8, 0x3b, 0xb8,
1330 0x39, 0xf0, 0x00, 0x00, 0x70, 0x1c, 0x70, 0x1c,
1331 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x1c,
1332 0x7c, 0x1c, 0x7e, 0x1c, 0x77, 0x1c, 0x73, 0x9c,
1333 0x71, 0xdc, 0x70, 0xfc, 0x70, 0x7c, 0x70, 0x3c,
1334 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1335 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 165 */
1337 0x00, 0x00, 0x00, 0x00, 0x1f, 0xe0, 0x1f, 0xf0,
1338 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf8, 0x1f, 0xf8,
1339 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf8,
1340 0x0f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
1341 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1343 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1344 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 166 */
1345 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0, 0x1f, 0xf0,
1346 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x38,
1347 0x38, 0x38, 0x38, 0x38, 0x38, 0x38, 0x1f, 0xf0,
1348 0x0f, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xf8,
1349 0x3f, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 167 */
1353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1354 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
1355 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1356 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x07, 0x00,
1357 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x1c,
1358 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1359 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 168 */
1361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1364 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00,
1365 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 169 */
1369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1372 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x1c, 0x00, 0x1c,
1373 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x1c,
1374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 170 */
1377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1378 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x3c, 0x00,
1379 0x7c, 0x06, 0x1c, 0x0e, 0x1c, 0x1c, 0x1c, 0x38,
1380 0x1c, 0x70, 0x1c, 0xe0, 0x1d, 0xc0, 0x03, 0x80,
1381 0x07, 0x00, 0x0e, 0xfc, 0x1d, 0xfe, 0x39, 0xce,
1382 0x71, 0xce, 0x60, 0x1c, 0x00, 0x38, 0x00, 0x70,
1383 0x00, 0xfe, 0x01, 0xfe, 0x00, 0x00, 0x00, 0x00,
1384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 171 */
1385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1386 0x00, 0x00, 0x00, 0x00, 0x0e, 0x00, 0x1e, 0x00,
1387 0x3e, 0x00, 0x0e, 0x00, 0x0e, 0x06, 0x0e, 0x0e,
1388 0x0e, 0x1c, 0x0e, 0x38, 0x0e, 0x70, 0x00, 0xe0,
1389 0x01, 0xce, 0x03, 0x9e, 0x07, 0x3e, 0x0e, 0x7e,
1390 0x1c, 0xee, 0x39, 0xce, 0x73, 0xfe, 0x63, 0xfe,
1391 0x00, 0x0e, 0x00, 0x0e, 0x00, 0x00, 0x00, 0x00,
1392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 172 */
1393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1394 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
1395 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1396 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1397 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1398 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1399 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 173 */
1401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1404 0x01, 0xce, 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70,
1405 0x1c, 0xe0, 0x39, 0xc0, 0x73, 0x80, 0x73, 0x80,
1406 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70, 0x07, 0x38,
1407 0x03, 0x9c, 0x01, 0xce, 0x00, 0x00, 0x00, 0x00,
1408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 174 */
1409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1412 0x73, 0x80, 0x39, 0xc0, 0x1c, 0xe0, 0x0e, 0x70,
1413 0x07, 0x38, 0x03, 0x9c, 0x01, 0xce, 0x01, 0xce,
1414 0x03, 0x9c, 0x07, 0x38, 0x0e, 0x70, 0x1c, 0xe0,
1415 0x39, 0xc0, 0x73, 0x80, 0x00, 0x00, 0x00, 0x00,
1416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 175 */
1417 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1418 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1419 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1420 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1421 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1422 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1423 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00,
1424 0xaa, 0xaa, 0x00, 0x00, 0xaa, 0xaa, 0x00, 0x00, /* 176 */
1425 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1426 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1427 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1428 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1429 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1430 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1431 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55,
1432 0xaa, 0xaa, 0x55, 0x55, 0xaa, 0xaa, 0x55, 0x55, /* 177 */
1433 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1434 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1435 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1436 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1437 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1438 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1439 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa,
1440 0xff, 0xff, 0xaa, 0xaa, 0xff, 0xff, 0xaa, 0xaa, /* 178 */
1441 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1442 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1443 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1444 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1445 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1446 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1447 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1448 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 179 */
1449 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1450 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1451 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1452 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
1453 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1454 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1455 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1456 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 180 */
1457 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1458 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1459 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1460 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
1461 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
1462 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1463 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1464 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 181 */
1465 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1466 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1467 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1468 0x0e, 0x70, 0x0e, 0x70, 0xfe, 0x70, 0xfe, 0x70,
1469 0xfe, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1470 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1471 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1472 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 182 */
1473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1476 0x00, 0x00, 0x00, 0x00, 0xff, 0xf0, 0xff, 0xf0,
1477 0xff, 0xf0, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1478 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1479 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1480 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 183 */
1481 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1482 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1483 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1484 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
1485 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
1486 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1487 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1488 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 184 */
1489 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1490 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1491 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1492 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
1493 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
1494 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1495 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1496 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 185 */
1497 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1498 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1499 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1500 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1501 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1502 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1503 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1504 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 186 */
1505 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1506 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1507 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1508 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0, 0x00, 0x70,
1509 0x00, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70,
1510 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1511 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1512 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 187 */
1513 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1514 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1515 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1516 0xfe, 0x70, 0xfe, 0x70, 0xfe, 0x70, 0x00, 0x70,
1517 0x00, 0x70, 0xff, 0xf0, 0xff, 0xf0, 0xff, 0xf0,
1518 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1519 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1520 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 */
1521 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1522 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1523 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1524 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xf0, 0xff, 0xf0,
1525 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1526 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1527 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1528 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 189 */
1529 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1530 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1531 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1532 0xff, 0x80, 0xff, 0x80, 0xff, 0x80, 0x03, 0x80,
1533 0x03, 0x80, 0xff, 0x80, 0xff, 0x80, 0xff, 0x80,
1534 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1535 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1536 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 190 */
1537 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1538 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1539 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1540 0x00, 0x00, 0x00, 0x00, 0xff, 0x80, 0xff, 0x80,
1541 0xff, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1542 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1543 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1544 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 191 */
1545 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1546 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1547 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1548 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
1549 0x03, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1550 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1551 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1552 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 192 */
1553 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1554 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1555 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1556 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
1557 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1558 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1559 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1560 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 193 */
1561 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1562 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1563 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1564 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
1565 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1566 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1567 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1568 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 194 */
1569 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1570 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1571 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1572 0x03, 0x80, 0x03, 0x80, 0x03, 0xff, 0x03, 0xff,
1573 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1574 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1575 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1576 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 195 */
1577 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1578 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1579 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1580 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
1581 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1582 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1583 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1584 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 196 */
1585 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1586 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1587 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1588 0x03, 0x80, 0x03, 0x80, 0xff, 0xff, 0xff, 0xff,
1589 0xff, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1590 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1591 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1592 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 197 */
1593 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1594 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1595 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1596 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
1597 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
1598 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1599 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1600 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 198 */
1601 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1602 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1603 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1604 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x7f, 0x0e, 0x7f,
1605 0x0e, 0x7f, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1606 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1607 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1608 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 199 */
1609 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1610 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1611 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1612 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
1613 0x0e, 0x00, 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff,
1614 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1615 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1616 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 200 */
1617 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1618 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1619 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1620 0x0f, 0xff, 0x0f, 0xff, 0x0f, 0xff, 0x0e, 0x00,
1621 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
1622 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1623 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1624 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 201 */
1625 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1626 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1627 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1628 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
1629 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1630 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1631 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1632 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 202 */
1633 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1634 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1635 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1636 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
1637 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
1638 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1639 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1640 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 203 */
1641 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1642 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1643 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1644 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x00,
1645 0x0e, 0x00, 0x0e, 0x7f, 0x0e, 0x7f, 0x0e, 0x7f,
1646 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1647 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1648 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 204 */
1649 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1650 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1651 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1652 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
1653 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1654 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1655 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 205 */
1657 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1658 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1659 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1660 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f, 0x00, 0x00,
1661 0x00, 0x00, 0xfe, 0x7f, 0xfe, 0x7f, 0xfe, 0x7f,
1662 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1663 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1664 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 206 */
1665 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1666 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1667 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1668 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
1669 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1670 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1671 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1672 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 207 */
1673 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1674 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1675 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1676 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
1677 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1678 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1679 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1680 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 208 */
1681 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1682 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1683 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1684 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
1685 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1686 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1687 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1688 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 209 */
1689 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1690 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1691 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1692 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
1693 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1694 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1695 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1696 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 210 */
1697 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1698 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1699 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1700 0x0e, 0x70, 0x0e, 0x70, 0x0f, 0xff, 0x0f, 0xff,
1701 0x0f, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1702 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1703 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1704 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 211 */
1705 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1706 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1707 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1708 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
1709 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
1710 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1711 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1712 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 212 */
1713 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1714 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1715 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1716 0x03, 0xff, 0x03, 0xff, 0x03, 0xff, 0x03, 0x80,
1717 0x03, 0x80, 0x03, 0xff, 0x03, 0xff, 0x03, 0xff,
1718 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1719 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1720 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 213 */
1721 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1722 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1723 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1724 0x00, 0x00, 0x00, 0x00, 0x0f, 0xff, 0x0f, 0xff,
1725 0x0f, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1726 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1727 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1728 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 214 */
1729 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1730 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1731 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1732 0x0e, 0x70, 0x0e, 0x70, 0xff, 0xff, 0xff, 0xff,
1733 0xff, 0xff, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1734 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1735 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70,
1736 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, 0x0e, 0x70, /* 215 */
1737 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1738 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1739 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1740 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x80,
1741 0x03, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1742 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1743 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1744 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 216 */
1745 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1746 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1747 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1748 0x03, 0x80, 0x03, 0x80, 0xff, 0x80, 0xff, 0x80,
1749 0xff, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1750 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1751 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1752 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 217 */
1753 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1754 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1755 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1756 0x00, 0x00, 0x00, 0x00, 0x03, 0xff, 0x03, 0xff,
1757 0x03, 0xff, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1758 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1759 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1760 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 218 */
1761 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1762 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1763 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1764 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1765 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1766 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1767 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1768 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 219 */
1769 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1770 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1771 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1772 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1773 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1774 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1775 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1776 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 220 */
1777 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1778 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1779 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1780 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1781 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1782 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1783 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00,
1784 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, /* 221 */
1785 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1786 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1787 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1788 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1789 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1790 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1791 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,
1792 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, /* 222 */
1793 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1794 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1795 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1796 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
1797 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1798 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1799 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1800 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 223 */
1801 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1802 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1803 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1804 0x1f, 0xee, 0x3f, 0xfe, 0x78, 0x3c, 0x70, 0x38,
1805 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
1806 0x70, 0x38, 0x70, 0x38, 0x70, 0x38, 0x78, 0x3c,
1807 0x3f, 0xfe, 0x1f, 0xee, 0x00, 0x00, 0x00, 0x00,
1808 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 224 */
1809 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1810 0x00, 0x00, 0x00, 0x00, 0x3f, 0xe0, 0x7f, 0xf0,
1811 0x70, 0x78, 0x70, 0x38, 0x70, 0x38, 0x70, 0x38,
1812 0x70, 0x38, 0x70, 0x70, 0x7f, 0xf0, 0x7f, 0xf0,
1813 0x70, 0x38, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1814 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x3c,
1815 0x7f, 0xf8, 0x7f, 0xf0, 0x70, 0x00, 0x70, 0x00,
1816 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 225 */
1817 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1818 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
1819 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1820 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1821 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1822 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x70, 0x00,
1823 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00,
1824 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 226 */
1825 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1826 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1827 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1828 0x7f, 0xfc, 0x7f, 0xfc, 0x70, 0x1c, 0x70, 0x1c,
1829 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1830 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1831 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1832 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 227 */
1833 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1834 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc,
1835 0x70, 0x00, 0x38, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1836 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
1837 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
1838 0x0e, 0x00, 0x1c, 0x00, 0x38, 0x00, 0x70, 0x00,
1839 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1840 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 228 */
1841 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1842 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1843 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1844 0x1f, 0xfe, 0x3f, 0xfe, 0x78, 0xf0, 0x70, 0x78,
1845 0x70, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1846 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1847 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1848 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 229 */
1849 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1850 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1851 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1852 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1853 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1854 0x70, 0x1c, 0x70, 0x3c, 0x70, 0x7c, 0x70, 0xfc,
1855 0x7f, 0xdc, 0x7f, 0x9c, 0x70, 0x00, 0x70, 0x00,
1856 0x70, 0x00, 0x70, 0x00, 0x70, 0x00, 0x00, 0x00, /* 230 */
1857 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1858 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1859 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1860 0x7f, 0xfc, 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80,
1861 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1862 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0xc0,
1863 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
1864 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 231 */
1865 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1866 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
1867 0x1f, 0xf0, 0x3f, 0xf8, 0x7b, 0xbc, 0x73, 0x9c,
1868 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
1869 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c, 0x73, 0x9c,
1870 0x73, 0x9c, 0x7b, 0xbc, 0x3f, 0xf8, 0x1f, 0xf0,
1871 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1872 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 232 */
1873 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1874 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
1875 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1876 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x77, 0xdc,
1877 0x77, 0xdc, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1878 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1879 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1880 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 233 */
1881 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1882 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x3f, 0xf8,
1883 0x78, 0x3c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1884 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1885 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1886 0x38, 0x38, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70,
1887 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00,
1888 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 234 */
1889 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1890 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf0, 0x1f, 0xf0,
1891 0x0e, 0x00, 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0,
1892 0x0f, 0xe0, 0x1f, 0xf0, 0x38, 0x38, 0x70, 0x1c,
1893 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1894 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x78, 0x3c,
1895 0x3f, 0xf8, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
1896 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 235 */
1897 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1898 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1899 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf8,
1900 0x7f, 0xfc, 0xe7, 0xce, 0xe3, 0x8e, 0xe3, 0x8e,
1901 0xe3, 0x8e, 0xe3, 0x8e, 0xe7, 0xce, 0x7f, 0xfc,
1902 0x3e, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1903 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1904 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 236 */
1905 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1906 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x1c,
1907 0x00, 0x38, 0x00, 0x38, 0x0f, 0xf0, 0x1f, 0xf8,
1908 0x38, 0xfc, 0x38, 0xfc, 0x39, 0xdc, 0x39, 0xdc,
1909 0x3b, 0x9c, 0x3b, 0x9c, 0x3f, 0x1c, 0x3f, 0x1c,
1910 0x1f, 0xf8, 0x0f, 0xf0, 0x1c, 0x00, 0x1c, 0x00,
1911 0x38, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00, 0x00,
1912 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 237 */
1913 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1914 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1915 0x00, 0x00, 0x07, 0xfc, 0x1f, 0xfc, 0x3c, 0x00,
1916 0x38, 0x00, 0x70, 0x00, 0x70, 0x00, 0x7f, 0xfc,
1917 0x7f, 0xfc, 0x70, 0x00, 0x70, 0x00, 0x38, 0x00,
1918 0x3c, 0x00, 0x1f, 0xfc, 0x07, 0xfc, 0x00, 0x00,
1919 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1920 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 238 */
1921 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1922 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1923 0x00, 0x00, 0x00, 0x00, 0x07, 0xc0, 0x1f, 0xf0,
1924 0x3c, 0x78, 0x38, 0x38, 0x70, 0x1c, 0x70, 0x1c,
1925 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1926 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c, 0x70, 0x1c,
1927 0x70, 0x1c, 0x70, 0x1c, 0x00, 0x00, 0x00, 0x00,
1928 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 239 */
1929 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1930 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1931 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
1932 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
1933 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1934 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00,
1935 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1936 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 240 */
1937 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1938 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1939 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
1940 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x7f, 0xfc,
1941 0x7f, 0xfc, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1942 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
1943 0x7f, 0xfc, 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1944 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 241 */
1945 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1946 0x00, 0x00, 0x00, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1947 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
1948 0x00, 0x70, 0x00, 0x38, 0x00, 0x38, 0x00, 0x70,
1949 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
1950 0x0e, 0x00, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00,
1951 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1952 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 242 */
1953 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1954 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x00, 0x70,
1955 0x00, 0xe0, 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00,
1956 0x0e, 0x00, 0x1c, 0x00, 0x1c, 0x00, 0x0e, 0x00,
1957 0x07, 0x00, 0x03, 0x80, 0x01, 0xc0, 0x00, 0xe0,
1958 0x00, 0x70, 0x00, 0x38, 0x00, 0x00, 0x00, 0x00,
1959 0x3f, 0xfc, 0x3f, 0xfc, 0x00, 0x00, 0x00, 0x00,
1960 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 243 */
1961 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1962 0x00, 0x00, 0x00, 0x00, 0x01, 0xf8, 0x03, 0xfc,
1963 0x03, 0x9c, 0x03, 0x9c, 0x03, 0x80, 0x03, 0x80,
1964 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1965 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1966 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1967 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1968 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, /* 244 */
1969 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1970 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1971 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1972 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1973 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1974 0x03, 0x80, 0x03, 0x80, 0x73, 0x80, 0x73, 0x80,
1975 0x7f, 0x80, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00,
1976 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 245 */
1977 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1978 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1979 0x00, 0x00, 0x03, 0x80, 0x03, 0x80, 0x03, 0x80,
1980 0x03, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc,
1981 0x7f, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x03, 0x80,
1982 0x03, 0x80, 0x03, 0x80, 0x03, 0x80, 0x00, 0x00,
1983 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1984 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 246 */
1985 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1986 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1987 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x1c,
1988 0x7f, 0xbc, 0x7b, 0xfc, 0x70, 0xf8, 0x00, 0x00,
1989 0x00, 0x00, 0x3e, 0x1c, 0x7f, 0xbc, 0x7b, 0xfc,
1990 0x70, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1991 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1992 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 247 */
1993 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1994 0x0f, 0xe0, 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70,
1995 0x1c, 0x70, 0x1c, 0x70, 0x1f, 0xf0, 0x0f, 0xe0,
1996 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1997 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1998 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1999 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2000 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 248 */
2001 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2002 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2003 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2004 0x00, 0x00, 0x03, 0xc0, 0x07, 0xe0, 0x07, 0xe0,
2005 0x07, 0xe0, 0x07, 0xe0, 0x03, 0xc0, 0x00, 0x00,
2006 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2007 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2008 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 249 */
2009 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2010 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2011 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2012 0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
2013 0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
2014 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2015 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2016 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 250 */
2017 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e,
2018 0x00, 0x3e, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
2019 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x00, 0x38,
2020 0x00, 0x38, 0x00, 0x38, 0x00, 0x38, 0x70, 0x38,
2021 0x70, 0x38, 0x70, 0x38, 0x78, 0x38, 0x3c, 0x38,
2022 0x1e, 0x38, 0x0f, 0x38, 0x07, 0xb8, 0x03, 0xf8,
2023 0x01, 0xf8, 0x00, 0xf8, 0x00, 0x00, 0x00, 0x00,
2024 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 251 */
2025 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2026 0x1f, 0xe0, 0x1f, 0xf0, 0x1c, 0x38, 0x1c, 0x38,
2027 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38, 0x1c, 0x38,
2028 0x1c, 0x38, 0x1c, 0x38, 0x00, 0x00, 0x00, 0x00,
2029 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2030 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2031 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2032 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 252 */
2033 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xe0,
2034 0x1f, 0xf0, 0x1c, 0x70, 0x1c, 0x70, 0x00, 0xe0,
2035 0x01, 0xc0, 0x03, 0x80, 0x07, 0x00, 0x0e, 0x00,
2036 0x1f, 0xf0, 0x1f, 0xf0, 0x00, 0x00, 0x00, 0x00,
2037 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2038 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2039 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2040 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 253 */
2041 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2042 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2043 0x00, 0x00, 0x00, 0x00, 0x1f, 0xf8, 0x1f, 0xf8,
2044 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
2045 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8, 0x1f, 0xf8,
2046 0x1f, 0xf8, 0x1f, 0xf8, 0x00, 0x00, 0x00, 0x00,
2047 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2048 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 254 */
2049 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2050 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2051 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2052 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2053 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2054 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2055 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2056 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 255 */
2057
2058};
2059
2060
2061const struct font_desc font_ter_16x32 = {
2062 .idx = TER16x32_IDX,
2063 .name = "TER16x32",
2064 .width = 16,
2065 .height = 32,
2066 .data = fontdata_ter16x32,
2067#ifdef __sparc__
2068 .pref = 5,
2069#else
2070 .pref = -1,
2071#endif
2072};
diff --git a/lib/fonts/fonts.c b/lib/fonts/fonts.c
index 823376ca0a8b..9969358a7af5 100644
--- a/lib/fonts/fonts.c
+++ b/lib/fonts/fonts.c
@@ -67,6 +67,10 @@ static const struct font_desc *fonts[] = {
67#undef NO_FONTS 67#undef NO_FONTS
68 &font_6x10, 68 &font_6x10,
69#endif 69#endif
70#ifdef CONFIG_FONT_TER16x32
71#undef NO_FONTS
72 &font_ter_16x32,
73#endif
70}; 74};
71 75
72#define num_fonts ARRAY_SIZE(fonts) 76#define num_fonts ARRAY_SIZE(fonts)
diff --git a/lib/gcd.c b/lib/gcd.c
index 227dea924425..7948ab27f0a4 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -10,7 +10,7 @@
10 * has decent hardware division. 10 * has decent hardware division.
11 */ 11 */
12 12
13#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS) && !defined(CPU_NO_EFFICIENT_FFS) 13#if !defined(CONFIG_CPU_NO_EFFICIENT_FFS)
14 14
15/* If __ffs is available, the even/odd algorithm benchmarks slower. */ 15/* If __ffs is available, the even/odd algorithm benchmarks slower. */
16 16
diff --git a/lib/gen_crc64table.c b/lib/gen_crc64table.c
index 9011926e4162..094b43aef8db 100644
--- a/lib/gen_crc64table.c
+++ b/lib/gen_crc64table.c
@@ -16,8 +16,6 @@
16#include <inttypes.h> 16#include <inttypes.h>
17#include <stdio.h> 17#include <stdio.h>
18 18
19#include <linux/swab.h>
20
21#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL 19#define CRC64_ECMA182_POLY 0x42F0E1EBA9EA3693ULL
22 20
23static uint64_t crc64_table[256] = {0}; 21static uint64_t crc64_table[256] = {0};
diff --git a/lib/genalloc.c b/lib/genalloc.c
index ca06adc4f445..7e85d1e37a6e 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -35,6 +35,7 @@
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/genalloc.h> 36#include <linux/genalloc.h>
37#include <linux/of_device.h> 37#include <linux/of_device.h>
38#include <linux/vmalloc.h>
38 39
39static inline size_t chunk_size(const struct gen_pool_chunk *chunk) 40static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
40{ 41{
@@ -187,7 +188,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
187 int nbytes = sizeof(struct gen_pool_chunk) + 188 int nbytes = sizeof(struct gen_pool_chunk) +
188 BITS_TO_LONGS(nbits) * sizeof(long); 189 BITS_TO_LONGS(nbits) * sizeof(long);
189 190
190 chunk = kzalloc_node(nbytes, GFP_KERNEL, nid); 191 chunk = vzalloc_node(nbytes, nid);
191 if (unlikely(chunk == NULL)) 192 if (unlikely(chunk == NULL))
192 return -ENOMEM; 193 return -ENOMEM;
193 194
@@ -251,7 +252,7 @@ void gen_pool_destroy(struct gen_pool *pool)
251 bit = find_next_bit(chunk->bits, end_bit, 0); 252 bit = find_next_bit(chunk->bits, end_bit, 0);
252 BUG_ON(bit < end_bit); 253 BUG_ON(bit < end_bit);
253 254
254 kfree(chunk); 255 vfree(chunk);
255 } 256 }
256 kfree_const(pool->name); 257 kfree_const(pool->name);
257 kfree(pool); 258 kfree(pool);
@@ -311,7 +312,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
311 end_bit = chunk_size(chunk) >> order; 312 end_bit = chunk_size(chunk) >> order;
312retry: 313retry:
313 start_bit = algo(chunk->bits, end_bit, start_bit, 314 start_bit = algo(chunk->bits, end_bit, start_bit,
314 nbits, data, pool); 315 nbits, data, pool, chunk->start_addr);
315 if (start_bit >= end_bit) 316 if (start_bit >= end_bit)
316 continue; 317 continue;
317 remain = bitmap_set_ll(chunk->bits, start_bit, nbits); 318 remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -525,7 +526,7 @@ EXPORT_SYMBOL(gen_pool_set_algo);
525 */ 526 */
526unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, 527unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
527 unsigned long start, unsigned int nr, void *data, 528 unsigned long start, unsigned int nr, void *data,
528 struct gen_pool *pool) 529 struct gen_pool *pool, unsigned long start_addr)
529{ 530{
530 return bitmap_find_next_zero_area(map, size, start, nr, 0); 531 return bitmap_find_next_zero_area(map, size, start, nr, 0);
531} 532}
@@ -543,16 +544,19 @@ EXPORT_SYMBOL(gen_pool_first_fit);
543 */ 544 */
544unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, 545unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
545 unsigned long start, unsigned int nr, void *data, 546 unsigned long start, unsigned int nr, void *data,
546 struct gen_pool *pool) 547 struct gen_pool *pool, unsigned long start_addr)
547{ 548{
548 struct genpool_data_align *alignment; 549 struct genpool_data_align *alignment;
549 unsigned long align_mask; 550 unsigned long align_mask, align_off;
550 int order; 551 int order;
551 552
552 alignment = data; 553 alignment = data;
553 order = pool->min_alloc_order; 554 order = pool->min_alloc_order;
554 align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1; 555 align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
555 return bitmap_find_next_zero_area(map, size, start, nr, align_mask); 556 align_off = (start_addr & (alignment->align - 1)) >> order;
557
558 return bitmap_find_next_zero_area_off(map, size, start, nr,
559 align_mask, align_off);
556} 560}
557EXPORT_SYMBOL(gen_pool_first_fit_align); 561EXPORT_SYMBOL(gen_pool_first_fit_align);
558 562
@@ -567,7 +571,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_align);
567 */ 571 */
568unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, 572unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
569 unsigned long start, unsigned int nr, void *data, 573 unsigned long start, unsigned int nr, void *data,
570 struct gen_pool *pool) 574 struct gen_pool *pool, unsigned long start_addr)
571{ 575{
572 struct genpool_data_fixed *fixed_data; 576 struct genpool_data_fixed *fixed_data;
573 int order; 577 int order;
@@ -601,7 +605,8 @@ EXPORT_SYMBOL(gen_pool_fixed_alloc);
601 */ 605 */
602unsigned long gen_pool_first_fit_order_align(unsigned long *map, 606unsigned long gen_pool_first_fit_order_align(unsigned long *map,
603 unsigned long size, unsigned long start, 607 unsigned long size, unsigned long start,
604 unsigned int nr, void *data, struct gen_pool *pool) 608 unsigned int nr, void *data, struct gen_pool *pool,
609 unsigned long start_addr)
605{ 610{
606 unsigned long align_mask = roundup_pow_of_two(nr) - 1; 611 unsigned long align_mask = roundup_pow_of_two(nr) - 1;
607 612
@@ -624,7 +629,7 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
624 */ 629 */
625unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, 630unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
626 unsigned long start, unsigned int nr, void *data, 631 unsigned long start, unsigned int nr, void *data,
627 struct gen_pool *pool) 632 struct gen_pool *pool, unsigned long start_addr)
628{ 633{
629 unsigned long start_bit = size; 634 unsigned long start_bit = size;
630 unsigned long len = size + 1; 635 unsigned long len = size + 1;
diff --git a/lib/idr.c b/lib/idr.c
index fab2fd5bc326..cb1db9b8d3f6 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -6,8 +6,6 @@
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/xarray.h> 7#include <linux/xarray.h>
8 8
9DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
10
11/** 9/**
12 * idr_alloc_u32() - Allocate an ID. 10 * idr_alloc_u32() - Allocate an ID.
13 * @idr: IDR handle. 11 * @idr: IDR handle.
@@ -39,10 +37,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
39 unsigned int base = idr->idr_base; 37 unsigned int base = idr->idr_base;
40 unsigned int id = *nextid; 38 unsigned int id = *nextid;
41 39
42 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr))) 40 if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
43 return -EINVAL; 41 idr->idr_rt.xa_flags |= IDR_RT_MARKER;
44 if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
45 idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
46 42
47 id = (id < base) ? 0 : id - base; 43 id = (id < base) ? 0 : id - base;
48 radix_tree_iter_init(&iter, id); 44 radix_tree_iter_init(&iter, id);
@@ -295,15 +291,13 @@ void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
295 void __rcu **slot = NULL; 291 void __rcu **slot = NULL;
296 void *entry; 292 void *entry;
297 293
298 if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
299 return ERR_PTR(-EINVAL);
300 id -= idr->idr_base; 294 id -= idr->idr_base;
301 295
302 entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot); 296 entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
303 if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE)) 297 if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
304 return ERR_PTR(-ENOENT); 298 return ERR_PTR(-ENOENT);
305 299
306 __radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL); 300 __radix_tree_replace(&idr->idr_rt, node, slot, ptr);
307 301
308 return entry; 302 return entry;
309} 303}
@@ -324,6 +318,9 @@ EXPORT_SYMBOL(idr_replace);
324 * free the individual IDs in it. You can use ida_is_empty() to find 318 * free the individual IDs in it. You can use ida_is_empty() to find
325 * out whether the IDA has any IDs currently allocated. 319 * out whether the IDA has any IDs currently allocated.
326 * 320 *
321 * The IDA handles its own locking. It is safe to call any of the IDA
322 * functions without synchronisation in your code.
323 *
327 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward 324 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
328 * limitation, it should be quite straightforward to raise the maximum. 325 * limitation, it should be quite straightforward to raise the maximum.
329 */ 326 */
@@ -331,161 +328,197 @@ EXPORT_SYMBOL(idr_replace);
331/* 328/*
332 * Developer's notes: 329 * Developer's notes:
333 * 330 *
334 * The IDA uses the functionality provided by the IDR & radix tree to store 331 * The IDA uses the functionality provided by the XArray to store bitmaps in
335 * bitmaps in each entry. The IDR_FREE tag means there is at least one bit 332 * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
336 * free, unlike the IDR where it means at least one entry is free. 333 * have been set.
337 * 334 *
338 * I considered telling the radix tree that each slot is an order-10 node 335 * I considered telling the XArray that each slot is an order-10 node
339 * and storing the bit numbers in the radix tree, but the radix tree can't 336 * and indexing by bit number, but the XArray can't allow a single multi-index
340 * allow a single multiorder entry at index 0, which would significantly 337 * entry in the head, which would significantly increase memory consumption
341 * increase memory consumption for the IDA. So instead we divide the index 338 * for the IDA. So instead we divide the index by the number of bits in the
342 * by the number of bits in the leaf bitmap before doing a radix tree lookup. 339 * leaf bitmap before doing a radix tree lookup.
343 * 340 *
344 * As an optimisation, if there are only a few low bits set in any given 341 * As an optimisation, if there are only a few low bits set in any given
345 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional 342 * leaf, instead of allocating a 128-byte bitmap, we store the bits
346 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits 343 * as a value entry. Value entries never have the XA_FREE_MARK cleared
347 * directly in the entry. By being really tricksy, we could store 344 * because we can always convert them into a bitmap entry.
348 * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising 345 *
349 * for 0-3 allocated IDs. 346 * It would be possible to optimise further; once we've run out of a
350 * 347 * single 128-byte bitmap, we currently switch to a 576-byte node, put
351 * We allow the radix tree 'exceptional' count to get out of date. Nothing 348 * the 128-byte bitmap in the first entry and then start allocating extra
352 * in the IDA nor the radix tree code checks it. If it becomes important 349 * 128-byte entries. We could instead use the 512 bytes of the node's
353 * to maintain an accurate exceptional count, switch the rcu_assign_pointer() 350 * data as a bitmap before moving to that scheme. I do not believe this
354 * calls to radix_tree_iter_replace() which will correct the exceptional 351 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
355 * count. 352 * users of the IDA and almost none of them use more than 1024 entries.
356 * 353 * Those that do use more than the 8192 IDs that the 512 bytes would
357 * The IDA always requires a lock to alloc/free. If we add a 'test_bit' 354 * provide.
355 *
356 * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
358 * equivalent, it will still need locking. Going to RCU lookup would require 357 * equivalent, it will still need locking. Going to RCU lookup would require
359 * using RCU to free bitmaps, and that's not trivial without embedding an 358 * using RCU to free bitmaps, and that's not trivial without embedding an
360 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte 359 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
361 * bitmap, which is excessive. 360 * bitmap, which is excessive.
362 */ 361 */
363 362
364#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1) 363/**
365 364 * ida_alloc_range() - Allocate an unused ID.
366static int ida_get_new_above(struct ida *ida, int start) 365 * @ida: IDA handle.
366 * @min: Lowest ID to allocate.
367 * @max: Highest ID to allocate.
368 * @gfp: Memory allocation flags.
369 *
370 * Allocate an ID between @min and @max, inclusive. The allocated ID will
371 * not exceed %INT_MAX, even if @max is larger.
372 *
373 * Context: Any context.
374 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
375 * or %-ENOSPC if there are no free IDs.
376 */
377int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
378 gfp_t gfp)
367{ 379{
368 struct radix_tree_root *root = &ida->ida_rt; 380 XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
369 void __rcu **slot; 381 unsigned bit = min % IDA_BITMAP_BITS;
370 struct radix_tree_iter iter; 382 unsigned long flags;
371 struct ida_bitmap *bitmap; 383 struct ida_bitmap *bitmap, *alloc = NULL;
372 unsigned long index; 384
373 unsigned bit, ebit; 385 if ((int)min < 0)
374 int new; 386 return -ENOSPC;
375 387
376 index = start / IDA_BITMAP_BITS; 388 if ((int)max < 0)
377 bit = start % IDA_BITMAP_BITS; 389 max = INT_MAX;
378 ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; 390
379 391retry:
380 slot = radix_tree_iter_init(&iter, index); 392 xas_lock_irqsave(&xas, flags);
381 for (;;) { 393next:
382 if (slot) 394 bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
383 slot = radix_tree_next_slot(slot, &iter, 395 if (xas.xa_index > min / IDA_BITMAP_BITS)
384 RADIX_TREE_ITER_TAGGED); 396 bit = 0;
385 if (!slot) { 397 if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
386 slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX); 398 goto nospc;
387 if (IS_ERR(slot)) { 399
388 if (slot == ERR_PTR(-ENOMEM)) 400 if (xa_is_value(bitmap)) {
389 return -EAGAIN; 401 unsigned long tmp = xa_to_value(bitmap);
390 return PTR_ERR(slot); 402
403 if (bit < BITS_PER_XA_VALUE) {
404 bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
405 if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
406 goto nospc;
407 if (bit < BITS_PER_XA_VALUE) {
408 tmp |= 1UL << bit;
409 xas_store(&xas, xa_mk_value(tmp));
410 goto out;
391 } 411 }
392 } 412 }
393 if (iter.index > index) { 413 bitmap = alloc;
394 bit = 0; 414 if (!bitmap)
395 ebit = RADIX_TREE_EXCEPTIONAL_SHIFT; 415 bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
396 } 416 if (!bitmap)
397 new = iter.index * IDA_BITMAP_BITS; 417 goto alloc;
398 bitmap = rcu_dereference_raw(*slot); 418 bitmap->bitmap[0] = tmp;
399 if (radix_tree_exception(bitmap)) { 419 xas_store(&xas, bitmap);
400 unsigned long tmp = (unsigned long)bitmap; 420 if (xas_error(&xas)) {
401 ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit); 421 bitmap->bitmap[0] = 0;
402 if (ebit < BITS_PER_LONG) { 422 goto out;
403 tmp |= 1UL << ebit;
404 rcu_assign_pointer(*slot, (void *)tmp);
405 return new + ebit -
406 RADIX_TREE_EXCEPTIONAL_SHIFT;
407 }
408 bitmap = this_cpu_xchg(ida_bitmap, NULL);
409 if (!bitmap)
410 return -EAGAIN;
411 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
412 rcu_assign_pointer(*slot, bitmap);
413 } 423 }
424 }
414 425
415 if (bitmap) { 426 if (bitmap) {
416 bit = find_next_zero_bit(bitmap->bitmap, 427 bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
417 IDA_BITMAP_BITS, bit); 428 if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
418 new += bit; 429 goto nospc;
419 if (new < 0) 430 if (bit == IDA_BITMAP_BITS)
420 return -ENOSPC; 431 goto next;
421 if (bit == IDA_BITMAP_BITS)
422 continue;
423 432
424 __set_bit(bit, bitmap->bitmap); 433 __set_bit(bit, bitmap->bitmap);
425 if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS)) 434 if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
426 radix_tree_iter_tag_clear(root, &iter, 435 xas_clear_mark(&xas, XA_FREE_MARK);
427 IDR_FREE); 436 } else {
437 if (bit < BITS_PER_XA_VALUE) {
438 bitmap = xa_mk_value(1UL << bit);
428 } else { 439 } else {
429 new += bit; 440 bitmap = alloc;
430 if (new < 0)
431 return -ENOSPC;
432 if (ebit < BITS_PER_LONG) {
433 bitmap = (void *)((1UL << ebit) |
434 RADIX_TREE_EXCEPTIONAL_ENTRY);
435 radix_tree_iter_replace(root, &iter, slot,
436 bitmap);
437 return new;
438 }
439 bitmap = this_cpu_xchg(ida_bitmap, NULL);
440 if (!bitmap) 441 if (!bitmap)
441 return -EAGAIN; 442 bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
443 if (!bitmap)
444 goto alloc;
442 __set_bit(bit, bitmap->bitmap); 445 __set_bit(bit, bitmap->bitmap);
443 radix_tree_iter_replace(root, &iter, slot, bitmap);
444 } 446 }
445 447 xas_store(&xas, bitmap);
446 return new; 448 }
449out:
450 xas_unlock_irqrestore(&xas, flags);
451 if (xas_nomem(&xas, gfp)) {
452 xas.xa_index = min / IDA_BITMAP_BITS;
453 bit = min % IDA_BITMAP_BITS;
454 goto retry;
447 } 455 }
456 if (bitmap != alloc)
457 kfree(alloc);
458 if (xas_error(&xas))
459 return xas_error(&xas);
460 return xas.xa_index * IDA_BITMAP_BITS + bit;
461alloc:
462 xas_unlock_irqrestore(&xas, flags);
463 alloc = kzalloc(sizeof(*bitmap), gfp);
464 if (!alloc)
465 return -ENOMEM;
466 xas_set(&xas, min / IDA_BITMAP_BITS);
467 bit = min % IDA_BITMAP_BITS;
468 goto retry;
469nospc:
470 xas_unlock_irqrestore(&xas, flags);
471 return -ENOSPC;
448} 472}
473EXPORT_SYMBOL(ida_alloc_range);
449 474
450static void ida_remove(struct ida *ida, int id) 475/**
476 * ida_free() - Release an allocated ID.
477 * @ida: IDA handle.
478 * @id: Previously allocated ID.
479 *
480 * Context: Any context.
481 */
482void ida_free(struct ida *ida, unsigned int id)
451{ 483{
452 unsigned long index = id / IDA_BITMAP_BITS; 484 XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
453 unsigned offset = id % IDA_BITMAP_BITS; 485 unsigned bit = id % IDA_BITMAP_BITS;
454 struct ida_bitmap *bitmap; 486 struct ida_bitmap *bitmap;
455 unsigned long *btmp; 487 unsigned long flags;
456 struct radix_tree_iter iter;
457 void __rcu **slot;
458 488
459 slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index); 489 BUG_ON((int)id < 0);
460 if (!slot) 490
461 goto err; 491 xas_lock_irqsave(&xas, flags);
492 bitmap = xas_load(&xas);
462 493
463 bitmap = rcu_dereference_raw(*slot); 494 if (xa_is_value(bitmap)) {
464 if (radix_tree_exception(bitmap)) { 495 unsigned long v = xa_to_value(bitmap);
465 btmp = (unsigned long *)slot; 496 if (bit >= BITS_PER_XA_VALUE)
466 offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
467 if (offset >= BITS_PER_LONG)
468 goto err; 497 goto err;
498 if (!(v & (1UL << bit)))
499 goto err;
500 v &= ~(1UL << bit);
501 if (!v)
502 goto delete;
503 xas_store(&xas, xa_mk_value(v));
469 } else { 504 } else {
470 btmp = bitmap->bitmap; 505 if (!test_bit(bit, bitmap->bitmap))
471 } 506 goto err;
472 if (!test_bit(offset, btmp)) 507 __clear_bit(bit, bitmap->bitmap);
473 goto err; 508 xas_set_mark(&xas, XA_FREE_MARK);
474 509 if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
475 __clear_bit(offset, btmp); 510 kfree(bitmap);
476 radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE); 511delete:
477 if (radix_tree_exception(bitmap)) { 512 xas_store(&xas, NULL);
478 if (rcu_dereference_raw(*slot) == 513 }
479 (void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
480 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
481 } else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
482 kfree(bitmap);
483 radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
484 } 514 }
515 xas_unlock_irqrestore(&xas, flags);
485 return; 516 return;
486 err: 517 err:
518 xas_unlock_irqrestore(&xas, flags);
487 WARN(1, "ida_free called for id=%d which is not allocated.\n", id); 519 WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
488} 520}
521EXPORT_SYMBOL(ida_free);
489 522
490/** 523/**
491 * ida_destroy() - Free all IDs. 524 * ida_destroy() - Free all IDs.
@@ -500,80 +533,60 @@ static void ida_remove(struct ida *ida, int id)
500 */ 533 */
501void ida_destroy(struct ida *ida) 534void ida_destroy(struct ida *ida)
502{ 535{
536 XA_STATE(xas, &ida->xa, 0);
537 struct ida_bitmap *bitmap;
503 unsigned long flags; 538 unsigned long flags;
504 struct radix_tree_iter iter;
505 void __rcu **slot;
506 539
507 xa_lock_irqsave(&ida->ida_rt, flags); 540 xas_lock_irqsave(&xas, flags);
508 radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) { 541 xas_for_each(&xas, bitmap, ULONG_MAX) {
509 struct ida_bitmap *bitmap = rcu_dereference_raw(*slot); 542 if (!xa_is_value(bitmap))
510 if (!radix_tree_exception(bitmap))
511 kfree(bitmap); 543 kfree(bitmap);
512 radix_tree_iter_delete(&ida->ida_rt, &iter, slot); 544 xas_store(&xas, NULL);
513 } 545 }
514 xa_unlock_irqrestore(&ida->ida_rt, flags); 546 xas_unlock_irqrestore(&xas, flags);
515} 547}
516EXPORT_SYMBOL(ida_destroy); 548EXPORT_SYMBOL(ida_destroy);
517 549
518/** 550#ifndef __KERNEL__
519 * ida_alloc_range() - Allocate an unused ID. 551extern void xa_dump_index(unsigned long index, unsigned int shift);
520 * @ida: IDA handle. 552#define IDA_CHUNK_SHIFT ilog2(IDA_BITMAP_BITS)
521 * @min: Lowest ID to allocate.
522 * @max: Highest ID to allocate.
523 * @gfp: Memory allocation flags.
524 *
525 * Allocate an ID between @min and @max, inclusive. The allocated ID will
526 * not exceed %INT_MAX, even if @max is larger.
527 *
528 * Context: Any context.
529 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
530 * or %-ENOSPC if there are no free IDs.
531 */
532int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
533 gfp_t gfp)
534{
535 int id = 0;
536 unsigned long flags;
537 553
538 if ((int)min < 0) 554static void ida_dump_entry(void *entry, unsigned long index)
539 return -ENOSPC; 555{
540 556 unsigned long i;
541 if ((int)max < 0) 557
542 max = INT_MAX; 558 if (!entry)
543 559 return;
544again: 560
545 xa_lock_irqsave(&ida->ida_rt, flags); 561 if (xa_is_node(entry)) {
546 id = ida_get_new_above(ida, min); 562 struct xa_node *node = xa_to_node(entry);
547 if (id > (int)max) { 563 unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
548 ida_remove(ida, id); 564 XA_CHUNK_SHIFT;
549 id = -ENOSPC; 565
550 } 566 xa_dump_index(index * IDA_BITMAP_BITS, shift);
551 xa_unlock_irqrestore(&ida->ida_rt, flags); 567 xa_dump_node(node);
568 for (i = 0; i < XA_CHUNK_SIZE; i++)
569 ida_dump_entry(node->slots[i],
570 index | (i << node->shift));
571 } else if (xa_is_value(entry)) {
572 xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
573 pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
574 } else {
575 struct ida_bitmap *bitmap = entry;
552 576
553 if (unlikely(id == -EAGAIN)) { 577 xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
554 if (!ida_pre_get(ida, gfp)) 578 pr_cont("bitmap: %p data", bitmap);
555 return -ENOMEM; 579 for (i = 0; i < IDA_BITMAP_LONGS; i++)
556 goto again; 580 pr_cont(" %lx", bitmap->bitmap[i]);
581 pr_cont("\n");
557 } 582 }
558
559 return id;
560} 583}
561EXPORT_SYMBOL(ida_alloc_range);
562 584
563/** 585static void ida_dump(struct ida *ida)
564 * ida_free() - Release an allocated ID.
565 * @ida: IDA handle.
566 * @id: Previously allocated ID.
567 *
568 * Context: Any context.
569 */
570void ida_free(struct ida *ida, unsigned int id)
571{ 586{
572 unsigned long flags; 587 struct xarray *xa = &ida->xa;
573 588 pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
574 BUG_ON((int)id < 0); 589 xa->xa_flags >> ROOT_TAG_SHIFT);
575 xa_lock_irqsave(&ida->ida_rt, flags); 590 ida_dump_entry(xa->xa_head, 0);
576 ida_remove(ida, id);
577 xa_unlock_irqrestore(&ida->ida_rt, flags);
578} 591}
579EXPORT_SYMBOL(ida_free); 592#endif
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 517f5853ffed..063213685563 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -76,83 +76,123 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
76 return 0; 76 return 0;
77} 77}
78 78
79static int ioremap_try_huge_pmd(pmd_t *pmd, unsigned long addr,
80 unsigned long end, phys_addr_t phys_addr,
81 pgprot_t prot)
82{
83 if (!ioremap_pmd_enabled())
84 return 0;
85
86 if ((end - addr) != PMD_SIZE)
87 return 0;
88
89 if (!IS_ALIGNED(phys_addr, PMD_SIZE))
90 return 0;
91
92 if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
93 return 0;
94
95 return pmd_set_huge(pmd, phys_addr, prot);
96}
97
79static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr, 98static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 99 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
81{ 100{
82 pmd_t *pmd; 101 pmd_t *pmd;
83 unsigned long next; 102 unsigned long next;
84 103
85 phys_addr -= addr;
86 pmd = pmd_alloc(&init_mm, pud, addr); 104 pmd = pmd_alloc(&init_mm, pud, addr);
87 if (!pmd) 105 if (!pmd)
88 return -ENOMEM; 106 return -ENOMEM;
89 do { 107 do {
90 next = pmd_addr_end(addr, end); 108 next = pmd_addr_end(addr, end);
91 109
92 if (ioremap_pmd_enabled() && 110 if (ioremap_try_huge_pmd(pmd, addr, next, phys_addr, prot))
93 ((next - addr) == PMD_SIZE) && 111 continue;
94 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
95 pmd_free_pte_page(pmd, addr)) {
96 if (pmd_set_huge(pmd, phys_addr + addr, prot))
97 continue;
98 }
99 112
100 if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot)) 113 if (ioremap_pte_range(pmd, addr, next, phys_addr, prot))
101 return -ENOMEM; 114 return -ENOMEM;
102 } while (pmd++, addr = next, addr != end); 115 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
103 return 0; 116 return 0;
104} 117}
105 118
119static int ioremap_try_huge_pud(pud_t *pud, unsigned long addr,
120 unsigned long end, phys_addr_t phys_addr,
121 pgprot_t prot)
122{
123 if (!ioremap_pud_enabled())
124 return 0;
125
126 if ((end - addr) != PUD_SIZE)
127 return 0;
128
129 if (!IS_ALIGNED(phys_addr, PUD_SIZE))
130 return 0;
131
132 if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
133 return 0;
134
135 return pud_set_huge(pud, phys_addr, prot);
136}
137
106static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, 138static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
107 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 139 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
108{ 140{
109 pud_t *pud; 141 pud_t *pud;
110 unsigned long next; 142 unsigned long next;
111 143
112 phys_addr -= addr;
113 pud = pud_alloc(&init_mm, p4d, addr); 144 pud = pud_alloc(&init_mm, p4d, addr);
114 if (!pud) 145 if (!pud)
115 return -ENOMEM; 146 return -ENOMEM;
116 do { 147 do {
117 next = pud_addr_end(addr, end); 148 next = pud_addr_end(addr, end);
118 149
119 if (ioremap_pud_enabled() && 150 if (ioremap_try_huge_pud(pud, addr, next, phys_addr, prot))
120 ((next - addr) == PUD_SIZE) && 151 continue;
121 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
122 pud_free_pmd_page(pud, addr)) {
123 if (pud_set_huge(pud, phys_addr + addr, prot))
124 continue;
125 }
126 152
127 if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, prot)) 153 if (ioremap_pmd_range(pud, addr, next, phys_addr, prot))
128 return -ENOMEM; 154 return -ENOMEM;
129 } while (pud++, addr = next, addr != end); 155 } while (pud++, phys_addr += (next - addr), addr = next, addr != end);
130 return 0; 156 return 0;
131} 157}
132 158
159static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr,
160 unsigned long end, phys_addr_t phys_addr,
161 pgprot_t prot)
162{
163 if (!ioremap_p4d_enabled())
164 return 0;
165
166 if ((end - addr) != P4D_SIZE)
167 return 0;
168
169 if (!IS_ALIGNED(phys_addr, P4D_SIZE))
170 return 0;
171
172 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
173 return 0;
174
175 return p4d_set_huge(p4d, phys_addr, prot);
176}
177
133static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr, 178static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,
134 unsigned long end, phys_addr_t phys_addr, pgprot_t prot) 179 unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
135{ 180{
136 p4d_t *p4d; 181 p4d_t *p4d;
137 unsigned long next; 182 unsigned long next;
138 183
139 phys_addr -= addr;
140 p4d = p4d_alloc(&init_mm, pgd, addr); 184 p4d = p4d_alloc(&init_mm, pgd, addr);
141 if (!p4d) 185 if (!p4d)
142 return -ENOMEM; 186 return -ENOMEM;
143 do { 187 do {
144 next = p4d_addr_end(addr, end); 188 next = p4d_addr_end(addr, end);
145 189
146 if (ioremap_p4d_enabled() && 190 if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot))
147 ((next - addr) == P4D_SIZE) && 191 continue;
148 IS_ALIGNED(phys_addr + addr, P4D_SIZE)) {
149 if (p4d_set_huge(p4d, phys_addr + addr, prot))
150 continue;
151 }
152 192
153 if (ioremap_pud_range(p4d, addr, next, phys_addr + addr, prot)) 193 if (ioremap_pud_range(p4d, addr, next, phys_addr, prot))
154 return -ENOMEM; 194 return -ENOMEM;
155 } while (p4d++, addr = next, addr != end); 195 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
156 return 0; 196 return 0;
157} 197}
158 198
@@ -168,14 +208,13 @@ int ioremap_page_range(unsigned long addr,
168 BUG_ON(addr >= end); 208 BUG_ON(addr >= end);
169 209
170 start = addr; 210 start = addr;
171 phys_addr -= addr;
172 pgd = pgd_offset_k(addr); 211 pgd = pgd_offset_k(addr);
173 do { 212 do {
174 next = pgd_addr_end(addr, end); 213 next = pgd_addr_end(addr, end);
175 err = ioremap_p4d_range(pgd, addr, next, phys_addr+addr, prot); 214 err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot);
176 if (err) 215 if (err)
177 break; 216 break;
178 } while (pgd++, addr = next, addr != end); 217 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
179 218
180 flush_cache_vmap(start, end); 219 flush_cache_vmap(start, end);
181 220
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8be175df3075..be4bd627caf0 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -6,6 +6,7 @@
6#include <linux/vmalloc.h> 6#include <linux/vmalloc.h>
7#include <linux/splice.h> 7#include <linux/splice.h>
8#include <net/checksum.h> 8#include <net/checksum.h>
9#include <linux/scatterlist.h>
9 10
10#define PIPE_PARANOIA /* for now */ 11#define PIPE_PARANOIA /* for now */
11 12
@@ -83,6 +84,7 @@
83 const struct kvec *kvec; \ 84 const struct kvec *kvec; \
84 struct kvec v; \ 85 struct kvec v; \
85 iterate_kvec(i, n, v, kvec, skip, (K)) \ 86 iterate_kvec(i, n, v, kvec, skip, (K)) \
87 } else if (unlikely(i->type & ITER_DISCARD)) { \
86 } else { \ 88 } else { \
87 const struct iovec *iov; \ 89 const struct iovec *iov; \
88 struct iovec v; \ 90 struct iovec v; \
@@ -114,6 +116,8 @@
114 } \ 116 } \
115 i->nr_segs -= kvec - i->kvec; \ 117 i->nr_segs -= kvec - i->kvec; \
116 i->kvec = kvec; \ 118 i->kvec = kvec; \
119 } else if (unlikely(i->type & ITER_DISCARD)) { \
120 skip += n; \
117 } else { \ 121 } else { \
118 const struct iovec *iov; \ 122 const struct iovec *iov; \
119 struct iovec v; \ 123 struct iovec v; \
@@ -132,7 +136,7 @@
132 136
133static int copyout(void __user *to, const void *from, size_t n) 137static int copyout(void __user *to, const void *from, size_t n)
134{ 138{
135 if (access_ok(VERIFY_WRITE, to, n)) { 139 if (access_ok(to, n)) {
136 kasan_check_read(from, n); 140 kasan_check_read(from, n);
137 n = raw_copy_to_user(to, from, n); 141 n = raw_copy_to_user(to, from, n);
138 } 142 }
@@ -141,7 +145,7 @@ static int copyout(void __user *to, const void *from, size_t n)
141 145
142static int copyin(void *to, const void __user *from, size_t n) 146static int copyin(void *to, const void __user *from, size_t n)
143{ 147{
144 if (access_ok(VERIFY_READ, from, n)) { 148 if (access_ok(from, n)) {
145 kasan_check_write(to, n); 149 kasan_check_write(to, n);
146 n = raw_copy_from_user(to, from, n); 150 n = raw_copy_from_user(to, from, n);
147 } 151 }
@@ -428,17 +432,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
428} 432}
429EXPORT_SYMBOL(iov_iter_fault_in_readable); 433EXPORT_SYMBOL(iov_iter_fault_in_readable);
430 434
431void iov_iter_init(struct iov_iter *i, int direction, 435void iov_iter_init(struct iov_iter *i, unsigned int direction,
432 const struct iovec *iov, unsigned long nr_segs, 436 const struct iovec *iov, unsigned long nr_segs,
433 size_t count) 437 size_t count)
434{ 438{
439 WARN_ON(direction & ~(READ | WRITE));
440 direction &= READ | WRITE;
441
435 /* It will get better. Eventually... */ 442 /* It will get better. Eventually... */
436 if (uaccess_kernel()) { 443 if (uaccess_kernel()) {
437 direction |= ITER_KVEC; 444 i->type = ITER_KVEC | direction;
438 i->type = direction;
439 i->kvec = (struct kvec *)iov; 445 i->kvec = (struct kvec *)iov;
440 } else { 446 } else {
441 i->type = direction; 447 i->type = ITER_IOVEC | direction;
442 i->iov = iov; 448 i->iov = iov;
443 } 449 }
444 i->nr_segs = nr_segs; 450 i->nr_segs = nr_segs;
@@ -555,10 +561,48 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
555 return bytes; 561 return bytes;
556} 562}
557 563
564static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
565 __wsum sum, size_t off)
566{
567 __wsum next = csum_partial_copy_nocheck(from, to, len, 0);
568 return csum_block_add(sum, next, off);
569}
570
571static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
572 __wsum *csum, struct iov_iter *i)
573{
574 struct pipe_inode_info *pipe = i->pipe;
575 size_t n, r;
576 size_t off = 0;
577 __wsum sum = *csum;
578 int idx;
579
580 if (!sanity(i))
581 return 0;
582
583 bytes = n = push_pipe(i, bytes, &idx, &r);
584 if (unlikely(!n))
585 return 0;
586 for ( ; n; idx = next_idx(idx, pipe), r = 0) {
587 size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
588 char *p = kmap_atomic(pipe->bufs[idx].page);
589 sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
590 kunmap_atomic(p);
591 i->idx = idx;
592 i->iov_offset = r + chunk;
593 n -= chunk;
594 off += chunk;
595 addr += chunk;
596 }
597 i->count -= bytes;
598 *csum = sum;
599 return bytes;
600}
601
558size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 602size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
559{ 603{
560 const char *from = addr; 604 const char *from = addr;
561 if (unlikely(i->type & ITER_PIPE)) 605 if (unlikely(iov_iter_is_pipe(i)))
562 return copy_pipe_to_iter(addr, bytes, i); 606 return copy_pipe_to_iter(addr, bytes, i);
563 if (iter_is_iovec(i)) 607 if (iter_is_iovec(i))
564 might_fault(); 608 might_fault();
@@ -576,7 +620,7 @@ EXPORT_SYMBOL(_copy_to_iter);
576#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE 620#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
577static int copyout_mcsafe(void __user *to, const void *from, size_t n) 621static int copyout_mcsafe(void __user *to, const void *from, size_t n)
578{ 622{
579 if (access_ok(VERIFY_WRITE, to, n)) { 623 if (access_ok(to, n)) {
580 kasan_check_read(from, n); 624 kasan_check_read(from, n);
581 n = copy_to_user_mcsafe((__force void *) to, from, n); 625 n = copy_to_user_mcsafe((__force void *) to, from, n);
582 } 626 }
@@ -658,7 +702,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
658 const char *from = addr; 702 const char *from = addr;
659 unsigned long rem, curr_addr, s_addr = (unsigned long) addr; 703 unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
660 704
661 if (unlikely(i->type & ITER_PIPE)) 705 if (unlikely(iov_iter_is_pipe(i)))
662 return copy_pipe_to_iter_mcsafe(addr, bytes, i); 706 return copy_pipe_to_iter_mcsafe(addr, bytes, i);
663 if (iter_is_iovec(i)) 707 if (iter_is_iovec(i))
664 might_fault(); 708 might_fault();
@@ -692,7 +736,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
692size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 736size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
693{ 737{
694 char *to = addr; 738 char *to = addr;
695 if (unlikely(i->type & ITER_PIPE)) { 739 if (unlikely(iov_iter_is_pipe(i))) {
696 WARN_ON(1); 740 WARN_ON(1);
697 return 0; 741 return 0;
698 } 742 }
@@ -712,7 +756,7 @@ EXPORT_SYMBOL(_copy_from_iter);
712bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) 756bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
713{ 757{
714 char *to = addr; 758 char *to = addr;
715 if (unlikely(i->type & ITER_PIPE)) { 759 if (unlikely(iov_iter_is_pipe(i))) {
716 WARN_ON(1); 760 WARN_ON(1);
717 return false; 761 return false;
718 } 762 }
@@ -739,7 +783,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
739size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 783size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
740{ 784{
741 char *to = addr; 785 char *to = addr;
742 if (unlikely(i->type & ITER_PIPE)) { 786 if (unlikely(iov_iter_is_pipe(i))) {
743 WARN_ON(1); 787 WARN_ON(1);
744 return 0; 788 return 0;
745 } 789 }
@@ -773,7 +817,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
773size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 817size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
774{ 818{
775 char *to = addr; 819 char *to = addr;
776 if (unlikely(i->type & ITER_PIPE)) { 820 if (unlikely(iov_iter_is_pipe(i))) {
777 WARN_ON(1); 821 WARN_ON(1);
778 return 0; 822 return 0;
779 } 823 }
@@ -794,7 +838,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
794bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i) 838bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
795{ 839{
796 char *to = addr; 840 char *to = addr;
797 if (unlikely(i->type & ITER_PIPE)) { 841 if (unlikely(iov_iter_is_pipe(i))) {
798 WARN_ON(1); 842 WARN_ON(1);
799 return false; 843 return false;
800 } 844 }
@@ -836,7 +880,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
836 size_t wanted = copy_to_iter(kaddr + offset, bytes, i); 880 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
837 kunmap_atomic(kaddr); 881 kunmap_atomic(kaddr);
838 return wanted; 882 return wanted;
839 } else if (likely(!(i->type & ITER_PIPE))) 883 } else if (unlikely(iov_iter_is_discard(i)))
884 return bytes;
885 else if (likely(!iov_iter_is_pipe(i)))
840 return copy_page_to_iter_iovec(page, offset, bytes, i); 886 return copy_page_to_iter_iovec(page, offset, bytes, i);
841 else 887 else
842 return copy_page_to_iter_pipe(page, offset, bytes, i); 888 return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +894,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
848{ 894{
849 if (unlikely(!page_copy_sane(page, offset, bytes))) 895 if (unlikely(!page_copy_sane(page, offset, bytes)))
850 return 0; 896 return 0;
851 if (unlikely(i->type & ITER_PIPE)) { 897 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
852 WARN_ON(1); 898 WARN_ON(1);
853 return 0; 899 return 0;
854 } 900 }
@@ -888,7 +934,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
888 934
889size_t iov_iter_zero(size_t bytes, struct iov_iter *i) 935size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
890{ 936{
891 if (unlikely(i->type & ITER_PIPE)) 937 if (unlikely(iov_iter_is_pipe(i)))
892 return pipe_zero(bytes, i); 938 return pipe_zero(bytes, i);
893 iterate_and_advance(i, bytes, v, 939 iterate_and_advance(i, bytes, v,
894 clear_user(v.iov_base, v.iov_len), 940 clear_user(v.iov_base, v.iov_len),
@@ -908,7 +954,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
908 kunmap_atomic(kaddr); 954 kunmap_atomic(kaddr);
909 return 0; 955 return 0;
910 } 956 }
911 if (unlikely(i->type & ITER_PIPE)) { 957 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
912 kunmap_atomic(kaddr); 958 kunmap_atomic(kaddr);
913 WARN_ON(1); 959 WARN_ON(1);
914 return 0; 960 return 0;
@@ -972,10 +1018,14 @@ static void pipe_advance(struct iov_iter *i, size_t size)
972 1018
973void iov_iter_advance(struct iov_iter *i, size_t size) 1019void iov_iter_advance(struct iov_iter *i, size_t size)
974{ 1020{
975 if (unlikely(i->type & ITER_PIPE)) { 1021 if (unlikely(iov_iter_is_pipe(i))) {
976 pipe_advance(i, size); 1022 pipe_advance(i, size);
977 return; 1023 return;
978 } 1024 }
1025 if (unlikely(iov_iter_is_discard(i))) {
1026 i->count -= size;
1027 return;
1028 }
979 iterate_and_advance(i, size, v, 0, 0, 0) 1029 iterate_and_advance(i, size, v, 0, 0, 0)
980} 1030}
981EXPORT_SYMBOL(iov_iter_advance); 1031EXPORT_SYMBOL(iov_iter_advance);
@@ -987,7 +1037,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
987 if (WARN_ON(unroll > MAX_RW_COUNT)) 1037 if (WARN_ON(unroll > MAX_RW_COUNT))
988 return; 1038 return;
989 i->count += unroll; 1039 i->count += unroll;
990 if (unlikely(i->type & ITER_PIPE)) { 1040 if (unlikely(iov_iter_is_pipe(i))) {
991 struct pipe_inode_info *pipe = i->pipe; 1041 struct pipe_inode_info *pipe = i->pipe;
992 int idx = i->idx; 1042 int idx = i->idx;
993 size_t off = i->iov_offset; 1043 size_t off = i->iov_offset;
@@ -1011,12 +1061,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
1011 pipe_truncate(i); 1061 pipe_truncate(i);
1012 return; 1062 return;
1013 } 1063 }
1064 if (unlikely(iov_iter_is_discard(i)))
1065 return;
1014 if (unroll <= i->iov_offset) { 1066 if (unroll <= i->iov_offset) {
1015 i->iov_offset -= unroll; 1067 i->iov_offset -= unroll;
1016 return; 1068 return;
1017 } 1069 }
1018 unroll -= i->iov_offset; 1070 unroll -= i->iov_offset;
1019 if (i->type & ITER_BVEC) { 1071 if (iov_iter_is_bvec(i)) {
1020 const struct bio_vec *bvec = i->bvec; 1072 const struct bio_vec *bvec = i->bvec;
1021 while (1) { 1073 while (1) {
1022 size_t n = (--bvec)->bv_len; 1074 size_t n = (--bvec)->bv_len;
@@ -1049,23 +1101,25 @@ EXPORT_SYMBOL(iov_iter_revert);
1049 */ 1101 */
1050size_t iov_iter_single_seg_count(const struct iov_iter *i) 1102size_t iov_iter_single_seg_count(const struct iov_iter *i)
1051{ 1103{
1052 if (unlikely(i->type & ITER_PIPE)) 1104 if (unlikely(iov_iter_is_pipe(i)))
1053 return i->count; // it is a silly place, anyway 1105 return i->count; // it is a silly place, anyway
1054 if (i->nr_segs == 1) 1106 if (i->nr_segs == 1)
1055 return i->count; 1107 return i->count;
1056 else if (i->type & ITER_BVEC) 1108 if (unlikely(iov_iter_is_discard(i)))
1109 return i->count;
1110 else if (iov_iter_is_bvec(i))
1057 return min(i->count, i->bvec->bv_len - i->iov_offset); 1111 return min(i->count, i->bvec->bv_len - i->iov_offset);
1058 else 1112 else
1059 return min(i->count, i->iov->iov_len - i->iov_offset); 1113 return min(i->count, i->iov->iov_len - i->iov_offset);
1060} 1114}
1061EXPORT_SYMBOL(iov_iter_single_seg_count); 1115EXPORT_SYMBOL(iov_iter_single_seg_count);
1062 1116
1063void iov_iter_kvec(struct iov_iter *i, int direction, 1117void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
1064 const struct kvec *kvec, unsigned long nr_segs, 1118 const struct kvec *kvec, unsigned long nr_segs,
1065 size_t count) 1119 size_t count)
1066{ 1120{
1067 BUG_ON(!(direction & ITER_KVEC)); 1121 WARN_ON(direction & ~(READ | WRITE));
1068 i->type = direction; 1122 i->type = ITER_KVEC | (direction & (READ | WRITE));
1069 i->kvec = kvec; 1123 i->kvec = kvec;
1070 i->nr_segs = nr_segs; 1124 i->nr_segs = nr_segs;
1071 i->iov_offset = 0; 1125 i->iov_offset = 0;
@@ -1073,12 +1127,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction,
1073} 1127}
1074EXPORT_SYMBOL(iov_iter_kvec); 1128EXPORT_SYMBOL(iov_iter_kvec);
1075 1129
1076void iov_iter_bvec(struct iov_iter *i, int direction, 1130void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
1077 const struct bio_vec *bvec, unsigned long nr_segs, 1131 const struct bio_vec *bvec, unsigned long nr_segs,
1078 size_t count) 1132 size_t count)
1079{ 1133{
1080 BUG_ON(!(direction & ITER_BVEC)); 1134 WARN_ON(direction & ~(READ | WRITE));
1081 i->type = direction; 1135 i->type = ITER_BVEC | (direction & (READ | WRITE));
1082 i->bvec = bvec; 1136 i->bvec = bvec;
1083 i->nr_segs = nr_segs; 1137 i->nr_segs = nr_segs;
1084 i->iov_offset = 0; 1138 i->iov_offset = 0;
@@ -1086,13 +1140,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
1086} 1140}
1087EXPORT_SYMBOL(iov_iter_bvec); 1141EXPORT_SYMBOL(iov_iter_bvec);
1088 1142
1089void iov_iter_pipe(struct iov_iter *i, int direction, 1143void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
1090 struct pipe_inode_info *pipe, 1144 struct pipe_inode_info *pipe,
1091 size_t count) 1145 size_t count)
1092{ 1146{
1093 BUG_ON(direction != ITER_PIPE); 1147 BUG_ON(direction != READ);
1094 WARN_ON(pipe->nrbufs == pipe->buffers); 1148 WARN_ON(pipe->nrbufs == pipe->buffers);
1095 i->type = direction; 1149 i->type = ITER_PIPE | READ;
1096 i->pipe = pipe; 1150 i->pipe = pipe;
1097 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1); 1151 i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1098 i->iov_offset = 0; 1152 i->iov_offset = 0;
@@ -1101,12 +1155,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
1101} 1155}
1102EXPORT_SYMBOL(iov_iter_pipe); 1156EXPORT_SYMBOL(iov_iter_pipe);
1103 1157
1158/**
1159 * iov_iter_discard - Initialise an I/O iterator that discards data
1160 * @i: The iterator to initialise.
1161 * @direction: The direction of the transfer.
1162 * @count: The size of the I/O buffer in bytes.
1163 *
1164 * Set up an I/O iterator that just discards everything that's written to it.
1165 * It's only available as a READ iterator.
1166 */
1167void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
1168{
1169 BUG_ON(direction != READ);
1170 i->type = ITER_DISCARD | READ;
1171 i->count = count;
1172 i->iov_offset = 0;
1173}
1174EXPORT_SYMBOL(iov_iter_discard);
1175
1104unsigned long iov_iter_alignment(const struct iov_iter *i) 1176unsigned long iov_iter_alignment(const struct iov_iter *i)
1105{ 1177{
1106 unsigned long res = 0; 1178 unsigned long res = 0;
1107 size_t size = i->count; 1179 size_t size = i->count;
1108 1180
1109 if (unlikely(i->type & ITER_PIPE)) { 1181 if (unlikely(iov_iter_is_pipe(i))) {
1110 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx])) 1182 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
1111 return size | i->iov_offset; 1183 return size | i->iov_offset;
1112 return size; 1184 return size;
@@ -1125,7 +1197,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
1125 unsigned long res = 0; 1197 unsigned long res = 0;
1126 size_t size = i->count; 1198 size_t size = i->count;
1127 1199
1128 if (unlikely(i->type & ITER_PIPE)) { 1200 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1129 WARN_ON(1); 1201 WARN_ON(1);
1130 return ~0U; 1202 return ~0U;
1131 } 1203 }
@@ -1193,8 +1265,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
1193 if (maxsize > i->count) 1265 if (maxsize > i->count)
1194 maxsize = i->count; 1266 maxsize = i->count;
1195 1267
1196 if (unlikely(i->type & ITER_PIPE)) 1268 if (unlikely(iov_iter_is_pipe(i)))
1197 return pipe_get_pages(i, pages, maxsize, maxpages, start); 1269 return pipe_get_pages(i, pages, maxsize, maxpages, start);
1270 if (unlikely(iov_iter_is_discard(i)))
1271 return -EFAULT;
1272
1198 iterate_all_kinds(i, maxsize, v, ({ 1273 iterate_all_kinds(i, maxsize, v, ({
1199 unsigned long addr = (unsigned long)v.iov_base; 1274 unsigned long addr = (unsigned long)v.iov_base;
1200 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); 1275 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1205,7 +1280,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
1205 len = maxpages * PAGE_SIZE; 1280 len = maxpages * PAGE_SIZE;
1206 addr &= ~(PAGE_SIZE - 1); 1281 addr &= ~(PAGE_SIZE - 1);
1207 n = DIV_ROUND_UP(len, PAGE_SIZE); 1282 n = DIV_ROUND_UP(len, PAGE_SIZE);
1208 res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages); 1283 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
1209 if (unlikely(res < 0)) 1284 if (unlikely(res < 0))
1210 return res; 1285 return res;
1211 return (res == n ? len : res * PAGE_SIZE) - *start; 1286 return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,8 +1345,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1270 if (maxsize > i->count) 1345 if (maxsize > i->count)
1271 maxsize = i->count; 1346 maxsize = i->count;
1272 1347
1273 if (unlikely(i->type & ITER_PIPE)) 1348 if (unlikely(iov_iter_is_pipe(i)))
1274 return pipe_get_pages_alloc(i, pages, maxsize, start); 1349 return pipe_get_pages_alloc(i, pages, maxsize, start);
1350 if (unlikely(iov_iter_is_discard(i)))
1351 return -EFAULT;
1352
1275 iterate_all_kinds(i, maxsize, v, ({ 1353 iterate_all_kinds(i, maxsize, v, ({
1276 unsigned long addr = (unsigned long)v.iov_base; 1354 unsigned long addr = (unsigned long)v.iov_base;
1277 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1)); 1355 size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1283,7 +1361,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
1283 p = get_pages_array(n); 1361 p = get_pages_array(n);
1284 if (!p) 1362 if (!p)
1285 return -ENOMEM; 1363 return -ENOMEM;
1286 res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p); 1364 res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
1287 if (unlikely(res < 0)) { 1365 if (unlikely(res < 0)) {
1288 kvfree(p); 1366 kvfree(p);
1289 return res; 1367 return res;
@@ -1313,7 +1391,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1313 __wsum sum, next; 1391 __wsum sum, next;
1314 size_t off = 0; 1392 size_t off = 0;
1315 sum = *csum; 1393 sum = *csum;
1316 if (unlikely(i->type & ITER_PIPE)) { 1394 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1317 WARN_ON(1); 1395 WARN_ON(1);
1318 return 0; 1396 return 0;
1319 } 1397 }
@@ -1329,17 +1407,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1329 err ? v.iov_len : 0; 1407 err ? v.iov_len : 0;
1330 }), ({ 1408 }), ({
1331 char *p = kmap_atomic(v.bv_page); 1409 char *p = kmap_atomic(v.bv_page);
1332 next = csum_partial_copy_nocheck(p + v.bv_offset, 1410 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1333 (to += v.bv_len) - v.bv_len, 1411 p + v.bv_offset, v.bv_len,
1334 v.bv_len, 0); 1412 sum, off);
1335 kunmap_atomic(p); 1413 kunmap_atomic(p);
1336 sum = csum_block_add(sum, next, off);
1337 off += v.bv_len; 1414 off += v.bv_len;
1338 }),({ 1415 }),({
1339 next = csum_partial_copy_nocheck(v.iov_base, 1416 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1340 (to += v.iov_len) - v.iov_len, 1417 v.iov_base, v.iov_len,
1341 v.iov_len, 0); 1418 sum, off);
1342 sum = csum_block_add(sum, next, off);
1343 off += v.iov_len; 1419 off += v.iov_len;
1344 }) 1420 })
1345 ) 1421 )
@@ -1355,7 +1431,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1355 __wsum sum, next; 1431 __wsum sum, next;
1356 size_t off = 0; 1432 size_t off = 0;
1357 sum = *csum; 1433 sum = *csum;
1358 if (unlikely(i->type & ITER_PIPE)) { 1434 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
1359 WARN_ON(1); 1435 WARN_ON(1);
1360 return false; 1436 return false;
1361 } 1437 }
@@ -1373,17 +1449,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1373 0; 1449 0;
1374 }), ({ 1450 }), ({
1375 char *p = kmap_atomic(v.bv_page); 1451 char *p = kmap_atomic(v.bv_page);
1376 next = csum_partial_copy_nocheck(p + v.bv_offset, 1452 sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
1377 (to += v.bv_len) - v.bv_len, 1453 p + v.bv_offset, v.bv_len,
1378 v.bv_len, 0); 1454 sum, off);
1379 kunmap_atomic(p); 1455 kunmap_atomic(p);
1380 sum = csum_block_add(sum, next, off);
1381 off += v.bv_len; 1456 off += v.bv_len;
1382 }),({ 1457 }),({
1383 next = csum_partial_copy_nocheck(v.iov_base, 1458 sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
1384 (to += v.iov_len) - v.iov_len, 1459 v.iov_base, v.iov_len,
1385 v.iov_len, 0); 1460 sum, off);
1386 sum = csum_block_add(sum, next, off);
1387 off += v.iov_len; 1461 off += v.iov_len;
1388 }) 1462 })
1389 ) 1463 )
@@ -1393,14 +1467,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1393} 1467}
1394EXPORT_SYMBOL(csum_and_copy_from_iter_full); 1468EXPORT_SYMBOL(csum_and_copy_from_iter_full);
1395 1469
1396size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, 1470size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
1397 struct iov_iter *i) 1471 struct iov_iter *i)
1398{ 1472{
1399 const char *from = addr; 1473 const char *from = addr;
1474 __wsum *csum = csump;
1400 __wsum sum, next; 1475 __wsum sum, next;
1401 size_t off = 0; 1476 size_t off = 0;
1477
1478 if (unlikely(iov_iter_is_pipe(i)))
1479 return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
1480
1402 sum = *csum; 1481 sum = *csum;
1403 if (unlikely(i->type & ITER_PIPE)) { 1482 if (unlikely(iov_iter_is_discard(i))) {
1404 WARN_ON(1); /* for now */ 1483 WARN_ON(1); /* for now */
1405 return 0; 1484 return 0;
1406 } 1485 }
@@ -1416,17 +1495,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1416 err ? v.iov_len : 0; 1495 err ? v.iov_len : 0;
1417 }), ({ 1496 }), ({
1418 char *p = kmap_atomic(v.bv_page); 1497 char *p = kmap_atomic(v.bv_page);
1419 next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len, 1498 sum = csum_and_memcpy(p + v.bv_offset,
1420 p + v.bv_offset, 1499 (from += v.bv_len) - v.bv_len,
1421 v.bv_len, 0); 1500 v.bv_len, sum, off);
1422 kunmap_atomic(p); 1501 kunmap_atomic(p);
1423 sum = csum_block_add(sum, next, off);
1424 off += v.bv_len; 1502 off += v.bv_len;
1425 }),({ 1503 }),({
1426 next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len, 1504 sum = csum_and_memcpy(v.iov_base,
1427 v.iov_base, 1505 (from += v.iov_len) - v.iov_len,
1428 v.iov_len, 0); 1506 v.iov_len, sum, off);
1429 sum = csum_block_add(sum, next, off);
1430 off += v.iov_len; 1507 off += v.iov_len;
1431 }) 1508 })
1432 ) 1509 )
@@ -1435,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
1435} 1512}
1436EXPORT_SYMBOL(csum_and_copy_to_iter); 1513EXPORT_SYMBOL(csum_and_copy_to_iter);
1437 1514
1515size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1516 struct iov_iter *i)
1517{
1518 struct ahash_request *hash = hashp;
1519 struct scatterlist sg;
1520 size_t copied;
1521
1522 copied = copy_to_iter(addr, bytes, i);
1523 sg_init_one(&sg, addr, copied);
1524 ahash_request_set_crypt(hash, &sg, NULL, copied);
1525 crypto_ahash_update(hash);
1526 return copied;
1527}
1528EXPORT_SYMBOL(hash_and_copy_to_iter);
1529
1438int iov_iter_npages(const struct iov_iter *i, int maxpages) 1530int iov_iter_npages(const struct iov_iter *i, int maxpages)
1439{ 1531{
1440 size_t size = i->count; 1532 size_t size = i->count;
@@ -1442,8 +1534,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
1442 1534
1443 if (!size) 1535 if (!size)
1444 return 0; 1536 return 0;
1537 if (unlikely(iov_iter_is_discard(i)))
1538 return 0;
1445 1539
1446 if (unlikely(i->type & ITER_PIPE)) { 1540 if (unlikely(iov_iter_is_pipe(i))) {
1447 struct pipe_inode_info *pipe = i->pipe; 1541 struct pipe_inode_info *pipe = i->pipe;
1448 size_t off; 1542 size_t off;
1449 int idx; 1543 int idx;
@@ -1481,11 +1575,13 @@ EXPORT_SYMBOL(iov_iter_npages);
1481const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) 1575const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
1482{ 1576{
1483 *new = *old; 1577 *new = *old;
1484 if (unlikely(new->type & ITER_PIPE)) { 1578 if (unlikely(iov_iter_is_pipe(new))) {
1485 WARN_ON(1); 1579 WARN_ON(1);
1486 return NULL; 1580 return NULL;
1487 } 1581 }
1488 if (new->type & ITER_BVEC) 1582 if (unlikely(iov_iter_is_discard(new)))
1583 return NULL;
1584 if (iov_iter_is_bvec(new))
1489 return new->bvec = kmemdup(new->bvec, 1585 return new->bvec = kmemdup(new->bvec,
1490 new->nr_segs * sizeof(struct bio_vec), 1586 new->nr_segs * sizeof(struct bio_vec),
1491 flags); 1587 flags);
@@ -1567,7 +1663,7 @@ int import_single_range(int rw, void __user *buf, size_t len,
1567{ 1663{
1568 if (len > MAX_RW_COUNT) 1664 if (len > MAX_RW_COUNT)
1569 len = MAX_RW_COUNT; 1665 len = MAX_RW_COUNT;
1570 if (unlikely(!access_ok(!rw, buf, len))) 1666 if (unlikely(!access_ok(buf, len)))
1571 return -EFAULT; 1667 return -EFAULT;
1572 1668
1573 iov->iov_base = buf; 1669 iov->iov_base = buf;
diff --git a/lib/kobject.c b/lib/kobject.c
index 97d86dc17c42..b72e00fd7d09 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -639,7 +639,7 @@ static void kobject_cleanup(struct kobject *kobj)
639 kobject_name(kobj), kobj, __func__, kobj->parent); 639 kobject_name(kobj), kobj, __func__, kobj->parent);
640 640
641 if (t && !t->release) 641 if (t && !t->release)
642 pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed.\n", 642 pr_debug("kobject: '%s' (%p): does not have a release() function, it is broken and must be fixed. See Documentation/kobject.txt.\n",
643 kobject_name(kobj), kobj); 643 kobject_name(kobj), kobj);
644 644
645 /* send "remove" if the caller did not do it but sent "add" */ 645 /* send "remove" if the caller did not do it but sent "add" */
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 63d0816ab23b..27c6118afd1c 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -240,6 +240,7 @@ static int kobj_usermode_filter(struct kobject *kobj)
240 ops = kobj_ns_ops(kobj); 240 ops = kobj_ns_ops(kobj);
241 if (ops) { 241 if (ops) {
242 const void *init_ns, *ns; 242 const void *init_ns, *ns;
243
243 ns = kobj->ktype->namespace(kobj); 244 ns = kobj->ktype->namespace(kobj);
244 init_ns = ops->initial_ns(); 245 init_ns = ops->initial_ns();
245 return ns != init_ns; 246 return ns != init_ns;
@@ -390,6 +391,7 @@ static int kobject_uevent_net_broadcast(struct kobject *kobj,
390 ops = kobj_ns_ops(kobj); 391 ops = kobj_ns_ops(kobj);
391 if (!ops && kobj->kset) { 392 if (!ops && kobj->kset) {
392 struct kobject *ksobj = &kobj->kset->kobj; 393 struct kobject *ksobj = &kobj->kset->kobj;
394
393 if (ksobj->parent != NULL) 395 if (ksobj->parent != NULL)
394 ops = kobj_ns_ops(ksobj->parent); 396 ops = kobj_ns_ops(ksobj->parent);
395 } 397 }
@@ -579,7 +581,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
579 581
580 mutex_lock(&uevent_sock_mutex); 582 mutex_lock(&uevent_sock_mutex);
581 /* we will send an event, so request a new sequence number */ 583 /* we will send an event, so request a new sequence number */
582 retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); 584 retval = add_uevent_var(env, "SEQNUM=%llu", ++uevent_seqnum);
583 if (retval) { 585 if (retval) {
584 mutex_unlock(&uevent_sock_mutex); 586 mutex_unlock(&uevent_sock_mutex);
585 goto exit; 587 goto exit;
diff --git a/lib/kstrtox.c b/lib/kstrtox.c
index 661a1e807bd1..1006bf70bf74 100644
--- a/lib/kstrtox.c
+++ b/lib/kstrtox.c
@@ -175,7 +175,7 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
175 rv = kstrtoull(s, base, &tmp); 175 rv = kstrtoull(s, base, &tmp);
176 if (rv < 0) 176 if (rv < 0)
177 return rv; 177 return rv;
178 if (tmp != (unsigned long long)(unsigned long)tmp) 178 if (tmp != (unsigned long)tmp)
179 return -ERANGE; 179 return -ERANGE;
180 *res = tmp; 180 *res = tmp;
181 return 0; 181 return 0;
@@ -191,7 +191,7 @@ int _kstrtol(const char *s, unsigned int base, long *res)
191 rv = kstrtoll(s, base, &tmp); 191 rv = kstrtoll(s, base, &tmp);
192 if (rv < 0) 192 if (rv < 0)
193 return rv; 193 return rv;
194 if (tmp != (long long)(long)tmp) 194 if (tmp != (long)tmp)
195 return -ERANGE; 195 return -ERANGE;
196 *res = tmp; 196 *res = tmp;
197 return 0; 197 return 0;
@@ -222,7 +222,7 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
222 rv = kstrtoull(s, base, &tmp); 222 rv = kstrtoull(s, base, &tmp);
223 if (rv < 0) 223 if (rv < 0)
224 return rv; 224 return rv;
225 if (tmp != (unsigned long long)(unsigned int)tmp) 225 if (tmp != (unsigned int)tmp)
226 return -ERANGE; 226 return -ERANGE;
227 *res = tmp; 227 *res = tmp;
228 return 0; 228 return 0;
@@ -253,7 +253,7 @@ int kstrtoint(const char *s, unsigned int base, int *res)
253 rv = kstrtoll(s, base, &tmp); 253 rv = kstrtoll(s, base, &tmp);
254 if (rv < 0) 254 if (rv < 0)
255 return rv; 255 return rv;
256 if (tmp != (long long)(int)tmp) 256 if (tmp != (int)tmp)
257 return -ERANGE; 257 return -ERANGE;
258 *res = tmp; 258 *res = tmp;
259 return 0; 259 return 0;
@@ -268,7 +268,7 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
268 rv = kstrtoull(s, base, &tmp); 268 rv = kstrtoull(s, base, &tmp);
269 if (rv < 0) 269 if (rv < 0)
270 return rv; 270 return rv;
271 if (tmp != (unsigned long long)(u16)tmp) 271 if (tmp != (u16)tmp)
272 return -ERANGE; 272 return -ERANGE;
273 *res = tmp; 273 *res = tmp;
274 return 0; 274 return 0;
@@ -283,7 +283,7 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
283 rv = kstrtoll(s, base, &tmp); 283 rv = kstrtoll(s, base, &tmp);
284 if (rv < 0) 284 if (rv < 0)
285 return rv; 285 return rv;
286 if (tmp != (long long)(s16)tmp) 286 if (tmp != (s16)tmp)
287 return -ERANGE; 287 return -ERANGE;
288 *res = tmp; 288 *res = tmp;
289 return 0; 289 return 0;
@@ -298,7 +298,7 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
298 rv = kstrtoull(s, base, &tmp); 298 rv = kstrtoull(s, base, &tmp);
299 if (rv < 0) 299 if (rv < 0)
300 return rv; 300 return rv;
301 if (tmp != (unsigned long long)(u8)tmp) 301 if (tmp != (u8)tmp)
302 return -ERANGE; 302 return -ERANGE;
303 *res = tmp; 303 *res = tmp;
304 return 0; 304 return 0;
@@ -313,7 +313,7 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
313 rv = kstrtoll(s, base, &tmp); 313 rv = kstrtoll(s, base, &tmp);
314 if (rv < 0) 314 if (rv < 0)
315 return rv; 315 return rv;
316 if (tmp != (long long)(s8)tmp) 316 if (tmp != (s8)tmp)
317 return -ERANGE; 317 return -ERANGE;
318 *res = tmp; 318 *res = tmp;
319 return 0; 319 return 0;
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 141734d255e4..0c9d3ad17e0f 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -43,30 +43,36 @@
43/*-***************************** 43/*-*****************************
44 * Decompression functions 44 * Decompression functions
45 *******************************/ 45 *******************************/
46/* LZ4_decompress_generic() : 46
47 * This generic decompression function cover all use cases. 47#define DEBUGLOG(l, ...) {} /* disabled */
48 * It shall be instantiated several times, using different sets of directives 48
49 * Note that it is important this generic function is really inlined, 49#ifndef assert
50#define assert(condition) ((void)0)
51#endif
52
53/*
54 * LZ4_decompress_generic() :
55 * This generic decompression function covers all use cases.
56 * It shall be instantiated several times, using different sets of directives.
57 * Note that it is important for performance that this function really get inlined,
50 * in order to remove useless branches during compilation optimization. 58 * in order to remove useless branches during compilation optimization.
51 */ 59 */
52static FORCE_INLINE int LZ4_decompress_generic( 60static FORCE_INLINE int LZ4_decompress_generic(
53 const char * const source, 61 const char * const src,
54 char * const dest, 62 char * const dst,
55 int inputSize, 63 int srcSize,
56 /* 64 /*
57 * If endOnInput == endOnInputSize, 65 * If endOnInput == endOnInputSize,
58 * this value is the max size of Output Buffer. 66 * this value is `dstCapacity`
59 */ 67 */
60 int outputSize, 68 int outputSize,
61 /* endOnOutputSize, endOnInputSize */ 69 /* endOnOutputSize, endOnInputSize */
62 int endOnInput, 70 endCondition_directive endOnInput,
63 /* full, partial */ 71 /* full, partial */
64 int partialDecoding, 72 earlyEnd_directive partialDecoding,
65 /* only used if partialDecoding == partial */
66 int targetOutputSize,
67 /* noDict, withPrefix64k, usingExtDict */ 73 /* noDict, withPrefix64k, usingExtDict */
68 int dict, 74 dict_directive dict,
69 /* == dest when no prefix */ 75 /* always <= dst, == dst when no prefix */
70 const BYTE * const lowPrefix, 76 const BYTE * const lowPrefix,
71 /* only if dict == usingExtDict */ 77 /* only if dict == usingExtDict */
72 const BYTE * const dictStart, 78 const BYTE * const dictStart,
@@ -74,35 +80,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
74 const size_t dictSize 80 const size_t dictSize
75 ) 81 )
76{ 82{
77 /* Local Variables */ 83 const BYTE *ip = (const BYTE *) src;
78 const BYTE *ip = (const BYTE *) source; 84 const BYTE * const iend = ip + srcSize;
79 const BYTE * const iend = ip + inputSize;
80 85
81 BYTE *op = (BYTE *) dest; 86 BYTE *op = (BYTE *) dst;
82 BYTE * const oend = op + outputSize; 87 BYTE * const oend = op + outputSize;
83 BYTE *cpy; 88 BYTE *cpy;
84 BYTE *oexit = op + targetOutputSize;
85 const BYTE * const lowLimit = lowPrefix - dictSize;
86 89
87 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; 90 const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
88 static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; 91 static const unsigned int inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
89 static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; 92 static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
90 93
91 const int safeDecode = (endOnInput == endOnInputSize); 94 const int safeDecode = (endOnInput == endOnInputSize);
92 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); 95 const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
93 96
97 /* Set up the "end" pointers for the shortcut. */
98 const BYTE *const shortiend = iend -
99 (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
100 const BYTE *const shortoend = oend -
101 (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
102
103 DEBUGLOG(5, "%s (srcSize:%i, dstSize:%i)", __func__,
104 srcSize, outputSize);
105
94 /* Special cases */ 106 /* Special cases */
95 /* targetOutputSize too high => decode everything */ 107 assert(lowPrefix <= op);
96 if ((partialDecoding) && (oexit > oend - MFLIMIT)) 108 assert(src != NULL);
97 oexit = oend - MFLIMIT;
98 109
99 /* Empty output buffer */ 110 /* Empty output buffer */
100 if ((endOnInput) && (unlikely(outputSize == 0))) 111 if ((endOnInput) && (unlikely(outputSize == 0)))
101 return ((inputSize == 1) && (*ip == 0)) ? 0 : -1; 112 return ((srcSize == 1) && (*ip == 0)) ? 0 : -1;
102 113
103 if ((!endOnInput) && (unlikely(outputSize == 0))) 114 if ((!endOnInput) && (unlikely(outputSize == 0)))
104 return (*ip == 0 ? 1 : -1); 115 return (*ip == 0 ? 1 : -1);
105 116
117 if ((endOnInput) && unlikely(srcSize == 0))
118 return -1;
119
106 /* Main Loop : decode sequences */ 120 /* Main Loop : decode sequences */
107 while (1) { 121 while (1) {
108 size_t length; 122 size_t length;
@@ -111,12 +125,74 @@ static FORCE_INLINE int LZ4_decompress_generic(
111 125
112 /* get literal length */ 126 /* get literal length */
113 unsigned int const token = *ip++; 127 unsigned int const token = *ip++;
114
115 length = token>>ML_BITS; 128 length = token>>ML_BITS;
116 129
130 /* ip < iend before the increment */
131 assert(!endOnInput || ip <= iend);
132
133 /*
134 * A two-stage shortcut for the most common case:
135 * 1) If the literal length is 0..14, and there is enough
136 * space, enter the shortcut and copy 16 bytes on behalf
137 * of the literals (in the fast mode, only 8 bytes can be
138 * safely copied this way).
139 * 2) Further if the match length is 4..18, copy 18 bytes
140 * in a similar manner; but we ensure that there's enough
141 * space in the output for those 18 bytes earlier, upon
142 * entering the shortcut (in other words, there is a
143 * combined check for both stages).
144 */
145 if ((endOnInput ? length != RUN_MASK : length <= 8)
146 /*
147 * strictly "less than" on input, to re-enter
148 * the loop with at least one byte
149 */
150 && likely((endOnInput ? ip < shortiend : 1) &
151 (op <= shortoend))) {
152 /* Copy the literals */
153 memcpy(op, ip, endOnInput ? 16 : 8);
154 op += length; ip += length;
155
156 /*
157 * The second stage:
158 * prepare for match copying, decode full info.
159 * If it doesn't work out, the info won't be wasted.
160 */
161 length = token & ML_MASK; /* match length */
162 offset = LZ4_readLE16(ip);
163 ip += 2;
164 match = op - offset;
165 assert(match <= op); /* check overflow */
166
167 /* Do not deal with overlapping matches. */
168 if ((length != ML_MASK) &&
169 (offset >= 8) &&
170 (dict == withPrefix64k || match >= lowPrefix)) {
171 /* Copy the match. */
172 memcpy(op + 0, match + 0, 8);
173 memcpy(op + 8, match + 8, 8);
174 memcpy(op + 16, match + 16, 2);
175 op += length + MINMATCH;
176 /* Both stages worked, load the next token. */
177 continue;
178 }
179
180 /*
181 * The second stage didn't work out, but the info
182 * is ready. Propel it right to the point of match
183 * copying.
184 */
185 goto _copy_match;
186 }
187
188 /* decode literal length */
117 if (length == RUN_MASK) { 189 if (length == RUN_MASK) {
118 unsigned int s; 190 unsigned int s;
119 191
192 if (unlikely(endOnInput ? ip >= iend - RUN_MASK : 0)) {
193 /* overflow detection */
194 goto _output_error;
195 }
120 do { 196 do {
121 s = *ip++; 197 s = *ip++;
122 length += s; 198 length += s;
@@ -125,14 +201,14 @@ static FORCE_INLINE int LZ4_decompress_generic(
125 : 1) & (s == 255)); 201 : 1) & (s == 255));
126 202
127 if ((safeDecode) 203 if ((safeDecode)
128 && unlikely( 204 && unlikely((uptrval)(op) +
129 (size_t)(op + length) < (size_t)(op))) { 205 length < (uptrval)(op))) {
130 /* overflow detection */ 206 /* overflow detection */
131 goto _output_error; 207 goto _output_error;
132 } 208 }
133 if ((safeDecode) 209 if ((safeDecode)
134 && unlikely( 210 && unlikely((uptrval)(ip) +
135 (size_t)(ip + length) < (size_t)(ip))) { 211 length < (uptrval)(ip))) {
136 /* overflow detection */ 212 /* overflow detection */
137 goto _output_error; 213 goto _output_error;
138 } 214 }
@@ -140,16 +216,19 @@ static FORCE_INLINE int LZ4_decompress_generic(
140 216
141 /* copy literals */ 217 /* copy literals */
142 cpy = op + length; 218 cpy = op + length;
143 if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT)) 219 LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
220
221 if (((endOnInput) && ((cpy > oend - MFLIMIT)
144 || (ip + length > iend - (2 + 1 + LASTLITERALS)))) 222 || (ip + length > iend - (2 + 1 + LASTLITERALS))))
145 || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) { 223 || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
146 if (partialDecoding) { 224 if (partialDecoding) {
147 if (cpy > oend) { 225 if (cpy > oend) {
148 /* 226 /*
149 * Error : 227 * Partial decoding :
150 * write attempt beyond end of output buffer 228 * stop in the middle of literal segment
151 */ 229 */
152 goto _output_error; 230 cpy = oend;
231 length = oend - op;
153 } 232 }
154 if ((endOnInput) 233 if ((endOnInput)
155 && (ip + length > iend)) { 234 && (ip + length > iend)) {
@@ -184,29 +263,43 @@ static FORCE_INLINE int LZ4_decompress_generic(
184 memcpy(op, ip, length); 263 memcpy(op, ip, length);
185 ip += length; 264 ip += length;
186 op += length; 265 op += length;
266
187 /* Necessarily EOF, due to parsing restrictions */ 267 /* Necessarily EOF, due to parsing restrictions */
188 break; 268 if (!partialDecoding || (cpy == oend))
269 break;
270 } else {
271 /* may overwrite up to WILDCOPYLENGTH beyond cpy */
272 LZ4_wildCopy(op, ip, cpy);
273 ip += length;
274 op = cpy;
189 } 275 }
190 276
191 LZ4_wildCopy(op, ip, cpy);
192 ip += length;
193 op = cpy;
194
195 /* get offset */ 277 /* get offset */
196 offset = LZ4_readLE16(ip); 278 offset = LZ4_readLE16(ip);
197 ip += 2; 279 ip += 2;
198 match = op - offset; 280 match = op - offset;
199 281
200 if ((checkOffset) && (unlikely(match < lowLimit))) { 282 /* get matchlength */
283 length = token & ML_MASK;
284
285_copy_match:
286 if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) {
201 /* Error : offset outside buffers */ 287 /* Error : offset outside buffers */
202 goto _output_error; 288 goto _output_error;
203 } 289 }
204 290
205 /* costs ~1%; silence an msan warning when offset == 0 */ 291 /* costs ~1%; silence an msan warning when offset == 0 */
206 LZ4_write32(op, (U32)offset); 292 /*
293 * note : when partialDecoding, there is no guarantee that
294 * at least 4 bytes remain available in output buffer
295 */
296 if (!partialDecoding) {
297 assert(oend > op);
298 assert(oend - op >= 4);
299
300 LZ4_write32(op, (U32)offset);
301 }
207 302
208 /* get matchlength */
209 length = token & ML_MASK;
210 if (length == ML_MASK) { 303 if (length == ML_MASK) {
211 unsigned int s; 304 unsigned int s;
212 305
@@ -221,7 +314,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
221 314
222 if ((safeDecode) 315 if ((safeDecode)
223 && unlikely( 316 && unlikely(
224 (size_t)(op + length) < (size_t)op)) { 317 (uptrval)(op) + length < (uptrval)op)) {
225 /* overflow detection */ 318 /* overflow detection */
226 goto _output_error; 319 goto _output_error;
227 } 320 }
@@ -229,24 +322,26 @@ static FORCE_INLINE int LZ4_decompress_generic(
229 322
230 length += MINMATCH; 323 length += MINMATCH;
231 324
232 /* check external dictionary */ 325 /* match starting within external dictionary */
233 if ((dict == usingExtDict) && (match < lowPrefix)) { 326 if ((dict == usingExtDict) && (match < lowPrefix)) {
234 if (unlikely(op + length > oend - LASTLITERALS)) { 327 if (unlikely(op + length > oend - LASTLITERALS)) {
235 /* doesn't respect parsing restriction */ 328 /* doesn't respect parsing restriction */
236 goto _output_error; 329 if (!partialDecoding)
330 goto _output_error;
331 length = min(length, (size_t)(oend - op));
237 } 332 }
238 333
239 if (length <= (size_t)(lowPrefix - match)) { 334 if (length <= (size_t)(lowPrefix - match)) {
240 /* 335 /*
241 * match can be copied as a single segment 336 * match fits entirely within external
242 * from external dictionary 337 * dictionary : just copy
243 */ 338 */
244 memmove(op, dictEnd - (lowPrefix - match), 339 memmove(op, dictEnd - (lowPrefix - match),
245 length); 340 length);
246 op += length; 341 op += length;
247 } else { 342 } else {
248 /* 343 /*
249 * match encompass external 344 * match stretches into both external
250 * dictionary and current block 345 * dictionary and current block
251 */ 346 */
252 size_t const copySize = (size_t)(lowPrefix - match); 347 size_t const copySize = (size_t)(lowPrefix - match);
@@ -254,7 +349,6 @@ static FORCE_INLINE int LZ4_decompress_generic(
254 349
255 memcpy(op, dictEnd - copySize, copySize); 350 memcpy(op, dictEnd - copySize, copySize);
256 op += copySize; 351 op += copySize;
257
258 if (restSize > (size_t)(op - lowPrefix)) { 352 if (restSize > (size_t)(op - lowPrefix)) {
259 /* overlap copy */ 353 /* overlap copy */
260 BYTE * const endOfMatch = op + restSize; 354 BYTE * const endOfMatch = op + restSize;
@@ -267,23 +361,44 @@ static FORCE_INLINE int LZ4_decompress_generic(
267 op += restSize; 361 op += restSize;
268 } 362 }
269 } 363 }
270
271 continue; 364 continue;
272 } 365 }
273 366
274 /* copy match within block */ 367 /* copy match within block */
275 cpy = op + length; 368 cpy = op + length;
276 369
277 if (unlikely(offset < 8)) { 370 /*
278 const int dec64 = dec64table[offset]; 371 * partialDecoding :
372 * may not respect endBlock parsing restrictions
373 */
374 assert(op <= oend);
375 if (partialDecoding &&
376 (cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
377 size_t const mlen = min(length, (size_t)(oend - op));
378 const BYTE * const matchEnd = match + mlen;
379 BYTE * const copyEnd = op + mlen;
380
381 if (matchEnd > op) {
382 /* overlap copy */
383 while (op < copyEnd)
384 *op++ = *match++;
385 } else {
386 memcpy(op, match, mlen);
387 }
388 op = copyEnd;
389 if (op == oend)
390 break;
391 continue;
392 }
279 393
394 if (unlikely(offset < 8)) {
280 op[0] = match[0]; 395 op[0] = match[0];
281 op[1] = match[1]; 396 op[1] = match[1];
282 op[2] = match[2]; 397 op[2] = match[2];
283 op[3] = match[3]; 398 op[3] = match[3];
284 match += dec32table[offset]; 399 match += inc32table[offset];
285 memcpy(op + 4, match, 4); 400 memcpy(op + 4, match, 4);
286 match -= dec64; 401 match -= dec64table[offset];
287 } else { 402 } else {
288 LZ4_copy8(op, match); 403 LZ4_copy8(op, match);
289 match += 8; 404 match += 8;
@@ -291,7 +406,7 @@ static FORCE_INLINE int LZ4_decompress_generic(
291 406
292 op += 8; 407 op += 8;
293 408
294 if (unlikely(cpy > oend - 12)) { 409 if (unlikely(cpy > oend - MATCH_SAFEGUARD_DISTANCE)) {
295 BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1); 410 BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
296 411
297 if (cpy > oend - LASTLITERALS) { 412 if (cpy > oend - LASTLITERALS) {
@@ -307,60 +422,139 @@ static FORCE_INLINE int LZ4_decompress_generic(
307 match += oCopyLimit - op; 422 match += oCopyLimit - op;
308 op = oCopyLimit; 423 op = oCopyLimit;
309 } 424 }
310
311 while (op < cpy) 425 while (op < cpy)
312 *op++ = *match++; 426 *op++ = *match++;
313 } else { 427 } else {
314 LZ4_copy8(op, match); 428 LZ4_copy8(op, match);
315
316 if (length > 16) 429 if (length > 16)
317 LZ4_wildCopy(op + 8, match + 8, cpy); 430 LZ4_wildCopy(op + 8, match + 8, cpy);
318 } 431 }
319 432 op = cpy; /* wildcopy correction */
320 op = cpy; /* correction */
321 } 433 }
322 434
323 /* end of decoding */ 435 /* end of decoding */
324 if (endOnInput) { 436 if (endOnInput) {
325 /* Nb of output bytes decoded */ 437 /* Nb of output bytes decoded */
326 return (int) (((char *)op) - dest); 438 return (int) (((char *)op) - dst);
327 } else { 439 } else {
328 /* Nb of input bytes read */ 440 /* Nb of input bytes read */
329 return (int) (((const char *)ip) - source); 441 return (int) (((const char *)ip) - src);
330 } 442 }
331 443
332 /* Overflow error detected */ 444 /* Overflow error detected */
333_output_error: 445_output_error:
334 return -1; 446 return (int) (-(((const char *)ip) - src)) - 1;
335} 447}
336 448
337int LZ4_decompress_safe(const char *source, char *dest, 449int LZ4_decompress_safe(const char *source, char *dest,
338 int compressedSize, int maxDecompressedSize) 450 int compressedSize, int maxDecompressedSize)
339{ 451{
340 return LZ4_decompress_generic(source, dest, compressedSize, 452 return LZ4_decompress_generic(source, dest,
341 maxDecompressedSize, endOnInputSize, full, 0, 453 compressedSize, maxDecompressedSize,
342 noDict, (BYTE *)dest, NULL, 0); 454 endOnInputSize, decode_full_block,
455 noDict, (BYTE *)dest, NULL, 0);
343} 456}
344 457
345int LZ4_decompress_safe_partial(const char *source, char *dest, 458int LZ4_decompress_safe_partial(const char *src, char *dst,
346 int compressedSize, int targetOutputSize, int maxDecompressedSize) 459 int compressedSize, int targetOutputSize, int dstCapacity)
347{ 460{
348 return LZ4_decompress_generic(source, dest, compressedSize, 461 dstCapacity = min(targetOutputSize, dstCapacity);
349 maxDecompressedSize, endOnInputSize, partial, 462 return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
350 targetOutputSize, noDict, (BYTE *)dest, NULL, 0); 463 endOnInputSize, partial_decode,
464 noDict, (BYTE *)dst, NULL, 0);
351} 465}
352 466
353int LZ4_decompress_fast(const char *source, char *dest, int originalSize) 467int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
354{ 468{
355 return LZ4_decompress_generic(source, dest, 0, originalSize, 469 return LZ4_decompress_generic(source, dest, 0, originalSize,
356 endOnOutputSize, full, 0, withPrefix64k, 470 endOnOutputSize, decode_full_block,
357 (BYTE *)(dest - 64 * KB), NULL, 64 * KB); 471 withPrefix64k,
472 (BYTE *)dest - 64 * KB, NULL, 0);
473}
474
475/* ===== Instantiate a few more decoding cases, used more than once. ===== */
476
477int LZ4_decompress_safe_withPrefix64k(const char *source, char *dest,
478 int compressedSize, int maxOutputSize)
479{
480 return LZ4_decompress_generic(source, dest,
481 compressedSize, maxOutputSize,
482 endOnInputSize, decode_full_block,
483 withPrefix64k,
484 (BYTE *)dest - 64 * KB, NULL, 0);
485}
486
487static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
488 int compressedSize,
489 int maxOutputSize,
490 size_t prefixSize)
491{
492 return LZ4_decompress_generic(source, dest,
493 compressedSize, maxOutputSize,
494 endOnInputSize, decode_full_block,
495 noDict,
496 (BYTE *)dest - prefixSize, NULL, 0);
497}
498
499int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
500 int compressedSize, int maxOutputSize,
501 const void *dictStart, size_t dictSize)
502{
503 return LZ4_decompress_generic(source, dest,
504 compressedSize, maxOutputSize,
505 endOnInputSize, decode_full_block,
506 usingExtDict, (BYTE *)dest,
507 (const BYTE *)dictStart, dictSize);
358} 508}
359 509
510static int LZ4_decompress_fast_extDict(const char *source, char *dest,
511 int originalSize,
512 const void *dictStart, size_t dictSize)
513{
514 return LZ4_decompress_generic(source, dest,
515 0, originalSize,
516 endOnOutputSize, decode_full_block,
517 usingExtDict, (BYTE *)dest,
518 (const BYTE *)dictStart, dictSize);
519}
520
521/*
522 * The "double dictionary" mode, for use with e.g. ring buffers: the first part
523 * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
524 * These routines are used only once, in LZ4_decompress_*_continue().
525 */
526static FORCE_INLINE
527int LZ4_decompress_safe_doubleDict(const char *source, char *dest,
528 int compressedSize, int maxOutputSize,
529 size_t prefixSize,
530 const void *dictStart, size_t dictSize)
531{
532 return LZ4_decompress_generic(source, dest,
533 compressedSize, maxOutputSize,
534 endOnInputSize, decode_full_block,
535 usingExtDict, (BYTE *)dest - prefixSize,
536 (const BYTE *)dictStart, dictSize);
537}
538
539static FORCE_INLINE
540int LZ4_decompress_fast_doubleDict(const char *source, char *dest,
541 int originalSize, size_t prefixSize,
542 const void *dictStart, size_t dictSize)
543{
544 return LZ4_decompress_generic(source, dest,
545 0, originalSize,
546 endOnOutputSize, decode_full_block,
547 usingExtDict, (BYTE *)dest - prefixSize,
548 (const BYTE *)dictStart, dictSize);
549}
550
551/* ===== streaming decompression functions ===== */
552
360int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode, 553int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
361 const char *dictionary, int dictSize) 554 const char *dictionary, int dictSize)
362{ 555{
363 LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode; 556 LZ4_streamDecode_t_internal *lz4sd =
557 &LZ4_streamDecode->internal_donotuse;
364 558
365 lz4sd->prefixSize = (size_t) dictSize; 559 lz4sd->prefixSize = (size_t) dictSize;
366 lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize; 560 lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
@@ -382,35 +576,51 @@ int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
382int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode, 576int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
383 const char *source, char *dest, int compressedSize, int maxOutputSize) 577 const char *source, char *dest, int compressedSize, int maxOutputSize)
384{ 578{
385 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; 579 LZ4_streamDecode_t_internal *lz4sd =
580 &LZ4_streamDecode->internal_donotuse;
386 int result; 581 int result;
387 582
388 if (lz4sd->prefixEnd == (BYTE *)dest) { 583 if (lz4sd->prefixSize == 0) {
389 result = LZ4_decompress_generic(source, dest, 584 /* The first call, no dictionary yet. */
390 compressedSize, 585 assert(lz4sd->extDictSize == 0);
391 maxOutputSize, 586 result = LZ4_decompress_safe(source, dest,
392 endOnInputSize, full, 0, 587 compressedSize, maxOutputSize);
393 usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize, 588 if (result <= 0)
394 lz4sd->externalDict, 589 return result;
395 lz4sd->extDictSize); 590 lz4sd->prefixSize = result;
396 591 lz4sd->prefixEnd = (BYTE *)dest + result;
592 } else if (lz4sd->prefixEnd == (BYTE *)dest) {
593 /* They're rolling the current segment. */
594 if (lz4sd->prefixSize >= 64 * KB - 1)
595 result = LZ4_decompress_safe_withPrefix64k(source, dest,
596 compressedSize, maxOutputSize);
597 else if (lz4sd->extDictSize == 0)
598 result = LZ4_decompress_safe_withSmallPrefix(source,
599 dest, compressedSize, maxOutputSize,
600 lz4sd->prefixSize);
601 else
602 result = LZ4_decompress_safe_doubleDict(source, dest,
603 compressedSize, maxOutputSize,
604 lz4sd->prefixSize,
605 lz4sd->externalDict, lz4sd->extDictSize);
397 if (result <= 0) 606 if (result <= 0)
398 return result; 607 return result;
399
400 lz4sd->prefixSize += result; 608 lz4sd->prefixSize += result;
401 lz4sd->prefixEnd += result; 609 lz4sd->prefixEnd += result;
402 } else { 610 } else {
611 /*
612 * The buffer wraps around, or they're
613 * switching to another buffer.
614 */
403 lz4sd->extDictSize = lz4sd->prefixSize; 615 lz4sd->extDictSize = lz4sd->prefixSize;
404 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; 616 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
405 result = LZ4_decompress_generic(source, dest, 617 result = LZ4_decompress_safe_forceExtDict(source, dest,
406 compressedSize, maxOutputSize, 618 compressedSize, maxOutputSize,
407 endOnInputSize, full, 0,
408 usingExtDict, (BYTE *)dest,
409 lz4sd->externalDict, lz4sd->extDictSize); 619 lz4sd->externalDict, lz4sd->extDictSize);
410 if (result <= 0) 620 if (result <= 0)
411 return result; 621 return result;
412 lz4sd->prefixSize = result; 622 lz4sd->prefixSize = result;
413 lz4sd->prefixEnd = (BYTE *)dest + result; 623 lz4sd->prefixEnd = (BYTE *)dest + result;
414 } 624 }
415 625
416 return result; 626 return result;
@@ -422,75 +632,66 @@ int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
422 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse; 632 LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
423 int result; 633 int result;
424 634
425 if (lz4sd->prefixEnd == (BYTE *)dest) { 635 if (lz4sd->prefixSize == 0) {
426 result = LZ4_decompress_generic(source, dest, 0, originalSize, 636 assert(lz4sd->extDictSize == 0);
427 endOnOutputSize, full, 0, 637 result = LZ4_decompress_fast(source, dest, originalSize);
428 usingExtDict, 638 if (result <= 0)
429 lz4sd->prefixEnd - lz4sd->prefixSize, 639 return result;
430 lz4sd->externalDict, lz4sd->extDictSize); 640 lz4sd->prefixSize = originalSize;
431 641 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
642 } else if (lz4sd->prefixEnd == (BYTE *)dest) {
643 if (lz4sd->prefixSize >= 64 * KB - 1 ||
644 lz4sd->extDictSize == 0)
645 result = LZ4_decompress_fast(source, dest,
646 originalSize);
647 else
648 result = LZ4_decompress_fast_doubleDict(source, dest,
649 originalSize, lz4sd->prefixSize,
650 lz4sd->externalDict, lz4sd->extDictSize);
432 if (result <= 0) 651 if (result <= 0)
433 return result; 652 return result;
434
435 lz4sd->prefixSize += originalSize; 653 lz4sd->prefixSize += originalSize;
436 lz4sd->prefixEnd += originalSize; 654 lz4sd->prefixEnd += originalSize;
437 } else { 655 } else {
438 lz4sd->extDictSize = lz4sd->prefixSize; 656 lz4sd->extDictSize = lz4sd->prefixSize;
439 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize; 657 lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
440 result = LZ4_decompress_generic(source, dest, 0, originalSize, 658 result = LZ4_decompress_fast_extDict(source, dest,
441 endOnOutputSize, full, 0, 659 originalSize, lz4sd->externalDict, lz4sd->extDictSize);
442 usingExtDict, (BYTE *)dest,
443 lz4sd->externalDict, lz4sd->extDictSize);
444 if (result <= 0) 660 if (result <= 0)
445 return result; 661 return result;
446 lz4sd->prefixSize = originalSize; 662 lz4sd->prefixSize = originalSize;
447 lz4sd->prefixEnd = (BYTE *)dest + originalSize; 663 lz4sd->prefixEnd = (BYTE *)dest + originalSize;
448 } 664 }
449
450 return result; 665 return result;
451} 666}
452 667
453/* 668int LZ4_decompress_safe_usingDict(const char *source, char *dest,
454 * Advanced decoding functions : 669 int compressedSize, int maxOutputSize,
455 * *_usingDict() : 670 const char *dictStart, int dictSize)
456 * These decoding functions work the same as "_continue" ones,
457 * the dictionary must be explicitly provided within parameters
458 */
459static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
460 char *dest, int compressedSize, int maxOutputSize, int safe,
461 const char *dictStart, int dictSize)
462{ 671{
463 if (dictSize == 0) 672 if (dictSize == 0)
464 return LZ4_decompress_generic(source, dest, 673 return LZ4_decompress_safe(source, dest,
465 compressedSize, maxOutputSize, safe, full, 0, 674 compressedSize, maxOutputSize);
466 noDict, (BYTE *)dest, NULL, 0); 675 if (dictStart+dictSize == dest) {
467 if (dictStart + dictSize == dest) { 676 if (dictSize >= 64 * KB - 1)
468 if (dictSize >= (int)(64 * KB - 1)) 677 return LZ4_decompress_safe_withPrefix64k(source, dest,
469 return LZ4_decompress_generic(source, dest, 678 compressedSize, maxOutputSize);
470 compressedSize, maxOutputSize, safe, full, 0, 679 return LZ4_decompress_safe_withSmallPrefix(source, dest,
471 withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0); 680 compressedSize, maxOutputSize, dictSize);
472 return LZ4_decompress_generic(source, dest, compressedSize,
473 maxOutputSize, safe, full, 0, noDict,
474 (BYTE *)dest - dictSize, NULL, 0);
475 } 681 }
476 return LZ4_decompress_generic(source, dest, compressedSize, 682 return LZ4_decompress_safe_forceExtDict(source, dest,
477 maxOutputSize, safe, full, 0, usingExtDict, 683 compressedSize, maxOutputSize, dictStart, dictSize);
478 (BYTE *)dest, (const BYTE *)dictStart, dictSize);
479}
480
481int LZ4_decompress_safe_usingDict(const char *source, char *dest,
482 int compressedSize, int maxOutputSize,
483 const char *dictStart, int dictSize)
484{
485 return LZ4_decompress_usingDict_generic(source, dest,
486 compressedSize, maxOutputSize, 1, dictStart, dictSize);
487} 684}
488 685
489int LZ4_decompress_fast_usingDict(const char *source, char *dest, 686int LZ4_decompress_fast_usingDict(const char *source, char *dest,
490 int originalSize, const char *dictStart, int dictSize) 687 int originalSize,
688 const char *dictStart, int dictSize)
491{ 689{
492 return LZ4_decompress_usingDict_generic(source, dest, 0, 690 if (dictSize == 0 || dictStart + dictSize == dest)
493 originalSize, 0, dictStart, dictSize); 691 return LZ4_decompress_fast(source, dest, originalSize);
692
693 return LZ4_decompress_fast_extDict(source, dest, originalSize,
694 dictStart, dictSize);
494} 695}
495 696
496#ifndef STATIC 697#ifndef STATIC
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 00a0b58a0871..1a7fa9d9170f 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -75,6 +75,11 @@ typedef uintptr_t uptrval;
75#define WILDCOPYLENGTH 8 75#define WILDCOPYLENGTH 8
76#define LASTLITERALS 5 76#define LASTLITERALS 5
77#define MFLIMIT (WILDCOPYLENGTH + MINMATCH) 77#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
78/*
79 * ensure it's possible to write 2 x wildcopyLength
80 * without overflowing output buffer
81 */
82#define MATCH_SAFEGUARD_DISTANCE ((2 * WILDCOPYLENGTH) - MINMATCH)
78 83
79/* Increase this value ==> compression run slower on incompressible data */ 84/* Increase this value ==> compression run slower on incompressible data */
80#define LZ4_SKIPTRIGGER 6 85#define LZ4_SKIPTRIGGER 6
@@ -222,6 +227,8 @@ typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
222typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive; 227typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
223 228
224typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive; 229typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
225typedef enum { full = 0, partial = 1 } earlyEnd_directive; 230typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
231
232#define LZ4_STATIC_ASSERT(c) BUILD_BUG_ON(!(c))
226 233
227#endif 234#endif
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000000..b810fbc66962
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/slab.h>
4
5/*
6 * Merge two NULL-terminated pointer arrays into a newly allocated
7 * array, which is also NULL-terminated. Nomenclature is inspired by
8 * memset_p() and memcat() found elsewhere in the kernel source tree.
9 */
10void **__memcat_p(void **a, void **b)
11{
12 void **p = a, **new;
13 int nr;
14
15 /* count the elements in both arrays */
16 for (nr = 0, p = a; *p; nr++, p++)
17 ;
18 for (p = b; *p; nr++, p++)
19 ;
20 /* one for the NULL-terminator */
21 nr++;
22
23 new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
24 if (!new)
25 return NULL;
26
27 /* nr -> last index; p points to NULL in b[] */
28 for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
29 new[nr] = *p;
30
31 return new;
32}
33EXPORT_SYMBOL_GPL(__memcat_p);
34
diff --git a/lib/nlattr.c b/lib/nlattr.c
index e335bcafa9e4..d26de6156b97 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -45,12 +45,11 @@ static const u8 nla_attr_minlen[NLA_TYPE_MAX+1] = {
45}; 45};
46 46
47static int validate_nla_bitfield32(const struct nlattr *nla, 47static int validate_nla_bitfield32(const struct nlattr *nla,
48 u32 *valid_flags_allowed) 48 const u32 *valid_flags_mask)
49{ 49{
50 const struct nla_bitfield32 *bf = nla_data(nla); 50 const struct nla_bitfield32 *bf = nla_data(nla);
51 u32 *valid_flags_mask = valid_flags_allowed;
52 51
53 if (!valid_flags_allowed) 52 if (!valid_flags_mask)
54 return -EINVAL; 53 return -EINVAL;
55 54
56 /*disallow invalid bit selector */ 55 /*disallow invalid bit selector */
@@ -68,11 +67,99 @@ static int validate_nla_bitfield32(const struct nlattr *nla,
68 return 0; 67 return 0;
69} 68}
70 69
70static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
71 const struct nla_policy *policy,
72 struct netlink_ext_ack *extack)
73{
74 const struct nlattr *entry;
75 int rem;
76
77 nla_for_each_attr(entry, head, len, rem) {
78 int ret;
79
80 if (nla_len(entry) == 0)
81 continue;
82
83 if (nla_len(entry) < NLA_HDRLEN) {
84 NL_SET_ERR_MSG_ATTR(extack, entry,
85 "Array element too short");
86 return -ERANGE;
87 }
88
89 ret = nla_validate(nla_data(entry), nla_len(entry),
90 maxtype, policy, extack);
91 if (ret < 0)
92 return ret;
93 }
94
95 return 0;
96}
97
98static int nla_validate_int_range(const struct nla_policy *pt,
99 const struct nlattr *nla,
100 struct netlink_ext_ack *extack)
101{
102 bool validate_min, validate_max;
103 s64 value;
104
105 validate_min = pt->validation_type == NLA_VALIDATE_RANGE ||
106 pt->validation_type == NLA_VALIDATE_MIN;
107 validate_max = pt->validation_type == NLA_VALIDATE_RANGE ||
108 pt->validation_type == NLA_VALIDATE_MAX;
109
110 switch (pt->type) {
111 case NLA_U8:
112 value = nla_get_u8(nla);
113 break;
114 case NLA_U16:
115 value = nla_get_u16(nla);
116 break;
117 case NLA_U32:
118 value = nla_get_u32(nla);
119 break;
120 case NLA_S8:
121 value = nla_get_s8(nla);
122 break;
123 case NLA_S16:
124 value = nla_get_s16(nla);
125 break;
126 case NLA_S32:
127 value = nla_get_s32(nla);
128 break;
129 case NLA_S64:
130 value = nla_get_s64(nla);
131 break;
132 case NLA_U64:
133 /* treat this one specially, since it may not fit into s64 */
134 if ((validate_min && nla_get_u64(nla) < pt->min) ||
135 (validate_max && nla_get_u64(nla) > pt->max)) {
136 NL_SET_ERR_MSG_ATTR(extack, nla,
137 "integer out of range");
138 return -ERANGE;
139 }
140 return 0;
141 default:
142 WARN_ON(1);
143 return -EINVAL;
144 }
145
146 if ((validate_min && value < pt->min) ||
147 (validate_max && value > pt->max)) {
148 NL_SET_ERR_MSG_ATTR(extack, nla,
149 "integer out of range");
150 return -ERANGE;
151 }
152
153 return 0;
154}
155
71static int validate_nla(const struct nlattr *nla, int maxtype, 156static int validate_nla(const struct nlattr *nla, int maxtype,
72 const struct nla_policy *policy) 157 const struct nla_policy *policy,
158 struct netlink_ext_ack *extack)
73{ 159{
74 const struct nla_policy *pt; 160 const struct nla_policy *pt;
75 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); 161 int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
162 int err = -ERANGE;
76 163
77 if (type <= 0 || type > maxtype) 164 if (type <= 0 || type > maxtype)
78 return 0; 165 return 0;
@@ -81,22 +168,40 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
81 168
82 BUG_ON(pt->type > NLA_TYPE_MAX); 169 BUG_ON(pt->type > NLA_TYPE_MAX);
83 170
84 if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) { 171 if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
172 (pt->type == NLA_EXACT_LEN_WARN && attrlen != pt->len)) {
85 pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n", 173 pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
86 current->comm, type); 174 current->comm, type);
87 } 175 }
88 176
89 switch (pt->type) { 177 switch (pt->type) {
178 case NLA_EXACT_LEN:
179 if (attrlen != pt->len)
180 goto out_err;
181 break;
182
183 case NLA_REJECT:
184 if (extack && pt->validation_data) {
185 NL_SET_BAD_ATTR(extack, nla);
186 extack->_msg = pt->validation_data;
187 return -EINVAL;
188 }
189 err = -EINVAL;
190 goto out_err;
191
90 case NLA_FLAG: 192 case NLA_FLAG:
91 if (attrlen > 0) 193 if (attrlen > 0)
92 return -ERANGE; 194 goto out_err;
93 break; 195 break;
94 196
95 case NLA_BITFIELD32: 197 case NLA_BITFIELD32:
96 if (attrlen != sizeof(struct nla_bitfield32)) 198 if (attrlen != sizeof(struct nla_bitfield32))
97 return -ERANGE; 199 goto out_err;
98 200
99 return validate_nla_bitfield32(nla, pt->validation_data); 201 err = validate_nla_bitfield32(nla, pt->validation_data);
202 if (err)
203 goto out_err;
204 break;
100 205
101 case NLA_NUL_STRING: 206 case NLA_NUL_STRING:
102 if (pt->len) 207 if (pt->len)
@@ -104,13 +209,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
104 else 209 else
105 minlen = attrlen; 210 minlen = attrlen;
106 211
107 if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) 212 if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) {
108 return -EINVAL; 213 err = -EINVAL;
214 goto out_err;
215 }
109 /* fall through */ 216 /* fall through */
110 217
111 case NLA_STRING: 218 case NLA_STRING:
112 if (attrlen < 1) 219 if (attrlen < 1)
113 return -ERANGE; 220 goto out_err;
114 221
115 if (pt->len) { 222 if (pt->len) {
116 char *buf = nla_data(nla); 223 char *buf = nla_data(nla);
@@ -119,32 +226,58 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
119 attrlen--; 226 attrlen--;
120 227
121 if (attrlen > pt->len) 228 if (attrlen > pt->len)
122 return -ERANGE; 229 goto out_err;
123 } 230 }
124 break; 231 break;
125 232
126 case NLA_BINARY: 233 case NLA_BINARY:
127 if (pt->len && attrlen > pt->len) 234 if (pt->len && attrlen > pt->len)
128 return -ERANGE; 235 goto out_err;
129 break; 236 break;
130 237
131 case NLA_NESTED_COMPAT:
132 if (attrlen < pt->len)
133 return -ERANGE;
134 if (attrlen < NLA_ALIGN(pt->len))
135 break;
136 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
137 return -ERANGE;
138 nla = nla_data(nla) + NLA_ALIGN(pt->len);
139 if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
140 return -ERANGE;
141 break;
142 case NLA_NESTED: 238 case NLA_NESTED:
143 /* a nested attributes is allowed to be empty; if its not, 239 /* a nested attributes is allowed to be empty; if its not,
144 * it must have a size of at least NLA_HDRLEN. 240 * it must have a size of at least NLA_HDRLEN.
145 */ 241 */
146 if (attrlen == 0) 242 if (attrlen == 0)
147 break; 243 break;
244 if (attrlen < NLA_HDRLEN)
245 goto out_err;
246 if (pt->validation_data) {
247 err = nla_validate(nla_data(nla), nla_len(nla), pt->len,
248 pt->validation_data, extack);
249 if (err < 0) {
250 /*
251 * return directly to preserve the inner
252 * error message/attribute pointer
253 */
254 return err;
255 }
256 }
257 break;
258 case NLA_NESTED_ARRAY:
259 /* a nested array attribute is allowed to be empty; if its not,
260 * it must have a size of at least NLA_HDRLEN.
261 */
262 if (attrlen == 0)
263 break;
264 if (attrlen < NLA_HDRLEN)
265 goto out_err;
266 if (pt->validation_data) {
267 int err;
268
269 err = nla_validate_array(nla_data(nla), nla_len(nla),
270 pt->len, pt->validation_data,
271 extack);
272 if (err < 0) {
273 /*
274 * return directly to preserve the inner
275 * error message/attribute pointer
276 */
277 return err;
278 }
279 }
280 break;
148 default: 281 default:
149 if (pt->len) 282 if (pt->len)
150 minlen = pt->len; 283 minlen = pt->len;
@@ -152,10 +285,34 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
152 minlen = nla_attr_minlen[pt->type]; 285 minlen = nla_attr_minlen[pt->type];
153 286
154 if (attrlen < minlen) 287 if (attrlen < minlen)
155 return -ERANGE; 288 goto out_err;
289 }
290
291 /* further validation */
292 switch (pt->validation_type) {
293 case NLA_VALIDATE_NONE:
294 /* nothing to do */
295 break;
296 case NLA_VALIDATE_RANGE:
297 case NLA_VALIDATE_MIN:
298 case NLA_VALIDATE_MAX:
299 err = nla_validate_int_range(pt, nla, extack);
300 if (err)
301 return err;
302 break;
303 case NLA_VALIDATE_FUNCTION:
304 if (pt->validate) {
305 err = pt->validate(nla, extack);
306 if (err)
307 return err;
308 }
309 break;
156 } 310 }
157 311
158 return 0; 312 return 0;
313out_err:
314 NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
315 return err;
159} 316}
160 317
161/** 318/**
@@ -180,13 +337,10 @@ int nla_validate(const struct nlattr *head, int len, int maxtype,
180 int rem; 337 int rem;
181 338
182 nla_for_each_attr(nla, head, len, rem) { 339 nla_for_each_attr(nla, head, len, rem) {
183 int err = validate_nla(nla, maxtype, policy); 340 int err = validate_nla(nla, maxtype, policy, extack);
184 341
185 if (err < 0) { 342 if (err < 0)
186 if (extack)
187 extack->bad_attr = nla;
188 return err; 343 return err;
189 }
190 } 344 }
191 345
192 return 0; 346 return 0;
@@ -237,42 +391,63 @@ EXPORT_SYMBOL(nla_policy_len);
237 * 391 *
238 * Returns 0 on success or a negative error code. 392 * Returns 0 on success or a negative error code.
239 */ 393 */
240int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, 394static int __nla_parse(struct nlattr **tb, int maxtype,
241 int len, const struct nla_policy *policy, 395 const struct nlattr *head, int len,
242 struct netlink_ext_ack *extack) 396 bool strict, const struct nla_policy *policy,
397 struct netlink_ext_ack *extack)
243{ 398{
244 const struct nlattr *nla; 399 const struct nlattr *nla;
245 int rem, err; 400 int rem;
246 401
247 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); 402 memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
248 403
249 nla_for_each_attr(nla, head, len, rem) { 404 nla_for_each_attr(nla, head, len, rem) {
250 u16 type = nla_type(nla); 405 u16 type = nla_type(nla);
251 406
252 if (type > 0 && type <= maxtype) { 407 if (type == 0 || type > maxtype) {
253 if (policy) { 408 if (strict) {
254 err = validate_nla(nla, maxtype, policy); 409 NL_SET_ERR_MSG(extack, "Unknown attribute type");
255 if (err < 0) { 410 return -EINVAL;
256 NL_SET_ERR_MSG_ATTR(extack, nla,
257 "Attribute failed policy validation");
258 goto errout;
259 }
260 } 411 }
412 continue;
413 }
414 if (policy) {
415 int err = validate_nla(nla, maxtype, policy, extack);
261 416
262 tb[type] = (struct nlattr *)nla; 417 if (err < 0)
418 return err;
263 } 419 }
420
421 tb[type] = (struct nlattr *)nla;
264 } 422 }
265 423
266 if (unlikely(rem > 0)) 424 if (unlikely(rem > 0)) {
267 pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n", 425 pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
268 rem, current->comm); 426 rem, current->comm);
427 NL_SET_ERR_MSG(extack, "bytes leftover after parsing attributes");
428 if (strict)
429 return -EINVAL;
430 }
269 431
270 err = 0; 432 return 0;
271errout: 433}
272 return err; 434
435int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
436 int len, const struct nla_policy *policy,
437 struct netlink_ext_ack *extack)
438{
439 return __nla_parse(tb, maxtype, head, len, false, policy, extack);
273} 440}
274EXPORT_SYMBOL(nla_parse); 441EXPORT_SYMBOL(nla_parse);
275 442
443int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
444 int len, const struct nla_policy *policy,
445 struct netlink_ext_ack *extack)
446{
447 return __nla_parse(tb, maxtype, head, len, true, policy, extack);
448}
449EXPORT_SYMBOL(nla_parse_strict);
450
276/** 451/**
277 * nla_find - Find a specific attribute in a stream of attributes 452 * nla_find - Find a specific attribute in a stream of attributes
278 * @head: head of attribute stream 453 * @head: head of attribute stream
diff --git a/lib/objagg.c b/lib/objagg.c
new file mode 100644
index 000000000000..c9b457a91153
--- /dev/null
+++ b/lib/objagg.c
@@ -0,0 +1,501 @@
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3
4#include <linux/module.h>
5#include <linux/slab.h>
6#include <linux/rhashtable.h>
7#include <linux/list.h>
8#include <linux/sort.h>
9#include <linux/objagg.h>
10
11#define CREATE_TRACE_POINTS
12#include <trace/events/objagg.h>
13
14struct objagg {
15 const struct objagg_ops *ops;
16 void *priv;
17 struct rhashtable obj_ht;
18 struct rhashtable_params ht_params;
19 struct list_head obj_list;
20 unsigned int obj_count;
21};
22
23struct objagg_obj {
24 struct rhash_head ht_node; /* member of objagg->obj_ht */
25 struct list_head list; /* member of objagg->obj_list */
26 struct objagg_obj *parent; /* if the object is nested, this
27 * holds pointer to parent, otherwise NULL
28 */
29 union {
30 void *delta_priv; /* user delta private */
31 void *root_priv; /* user root private */
32 };
33 unsigned int refcount; /* counts number of users of this object
34 * including nested objects
35 */
36 struct objagg_obj_stats stats;
37 unsigned long obj[0];
38};
39
40static unsigned int objagg_obj_ref_inc(struct objagg_obj *objagg_obj)
41{
42 return ++objagg_obj->refcount;
43}
44
45static unsigned int objagg_obj_ref_dec(struct objagg_obj *objagg_obj)
46{
47 return --objagg_obj->refcount;
48}
49
50static void objagg_obj_stats_inc(struct objagg_obj *objagg_obj)
51{
52 objagg_obj->stats.user_count++;
53 objagg_obj->stats.delta_user_count++;
54 if (objagg_obj->parent)
55 objagg_obj->parent->stats.delta_user_count++;
56}
57
58static void objagg_obj_stats_dec(struct objagg_obj *objagg_obj)
59{
60 objagg_obj->stats.user_count--;
61 objagg_obj->stats.delta_user_count--;
62 if (objagg_obj->parent)
63 objagg_obj->parent->stats.delta_user_count--;
64}
65
66static bool objagg_obj_is_root(const struct objagg_obj *objagg_obj)
67{
68 /* Nesting is not supported, so we can use ->parent
69 * to figure out if the object is root.
70 */
71 return !objagg_obj->parent;
72}
73
74/**
75 * objagg_obj_root_priv - obtains root private for an object
76 * @objagg_obj: objagg object instance
77 *
78 * Note: all locking must be provided by the caller.
79 *
80 * Either the object is root itself when the private is returned
81 * directly, or the parent is root and its private is returned
82 * instead.
83 *
84 * Returns a user private root pointer.
85 */
86const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj)
87{
88 if (objagg_obj_is_root(objagg_obj))
89 return objagg_obj->root_priv;
90 WARN_ON(!objagg_obj_is_root(objagg_obj->parent));
91 return objagg_obj->parent->root_priv;
92}
93EXPORT_SYMBOL(objagg_obj_root_priv);
94
95/**
96 * objagg_obj_delta_priv - obtains delta private for an object
97 * @objagg_obj: objagg object instance
98 *
99 * Note: all locking must be provided by the caller.
100 *
101 * Returns user private delta pointer or NULL in case the passed
102 * object is root.
103 */
104const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj)
105{
106 if (objagg_obj_is_root(objagg_obj))
107 return NULL;
108 return objagg_obj->delta_priv;
109}
110EXPORT_SYMBOL(objagg_obj_delta_priv);
111
112/**
113 * objagg_obj_raw - obtains object user private pointer
114 * @objagg_obj: objagg object instance
115 *
116 * Note: all locking must be provided by the caller.
117 *
118 * Returns user private pointer as was passed to objagg_obj_get() by "obj" arg.
119 */
120const void *objagg_obj_raw(const struct objagg_obj *objagg_obj)
121{
122 return objagg_obj->obj;
123}
124EXPORT_SYMBOL(objagg_obj_raw);
125
126static struct objagg_obj *objagg_obj_lookup(struct objagg *objagg, void *obj)
127{
128 return rhashtable_lookup_fast(&objagg->obj_ht, obj, objagg->ht_params);
129}
130
131static int objagg_obj_parent_assign(struct objagg *objagg,
132 struct objagg_obj *objagg_obj,
133 struct objagg_obj *parent)
134{
135 void *delta_priv;
136
137 delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
138 objagg_obj->obj);
139 if (IS_ERR(delta_priv))
140 return PTR_ERR(delta_priv);
141
142 /* User returned a delta private, that means that
143 * our object can be aggregated into the parent.
144 */
145 objagg_obj->parent = parent;
146 objagg_obj->delta_priv = delta_priv;
147 objagg_obj_ref_inc(objagg_obj->parent);
148 trace_objagg_obj_parent_assign(objagg, objagg_obj,
149 parent,
150 parent->refcount);
151 return 0;
152}
153
154static int objagg_obj_parent_lookup_assign(struct objagg *objagg,
155 struct objagg_obj *objagg_obj)
156{
157 struct objagg_obj *objagg_obj_cur;
158 int err;
159
160 list_for_each_entry(objagg_obj_cur, &objagg->obj_list, list) {
161 /* Nesting is not supported. In case the object
162 * is not root, it cannot be assigned as parent.
163 */
164 if (!objagg_obj_is_root(objagg_obj_cur))
165 continue;
166 err = objagg_obj_parent_assign(objagg, objagg_obj,
167 objagg_obj_cur);
168 if (!err)
169 return 0;
170 }
171 return -ENOENT;
172}
173
174static void __objagg_obj_put(struct objagg *objagg,
175 struct objagg_obj *objagg_obj);
176
177static void objagg_obj_parent_unassign(struct objagg *objagg,
178 struct objagg_obj *objagg_obj)
179{
180 trace_objagg_obj_parent_unassign(objagg, objagg_obj,
181 objagg_obj->parent,
182 objagg_obj->parent->refcount);
183 objagg->ops->delta_destroy(objagg->priv, objagg_obj->delta_priv);
184 __objagg_obj_put(objagg, objagg_obj->parent);
185}
186
187static int objagg_obj_root_create(struct objagg *objagg,
188 struct objagg_obj *objagg_obj)
189{
190 objagg_obj->root_priv = objagg->ops->root_create(objagg->priv,
191 objagg_obj->obj);
192 if (IS_ERR(objagg_obj->root_priv))
193 return PTR_ERR(objagg_obj->root_priv);
194
195 trace_objagg_obj_root_create(objagg, objagg_obj);
196 return 0;
197}
198
199static void objagg_obj_root_destroy(struct objagg *objagg,
200 struct objagg_obj *objagg_obj)
201{
202 trace_objagg_obj_root_destroy(objagg, objagg_obj);
203 objagg->ops->root_destroy(objagg->priv, objagg_obj->root_priv);
204}
205
/* Place a freshly allocated object into the aggregation forest:
 * preferably as a delta under an existing root, otherwise as a new
 * root of its own.
 */
static int objagg_obj_init(struct objagg *objagg,
			   struct objagg_obj *objagg_obj)
{
	/* Aggregation under an existing object is tried first ... */
	if (!objagg_obj_parent_lookup_assign(objagg, objagg_obj))
		return 0;
	/* ... and if that is not possible, the object becomes a root. */
	return objagg_obj_root_create(objagg, objagg_obj);
}

/* Inverse of objagg_obj_init(). */
static void objagg_obj_fini(struct objagg *objagg,
			    struct objagg_obj *objagg_obj)
{
	if (objagg_obj_is_root(objagg_obj))
		objagg_obj_root_destroy(objagg, objagg_obj);
	else
		objagg_obj_parent_unassign(objagg, objagg_obj);
}
227
228static struct objagg_obj *objagg_obj_create(struct objagg *objagg, void *obj)
229{
230 struct objagg_obj *objagg_obj;
231 int err;
232
233 objagg_obj = kzalloc(sizeof(*objagg_obj) + objagg->ops->obj_size,
234 GFP_KERNEL);
235 if (!objagg_obj)
236 return ERR_PTR(-ENOMEM);
237 objagg_obj_ref_inc(objagg_obj);
238 memcpy(objagg_obj->obj, obj, objagg->ops->obj_size);
239
240 err = objagg_obj_init(objagg, objagg_obj);
241 if (err)
242 goto err_obj_init;
243
244 err = rhashtable_insert_fast(&objagg->obj_ht, &objagg_obj->ht_node,
245 objagg->ht_params);
246 if (err)
247 goto err_ht_insert;
248 list_add(&objagg_obj->list, &objagg->obj_list);
249 objagg->obj_count++;
250 trace_objagg_obj_create(objagg, objagg_obj);
251
252 return objagg_obj;
253
254err_ht_insert:
255 objagg_obj_fini(objagg, objagg_obj);
256err_obj_init:
257 kfree(objagg_obj);
258 return ERR_PTR(err);
259}
260
/* Obtain an objagg_obj for @obj: reuse an identical existing entry if
 * one is already in use, otherwise create a new one.
 */
static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj)
{
	struct objagg_obj *objagg_obj;

	/* First, look for the object exactly as the user passed it,
	 * perhaps it is already in use.
	 */
	objagg_obj = objagg_obj_lookup(objagg, obj);
	if (!objagg_obj)
		return objagg_obj_create(objagg, obj);

	objagg_obj_ref_inc(objagg_obj);
	return objagg_obj;
}
276
277/**
278 * objagg_obj_get - gets an object within objagg instance
279 * @objagg: objagg instance
280 * @obj: user-specific private object pointer
281 *
282 * Note: all locking must be provided by the caller.
283 *
284 * Size of the "obj" memory is specified in "objagg->ops".
285 *
286 * There are 3 main options this function wraps:
287 * 1) The object according to "obj" already exist. In that case
288 * the reference counter is incrementes and the object is returned.
289 * 2) The object does not exist, but it can be aggregated within
290 * another object. In that case, user ops->delta_create() is called
291 * to obtain delta data and a new object is created with returned
292 * user-delta private pointer.
293 * 3) The object does not exist and cannot be aggregated into
294 * any of the existing objects. In that case, user ops->root_create()
295 * is called to create the root and a new object is created with
296 * returned user-root private pointer.
297 *
298 * Returns a pointer to objagg object instance in case of success,
299 * otherwise it returns pointer error using ERR_PTR macro.
300 */
301struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj)
302{
303 struct objagg_obj *objagg_obj;
304
305 objagg_obj = __objagg_obj_get(objagg, obj);
306 if (IS_ERR(objagg_obj))
307 return objagg_obj;
308 objagg_obj_stats_inc(objagg_obj);
309 trace_objagg_obj_get(objagg, objagg_obj, objagg_obj->refcount);
310 return objagg_obj;
311}
312EXPORT_SYMBOL(objagg_obj_get);
313
314static void objagg_obj_destroy(struct objagg *objagg,
315 struct objagg_obj *objagg_obj)
316{
317 trace_objagg_obj_destroy(objagg, objagg_obj);
318 --objagg->obj_count;
319 list_del(&objagg_obj->list);
320 rhashtable_remove_fast(&objagg->obj_ht, &objagg_obj->ht_node,
321 objagg->ht_params);
322 objagg_obj_fini(objagg, objagg_obj);
323 kfree(objagg_obj);
324}
325
326static void __objagg_obj_put(struct objagg *objagg,
327 struct objagg_obj *objagg_obj)
328{
329 if (!objagg_obj_ref_dec(objagg_obj))
330 objagg_obj_destroy(objagg, objagg_obj);
331}
332
333/**
334 * objagg_obj_put - puts an object within objagg instance
335 * @objagg: objagg instance
336 * @objagg_obj: objagg object instance
337 *
338 * Note: all locking must be provided by the caller.
339 *
340 * Symmetric to objagg_obj_get().
341 */
342void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj)
343{
344 trace_objagg_obj_put(objagg, objagg_obj, objagg_obj->refcount);
345 objagg_obj_stats_dec(objagg_obj);
346 __objagg_obj_put(objagg, objagg_obj);
347}
348EXPORT_SYMBOL(objagg_obj_put);
349
350/**
351 * objagg_create - creates a new objagg instance
352 * @ops: user-specific callbacks
353 * @priv: pointer to a private data passed to the ops
354 *
355 * Note: all locking must be provided by the caller.
356 *
357 * The purpose of the library is to provide an infrastructure to
358 * aggregate user-specified objects. Library does not care about the type
359 * of the object. User fills-up ops which take care of the specific
360 * user object manipulation.
361 *
362 * As a very stupid example, consider integer numbers. For example
363 * number 8 as a root object. That can aggregate number 9 with delta 1,
364 * number 10 with delta 2, etc. This example is implemented as
365 * a part of a testing module in test_objagg.c file.
366 *
367 * Each objagg instance contains multiple trees. Each tree node is
368 * represented by "an object". In the current implementation there can be
369 * only roots and leafs nodes. Leaf nodes are called deltas.
370 * But in general, this can be easily extended for intermediate nodes.
371 * In that extension, a delta would be associated with all non-root
372 * nodes.
373 *
374 * Returns a pointer to newly created objagg instance in case of success,
375 * otherwise it returns pointer error using ERR_PTR macro.
376 */
377struct objagg *objagg_create(const struct objagg_ops *ops, void *priv)
378{
379 struct objagg *objagg;
380 int err;
381
382 if (WARN_ON(!ops || !ops->root_create || !ops->root_destroy ||
383 !ops->delta_create || !ops->delta_destroy))
384 return ERR_PTR(-EINVAL);
385 objagg = kzalloc(sizeof(*objagg), GFP_KERNEL);
386 if (!objagg)
387 return ERR_PTR(-ENOMEM);
388 objagg->ops = ops;
389 objagg->priv = priv;
390 INIT_LIST_HEAD(&objagg->obj_list);
391
392 objagg->ht_params.key_len = ops->obj_size;
393 objagg->ht_params.key_offset = offsetof(struct objagg_obj, obj);
394 objagg->ht_params.head_offset = offsetof(struct objagg_obj, ht_node);
395
396 err = rhashtable_init(&objagg->obj_ht, &objagg->ht_params);
397 if (err)
398 goto err_rhashtable_init;
399
400 trace_objagg_create(objagg);
401 return objagg;
402
403err_rhashtable_init:
404 kfree(objagg);
405 return ERR_PTR(err);
406}
407EXPORT_SYMBOL(objagg_create);
408
409/**
410 * objagg_destroy - destroys a new objagg instance
411 * @objagg: objagg instance
412 *
413 * Note: all locking must be provided by the caller.
414 */
415void objagg_destroy(struct objagg *objagg)
416{
417 trace_objagg_destroy(objagg);
418 WARN_ON(!list_empty(&objagg->obj_list));
419 rhashtable_destroy(&objagg->obj_ht);
420 kfree(objagg);
421}
422EXPORT_SYMBOL(objagg_destroy);
423
424static int objagg_stats_info_sort_cmp_func(const void *a, const void *b)
425{
426 const struct objagg_obj_stats_info *stats_info1 = a;
427 const struct objagg_obj_stats_info *stats_info2 = b;
428
429 if (stats_info1->is_root != stats_info2->is_root)
430 return stats_info2->is_root - stats_info1->is_root;
431 if (stats_info1->stats.delta_user_count !=
432 stats_info2->stats.delta_user_count)
433 return stats_info2->stats.delta_user_count -
434 stats_info1->stats.delta_user_count;
435 return stats_info2->stats.user_count - stats_info1->stats.user_count;
436}
437
438/**
439 * objagg_stats_get - obtains stats of the objagg instance
440 * @objagg: objagg instance
441 *
442 * Note: all locking must be provided by the caller.
443 *
444 * The returned structure contains statistics of all object
445 * currently in use, ordered by following rules:
446 * 1) Root objects are always on lower indexes than the rest.
447 * 2) Objects with higher delta user count are always on lower
448 * indexes.
449 * 3) In case more objects have the same delta user count,
450 * the objects are ordered by user count.
451 *
452 * Returns a pointer to stats instance in case of success,
453 * otherwise it returns pointer error using ERR_PTR macro.
454 */
455const struct objagg_stats *objagg_stats_get(struct objagg *objagg)
456{
457 struct objagg_stats *objagg_stats;
458 struct objagg_obj *objagg_obj;
459 size_t alloc_size;
460 int i;
461
462 alloc_size = sizeof(*objagg_stats) +
463 sizeof(objagg_stats->stats_info[0]) * objagg->obj_count;
464 objagg_stats = kzalloc(alloc_size, GFP_KERNEL);
465 if (!objagg_stats)
466 return ERR_PTR(-ENOMEM);
467
468 i = 0;
469 list_for_each_entry(objagg_obj, &objagg->obj_list, list) {
470 memcpy(&objagg_stats->stats_info[i].stats, &objagg_obj->stats,
471 sizeof(objagg_stats->stats_info[0].stats));
472 objagg_stats->stats_info[i].objagg_obj = objagg_obj;
473 objagg_stats->stats_info[i].is_root =
474 objagg_obj_is_root(objagg_obj);
475 i++;
476 }
477 objagg_stats->stats_info_count = i;
478
479 sort(objagg_stats->stats_info, objagg_stats->stats_info_count,
480 sizeof(struct objagg_obj_stats_info),
481 objagg_stats_info_sort_cmp_func, NULL);
482
483 return objagg_stats;
484}
485EXPORT_SYMBOL(objagg_stats_get);
486
487/**
488 * objagg_stats_puts - puts stats of the objagg instance
489 * @objagg_stats: objagg instance stats
490 *
491 * Note: all locking must be provided by the caller.
492 */
493void objagg_stats_put(const struct objagg_stats *objagg_stats)
494{
495 kfree(objagg_stats);
496}
497EXPORT_SYMBOL(objagg_stats_put);
498
499MODULE_LICENSE("Dual BSD/GPL");
500MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
501MODULE_DESCRIPTION("Object aggregation manager");
diff --git a/lib/parser.c b/lib/parser.c
index 3278958b472a..dd70e5e6c9e2 100644
--- a/lib/parser.c
+++ b/lib/parser.c
@@ -131,13 +131,10 @@ static int match_number(substring_t *s, int *result, int base)
131 char *buf; 131 char *buf;
132 int ret; 132 int ret;
133 long val; 133 long val;
134 size_t len = s->to - s->from;
135 134
136 buf = kmalloc(len + 1, GFP_KERNEL); 135 buf = match_strdup(s);
137 if (!buf) 136 if (!buf)
138 return -ENOMEM; 137 return -ENOMEM;
139 memcpy(buf, s->from, len);
140 buf[len] = '\0';
141 138
142 ret = 0; 139 ret = 0;
143 val = simple_strtol(buf, &endp, base); 140 val = simple_strtol(buf, &endp, base);
@@ -166,13 +163,10 @@ static int match_u64int(substring_t *s, u64 *result, int base)
166 char *buf; 163 char *buf;
167 int ret; 164 int ret;
168 u64 val; 165 u64 val;
169 size_t len = s->to - s->from;
170 166
171 buf = kmalloc(len + 1, GFP_KERNEL); 167 buf = match_strdup(s);
172 if (!buf) 168 if (!buf)
173 return -ENOMEM; 169 return -ENOMEM;
174 memcpy(buf, s->from, len);
175 buf[len] = '\0';
176 170
177 ret = kstrtoull(buf, base, &val); 171 ret = kstrtoull(buf, base, &val);
178 if (!ret) 172 if (!ret)
@@ -327,10 +321,6 @@ EXPORT_SYMBOL(match_strlcpy);
327 */ 321 */
328char *match_strdup(const substring_t *s) 322char *match_strdup(const substring_t *s)
329{ 323{
330 size_t sz = s->to - s->from + 1; 324 return kmemdup_nul(s->from, s->to - s->from, GFP_KERNEL);
331 char *p = kmalloc(sz, GFP_KERNEL);
332 if (p)
333 match_strlcpy(p, s, sz);
334 return p;
335} 325}
336EXPORT_SYMBOL(match_strdup); 326EXPORT_SYMBOL(match_strdup);
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..9877682e49c7 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -181,7 +181,7 @@ static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
181 ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch; 181 ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
182 182
183 percpu_ref_get(ref); /* put after confirmation */ 183 percpu_ref_get(ref); /* put after confirmation */
184 call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu); 184 call_rcu(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
185} 185}
186 186
187static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref) 187static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -356,11 +356,35 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
356 */ 356 */
357void percpu_ref_reinit(struct percpu_ref *ref) 357void percpu_ref_reinit(struct percpu_ref *ref)
358{ 358{
359 WARN_ON_ONCE(!percpu_ref_is_zero(ref));
360
361 percpu_ref_resurrect(ref);
362}
363EXPORT_SYMBOL_GPL(percpu_ref_reinit);
364
365/**
366 * percpu_ref_resurrect - modify a percpu refcount from dead to live
367 * @ref: perpcu_ref to resurrect
368 *
369 * Modify @ref so that it's in the same state as before percpu_ref_kill() was
370 * called. @ref must be dead but must not yet have exited.
371 *
372 * If @ref->release() frees @ref then the caller is responsible for
373 * guaranteeing that @ref->release() does not get called while this
374 * function is in progress.
375 *
376 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
377 * this function is in progress.
378 */
379void percpu_ref_resurrect(struct percpu_ref *ref)
380{
381 unsigned long __percpu *percpu_count;
359 unsigned long flags; 382 unsigned long flags;
360 383
361 spin_lock_irqsave(&percpu_ref_switch_lock, flags); 384 spin_lock_irqsave(&percpu_ref_switch_lock, flags);
362 385
363 WARN_ON_ONCE(!percpu_ref_is_zero(ref)); 386 WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
387 WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
364 388
365 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD; 389 ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
366 percpu_ref_get(ref); 390 percpu_ref_get(ref);
@@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
368 392
369 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags); 393 spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
370} 394}
371EXPORT_SYMBOL_GPL(percpu_ref_reinit); 395EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index bc03ecc4dfd2..14d51548bea6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,15 +38,13 @@
38#include <linux/rcupdate.h> 38#include <linux/rcupdate.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/string.h> 40#include <linux/string.h>
41#include <linux/xarray.h>
41 42
42 43
43/* Number of nodes in fully populated tree of given height */
44static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
45
46/* 44/*
47 * Radix tree node cache. 45 * Radix tree node cache.
48 */ 46 */
49static struct kmem_cache *radix_tree_node_cachep; 47struct kmem_cache *radix_tree_node_cachep;
50 48
51/* 49/*
52 * The radix tree is variable-height, so an insert operation not only has 50 * The radix tree is variable-height, so an insert operation not only has
@@ -98,24 +96,7 @@ static inline void *node_to_entry(void *ptr)
98 return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE); 96 return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
99} 97}
100 98
101#define RADIX_TREE_RETRY node_to_entry(NULL) 99#define RADIX_TREE_RETRY XA_RETRY_ENTRY
102
103#ifdef CONFIG_RADIX_TREE_MULTIORDER
104/* Sibling slots point directly to another slot in the same node */
105static inline
106bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
107{
108 void __rcu **ptr = node;
109 return (parent->slots <= ptr) &&
110 (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
111}
112#else
113static inline
114bool is_sibling_entry(const struct radix_tree_node *parent, void *node)
115{
116 return false;
117}
118#endif
119 100
120static inline unsigned long 101static inline unsigned long
121get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot) 102get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
@@ -129,24 +110,13 @@ static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
129 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK; 110 unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
130 void __rcu **entry = rcu_dereference_raw(parent->slots[offset]); 111 void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
131 112
132#ifdef CONFIG_RADIX_TREE_MULTIORDER
133 if (radix_tree_is_internal_node(entry)) {
134 if (is_sibling_entry(parent, entry)) {
135 void __rcu **sibentry;
136 sibentry = (void __rcu **) entry_to_node(entry);
137 offset = get_slot_offset(parent, sibentry);
138 entry = rcu_dereference_raw(*sibentry);
139 }
140 }
141#endif
142
143 *nodep = (void *)entry; 113 *nodep = (void *)entry;
144 return offset; 114 return offset;
145} 115}
146 116
147static inline gfp_t root_gfp_mask(const struct radix_tree_root *root) 117static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
148{ 118{
149 return root->gfp_mask & (__GFP_BITS_MASK & ~GFP_ZONEMASK); 119 return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
150} 120}
151 121
152static inline void tag_set(struct radix_tree_node *node, unsigned int tag, 122static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
@@ -169,32 +139,32 @@ static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
169 139
170static inline void root_tag_set(struct radix_tree_root *root, unsigned tag) 140static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
171{ 141{
172 root->gfp_mask |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT)); 142 root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
173} 143}
174 144
175static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag) 145static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
176{ 146{
177 root->gfp_mask &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT)); 147 root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
178} 148}
179 149
180static inline void root_tag_clear_all(struct radix_tree_root *root) 150static inline void root_tag_clear_all(struct radix_tree_root *root)
181{ 151{
182 root->gfp_mask &= (1 << ROOT_TAG_SHIFT) - 1; 152 root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
183} 153}
184 154
185static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag) 155static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
186{ 156{
187 return (__force int)root->gfp_mask & (1 << (tag + ROOT_TAG_SHIFT)); 157 return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
188} 158}
189 159
190static inline unsigned root_tags_get(const struct radix_tree_root *root) 160static inline unsigned root_tags_get(const struct radix_tree_root *root)
191{ 161{
192 return (__force unsigned)root->gfp_mask >> ROOT_TAG_SHIFT; 162 return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
193} 163}
194 164
195static inline bool is_idr(const struct radix_tree_root *root) 165static inline bool is_idr(const struct radix_tree_root *root)
196{ 166{
197 return !!(root->gfp_mask & ROOT_IS_IDR); 167 return !!(root->xa_flags & ROOT_IS_IDR);
198} 168}
199 169
200/* 170/*
@@ -254,7 +224,7 @@ radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
254 224
255static unsigned int iter_offset(const struct radix_tree_iter *iter) 225static unsigned int iter_offset(const struct radix_tree_iter *iter)
256{ 226{
257 return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK; 227 return iter->index & RADIX_TREE_MAP_MASK;
258} 228}
259 229
260/* 230/*
@@ -277,99 +247,6 @@ static unsigned long next_index(unsigned long index,
277 return (index & ~node_maxindex(node)) + (offset << node->shift); 247 return (index & ~node_maxindex(node)) + (offset << node->shift);
278} 248}
279 249
280#ifndef __KERNEL__
281static void dump_node(struct radix_tree_node *node, unsigned long index)
282{
283 unsigned long i;
284
285 pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
286 node, node->offset, index, index | node_maxindex(node),
287 node->parent,
288 node->tags[0][0], node->tags[1][0], node->tags[2][0],
289 node->shift, node->count, node->exceptional);
290
291 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
292 unsigned long first = index | (i << node->shift);
293 unsigned long last = first | ((1UL << node->shift) - 1);
294 void *entry = node->slots[i];
295 if (!entry)
296 continue;
297 if (entry == RADIX_TREE_RETRY) {
298 pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
299 i, first, last, node);
300 } else if (!radix_tree_is_internal_node(entry)) {
301 pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
302 entry, i, first, last, node);
303 } else if (is_sibling_entry(node, entry)) {
304 pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
305 entry, i, first, last, node,
306 *(void **)entry_to_node(entry));
307 } else {
308 dump_node(entry_to_node(entry), first);
309 }
310 }
311}
312
313/* For debug */
314static void radix_tree_dump(struct radix_tree_root *root)
315{
316 pr_debug("radix root: %p rnode %p tags %x\n",
317 root, root->rnode,
318 root->gfp_mask >> ROOT_TAG_SHIFT);
319 if (!radix_tree_is_internal_node(root->rnode))
320 return;
321 dump_node(entry_to_node(root->rnode), 0);
322}
323
324static void dump_ida_node(void *entry, unsigned long index)
325{
326 unsigned long i;
327
328 if (!entry)
329 return;
330
331 if (radix_tree_is_internal_node(entry)) {
332 struct radix_tree_node *node = entry_to_node(entry);
333
334 pr_debug("ida node: %p offset %d indices %lu-%lu parent %p free %lx shift %d count %d\n",
335 node, node->offset, index * IDA_BITMAP_BITS,
336 ((index | node_maxindex(node)) + 1) *
337 IDA_BITMAP_BITS - 1,
338 node->parent, node->tags[0][0], node->shift,
339 node->count);
340 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++)
341 dump_ida_node(node->slots[i],
342 index | (i << node->shift));
343 } else if (radix_tree_exceptional_entry(entry)) {
344 pr_debug("ida excp: %p offset %d indices %lu-%lu data %lx\n",
345 entry, (int)(index & RADIX_TREE_MAP_MASK),
346 index * IDA_BITMAP_BITS,
347 index * IDA_BITMAP_BITS + BITS_PER_LONG -
348 RADIX_TREE_EXCEPTIONAL_SHIFT,
349 (unsigned long)entry >>
350 RADIX_TREE_EXCEPTIONAL_SHIFT);
351 } else {
352 struct ida_bitmap *bitmap = entry;
353
354 pr_debug("ida btmp: %p offset %d indices %lu-%lu data", bitmap,
355 (int)(index & RADIX_TREE_MAP_MASK),
356 index * IDA_BITMAP_BITS,
357 (index + 1) * IDA_BITMAP_BITS - 1);
358 for (i = 0; i < IDA_BITMAP_LONGS; i++)
359 pr_cont(" %lx", bitmap->bitmap[i]);
360 pr_cont("\n");
361 }
362}
363
364static void ida_dump(struct ida *ida)
365{
366 struct radix_tree_root *root = &ida->ida_rt;
367 pr_debug("ida: %p node %p free %d\n", ida, root->rnode,
368 root->gfp_mask >> ROOT_TAG_SHIFT);
369 dump_ida_node(root->rnode, 0);
370}
371#endif
372
373/* 250/*
374 * This assumes that the caller has performed appropriate preallocation, and 251 * This assumes that the caller has performed appropriate preallocation, and
375 * that the caller has pinned this thread of control to the current CPU. 252 * that the caller has pinned this thread of control to the current CPU.
@@ -378,7 +255,7 @@ static struct radix_tree_node *
378radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent, 255radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
379 struct radix_tree_root *root, 256 struct radix_tree_root *root,
380 unsigned int shift, unsigned int offset, 257 unsigned int shift, unsigned int offset,
381 unsigned int count, unsigned int exceptional) 258 unsigned int count, unsigned int nr_values)
382{ 259{
383 struct radix_tree_node *ret = NULL; 260 struct radix_tree_node *ret = NULL;
384 261
@@ -425,14 +302,14 @@ out:
425 ret->shift = shift; 302 ret->shift = shift;
426 ret->offset = offset; 303 ret->offset = offset;
427 ret->count = count; 304 ret->count = count;
428 ret->exceptional = exceptional; 305 ret->nr_values = nr_values;
429 ret->parent = parent; 306 ret->parent = parent;
430 ret->root = root; 307 ret->array = root;
431 } 308 }
432 return ret; 309 return ret;
433} 310}
434 311
435static void radix_tree_node_rcu_free(struct rcu_head *head) 312void radix_tree_node_rcu_free(struct rcu_head *head)
436{ 313{
437 struct radix_tree_node *node = 314 struct radix_tree_node *node =
438 container_of(head, struct radix_tree_node, rcu_head); 315 container_of(head, struct radix_tree_node, rcu_head);
@@ -530,77 +407,10 @@ int radix_tree_maybe_preload(gfp_t gfp_mask)
530} 407}
531EXPORT_SYMBOL(radix_tree_maybe_preload); 408EXPORT_SYMBOL(radix_tree_maybe_preload);
532 409
533#ifdef CONFIG_RADIX_TREE_MULTIORDER
534/*
535 * Preload with enough objects to ensure that we can split a single entry
536 * of order @old_order into many entries of size @new_order
537 */
538int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
539 gfp_t gfp_mask)
540{
541 unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
542 unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
543 (new_order / RADIX_TREE_MAP_SHIFT);
544 unsigned nr = 0;
545
546 WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
547 BUG_ON(new_order >= old_order);
548
549 while (layers--)
550 nr = nr * RADIX_TREE_MAP_SIZE + 1;
551 return __radix_tree_preload(gfp_mask, top * nr);
552}
553#endif
554
555/*
556 * The same as function above, but preload number of nodes required to insert
557 * (1 << order) continuous naturally-aligned elements.
558 */
559int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
560{
561 unsigned long nr_subtrees;
562 int nr_nodes, subtree_height;
563
564 /* Preloading doesn't help anything with this gfp mask, skip it */
565 if (!gfpflags_allow_blocking(gfp_mask)) {
566 preempt_disable();
567 return 0;
568 }
569
570 /*
571 * Calculate number and height of fully populated subtrees it takes to
572 * store (1 << order) elements.
573 */
574 nr_subtrees = 1 << order;
575 for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
576 subtree_height++)
577 nr_subtrees >>= RADIX_TREE_MAP_SHIFT;
578
579 /*
580 * The worst case is zero height tree with a single item at index 0 and
581 * then inserting items starting at ULONG_MAX - (1 << order).
582 *
583 * This requires RADIX_TREE_MAX_PATH nodes to build branch from root to
584 * 0-index item.
585 */
586 nr_nodes = RADIX_TREE_MAX_PATH;
587
588 /* Plus branch to fully populated subtrees. */
589 nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;
590
591 /* Root node is shared. */
592 nr_nodes--;
593
594 /* Plus nodes required to build subtrees. */
595 nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];
596
597 return __radix_tree_preload(gfp_mask, nr_nodes);
598}
599
600static unsigned radix_tree_load_root(const struct radix_tree_root *root, 410static unsigned radix_tree_load_root(const struct radix_tree_root *root,
601 struct radix_tree_node **nodep, unsigned long *maxindex) 411 struct radix_tree_node **nodep, unsigned long *maxindex)
602{ 412{
603 struct radix_tree_node *node = rcu_dereference_raw(root->rnode); 413 struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
604 414
605 *nodep = node; 415 *nodep = node;
606 416
@@ -629,7 +439,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
629 while (index > shift_maxindex(maxshift)) 439 while (index > shift_maxindex(maxshift))
630 maxshift += RADIX_TREE_MAP_SHIFT; 440 maxshift += RADIX_TREE_MAP_SHIFT;
631 441
632 entry = rcu_dereference_raw(root->rnode); 442 entry = rcu_dereference_raw(root->xa_head);
633 if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE))) 443 if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
634 goto out; 444 goto out;
635 445
@@ -656,9 +466,9 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
656 BUG_ON(shift > BITS_PER_LONG); 466 BUG_ON(shift > BITS_PER_LONG);
657 if (radix_tree_is_internal_node(entry)) { 467 if (radix_tree_is_internal_node(entry)) {
658 entry_to_node(entry)->parent = node; 468 entry_to_node(entry)->parent = node;
659 } else if (radix_tree_exceptional_entry(entry)) { 469 } else if (xa_is_value(entry)) {
660 /* Moving an exceptional root->rnode to a node */ 470 /* Moving a value entry root->xa_head to a node */
661 node->exceptional = 1; 471 node->nr_values = 1;
662 } 472 }
663 /* 473 /*
664 * entry was already in the radix tree, so we do not need 474 * entry was already in the radix tree, so we do not need
@@ -666,7 +476,7 @@ static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
666 */ 476 */
667 node->slots[0] = (void __rcu *)entry; 477 node->slots[0] = (void __rcu *)entry;
668 entry = node_to_entry(node); 478 entry = node_to_entry(node);
669 rcu_assign_pointer(root->rnode, entry); 479 rcu_assign_pointer(root->xa_head, entry);
670 shift += RADIX_TREE_MAP_SHIFT; 480 shift += RADIX_TREE_MAP_SHIFT;
671 } while (shift <= maxshift); 481 } while (shift <= maxshift);
672out: 482out:
@@ -677,13 +487,12 @@ out:
677 * radix_tree_shrink - shrink radix tree to minimum height 487 * radix_tree_shrink - shrink radix tree to minimum height
678 * @root radix tree root 488 * @root radix tree root
679 */ 489 */
680static inline bool radix_tree_shrink(struct radix_tree_root *root, 490static inline bool radix_tree_shrink(struct radix_tree_root *root)
681 radix_tree_update_node_t update_node)
682{ 491{
683 bool shrunk = false; 492 bool shrunk = false;
684 493
685 for (;;) { 494 for (;;) {
686 struct radix_tree_node *node = rcu_dereference_raw(root->rnode); 495 struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
687 struct radix_tree_node *child; 496 struct radix_tree_node *child;
688 497
689 if (!radix_tree_is_internal_node(node)) 498 if (!radix_tree_is_internal_node(node))
@@ -692,15 +501,20 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
692 501
693 /* 502 /*
694 * The candidate node has more than one child, or its child 503 * The candidate node has more than one child, or its child
695 * is not at the leftmost slot, or the child is a multiorder 504 * is not at the leftmost slot, we cannot shrink.
696 * entry, we cannot shrink.
697 */ 505 */
698 if (node->count != 1) 506 if (node->count != 1)
699 break; 507 break;
700 child = rcu_dereference_raw(node->slots[0]); 508 child = rcu_dereference_raw(node->slots[0]);
701 if (!child) 509 if (!child)
702 break; 510 break;
703 if (!radix_tree_is_internal_node(child) && node->shift) 511
512 /*
513 * For an IDR, we must not shrink entry 0 into the root in
514 * case somebody calls idr_replace() with a pointer that
515 * appears to be an internal entry
516 */
517 if (!node->shift && is_idr(root))
704 break; 518 break;
705 519
706 if (radix_tree_is_internal_node(child)) 520 if (radix_tree_is_internal_node(child))
@@ -711,9 +525,9 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
711 * moving the node from one part of the tree to another: if it 525 * moving the node from one part of the tree to another: if it
712 * was safe to dereference the old pointer to it 526 * was safe to dereference the old pointer to it
713 * (node->slots[0]), it will be safe to dereference the new 527 * (node->slots[0]), it will be safe to dereference the new
714 * one (root->rnode) as far as dependent read barriers go. 528 * one (root->xa_head) as far as dependent read barriers go.
715 */ 529 */
716 root->rnode = (void __rcu *)child; 530 root->xa_head = (void __rcu *)child;
717 if (is_idr(root) && !tag_get(node, IDR_FREE, 0)) 531 if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
718 root_tag_clear(root, IDR_FREE); 532 root_tag_clear(root, IDR_FREE);
719 533
@@ -738,8 +552,6 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
738 node->count = 0; 552 node->count = 0;
739 if (!radix_tree_is_internal_node(child)) { 553 if (!radix_tree_is_internal_node(child)) {
740 node->slots[0] = (void __rcu *)RADIX_TREE_RETRY; 554 node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
741 if (update_node)
742 update_node(node);
743 } 555 }
744 556
745 WARN_ON_ONCE(!list_empty(&node->private_list)); 557 WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -751,8 +563,7 @@ static inline bool radix_tree_shrink(struct radix_tree_root *root,
751} 563}
752 564
753static bool delete_node(struct radix_tree_root *root, 565static bool delete_node(struct radix_tree_root *root,
754 struct radix_tree_node *node, 566 struct radix_tree_node *node)
755 radix_tree_update_node_t update_node)
756{ 567{
757 bool deleted = false; 568 bool deleted = false;
758 569
@@ -761,9 +572,8 @@ static bool delete_node(struct radix_tree_root *root,
761 572
762 if (node->count) { 573 if (node->count) {
763 if (node_to_entry(node) == 574 if (node_to_entry(node) ==
764 rcu_dereference_raw(root->rnode)) 575 rcu_dereference_raw(root->xa_head))
765 deleted |= radix_tree_shrink(root, 576 deleted |= radix_tree_shrink(root);
766 update_node);
767 return deleted; 577 return deleted;
768 } 578 }
769 579
@@ -778,7 +588,7 @@ static bool delete_node(struct radix_tree_root *root,
778 */ 588 */
779 if (!is_idr(root)) 589 if (!is_idr(root))
780 root_tag_clear_all(root); 590 root_tag_clear_all(root);
781 root->rnode = NULL; 591 root->xa_head = NULL;
782 } 592 }
783 593
784 WARN_ON_ONCE(!list_empty(&node->private_list)); 594 WARN_ON_ONCE(!list_empty(&node->private_list));
@@ -795,7 +605,6 @@ static bool delete_node(struct radix_tree_root *root,
795 * __radix_tree_create - create a slot in a radix tree 605 * __radix_tree_create - create a slot in a radix tree
796 * @root: radix tree root 606 * @root: radix tree root
797 * @index: index key 607 * @index: index key
798 * @order: index occupies 2^order aligned slots
799 * @nodep: returns node 608 * @nodep: returns node
800 * @slotp: returns slot 609 * @slotp: returns slot
801 * 610 *
@@ -803,36 +612,34 @@ static bool delete_node(struct radix_tree_root *root,
803 * at position @index in the radix tree @root. 612 * at position @index in the radix tree @root.
804 * 613 *
805 * Until there is more than one item in the tree, no nodes are 614 * Until there is more than one item in the tree, no nodes are
806 * allocated and @root->rnode is used as a direct slot instead of 615 * allocated and @root->xa_head is used as a direct slot instead of
807 * pointing to a node, in which case *@nodep will be NULL. 616 * pointing to a node, in which case *@nodep will be NULL.
808 * 617 *
809 * Returns -ENOMEM, or 0 for success. 618 * Returns -ENOMEM, or 0 for success.
810 */ 619 */
811int __radix_tree_create(struct radix_tree_root *root, unsigned long index, 620static int __radix_tree_create(struct radix_tree_root *root,
812 unsigned order, struct radix_tree_node **nodep, 621 unsigned long index, struct radix_tree_node **nodep,
813 void __rcu ***slotp) 622 void __rcu ***slotp)
814{ 623{
815 struct radix_tree_node *node = NULL, *child; 624 struct radix_tree_node *node = NULL, *child;
816 void __rcu **slot = (void __rcu **)&root->rnode; 625 void __rcu **slot = (void __rcu **)&root->xa_head;
817 unsigned long maxindex; 626 unsigned long maxindex;
818 unsigned int shift, offset = 0; 627 unsigned int shift, offset = 0;
819 unsigned long max = index | ((1UL << order) - 1); 628 unsigned long max = index;
820 gfp_t gfp = root_gfp_mask(root); 629 gfp_t gfp = root_gfp_mask(root);
821 630
822 shift = radix_tree_load_root(root, &child, &maxindex); 631 shift = radix_tree_load_root(root, &child, &maxindex);
823 632
824 /* Make sure the tree is high enough. */ 633 /* Make sure the tree is high enough. */
825 if (order > 0 && max == ((1UL << order) - 1))
826 max++;
827 if (max > maxindex) { 634 if (max > maxindex) {
828 int error = radix_tree_extend(root, gfp, max, shift); 635 int error = radix_tree_extend(root, gfp, max, shift);
829 if (error < 0) 636 if (error < 0)
830 return error; 637 return error;
831 shift = error; 638 shift = error;
832 child = rcu_dereference_raw(root->rnode); 639 child = rcu_dereference_raw(root->xa_head);
833 } 640 }
834 641
835 while (shift > order) { 642 while (shift > 0) {
836 shift -= RADIX_TREE_MAP_SHIFT; 643 shift -= RADIX_TREE_MAP_SHIFT;
837 if (child == NULL) { 644 if (child == NULL) {
838 /* Have to add a child node. */ 645 /* Have to add a child node. */
@@ -875,8 +682,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
875 682
876 for (;;) { 683 for (;;) {
877 void *entry = rcu_dereference_raw(child->slots[offset]); 684 void *entry = rcu_dereference_raw(child->slots[offset]);
878 if (radix_tree_is_internal_node(entry) && 685 if (xa_is_node(entry) && child->shift) {
879 !is_sibling_entry(child, entry)) {
880 child = entry_to_node(entry); 686 child = entry_to_node(entry);
881 offset = 0; 687 offset = 0;
882 continue; 688 continue;
@@ -894,96 +700,30 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
894 } 700 }
895} 701}
896 702
897#ifdef CONFIG_RADIX_TREE_MULTIORDER
898static inline int insert_entries(struct radix_tree_node *node, 703static inline int insert_entries(struct radix_tree_node *node,
899 void __rcu **slot, void *item, unsigned order, bool replace) 704 void __rcu **slot, void *item, bool replace)
900{
901 struct radix_tree_node *child;
902 unsigned i, n, tag, offset, tags = 0;
903
904 if (node) {
905 if (order > node->shift)
906 n = 1 << (order - node->shift);
907 else
908 n = 1;
909 offset = get_slot_offset(node, slot);
910 } else {
911 n = 1;
912 offset = 0;
913 }
914
915 if (n > 1) {
916 offset = offset & ~(n - 1);
917 slot = &node->slots[offset];
918 }
919 child = node_to_entry(slot);
920
921 for (i = 0; i < n; i++) {
922 if (slot[i]) {
923 if (replace) {
924 node->count--;
925 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
926 if (tag_get(node, tag, offset + i))
927 tags |= 1 << tag;
928 } else
929 return -EEXIST;
930 }
931 }
932
933 for (i = 0; i < n; i++) {
934 struct radix_tree_node *old = rcu_dereference_raw(slot[i]);
935 if (i) {
936 rcu_assign_pointer(slot[i], child);
937 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
938 if (tags & (1 << tag))
939 tag_clear(node, tag, offset + i);
940 } else {
941 rcu_assign_pointer(slot[i], item);
942 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
943 if (tags & (1 << tag))
944 tag_set(node, tag, offset);
945 }
946 if (radix_tree_is_internal_node(old) &&
947 !is_sibling_entry(node, old) &&
948 (old != RADIX_TREE_RETRY))
949 radix_tree_free_nodes(old);
950 if (radix_tree_exceptional_entry(old))
951 node->exceptional--;
952 }
953 if (node) {
954 node->count += n;
955 if (radix_tree_exceptional_entry(item))
956 node->exceptional += n;
957 }
958 return n;
959}
960#else
961static inline int insert_entries(struct radix_tree_node *node,
962 void __rcu **slot, void *item, unsigned order, bool replace)
963{ 705{
964 if (*slot) 706 if (*slot)
965 return -EEXIST; 707 return -EEXIST;
966 rcu_assign_pointer(*slot, item); 708 rcu_assign_pointer(*slot, item);
967 if (node) { 709 if (node) {
968 node->count++; 710 node->count++;
969 if (radix_tree_exceptional_entry(item)) 711 if (xa_is_value(item))
970 node->exceptional++; 712 node->nr_values++;
971 } 713 }
972 return 1; 714 return 1;
973} 715}
974#endif
975 716
976/** 717/**
977 * __radix_tree_insert - insert into a radix tree 718 * __radix_tree_insert - insert into a radix tree
978 * @root: radix tree root 719 * @root: radix tree root
979 * @index: index key 720 * @index: index key
980 * @order: key covers the 2^order indices around index
981 * @item: item to insert 721 * @item: item to insert
982 * 722 *
983 * Insert an item into the radix tree at position @index. 723 * Insert an item into the radix tree at position @index.
984 */ 724 */
985int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, 725int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
986 unsigned order, void *item) 726 void *item)
987{ 727{
988 struct radix_tree_node *node; 728 struct radix_tree_node *node;
989 void __rcu **slot; 729 void __rcu **slot;
@@ -991,11 +731,11 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
991 731
992 BUG_ON(radix_tree_is_internal_node(item)); 732 BUG_ON(radix_tree_is_internal_node(item));
993 733
994 error = __radix_tree_create(root, index, order, &node, &slot); 734 error = __radix_tree_create(root, index, &node, &slot);
995 if (error) 735 if (error)
996 return error; 736 return error;
997 737
998 error = insert_entries(node, slot, item, order, false); 738 error = insert_entries(node, slot, item, false);
999 if (error < 0) 739 if (error < 0)
1000 return error; 740 return error;
1001 741
@@ -1010,7 +750,7 @@ int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
1010 750
1011 return 0; 751 return 0;
1012} 752}
1013EXPORT_SYMBOL(__radix_tree_insert); 753EXPORT_SYMBOL(radix_tree_insert);
1014 754
1015/** 755/**
1016 * __radix_tree_lookup - lookup an item in a radix tree 756 * __radix_tree_lookup - lookup an item in a radix tree
@@ -1023,7 +763,7 @@ EXPORT_SYMBOL(__radix_tree_insert);
1023 * tree @root. 763 * tree @root.
1024 * 764 *
1025 * Until there is more than one item in the tree, no nodes are 765 * Until there is more than one item in the tree, no nodes are
1026 * allocated and @root->rnode is used as a direct slot instead of 766 * allocated and @root->xa_head is used as a direct slot instead of
1027 * pointing to a node, in which case *@nodep will be NULL. 767 * pointing to a node, in which case *@nodep will be NULL.
1028 */ 768 */
1029void *__radix_tree_lookup(const struct radix_tree_root *root, 769void *__radix_tree_lookup(const struct radix_tree_root *root,
@@ -1036,7 +776,7 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
1036 776
1037 restart: 777 restart:
1038 parent = NULL; 778 parent = NULL;
1039 slot = (void __rcu **)&root->rnode; 779 slot = (void __rcu **)&root->xa_head;
1040 radix_tree_load_root(root, &node, &maxindex); 780 radix_tree_load_root(root, &node, &maxindex);
1041 if (index > maxindex) 781 if (index > maxindex)
1042 return NULL; 782 return NULL;
@@ -1044,11 +784,13 @@ void *__radix_tree_lookup(const struct radix_tree_root *root,
1044 while (radix_tree_is_internal_node(node)) { 784 while (radix_tree_is_internal_node(node)) {
1045 unsigned offset; 785 unsigned offset;
1046 786
1047 if (node == RADIX_TREE_RETRY)
1048 goto restart;
1049 parent = entry_to_node(node); 787 parent = entry_to_node(node);
1050 offset = radix_tree_descend(parent, &node, index); 788 offset = radix_tree_descend(parent, &node, index);
1051 slot = parent->slots + offset; 789 slot = parent->slots + offset;
790 if (node == RADIX_TREE_RETRY)
791 goto restart;
792 if (parent->shift == 0)
793 break;
1052 } 794 }
1053 795
1054 if (nodep) 796 if (nodep)
@@ -1100,36 +842,12 @@ void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
1100} 842}
1101EXPORT_SYMBOL(radix_tree_lookup); 843EXPORT_SYMBOL(radix_tree_lookup);
1102 844
1103static inline void replace_sibling_entries(struct radix_tree_node *node,
1104 void __rcu **slot, int count, int exceptional)
1105{
1106#ifdef CONFIG_RADIX_TREE_MULTIORDER
1107 void *ptr = node_to_entry(slot);
1108 unsigned offset = get_slot_offset(node, slot) + 1;
1109
1110 while (offset < RADIX_TREE_MAP_SIZE) {
1111 if (rcu_dereference_raw(node->slots[offset]) != ptr)
1112 break;
1113 if (count < 0) {
1114 node->slots[offset] = NULL;
1115 node->count--;
1116 }
1117 node->exceptional += exceptional;
1118 offset++;
1119 }
1120#endif
1121}
1122
1123static void replace_slot(void __rcu **slot, void *item, 845static void replace_slot(void __rcu **slot, void *item,
1124 struct radix_tree_node *node, int count, int exceptional) 846 struct radix_tree_node *node, int count, int values)
1125{ 847{
1126 if (WARN_ON_ONCE(radix_tree_is_internal_node(item))) 848 if (node && (count || values)) {
1127 return;
1128
1129 if (node && (count || exceptional)) {
1130 node->count += count; 849 node->count += count;
1131 node->exceptional += exceptional; 850 node->nr_values += values;
1132 replace_sibling_entries(node, slot, count, exceptional);
1133 } 851 }
1134 852
1135 rcu_assign_pointer(*slot, item); 853 rcu_assign_pointer(*slot, item);
@@ -1172,37 +890,31 @@ static int calculate_count(struct radix_tree_root *root,
1172 * @node: pointer to tree node 890 * @node: pointer to tree node
1173 * @slot: pointer to slot in @node 891 * @slot: pointer to slot in @node
1174 * @item: new item to store in the slot. 892 * @item: new item to store in the slot.
1175 * @update_node: callback for changing leaf nodes
1176 * 893 *
1177 * For use with __radix_tree_lookup(). Caller must hold tree write locked 894 * For use with __radix_tree_lookup(). Caller must hold tree write locked
1178 * across slot lookup and replacement. 895 * across slot lookup and replacement.
1179 */ 896 */
1180void __radix_tree_replace(struct radix_tree_root *root, 897void __radix_tree_replace(struct radix_tree_root *root,
1181 struct radix_tree_node *node, 898 struct radix_tree_node *node,
1182 void __rcu **slot, void *item, 899 void __rcu **slot, void *item)
1183 radix_tree_update_node_t update_node)
1184{ 900{
1185 void *old = rcu_dereference_raw(*slot); 901 void *old = rcu_dereference_raw(*slot);
1186 int exceptional = !!radix_tree_exceptional_entry(item) - 902 int values = !!xa_is_value(item) - !!xa_is_value(old);
1187 !!radix_tree_exceptional_entry(old);
1188 int count = calculate_count(root, node, slot, item, old); 903 int count = calculate_count(root, node, slot, item, old);
1189 904
1190 /* 905 /*
1191 * This function supports replacing exceptional entries and 906 * This function supports replacing value entries and
1192 * deleting entries, but that needs accounting against the 907 * deleting entries, but that needs accounting against the
1193 * node unless the slot is root->rnode. 908 * node unless the slot is root->xa_head.
1194 */ 909 */
1195 WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->rnode) && 910 WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
1196 (count || exceptional)); 911 (count || values));
1197 replace_slot(slot, item, node, count, exceptional); 912 replace_slot(slot, item, node, count, values);
1198 913
1199 if (!node) 914 if (!node)
1200 return; 915 return;
1201 916
1202 if (update_node) 917 delete_node(root, node);
1203 update_node(node);
1204
1205 delete_node(root, node, update_node);
1206} 918}
1207 919
1208/** 920/**
@@ -1211,12 +923,12 @@ void __radix_tree_replace(struct radix_tree_root *root,
1211 * @slot: pointer to slot 923 * @slot: pointer to slot
1212 * @item: new item to store in the slot. 924 * @item: new item to store in the slot.
1213 * 925 *
1214 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(), 926 * For use with radix_tree_lookup_slot() and
1215 * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked 927 * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
1216 * across slot lookup and replacement. 928 * across slot lookup and replacement.
1217 * 929 *
1218 * NOTE: This cannot be used to switch between non-entries (empty slots), 930 * NOTE: This cannot be used to switch between non-entries (empty slots),
1219 * regular entries, and exceptional entries, as that requires accounting 931 * regular entries, and value entries, as that requires accounting
1220 * inside the radix tree node. When switching from one type of entry or 932 * inside the radix tree node. When switching from one type of entry or
1221 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or 933 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
1222 * radix_tree_iter_replace(). 934 * radix_tree_iter_replace().
@@ -1224,7 +936,7 @@ void __radix_tree_replace(struct radix_tree_root *root,
1224void radix_tree_replace_slot(struct radix_tree_root *root, 936void radix_tree_replace_slot(struct radix_tree_root *root,
1225 void __rcu **slot, void *item) 937 void __rcu **slot, void *item)
1226{ 938{
1227 __radix_tree_replace(root, NULL, slot, item, NULL); 939 __radix_tree_replace(root, NULL, slot, item);
1228} 940}
1229EXPORT_SYMBOL(radix_tree_replace_slot); 941EXPORT_SYMBOL(radix_tree_replace_slot);
1230 942
@@ -1234,162 +946,16 @@ EXPORT_SYMBOL(radix_tree_replace_slot);
1234 * @slot: pointer to slot 946 * @slot: pointer to slot
1235 * @item: new item to store in the slot. 947 * @item: new item to store in the slot.
1236 * 948 *
1237 * For use with radix_tree_split() and radix_tree_for_each_slot(). 949 * For use with radix_tree_for_each_slot().
1238 * Caller must hold tree write locked across split and replacement. 950 * Caller must hold tree write locked.
1239 */ 951 */
1240void radix_tree_iter_replace(struct radix_tree_root *root, 952void radix_tree_iter_replace(struct radix_tree_root *root,
1241 const struct radix_tree_iter *iter, 953 const struct radix_tree_iter *iter,
1242 void __rcu **slot, void *item) 954 void __rcu **slot, void *item)
1243{ 955{
1244 __radix_tree_replace(root, iter->node, slot, item, NULL); 956 __radix_tree_replace(root, iter->node, slot, item);
1245}
1246
1247#ifdef CONFIG_RADIX_TREE_MULTIORDER
1248/**
1249 * radix_tree_join - replace multiple entries with one multiorder entry
1250 * @root: radix tree root
1251 * @index: an index inside the new entry
1252 * @order: order of the new entry
1253 * @item: new entry
1254 *
1255 * Call this function to replace several entries with one larger entry.
1256 * The existing entries are presumed to not need freeing as a result of
1257 * this call.
1258 *
1259 * The replacement entry will have all the tags set on it that were set
1260 * on any of the entries it is replacing.
1261 */
1262int radix_tree_join(struct radix_tree_root *root, unsigned long index,
1263 unsigned order, void *item)
1264{
1265 struct radix_tree_node *node;
1266 void __rcu **slot;
1267 int error;
1268
1269 BUG_ON(radix_tree_is_internal_node(item));
1270
1271 error = __radix_tree_create(root, index, order, &node, &slot);
1272 if (!error)
1273 error = insert_entries(node, slot, item, order, true);
1274 if (error > 0)
1275 error = 0;
1276
1277 return error;
1278} 957}
1279 958
1280/**
1281 * radix_tree_split - Split an entry into smaller entries
1282 * @root: radix tree root
1283 * @index: An index within the large entry
1284 * @order: Order of new entries
1285 *
1286 * Call this function as the first step in replacing a multiorder entry
1287 * with several entries of lower order. After this function returns,
1288 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
1289 * and call radix_tree_iter_replace() to set up each new entry.
1290 *
1291 * The tags from this entry are replicated to all the new entries.
1292 *
1293 * The radix tree should be locked against modification during the entire
1294 * replacement operation. Lock-free lookups will see RADIX_TREE_RETRY which
1295 * should prompt RCU walkers to restart the lookup from the root.
1296 */
1297int radix_tree_split(struct radix_tree_root *root, unsigned long index,
1298 unsigned order)
1299{
1300 struct radix_tree_node *parent, *node, *child;
1301 void __rcu **slot;
1302 unsigned int offset, end;
1303 unsigned n, tag, tags = 0;
1304 gfp_t gfp = root_gfp_mask(root);
1305
1306 if (!__radix_tree_lookup(root, index, &parent, &slot))
1307 return -ENOENT;
1308 if (!parent)
1309 return -ENOENT;
1310
1311 offset = get_slot_offset(parent, slot);
1312
1313 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1314 if (tag_get(parent, tag, offset))
1315 tags |= 1 << tag;
1316
1317 for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
1318 if (!is_sibling_entry(parent,
1319 rcu_dereference_raw(parent->slots[end])))
1320 break;
1321 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1322 if (tags & (1 << tag))
1323 tag_set(parent, tag, end);
1324 /* rcu_assign_pointer ensures tags are set before RETRY */
1325 rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
1326 }
1327 rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
1328 parent->exceptional -= (end - offset);
1329
1330 if (order == parent->shift)
1331 return 0;
1332 if (order > parent->shift) {
1333 while (offset < end)
1334 offset += insert_entries(parent, &parent->slots[offset],
1335 RADIX_TREE_RETRY, order, true);
1336 return 0;
1337 }
1338
1339 node = parent;
1340
1341 for (;;) {
1342 if (node->shift > order) {
1343 child = radix_tree_node_alloc(gfp, node, root,
1344 node->shift - RADIX_TREE_MAP_SHIFT,
1345 offset, 0, 0);
1346 if (!child)
1347 goto nomem;
1348 if (node != parent) {
1349 node->count++;
1350 rcu_assign_pointer(node->slots[offset],
1351 node_to_entry(child));
1352 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1353 if (tags & (1 << tag))
1354 tag_set(node, tag, offset);
1355 }
1356
1357 node = child;
1358 offset = 0;
1359 continue;
1360 }
1361
1362 n = insert_entries(node, &node->slots[offset],
1363 RADIX_TREE_RETRY, order, false);
1364 BUG_ON(n > RADIX_TREE_MAP_SIZE);
1365
1366 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1367 if (tags & (1 << tag))
1368 tag_set(node, tag, offset);
1369 offset += n;
1370
1371 while (offset == RADIX_TREE_MAP_SIZE) {
1372 if (node == parent)
1373 break;
1374 offset = node->offset;
1375 child = node;
1376 node = node->parent;
1377 rcu_assign_pointer(node->slots[offset],
1378 node_to_entry(child));
1379 offset++;
1380 }
1381 if ((node == parent) && (offset == end))
1382 return 0;
1383 }
1384
1385 nomem:
1386 /* Shouldn't happen; did user forget to preload? */
1387 /* TODO: free all the allocated nodes */
1388 WARN_ON(1);
1389 return -ENOMEM;
1390}
1391#endif
1392
1393static void node_tag_set(struct radix_tree_root *root, 959static void node_tag_set(struct radix_tree_root *root,
1394 struct radix_tree_node *node, 960 struct radix_tree_node *node,
1395 unsigned int tag, unsigned int offset) 961 unsigned int tag, unsigned int offset)
@@ -1447,18 +1013,6 @@ void *radix_tree_tag_set(struct radix_tree_root *root,
1447} 1013}
1448EXPORT_SYMBOL(radix_tree_tag_set); 1014EXPORT_SYMBOL(radix_tree_tag_set);
1449 1015
1450/**
1451 * radix_tree_iter_tag_set - set a tag on the current iterator entry
1452 * @root: radix tree root
1453 * @iter: iterator state
1454 * @tag: tag to set
1455 */
1456void radix_tree_iter_tag_set(struct radix_tree_root *root,
1457 const struct radix_tree_iter *iter, unsigned int tag)
1458{
1459 node_tag_set(root, iter->node, tag, iter_offset(iter));
1460}
1461
1462static void node_tag_clear(struct radix_tree_root *root, 1016static void node_tag_clear(struct radix_tree_root *root,
1463 struct radix_tree_node *node, 1017 struct radix_tree_node *node,
1464 unsigned int tag, unsigned int offset) 1018 unsigned int tag, unsigned int offset)
@@ -1574,14 +1128,6 @@ int radix_tree_tag_get(const struct radix_tree_root *root,
1574} 1128}
1575EXPORT_SYMBOL(radix_tree_tag_get); 1129EXPORT_SYMBOL(radix_tree_tag_get);
1576 1130
1577static inline void __set_iter_shift(struct radix_tree_iter *iter,
1578 unsigned int shift)
1579{
1580#ifdef CONFIG_RADIX_TREE_MULTIORDER
1581 iter->shift = shift;
1582#endif
1583}
1584
1585/* Construct iter->tags bit-mask from node->tags[tag] array */ 1131/* Construct iter->tags bit-mask from node->tags[tag] array */
1586static void set_iter_tags(struct radix_tree_iter *iter, 1132static void set_iter_tags(struct radix_tree_iter *iter,
1587 struct radix_tree_node *node, unsigned offset, 1133 struct radix_tree_node *node, unsigned offset,
@@ -1608,92 +1154,11 @@ static void set_iter_tags(struct radix_tree_iter *iter,
1608 } 1154 }
1609} 1155}
1610 1156
1611#ifdef CONFIG_RADIX_TREE_MULTIORDER
1612static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1613 void __rcu **slot, struct radix_tree_iter *iter)
1614{
1615 while (iter->index < iter->next_index) {
1616 *nodep = rcu_dereference_raw(*slot);
1617 if (*nodep && !is_sibling_entry(iter->node, *nodep))
1618 return slot;
1619 slot++;
1620 iter->index = __radix_tree_iter_add(iter, 1);
1621 iter->tags >>= 1;
1622 }
1623
1624 *nodep = NULL;
1625 return NULL;
1626}
1627
1628void __rcu **__radix_tree_next_slot(void __rcu **slot,
1629 struct radix_tree_iter *iter, unsigned flags)
1630{
1631 unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1632 struct radix_tree_node *node;
1633
1634 slot = skip_siblings(&node, slot, iter);
1635
1636 while (radix_tree_is_internal_node(node)) {
1637 unsigned offset;
1638 unsigned long next_index;
1639
1640 if (node == RADIX_TREE_RETRY)
1641 return slot;
1642 node = entry_to_node(node);
1643 iter->node = node;
1644 iter->shift = node->shift;
1645
1646 if (flags & RADIX_TREE_ITER_TAGGED) {
1647 offset = radix_tree_find_next_bit(node, tag, 0);
1648 if (offset == RADIX_TREE_MAP_SIZE)
1649 return NULL;
1650 slot = &node->slots[offset];
1651 iter->index = __radix_tree_iter_add(iter, offset);
1652 set_iter_tags(iter, node, offset, tag);
1653 node = rcu_dereference_raw(*slot);
1654 } else {
1655 offset = 0;
1656 slot = &node->slots[0];
1657 for (;;) {
1658 node = rcu_dereference_raw(*slot);
1659 if (node)
1660 break;
1661 slot++;
1662 offset++;
1663 if (offset == RADIX_TREE_MAP_SIZE)
1664 return NULL;
1665 }
1666 iter->index = __radix_tree_iter_add(iter, offset);
1667 }
1668 if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
1669 goto none;
1670 next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
1671 if (next_index < iter->next_index)
1672 iter->next_index = next_index;
1673 }
1674
1675 return slot;
1676 none:
1677 iter->next_index = 0;
1678 return NULL;
1679}
1680EXPORT_SYMBOL(__radix_tree_next_slot);
1681#else
1682static void __rcu **skip_siblings(struct radix_tree_node **nodep,
1683 void __rcu **slot, struct radix_tree_iter *iter)
1684{
1685 return slot;
1686}
1687#endif
1688
1689void __rcu **radix_tree_iter_resume(void __rcu **slot, 1157void __rcu **radix_tree_iter_resume(void __rcu **slot,
1690 struct radix_tree_iter *iter) 1158 struct radix_tree_iter *iter)
1691{ 1159{
1692 struct radix_tree_node *node;
1693
1694 slot++; 1160 slot++;
1695 iter->index = __radix_tree_iter_add(iter, 1); 1161 iter->index = __radix_tree_iter_add(iter, 1);
1696 skip_siblings(&node, slot, iter);
1697 iter->next_index = iter->index; 1162 iter->next_index = iter->index;
1698 iter->tags = 0; 1163 iter->tags = 0;
1699 return NULL; 1164 return NULL;
@@ -1744,8 +1209,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1744 iter->next_index = maxindex + 1; 1209 iter->next_index = maxindex + 1;
1745 iter->tags = 1; 1210 iter->tags = 1;
1746 iter->node = NULL; 1211 iter->node = NULL;
1747 __set_iter_shift(iter, 0); 1212 return (void __rcu **)&root->xa_head;
1748 return (void __rcu **)&root->rnode;
1749 } 1213 }
1750 1214
1751 do { 1215 do {
@@ -1765,8 +1229,6 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1765 while (++offset < RADIX_TREE_MAP_SIZE) { 1229 while (++offset < RADIX_TREE_MAP_SIZE) {
1766 void *slot = rcu_dereference_raw( 1230 void *slot = rcu_dereference_raw(
1767 node->slots[offset]); 1231 node->slots[offset]);
1768 if (is_sibling_entry(node, slot))
1769 continue;
1770 if (slot) 1232 if (slot)
1771 break; 1233 break;
1772 } 1234 }
@@ -1784,13 +1246,12 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1784 goto restart; 1246 goto restart;
1785 if (child == RADIX_TREE_RETRY) 1247 if (child == RADIX_TREE_RETRY)
1786 break; 1248 break;
1787 } while (radix_tree_is_internal_node(child)); 1249 } while (node->shift && radix_tree_is_internal_node(child));
1788 1250
1789 /* Update the iterator state */ 1251 /* Update the iterator state */
1790 iter->index = (index &~ node_maxindex(node)) | (offset << node->shift); 1252 iter->index = (index &~ node_maxindex(node)) | offset;
1791 iter->next_index = (index | node_maxindex(node)) + 1; 1253 iter->next_index = (index | node_maxindex(node)) + 1;
1792 iter->node = node; 1254 iter->node = node;
1793 __set_iter_shift(iter, node->shift);
1794 1255
1795 if (flags & RADIX_TREE_ITER_TAGGED) 1256 if (flags & RADIX_TREE_ITER_TAGGED)
1796 set_iter_tags(iter, node, offset, tag); 1257 set_iter_tags(iter, node, offset, tag);
@@ -1847,48 +1308,6 @@ radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
1847EXPORT_SYMBOL(radix_tree_gang_lookup); 1308EXPORT_SYMBOL(radix_tree_gang_lookup);
1848 1309
1849/** 1310/**
1850 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
1851 * @root: radix tree root
1852 * @results: where the results of the lookup are placed
1853 * @indices: where their indices should be placed (but usually NULL)
1854 * @first_index: start the lookup from this key
1855 * @max_items: place up to this many items at *results
1856 *
1857 * Performs an index-ascending scan of the tree for present items. Places
1858 * their slots at *@results and returns the number of items which were
1859 * placed at *@results.
1860 *
1861 * The implementation is naive.
1862 *
1863 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
1864 * be dereferenced with radix_tree_deref_slot, and if using only RCU
1865 * protection, radix_tree_deref_slot may fail requiring a retry.
1866 */
1867unsigned int
1868radix_tree_gang_lookup_slot(const struct radix_tree_root *root,
1869 void __rcu ***results, unsigned long *indices,
1870 unsigned long first_index, unsigned int max_items)
1871{
1872 struct radix_tree_iter iter;
1873 void __rcu **slot;
1874 unsigned int ret = 0;
1875
1876 if (unlikely(!max_items))
1877 return 0;
1878
1879 radix_tree_for_each_slot(slot, root, &iter, first_index) {
1880 results[ret] = slot;
1881 if (indices)
1882 indices[ret] = iter.index;
1883 if (++ret == max_items)
1884 break;
1885 }
1886
1887 return ret;
1888}
1889EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
1890
1891/**
1892 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree 1311 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
1893 * based on a tag 1312 * based on a tag
1894 * @root: radix tree root 1313 * @root: radix tree root
@@ -1964,28 +1383,11 @@ radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
1964} 1383}
1965EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot); 1384EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1966 1385
1967/**
1968 * __radix_tree_delete_node - try to free node after clearing a slot
1969 * @root: radix tree root
1970 * @node: node containing @index
1971 * @update_node: callback for changing leaf nodes
1972 *
1973 * After clearing the slot at @index in @node from radix tree
1974 * rooted at @root, call this function to attempt freeing the
1975 * node and shrinking the tree.
1976 */
1977void __radix_tree_delete_node(struct radix_tree_root *root,
1978 struct radix_tree_node *node,
1979 radix_tree_update_node_t update_node)
1980{
1981 delete_node(root, node, update_node);
1982}
1983
1984static bool __radix_tree_delete(struct radix_tree_root *root, 1386static bool __radix_tree_delete(struct radix_tree_root *root,
1985 struct radix_tree_node *node, void __rcu **slot) 1387 struct radix_tree_node *node, void __rcu **slot)
1986{ 1388{
1987 void *old = rcu_dereference_raw(*slot); 1389 void *old = rcu_dereference_raw(*slot);
1988 int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; 1390 int values = xa_is_value(old) ? -1 : 0;
1989 unsigned offset = get_slot_offset(node, slot); 1391 unsigned offset = get_slot_offset(node, slot);
1990 int tag; 1392 int tag;
1991 1393
@@ -1995,8 +1397,8 @@ static bool __radix_tree_delete(struct radix_tree_root *root,
1995 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) 1397 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1996 node_tag_clear(root, node, tag, offset); 1398 node_tag_clear(root, node, tag, offset);
1997 1399
1998 replace_slot(slot, NULL, node, -1, exceptional); 1400 replace_slot(slot, NULL, node, -1, values);
1999 return node && delete_node(root, node, NULL); 1401 return node && delete_node(root, node);
2000} 1402}
2001 1403
2002/** 1404/**
@@ -2068,19 +1470,6 @@ void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
2068} 1470}
2069EXPORT_SYMBOL(radix_tree_delete); 1471EXPORT_SYMBOL(radix_tree_delete);
2070 1472
2071void radix_tree_clear_tags(struct radix_tree_root *root,
2072 struct radix_tree_node *node,
2073 void __rcu **slot)
2074{
2075 if (node) {
2076 unsigned int tag, offset = get_slot_offset(node, slot);
2077 for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
2078 node_tag_clear(root, node, tag, offset);
2079 } else {
2080 root_tag_clear_all(root);
2081 }
2082}
2083
2084/** 1473/**
2085 * radix_tree_tagged - test whether any items in the tree are tagged 1474 * radix_tree_tagged - test whether any items in the tree are tagged
2086 * @root: radix tree root 1475 * @root: radix tree root
@@ -2106,33 +1495,12 @@ void idr_preload(gfp_t gfp_mask)
2106} 1495}
2107EXPORT_SYMBOL(idr_preload); 1496EXPORT_SYMBOL(idr_preload);
2108 1497
2109int ida_pre_get(struct ida *ida, gfp_t gfp)
2110{
2111 /*
2112 * The IDA API has no preload_end() equivalent. Instead,
2113 * ida_get_new() can return -EAGAIN, prompting the caller
2114 * to return to the ida_pre_get() step.
2115 */
2116 if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
2117 preempt_enable();
2118
2119 if (!this_cpu_read(ida_bitmap)) {
2120 struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);
2121 if (!bitmap)
2122 return 0;
2123 if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
2124 kfree(bitmap);
2125 }
2126
2127 return 1;
2128}
2129
2130void __rcu **idr_get_free(struct radix_tree_root *root, 1498void __rcu **idr_get_free(struct radix_tree_root *root,
2131 struct radix_tree_iter *iter, gfp_t gfp, 1499 struct radix_tree_iter *iter, gfp_t gfp,
2132 unsigned long max) 1500 unsigned long max)
2133{ 1501{
2134 struct radix_tree_node *node = NULL, *child; 1502 struct radix_tree_node *node = NULL, *child;
2135 void __rcu **slot = (void __rcu **)&root->rnode; 1503 void __rcu **slot = (void __rcu **)&root->xa_head;
2136 unsigned long maxindex, start = iter->next_index; 1504 unsigned long maxindex, start = iter->next_index;
2137 unsigned int shift, offset = 0; 1505 unsigned int shift, offset = 0;
2138 1506
@@ -2148,8 +1516,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
2148 if (error < 0) 1516 if (error < 0)
2149 return ERR_PTR(error); 1517 return ERR_PTR(error);
2150 shift = error; 1518 shift = error;
2151 child = rcu_dereference_raw(root->rnode); 1519 child = rcu_dereference_raw(root->xa_head);
2152 } 1520 }
1521 if (start == 0 && shift == 0)
1522 shift = RADIX_TREE_MAP_SHIFT;
2153 1523
2154 while (shift) { 1524 while (shift) {
2155 shift -= RADIX_TREE_MAP_SHIFT; 1525 shift -= RADIX_TREE_MAP_SHIFT;
@@ -2192,7 +1562,6 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
2192 else 1562 else
2193 iter->next_index = 1; 1563 iter->next_index = 1;
2194 iter->node = node; 1564 iter->node = node;
2195 __set_iter_shift(iter, shift);
2196 set_iter_tags(iter, node, offset, IDR_FREE); 1565 set_iter_tags(iter, node, offset, IDR_FREE);
2197 1566
2198 return slot; 1567 return slot;
@@ -2211,10 +1580,10 @@ void __rcu **idr_get_free(struct radix_tree_root *root,
2211 */ 1580 */
2212void idr_destroy(struct idr *idr) 1581void idr_destroy(struct idr *idr)
2213{ 1582{
2214 struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.rnode); 1583 struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
2215 if (radix_tree_is_internal_node(node)) 1584 if (radix_tree_is_internal_node(node))
2216 radix_tree_free_nodes(node); 1585 radix_tree_free_nodes(node);
2217 idr->idr_rt.rnode = NULL; 1586 idr->idr_rt.xa_head = NULL;
2218 root_tag_set(&idr->idr_rt, IDR_FREE); 1587 root_tag_set(&idr->idr_rt, IDR_FREE);
2219} 1588}
2220EXPORT_SYMBOL(idr_destroy); 1589EXPORT_SYMBOL(idr_destroy);
@@ -2228,31 +1597,6 @@ radix_tree_node_ctor(void *arg)
2228 INIT_LIST_HEAD(&node->private_list); 1597 INIT_LIST_HEAD(&node->private_list);
2229} 1598}
2230 1599
2231static __init unsigned long __maxindex(unsigned int height)
2232{
2233 unsigned int width = height * RADIX_TREE_MAP_SHIFT;
2234 int shift = RADIX_TREE_INDEX_BITS - width;
2235
2236 if (shift < 0)
2237 return ~0UL;
2238 if (shift >= BITS_PER_LONG)
2239 return 0UL;
2240 return ~0UL >> shift;
2241}
2242
2243static __init void radix_tree_init_maxnodes(void)
2244{
2245 unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
2246 unsigned int i, j;
2247
2248 for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
2249 height_to_maxindex[i] = __maxindex(i);
2250 for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
2251 for (j = i; j > 0; j--)
2252 height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
2253 }
2254}
2255
2256static int radix_tree_cpu_dead(unsigned int cpu) 1600static int radix_tree_cpu_dead(unsigned int cpu)
2257{ 1601{
2258 struct radix_tree_preload *rtp; 1602 struct radix_tree_preload *rtp;
@@ -2266,8 +1610,6 @@ static int radix_tree_cpu_dead(unsigned int cpu)
2266 kmem_cache_free(radix_tree_node_cachep, node); 1610 kmem_cache_free(radix_tree_node_cachep, node);
2267 rtp->nr--; 1611 rtp->nr--;
2268 } 1612 }
2269 kfree(per_cpu(ida_bitmap, cpu));
2270 per_cpu(ida_bitmap, cpu) = NULL;
2271 return 0; 1613 return 0;
2272} 1614}
2273 1615
@@ -2277,11 +1619,11 @@ void __init radix_tree_init(void)
2277 1619
2278 BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); 1620 BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
2279 BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); 1621 BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
1622 BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
2280 radix_tree_node_cachep = kmem_cache_create("radix_tree_node", 1623 radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
2281 sizeof(struct radix_tree_node), 0, 1624 sizeof(struct radix_tree_node), 0,
2282 SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, 1625 SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
2283 radix_tree_node_ctor); 1626 radix_tree_node_ctor);
2284 radix_tree_init_maxnodes();
2285 ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", 1627 ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
2286 NULL, radix_tree_cpu_dead); 1628 NULL, radix_tree_cpu_dead);
2287 WARN_ON(ret < 0); 1629 WARN_ON(ret < 0);
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 2f8b61dfd9b0..4e90d443d1b0 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -13,11 +13,25 @@ raid6_pq-$(CONFIG_S390) += s390vx8.o recov_s390xc.o
13hostprogs-y += mktables 13hostprogs-y += mktables
14 14
15quiet_cmd_unroll = UNROLL $@ 15quiet_cmd_unroll = UNROLL $@
16 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \ 16 cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) < $< > $@
17 < $< > $@ || ( rm -f $@ && exit 1 )
18 17
19ifeq ($(CONFIG_ALTIVEC),y) 18ifeq ($(CONFIG_ALTIVEC),y)
20altivec_flags := -maltivec $(call cc-option,-mabi=altivec) 19altivec_flags := -maltivec $(call cc-option,-mabi=altivec)
20
21ifdef CONFIG_CC_IS_CLANG
22# clang ppc port does not yet support -maltivec when -msoft-float is
23# enabled. A future release of clang will resolve this
24# https://bugs.llvm.org/show_bug.cgi?id=31177
25CFLAGS_REMOVE_altivec1.o += -msoft-float
26CFLAGS_REMOVE_altivec2.o += -msoft-float
27CFLAGS_REMOVE_altivec4.o += -msoft-float
28CFLAGS_REMOVE_altivec8.o += -msoft-float
29CFLAGS_REMOVE_altivec8.o += -msoft-float
30CFLAGS_REMOVE_vpermxor1.o += -msoft-float
31CFLAGS_REMOVE_vpermxor2.o += -msoft-float
32CFLAGS_REMOVE_vpermxor4.o += -msoft-float
33CFLAGS_REMOVE_vpermxor8.o += -msoft-float
34endif
21endif 35endif
22 36
23# The GCC option -ffreestanding is required in order to compile code containing 37# The GCC option -ffreestanding is required in order to compile code containing
@@ -145,7 +159,7 @@ $(obj)/s390vx8.c: $(src)/s390vx.uc $(src)/unroll.awk FORCE
145 $(call if_changed,unroll) 159 $(call if_changed,unroll)
146 160
147quiet_cmd_mktable = TABLE $@ 161quiet_cmd_mktable = TABLE $@
148 cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 ) 162 cmd_mktable = $(obj)/mktables > $@
149 163
150targets += tables.c 164targets += tables.c
151$(obj)/tables.c: $(obj)/mktables FORCE 165$(obj)/tables.c: $(obj)/mktables FORCE
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 5065b1e7e327..7e4f7a8ffa8e 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -34,64 +34,64 @@ struct raid6_calls raid6_call;
34EXPORT_SYMBOL_GPL(raid6_call); 34EXPORT_SYMBOL_GPL(raid6_call);
35 35
36const struct raid6_calls * const raid6_algos[] = { 36const struct raid6_calls * const raid6_algos[] = {
37#if defined(__ia64__)
38 &raid6_intx16,
39 &raid6_intx32,
40#endif
41#if defined(__i386__) && !defined(__arch_um__) 37#if defined(__i386__) && !defined(__arch_um__)
42 &raid6_mmxx1,
43 &raid6_mmxx2,
44 &raid6_sse1x1,
45 &raid6_sse1x2,
46 &raid6_sse2x1,
47 &raid6_sse2x2,
48#ifdef CONFIG_AS_AVX2
49 &raid6_avx2x1,
50 &raid6_avx2x2,
51#endif
52#ifdef CONFIG_AS_AVX512 38#ifdef CONFIG_AS_AVX512
53 &raid6_avx512x1,
54 &raid6_avx512x2, 39 &raid6_avx512x2,
40 &raid6_avx512x1,
55#endif 41#endif
56#endif
57#if defined(__x86_64__) && !defined(__arch_um__)
58 &raid6_sse2x1,
59 &raid6_sse2x2,
60 &raid6_sse2x4,
61#ifdef CONFIG_AS_AVX2 42#ifdef CONFIG_AS_AVX2
62 &raid6_avx2x1,
63 &raid6_avx2x2, 43 &raid6_avx2x2,
64 &raid6_avx2x4, 44 &raid6_avx2x1,
45#endif
46 &raid6_sse2x2,
47 &raid6_sse2x1,
48 &raid6_sse1x2,
49 &raid6_sse1x1,
50 &raid6_mmxx2,
51 &raid6_mmxx1,
65#endif 52#endif
53#if defined(__x86_64__) && !defined(__arch_um__)
66#ifdef CONFIG_AS_AVX512 54#ifdef CONFIG_AS_AVX512
67 &raid6_avx512x1,
68 &raid6_avx512x2,
69 &raid6_avx512x4, 55 &raid6_avx512x4,
56 &raid6_avx512x2,
57 &raid6_avx512x1,
70#endif 58#endif
59#ifdef CONFIG_AS_AVX2
60 &raid6_avx2x4,
61 &raid6_avx2x2,
62 &raid6_avx2x1,
63#endif
64 &raid6_sse2x4,
65 &raid6_sse2x2,
66 &raid6_sse2x1,
71#endif 67#endif
72#ifdef CONFIG_ALTIVEC 68#ifdef CONFIG_ALTIVEC
73 &raid6_altivec1,
74 &raid6_altivec2,
75 &raid6_altivec4,
76 &raid6_altivec8,
77 &raid6_vpermxor1,
78 &raid6_vpermxor2,
79 &raid6_vpermxor4,
80 &raid6_vpermxor8, 69 &raid6_vpermxor8,
70 &raid6_vpermxor4,
71 &raid6_vpermxor2,
72 &raid6_vpermxor1,
73 &raid6_altivec8,
74 &raid6_altivec4,
75 &raid6_altivec2,
76 &raid6_altivec1,
81#endif 77#endif
82#if defined(CONFIG_S390) 78#if defined(CONFIG_S390)
83 &raid6_s390vx8, 79 &raid6_s390vx8,
84#endif 80#endif
85 &raid6_intx1,
86 &raid6_intx2,
87 &raid6_intx4,
88 &raid6_intx8,
89#ifdef CONFIG_KERNEL_MODE_NEON 81#ifdef CONFIG_KERNEL_MODE_NEON
90 &raid6_neonx1,
91 &raid6_neonx2,
92 &raid6_neonx4,
93 &raid6_neonx8, 82 &raid6_neonx8,
83 &raid6_neonx4,
84 &raid6_neonx2,
85 &raid6_neonx1,
94#endif 86#endif
87#if defined(__ia64__)
88 &raid6_intx32,
89 &raid6_intx16,
90#endif
91 &raid6_intx8,
92 &raid6_intx4,
93 &raid6_intx2,
94 &raid6_intx1,
95 NULL 95 NULL
96}; 96};
97 97
@@ -163,6 +163,11 @@ static inline const struct raid6_calls *raid6_choose_gen(
163 if ((*algo)->valid && !(*algo)->valid()) 163 if ((*algo)->valid && !(*algo)->valid())
164 continue; 164 continue;
165 165
166 if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
167 best = *algo;
168 break;
169 }
170
166 perf = 0; 171 perf = 0;
167 172
168 preempt_disable(); 173 preempt_disable();
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 5d73f5cb4d8a..3ab8720aa2f8 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -27,13 +27,16 @@ ifeq ($(ARCH),arm)
27 CFLAGS += -I../../../arch/arm/include -mfpu=neon 27 CFLAGS += -I../../../arch/arm/include -mfpu=neon
28 HAS_NEON = yes 28 HAS_NEON = yes
29endif 29endif
30ifeq ($(ARCH),arm64) 30ifeq ($(ARCH),aarch64)
31 CFLAGS += -I../../../arch/arm64/include 31 CFLAGS += -I../../../arch/arm64/include
32 HAS_NEON = yes 32 HAS_NEON = yes
33endif 33endif
34 34
35ifeq ($(IS_X86),yes) 35ifeq ($(IS_X86),yes)
36 OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o 36 OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o avx512.o recov_avx512.o
37 CFLAGS += $(shell echo "pshufb %xmm0, %xmm0" | \
38 gcc -c -x assembler - >&/dev/null && \
39 rm ./-.o && echo -DCONFIG_AS_SSSE3=1)
37 CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \ 40 CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
38 gcc -c -x assembler - >&/dev/null && \ 41 gcc -c -x assembler - >&/dev/null && \
39 rm ./-.o && echo -DCONFIG_AS_AVX2=1) 42 rm ./-.o && echo -DCONFIG_AS_AVX2=1)
@@ -41,7 +44,7 @@ ifeq ($(IS_X86),yes)
41 gcc -c -x assembler - >&/dev/null && \ 44 gcc -c -x assembler - >&/dev/null && \
42 rm ./-.o && echo -DCONFIG_AS_AVX512=1) 45 rm ./-.o && echo -DCONFIG_AS_AVX512=1)
43else ifeq ($(HAS_NEON),yes) 46else ifeq ($(HAS_NEON),yes)
44 OBJS += neon.o neon1.o neon2.o neon4.o neon8.o 47 OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
45 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1 48 CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
46else 49else
47 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\ 50 HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 30526afa8343..852ffa5160f1 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -1179,8 +1179,7 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1179 unsigned int hash) 1179 unsigned int hash)
1180{ 1180{
1181 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1181 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
1182 static struct rhash_head __rcu *rhnull = 1182 static struct rhash_head __rcu *rhnull;
1183 (struct rhash_head __rcu *)NULLS_MARKER(0);
1184 unsigned int index = hash & ((1 << tbl->nest) - 1); 1183 unsigned int index = hash & ((1 << tbl->nest) - 1);
1185 unsigned int size = tbl->size >> tbl->nest; 1184 unsigned int size = tbl->size >> tbl->nest;
1186 unsigned int subhash = hash; 1185 unsigned int subhash = hash;
@@ -1198,8 +1197,11 @@ struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
1198 subhash >>= shift; 1197 subhash >>= shift;
1199 } 1198 }
1200 1199
1201 if (!ntbl) 1200 if (!ntbl) {
1201 if (!rhnull)
1202 INIT_RHT_NULLS_HEAD(rhnull);
1202 return &rhnull; 1203 return &rhnull;
1204 }
1203 1205
1204 return &ntbl[subhash].bucket; 1206 return &ntbl[subhash].bucket;
1205 1207
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index fdd1b8aa8ac6..65c2d06250a6 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -20,6 +20,47 @@
20#include <linux/sbitmap.h> 20#include <linux/sbitmap.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22 22
23/*
24 * See if we have deferred clears that we can batch move
25 */
26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
27{
28 unsigned long mask, val;
29 unsigned long __maybe_unused flags;
30 bool ret = false;
31
32 /* Silence bogus lockdep warning */
33#if defined(CONFIG_LOCKDEP)
34 local_irq_save(flags);
35#endif
36 spin_lock(&sb->map[index].swap_lock);
37
38 if (!sb->map[index].cleared)
39 goto out_unlock;
40
41 /*
42 * First get a stable cleared mask, setting the old mask to 0.
43 */
44 do {
45 mask = sb->map[index].cleared;
46 } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
47
48 /*
49 * Now clear the masked bits in our free word
50 */
51 do {
52 val = sb->map[index].word;
53 } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
54
55 ret = true;
56out_unlock:
57 spin_unlock(&sb->map[index].swap_lock);
58#if defined(CONFIG_LOCKDEP)
59 local_irq_restore(flags);
60#endif
61 return ret;
62}
63
23int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, 64int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
24 gfp_t flags, int node) 65 gfp_t flags, int node)
25{ 66{
@@ -59,6 +100,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
59 for (i = 0; i < sb->map_nr; i++) { 100 for (i = 0; i < sb->map_nr; i++) {
60 sb->map[i].depth = min(depth, bits_per_word); 101 sb->map[i].depth = min(depth, bits_per_word);
61 depth -= sb->map[i].depth; 102 depth -= sb->map[i].depth;
103 spin_lock_init(&sb->map[i].swap_lock);
62 } 104 }
63 return 0; 105 return 0;
64} 106}
@@ -69,6 +111,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
69 unsigned int bits_per_word = 1U << sb->shift; 111 unsigned int bits_per_word = 1U << sb->shift;
70 unsigned int i; 112 unsigned int i;
71 113
114 for (i = 0; i < sb->map_nr; i++)
115 sbitmap_deferred_clear(sb, i);
116
72 sb->depth = depth; 117 sb->depth = depth;
73 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); 118 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
74 119
@@ -111,6 +156,24 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
111 return nr; 156 return nr;
112} 157}
113 158
159static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
160 unsigned int alloc_hint, bool round_robin)
161{
162 int nr;
163
164 do {
165 nr = __sbitmap_get_word(&sb->map[index].word,
166 sb->map[index].depth, alloc_hint,
167 !round_robin);
168 if (nr != -1)
169 break;
170 if (!sbitmap_deferred_clear(sb, index))
171 break;
172 } while (1);
173
174 return nr;
175}
176
114int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin) 177int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
115{ 178{
116 unsigned int i, index; 179 unsigned int i, index;
@@ -118,24 +181,28 @@ int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
118 181
119 index = SB_NR_TO_INDEX(sb, alloc_hint); 182 index = SB_NR_TO_INDEX(sb, alloc_hint);
120 183
184 /*
185 * Unless we're doing round robin tag allocation, just use the
186 * alloc_hint to find the right word index. No point in looping
187 * twice in find_next_zero_bit() for that case.
188 */
189 if (round_robin)
190 alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
191 else
192 alloc_hint = 0;
193
121 for (i = 0; i < sb->map_nr; i++) { 194 for (i = 0; i < sb->map_nr; i++) {
122 nr = __sbitmap_get_word(&sb->map[index].word, 195 nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
123 sb->map[index].depth, 196 round_robin);
124 SB_NR_TO_BIT(sb, alloc_hint),
125 !round_robin);
126 if (nr != -1) { 197 if (nr != -1) {
127 nr += index << sb->shift; 198 nr += index << sb->shift;
128 break; 199 break;
129 } 200 }
130 201
131 /* Jump to next index. */ 202 /* Jump to next index. */
132 index++; 203 alloc_hint = 0;
133 alloc_hint = index << sb->shift; 204 if (++index >= sb->map_nr)
134
135 if (index >= sb->map_nr) {
136 index = 0; 205 index = 0;
137 alloc_hint = 0;
138 }
139 } 206 }
140 207
141 return nr; 208 return nr;
@@ -151,6 +218,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
151 index = SB_NR_TO_INDEX(sb, alloc_hint); 218 index = SB_NR_TO_INDEX(sb, alloc_hint);
152 219
153 for (i = 0; i < sb->map_nr; i++) { 220 for (i = 0; i < sb->map_nr; i++) {
221again:
154 nr = __sbitmap_get_word(&sb->map[index].word, 222 nr = __sbitmap_get_word(&sb->map[index].word,
155 min(sb->map[index].depth, shallow_depth), 223 min(sb->map[index].depth, shallow_depth),
156 SB_NR_TO_BIT(sb, alloc_hint), true); 224 SB_NR_TO_BIT(sb, alloc_hint), true);
@@ -159,6 +227,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
159 break; 227 break;
160 } 228 }
161 229
230 if (sbitmap_deferred_clear(sb, index))
231 goto again;
232
162 /* Jump to next index. */ 233 /* Jump to next index. */
163 index++; 234 index++;
164 alloc_hint = index << sb->shift; 235 alloc_hint = index << sb->shift;
@@ -178,7 +249,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
178 unsigned int i; 249 unsigned int i;
179 250
180 for (i = 0; i < sb->map_nr; i++) { 251 for (i = 0; i < sb->map_nr; i++) {
181 if (sb->map[i].word) 252 if (sb->map[i].word & ~sb->map[i].cleared)
182 return true; 253 return true;
183 } 254 }
184 return false; 255 return false;
@@ -191,9 +262,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
191 262
192 for (i = 0; i < sb->map_nr; i++) { 263 for (i = 0; i < sb->map_nr; i++) {
193 const struct sbitmap_word *word = &sb->map[i]; 264 const struct sbitmap_word *word = &sb->map[i];
265 unsigned long mask = word->word & ~word->cleared;
194 unsigned long ret; 266 unsigned long ret;
195 267
196 ret = find_first_zero_bit(&word->word, word->depth); 268 ret = find_first_zero_bit(&mask, word->depth);
197 if (ret < word->depth) 269 if (ret < word->depth)
198 return true; 270 return true;
199 } 271 }
@@ -201,23 +273,36 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
201} 273}
202EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear); 274EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);
203 275
204unsigned int sbitmap_weight(const struct sbitmap *sb) 276static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
205{ 277{
206 unsigned int i, weight = 0; 278 unsigned int i, weight = 0;
207 279
208 for (i = 0; i < sb->map_nr; i++) { 280 for (i = 0; i < sb->map_nr; i++) {
209 const struct sbitmap_word *word = &sb->map[i]; 281 const struct sbitmap_word *word = &sb->map[i];
210 282
211 weight += bitmap_weight(&word->word, word->depth); 283 if (set)
284 weight += bitmap_weight(&word->word, word->depth);
285 else
286 weight += bitmap_weight(&word->cleared, word->depth);
212 } 287 }
213 return weight; 288 return weight;
214} 289}
215EXPORT_SYMBOL_GPL(sbitmap_weight); 290
291static unsigned int sbitmap_weight(const struct sbitmap *sb)
292{
293 return __sbitmap_weight(sb, true);
294}
295
296static unsigned int sbitmap_cleared(const struct sbitmap *sb)
297{
298 return __sbitmap_weight(sb, false);
299}
216 300
217void sbitmap_show(struct sbitmap *sb, struct seq_file *m) 301void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
218{ 302{
219 seq_printf(m, "depth=%u\n", sb->depth); 303 seq_printf(m, "depth=%u\n", sb->depth);
220 seq_printf(m, "busy=%u\n", sbitmap_weight(sb)); 304 seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
305 seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
221 seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift); 306 seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
222 seq_printf(m, "map_nr=%u\n", sb->map_nr); 307 seq_printf(m, "map_nr=%u\n", sb->map_nr);
223} 308}
@@ -325,6 +410,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
325 sbq->min_shallow_depth = UINT_MAX; 410 sbq->min_shallow_depth = UINT_MAX;
326 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth); 411 sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
327 atomic_set(&sbq->wake_index, 0); 412 atomic_set(&sbq->wake_index, 0);
413 atomic_set(&sbq->ws_active, 0);
328 414
329 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node); 415 sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
330 if (!sbq->ws) { 416 if (!sbq->ws) {
@@ -440,6 +526,9 @@ static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
440{ 526{
441 int i, wake_index; 527 int i, wake_index;
442 528
529 if (!atomic_read(&sbq->ws_active))
530 return NULL;
531
443 wake_index = atomic_read(&sbq->wake_index); 532 wake_index = atomic_read(&sbq->wake_index);
444 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 533 for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
445 struct sbq_wait_state *ws = &sbq->ws[wake_index]; 534 struct sbq_wait_state *ws = &sbq->ws[wake_index];
@@ -509,7 +598,8 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
509void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr, 598void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
510 unsigned int cpu) 599 unsigned int cpu)
511{ 600{
512 sbitmap_clear_bit_unlock(&sbq->sb, nr); 601 sbitmap_deferred_clear_bit(&sbq->sb, nr);
602
513 /* 603 /*
514 * Pairs with the memory barrier in set_current_state() to ensure the 604 * Pairs with the memory barrier in set_current_state() to ensure the
515 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker 605 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
@@ -564,6 +654,7 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
564 654
565 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch); 655 seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
566 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index)); 656 seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
657 seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));
567 658
568 seq_puts(m, "ws={\n"); 659 seq_puts(m, "ws={\n");
569 for (i = 0; i < SBQ_WAIT_QUEUES; i++) { 660 for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
@@ -579,3 +670,48 @@ void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
579 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth); 670 seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
580} 671}
581EXPORT_SYMBOL_GPL(sbitmap_queue_show); 672EXPORT_SYMBOL_GPL(sbitmap_queue_show);
673
674void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
675 struct sbq_wait_state *ws,
676 struct sbq_wait *sbq_wait)
677{
678 if (!sbq_wait->sbq) {
679 sbq_wait->sbq = sbq;
680 atomic_inc(&sbq->ws_active);
681 }
682 add_wait_queue(&ws->wait, &sbq_wait->wait);
683}
684EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
685
686void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
687{
688 list_del_init(&sbq_wait->wait.entry);
689 if (sbq_wait->sbq) {
690 atomic_dec(&sbq_wait->sbq->ws_active);
691 sbq_wait->sbq = NULL;
692 }
693}
694EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
695
696void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
697 struct sbq_wait_state *ws,
698 struct sbq_wait *sbq_wait, int state)
699{
700 if (!sbq_wait->sbq) {
701 atomic_inc(&sbq->ws_active);
702 sbq_wait->sbq = sbq;
703 }
704 prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
705}
706EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
707
708void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
709 struct sbq_wait *sbq_wait)
710{
711 finish_wait(&ws->wait, &sbq_wait->wait);
712 if (sbq_wait->sbq) {
713 atomic_dec(&sbq->ws_active);
714 sbq_wait->sbq = NULL;
715 }
716}
717EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 7c6096a71704..9ba349e775ef 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -271,7 +271,7 @@ int __sg_alloc_table(struct sg_table *table, unsigned int nents,
271 271
272 if (nents == 0) 272 if (nents == 0)
273 return -EINVAL; 273 return -EINVAL;
274#ifndef CONFIG_ARCH_HAS_SG_CHAIN 274#ifdef CONFIG_ARCH_NO_SG_CHAIN
275 if (WARN_ON_ONCE(nents > max_ents)) 275 if (WARN_ON_ONCE(nents > max_ents))
276 return -EINVAL; 276 return -EINVAL;
277#endif 277#endif
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 11f2ae0f9099..bd807f545a9d 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -140,13 +140,17 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
140 */ 140 */
141int seq_buf_puts(struct seq_buf *s, const char *str) 141int seq_buf_puts(struct seq_buf *s, const char *str)
142{ 142{
143 unsigned int len = strlen(str); 143 size_t len = strlen(str);
144 144
145 WARN_ON(s->size == 0); 145 WARN_ON(s->size == 0);
146 146
147 /* Add 1 to len for the trailing null byte which must be there */
148 len += 1;
149
147 if (seq_buf_can_fit(s, len)) { 150 if (seq_buf_can_fit(s, len)) {
148 memcpy(s->buffer + s->len, str, len); 151 memcpy(s->buffer + s->len, str, len);
149 s->len += len; 152 /* Don't count the trailing null byte against the capacity */
153 s->len += len - 1;
150 return 0; 154 return 0;
151 } 155 }
152 seq_buf_set_overflow(s); 156 seq_buf_set_overflow(s);
diff --git a/lib/sg_pool.c b/lib/sg_pool.c
index 6dd30615a201..d1c1e6388eaa 100644
--- a/lib/sg_pool.c
+++ b/lib/sg_pool.c
@@ -148,10 +148,9 @@ static __init int sg_pool_init(void)
148cleanup_sdb: 148cleanup_sdb:
149 for (i = 0; i < SG_MEMPOOL_NR; i++) { 149 for (i = 0; i < SG_MEMPOOL_NR; i++) {
150 struct sg_pool *sgp = sg_pools + i; 150 struct sg_pool *sgp = sg_pools + i;
151 if (sgp->pool) 151
152 mempool_destroy(sgp->pool); 152 mempool_destroy(sgp->pool);
153 if (sgp->slab) 153 kmem_cache_destroy(sgp->slab);
154 kmem_cache_destroy(sgp->slab);
155 } 154 }
156 155
157 return -ENOMEM; 156 return -ENOMEM;
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 0beaa1d899aa..6a042f53e7bb 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -18,22 +18,19 @@ void show_mem(unsigned int filter, nodemask_t *nodemask)
18 show_free_areas(filter, nodemask); 18 show_free_areas(filter, nodemask);
19 19
20 for_each_online_pgdat(pgdat) { 20 for_each_online_pgdat(pgdat) {
21 unsigned long flags;
22 int zoneid; 21 int zoneid;
23 22
24 pgdat_resize_lock(pgdat, &flags);
25 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 23 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
26 struct zone *zone = &pgdat->node_zones[zoneid]; 24 struct zone *zone = &pgdat->node_zones[zoneid];
27 if (!populated_zone(zone)) 25 if (!populated_zone(zone))
28 continue; 26 continue;
29 27
30 total += zone->present_pages; 28 total += zone->present_pages;
31 reserved += zone->present_pages - zone->managed_pages; 29 reserved += zone->present_pages - zone_managed_pages(zone);
32 30
33 if (is_highmem_idx(zoneid)) 31 if (is_highmem_idx(zoneid))
34 highmem += zone->present_pages; 32 highmem += zone->present_pages;
35 } 33 }
36 pgdat_resize_unlock(pgdat, &flags);
37 } 34 }
38 35
39 printk("%lu pages RAM\n", total); 36 printk("%lu pages RAM\n", total);
diff --git a/lib/string.c b/lib/string.c
index 2c0900a5d51a..38e4ca08e757 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,7 @@
27#include <linux/export.h> 27#include <linux/export.h>
28#include <linux/bug.h> 28#include <linux/bug.h>
29#include <linux/errno.h> 29#include <linux/errno.h>
30#include <linux/slab.h>
30 31
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/word-at-a-time.h> 33#include <asm/word-at-a-time.h>
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index b53e1b5d80f4..58eacd41526c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -114,10 +114,11 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
114 114
115 kasan_check_write(dst, count); 115 kasan_check_write(dst, count);
116 check_object_size(dst, count, false); 116 check_object_size(dst, count, false);
117 user_access_begin(); 117 if (user_access_begin(src, max)) {
118 retval = do_strncpy_from_user(dst, src, count, max); 118 retval = do_strncpy_from_user(dst, src, count, max);
119 user_access_end(); 119 user_access_end();
120 return retval; 120 return retval;
121 }
121 } 122 }
122 return -EFAULT; 123 return -EFAULT;
123} 124}
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 60d0bbda8f5e..1c1a1b0e38a5 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -114,10 +114,11 @@ long strnlen_user(const char __user *str, long count)
114 unsigned long max = max_addr - src_addr; 114 unsigned long max = max_addr - src_addr;
115 long retval; 115 long retval;
116 116
117 user_access_begin(); 117 if (user_access_begin(str, max)) {
118 retval = do_strnlen_user(str, count, max); 118 retval = do_strnlen_user(str, count, max);
119 user_access_end(); 119 user_access_end();
120 return retval; 120 return retval;
121 }
121 } 122 }
122 return 0; 123 return 0;
123} 124}
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 08d3d59dca17..f3e570722a7e 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -39,6 +39,7 @@
39#define SKB_HASH 0x1234aaab 39#define SKB_HASH 0x1234aaab
40#define SKB_QUEUE_MAP 123 40#define SKB_QUEUE_MAP 123
41#define SKB_VLAN_TCI 0xffff 41#define SKB_VLAN_TCI 0xffff
42#define SKB_VLAN_PRESENT 1
42#define SKB_DEV_IFINDEX 577 43#define SKB_DEV_IFINDEX 577
43#define SKB_DEV_TYPE 588 44#define SKB_DEV_TYPE 588
44 45
@@ -725,8 +726,8 @@ static struct bpf_test tests[] = {
725 CLASSIC, 726 CLASSIC,
726 { }, 727 { },
727 { 728 {
728 { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }, 729 { 1, SKB_VLAN_TCI },
729 { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT } 730 { 10, SKB_VLAN_TCI }
730 }, 731 },
731 }, 732 },
732 { 733 {
@@ -739,8 +740,8 @@ static struct bpf_test tests[] = {
739 CLASSIC, 740 CLASSIC,
740 { }, 741 { },
741 { 742 {
742 { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, 743 { 1, SKB_VLAN_PRESENT },
743 { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } 744 { 10, SKB_VLAN_PRESENT }
744 }, 745 },
745 }, 746 },
746 { 747 {
@@ -5289,8 +5290,8 @@ static struct bpf_test tests[] = {
5289#endif 5290#endif
5290 { }, 5291 { },
5291 { 5292 {
5292 { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, 5293 { 1, SKB_VLAN_PRESENT },
5293 { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } 5294 { 10, SKB_VLAN_PRESENT }
5294 }, 5295 },
5295 .fill_helper = bpf_fill_maxinsns6, 5296 .fill_helper = bpf_fill_maxinsns6,
5296 .expected_errcode = -ENOTSUPP, 5297 .expected_errcode = -ENOTSUPP,
@@ -6493,7 +6494,9 @@ static struct sk_buff *populate_skb(char *buf, int size)
6493 skb->hash = SKB_HASH; 6494 skb->hash = SKB_HASH;
6494 skb->queue_mapping = SKB_QUEUE_MAP; 6495 skb->queue_mapping = SKB_QUEUE_MAP;
6495 skb->vlan_tci = SKB_VLAN_TCI; 6496 skb->vlan_tci = SKB_VLAN_TCI;
6497 skb->vlan_present = SKB_VLAN_PRESENT;
6496 skb->vlan_proto = htons(ETH_P_IP); 6498 skb->vlan_proto = htons(ETH_P_IP);
6499 dev_net_set(&dev, &init_net);
6497 skb->dev = &dev; 6500 skb->dev = &dev;
6498 skb->dev->ifindex = SKB_DEV_IFINDEX; 6501 skb->dev->ifindex = SKB_DEV_IFINDEX;
6499 skb->dev->type = SKB_DEV_TYPE; 6502 skb->dev->type = SKB_DEV_TYPE;
diff --git a/lib/test_debug_virtual.c b/lib/test_debug_virtual.c
index d5a06addeb27..bf864c73e462 100644
--- a/lib/test_debug_virtual.c
+++ b/lib/test_debug_virtual.c
@@ -5,6 +5,7 @@
5#include <linux/vmalloc.h> 5#include <linux/vmalloc.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/sizes.h> 7#include <linux/sizes.h>
8#include <linux/io.h>
8 9
9#include <asm/page.h> 10#include <asm/page.h>
10#ifdef CONFIG_MIPS 11#ifdef CONFIG_MIPS
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index b984806d7d7b..7cab9a9869ac 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
837 if (req->fw->size > PAGE_SIZE) { 837 if (req->fw->size > PAGE_SIZE) {
838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); 838 pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
839 rc = -EINVAL; 839 rc = -EINVAL;
840 goto out;
840 } 841 }
841 memcpy(buf, req->fw->data, req->fw->size); 842 memcpy(buf, req->fw->data, req->fw->size);
842 843
diff --git a/lib/test_hexdump.c b/lib/test_hexdump.c
index 626f580b4ff7..5144899d3c6b 100644
--- a/lib/test_hexdump.c
+++ b/lib/test_hexdump.c
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
99 const char *q = *result++; 99 const char *q = *result++;
100 size_t amount = strlen(q); 100 size_t amount = strlen(q);
101 101
102 strncpy(p, q, amount); 102 memcpy(p, q, amount);
103 p += amount; 103 p += amount;
104 104
105 *p++ = ' '; 105 *p++ = ' ';
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index ec657105edbf..51b78405bf24 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -579,6 +579,73 @@ static noinline void __init kmem_cache_invalid_free(void)
579 kmem_cache_destroy(cache); 579 kmem_cache_destroy(cache);
580} 580}
581 581
582static noinline void __init kasan_memchr(void)
583{
584 char *ptr;
585 size_t size = 24;
586
587 pr_info("out-of-bounds in memchr\n");
588 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
589 if (!ptr)
590 return;
591
592 memchr(ptr, '1', size + 1);
593 kfree(ptr);
594}
595
596static noinline void __init kasan_memcmp(void)
597{
598 char *ptr;
599 size_t size = 24;
600 int arr[9];
601
602 pr_info("out-of-bounds in memcmp\n");
603 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
604 if (!ptr)
605 return;
606
607 memset(arr, 0, sizeof(arr));
608 memcmp(ptr, arr, size+1);
609 kfree(ptr);
610}
611
612static noinline void __init kasan_strings(void)
613{
614 char *ptr;
615 size_t size = 24;
616
617 pr_info("use-after-free in strchr\n");
618 ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
619 if (!ptr)
620 return;
621
622 kfree(ptr);
623
624 /*
625 * Try to cause only 1 invalid access (less spam in dmesg).
626 * For that we need ptr to point to zeroed byte.
627 * Skip metadata that could be stored in freed object so ptr
628 * will likely point to zeroed byte.
629 */
630 ptr += 16;
631 strchr(ptr, '1');
632
633 pr_info("use-after-free in strrchr\n");
634 strrchr(ptr, '1');
635
636 pr_info("use-after-free in strcmp\n");
637 strcmp(ptr, "2");
638
639 pr_info("use-after-free in strncmp\n");
640 strncmp(ptr, "2", 1);
641
642 pr_info("use-after-free in strlen\n");
643 strlen(ptr);
644
645 pr_info("use-after-free in strnlen\n");
646 strnlen(ptr, 1);
647}
648
582static int __init kmalloc_tests_init(void) 649static int __init kmalloc_tests_init(void)
583{ 650{
584 /* 651 /*
@@ -618,6 +685,9 @@ static int __init kmalloc_tests_init(void)
618 use_after_scope_test(); 685 use_after_scope_test();
619 kmem_cache_double_free(); 686 kmem_cache_double_free();
620 kmem_cache_invalid_free(); 687 kmem_cache_invalid_free();
688 kasan_memchr();
689 kasan_memcmp();
690 kasan_strings();
621 691
622 kasan_restore_multi_shot(multishot); 692 kasan_restore_multi_shot(multishot);
623 693
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index e3ddd836491f..d82d022111e0 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
1214 1214
1215 dev_info(test_dev->dev, "removing interface\n"); 1215 dev_info(test_dev->dev, "removing interface\n");
1216 misc_deregister(&test_dev->misc_dev); 1216 misc_deregister(&test_dev->misc_dev);
1217 kfree(&test_dev->misc_dev.name);
1218 1217
1219 mutex_unlock(&test_dev->config_mutex); 1218 mutex_unlock(&test_dev->config_mutex);
1220 mutex_unlock(&test_dev->trigger_mutex); 1219 mutex_unlock(&test_dev->trigger_mutex);
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000000..849c477d49d0
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Test cases for memcat_p() in lib/memcat_p.c
4 */
5#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6
7#include <linux/string.h>
8#include <linux/slab.h>
9#include <linux/module.h>
10
11struct test_struct {
12 int num;
13 unsigned int magic;
14};
15
16#define MAGIC 0xf00ff00f
17/* Size of each of the NULL-terminated input arrays */
18#define INPUT_MAX 128
19/* Expected number of non-NULL elements in the output array */
20#define EXPECT (INPUT_MAX * 2 - 2)
21
22static int __init test_memcat_p_init(void)
23{
24 struct test_struct **in0, **in1, **out, **p;
25 int err = -ENOMEM, i, r, total = 0;
26
27 in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
28 if (!in0)
29 return err;
30
31 in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
32 if (!in1)
33 goto err_free_in0;
34
35 for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
36 in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
37 if (!in0[i])
38 goto err_free_elements;
39
40 in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
41 if (!in1[i]) {
42 kfree(in0[i]);
43 goto err_free_elements;
44 }
45
46 /* lifted from test_sort.c */
47 r = (r * 725861) % 6599;
48 in0[i]->num = r;
49 in1[i]->num = -r;
50 in0[i]->magic = MAGIC;
51 in1[i]->magic = MAGIC;
52 }
53
54 in0[i] = in1[i] = NULL;
55
56 out = memcat_p(in0, in1);
57 if (!out)
58 goto err_free_all_elements;
59
60 err = -EINVAL;
61 for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
62 total += (*p)->num;
63
64 if ((*p)->magic != MAGIC) {
65 pr_err("test failed: wrong magic at %d: %u\n", i,
66 (*p)->magic);
67 goto err_free_out;
68 }
69 }
70
71 if (total) {
72 pr_err("test failed: expected zero total, got %d\n", total);
73 goto err_free_out;
74 }
75
76 if (i != EXPECT) {
77 pr_err("test failed: expected output size %d, got %d\n",
78 EXPECT, i);
79 goto err_free_out;
80 }
81
82 for (i = 0; i < INPUT_MAX - 1; i++)
83 if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
84 pr_err("test failed: wrong element order at %d\n", i);
85 goto err_free_out;
86 }
87
88 err = 0;
89 pr_info("test passed\n");
90
91err_free_out:
92 kfree(out);
93err_free_all_elements:
94 i = INPUT_MAX;
95err_free_elements:
96 for (i--; i >= 0; i--) {
97 kfree(in1[i]);
98 kfree(in0[i]);
99 }
100
101 kfree(in1);
102err_free_in0:
103 kfree(in0);
104
105 return err;
106}
107
108static void __exit test_memcat_p_exit(void)
109{
110}
111
112module_init(test_memcat_p_init);
113module_exit(test_memcat_p_exit);
114
115MODULE_LICENSE("GPL");
diff --git a/lib/test_objagg.c b/lib/test_objagg.c
new file mode 100644
index 000000000000..ab57144bb0cd
--- /dev/null
+++ b/lib/test_objagg.c
@@ -0,0 +1,836 @@
1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#include <linux/kernel.h>
7#include <linux/module.h>
8#include <linux/slab.h>
9#include <linux/random.h>
10#include <linux/objagg.h>
11
12struct tokey {
13 unsigned int id;
14};
15
16#define NUM_KEYS 32
17
18static int key_id_index(unsigned int key_id)
19{
20 if (key_id >= NUM_KEYS) {
21 WARN_ON(1);
22 return 0;
23 }
24 return key_id;
25}
26
27#define BUF_LEN 128
28
29struct world {
30 unsigned int root_count;
31 unsigned int delta_count;
32 char next_root_buf[BUF_LEN];
33 struct objagg_obj *objagg_objs[NUM_KEYS];
34 unsigned int key_refs[NUM_KEYS];
35};
36
37struct root {
38 struct tokey key;
39 char buf[BUF_LEN];
40};
41
42struct delta {
43 unsigned int key_id_diff;
44};
45
46static struct objagg_obj *world_obj_get(struct world *world,
47 struct objagg *objagg,
48 unsigned int key_id)
49{
50 struct objagg_obj *objagg_obj;
51 struct tokey key;
52 int err;
53
54 key.id = key_id;
55 objagg_obj = objagg_obj_get(objagg, &key);
56 if (IS_ERR(objagg_obj)) {
57 pr_err("Key %u: Failed to get object.\n", key_id);
58 return objagg_obj;
59 }
60 if (!world->key_refs[key_id_index(key_id)]) {
61 world->objagg_objs[key_id_index(key_id)] = objagg_obj;
62 } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) {
63 pr_err("Key %u: God another object for the same key.\n",
64 key_id);
65 err = -EINVAL;
66 goto err_key_id_check;
67 }
68 world->key_refs[key_id_index(key_id)]++;
69 return objagg_obj;
70
71err_key_id_check:
72 objagg_obj_put(objagg, objagg_obj);
73 return ERR_PTR(err);
74}
75
76static void world_obj_put(struct world *world, struct objagg *objagg,
77 unsigned int key_id)
78{
79 struct objagg_obj *objagg_obj;
80
81 if (!world->key_refs[key_id_index(key_id)])
82 return;
83 objagg_obj = world->objagg_objs[key_id_index(key_id)];
84 objagg_obj_put(objagg, objagg_obj);
85 world->key_refs[key_id_index(key_id)]--;
86}
87
88#define MAX_KEY_ID_DIFF 5
89
90static void *delta_create(void *priv, void *parent_obj, void *obj)
91{
92 struct tokey *parent_key = parent_obj;
93 struct world *world = priv;
94 struct tokey *key = obj;
95 int diff = key->id - parent_key->id;
96 struct delta *delta;
97
98 if (diff < 0 || diff > MAX_KEY_ID_DIFF)
99 return ERR_PTR(-EINVAL);
100
101 delta = kzalloc(sizeof(*delta), GFP_KERNEL);
102 if (!delta)
103 return ERR_PTR(-ENOMEM);
104 delta->key_id_diff = diff;
105 world->delta_count++;
106 return delta;
107}
108
109static void delta_destroy(void *priv, void *delta_priv)
110{
111 struct delta *delta = delta_priv;
112 struct world *world = priv;
113
114 world->delta_count--;
115 kfree(delta);
116}
117
118static void *root_create(void *priv, void *obj)
119{
120 struct world *world = priv;
121 struct tokey *key = obj;
122 struct root *root;
123
124 root = kzalloc(sizeof(*root), GFP_KERNEL);
125 if (!root)
126 return ERR_PTR(-ENOMEM);
127 memcpy(&root->key, key, sizeof(root->key));
128 memcpy(root->buf, world->next_root_buf, sizeof(root->buf));
129 world->root_count++;
130 return root;
131}
132
133static void root_destroy(void *priv, void *root_priv)
134{
135 struct root *root = root_priv;
136 struct world *world = priv;
137
138 world->root_count--;
139 kfree(root);
140}
141
142static int test_nodelta_obj_get(struct world *world, struct objagg *objagg,
143 unsigned int key_id, bool should_create_root)
144{
145 unsigned int orig_root_count = world->root_count;
146 struct objagg_obj *objagg_obj;
147 const struct root *root;
148 int err;
149
150 if (should_create_root)
151 prandom_bytes(world->next_root_buf,
152 sizeof(world->next_root_buf));
153
154 objagg_obj = world_obj_get(world, objagg, key_id);
155 if (IS_ERR(objagg_obj)) {
156 pr_err("Key %u: Failed to get object.\n", key_id);
157 return PTR_ERR(objagg_obj);
158 }
159 if (should_create_root) {
160 if (world->root_count != orig_root_count + 1) {
161 pr_err("Key %u: Root was not created\n", key_id);
162 err = -EINVAL;
163 goto err_check_root_count;
164 }
165 } else {
166 if (world->root_count != orig_root_count) {
167 pr_err("Key %u: Root was incorrectly created\n",
168 key_id);
169 err = -EINVAL;
170 goto err_check_root_count;
171 }
172 }
173 root = objagg_obj_root_priv(objagg_obj);
174 if (root->key.id != key_id) {
175 pr_err("Key %u: Root has unexpected key id\n", key_id);
176 err = -EINVAL;
177 goto err_check_key_id;
178 }
179 if (should_create_root &&
180 memcmp(world->next_root_buf, root->buf, sizeof(root->buf))) {
181 pr_err("Key %u: Buffer does not match the expected content\n",
182 key_id);
183 err = -EINVAL;
184 goto err_check_buf;
185 }
186 return 0;
187
188err_check_buf:
189err_check_key_id:
190err_check_root_count:
191 objagg_obj_put(objagg, objagg_obj);
192 return err;
193}
194
195static int test_nodelta_obj_put(struct world *world, struct objagg *objagg,
196 unsigned int key_id, bool should_destroy_root)
197{
198 unsigned int orig_root_count = world->root_count;
199
200 world_obj_put(world, objagg, key_id);
201
202 if (should_destroy_root) {
203 if (world->root_count != orig_root_count - 1) {
204 pr_err("Key %u: Root was not destroyed\n", key_id);
205 return -EINVAL;
206 }
207 } else {
208 if (world->root_count != orig_root_count) {
209 pr_err("Key %u: Root was incorrectly destroyed\n",
210 key_id);
211 return -EINVAL;
212 }
213 }
214 return 0;
215}
216
217static int check_stats_zero(struct objagg *objagg)
218{
219 const struct objagg_stats *stats;
220 int err = 0;
221
222 stats = objagg_stats_get(objagg);
223 if (IS_ERR(stats))
224 return PTR_ERR(stats);
225
226 if (stats->stats_info_count != 0) {
227 pr_err("Stats: Object count is not zero while it should be\n");
228 err = -EINVAL;
229 }
230
231 objagg_stats_put(stats);
232 return err;
233}
234
235static int check_stats_nodelta(struct objagg *objagg)
236{
237 const struct objagg_stats *stats;
238 int i;
239 int err;
240
241 stats = objagg_stats_get(objagg);
242 if (IS_ERR(stats))
243 return PTR_ERR(stats);
244
245 if (stats->stats_info_count != NUM_KEYS) {
246 pr_err("Stats: Unexpected object count (%u expected, %u returned)\n",
247 NUM_KEYS, stats->stats_info_count);
248 err = -EINVAL;
249 goto stats_put;
250 }
251
252 for (i = 0; i < stats->stats_info_count; i++) {
253 if (stats->stats_info[i].stats.user_count != 2) {
254 pr_err("Stats: incorrect user count\n");
255 err = -EINVAL;
256 goto stats_put;
257 }
258 if (stats->stats_info[i].stats.delta_user_count != 2) {
259 pr_err("Stats: incorrect delta user count\n");
260 err = -EINVAL;
261 goto stats_put;
262 }
263 }
264 err = 0;
265
266stats_put:
267 objagg_stats_put(stats);
268 return err;
269}
270
271static void *delta_create_dummy(void *priv, void *parent_obj, void *obj)
272{
273 return ERR_PTR(-EOPNOTSUPP);
274}
275
276static void delta_destroy_dummy(void *priv, void *delta_priv)
277{
278}
279
280static const struct objagg_ops nodelta_ops = {
281 .obj_size = sizeof(struct tokey),
282 .delta_create = delta_create_dummy,
283 .delta_destroy = delta_destroy_dummy,
284 .root_create = root_create,
285 .root_destroy = root_destroy,
286};
287
288static int test_nodelta(void)
289{
290 struct world world = {};
291 struct objagg *objagg;
292 int i;
293 int err;
294
295 objagg = objagg_create(&nodelta_ops, &world);
296 if (IS_ERR(objagg))
297 return PTR_ERR(objagg);
298
299 err = check_stats_zero(objagg);
300 if (err)
301 goto err_stats_first_zero;
302
303 /* First round of gets, the root objects should be created */
304 for (i = 0; i < NUM_KEYS; i++) {
305 err = test_nodelta_obj_get(&world, objagg, i, true);
306 if (err)
307 goto err_obj_first_get;
308 }
309
310 /* Do the second round of gets, all roots are already created,
311 * make sure that no new root is created
312 */
313 for (i = 0; i < NUM_KEYS; i++) {
314 err = test_nodelta_obj_get(&world, objagg, i, false);
315 if (err)
316 goto err_obj_second_get;
317 }
318
319 err = check_stats_nodelta(objagg);
320 if (err)
321 goto err_stats_nodelta;
322
323 for (i = NUM_KEYS - 1; i >= 0; i--) {
324 err = test_nodelta_obj_put(&world, objagg, i, false);
325 if (err)
326 goto err_obj_first_put;
327 }
328 for (i = NUM_KEYS - 1; i >= 0; i--) {
329 err = test_nodelta_obj_put(&world, objagg, i, true);
330 if (err)
331 goto err_obj_second_put;
332 }
333
334 err = check_stats_zero(objagg);
335 if (err)
336 goto err_stats_second_zero;
337
338 objagg_destroy(objagg);
339 return 0;
340
341err_stats_nodelta:
342err_obj_first_put:
343err_obj_second_get:
344 for (i--; i >= 0; i--)
345 world_obj_put(&world, objagg, i);
346
347 i = NUM_KEYS;
348err_obj_first_get:
349err_obj_second_put:
350 for (i--; i >= 0; i--)
351 world_obj_put(&world, objagg, i);
352err_stats_first_zero:
353err_stats_second_zero:
354 objagg_destroy(objagg);
355 return err;
356}
357
358static const struct objagg_ops delta_ops = {
359 .obj_size = sizeof(struct tokey),
360 .delta_create = delta_create,
361 .delta_destroy = delta_destroy,
362 .root_create = root_create,
363 .root_destroy = root_destroy,
364};
365
366enum action {
367 ACTION_GET,
368 ACTION_PUT,
369};
370
371enum expect_delta {
372 EXPECT_DELTA_SAME,
373 EXPECT_DELTA_INC,
374 EXPECT_DELTA_DEC,
375};
376
377enum expect_root {
378 EXPECT_ROOT_SAME,
379 EXPECT_ROOT_INC,
380 EXPECT_ROOT_DEC,
381};
382
383struct expect_stats_info {
384 struct objagg_obj_stats stats;
385 bool is_root;
386 unsigned int key_id;
387};
388
389struct expect_stats {
390 unsigned int info_count;
391 struct expect_stats_info info[NUM_KEYS];
392};
393
394struct action_item {
395 unsigned int key_id;
396 enum action action;
397 enum expect_delta expect_delta;
398 enum expect_root expect_root;
399 struct expect_stats expect_stats;
400};
401
402#define EXPECT_STATS(count, ...) \
403{ \
404 .info_count = count, \
405 .info = { __VA_ARGS__ } \
406}
407
408#define ROOT(key_id, user_count, delta_user_count) \
409 {{user_count, delta_user_count}, true, key_id}
410
411#define DELTA(key_id, user_count) \
412 {{user_count, user_count}, false, key_id}
413
414static const struct action_item action_items[] = {
415 {
416 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
417 EXPECT_STATS(1, ROOT(1, 1, 1)),
418 }, /* r: 1 d: */
419 {
420 7, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
421 EXPECT_STATS(2, ROOT(1, 1, 1), ROOT(7, 1, 1)),
422 }, /* r: 1, 7 d: */
423 {
424 3, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
425 EXPECT_STATS(3, ROOT(1, 1, 2), ROOT(7, 1, 1),
426 DELTA(3, 1)),
427 }, /* r: 1, 7 d: 3^1 */
428 {
429 5, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
430 EXPECT_STATS(4, ROOT(1, 1, 3), ROOT(7, 1, 1),
431 DELTA(3, 1), DELTA(5, 1)),
432 }, /* r: 1, 7 d: 3^1, 5^1 */
433 {
434 3, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
435 EXPECT_STATS(4, ROOT(1, 1, 4), ROOT(7, 1, 1),
436 DELTA(3, 2), DELTA(5, 1)),
437 }, /* r: 1, 7 d: 3^1, 3^1, 5^1 */
438 {
439 1, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
440 EXPECT_STATS(4, ROOT(1, 2, 5), ROOT(7, 1, 1),
441 DELTA(3, 2), DELTA(5, 1)),
442 }, /* r: 1, 1, 7 d: 3^1, 3^1, 5^1 */
443 {
444 30, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
445 EXPECT_STATS(5, ROOT(1, 2, 5), ROOT(7, 1, 1), ROOT(30, 1, 1),
446 DELTA(3, 2), DELTA(5, 1)),
447 }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1 */
448 {
449 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
450 EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 2), ROOT(30, 1, 1),
451 DELTA(3, 2), DELTA(5, 1), DELTA(8, 1)),
452 }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7 */
453 {
454 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
455 EXPECT_STATS(6, ROOT(1, 2, 5), ROOT(7, 1, 3), ROOT(30, 1, 1),
456 DELTA(3, 2), DELTA(8, 2), DELTA(5, 1)),
457 }, /* r: 1, 1, 7, 30 d: 3^1, 3^1, 5^1, 8^7, 8^7 */
458 {
459 3, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
460 EXPECT_STATS(6, ROOT(1, 2, 4), ROOT(7, 1, 3), ROOT(30, 1, 1),
461 DELTA(8, 2), DELTA(3, 1), DELTA(5, 1)),
462 }, /* r: 1, 1, 7, 30 d: 3^1, 5^1, 8^7, 8^7 */
463 {
464 3, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
465 EXPECT_STATS(5, ROOT(1, 2, 3), ROOT(7, 1, 3), ROOT(30, 1, 1),
466 DELTA(8, 2), DELTA(5, 1)),
467 }, /* r: 1, 1, 7, 30 d: 5^1, 8^7, 8^7 */
468 {
469 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
470 EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(1, 1, 2), ROOT(30, 1, 1),
471 DELTA(8, 2), DELTA(5, 1)),
472 }, /* r: 1, 7, 30 d: 5^1, 8^7, 8^7 */
473 {
474 1, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
475 EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(1, 0, 1),
476 DELTA(8, 2), DELTA(5, 1)),
477 }, /* r: 7, 30 d: 5^1, 8^7, 8^7 */
478 {
479 5, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
480 EXPECT_STATS(3, ROOT(7, 1, 3), ROOT(30, 1, 1),
481 DELTA(8, 2)),
482 }, /* r: 7, 30 d: 8^7, 8^7 */
483 {
484 5, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_INC,
485 EXPECT_STATS(4, ROOT(7, 1, 3), ROOT(30, 1, 1), ROOT(5, 1, 1),
486 DELTA(8, 2)),
487 }, /* r: 7, 30, 5 d: 8^7, 8^7 */
488 {
489 6, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
490 EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
491 DELTA(8, 2), DELTA(6, 1)),
492 }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
493 {
494 8, ACTION_GET, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
495 EXPECT_STATS(5, ROOT(7, 1, 4), ROOT(5, 1, 2), ROOT(30, 1, 1),
496 DELTA(8, 3), DELTA(6, 1)),
497 }, /* r: 7, 30, 5 d: 8^7, 8^7, 8^7, 6^5 */
498 {
499 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
500 EXPECT_STATS(5, ROOT(7, 1, 3), ROOT(5, 1, 2), ROOT(30, 1, 1),
501 DELTA(8, 2), DELTA(6, 1)),
502 }, /* r: 7, 30, 5 d: 8^7, 8^7, 6^5 */
503 {
504 8, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
505 EXPECT_STATS(5, ROOT(7, 1, 2), ROOT(5, 1, 2), ROOT(30, 1, 1),
506 DELTA(8, 1), DELTA(6, 1)),
507 }, /* r: 7, 30, 5 d: 8^7, 6^5 */
508 {
509 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
510 EXPECT_STATS(4, ROOT(5, 1, 2), ROOT(7, 1, 1), ROOT(30, 1, 1),
511 DELTA(6, 1)),
512 }, /* r: 7, 30, 5 d: 6^5 */
513 {
514 8, ACTION_GET, EXPECT_DELTA_INC, EXPECT_ROOT_SAME,
515 EXPECT_STATS(5, ROOT(5, 1, 3), ROOT(7, 1, 1), ROOT(30, 1, 1),
516 DELTA(6, 1), DELTA(8, 1)),
517 }, /* r: 7, 30, 5 d: 6^5, 8^5 */
518 {
519 7, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
520 EXPECT_STATS(4, ROOT(5, 1, 3), ROOT(30, 1, 1),
521 DELTA(6, 1), DELTA(8, 1)),
522 }, /* r: 30, 5 d: 6^5, 8^5 */
523 {
524 30, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_DEC,
525 EXPECT_STATS(3, ROOT(5, 1, 3),
526 DELTA(6, 1), DELTA(8, 1)),
527 }, /* r: 5 d: 6^5, 8^5 */
528 {
529 5, ACTION_PUT, EXPECT_DELTA_SAME, EXPECT_ROOT_SAME,
530 EXPECT_STATS(3, ROOT(5, 0, 2),
531 DELTA(6, 1), DELTA(8, 1)),
532 }, /* r: d: 6^5, 8^5 */
533 {
534 6, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_SAME,
535 EXPECT_STATS(2, ROOT(5, 0, 1),
536 DELTA(8, 1)),
537 }, /* r: d: 6^5 */
538 {
539 8, ACTION_PUT, EXPECT_DELTA_DEC, EXPECT_ROOT_DEC,
540 EXPECT_STATS(0, ),
541 }, /* r: d: */
542};
543
544static int check_expect(struct world *world,
545 const struct action_item *action_item,
546 unsigned int orig_delta_count,
547 unsigned int orig_root_count)
548{
549 unsigned int key_id = action_item->key_id;
550
551 switch (action_item->expect_delta) {
552 case EXPECT_DELTA_SAME:
553 if (orig_delta_count != world->delta_count) {
554 pr_err("Key %u: Delta count changed while expected to remain the same.\n",
555 key_id);
556 return -EINVAL;
557 }
558 break;
559 case EXPECT_DELTA_INC:
560 if (WARN_ON(action_item->action == ACTION_PUT))
561 return -EINVAL;
562 if (orig_delta_count + 1 != world->delta_count) {
563 pr_err("Key %u: Delta count was not incremented.\n",
564 key_id);
565 return -EINVAL;
566 }
567 break;
568 case EXPECT_DELTA_DEC:
569 if (WARN_ON(action_item->action == ACTION_GET))
570 return -EINVAL;
571 if (orig_delta_count - 1 != world->delta_count) {
572 pr_err("Key %u: Delta count was not decremented.\n",
573 key_id);
574 return -EINVAL;
575 }
576 break;
577 }
578
579 switch (action_item->expect_root) {
580 case EXPECT_ROOT_SAME:
581 if (orig_root_count != world->root_count) {
582 pr_err("Key %u: Root count changed while expected to remain the same.\n",
583 key_id);
584 return -EINVAL;
585 }
586 break;
587 case EXPECT_ROOT_INC:
588 if (WARN_ON(action_item->action == ACTION_PUT))
589 return -EINVAL;
590 if (orig_root_count + 1 != world->root_count) {
591 pr_err("Key %u: Root count was not incremented.\n",
592 key_id);
593 return -EINVAL;
594 }
595 break;
596 case EXPECT_ROOT_DEC:
597 if (WARN_ON(action_item->action == ACTION_GET))
598 return -EINVAL;
599 if (orig_root_count - 1 != world->root_count) {
600 pr_err("Key %u: Root count was not decremented.\n",
601 key_id);
602 return -EINVAL;
603 }
604 }
605
606 return 0;
607}
608
609static unsigned int obj_to_key_id(struct objagg_obj *objagg_obj)
610{
611 const struct tokey *root_key;
612 const struct delta *delta;
613 unsigned int key_id;
614
615 root_key = objagg_obj_root_priv(objagg_obj);
616 key_id = root_key->id;
617 delta = objagg_obj_delta_priv(objagg_obj);
618 if (delta)
619 key_id += delta->key_id_diff;
620 return key_id;
621}
622
623static int
624check_expect_stats_nums(const struct objagg_obj_stats_info *stats_info,
625 const struct expect_stats_info *expect_stats_info,
626 const char **errmsg)
627{
628 if (stats_info->is_root != expect_stats_info->is_root) {
629 if (errmsg)
630 *errmsg = "Incorrect root/delta indication";
631 return -EINVAL;
632 }
633 if (stats_info->stats.user_count !=
634 expect_stats_info->stats.user_count) {
635 if (errmsg)
636 *errmsg = "Incorrect user count";
637 return -EINVAL;
638 }
639 if (stats_info->stats.delta_user_count !=
640 expect_stats_info->stats.delta_user_count) {
641 if (errmsg)
642 *errmsg = "Incorrect delta user count";
643 return -EINVAL;
644 }
645 return 0;
646}
647
648static int
649check_expect_stats_key_id(const struct objagg_obj_stats_info *stats_info,
650 const struct expect_stats_info *expect_stats_info,
651 const char **errmsg)
652{
653 if (obj_to_key_id(stats_info->objagg_obj) !=
654 expect_stats_info->key_id) {
655 if (errmsg)
656 *errmsg = "incorrect key id";
657 return -EINVAL;
658 }
659 return 0;
660}
661
662static int check_expect_stats_neigh(const struct objagg_stats *stats,
663 const struct expect_stats *expect_stats,
664 int pos)
665{
666 int i;
667 int err;
668
669 for (i = pos - 1; i >= 0; i--) {
670 err = check_expect_stats_nums(&stats->stats_info[i],
671 &expect_stats->info[pos], NULL);
672 if (err)
673 break;
674 err = check_expect_stats_key_id(&stats->stats_info[i],
675 &expect_stats->info[pos], NULL);
676 if (!err)
677 return 0;
678 }
679 for (i = pos + 1; i < stats->stats_info_count; i++) {
680 err = check_expect_stats_nums(&stats->stats_info[i],
681 &expect_stats->info[pos], NULL);
682 if (err)
683 break;
684 err = check_expect_stats_key_id(&stats->stats_info[i],
685 &expect_stats->info[pos], NULL);
686 if (!err)
687 return 0;
688 }
689 return -EINVAL;
690}
691
692static int __check_expect_stats(const struct objagg_stats *stats,
693 const struct expect_stats *expect_stats,
694 const char **errmsg)
695{
696 int i;
697 int err;
698
699 if (stats->stats_info_count != expect_stats->info_count) {
700 *errmsg = "Unexpected object count";
701 return -EINVAL;
702 }
703
704 for (i = 0; i < stats->stats_info_count; i++) {
705 err = check_expect_stats_nums(&stats->stats_info[i],
706 &expect_stats->info[i], errmsg);
707 if (err)
708 return err;
709 err = check_expect_stats_key_id(&stats->stats_info[i],
710 &expect_stats->info[i], errmsg);
711 if (err) {
712 /* It is possible that one of the neighbor stats with
713 * same numbers have the correct key id, so check it
714 */
715 err = check_expect_stats_neigh(stats, expect_stats, i);
716 if (err)
717 return err;
718 }
719 }
720 return 0;
721}
722
723static int check_expect_stats(struct objagg *objagg,
724 const struct expect_stats *expect_stats,
725 const char **errmsg)
726{
727 const struct objagg_stats *stats;
728 int err;
729
730 stats = objagg_stats_get(objagg);
731 if (IS_ERR(stats))
732 return PTR_ERR(stats);
733 err = __check_expect_stats(stats, expect_stats, errmsg);
734 objagg_stats_put(stats);
735 return err;
736}
737
738static int test_delta_action_item(struct world *world,
739 struct objagg *objagg,
740 const struct action_item *action_item,
741 bool inverse)
742{
743 unsigned int orig_delta_count = world->delta_count;
744 unsigned int orig_root_count = world->root_count;
745 unsigned int key_id = action_item->key_id;
746 enum action action = action_item->action;
747 struct objagg_obj *objagg_obj;
748 const char *errmsg;
749 int err;
750
751 if (inverse)
752 action = action == ACTION_GET ? ACTION_PUT : ACTION_GET;
753
754 switch (action) {
755 case ACTION_GET:
756 objagg_obj = world_obj_get(world, objagg, key_id);
757 if (IS_ERR(objagg_obj))
758 return PTR_ERR(objagg_obj);
759 break;
760 case ACTION_PUT:
761 world_obj_put(world, objagg, key_id);
762 break;
763 }
764
765 if (inverse)
766 return 0;
767 err = check_expect(world, action_item,
768 orig_delta_count, orig_root_count);
769 if (err)
770 goto errout;
771
772 errmsg = NULL;
773 err = check_expect_stats(objagg, &action_item->expect_stats, &errmsg);
774 if (err) {
775 pr_err("Key %u: Stats: %s\n", action_item->key_id, errmsg);
776 goto errout;
777 }
778
779 return 0;
780
781errout:
782 /* This can only happen when action is not inversed.
783 * So in case of an error, cleanup by doing inverse action.
784 */
785 test_delta_action_item(world, objagg, action_item, true);
786 return err;
787}
788
789static int test_delta(void)
790{
791 struct world world = {};
792 struct objagg *objagg;
793 int i;
794 int err;
795
796 objagg = objagg_create(&delta_ops, &world);
797 if (IS_ERR(objagg))
798 return PTR_ERR(objagg);
799
800 for (i = 0; i < ARRAY_SIZE(action_items); i++) {
801 err = test_delta_action_item(&world, objagg,
802 &action_items[i], false);
803 if (err)
804 goto err_do_action_item;
805 }
806
807 objagg_destroy(objagg);
808 return 0;
809
810err_do_action_item:
811 for (i--; i >= 0; i--)
812 test_delta_action_item(&world, objagg, &action_items[i], true);
813
814 objagg_destroy(objagg);
815 return err;
816}
817
818static int __init test_objagg_init(void)
819{
820 int err;
821
822 err = test_nodelta();
823 if (err)
824 return err;
825 return test_delta();
826}
827
828static void __exit test_objagg_exit(void)
829{
830}
831
832module_init(test_objagg_init);
833module_exit(test_objagg_exit);
834MODULE_LICENSE("Dual BSD/GPL");
835MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
836MODULE_DESCRIPTION("Test module for objagg");
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 53527ea822b5..659b6cc0d483 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/printk.h> 10#include <linux/printk.h>
11#include <linux/random.h> 11#include <linux/random.h>
12#include <linux/rtc.h>
12#include <linux/slab.h> 13#include <linux/slab.h>
13#include <linux/string.h> 14#include <linux/string.h>
14 15
@@ -249,12 +250,11 @@ plain_format(void)
249#endif /* BITS_PER_LONG == 64 */ 250#endif /* BITS_PER_LONG == 64 */
250 251
251static int __init 252static int __init
252plain_hash(void) 253plain_hash_to_buffer(const void *p, char *buf, size_t len)
253{ 254{
254 char buf[PLAIN_BUF_SIZE];
255 int nchars; 255 int nchars;
256 256
257 nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR); 257 nchars = snprintf(buf, len, "%p", p);
258 258
259 if (nchars != PTR_WIDTH) 259 if (nchars != PTR_WIDTH)
260 return -1; 260 return -1;
@@ -265,6 +265,20 @@ plain_hash(void)
265 return 0; 265 return 0;
266 } 266 }
267 267
268 return 0;
269}
270
271
272static int __init
273plain_hash(void)
274{
275 char buf[PLAIN_BUF_SIZE];
276 int ret;
277
278 ret = plain_hash_to_buffer(PTR, buf, PLAIN_BUF_SIZE);
279 if (ret)
280 return ret;
281
268 if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0) 282 if (strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
269 return -1; 283 return -1;
270 284
@@ -295,6 +309,23 @@ plain(void)
295} 309}
296 310
297static void __init 311static void __init
312test_hashed(const char *fmt, const void *p)
313{
314 char buf[PLAIN_BUF_SIZE];
315 int ret;
316
317 /*
318 * No need to increase failed test counter since this is assumed
319 * to be called after plain().
320 */
321 ret = plain_hash_to_buffer(p, buf, PLAIN_BUF_SIZE);
322 if (ret)
323 return;
324
325 test(buf, fmt, p);
326}
327
328static void __init
298symbol_ptr(void) 329symbol_ptr(void)
299{ 330{
300} 331}
@@ -419,6 +450,29 @@ struct_va_format(void)
419} 450}
420 451
421static void __init 452static void __init
453struct_rtc_time(void)
454{
455 /* 1543210543 */
456 const struct rtc_time tm = {
457 .tm_sec = 43,
458 .tm_min = 35,
459 .tm_hour = 5,
460 .tm_mday = 26,
461 .tm_mon = 10,
462 .tm_year = 118,
463 };
464
465 test_hashed("%pt", &tm);
466
467 test("2018-11-26T05:35:43", "%ptR", &tm);
468 test("0118-10-26T05:35:43", "%ptRr", &tm);
469 test("05:35:43|2018-11-26", "%ptRt|%ptRd", &tm, &tm);
470 test("05:35:43|0118-10-26", "%ptRtr|%ptRdr", &tm, &tm);
471 test("05:35:43|2018-11-26", "%ptRttr|%ptRdtr", &tm, &tm);
472 test("05:35:43 tr|2018-11-26 tr", "%ptRt tr|%ptRd tr", &tm, &tm);
473}
474
475static void __init
422struct_clk(void) 476struct_clk(void)
423{ 477{
424} 478}
@@ -529,6 +583,7 @@ test_pointer(void)
529 uuid(); 583 uuid();
530 dentry(); 584 dentry();
531 struct_va_format(); 585 struct_va_format();
586 struct_rtc_time();
532 struct_clk(); 587 struct_clk();
533 bitmap(); 588 bitmap();
534 netdev_features(); 589 netdev_features();
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39ce5310..6a8ac7626797 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -20,11 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
22#include <linux/rhashtable.h> 22#include <linux/rhashtable.h>
23#include <linux/semaphore.h>
24#include <linux/slab.h> 23#include <linux/slab.h>
25#include <linux/sched.h> 24#include <linux/sched.h>
26#include <linux/random.h> 25#include <linux/random.h>
27#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/wait.h>
28 28
29#define MAX_ENTRIES 1000000 29#define MAX_ENTRIES 1000000
30#define TEST_INSERT_FAIL INT_MAX 30#define TEST_INSERT_FAIL INT_MAX
@@ -112,8 +112,8 @@ static struct rhashtable_params test_rht_params_dup = {
112 .automatic_shrinking = false, 112 .automatic_shrinking = false,
113}; 113};
114 114
115static struct semaphore prestart_sem; 115static atomic_t startup_count;
116static struct semaphore startup_sem = __SEMAPHORE_INITIALIZER(startup_sem, 0); 116static DECLARE_WAIT_QUEUE_HEAD(startup_wait);
117 117
118static int insert_retry(struct rhashtable *ht, struct test_obj *obj, 118static int insert_retry(struct rhashtable *ht, struct test_obj *obj,
119 const struct rhashtable_params params) 119 const struct rhashtable_params params)
@@ -634,9 +634,12 @@ static int threadfunc(void *data)
634 int i, step, err = 0, insert_retries = 0; 634 int i, step, err = 0, insert_retries = 0;
635 struct thread_data *tdata = data; 635 struct thread_data *tdata = data;
636 636
637 up(&prestart_sem); 637 if (atomic_dec_and_test(&startup_count))
638 if (down_interruptible(&startup_sem)) 638 wake_up(&startup_wait);
639 pr_err(" thread[%d]: down_interruptible failed\n", tdata->id); 639 if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == -1)) {
640 pr_err(" thread[%d]: interrupted\n", tdata->id);
641 goto out;
642 }
640 643
641 for (i = 0; i < tdata->entries; i++) { 644 for (i = 0; i < tdata->entries; i++) {
642 tdata->objs[i].value.id = i; 645 tdata->objs[i].value.id = i;
@@ -755,7 +758,7 @@ static int __init test_rht_init(void)
755 758
756 pr_info("Testing concurrent rhashtable access from %d threads\n", 759 pr_info("Testing concurrent rhashtable access from %d threads\n",
757 tcount); 760 tcount);
758 sema_init(&prestart_sem, 1 - tcount); 761 atomic_set(&startup_count, tcount);
759 tdata = vzalloc(array_size(tcount, sizeof(struct thread_data))); 762 tdata = vzalloc(array_size(tcount, sizeof(struct thread_data)));
760 if (!tdata) 763 if (!tdata)
761 return -ENOMEM; 764 return -ENOMEM;
@@ -781,15 +784,18 @@ static int __init test_rht_init(void)
781 tdata[i].objs = objs + i * entries; 784 tdata[i].objs = objs + i * entries;
782 tdata[i].task = kthread_run(threadfunc, &tdata[i], 785 tdata[i].task = kthread_run(threadfunc, &tdata[i],
783 "rhashtable_thrad[%d]", i); 786 "rhashtable_thrad[%d]", i);
784 if (IS_ERR(tdata[i].task)) 787 if (IS_ERR(tdata[i].task)) {
785 pr_err(" kthread_run failed for thread %d\n", i); 788 pr_err(" kthread_run failed for thread %d\n", i);
786 else 789 atomic_dec(&startup_count);
790 } else {
787 started_threads++; 791 started_threads++;
792 }
788 } 793 }
789 if (down_interruptible(&prestart_sem)) 794 if (wait_event_interruptible(startup_wait, atomic_read(&startup_count) == 0))
790 pr_err(" down interruptible failed\n"); 795 pr_err(" wait_event interruptible failed\n");
791 for (i = 0; i < tcount; i++) 796 /* count is 0 now, set it to -1 and wake up all threads together */
792 up(&startup_sem); 797 atomic_dec(&startup_count);
798 wake_up_all(&startup_wait);
793 for (i = 0; i < tcount; i++) { 799 for (i = 0; i < tcount; i++) {
794 if (IS_ERR(tdata[i].task)) 800 if (IS_ERR(tdata[i].task))
795 continue; 801 continue;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
new file mode 100644
index 000000000000..4676c0a1eeca
--- /dev/null
+++ b/lib/test_xarray.c
@@ -0,0 +1,1351 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * test_xarray.c: Test the XArray API
4 * Copyright (c) 2017-2018 Microsoft Corporation
5 * Author: Matthew Wilcox <willy@infradead.org>
6 */
7
8#include <linux/xarray.h>
9#include <linux/module.h>
10
11static unsigned int tests_run;
12static unsigned int tests_passed;
13
14#ifndef XA_DEBUG
15# ifdef __KERNEL__
16void xa_dump(const struct xarray *xa) { }
17# endif
18#undef XA_BUG_ON
19#define XA_BUG_ON(xa, x) do { \
20 tests_run++; \
21 if (x) { \
22 printk("BUG at %s:%d\n", __func__, __LINE__); \
23 xa_dump(xa); \
24 dump_stack(); \
25 } else { \
26 tests_passed++; \
27 } \
28} while (0)
29#endif
30
31static void *xa_mk_index(unsigned long index)
32{
33 return xa_mk_value(index & LONG_MAX);
34}
35
36static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
37{
38 return xa_store(xa, index, xa_mk_index(index), gfp);
39}
40
41static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
42{
43 u32 id = 0;
44
45 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
46 gfp) != 0);
47 XA_BUG_ON(xa, id != index);
48}
49
50static void xa_erase_index(struct xarray *xa, unsigned long index)
51{
52 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
53 XA_BUG_ON(xa, xa_load(xa, index) != NULL);
54}
55
56/*
57 * If anyone needs this, please move it to xarray.c. We have no current
58 * users outside the test suite because all current multislot users want
59 * to use the advanced API.
60 */
61static void *xa_store_order(struct xarray *xa, unsigned long index,
62 unsigned order, void *entry, gfp_t gfp)
63{
64 XA_STATE_ORDER(xas, xa, index, order);
65 void *curr;
66
67 do {
68 xas_lock(&xas);
69 curr = xas_store(&xas, entry);
70 xas_unlock(&xas);
71 } while (xas_nomem(&xas, gfp));
72
73 return curr;
74}
75
76static noinline void check_xa_err(struct xarray *xa)
77{
78 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
79 XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
80#ifndef __KERNEL__
81 /* The kernel does not fail GFP_NOWAIT allocations */
82 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
83 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
84#endif
85 XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
86 XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
87 XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
88// kills the test-suite :-(
89// XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
90}
91
92static noinline void check_xas_retry(struct xarray *xa)
93{
94 XA_STATE(xas, xa, 0);
95 void *entry;
96
97 xa_store_index(xa, 0, GFP_KERNEL);
98 xa_store_index(xa, 1, GFP_KERNEL);
99
100 rcu_read_lock();
101 XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
102 xa_erase_index(xa, 1);
103 XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
104 XA_BUG_ON(xa, xas_retry(&xas, NULL));
105 XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
106 xas_reset(&xas);
107 XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
108 XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
109 XA_BUG_ON(xa, xas.xa_node != NULL);
110
111 XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
112 XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
113 xas.xa_node = XAS_RESTART;
114 XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
115 rcu_read_unlock();
116
117 /* Make sure we can iterate through retry entries */
118 xas_lock(&xas);
119 xas_set(&xas, 0);
120 xas_store(&xas, XA_RETRY_ENTRY);
121 xas_set(&xas, 1);
122 xas_store(&xas, XA_RETRY_ENTRY);
123
124 xas_set(&xas, 0);
125 xas_for_each(&xas, entry, ULONG_MAX) {
126 xas_store(&xas, xa_mk_index(xas.xa_index));
127 }
128 xas_unlock(&xas);
129
130 xa_erase_index(xa, 0);
131 xa_erase_index(xa, 1);
132}
133
134static noinline void check_xa_load(struct xarray *xa)
135{
136 unsigned long i, j;
137
138 for (i = 0; i < 1024; i++) {
139 for (j = 0; j < 1024; j++) {
140 void *entry = xa_load(xa, j);
141 if (j < i)
142 XA_BUG_ON(xa, xa_to_value(entry) != j);
143 else
144 XA_BUG_ON(xa, entry);
145 }
146 XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
147 }
148
149 for (i = 0; i < 1024; i++) {
150 for (j = 0; j < 1024; j++) {
151 void *entry = xa_load(xa, j);
152 if (j >= i)
153 XA_BUG_ON(xa, xa_to_value(entry) != j);
154 else
155 XA_BUG_ON(xa, entry);
156 }
157 xa_erase_index(xa, i);
158 }
159 XA_BUG_ON(xa, !xa_empty(xa));
160}
161
162static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
163{
164 unsigned int order;
165 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
166
167 /* NULL elements have no marks set */
168 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
169 xa_set_mark(xa, index, XA_MARK_0);
170 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
171
172 /* Storing a pointer will not make a mark appear */
173 XA_BUG_ON(xa, xa_store_index(xa, index, GFP_KERNEL) != NULL);
174 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
175 xa_set_mark(xa, index, XA_MARK_0);
176 XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
177
178 /* Setting one mark will not set another mark */
179 XA_BUG_ON(xa, xa_get_mark(xa, index + 1, XA_MARK_0));
180 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1));
181
182 /* Storing NULL clears marks, and they can't be set again */
183 xa_erase_index(xa, index);
184 XA_BUG_ON(xa, !xa_empty(xa));
185 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
186 xa_set_mark(xa, index, XA_MARK_0);
187 XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
188
189 /*
190 * Storing a multi-index entry over entries with marks gives the
191 * entire entry the union of the marks
192 */
193 BUG_ON((index % 4) != 0);
194 for (order = 2; order < max_order; order++) {
195 unsigned long base = round_down(index, 1UL << order);
196 unsigned long next = base + (1UL << order);
197 unsigned long i;
198
199 XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
200 xa_set_mark(xa, index + 1, XA_MARK_0);
201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
202 xa_set_mark(xa, index + 2, XA_MARK_1);
203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
204 xa_store_order(xa, index, order, xa_mk_index(index),
205 GFP_KERNEL);
206 for (i = base; i < next; i++) {
207 XA_STATE(xas, xa, i);
208 unsigned int seen = 0;
209 void *entry;
210
211 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
212 XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
213 XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
214
215 /* We should see two elements in the array */
216 rcu_read_lock();
217 xas_for_each(&xas, entry, ULONG_MAX)
218 seen++;
219 rcu_read_unlock();
220 XA_BUG_ON(xa, seen != 2);
221
222 /* One of which is marked */
223 xas_set(&xas, 0);
224 seen = 0;
225 rcu_read_lock();
226 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
227 seen++;
228 rcu_read_unlock();
229 XA_BUG_ON(xa, seen != 1);
230 }
231 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
232 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
233 XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
234 xa_erase_index(xa, index);
235 xa_erase_index(xa, next);
236 XA_BUG_ON(xa, !xa_empty(xa));
237 }
238 XA_BUG_ON(xa, !xa_empty(xa));
239}
240
241static noinline void check_xa_mark_2(struct xarray *xa)
242{
243 XA_STATE(xas, xa, 0);
244 unsigned long index;
245 unsigned int count = 0;
246 void *entry;
247
248 xa_store_index(xa, 0, GFP_KERNEL);
249 xa_set_mark(xa, 0, XA_MARK_0);
250 xas_lock(&xas);
251 xas_load(&xas);
252 xas_init_marks(&xas);
253 xas_unlock(&xas);
254 XA_BUG_ON(xa, !xa_get_mark(xa, 0, XA_MARK_0) == 0);
255
256 for (index = 3500; index < 4500; index++) {
257 xa_store_index(xa, index, GFP_KERNEL);
258 xa_set_mark(xa, index, XA_MARK_0);
259 }
260
261 xas_reset(&xas);
262 rcu_read_lock();
263 xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
264 count++;
265 rcu_read_unlock();
266 XA_BUG_ON(xa, count != 1000);
267
268 xas_lock(&xas);
269 xas_for_each(&xas, entry, ULONG_MAX) {
270 xas_init_marks(&xas);
271 XA_BUG_ON(xa, !xa_get_mark(xa, xas.xa_index, XA_MARK_0));
272 XA_BUG_ON(xa, !xas_get_mark(&xas, XA_MARK_0));
273 }
274 xas_unlock(&xas);
275
276 xa_destroy(xa);
277}
278
279static noinline void check_xa_mark(struct xarray *xa)
280{
281 unsigned long index;
282
283 for (index = 0; index < 16384; index += 4)
284 check_xa_mark_1(xa, index);
285
286 check_xa_mark_2(xa);
287}
288
289static noinline void check_xa_shrink(struct xarray *xa)
290{
291 XA_STATE(xas, xa, 1);
292 struct xa_node *node;
293 unsigned int order;
294 unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 15 : 1;
295
296 XA_BUG_ON(xa, !xa_empty(xa));
297 XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
298 XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
299
300 /*
301 * Check that erasing the entry at 1 shrinks the tree and properly
302 * marks the node as being deleted.
303 */
304 xas_lock(&xas);
305 XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
306 node = xas.xa_node;
307 XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
308 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
309 XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
310 XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
311 XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
312 XA_BUG_ON(xa, xas_load(&xas) != NULL);
313 xas_unlock(&xas);
314 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
315 xa_erase_index(xa, 0);
316 XA_BUG_ON(xa, !xa_empty(xa));
317
318 for (order = 0; order < max_order; order++) {
319 unsigned long max = (1UL << order) - 1;
320 xa_store_order(xa, 0, order, xa_mk_value(0), GFP_KERNEL);
321 XA_BUG_ON(xa, xa_load(xa, max) != xa_mk_value(0));
322 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
323 rcu_read_lock();
324 node = xa_head(xa);
325 rcu_read_unlock();
326 XA_BUG_ON(xa, xa_store_index(xa, ULONG_MAX, GFP_KERNEL) !=
327 NULL);
328 rcu_read_lock();
329 XA_BUG_ON(xa, xa_head(xa) == node);
330 rcu_read_unlock();
331 XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL);
332 xa_erase_index(xa, ULONG_MAX);
333 XA_BUG_ON(xa, xa->xa_head != node);
334 xa_erase_index(xa, 0);
335 }
336}
337
338static noinline void check_cmpxchg(struct xarray *xa)
339{
340 void *FIVE = xa_mk_value(5);
341 void *SIX = xa_mk_value(6);
342 void *LOTS = xa_mk_value(12345678);
343
344 XA_BUG_ON(xa, !xa_empty(xa));
345 XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_KERNEL) != NULL);
346 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa, GFP_KERNEL) != -EEXIST);
347 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, SIX, FIVE, GFP_KERNEL) != LOTS);
348 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, LOTS, FIVE, GFP_KERNEL) != LOTS);
349 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, FIVE, LOTS, GFP_KERNEL) != FIVE);
350 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != NULL);
351 XA_BUG_ON(xa, xa_cmpxchg(xa, 5, NULL, FIVE, GFP_KERNEL) != NULL);
352 xa_erase_index(xa, 12345678);
353 xa_erase_index(xa, 5);
354 XA_BUG_ON(xa, !xa_empty(xa));
355}
356
357static noinline void check_reserve(struct xarray *xa)
358{
359 void *entry;
360 unsigned long index = 0;
361
362 /* An array with a reserved entry is not empty */
363 XA_BUG_ON(xa, !xa_empty(xa));
364 xa_reserve(xa, 12345678, GFP_KERNEL);
365 XA_BUG_ON(xa, xa_empty(xa));
366 XA_BUG_ON(xa, xa_load(xa, 12345678));
367 xa_release(xa, 12345678);
368 XA_BUG_ON(xa, !xa_empty(xa));
369
370 /* Releasing a used entry does nothing */
371 xa_reserve(xa, 12345678, GFP_KERNEL);
372 XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL);
373 xa_release(xa, 12345678);
374 xa_erase_index(xa, 12345678);
375 XA_BUG_ON(xa, !xa_empty(xa));
376
377 /* cmpxchg sees a reserved entry as NULL */
378 xa_reserve(xa, 12345678, GFP_KERNEL);
379 XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, NULL, xa_mk_value(12345678),
380 GFP_NOWAIT) != NULL);
381 xa_release(xa, 12345678);
382 xa_erase_index(xa, 12345678);
383 XA_BUG_ON(xa, !xa_empty(xa));
384
385 /* And so does xa_insert */
386 xa_reserve(xa, 12345678, GFP_KERNEL);
387 XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
388 xa_erase_index(xa, 12345678);
389 XA_BUG_ON(xa, !xa_empty(xa));
390
391 /* Can iterate through a reserved entry */
392 xa_store_index(xa, 5, GFP_KERNEL);
393 xa_reserve(xa, 6, GFP_KERNEL);
394 xa_store_index(xa, 7, GFP_KERNEL);
395
396 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
397 XA_BUG_ON(xa, index != 5 && index != 7);
398 }
399 xa_destroy(xa);
400}
401
402static noinline void check_xas_erase(struct xarray *xa)
403{
404 XA_STATE(xas, xa, 0);
405 void *entry;
406 unsigned long i, j;
407
408 for (i = 0; i < 200; i++) {
409 for (j = i; j < 2 * i + 17; j++) {
410 xas_set(&xas, j);
411 do {
412 xas_lock(&xas);
413 xas_store(&xas, xa_mk_index(j));
414 xas_unlock(&xas);
415 } while (xas_nomem(&xas, GFP_KERNEL));
416 }
417
418 xas_set(&xas, ULONG_MAX);
419 do {
420 xas_lock(&xas);
421 xas_store(&xas, xa_mk_value(0));
422 xas_unlock(&xas);
423 } while (xas_nomem(&xas, GFP_KERNEL));
424
425 xas_lock(&xas);
426 xas_store(&xas, NULL);
427
428 xas_set(&xas, 0);
429 j = i;
430 xas_for_each(&xas, entry, ULONG_MAX) {
431 XA_BUG_ON(xa, entry != xa_mk_index(j));
432 xas_store(&xas, NULL);
433 j++;
434 }
435 xas_unlock(&xas);
436 XA_BUG_ON(xa, !xa_empty(xa));
437 }
438}
439
440#ifdef CONFIG_XARRAY_MULTI
441static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
442 unsigned int order)
443{
444 XA_STATE(xas, xa, index);
445 unsigned long min = index & ~((1UL << order) - 1);
446 unsigned long max = min + (1UL << order);
447
448 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
449 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
450 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
451 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
452 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
453
454 xas_lock(&xas);
455 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
456 xas_unlock(&xas);
457 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
458 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
459 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
460 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
461
462 xa_erase_index(xa, min);
463 XA_BUG_ON(xa, !xa_empty(xa));
464}
465
466static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
467 unsigned int order)
468{
469 XA_STATE(xas, xa, index);
470 xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
471
472 xas_lock(&xas);
473 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
474 XA_BUG_ON(xa, xas.xa_index != index);
475 XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
476 xas_unlock(&xas);
477 XA_BUG_ON(xa, !xa_empty(xa));
478}
479
480static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
481 unsigned int order)
482{
483 XA_STATE(xas, xa, 0);
484 void *entry;
485 int n = 0;
486
487 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
488
489 xas_lock(&xas);
490 xas_for_each(&xas, entry, ULONG_MAX) {
491 XA_BUG_ON(xa, entry != xa_mk_index(index));
492 n++;
493 }
494 XA_BUG_ON(xa, n != 1);
495 xas_set(&xas, index + 1);
496 xas_for_each(&xas, entry, ULONG_MAX) {
497 XA_BUG_ON(xa, entry != xa_mk_index(index));
498 n++;
499 }
500 XA_BUG_ON(xa, n != 2);
501 xas_unlock(&xas);
502
503 xa_destroy(xa);
504}
505#endif
506
507static noinline void check_multi_store(struct xarray *xa)
508{
509#ifdef CONFIG_XARRAY_MULTI
510 unsigned long i, j, k;
511 unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
512
513 /* Loading from any position returns the same value */
514 xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
515 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
516 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
517 XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
518 rcu_read_lock();
519 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
520 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
521 rcu_read_unlock();
522
523 /* Storing adjacent to the value does not alter the value */
524 xa_store(xa, 3, xa, GFP_KERNEL);
525 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
526 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
527 XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
528 rcu_read_lock();
529 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
530 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
531 rcu_read_unlock();
532
533 /* Overwriting multiple indexes works */
534 xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
535 XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
536 XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
537 XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
538 XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
539 XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
540 rcu_read_lock();
541 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
542 XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
543 rcu_read_unlock();
544
545 /* We can erase multiple values with a single store */
546 xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
547 XA_BUG_ON(xa, !xa_empty(xa));
548
549 /* Even when the first slot is empty but the others aren't */
550 xa_store_index(xa, 1, GFP_KERNEL);
551 xa_store_index(xa, 2, GFP_KERNEL);
552 xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
553 XA_BUG_ON(xa, !xa_empty(xa));
554
555 for (i = 0; i < max_order; i++) {
556 for (j = 0; j < max_order; j++) {
557 xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
558 xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
559
560 for (k = 0; k < max_order; k++) {
561 void *entry = xa_load(xa, (1UL << k) - 1);
562 if ((i < k) && (j < k))
563 XA_BUG_ON(xa, entry != NULL);
564 else
565 XA_BUG_ON(xa, entry != xa_mk_index(j));
566 }
567
568 xa_erase(xa, 0);
569 XA_BUG_ON(xa, !xa_empty(xa));
570 }
571 }
572
573 for (i = 0; i < 20; i++) {
574 check_multi_store_1(xa, 200, i);
575 check_multi_store_1(xa, 0, i);
576 check_multi_store_1(xa, (1UL << i) + 1, i);
577 }
578 check_multi_store_2(xa, 4095, 9);
579
580 for (i = 1; i < 20; i++) {
581 check_multi_store_3(xa, 0, i);
582 check_multi_store_3(xa, 1UL << i, i);
583 }
584#endif
585}
586
587static DEFINE_XARRAY_ALLOC(xa0);
588
/*
 * Exercise the allocating XArray (xa0): IDs are handed out from 0 upwards,
 * erased IDs become allocatable again, stored entries mark their index as
 * used, and allocation fails with -ENOSPC once the requested ID space is
 * exhausted.
 */
static noinline void check_xa_alloc(void)
{
	int i;
	u32 id;

	/* An empty array should assign 0 to the first alloc */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* Erasing it should make the array empty again */
	xa_erase_index(&xa0, 0);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));

	/* And it should assign 0 again */
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	/* The next assigned ID should be 1 */
	xa_alloc_index(&xa0, 1, GFP_KERNEL);
	xa_erase_index(&xa0, 1);

	/* Storing a value should mark it used */
	xa_store_index(&xa0, 1, GFP_KERNEL);
	xa_alloc_index(&xa0, 2, GFP_KERNEL);

	/* If we then erase 0, it should be free */
	xa_erase_index(&xa0, 0);
	xa_alloc_index(&xa0, 0, GFP_KERNEL);

	xa_erase_index(&xa0, 1);
	xa_erase_index(&xa0, 2);

	/* Sequential allocation should fill indices 1..4999 in order */
	for (i = 1; i < 5000; i++) {
		xa_alloc_index(&xa0, i, GFP_KERNEL);
	}

	xa_destroy(&xa0);

	/*
	 * Near the top of the u32 ID space: 0xfffffffe and 0xffffffff can
	 * still be allocated, then the next attempt must report -ENOSPC
	 * and leave the caller's id untouched.
	 */
	id = 0xfffffffeU;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xfffffffeU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, id != 0xffffffffU);
	xa_destroy(&xa0);

	/*
	 * A starting hint above the limit must fail with -ENOSPC, whether
	 * or not unrelated entries exist below the limit.
	 */
	id = 10;
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
				GFP_KERNEL) != -ENOSPC);
	xa_erase_index(&xa0, 3);
	XA_BUG_ON(&xa0, !xa_empty(&xa0));
}
646
/*
 * Store a multi-index entry covering [start, start + 2^order) and verify
 * that xas_for_each_conflict() visited exactly @present pre-existing value
 * entries within that range before the store replaced them.
 */
static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
			unsigned int order, unsigned int present)
{
	XA_STATE_ORDER(xas, xa, start, order);
	void *entry;
	unsigned int count = 0;

retry:
	xas_lock(&xas);
	xas_for_each_conflict(&xas, entry) {
		/* Every conflicting entry must be a value within the range */
		XA_BUG_ON(xa, !xa_is_value(entry));
		XA_BUG_ON(xa, entry < xa_mk_index(start));
		XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
		count++;
	}
	xas_store(&xas, xa_mk_index(start));
	xas_unlock(&xas);
	/* On allocation failure, restart the walk (and the conflict count) */
	if (xas_nomem(&xas, GFP_KERNEL)) {
		count = 0;
		goto retry;
	}
	XA_BUG_ON(xa, xas_error(&xas));
	XA_BUG_ON(xa, count != present);
	/* Both ends of the range now resolve to the newly stored entry */
	XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
	XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
			xa_mk_index(start));
	xa_erase_index(xa, start);
}
675
/*
 * Drive __check_store_iter() across every order with 0, 1 and 2^order
 * pre-existing entries, aligned and unaligned, to cover the conflict
 * iterator's cases.
 */
static noinline void check_store_iter(struct xarray *xa)
{
	unsigned int i, j;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;

	for (i = 0; i < max_order; i++) {
		/* [min, max] is the naturally-aligned 2^i-sized range */
		unsigned int min = 1 << i;
		unsigned int max = (2 << i) - 1;
		/* No conflicts at all */
		__check_store_iter(xa, 0, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
		__check_store_iter(xa, min, i, 0);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* One conflict: at the start, then at the end of the range */
		xa_store_index(xa, min, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));
		xa_store_index(xa, max, GFP_KERNEL);
		__check_store_iter(xa, min, i, 1);
		XA_BUG_ON(xa, !xa_empty(xa));

		/* Fully populated range: every slot is a conflict */
		for (j = 0; j < min; j++)
			xa_store_index(xa, j, GFP_KERNEL);
		__check_store_iter(xa, 0, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
		for (j = 0; j < min; j++)
			xa_store_index(xa, min + j, GFP_KERNEL);
		__check_store_iter(xa, min, i, min);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#ifdef CONFIG_XARRAY_MULTI
	/* An entry outside the stored range (65) is not a conflict */
	xa_store_index(xa, 63, GFP_KERNEL);
	xa_store_index(xa, 65, GFP_KERNEL);
	__check_store_iter(xa, 64, 2, 1);
	xa_erase_index(xa, 63);
#endif
	XA_BUG_ON(xa, !xa_empty(xa));
}
713
/*
 * xa_find()/xa_find_after() against a multi-index entry: searching from
 * anywhere inside the entry's range finds it, and xa_find_after() skips
 * past the whole range to the next entry.
 */
static noinline void check_multi_find(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned long index;

	/* Order-2 entry covering indices 12-15, plus a single entry at 16 */
	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);

	index = 0;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	XA_BUG_ON(xa, index != 12);
	index = 13;
	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(12));
	/* The reported index lands somewhere within the multi-index range */
	XA_BUG_ON(xa, (index < 12) || (index >= 16));
	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
			xa_mk_value(16));
	XA_BUG_ON(xa, index != 16);

	xa_erase_index(xa, 12);
	xa_erase_index(xa, 16);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
739
/*
 * Iterate with xas_for_each() while erasing the multi-index entry the
 * iterator is standing on; the iteration must cope with the entry (and its
 * node) going away underneath it.
 */
static noinline void check_multi_find_2(struct xarray *xa)
{
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
	unsigned int i, j;
	void *entry;

	for (i = 0; i < max_order; i++) {
		unsigned long index = 1UL << i;
		/* Start the iteration at every offset inside the entry */
		for (j = 0; j < index; j++) {
			XA_STATE(xas, xa, j + index);
			xa_store_index(xa, index - 1, GFP_KERNEL);
			xa_store_order(xa, index, i, xa_mk_index(index),
					GFP_KERNEL);
			rcu_read_lock();
			xas_for_each(&xas, entry, ULONG_MAX) {
				/* Erase the entry we are iterating over */
				xa_erase_index(xa, index);
			}
			rcu_read_unlock();
			xa_erase_index(xa, index - 1);
			XA_BUG_ON(xa, !xa_empty(xa));
		}
	}
}
763
/*
 * Exhaustive xa_find() test: with entries (and XA_MARK_0) present at
 * indices i and j (j < i), a search starting at any k must find j, then i,
 * then nothing — for both XA_PRESENT and XA_MARK_0 searches.
 */
static noinline void check_find_1(struct xarray *xa)
{
	unsigned long i, j, k;

	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Check xa_find with all pairs between 0 and 99 inclusive,
	 * starting at every index between 0 and 99
	 */
	for (i = 0; i < 100; i++) {
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
		xa_set_mark(xa, i, XA_MARK_0);
		for (j = 0; j < i; j++) {
			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
					NULL);
			xa_set_mark(xa, j, XA_MARK_0);
			for (k = 0; k < 100; k++) {
				unsigned long index = k;
				void *entry = xa_find(xa, &index, ULONG_MAX,
								XA_PRESENT);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);

				/* Same expectations for a marked search */
				index = k;
				entry = xa_find(xa, &index, ULONG_MAX,
								XA_MARK_0);
				if (k <= j)
					XA_BUG_ON(xa, index != j);
				else if (k <= i)
					XA_BUG_ON(xa, index != i);
				else
					XA_BUG_ON(xa, entry != NULL);
			}
			xa_erase_index(xa, j);
			/* Erasing j clears its mark but leaves i's intact */
			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
		}
		xa_erase_index(xa, i);
		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
	}
	XA_BUG_ON(xa, !xa_empty(xa));
}
811
/*
 * xa_for_each() sees nothing in an empty array, and after storing entries
 * at 0..i it visits each exactly once, in index order, with the matching
 * value.
 */
static noinline void check_find_2(struct xarray *xa)
{
	void *entry;
	unsigned long i, j, index = 0;

	/* Iterating an empty array must execute the body zero times */
	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		XA_BUG_ON(xa, true);
	}

	for (i = 0; i < 1024; i++) {
		/* index holds i here: each pass appends one more entry */
		xa_store_index(xa, index, GFP_KERNEL);
		j = 0;
		index = 0;
		xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
			XA_BUG_ON(xa, xa_mk_index(index) != entry);
			XA_BUG_ON(xa, index != j++);
		}
	}

	xa_destroy(xa);
}
833
/*
 * After an unsuccessful xas_for_each_marked() walk (start index beyond the
 * search limit), the xa_state must be left at XAS_RESTART rather than
 * pointing into the tree.
 */
static noinline void check_find_3(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	unsigned long i, j, k;
	void *entry;

	for (i = 0; i < 100; i++) {
		for (j = 0; j < 100; j++) {
			for (k = 0; k < 100; k++) {
				xas_set(&xas, j);
				/* Walk from j up to k; body intentionally empty */
				xas_for_each_marked(&xas, entry, k, XA_MARK_0)
					;
				if (j > k)
					XA_BUG_ON(xa,
						xas.xa_node != XAS_RESTART);
			}
		}
		/* Grow the set of marked entries one index at a time */
		xa_store_index(xa, i, GFP_KERNEL);
		xa_set_mark(xa, i, XA_MARK_0);
	}
	xa_destroy(xa);
}
856
/* Run all the find/iteration regression tests against @xa. */
static noinline void check_find(struct xarray *xa)
{
	check_find_1(xa);
	check_find_2(xa);
	check_find_3(xa);
	check_multi_find(xa);
	check_multi_find_2(xa);
}
865
/* See find_swap_entry() in mm/shmem.c */
/*
 * Linear search for @item by value, pausing the iteration every four
 * entries (mirroring shmem's pattern to exercise xas_pause()).  Returns
 * the index of @item, or -1 (as an unsigned long) if it is not present.
 */
static noinline unsigned long xa_find_entry(struct xarray *xa, void *item)
{
	XA_STATE(xas, xa, 0);
	unsigned int checked = 0;
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (xas_retry(&xas, entry))
			continue;
		if (entry == item)
			break;
		checked++;
		/* Pause (and implicitly restart) every fourth entry */
		if ((checked % 4) != 0)
			continue;
		xas_pause(&xas);
	}
	rcu_read_unlock();

	/* entry is NULL when the walk completed without a match */
	return entry ? xas.xa_index : -1;
}
888
/*
 * Verify xa_find_entry() across many orders and offsets of multi-index
 * entries, and confirm it compares entries by pointer identity, not by
 * index (xa itself is never stored, so searching for it finds nothing).
 */
static noinline void check_find_entry(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;
	unsigned long offset, index;

	for (order = 0; order < 20; order++) {
		for (offset = 0; offset < (1UL << (order + 3));
		     offset += (1UL << order)) {
			for (index = 0; index < (1UL << (order + 5));
			     index += (1UL << order)) {
				xa_store_order(xa, index, order,
						xa_mk_index(index), GFP_KERNEL);
				XA_BUG_ON(xa, xa_load(xa, index) !=
						xa_mk_index(index));
				XA_BUG_ON(xa, xa_find_entry(xa,
						xa_mk_index(index)) != index);
			}
			/* A pointer never stored must not be found */
			XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
			xa_destroy(xa);
		}
	}
#endif

	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
	XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
	/*
	 * xa_mk_index(ULONG_MAX) constructs a distinct value each call, so
	 * pointer-identity search must miss even though the index is stored.
	 */
	XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
	xa_erase_index(xa, ULONG_MAX);
	XA_BUG_ON(xa, !xa_empty(xa));
}
920
/*
 * Walk forwards and backwards with xas_next()/xas_prev() over a sparse
 * array holding entries only at 0 and @idx, checking the reported index
 * and entry at every step, including wrap-around at ULONG_MAX.
 */
static noinline void check_move_small(struct xarray *xa, unsigned long idx)
{
	XA_STATE(xas, xa, 0);
	unsigned long i;

	xa_store_index(xa, 0, GFP_KERNEL);
	xa_store_index(xa, idx, GFP_KERNEL);

	rcu_read_lock();
	/* Forward walk well past idx: entries only at 0 and idx */
	for (i = 0; i < idx * 4; i++) {
		void *entry = xas_next(&xas);
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	}
	xas_next(&xas);
	XA_BUG_ON(xa, xas.xa_index != i);

	/* Backward walk over the same range */
	do {
		void *entry = xas_prev(&xas);
		i--;
		if (i <= idx)
			XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
		XA_BUG_ON(xa, xas.xa_index != i);
		if (i == 0 || i == idx)
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
	} while (i > 0);

	/* Wrap-around: next past ULONG_MAX lands on index 0, and back */
	xas_set(&xas, ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	XA_BUG_ON(xa, xas_next(&xas) != xa_mk_value(0));
	XA_BUG_ON(xa, xas.xa_index != 0);
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);
	rcu_read_unlock();

	xa_erase_index(xa, 0);
	xa_erase_index(xa, idx);
	XA_BUG_ON(xa, !xa_empty(xa));
}
968
/*
 * Walk a densely populated 2^16-entry array backwards and forwards with
 * xas_prev()/xas_next(), then punch a hole in the middle and repeat,
 * checking that present and absent indices are reported correctly.
 */
static noinline void check_move(struct xarray *xa)
{
	XA_STATE(xas, xa, (1 << 16) - 1);
	unsigned long i;

	for (i = 0; i < (1 << 16); i++)
		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);

	rcu_read_lock();
	/* Backward over the fully populated array */
	do {
		void *entry = xas_prev(&xas);
		i--;
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	/* Stepping back from 0 wraps to ULONG_MAX with no entry */
	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		XA_BUG_ON(xa, entry != xa_mk_index(i));
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	/* Erase the middle so the walks must skip empty slots */
	for (i = (1 << 8); i < (1 << 15); i++)
		xa_erase_index(xa, i);

	i = xas.xa_index;

	rcu_read_lock();
	do {
		void *entry = xas_prev(&xas);
		i--;
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
	} while (i != 0);

	XA_BUG_ON(xa, xas_prev(&xas) != NULL);
	XA_BUG_ON(xa, xas.xa_index != ULONG_MAX);

	do {
		void *entry = xas_next(&xas);
		if ((i < (1 << 8)) || (i >= (1 << 15)))
			XA_BUG_ON(xa, entry != xa_mk_index(i));
		else
			XA_BUG_ON(xa, entry != NULL);
		XA_BUG_ON(xa, i != xas.xa_index);
		i++;
	} while (i < (1 << 16));
	rcu_read_unlock();

	xa_destroy(xa);

	/* Sparse two-entry variants: powers of two and their predecessors */
	for (i = 0; i < 16; i++)
		check_move_small(xa, 1UL << i);

	for (i = 2; i < 16; i++)
		check_move_small(xa, (1UL << i) - 1);
}
1034
/*
 * Use xas_create_range() to populate 2^order individual entries starting
 * at @index, storing xa_mk_index(index + i) into each slot.  The range
 * must be conflict-free beforehand; retries on allocation failure.
 */
static noinline void xa_store_many_order(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	unsigned int i = 0;

	do {
		xas_lock(&xas);
		XA_BUG_ON(xa, xas_find_conflict(&xas));
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1U << order); i++) {
			/* Each slot must have been empty */
			XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));
}
1057
1058static noinline void check_create_range_1(struct xarray *xa,
1059 unsigned long index, unsigned order)
1060{
1061 unsigned long i;
1062
1063 xa_store_many_order(xa, index, order);
1064 for (i = index; i < index + (1UL << order); i++)
1065 xa_erase_index(xa, i);
1066 XA_BUG_ON(xa, !xa_empty(xa));
1067}
1068
1069static noinline void check_create_range_2(struct xarray *xa, unsigned order)
1070{
1071 unsigned long i;
1072 unsigned long nr = 1UL << order;
1073
1074 for (i = 0; i < nr * nr; i += nr)
1075 xa_store_many_order(xa, i, order);
1076 for (i = 0; i < nr * nr; i++)
1077 xa_erase_index(xa, i);
1078 XA_BUG_ON(xa, !xa_empty(xa));
1079}
1080
/*
 * xas_create_range() must be a no-op on an xa_state that already carries
 * an error, leaving the error code untouched.
 */
static noinline void check_create_range_3(void)
{
	XA_STATE(xas, NULL, 0);
	xas_set_err(&xas, -EEXIST);
	xas_create_range(&xas);
	XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST);
}
1088
/*
 * Like xa_store_many_order(), but with one entry pre-existing at @index:
 * xas_create_range() must tolerate the occupied slot, and storing over it
 * returns the old entry while every other slot returns NULL.
 */
static noinline void check_create_range_4(struct xarray *xa,
		unsigned long index, unsigned order)
{
	XA_STATE_ORDER(xas, xa, index, order);
	/* base is index rounded down to the order-aligned start */
	unsigned long base = xas.xa_index;
	unsigned long i = 0;

	xa_store_index(xa, index, GFP_KERNEL);
	do {
		xas_lock(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < (1UL << order); i++) {
			void *old = xas_store(&xas, xa_mk_index(base + i));
			if (xas.xa_index == index)
				XA_BUG_ON(xa, old != xa_mk_index(base + i));
			else
				XA_BUG_ON(xa, old != NULL);
			xas_next(&xas);
		}
unlock:
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	XA_BUG_ON(xa, xas_error(&xas));

	for (i = base; i < base + (1UL << order); i++)
		xa_erase_index(xa, i);
	XA_BUG_ON(xa, !xa_empty(xa));
}
1120
/*
 * Drive the xas_create_range() tests over every order with aligned,
 * unaligned and far-away (1 << 24) starting indices.
 */
static noinline void check_create_range(struct xarray *xa)
{
	unsigned int order;
	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 12 : 1;

	for (order = 0; order < max_order; order++) {
		check_create_range_1(xa, 0, order);
		check_create_range_1(xa, 1U << order, order);
		check_create_range_1(xa, 2U << order, order);
		check_create_range_1(xa, 3U << order, order);
		check_create_range_1(xa, 1U << 24, order);
		/* nr * nr in range_2 would overflow for large orders */
		if (order < 10)
			check_create_range_2(xa, order);

		check_create_range_4(xa, 0, order);
		check_create_range_4(xa, 1U << order, order);
		check_create_range_4(xa, 2U << order, order);
		check_create_range_4(xa, 3U << order, order);
		check_create_range_4(xa, 1U << 24, order);

		/* Unaligned pre-existing indices inside the range */
		check_create_range_4(xa, 1, order);
		check_create_range_4(xa, (1U << order) + 1, order);
		check_create_range_4(xa, (2U << order) + 1, order);
		check_create_range_4(xa, (2U << order) - 1, order);
		check_create_range_4(xa, (3U << order) + 1, order);
		check_create_range_4(xa, (3U << order) - 1, order);
		check_create_range_4(xa, (1U << 24) + 1, order);
	}

	check_create_range_3();
}
1152
1153static noinline void __check_store_range(struct xarray *xa, unsigned long first,
1154 unsigned long last)
1155{
1156#ifdef CONFIG_XARRAY_MULTI
1157 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
1158
1159 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
1160 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
1161 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
1162 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
1163
1164 xa_store_range(xa, first, last, NULL, GFP_KERNEL);
1165#endif
1166
1167 XA_BUG_ON(xa, !xa_empty(xa));
1168}
1169
1170static noinline void check_store_range(struct xarray *xa)
1171{
1172 unsigned long i, j;
1173
1174 for (i = 0; i < 128; i++) {
1175 for (j = i; j < 128; j++) {
1176 __check_store_range(xa, i, j);
1177 __check_store_range(xa, 128 + i, 128 + j);
1178 __check_store_range(xa, 4095 + i, 4095 + j);
1179 __check_store_range(xa, 4096 + i, 4096 + j);
1180 __check_store_range(xa, 123456 + i, 123456 + j);
1181 __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
1182 }
1183 }
1184}
1185
1186static LIST_HEAD(shadow_nodes);
1187
1188static void test_update_node(struct xa_node *node)
1189{
1190 if (node->count && node->count == node->nr_values) {
1191 if (list_empty(&node->private_list))
1192 list_add(&shadow_nodes, &node->private_list);
1193 } else {
1194 if (!list_empty(&node->private_list))
1195 list_del_init(&node->private_list);
1196 }
1197}
1198
/*
 * Detach every node on shadow_nodes from its array by storing NULL into
 * the parent slot that points at it, the way workingset shadow-node
 * reclaim does (see mm/workingset.c — NOTE(review): modelled on that
 * code; confirm against the current implementation).
 */
static noinline void shadow_remove(struct xarray *xa)
{
	struct xa_node *node;

	xa_lock(xa);
	while ((node = list_first_entry_or_null(&shadow_nodes,
					struct xa_node, private_list))) {
		XA_STATE(xas, node->array, 0);
		XA_BUG_ON(xa, node->array != xa);
		list_del_init(&node->private_list);
		/* Point the xa_state at the parent slot holding this node */
		xas.xa_node = xa_parent_locked(node->array, node);
		xas.xa_offset = node->offset;
		xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
		xas_set_update(&xas, test_update_node);
		xas_store(&xas, NULL);
	}
	xa_unlock(xa);
}
1217
/*
 * Verify the update-node callback tracks fully value-populated nodes:
 * two adjacent values put the node on shadow_nodes, mixing in a pointer
 * entry takes it off, and shadow_remove() empties the array.
 */
static noinline void check_workingset(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	xas_set_update(&xas, test_update_node);

	do {
		xas_lock(&xas);
		xas_store(&xas, xa_mk_value(0));
		xas_next(&xas);
		xas_store(&xas, xa_mk_value(1));
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	/* Node holds only values -> it must be on the shadow list */
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	xas_lock(&xas);
	xas_next(&xas);
	/* Storing a non-value (pointer) entry takes the node off the list */
	xas_store(&xas, &xas);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));

	/* Restoring a value puts it back on */
	xas_store(&xas, xa_mk_value(2));
	xas_unlock(&xas);
	XA_BUG_ON(xa, list_empty(&shadow_nodes));

	shadow_remove(xa);
	XA_BUG_ON(xa, !list_empty(&shadow_nodes));
	XA_BUG_ON(xa, !xa_empty(xa));
}
1246
/*
 * Check that the pointer / value / sibling entries are accounted the
 * way we expect them to be.
 */
static noinline void check_account(struct xarray *xa)
{
#ifdef CONFIG_XARRAY_MULTI
	unsigned int order;

	for (order = 1; order < 12; order++) {
		XA_STATE(xas, xa, 1 << order);

		/* A multi-order pointer entry counts toward count only */
		xa_store_order(xa, 0, order, xa, GFP_KERNEL);
		rcu_read_lock();
		xas_load(&xas);
		XA_BUG_ON(xa, xas.xa_node->count == 0);
		XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
		rcu_read_unlock();

		/*
		 * Adding a multi-order value entry: the node then holds one
		 * pointer and one value of equal size, so count is exactly
		 * twice nr_values (siblings presumably included in count —
		 * TODO confirm against xarray.c accounting).
		 */
		xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
				GFP_KERNEL);
		XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);

		/* Erasing the value entry drops nr_values back to zero */
		xa_erase(xa, 1 << order);
		XA_BUG_ON(xa, xas.xa_node->nr_values != 0);

		xa_erase(xa, 0);
		XA_BUG_ON(xa, !xa_empty(xa));
	}
#endif
}
1279
/*
 * xa_destroy() must empty the array whether it holds nothing, a single
 * entry anywhere (including ULONG_MAX), or a multi-index entry.
 */
static noinline void check_destroy(struct xarray *xa)
{
	unsigned long index;

	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an empty array is a no-op */
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

	/* Destroying an array with a single entry */
	for (index = 0; index < 1000; index++) {
		xa_store_index(xa, index, GFP_KERNEL);
		XA_BUG_ON(xa, xa_empty(xa));
		xa_destroy(xa);
		XA_BUG_ON(xa, !xa_empty(xa));
	}

	/* Destroying an array with a single entry at ULONG_MAX */
	xa_store(xa, ULONG_MAX, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));

#ifdef CONFIG_XARRAY_MULTI
	/* Destroying an array with a multi-index entry */
	xa_store_order(xa, 1 << 11, 11, xa, GFP_KERNEL);
	XA_BUG_ON(xa, xa_empty(xa));
	xa_destroy(xa);
	XA_BUG_ON(xa, !xa_empty(xa));
#endif
}
1312
1313static DEFINE_XARRAY(array);
1314
/*
 * Module entry point: run every XArray regression test in sequence and
 * report the pass count.  Returns 0 only if all tests passed.
 */
static int xarray_checks(void)
{
	check_xa_err(&array);
	check_xas_retry(&array);
	check_xa_load(&array);
	check_xa_mark(&array);
	check_xa_shrink(&array);
	check_xas_erase(&array);
	check_cmpxchg(&array);
	check_reserve(&array);
	check_multi_store(&array);
	check_xa_alloc();
	check_find(&array);
	check_find_entry(&array);
	check_account(&array);
	check_destroy(&array);
	check_move(&array);
	check_create_range(&array);
	check_store_range(&array);
	check_store_iter(&array);

	/* Indices chosen to land in different node levels/offsets */
	check_workingset(&array, 0);
	check_workingset(&array, 64);
	check_workingset(&array, 4096);

	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
	return (tests_run == tests_passed) ? 0 : -EINVAL;
}
1343
/* Module exit: nothing to tear down — each test cleans up after itself. */
static void xarray_exit(void)
{
}
1347
1348module_init(xarray_checks);
1349module_exit(xarray_exit);
1350MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
1351MODULE_LICENSE("GPL");
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 59fee96c29a0..e4162f59a81c 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
427EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds); 427EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
428 428
429 429
430void __noreturn 430void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
431__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
432{ 431{
433 unsigned long flags; 432 unsigned long flags;
434 433
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 3744b2a8e591..c2bfbcaeb3dc 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -8,7 +8,7 @@ unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n
8{ 8{
9 unsigned long res = n; 9 unsigned long res = n;
10 might_fault(); 10 might_fault();
11 if (likely(access_ok(VERIFY_READ, from, n))) { 11 if (likely(access_ok(from, n))) {
12 kasan_check_write(to, n); 12 kasan_check_write(to, n);
13 res = raw_copy_from_user(to, from, n); 13 res = raw_copy_from_user(to, from, n);
14 } 14 }
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(_copy_from_user);
23unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n) 23unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
24{ 24{
25 might_fault(); 25 might_fault();
26 if (likely(access_ok(VERIFY_WRITE, to, n))) { 26 if (likely(access_ok(to, n))) {
27 kasan_check_read(from, n); 27 kasan_check_read(from, n);
28 n = raw_copy_to_user(to, from, n); 28 n = raw_copy_to_user(to, from, n);
29 } 29 }
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 812e59e13fe6..3add92329bae 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -30,6 +30,7 @@
30#include <linux/ioport.h> 30#include <linux/ioport.h>
31#include <linux/dcache.h> 31#include <linux/dcache.h>
32#include <linux/cred.h> 32#include <linux/cred.h>
33#include <linux/rtc.h>
33#include <linux/uuid.h> 34#include <linux/uuid.h>
34#include <linux/of.h> 35#include <linux/of.h>
35#include <net/addrconf.h> 36#include <net/addrconf.h>
@@ -613,6 +614,109 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
613} 614}
614 615
615static noinline_for_stack 616static noinline_for_stack
617char *pointer_string(char *buf, char *end, const void *ptr,
618 struct printf_spec spec)
619{
620 spec.base = 16;
621 spec.flags |= SMALL;
622 if (spec.field_width == -1) {
623 spec.field_width = 2 * sizeof(ptr);
624 spec.flags |= ZEROPAD;
625 }
626
627 return number(buf, end, (unsigned long int)ptr, spec);
628}
629
630/* Make pointers available for printing early in the boot sequence. */
631static int debug_boot_weak_hash __ro_after_init;
632
633static int __init debug_boot_weak_hash_enable(char *str)
634{
635 debug_boot_weak_hash = 1;
636 pr_info("debug_boot_weak_hash enabled\n");
637 return 0;
638}
639early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
640
641static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
642static siphash_key_t ptr_key __read_mostly;
643
644static void enable_ptr_key_workfn(struct work_struct *work)
645{
646 get_random_bytes(&ptr_key, sizeof(ptr_key));
647 /* Needs to run from preemptible context */
648 static_branch_disable(&not_filled_random_ptr_key);
649}
650
651static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
652
653static void fill_random_ptr_key(struct random_ready_callback *unused)
654{
655 /* This may be in an interrupt handler. */
656 queue_work(system_unbound_wq, &enable_ptr_key_work);
657}
658
659static struct random_ready_callback random_ready = {
660 .func = fill_random_ptr_key
661};
662
663static int __init initialize_ptr_random(void)
664{
665 int key_size = sizeof(ptr_key);
666 int ret;
667
668 /* Use hw RNG if available. */
669 if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
670 static_branch_disable(&not_filled_random_ptr_key);
671 return 0;
672 }
673
674 ret = add_random_ready_callback(&random_ready);
675 if (!ret) {
676 return 0;
677 } else if (ret == -EALREADY) {
678 /* This is in preemptible context */
679 enable_ptr_key_workfn(&enable_ptr_key_work);
680 return 0;
681 }
682
683 return ret;
684}
685early_initcall(initialize_ptr_random);
686
687/* Maps a pointer to a 32 bit unique identifier. */
688static char *ptr_to_id(char *buf, char *end, const void *ptr,
689 struct printf_spec spec)
690{
691 const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
692 unsigned long hashval;
693
694 /* When debugging early boot use non-cryptographically secure hash. */
695 if (unlikely(debug_boot_weak_hash)) {
696 hashval = hash_long((unsigned long)ptr, 32);
697 return pointer_string(buf, end, (const void *)hashval, spec);
698 }
699
700 if (static_branch_unlikely(&not_filled_random_ptr_key)) {
701 spec.field_width = 2 * sizeof(ptr);
702 /* string length must be less than default_width */
703 return string(buf, end, str, spec);
704 }
705
706#ifdef CONFIG_64BIT
707 hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
708 /*
709 * Mask off the first 32 bits, this makes explicit that we have
710 * modified the address (and 32 bits is plenty for a unique ID).
711 */
712 hashval = hashval & 0xffffffff;
713#else
714 hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
715#endif
716 return pointer_string(buf, end, (const void *)hashval, spec);
717}
718
719static noinline_for_stack
616char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec, 720char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
617 const char *fmt) 721 const char *fmt)
618{ 722{
@@ -719,6 +823,20 @@ static const struct printf_spec default_dec_spec = {
719 .precision = -1, 823 .precision = -1,
720}; 824};
721 825
826static const struct printf_spec default_dec02_spec = {
827 .base = 10,
828 .field_width = 2,
829 .precision = -1,
830 .flags = ZEROPAD,
831};
832
833static const struct printf_spec default_dec04_spec = {
834 .base = 10,
835 .field_width = 4,
836 .precision = -1,
837 .flags = ZEROPAD,
838};
839
722static noinline_for_stack 840static noinline_for_stack
723char *resource_string(char *buf, char *end, struct resource *res, 841char *resource_string(char *buf, char *end, struct resource *res,
724 struct printf_spec spec, const char *fmt) 842 struct printf_spec spec, const char *fmt)
@@ -1357,20 +1475,6 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
1357 return string(buf, end, uuid, spec); 1475 return string(buf, end, uuid, spec);
1358} 1476}
1359 1477
1360static noinline_for_stack
1361char *pointer_string(char *buf, char *end, const void *ptr,
1362 struct printf_spec spec)
1363{
1364 spec.base = 16;
1365 spec.flags |= SMALL;
1366 if (spec.field_width == -1) {
1367 spec.field_width = 2 * sizeof(ptr);
1368 spec.flags |= ZEROPAD;
1369 }
1370
1371 return number(buf, end, (unsigned long int)ptr, spec);
1372}
1373
1374int kptr_restrict __read_mostly; 1478int kptr_restrict __read_mostly;
1375 1479
1376static noinline_for_stack 1480static noinline_for_stack
@@ -1421,7 +1525,8 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
1421} 1525}
1422 1526
1423static noinline_for_stack 1527static noinline_for_stack
1424char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt) 1528char *netdev_bits(char *buf, char *end, const void *addr,
1529 struct printf_spec spec, const char *fmt)
1425{ 1530{
1426 unsigned long long num; 1531 unsigned long long num;
1427 int size; 1532 int size;
@@ -1432,9 +1537,7 @@ char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
1432 size = sizeof(netdev_features_t); 1537 size = sizeof(netdev_features_t);
1433 break; 1538 break;
1434 default: 1539 default:
1435 num = (unsigned long)addr; 1540 return ptr_to_id(buf, end, addr, spec);
1436 size = sizeof(unsigned long);
1437 break;
1438 } 1541 }
1439 1542
1440 return special_hex_number(buf, end, num, size); 1543 return special_hex_number(buf, end, num, size);
@@ -1462,6 +1565,87 @@ char *address_val(char *buf, char *end, const void *addr, const char *fmt)
1462} 1565}
1463 1566
1464static noinline_for_stack 1567static noinline_for_stack
1568char *date_str(char *buf, char *end, const struct rtc_time *tm, bool r)
1569{
1570 int year = tm->tm_year + (r ? 0 : 1900);
1571 int mon = tm->tm_mon + (r ? 0 : 1);
1572
1573 buf = number(buf, end, year, default_dec04_spec);
1574 if (buf < end)
1575 *buf = '-';
1576 buf++;
1577
1578 buf = number(buf, end, mon, default_dec02_spec);
1579 if (buf < end)
1580 *buf = '-';
1581 buf++;
1582
1583 return number(buf, end, tm->tm_mday, default_dec02_spec);
1584}
1585
1586static noinline_for_stack
1587char *time_str(char *buf, char *end, const struct rtc_time *tm, bool r)
1588{
1589 buf = number(buf, end, tm->tm_hour, default_dec02_spec);
1590 if (buf < end)
1591 *buf = ':';
1592 buf++;
1593
1594 buf = number(buf, end, tm->tm_min, default_dec02_spec);
1595 if (buf < end)
1596 *buf = ':';
1597 buf++;
1598
1599 return number(buf, end, tm->tm_sec, default_dec02_spec);
1600}
1601
1602static noinline_for_stack
1603char *rtc_str(char *buf, char *end, const struct rtc_time *tm, const char *fmt)
1604{
1605 bool have_t = true, have_d = true;
1606 bool raw = false;
1607 int count = 2;
1608
1609 switch (fmt[count]) {
1610 case 'd':
1611 have_t = false;
1612 count++;
1613 break;
1614 case 't':
1615 have_d = false;
1616 count++;
1617 break;
1618 }
1619
1620 raw = fmt[count] == 'r';
1621
1622 if (have_d)
1623 buf = date_str(buf, end, tm, raw);
1624 if (have_d && have_t) {
1625 /* Respect ISO 8601 */
1626 if (buf < end)
1627 *buf = 'T';
1628 buf++;
1629 }
1630 if (have_t)
1631 buf = time_str(buf, end, tm, raw);
1632
1633 return buf;
1634}
1635
1636static noinline_for_stack
1637char *time_and_date(char *buf, char *end, void *ptr, struct printf_spec spec,
1638 const char *fmt)
1639{
1640 switch (fmt[1]) {
1641 case 'R':
1642 return rtc_str(buf, end, (const struct rtc_time *)ptr, fmt);
1643 default:
1644 return ptr_to_id(buf, end, ptr, spec);
1645 }
1646}
1647
1648static noinline_for_stack
1465char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, 1649char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1466 const char *fmt) 1650 const char *fmt)
1467{ 1651{
@@ -1474,7 +1658,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
1474#ifdef CONFIG_COMMON_CLK 1658#ifdef CONFIG_COMMON_CLK
1475 return string(buf, end, __clk_get_name(clk), spec); 1659 return string(buf, end, __clk_get_name(clk), spec);
1476#else 1660#else
1477 return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long)); 1661 return ptr_to_id(buf, end, clk, spec);
1478#endif 1662#endif
1479 } 1663 }
1480} 1664}
@@ -1596,6 +1780,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1596 fmt = "f"; 1780 fmt = "f";
1597 1781
1598 for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) { 1782 for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
1783 int precision;
1599 if (pass) { 1784 if (pass) {
1600 if (buf < end) 1785 if (buf < end)
1601 *buf = ':'; 1786 *buf = ':';
@@ -1607,7 +1792,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1607 buf = device_node_gen_full_name(dn, buf, end); 1792 buf = device_node_gen_full_name(dn, buf, end);
1608 break; 1793 break;
1609 case 'n': /* name */ 1794 case 'n': /* name */
1610 buf = string(buf, end, dn->name, str_spec); 1795 p = kbasename(of_node_full_name(dn));
1796 precision = str_spec.precision;
1797 str_spec.precision = strchrnul(p, '@') - p;
1798 buf = string(buf, end, p, str_spec);
1799 str_spec.precision = precision;
1611 break; 1800 break;
1612 case 'p': /* phandle */ 1801 case 'p': /* phandle */
1613 buf = number(buf, end, (unsigned int)dn->phandle, num_spec); 1802 buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
@@ -1651,94 +1840,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
1651 return widen_string(buf, buf - buf_start, end, spec); 1840 return widen_string(buf, buf - buf_start, end, spec);
1652} 1841}
1653 1842
/* Make pointers available for printing early in the boot sequence. */
static int debug_boot_weak_hash __ro_after_init;

/* "debug_boot_weak_hash" on the kernel command line enables a weak,
 * non-cryptographic hash for %p so pointers are printable before the
 * random siphash key is available.
 */
static int __init debug_boot_weak_hash_enable(char *str)
{
	debug_boot_weak_hash = 1;
	pr_info("debug_boot_weak_hash enabled\n");
	return 0;
}
early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
1664
/* True until ptr_key has been seeded; gates hashed %p printing. */
static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
/* siphash key used to hash pointer values for %p output. */
static siphash_key_t ptr_key __read_mostly;

/* Seed the pointer-hashing key, then allow hashed %p output. */
static void enable_ptr_key_workfn(struct work_struct *work)
{
	get_random_bytes(&ptr_key, sizeof(ptr_key));
	/* Needs to run from preemptible context */
	static_branch_disable(&not_filled_random_ptr_key);
}

static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
1676
/* Called once the RNG is ready; defers key generation to a workqueue. */
static void fill_random_ptr_key(struct random_ready_callback *unused)
{
	/* This may be in an interrupt handler. */
	queue_work(system_unbound_wq, &enable_ptr_key_work);
}

static struct random_ready_callback random_ready = {
	.func = fill_random_ptr_key
};
1686
/* Arrange for the %p hashing key to be seeded as early as possible. */
static int __init initialize_ptr_random(void)
{
	int key_size = sizeof(ptr_key);
	int ret;

	/* Use hw RNG if available. */
	if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
		static_branch_disable(&not_filled_random_ptr_key);
		return 0;
	}

	/* Otherwise wait for the software RNG to become ready. */
	ret = add_random_ready_callback(&random_ready);
	if (!ret) {
		return 0;
	} else if (ret == -EALREADY) {
		/* This is in preemptible context */
		enable_ptr_key_workfn(&enable_ptr_key_work);
		return 0;
	}

	return ret;
}
early_initcall(initialize_ptr_random);
1710
/* Maps a pointer to a 32 bit unique identifier. */
static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
{
	const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
	unsigned long hashval;

	/* When debugging early boot use non-cryptographically secure hash. */
	if (unlikely(debug_boot_weak_hash)) {
		hashval = hash_long((unsigned long)ptr, 32);
		return pointer_string(buf, end, (const void *)hashval, spec);
	}

	/* No key yet: print a fixed placeholder rather than leak bits. */
	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
		spec.field_width = 2 * sizeof(ptr);
		/* string length must be less than default_width */
		return string(buf, end, str, spec);
	}

#ifdef CONFIG_64BIT
	hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
	/*
	 * Mask off the first 32 bits, this makes explicit that we have
	 * modified the address (and 32 bits is plenty for a unique ID).
	 */
	hashval = hashval & 0xffffffff;
#else
	hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
#endif
	return pointer_string(buf, end, (const void *)hashval, spec);
}
1741
1742/* 1843/*
1743 * Show a '%p' thing. A kernel extension is that the '%p' is followed 1844 * Show a '%p' thing. A kernel extension is that the '%p' is followed
1744 * by an extra set of alphanumeric characters that are extended format 1845 * by an extra set of alphanumeric characters that are extended format
@@ -1823,6 +1924,8 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
1823 * - 'd[234]' For a dentry name (optionally 2-4 last components) 1924 * - 'd[234]' For a dentry name (optionally 2-4 last components)
1824 * - 'D[234]' Same as 'd' but for a struct file 1925 * - 'D[234]' Same as 'd' but for a struct file
1825 * - 'g' For block_device name (gendisk + partition number) 1926 * - 'g' For block_device name (gendisk + partition number)
1927 * - 't[R][dt][r]' For time and date as represented:
1928 * R struct rtc_time
1826 * - 'C' For a clock, it prints the name (Common Clock Framework) or address 1929 * - 'C' For a clock, it prints the name (Common Clock Framework) or address
1827 * (legacy clock framework) of the clock 1930 * (legacy clock framework) of the clock
1828 * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address 1931 * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address
@@ -1833,17 +1936,15 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
1833 * p page flags (see struct page) given as pointer to unsigned long 1936 * p page flags (see struct page) given as pointer to unsigned long
1834 * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t 1937 * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t
1835 * v vma flags (VM_*) given as pointer to unsigned long 1938 * v vma flags (VM_*) given as pointer to unsigned long
1836 * - 'O' For a kobject based struct. Must be one of the following: 1939 * - 'OF[fnpPcCF]' For a device tree object
1837 * - 'OF[fnpPcCF]' For a device tree object 1940 * Without any optional arguments prints the full_name
1838 * Without any optional arguments prints the full_name 1941 * f device node full_name
1839 * f device node full_name 1942 * n device node name
1840 * n device node name 1943 * p device node phandle
1841 * p device node phandle 1944 * P device node path spec (name + @unit)
1842 * P device node path spec (name + @unit) 1945 * F device node flags
1843 * F device node flags 1946 * c major compatible string
1844 * c major compatible string 1947 * C full compatible string
1845 * C full compatible string
1846 *
1847 * - 'x' For printing the address. Equivalent to "%lx". 1948 * - 'x' For printing the address. Equivalent to "%lx".
1848 * 1949 *
1849 * ** When making changes please also update: 1950 * ** When making changes please also update:
@@ -1944,11 +2045,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
1944 break; 2045 break;
1945 return restricted_pointer(buf, end, ptr, spec); 2046 return restricted_pointer(buf, end, ptr, spec);
1946 case 'N': 2047 case 'N':
1947 return netdev_bits(buf, end, ptr, fmt); 2048 return netdev_bits(buf, end, ptr, spec, fmt);
1948 case 'a': 2049 case 'a':
1949 return address_val(buf, end, ptr, fmt); 2050 return address_val(buf, end, ptr, fmt);
1950 case 'd': 2051 case 'd':
1951 return dentry_name(buf, end, ptr, spec, fmt); 2052 return dentry_name(buf, end, ptr, spec, fmt);
2053 case 't':
2054 return time_and_date(buf, end, ptr, spec, fmt);
1952 case 'C': 2055 case 'C':
1953 return clock(buf, end, ptr, spec, fmt); 2056 return clock(buf, end, ptr, spec, fmt);
1954 case 'D': 2057 case 'D':
diff --git a/lib/xarray.c b/lib/xarray.c
new file mode 100644
index 000000000000..5f3f9311de89
--- /dev/null
+++ b/lib/xarray.c
@@ -0,0 +1,2015 @@
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * XArray implementation
4 * Copyright (c) 2017 Microsoft Corporation
5 * Author: Matthew Wilcox <willy@infradead.org>
6 */
7
8#include <linux/bitmap.h>
9#include <linux/export.h>
10#include <linux/list.h>
11#include <linux/slab.h>
12#include <linux/xarray.h>
13
14/*
15 * Coding conventions in this file:
16 *
17 * @xa is used to refer to the entire xarray.
18 * @xas is the 'xarray operation state'. It may be either a pointer to
19 * an xa_state, or an xa_state stored on the stack. This is an unfortunate
20 * ambiguity.
21 * @index is the index of the entry being operated on
22 * @mark is an xa_mark_t; a small number indicating one of the mark bits.
23 * @node refers to an xa_node; usually the primary one being operated on by
24 * this function.
25 * @offset is the index into the slots array inside an xa_node.
26 * @parent refers to the @xa_node closer to the head than @node.
27 * @entry refers to something stored in a slot in the xarray
28 */
29
30static inline unsigned int xa_lock_type(const struct xarray *xa)
31{
32 return (__force unsigned int)xa->xa_flags & 3;
33}
34
35static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
36{
37 if (lock_type == XA_LOCK_IRQ)
38 xas_lock_irq(xas);
39 else if (lock_type == XA_LOCK_BH)
40 xas_lock_bh(xas);
41 else
42 xas_lock(xas);
43}
44
45static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
46{
47 if (lock_type == XA_LOCK_IRQ)
48 xas_unlock_irq(xas);
49 else if (lock_type == XA_LOCK_BH)
50 xas_unlock_bh(xas);
51 else
52 xas_unlock(xas);
53}
54
55static inline bool xa_track_free(const struct xarray *xa)
56{
57 return xa->xa_flags & XA_FLAGS_TRACK_FREE;
58}
59
60static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
61{
62 if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
63 xa->xa_flags |= XA_FLAGS_MARK(mark);
64}
65
66static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
67{
68 if (xa->xa_flags & XA_FLAGS_MARK(mark))
69 xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
70}
71
72static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
73{
74 return node->marks[(__force unsigned)mark];
75}
76
77static inline bool node_get_mark(struct xa_node *node,
78 unsigned int offset, xa_mark_t mark)
79{
80 return test_bit(offset, node_marks(node, mark));
81}
82
/* Sets the mark; returns true if the bit was already set beforehand. */
static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_set_bit(offset, node_marks(node, mark));
}
89
/* Clears the mark; returns true if the bit was set beforehand. */
static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
				xa_mark_t mark)
{
	return __test_and_clear_bit(offset, node_marks(node, mark));
}
96
97static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
98{
99 return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
100}
101
102static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
103{
104 bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
105}
106
/* Advance an xa_mark_t to the next mark number (casts around the opaque type). */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
110
/*
 * xas_squash_marks() - Merge all marks to the first entry
 * @xas: Array operation state.
 *
 * Set a mark on the first entry if any entry has it set. Clear marks on
 * all sibling entries.
 */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	/* One past the last slot occupied by this multi-index entry. */
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		/* Nothing set among the sibling slots for this mark. */
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
134
135/* extracts the offset within this node from the index */
136static unsigned int get_offset(unsigned long index, struct xa_node *node)
137{
138 return (index >> node->shift) & XA_CHUNK_MASK;
139}
140
141static void xas_set_offset(struct xa_state *xas)
142{
143 xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
144}
145
/* move the index either forwards (find) or backwards (sibling slot) */
static void xas_move_index(struct xa_state *xas, unsigned long offset)
{
	unsigned int shift = xas->xa_node->shift;
	/* (~XA_CHUNK_MASK) << shift: clears this node's offset bits and all below. */
	xas->xa_index &= ~XA_CHUNK_MASK << shift;
	xas->xa_index += offset << shift;
}
153
154static void xas_advance(struct xa_state *xas)
155{
156 xas->xa_offset++;
157 xas_move_index(xas, xas->xa_offset);
158}
159
/* Flag the walk as out of bounds and return no entry. */
static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
165
/*
 * Starts a walk. If the @xas is already valid, we assume that it's on
 * the right path and just return where we've got to. If we're in an
 * error state, return NULL. If the index is outside the current scope
 * of the xarray, return NULL without changing @xas->xa_node. Otherwise
 * set @xas->xa_node to NULL and return the current head of the array.
 */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		/* A non-node head only covers index 0. */
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		/* Index must fit within the span of the top node. */
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}
194
/* Step one level down into @node, resolving sibling entries. */
static void *xas_descend(struct xa_state *xas, struct xa_node *node)
{
	unsigned int offset = get_offset(xas->xa_index, node);
	void *entry = xa_entry(xas->xa, node, offset);

	xas->xa_node = node;
	/* A sibling entry redirects to the canonical slot of a multi-index entry. */
	if (xa_is_sibling(entry)) {
		offset = xa_to_sibling(entry);
		entry = xa_entry(xas->xa, node, offset);
	}

	xas->xa_offset = offset;
	return entry;
}
209
/**
 * xas_load() - Load an entry from the XArray (advanced).
 * @xas: XArray operation state.
 *
 * Usually walks the @xas to the appropriate state to load the entry
 * stored at xa_index. However, it will do nothing and return %NULL if
 * @xas is in an error state. xas_load() will never expand the tree.
 *
 * If the xa_state is set up to operate on a multi-index entry, xas_load()
 * may return %NULL or an internal entry, even if there are entries
 * present within the range specified by @xas.
 *
 * Context: Any context. The caller should hold the xa_lock or the RCU lock.
 * Return: Usually an entry in the XArray, but see description for exceptions.
 */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		/* Stop descending once nodes are below our operation's order. */
		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(xas_load);
239
/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

/* Sentinel stored in node->array to flag a node awaiting RCU free. */
#define XA_RCU_FREE	((struct xarray *)1)

/* Queue @node for freeing after all current RCU readers are done. */
static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
252
253/*
254 * xas_destroy() - Free any resources allocated during the XArray operation.
255 * @xas: XArray operation state.
256 *
257 * This function is now internal-only.
258 */
259static void xas_destroy(struct xa_state *xas)
260{
261 struct xa_node *node = xas->xa_alloc;
262
263 if (!node)
264 return;
265 XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
266 kmem_cache_free(radix_tree_node_cachep, node);
267 xas->xa_alloc = NULL;
268}
269
/**
 * xas_nomem() - Allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * If we need to add new nodes to the XArray, we try to allocate memory
 * with GFP_NOWAIT while holding the lock, which will usually succeed.
 * If it fails, @xas is flagged as needing memory to continue. The caller
 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
 * the caller should retry the operation.
 *
 * Forward progress is guaranteed as one node is allocated here and
 * stored in the xa_state where it will be found by xas_alloc(). More
 * nodes will likely be found in the slab allocator, but we do not tie
 * them up here.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* Only retry when the previous operation ran out of memory. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the operation can restart from the top. */
	xas->xa_node = XAS_RESTART;
	return true;
}
EXPORT_SYMBOL_GPL(xas_nomem);
302
/*
 * __xas_nomem() - Drop locks and allocate memory if needed.
 * @xas: XArray operation state.
 * @gfp: Memory allocation flags.
 *
 * Internal variant of xas_nomem(): called with the xa_lock held, and
 * drops/retakes it around the allocation only when @gfp may sleep.
 *
 * Return: true if memory was needed, and was successfully allocated.
 */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (gfpflags_allow_blocking(gfp)) {
		/* Sleeping allocation: must not hold the spinlock. */
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	xas->xa_node = XAS_RESTART;
	return true;
}
334
335static void xas_update(struct xa_state *xas, struct xa_node *node)
336{
337 if (xas->xa_update)
338 xas->xa_update(node);
339 else
340 XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
341}
342
/*
 * Get a node for the tree: consume the one preallocated by xas_nomem()
 * if present, otherwise attempt a non-sleeping allocation. The node is
 * initialised as a child of @xas->xa_node (when there is one) with the
 * given @shift. Returns NULL and sets the xa_state error on failure.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Use up the preallocated node. */
		xas->xa_alloc = NULL;
	} else {
		node = kmem_cache_alloc(radix_tree_node_cachep,
					GFP_NOWAIT | __GFP_NOWARN);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
378
#ifdef CONFIG_XARRAY_MULTI
/* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	unsigned long slots = xas->xa_sibs + 1UL;

	return slots << xas->xa_shift;
}
#endif
386
/*
 * Use this to calculate the maximum index that will need to be created
 * in order to add the entry described by @xas. Because we cannot store a
 * multiple-index entry at index 0, the calculation is a little more complex
 * than you might expect.
 */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		unsigned long mask = xas_size(xas) - 1;
		/* Round up to the end of the multi-index range... */
		max |= mask;
		/* ...and, if the range starts at 0, force one index higher. */
		if (mask == max)
			max++;
	}
#endif

	return max;
}
408
409/* The maximum index that can be contained in the array without expanding it */
410static unsigned long max_index(void *entry)
411{
412 if (!xa_is_node(entry))
413 return 0;
414 return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
415}
416
/*
 * Reduce the height of the tree: while the root holds only a single
 * occupied slot 0, replace the root with that slot's contents.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/* A non-node entry can only become the head at shift 0. */
		if (!xa_is_node(entry) && node->shift)
			break;
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		/* Leave a retry entry so concurrent RCU readers restart. */
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
451
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node. This will fail if xa->node has
 * a non-zero reference count.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	/* Walk upwards, freeing every node that has become empty. */
	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* We just freed the root: the array is now empty. */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	/* If we stopped at the root, it may now be shrinkable. */
	if (!node->parent)
		xas_shrink(xas);
}
491
/**
 * xas_free_nodes() - Free this node and all nodes that it references
 * @xas: Array operation state.
 * @top: Node to free
 *
 * This node has been removed from the tree. We must now free it and all
 * of its subnodes. There may be RCU walkers with references into the tree,
 * so we must replace all entries with retry markers.
 */
static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
{
	unsigned int offset = 0;
	struct xa_node *node = top;

	/* Iterative depth-first traversal: descend first, free on the way up. */
	for (;;) {
		void *entry = xa_entry_locked(xas->xa, node, offset);

		if (xa_is_node(entry)) {
			node = xa_to_node(entry);
			offset = 0;
			continue;
		}
		if (entry)
			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
		offset++;
		/* Finished this node: free it and resume in its parent. */
		while (offset == XA_CHUNK_SIZE) {
			struct xa_node *parent;

			parent = xa_parent_locked(xas->xa, node);
			offset = node->offset + 1;
			node->count = 0;
			node->nr_values = 0;
			xas_update(xas, node);
			xa_node_free(node);
			if (node == top)
				return;
			node = parent;
		}
	}
}
532
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index.
 * Returns the shift for the new topmost level, or a negative errno.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array: no nodes needed to store just index 0. */
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				/* All new slots above the old head are free. */
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
603
/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store(). It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL. If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* Starting from the top: grow the tree if necessary. */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume from a previously-walked position. */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	/* Descend, allocating interior nodes until we reach @order. */
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
667
/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas. Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range. This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	/* Start at the last index of the range and work backwards. */
	xas->xa_index |= ((sibs + 1) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	/* Temporarily switch to single-index creation. */
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas);
		if (xas_error(xas))
			goto restore;
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		/* Step back to the previous leaf's position in the tree. */
		for (;;) {
			struct xa_node *node = xas->xa_node;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	/* Put the caller's multi-index parameters back on error. */
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
717
718static void update_node(struct xa_state *xas, struct xa_node *node,
719 int count, int values)
720{
721 if (!node || (!count && !values))
722 return;
723
724 node->count += count;
725 node->nr_values += values;
726 XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
727 XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
728 xas_update(xas, node);
729 if (count < 0)
730 xas_delete_node(xas);
731}
732
/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range). This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;
	int values = 0;
	void *first, *next;
	bool value = xa_is_value(entry);

	/* Storing NULL never allocates; deleting walks the existing tree. */
	if (entry)
		first = xas_create(xas);
	else
		first = xas_load(xas);

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early. rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		if (xa_is_node(next))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		/* Track the net change in occupied slots and value entries. */
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			/* Remaining slots of a multi-index entry hold siblings. */
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
817
818/**
819 * xas_get_mark() - Returns the state of this mark.
820 * @xas: XArray operation state.
821 * @mark: Mark number.
822 *
823 * Return: true if the mark is set, false if the mark is clear or @xas
824 * is in an error state.
825 */
826bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
827{
828 if (xas_invalid(xas))
829 return false;
830 if (!xas->xa_node)
831 return xa_marked(xas->xa, mark);
832 return node_get_mark(xas->xa_node, xas->xa_offset, mark);
833}
834EXPORT_SYMBOL_GPL(xas_get_mark);
835
/**
 * xas_set_mark() - Sets the mark on this entry and its parents.
 * @xas: XArray operation state.
 * @mark: Mark number.
 *
 * Sets the specified mark on this entry, and walks up the tree setting it
 * on all the ancestor entries. Does nothing if @xas has not been walked to
 * an entry, or is in an error state.
 */
void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	unsigned int offset = xas->xa_offset;

	if (xas_invalid(xas))
		return;

	while (node) {
		/* Already set here, so the ancestors must be set too. */
		if (node_set_mark(node, offset, mark))
			return;
		offset = node->offset;
		node = xa_parent_locked(xas->xa, node);
	}

	/* Reached the root: record the mark in the array-wide summary. */
	if (!xa_marked(xas->xa, mark))
		xa_mark_set(xas->xa, mark);
}
EXPORT_SYMBOL_GPL(xas_set_mark);
864
865/**
866 * xas_clear_mark() - Clears the mark on this entry and its parents.
867 * @xas: XArray operation state.
868 * @mark: Mark number.
869 *
870 * Clears the specified mark on this entry, and walks back to the head
871 * attempting to clear it on all the ancestor entries. Does nothing if
872 * @xas has not been walked to an entry, or is in an error state.
873 */
874void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
875{
876 struct xa_node *node = xas->xa_node;
877 unsigned int offset = xas->xa_offset;
878
879 if (xas_invalid(xas))
880 return;
881
882 while (node) {
883 if (!node_clear_mark(node, offset, mark))
884 return;
885 if (node_any_mark(node, mark))
886 return;
887
888 offset = node->offset;
889 node = xa_parent_locked(xas->xa, node);
890 }
891
892 if (xa_marked(xas->xa, mark))
893 xa_mark_clear(xas->xa, mark);
894}
895EXPORT_SYMBOL_GPL(xas_clear_mark);
896
897/**
898 * xas_init_marks() - Initialise all marks for the entry
899 * @xas: Array operations state.
900 *
901 * Initialise all marks for the entry specified by @xas. If we're tracking
902 * free entries with a mark, we need to set it on all entries. All other
903 * marks are cleared.
904 *
905 * This implementation is not as efficient as it could be; we may walk
906 * up the tree multiple times.
907 */
908void xas_init_marks(const struct xa_state *xas)
909{
910 xa_mark_t mark = 0;
911
912 for (;;) {
913 if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
914 xas_set_mark(xas, mark);
915 else
916 xas_clear_mark(xas, mark);
917 if (mark == XA_MARK_MAX)
918 break;
919 mark_inc(mark);
920 }
921}
922EXPORT_SYMBOL_GPL(xas_init_marks);
923
924/**
925 * xas_pause() - Pause a walk to drop a lock.
926 * @xas: XArray operation state.
927 *
928 * Some users need to pause a walk and drop the lock they're holding in
929 * order to yield to a higher priority thread or carry out an operation
930 * on an entry. Those users should call this function before they drop
931 * the lock. It resets the @xas to be suitable for the next iteration
932 * of the loop after the user has reacquired the lock. If most entries
933 * found during a walk require you to call xas_pause(), the xa_for_each()
934 * iterator may be more appropriate.
935 *
936 * Note that xas_pause() only works for forward iteration. If a user needs
937 * to pause a reverse iteration, we will need a xas_pause_rev().
938 */
939void xas_pause(struct xa_state *xas)
940{
941 struct xa_node *node = xas->xa_node;
942
943 if (xas_invalid(xas))
944 return;
945
946 if (node) {
947 unsigned int offset = xas->xa_offset;
948 while (++offset < XA_CHUNK_SIZE) {
949 if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
950 break;
951 }
952 xas->xa_index += (offset - xas->xa_offset) << node->shift;
953 } else {
954 xas->xa_index++;
955 }
956 xas->xa_node = XAS_RESTART;
957}
958EXPORT_SYMBOL_GPL(xas_pause);
959
960/*
961 * __xas_prev() - Find the previous entry in the XArray.
962 * @xas: XArray operation state.
963 *
964 * Helper function for xas_prev() which handles all the complex cases
965 * out of line.
966 */
967void *__xas_prev(struct xa_state *xas)
968{
969 void *entry;
970
971 if (!xas_frozen(xas->xa_node))
972 xas->xa_index--;
973 if (xas_not_node(xas->xa_node))
974 return xas_load(xas);
975
976 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
977 xas->xa_offset--;
978
979 while (xas->xa_offset == 255) {
980 xas->xa_offset = xas->xa_node->offset - 1;
981 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
982 if (!xas->xa_node)
983 return set_bounds(xas);
984 }
985
986 for (;;) {
987 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
988 if (!xa_is_node(entry))
989 return entry;
990
991 xas->xa_node = xa_to_node(entry);
992 xas_set_offset(xas);
993 }
994}
995EXPORT_SYMBOL_GPL(__xas_prev);
996
997/*
998 * __xas_next() - Find the next entry in the XArray.
999 * @xas: XArray operation state.
1000 *
1001 * Helper function for xas_next() which handles all the complex cases
1002 * out of line.
1003 */
1004void *__xas_next(struct xa_state *xas)
1005{
1006 void *entry;
1007
1008 if (!xas_frozen(xas->xa_node))
1009 xas->xa_index++;
1010 if (xas_not_node(xas->xa_node))
1011 return xas_load(xas);
1012
1013 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
1014 xas->xa_offset++;
1015
1016 while (xas->xa_offset == XA_CHUNK_SIZE) {
1017 xas->xa_offset = xas->xa_node->offset + 1;
1018 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1019 if (!xas->xa_node)
1020 return set_bounds(xas);
1021 }
1022
1023 for (;;) {
1024 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1025 if (!xa_is_node(entry))
1026 return entry;
1027
1028 xas->xa_node = xa_to_node(entry);
1029 xas_set_offset(xas);
1030 }
1031}
1032EXPORT_SYMBOL_GPL(__xas_next);
1033
1034/**
1035 * xas_find() - Find the next present entry in the XArray.
1036 * @xas: XArray operation state.
1037 * @max: Highest index to return.
1038 *
1039 * If the @xas has not yet been walked to an entry, return the entry
1040 * which has an index >= xas.xa_index. If it has been walked, the entry
1041 * currently being pointed at has been processed, and so we move to the
1042 * next entry.
1043 *
1044 * If no entry is found and the array is smaller than @max, the iterator
1045 * is set to the smallest index not yet in the array. This allows @xas
1046 * to be immediately passed to xas_store().
1047 *
1048 * Return: The entry, if found, otherwise %NULL.
1049 */
1050void *xas_find(struct xa_state *xas, unsigned long max)
1051{
1052 void *entry;
1053
1054 if (xas_error(xas))
1055 return NULL;
1056
1057 if (!xas->xa_node) {
1058 xas->xa_index = 1;
1059 return set_bounds(xas);
1060 } else if (xas_top(xas->xa_node)) {
1061 entry = xas_load(xas);
1062 if (entry || xas_not_node(xas->xa_node))
1063 return entry;
1064 } else if (!xas->xa_node->shift &&
1065 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
1066 xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
1067 }
1068
1069 xas_advance(xas);
1070
1071 while (xas->xa_node && (xas->xa_index <= max)) {
1072 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1073 xas->xa_offset = xas->xa_node->offset + 1;
1074 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1075 continue;
1076 }
1077
1078 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1079 if (xa_is_node(entry)) {
1080 xas->xa_node = xa_to_node(entry);
1081 xas->xa_offset = 0;
1082 continue;
1083 }
1084 if (entry && !xa_is_sibling(entry))
1085 return entry;
1086
1087 xas_advance(xas);
1088 }
1089
1090 if (!xas->xa_node)
1091 xas->xa_node = XAS_BOUNDS;
1092 return NULL;
1093}
1094EXPORT_SYMBOL_GPL(xas_find);
1095
1096/**
1097 * xas_find_marked() - Find the next marked entry in the XArray.
1098 * @xas: XArray operation state.
1099 * @max: Highest index to return.
1100 * @mark: Mark number to search for.
1101 *
1102 * If the @xas has not yet been walked to an entry, return the marked entry
1103 * which has an index >= xas.xa_index. If it has been walked, the entry
1104 * currently being pointed at has been processed, and so we return the
1105 * first marked entry with an index > xas.xa_index.
1106 *
1107 * If no marked entry is found and the array is smaller than @max, @xas is
1108 * set to the bounds state and xas->xa_index is set to the smallest index
1109 * not yet in the array. This allows @xas to be immediately passed to
1110 * xas_store().
1111 *
1112 * If no entry is found before @max is reached, @xas is set to the restart
1113 * state.
1114 *
1115 * Return: The entry, if found, otherwise %NULL.
1116 */
1117void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
1118{
1119 bool advance = true;
1120 unsigned int offset;
1121 void *entry;
1122
1123 if (xas_error(xas))
1124 return NULL;
1125
1126 if (!xas->xa_node) {
1127 xas->xa_index = 1;
1128 goto out;
1129 } else if (xas_top(xas->xa_node)) {
1130 advance = false;
1131 entry = xa_head(xas->xa);
1132 xas->xa_node = NULL;
1133 if (xas->xa_index > max_index(entry))
1134 goto out;
1135 if (!xa_is_node(entry)) {
1136 if (xa_marked(xas->xa, mark))
1137 return entry;
1138 xas->xa_index = 1;
1139 goto out;
1140 }
1141 xas->xa_node = xa_to_node(entry);
1142 xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
1143 }
1144
1145 while (xas->xa_index <= max) {
1146 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
1147 xas->xa_offset = xas->xa_node->offset + 1;
1148 xas->xa_node = xa_parent(xas->xa, xas->xa_node);
1149 if (!xas->xa_node)
1150 break;
1151 advance = false;
1152 continue;
1153 }
1154
1155 if (!advance) {
1156 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1157 if (xa_is_sibling(entry)) {
1158 xas->xa_offset = xa_to_sibling(entry);
1159 xas_move_index(xas, xas->xa_offset);
1160 }
1161 }
1162
1163 offset = xas_find_chunk(xas, advance, mark);
1164 if (offset > xas->xa_offset) {
1165 advance = false;
1166 xas_move_index(xas, offset);
1167 /* Mind the wrap */
1168 if ((xas->xa_index - 1) >= max)
1169 goto max;
1170 xas->xa_offset = offset;
1171 if (offset == XA_CHUNK_SIZE)
1172 continue;
1173 }
1174
1175 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
1176 if (!xa_is_node(entry))
1177 return entry;
1178 xas->xa_node = xa_to_node(entry);
1179 xas_set_offset(xas);
1180 }
1181
1182out:
1183 if (xas->xa_index > max)
1184 goto max;
1185 return set_bounds(xas);
1186max:
1187 xas->xa_node = XAS_RESTART;
1188 return NULL;
1189}
1190EXPORT_SYMBOL_GPL(xas_find_marked);
1191
1192/**
1193 * xas_find_conflict() - Find the next present entry in a range.
1194 * @xas: XArray operation state.
1195 *
1196 * The @xas describes both a range and a position within that range.
1197 *
1198 * Context: Any context. Expects xa_lock to be held.
1199 * Return: The next entry in the range covered by @xas or %NULL.
1200 */
1201void *xas_find_conflict(struct xa_state *xas)
1202{
1203 void *curr;
1204
1205 if (xas_error(xas))
1206 return NULL;
1207
1208 if (!xas->xa_node)
1209 return NULL;
1210
1211 if (xas_top(xas->xa_node)) {
1212 curr = xas_start(xas);
1213 if (!curr)
1214 return NULL;
1215 while (xa_is_node(curr)) {
1216 struct xa_node *node = xa_to_node(curr);
1217 curr = xas_descend(xas, node);
1218 }
1219 if (curr)
1220 return curr;
1221 }
1222
1223 if (xas->xa_node->shift > xas->xa_shift)
1224 return NULL;
1225
1226 for (;;) {
1227 if (xas->xa_node->shift == xas->xa_shift) {
1228 if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
1229 break;
1230 } else if (xas->xa_offset == XA_CHUNK_MASK) {
1231 xas->xa_offset = xas->xa_node->offset;
1232 xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
1233 if (!xas->xa_node)
1234 break;
1235 continue;
1236 }
1237 curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
1238 if (xa_is_sibling(curr))
1239 continue;
1240 while (xa_is_node(curr)) {
1241 xas->xa_node = xa_to_node(curr);
1242 xas->xa_offset = 0;
1243 curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
1244 }
1245 if (curr)
1246 return curr;
1247 }
1248 xas->xa_offset -= xas->xa_sibs;
1249 return NULL;
1250}
1251EXPORT_SYMBOL_GPL(xas_find_conflict);
1252
1253/**
1254 * xa_init_flags() - Initialise an empty XArray with flags.
1255 * @xa: XArray.
1256 * @flags: XA_FLAG values.
1257 *
1258 * If you need to initialise an XArray with special flags (eg you need
1259 * to take the lock from interrupt context), use this function instead
1260 * of xa_init().
1261 *
1262 * Context: Any context.
1263 */
1264void xa_init_flags(struct xarray *xa, gfp_t flags)
1265{
1266 unsigned int lock_type;
1267 static struct lock_class_key xa_lock_irq;
1268 static struct lock_class_key xa_lock_bh;
1269
1270 spin_lock_init(&xa->xa_lock);
1271 xa->xa_flags = flags;
1272 xa->xa_head = NULL;
1273
1274 lock_type = xa_lock_type(xa);
1275 if (lock_type == XA_LOCK_IRQ)
1276 lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
1277 else if (lock_type == XA_LOCK_BH)
1278 lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
1279}
1280EXPORT_SYMBOL(xa_init_flags);
1281
1282/**
1283 * xa_load() - Load an entry from an XArray.
1284 * @xa: XArray.
1285 * @index: index into array.
1286 *
1287 * Context: Any context. Takes and releases the RCU lock.
1288 * Return: The entry at @index in @xa.
1289 */
1290void *xa_load(struct xarray *xa, unsigned long index)
1291{
1292 XA_STATE(xas, xa, index);
1293 void *entry;
1294
1295 rcu_read_lock();
1296 do {
1297 entry = xas_load(&xas);
1298 if (xa_is_zero(entry))
1299 entry = NULL;
1300 } while (xas_retry(&xas, entry));
1301 rcu_read_unlock();
1302
1303 return entry;
1304}
1305EXPORT_SYMBOL(xa_load);
1306
1307static void *xas_result(struct xa_state *xas, void *curr)
1308{
1309 if (xa_is_zero(curr))
1310 return NULL;
1311 XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
1312 if (xas_error(xas))
1313 curr = xas->xa_node;
1314 return curr;
1315}
1316
1317/**
1318 * __xa_erase() - Erase this entry from the XArray while locked.
1319 * @xa: XArray.
1320 * @index: Index into array.
1321 *
1322 * If the entry at this index is a multi-index entry then all indices will
1323 * be erased, and the entry will no longer be a multi-index entry.
1324 * This function expects the xa_lock to be held on entry.
1325 *
1326 * Context: Any context. Expects xa_lock to be held on entry. May
1327 * release and reacquire xa_lock if @gfp flags permit.
1328 * Return: The old entry at this index.
1329 */
1330void *__xa_erase(struct xarray *xa, unsigned long index)
1331{
1332 XA_STATE(xas, xa, index);
1333 return xas_result(&xas, xas_store(&xas, NULL));
1334}
1335EXPORT_SYMBOL(__xa_erase);
1336
1337/**
1338 * xa_erase() - Erase this entry from the XArray.
1339 * @xa: XArray.
1340 * @index: Index of entry.
1341 *
1342 * This function is the equivalent of calling xa_store() with %NULL as
1343 * the third argument. The XArray does not need to allocate memory, so
1344 * the user does not need to provide GFP flags.
1345 *
1346 * Context: Any context. Takes and releases the xa_lock.
1347 * Return: The entry which used to be at this index.
1348 */
1349void *xa_erase(struct xarray *xa, unsigned long index)
1350{
1351 void *entry;
1352
1353 xa_lock(xa);
1354 entry = __xa_erase(xa, index);
1355 xa_unlock(xa);
1356
1357 return entry;
1358}
1359EXPORT_SYMBOL(xa_erase);
1360
1361/**
1362 * __xa_store() - Store this entry in the XArray.
1363 * @xa: XArray.
1364 * @index: Index into array.
1365 * @entry: New entry.
1366 * @gfp: Memory allocation flags.
1367 *
1368 * You must already be holding the xa_lock when calling this function.
1369 * It will drop the lock if needed to allocate memory, and then reacquire
1370 * it afterwards.
1371 *
1372 * Context: Any context. Expects xa_lock to be held on entry. May
1373 * release and reacquire xa_lock if @gfp flags permit.
1374 * Return: The old entry at this index or xa_err() if an error happened.
1375 */
1376void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1377{
1378 XA_STATE(xas, xa, index);
1379 void *curr;
1380
1381 if (WARN_ON_ONCE(xa_is_internal(entry)))
1382 return XA_ERROR(-EINVAL);
1383 if (xa_track_free(xa) && !entry)
1384 entry = XA_ZERO_ENTRY;
1385
1386 do {
1387 curr = xas_store(&xas, entry);
1388 if (xa_track_free(xa))
1389 xas_clear_mark(&xas, XA_FREE_MARK);
1390 } while (__xas_nomem(&xas, gfp));
1391
1392 return xas_result(&xas, curr);
1393}
1394EXPORT_SYMBOL(__xa_store);
1395
1396/**
1397 * xa_store() - Store this entry in the XArray.
1398 * @xa: XArray.
1399 * @index: Index into array.
1400 * @entry: New entry.
1401 * @gfp: Memory allocation flags.
1402 *
1403 * After this function returns, loads from this index will return @entry.
1404 * Storing into an existing multislot entry updates the entry of every index.
1405 * The marks associated with @index are unaffected unless @entry is %NULL.
1406 *
1407 * Context: Any context. Takes and releases the xa_lock.
1408 * May sleep if the @gfp flags permit.
1409 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
1410 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
1411 * failed.
1412 */
1413void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
1414{
1415 void *curr;
1416
1417 xa_lock(xa);
1418 curr = __xa_store(xa, index, entry, gfp);
1419 xa_unlock(xa);
1420
1421 return curr;
1422}
1423EXPORT_SYMBOL(xa_store);
1424
1425/**
1426 * __xa_cmpxchg() - Store this entry in the XArray.
1427 * @xa: XArray.
1428 * @index: Index into array.
1429 * @old: Old value to test against.
1430 * @entry: New entry.
1431 * @gfp: Memory allocation flags.
1432 *
1433 * You must already be holding the xa_lock when calling this function.
1434 * It will drop the lock if needed to allocate memory, and then reacquire
1435 * it afterwards.
1436 *
1437 * Context: Any context. Expects xa_lock to be held on entry. May
1438 * release and reacquire xa_lock if @gfp flags permit.
1439 * Return: The old entry at this index or xa_err() if an error happened.
1440 */
1441void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
1442 void *old, void *entry, gfp_t gfp)
1443{
1444 XA_STATE(xas, xa, index);
1445 void *curr;
1446
1447 if (WARN_ON_ONCE(xa_is_internal(entry)))
1448 return XA_ERROR(-EINVAL);
1449 if (xa_track_free(xa) && !entry)
1450 entry = XA_ZERO_ENTRY;
1451
1452 do {
1453 curr = xas_load(&xas);
1454 if (curr == XA_ZERO_ENTRY)
1455 curr = NULL;
1456 if (curr == old) {
1457 xas_store(&xas, entry);
1458 if (xa_track_free(xa))
1459 xas_clear_mark(&xas, XA_FREE_MARK);
1460 }
1461 } while (__xas_nomem(&xas, gfp));
1462
1463 return xas_result(&xas, curr);
1464}
1465EXPORT_SYMBOL(__xa_cmpxchg);
1466
1467/**
1468 * __xa_reserve() - Reserve this index in the XArray.
1469 * @xa: XArray.
1470 * @index: Index into array.
1471 * @gfp: Memory allocation flags.
1472 *
1473 * Ensures there is somewhere to store an entry at @index in the array.
1474 * If there is already something stored at @index, this function does
1475 * nothing. If there was nothing there, the entry is marked as reserved.
1476 * Loading from a reserved entry returns a %NULL pointer.
1477 *
1478 * If you do not use the entry that you have reserved, call xa_release()
1479 * or xa_erase() to free any unnecessary memory.
1480 *
1481 * Context: Any context. Expects the xa_lock to be held on entry. May
1482 * release the lock, sleep and reacquire the lock if the @gfp flags permit.
1483 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
1484 */
1485int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
1486{
1487 XA_STATE(xas, xa, index);
1488 void *curr;
1489
1490 do {
1491 curr = xas_load(&xas);
1492 if (!curr) {
1493 xas_store(&xas, XA_ZERO_ENTRY);
1494 if (xa_track_free(xa))
1495 xas_clear_mark(&xas, XA_FREE_MARK);
1496 }
1497 } while (__xas_nomem(&xas, gfp));
1498
1499 return xas_error(&xas);
1500}
1501EXPORT_SYMBOL(__xa_reserve);
1502
1503#ifdef CONFIG_XARRAY_MULTI
/*
 * Configure @xas to store over as much of [@first, @last] as can be
 * covered by a single (possibly multi-slot) entry, by choosing the
 * largest shift whose chunk boundaries still fit inside the range.
 * Sets xa_shift and xa_sibs; the caller loops until the range is done.
 */
static void xas_set_range(struct xa_state *xas, unsigned long first,
		unsigned long last)
{
	unsigned int shift = 0;
	unsigned long sibs = last - first;
	unsigned int offset = XA_CHUNK_MASK;

	xas_set(xas, first);

	/* Climb a level while @first is chunk-aligned and the span allows it. */
	while ((first & XA_CHUNK_MASK) == 0) {
		if (sibs < XA_CHUNK_MASK)
			break;
		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
			break;
		shift += XA_CHUNK_SHIFT;
		if (offset == XA_CHUNK_MASK)
			offset = sibs & XA_CHUNK_MASK;
		sibs >>= XA_CHUNK_SHIFT;
		first >>= XA_CHUNK_SHIFT;
	}

	/* Clamp the sibling count so it stays within one chunk ... */
	offset = first & XA_CHUNK_MASK;
	if (offset + sibs > XA_CHUNK_MASK)
		sibs = XA_CHUNK_MASK - offset;
	/* ... and does not reach past @last at this shift. */
	if ((((first + sibs + 1) << shift) - 1) > last)
		sibs -= 1;

	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
}
1534
1535/**
1536 * xa_store_range() - Store this entry at a range of indices in the XArray.
1537 * @xa: XArray.
1538 * @first: First index to affect.
1539 * @last: Last index to affect.
1540 * @entry: New entry.
1541 * @gfp: Memory allocation flags.
1542 *
1543 * After this function returns, loads from any index between @first and @last,
1544 * inclusive will return @entry.
1545 * Storing into an existing multislot entry updates the entry of every index.
1546 * The marks associated with @index are unaffected unless @entry is %NULL.
1547 *
1548 * Context: Process context. Takes and releases the xa_lock. May sleep
1549 * if the @gfp flags permit.
1550 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
1551 * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
1552 */
1553void *xa_store_range(struct xarray *xa, unsigned long first,
1554 unsigned long last, void *entry, gfp_t gfp)
1555{
1556 XA_STATE(xas, xa, 0);
1557
1558 if (WARN_ON_ONCE(xa_is_internal(entry)))
1559 return XA_ERROR(-EINVAL);
1560 if (last < first)
1561 return XA_ERROR(-EINVAL);
1562
1563 do {
1564 xas_lock(&xas);
1565 if (entry) {
1566 unsigned int order = BITS_PER_LONG;
1567 if (last + 1)
1568 order = __ffs(last + 1);
1569 xas_set_order(&xas, last, order);
1570 xas_create(&xas);
1571 if (xas_error(&xas))
1572 goto unlock;
1573 }
1574 do {
1575 xas_set_range(&xas, first, last);
1576 xas_store(&xas, entry);
1577 if (xas_error(&xas))
1578 goto unlock;
1579 first += xas_size(&xas);
1580 } while (first <= last);
1581unlock:
1582 xas_unlock(&xas);
1583 } while (xas_nomem(&xas, gfp));
1584
1585 return xas_result(&xas, NULL);
1586}
1587EXPORT_SYMBOL(xa_store_range);
1588#endif /* CONFIG_XARRAY_MULTI */
1589
1590/**
1591 * __xa_alloc() - Find somewhere to store this entry in the XArray.
1592 * @xa: XArray.
1593 * @id: Pointer to ID.
1594 * @max: Maximum ID to allocate (inclusive).
1595 * @entry: New entry.
1596 * @gfp: Memory allocation flags.
1597 *
1598 * Allocates an unused ID in the range specified by @id and @max.
1599 * Updates the @id pointer with the index, then stores the entry at that
1600 * index. A concurrent lookup will not see an uninitialised @id.
1601 *
1602 * Context: Any context. Expects xa_lock to be held on entry. May
1603 * release and reacquire xa_lock if @gfp flags permit.
1604 * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
1605 * there is no more space in the XArray.
1606 */
1607int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
1608{
1609 XA_STATE(xas, xa, 0);
1610 int err;
1611
1612 if (WARN_ON_ONCE(xa_is_internal(entry)))
1613 return -EINVAL;
1614 if (WARN_ON_ONCE(!xa_track_free(xa)))
1615 return -EINVAL;
1616
1617 if (!entry)
1618 entry = XA_ZERO_ENTRY;
1619
1620 do {
1621 xas.xa_index = *id;
1622 xas_find_marked(&xas, max, XA_FREE_MARK);
1623 if (xas.xa_node == XAS_RESTART)
1624 xas_set_err(&xas, -ENOSPC);
1625 xas_store(&xas, entry);
1626 xas_clear_mark(&xas, XA_FREE_MARK);
1627 } while (__xas_nomem(&xas, gfp));
1628
1629 err = xas_error(&xas);
1630 if (!err)
1631 *id = xas.xa_index;
1632 return err;
1633}
1634EXPORT_SYMBOL(__xa_alloc);
1635
1636/**
1637 * __xa_set_mark() - Set this mark on this entry while locked.
1638 * @xa: XArray.
1639 * @index: Index of entry.
1640 * @mark: Mark number.
1641 *
1642 * Attempting to set a mark on a %NULL entry does not succeed.
1643 *
1644 * Context: Any context. Expects xa_lock to be held on entry.
1645 */
1646void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1647{
1648 XA_STATE(xas, xa, index);
1649 void *entry = xas_load(&xas);
1650
1651 if (entry)
1652 xas_set_mark(&xas, mark);
1653}
1654EXPORT_SYMBOL(__xa_set_mark);
1655
1656/**
1657 * __xa_clear_mark() - Clear this mark on this entry while locked.
1658 * @xa: XArray.
1659 * @index: Index of entry.
1660 * @mark: Mark number.
1661 *
1662 * Context: Any context. Expects xa_lock to be held on entry.
1663 */
1664void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1665{
1666 XA_STATE(xas, xa, index);
1667 void *entry = xas_load(&xas);
1668
1669 if (entry)
1670 xas_clear_mark(&xas, mark);
1671}
1672EXPORT_SYMBOL(__xa_clear_mark);
1673
1674/**
1675 * xa_get_mark() - Inquire whether this mark is set on this entry.
1676 * @xa: XArray.
1677 * @index: Index of entry.
1678 * @mark: Mark number.
1679 *
1680 * This function uses the RCU read lock, so the result may be out of date
1681 * by the time it returns. If you need the result to be stable, use a lock.
1682 *
1683 * Context: Any context. Takes and releases the RCU lock.
1684 * Return: True if the entry at @index has this mark set, false if it doesn't.
1685 */
1686bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1687{
1688 XA_STATE(xas, xa, index);
1689 void *entry;
1690
1691 rcu_read_lock();
1692 entry = xas_start(&xas);
1693 while (xas_get_mark(&xas, mark)) {
1694 if (!xa_is_node(entry))
1695 goto found;
1696 entry = xas_descend(&xas, xa_to_node(entry));
1697 }
1698 rcu_read_unlock();
1699 return false;
1700 found:
1701 rcu_read_unlock();
1702 return true;
1703}
1704EXPORT_SYMBOL(xa_get_mark);
1705
1706/**
1707 * xa_set_mark() - Set this mark on this entry.
1708 * @xa: XArray.
1709 * @index: Index of entry.
1710 * @mark: Mark number.
1711 *
1712 * Attempting to set a mark on a %NULL entry does not succeed.
1713 *
1714 * Context: Process context. Takes and releases the xa_lock.
1715 */
1716void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1717{
1718 xa_lock(xa);
1719 __xa_set_mark(xa, index, mark);
1720 xa_unlock(xa);
1721}
1722EXPORT_SYMBOL(xa_set_mark);
1723
1724/**
1725 * xa_clear_mark() - Clear this mark on this entry.
1726 * @xa: XArray.
1727 * @index: Index of entry.
1728 * @mark: Mark number.
1729 *
1730 * Clearing a mark always succeeds.
1731 *
1732 * Context: Process context. Takes and releases the xa_lock.
1733 */
1734void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
1735{
1736 xa_lock(xa);
1737 __xa_clear_mark(xa, index, mark);
1738 xa_unlock(xa);
1739}
1740EXPORT_SYMBOL(xa_clear_mark);
1741
1742/**
1743 * xa_find() - Search the XArray for an entry.
1744 * @xa: XArray.
1745 * @indexp: Pointer to an index.
1746 * @max: Maximum index to search to.
1747 * @filter: Selection criterion.
1748 *
1749 * Finds the entry in @xa which matches the @filter, and has the lowest
1750 * index that is at least @indexp and no more than @max.
1751 * If an entry is found, @indexp is updated to be the index of the entry.
1752 * This function is protected by the RCU read lock, so it may not find
1753 * entries which are being simultaneously added. It will not return an
1754 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1755 *
1756 * Context: Any context. Takes and releases the RCU lock.
1757 * Return: The entry, if found, otherwise %NULL.
1758 */
1759void *xa_find(struct xarray *xa, unsigned long *indexp,
1760 unsigned long max, xa_mark_t filter)
1761{
1762 XA_STATE(xas, xa, *indexp);
1763 void *entry;
1764
1765 rcu_read_lock();
1766 do {
1767 if ((__force unsigned int)filter < XA_MAX_MARKS)
1768 entry = xas_find_marked(&xas, max, filter);
1769 else
1770 entry = xas_find(&xas, max);
1771 } while (xas_retry(&xas, entry));
1772 rcu_read_unlock();
1773
1774 if (entry)
1775 *indexp = xas.xa_index;
1776 return entry;
1777}
1778EXPORT_SYMBOL(xa_find);
1779
1780/**
1781 * xa_find_after() - Search the XArray for a present entry.
1782 * @xa: XArray.
1783 * @indexp: Pointer to an index.
1784 * @max: Maximum index to search to.
1785 * @filter: Selection criterion.
1786 *
1787 * Finds the entry in @xa which matches the @filter and has the lowest
1788 * index that is above @indexp and no more than @max.
1789 * If an entry is found, @indexp is updated to be the index of the entry.
1790 * This function is protected by the RCU read lock, so it may miss entries
1791 * which are being simultaneously added. It will not return an
1792 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
1793 *
1794 * Context: Any context. Takes and releases the RCU lock.
1795 * Return: The pointer, if found, otherwise %NULL.
1796 */
1797void *xa_find_after(struct xarray *xa, unsigned long *indexp,
1798 unsigned long max, xa_mark_t filter)
1799{
1800 XA_STATE(xas, xa, *indexp + 1);
1801 void *entry;
1802
1803 rcu_read_lock();
1804 for (;;) {
1805 if ((__force unsigned int)filter < XA_MAX_MARKS)
1806 entry = xas_find_marked(&xas, max, filter);
1807 else
1808 entry = xas_find(&xas, max);
1809 if (xas.xa_node == XAS_BOUNDS)
1810 break;
1811 if (xas.xa_shift) {
1812 if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
1813 continue;
1814 } else {
1815 if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
1816 continue;
1817 }
1818 if (!xas_retry(&xas, entry))
1819 break;
1820 }
1821 rcu_read_unlock();
1822
1823 if (entry)
1824 *indexp = xas.xa_index;
1825 return entry;
1826}
1827EXPORT_SYMBOL(xa_find_after);
1828
1829static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
1830 unsigned long max, unsigned int n)
1831{
1832 void *entry;
1833 unsigned int i = 0;
1834
1835 rcu_read_lock();
1836 xas_for_each(xas, entry, max) {
1837 if (xas_retry(xas, entry))
1838 continue;
1839 dst[i++] = entry;
1840 if (i == n)
1841 break;
1842 }
1843 rcu_read_unlock();
1844
1845 return i;
1846}
1847
1848static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
1849 unsigned long max, unsigned int n, xa_mark_t mark)
1850{
1851 void *entry;
1852 unsigned int i = 0;
1853
1854 rcu_read_lock();
1855 xas_for_each_marked(xas, entry, max, mark) {
1856 if (xas_retry(xas, entry))
1857 continue;
1858 dst[i++] = entry;
1859 if (i == n)
1860 break;
1861 }
1862 rcu_read_unlock();
1863
1864 return i;
1865}
1866
1867/**
1868 * xa_extract() - Copy selected entries from the XArray into a normal array.
1869 * @xa: The source XArray to copy from.
1870 * @dst: The buffer to copy entries into.
1871 * @start: The first index in the XArray eligible to be selected.
1872 * @max: The last index in the XArray eligible to be selected.
1873 * @n: The maximum number of entries to copy.
1874 * @filter: Selection criterion.
1875 *
1876 * Copies up to @n entries that match @filter from the XArray. The
1877 * copied entries will have indices between @start and @max, inclusive.
1878 *
1879 * The @filter may be an XArray mark value, in which case entries which are
1880 * marked with that mark will be copied. It may also be %XA_PRESENT, in
1881 * which case all entries which are not %NULL will be copied.
1882 *
1883 * The entries returned may not represent a snapshot of the XArray at a
1884 * moment in time. For example, if another thread stores to index 5, then
1885 * index 10, calling xa_extract() may return the old contents of index 5
1886 * and the new contents of index 10. Indices not modified while this
1887 * function is running will not be skipped.
1888 *
1889 * If you need stronger guarantees, holding the xa_lock across calls to this
1890 * function will prevent concurrent modification.
1891 *
1892 * Context: Any context. Takes and releases the RCU lock.
1893 * Return: The number of entries copied.
1894 */
1895unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
1896 unsigned long max, unsigned int n, xa_mark_t filter)
1897{
1898 XA_STATE(xas, xa, start);
1899
1900 if (!n)
1901 return 0;
1902
1903 if ((__force unsigned int)filter < XA_MAX_MARKS)
1904 return xas_extract_marked(&xas, dst, max, n, filter);
1905 return xas_extract_present(&xas, dst, max, n);
1906}
1907EXPORT_SYMBOL(xa_extract);
1908
1909/**
1910 * xa_destroy() - Free all internal data structures.
1911 * @xa: XArray.
1912 *
1913 * After calling this function, the XArray is empty and has freed all memory
1914 * allocated for its internal data structures. You are responsible for
1915 * freeing the objects referenced by the XArray.
1916 *
1917 * Context: Any context. Takes and releases the xa_lock, interrupt-safe.
1918 */
1919void xa_destroy(struct xarray *xa)
1920{
1921 XA_STATE(xas, xa, 0);
1922 unsigned long flags;
1923 void *entry;
1924
1925 xas.xa_node = NULL;
1926 xas_lock_irqsave(&xas, flags);
1927 entry = xa_head_locked(xa);
1928 RCU_INIT_POINTER(xa->xa_head, NULL);
1929 xas_init_marks(&xas);
1930 /* lockdep checks we're still holding the lock in xas_free_nodes() */
1931 if (xa_is_node(entry))
1932 xas_free_nodes(&xas, xa_to_node(entry));
1933 xas_unlock_irqrestore(&xas, flags);
1934}
1935EXPORT_SYMBOL(xa_destroy);
1936
1937#ifdef XA_DEBUG
/* Dump one node's metadata and mark bitmaps (debug builds only). */
void xa_dump_node(const struct xa_node *node)
{
	unsigned i, j;

	if (!node)
		return;
	/* Low bits set means this is a tagged internal pointer, not a node. */
	if ((unsigned long)node & 3) {
		pr_cont("node %px\n", node);
		return;
	}

	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
		"array %px list %px %px marks",
		node, node->parent ? "offset" : "max", node->offset,
		node->parent, node->shift, node->count, node->nr_values,
		node->array, node->private_list.prev, node->private_list.next);
	/* One bitmap word per mark, per long. */
	for (i = 0; i < XA_MAX_MARKS; i++)
		for (j = 0; j < XA_MARK_LONGS; j++)
			pr_cont(" %lx", node->marks[i][j]);
	pr_cont("\n");
}
1959
1960void xa_dump_index(unsigned long index, unsigned int shift)
1961{
1962 if (!shift)
1963 pr_info("%lu: ", index);
1964 else if (shift >= BITS_PER_LONG)
1965 pr_info("0-%lu: ", ~0UL);
1966 else
1967 pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
1968}
1969
/* Recursively dump an entry and, for interior nodes, all its children. */
void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
{
	if (!entry)
		return;

	xa_dump_index(index, shift);

	if (xa_is_node(entry)) {
		if (shift == 0) {
			/* NOTE(review): a node at shift 0 is printed raw. */
			pr_cont("%px\n", entry);
		} else {
			unsigned long i;
			struct xa_node *node = xa_to_node(entry);
			xa_dump_node(node);
			for (i = 0; i < XA_CHUNK_SIZE; i++)
				xa_dump_entry(node->slots[i],
				      index + (i << node->shift), node->shift);
		}
	} else if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
						xa_to_value(entry), entry);
	else if (!xa_is_internal(entry))
		pr_cont("%px\n", entry);
	else if (xa_is_retry(entry))
		pr_cont("retry (%ld)\n", xa_to_internal(entry));
	else if (xa_is_sibling(entry))
		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else
		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
}
2002
/* Dump an entire XArray: header, flags, sample marks, then every entry. */
void xa_dump(const struct xarray *xa)
{
	void *entry = xa->xa_head;
	unsigned int shift = 0;

	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
			xa->xa_flags, xa_marked(xa, XA_MARK_0),
			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
	/* The head's node shift determines the index span of the top level. */
	if (xa_is_node(entry))
		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
	xa_dump_entry(entry, 0, shift);
}
2015#endif
diff --git a/lib/zlib_inflate/inflate.c b/lib/zlib_inflate/inflate.c
index 58a733b10387..48f14cd58c77 100644
--- a/lib/zlib_inflate/inflate.c
+++ b/lib/zlib_inflate/inflate.c
@@ -382,6 +382,7 @@ int zlib_inflate(z_streamp strm, int flush)
382 strm->adler = state->check = REVERSE(hold); 382 strm->adler = state->check = REVERSE(hold);
383 INITBITS(); 383 INITBITS();
384 state->mode = DICT; 384 state->mode = DICT;
385 /* fall through */
385 case DICT: 386 case DICT:
386 if (state->havedict == 0) { 387 if (state->havedict == 0) {
387 RESTORE(); 388 RESTORE();
@@ -389,8 +390,10 @@ int zlib_inflate(z_streamp strm, int flush)
389 } 390 }
390 strm->adler = state->check = zlib_adler32(0L, NULL, 0); 391 strm->adler = state->check = zlib_adler32(0L, NULL, 0);
391 state->mode = TYPE; 392 state->mode = TYPE;
393 /* fall through */
392 case TYPE: 394 case TYPE:
393 if (flush == Z_BLOCK) goto inf_leave; 395 if (flush == Z_BLOCK) goto inf_leave;
396 /* fall through */
394 case TYPEDO: 397 case TYPEDO:
395 if (state->last) { 398 if (state->last) {
396 BYTEBITS(); 399 BYTEBITS();
@@ -428,6 +431,7 @@ int zlib_inflate(z_streamp strm, int flush)
428 state->length = (unsigned)hold & 0xffff; 431 state->length = (unsigned)hold & 0xffff;
429 INITBITS(); 432 INITBITS();
430 state->mode = COPY; 433 state->mode = COPY;
434 /* fall through */
431 case COPY: 435 case COPY:
432 copy = state->length; 436 copy = state->length;
433 if (copy) { 437 if (copy) {
@@ -461,6 +465,7 @@ int zlib_inflate(z_streamp strm, int flush)
461#endif 465#endif
462 state->have = 0; 466 state->have = 0;
463 state->mode = LENLENS; 467 state->mode = LENLENS;
468 /* fall through */
464 case LENLENS: 469 case LENLENS:
465 while (state->have < state->ncode) { 470 while (state->have < state->ncode) {
466 NEEDBITS(3); 471 NEEDBITS(3);
@@ -481,6 +486,7 @@ int zlib_inflate(z_streamp strm, int flush)
481 } 486 }
482 state->have = 0; 487 state->have = 0;
483 state->mode = CODELENS; 488 state->mode = CODELENS;
489 /* fall through */
484 case CODELENS: 490 case CODELENS:
485 while (state->have < state->nlen + state->ndist) { 491 while (state->have < state->nlen + state->ndist) {
486 for (;;) { 492 for (;;) {
@@ -554,6 +560,7 @@ int zlib_inflate(z_streamp strm, int flush)
554 break; 560 break;
555 } 561 }
556 state->mode = LEN; 562 state->mode = LEN;
563 /* fall through */
557 case LEN: 564 case LEN:
558 if (have >= 6 && left >= 258) { 565 if (have >= 6 && left >= 258) {
559 RESTORE(); 566 RESTORE();
@@ -593,6 +600,7 @@ int zlib_inflate(z_streamp strm, int flush)
593 } 600 }
594 state->extra = (unsigned)(this.op) & 15; 601 state->extra = (unsigned)(this.op) & 15;
595 state->mode = LENEXT; 602 state->mode = LENEXT;
603 /* fall through */
596 case LENEXT: 604 case LENEXT:
597 if (state->extra) { 605 if (state->extra) {
598 NEEDBITS(state->extra); 606 NEEDBITS(state->extra);
@@ -600,6 +608,7 @@ int zlib_inflate(z_streamp strm, int flush)
600 DROPBITS(state->extra); 608 DROPBITS(state->extra);
601 } 609 }
602 state->mode = DIST; 610 state->mode = DIST;
611 /* fall through */
603 case DIST: 612 case DIST:
604 for (;;) { 613 for (;;) {
605 this = state->distcode[BITS(state->distbits)]; 614 this = state->distcode[BITS(state->distbits)];
@@ -625,6 +634,7 @@ int zlib_inflate(z_streamp strm, int flush)
625 state->offset = (unsigned)this.val; 634 state->offset = (unsigned)this.val;
626 state->extra = (unsigned)(this.op) & 15; 635 state->extra = (unsigned)(this.op) & 15;
627 state->mode = DISTEXT; 636 state->mode = DISTEXT;
637 /* fall through */
628 case DISTEXT: 638 case DISTEXT:
629 if (state->extra) { 639 if (state->extra) {
630 NEEDBITS(state->extra); 640 NEEDBITS(state->extra);
@@ -644,6 +654,7 @@ int zlib_inflate(z_streamp strm, int flush)
644 break; 654 break;
645 } 655 }
646 state->mode = MATCH; 656 state->mode = MATCH;
657 /* fall through */
647 case MATCH: 658 case MATCH:
648 if (left == 0) goto inf_leave; 659 if (left == 0) goto inf_leave;
649 copy = out - left; 660 copy = out - left;
@@ -694,6 +705,7 @@ int zlib_inflate(z_streamp strm, int flush)
694 INITBITS(); 705 INITBITS();
695 } 706 }
696 state->mode = DONE; 707 state->mode = DONE;
708 /* fall through */
697 case DONE: 709 case DONE:
698 ret = Z_STREAM_END; 710 ret = Z_STREAM_END;
699 goto inf_leave; 711 goto inf_leave;