author		Linus Torvalds <torvalds@linux-foundation.org>	2018-10-28 14:35:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-28 14:35:40 -0400
commit		dad4f140edaa3f6bb452b6913d41af1ffd672e45
tree		1c0ebdcdfcdfb4ec9af7810c5ad9bae0f791ff5c
parent		69d5b97c597307773fe6c59775a5d5a88bb7e6b3
parent		3a08cd52c37c793ffc199f6fc2ecfc368e284b2d
Merge branch 'xarray' of git://git.infradead.org/users/willy/linux-dax
Pull XArray conversion from Matthew Wilcox:
 "The XArray provides an improved interface to the radix tree data
  structure, providing locking as part of the API, specifying GFP
  flags at allocation time, eliminating preloading, less re-walking
  the tree, more efficient iterations and not exposing RCU-protected
  pointers to its users.

  This patch set:

   1. Introduces the XArray implementation

   2. Converts the pagecache to use it

   3. Converts memremap to use it

  The page cache is the most complex and important user of the radix
  tree, so converting it was most important. Converting the memremap
  code removes the only other user of the multiorder code, which allows
  us to remove the radix tree code that supported it.

  I have 40+ followup patches to convert many other users of the radix
  tree over to the XArray, but I'd like to get this part in first. The
  other conversions haven't been in linux-next and aren't suitable for
  applying yet, but you can see them in the xarray-conv branch if
  you're interested"

* 'xarray' of git://git.infradead.org/users/willy/linux-dax: (90 commits)
  radix tree: Remove multiorder support
  radix tree test: Convert multiorder tests to XArray
  radix tree tests: Convert item_delete_rcu to XArray
  radix tree tests: Convert item_kill_tree to XArray
  radix tree tests: Move item_insert_order
  radix tree test suite: Remove multiorder benchmarking
  radix tree test suite: Remove __item_insert
  memremap: Convert to XArray
  xarray: Add range store functionality
  xarray: Move multiorder_check to in-kernel tests
  xarray: Move multiorder_shrink to kernel tests
  xarray: Move multiorder account test in-kernel
  radix tree test suite: Convert iteration test to XArray
  radix tree test suite: Convert tag_tagged_items to XArray
  radix tree: Remove radix_tree_clear_tags
  radix tree: Remove radix_tree_maybe_preload_order
  radix tree: Remove split/join code
  radix tree: Remove radix_tree_update_node_t
  page cache: Finish XArray conversion
  dax: Convert page fault handlers to XArray
  ...
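A minimal sketch of the interface difference described above, not part of this
commit: the xa_* calls below are the XArray API from include/linux/xarray.h,
while struct item and the example_* wrappers are illustrative only.

	#include <linux/xarray.h>

	struct item {			/* illustrative payload type */
		unsigned long index;
	};

	static DEFINE_XARRAY(array);	/* locking is part of the API */

	/* GFP flags are passed at store time, so no preload step is needed. */
	static int example_store(unsigned long index, struct item *item)
	{
		return xa_err(xa_store(&array, index, item, GFP_KERNEL));
	}

	/* Lookups return the entry itself, not an RCU-protected slot pointer. */
	static struct item *example_load(unsigned long index)
	{
		return xa_load(&array, index);
	}

	/* Erase removes the entry and returns whatever was stored there. */
	static struct item *example_erase(unsigned long index)
	{
		return xa_erase(&array, index);
	}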
Diffstat (limited to 'tools')
-rw-r--r--	tools/include/asm-generic/bitops.h	1
-rw-r--r--	tools/include/asm-generic/bitops/atomic.h	9
-rw-r--r--	tools/include/asm-generic/bitops/non-atomic.h	109
-rw-r--r--	tools/include/linux/bitmap.h	1
-rw-r--r--	tools/include/linux/kernel.h	1
-rw-r--r--	tools/include/linux/spinlock.h	12
-rw-r--r--	tools/testing/radix-tree/.gitignore	1
-rw-r--r--	tools/testing/radix-tree/Makefile	11
-rw-r--r--	tools/testing/radix-tree/benchmark.c	141
-rw-r--r--	tools/testing/radix-tree/bitmap.c	23
-rw-r--r--	tools/testing/radix-tree/generated/autoconf.h	2
-rw-r--r--	tools/testing/radix-tree/idr-test.c	71
-rw-r--r--	tools/testing/radix-tree/iteration_check.c	109
-rw-r--r--	tools/testing/radix-tree/linux/bug.h	1
-rw-r--r--	tools/testing/radix-tree/linux/kconfig.h	1
-rw-r--r--	tools/testing/radix-tree/linux/kernel.h	5
-rw-r--r--	tools/testing/radix-tree/linux/lockdep.h	11
-rw-r--r--	tools/testing/radix-tree/linux/radix-tree.h	1
-rw-r--r--	tools/testing/radix-tree/linux/rcupdate.h	2
-rw-r--r--	tools/testing/radix-tree/main.c	66
-rw-r--r--	tools/testing/radix-tree/multiorder.c	609
-rw-r--r--	tools/testing/radix-tree/regression1.c	75
-rw-r--r--	tools/testing/radix-tree/regression2.c	8
-rw-r--r--	tools/testing/radix-tree/regression3.c	23
-rw-r--r--	tools/testing/radix-tree/tag_check.c	33
-rw-r--r--	tools/testing/radix-tree/test.c	131
-rw-r--r--	tools/testing/radix-tree/test.h	13
-rw-r--r--	tools/testing/radix-tree/xarray.c	35
28 files changed, 497 insertions(+), 1008 deletions(-)
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 9bce3b56b5e7..5d2ab38965cc 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -27,5 +27,6 @@
 #include <asm-generic/bitops/hweight.h>
 
 #include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
 
 #endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h
index 21c41ccd1266..2f6ea28764a7 100644
--- a/tools/include/asm-generic/bitops/atomic.h
+++ b/tools/include/asm-generic/bitops/atomic.h
@@ -15,13 +15,4 @@ static inline void clear_bit(int nr, unsigned long *addr)
 	addr[nr / __BITS_PER_LONG] &= ~(1UL << (nr % __BITS_PER_LONG));
 }
 
-static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
-{
-	return ((1UL << (nr % __BITS_PER_LONG)) &
-		(((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
-}
-
-#define __set_bit(nr, addr)	set_bit(nr, addr)
-#define __clear_bit(nr, addr)	clear_bit(nr, addr)
-
 #endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
diff --git a/tools/include/asm-generic/bitops/non-atomic.h b/tools/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 000000000000..7e10c4b50c5d
--- /dev/null
+++ b/tools/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+	*p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old | mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two examples of this operation race, one can appear to succeed
+ * but actually fail.  You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old & ~mask;
+	return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+					volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old = *p;
+
+	*p = old ^ mask;
+	return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index e63662db131b..05dca5c203f3 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -15,6 +15,7 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, int bits);
 int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 		 const unsigned long *bitmap2, unsigned int bits);
+void bitmap_clear(unsigned long *map, unsigned int start, int len);
 
 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
 
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 0ad884452c5c..6935ef94e77a 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -70,6 +70,7 @@
 #define BUG_ON(cond) assert(!(cond))
 #endif
 #endif
+#define BUG()	BUG_ON(1)
 
 #if __BYTE_ORDER == __BIG_ENDIAN
 #define cpu_to_le16 bswap_16
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index 1738c0391da4..c934572d935c 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -8,8 +8,14 @@
 #define spinlock_t		pthread_mutex_t
 #define DEFINE_SPINLOCK(x)	pthread_mutex_t x = PTHREAD_MUTEX_INITIALIZER
 #define __SPIN_LOCK_UNLOCKED(x)	(pthread_mutex_t)PTHREAD_MUTEX_INITIALIZER
 #define spin_lock_init(x)	pthread_mutex_init(x, NULL)
 
+#define spin_lock(x)		pthread_mutex_lock(x)
+#define spin_unlock(x)		pthread_mutex_unlock(x)
+#define spin_lock_bh(x)		pthread_mutex_lock(x)
+#define spin_unlock_bh(x)	pthread_mutex_unlock(x)
+#define spin_lock_irq(x)	pthread_mutex_lock(x)
+#define spin_unlock_irq(x)	pthread_mutex_unlock(x)
 #define spin_lock_irqsave(x, f)		(void)f, pthread_mutex_lock(x)
 #define spin_unlock_irqrestore(x, f)	(void)f, pthread_mutex_unlock(x)
 
@@ -31,4 +37,6 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
 	return true;
 }
 
+#include <linux/lockdep.h>
+
 #endif
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index d4706c0ffceb..3834899b6693 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -4,3 +4,4 @@ idr-test
 main
 multiorder
 radix-tree.c
+xarray
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index 37baecc3766f..acf1afa01c5b 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -4,8 +4,8 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
 	  -fsanitize=undefined
 LDFLAGS += -fsanitize=address -fsanitize=undefined
 LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder
-CORE_OFILES := radix-tree.o idr.o linux.o test.o find_bit.o
+TARGETS = main idr-test multiorder xarray
+CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
 	 tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
 
@@ -25,6 +25,8 @@ main: $(OFILES)
 idr-test.o: ../../../lib/test_ida.c
 idr-test: idr-test.o $(CORE_OFILES)
 
+xarray: $(CORE_OFILES)
+
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
@@ -35,6 +37,7 @@ vpath %.c ../../lib
 $(OFILES): Makefile *.h */*.h generated/map-shift.h \
 	../../include/linux/*.h \
 	../../include/asm/*.h \
+	../../../include/linux/xarray.h \
 	../../../include/linux/radix-tree.h \
 	../../../include/linux/idr.h
 
@@ -44,8 +47,10 @@ radix-tree.c: ../../../lib/radix-tree.c
 idr.c: ../../../lib/idr.c
 	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
 
+xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
+
 generated/map-shift.h:
 	@if ! grep -qws $(SHIFT) generated/map-shift.h; then		\
-		echo "#define RADIX_TREE_MAP_SHIFT $(SHIFT)" > \
+		echo "#define XA_CHUNK_SHIFT $(SHIFT)" > \
 			generated/map-shift.h;				\
 	fi
diff --git a/tools/testing/radix-tree/benchmark.c b/tools/testing/radix-tree/benchmark.c
index 99c40f3ed133..7e195ed8e92d 100644
--- a/tools/testing/radix-tree/benchmark.c
+++ b/tools/testing/radix-tree/benchmark.c
@@ -17,9 +17,6 @@
 #include <time.h>
 #include "test.h"
 
-#define for_each_index(i, base, order) \
-		for (i = base; i < base + (1 << order); i++)
-
 #define NSEC_PER_SEC	1000000000L
 
 static long long benchmark_iter(struct radix_tree_root *root, bool tagged)
@@ -61,7 +58,7 @@ again:
 }
 
 static void benchmark_insert(struct radix_tree_root *root,
-			     unsigned long size, unsigned long step, int order)
+			     unsigned long size, unsigned long step)
 {
 	struct timespec start, finish;
 	unsigned long index;
@@ -70,19 +67,19 @@ static void benchmark_insert(struct radix_tree_root *root,
 	clock_gettime(CLOCK_MONOTONIC, &start);
 
 	for (index = 0 ; index < size ; index += step)
-		item_insert_order(root, index, order);
+		item_insert(root, index);
 
 	clock_gettime(CLOCK_MONOTONIC, &finish);
 
 	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
 	       (finish.tv_nsec - start.tv_nsec);
 
-	printv(2, "Size: %8ld, step: %8ld, order: %d, insertion: %15lld ns\n",
-		size, step, order, nsec);
+	printv(2, "Size: %8ld, step: %8ld, insertion: %15lld ns\n",
+		size, step, nsec);
 }
 
 static void benchmark_tagging(struct radix_tree_root *root,
-			     unsigned long size, unsigned long step, int order)
+			     unsigned long size, unsigned long step)
 {
 	struct timespec start, finish;
 	unsigned long index;
@@ -98,138 +95,53 @@ static void benchmark_tagging(struct radix_tree_root *root,
 	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
 	       (finish.tv_nsec - start.tv_nsec);
 
-	printv(2, "Size: %8ld, step: %8ld, order: %d, tagging: %17lld ns\n",
-		size, step, order, nsec);
+	printv(2, "Size: %8ld, step: %8ld, tagging: %17lld ns\n",
+		size, step, nsec);
 }
 
 static void benchmark_delete(struct radix_tree_root *root,
-			     unsigned long size, unsigned long step, int order)
+			     unsigned long size, unsigned long step)
 {
 	struct timespec start, finish;
-	unsigned long index, i;
+	unsigned long index;
 	long long nsec;
 
 	clock_gettime(CLOCK_MONOTONIC, &start);
 
 	for (index = 0 ; index < size ; index += step)
-		for_each_index(i, index, order)
-			item_delete(root, i);
+		item_delete(root, index);
 
 	clock_gettime(CLOCK_MONOTONIC, &finish);
 
 	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
 	       (finish.tv_nsec - start.tv_nsec);
 
-	printv(2, "Size: %8ld, step: %8ld, order: %d, deletion: %16lld ns\n",
-		size, step, order, nsec);
+	printv(2, "Size: %8ld, step: %8ld, deletion: %16lld ns\n",
+		size, step, nsec);
 }
 
-static void benchmark_size(unsigned long size, unsigned long step, int order)
+static void benchmark_size(unsigned long size, unsigned long step)
 {
 	RADIX_TREE(tree, GFP_KERNEL);
 	long long normal, tagged;
 
-	benchmark_insert(&tree, size, step, order);
-	benchmark_tagging(&tree, size, step, order);
+	benchmark_insert(&tree, size, step);
+	benchmark_tagging(&tree, size, step);
 
 	tagged = benchmark_iter(&tree, true);
 	normal = benchmark_iter(&tree, false);
 
-	printv(2, "Size: %8ld, step: %8ld, order: %d, tagged iteration: %8lld ns\n",
-		size, step, order, tagged);
-	printv(2, "Size: %8ld, step: %8ld, order: %d, normal iteration: %8lld ns\n",
-		size, step, order, normal);
+	printv(2, "Size: %8ld, step: %8ld, tagged iteration: %8lld ns\n",
+		size, step, tagged);
+	printv(2, "Size: %8ld, step: %8ld, normal iteration: %8lld ns\n",
+		size, step, normal);
 
-	benchmark_delete(&tree, size, step, order);
+	benchmark_delete(&tree, size, step);
 
 	item_kill_tree(&tree);
 	rcu_barrier();
 }
 
-static long long __benchmark_split(unsigned long index,
-				   int old_order, int new_order)
-{
-	struct timespec start, finish;
-	long long nsec;
-	RADIX_TREE(tree, GFP_ATOMIC);
-
-	item_insert_order(&tree, index, old_order);
-
-	clock_gettime(CLOCK_MONOTONIC, &start);
-	radix_tree_split(&tree, index, new_order);
-	clock_gettime(CLOCK_MONOTONIC, &finish);
-	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
-	       (finish.tv_nsec - start.tv_nsec);
-
-	item_kill_tree(&tree);
-
-	return nsec;
-
-}
-
-static void benchmark_split(unsigned long size, unsigned long step)
-{
-	int i, j, idx;
-	long long nsec = 0;
-
-
-	for (idx = 0; idx < size; idx += step) {
-		for (i = 3; i < 11; i++) {
-			for (j = 0; j < i; j++) {
-				nsec += __benchmark_split(idx, i, j);
-			}
-		}
-	}
-
-	printv(2, "Size %8ld, step %8ld, split time %10lld ns\n",
-			size, step, nsec);
-
-}
-
-static long long __benchmark_join(unsigned long index,
-				  unsigned order1, unsigned order2)
-{
-	unsigned long loc;
-	struct timespec start, finish;
-	long long nsec;
-	void *item, *item2 = item_create(index + 1, order1);
-	RADIX_TREE(tree, GFP_KERNEL);
-
-	item_insert_order(&tree, index, order2);
-	item = radix_tree_lookup(&tree, index);
-
-	clock_gettime(CLOCK_MONOTONIC, &start);
-	radix_tree_join(&tree, index + 1, order1, item2);
-	clock_gettime(CLOCK_MONOTONIC, &finish);
-	nsec = (finish.tv_sec - start.tv_sec) * NSEC_PER_SEC +
-		(finish.tv_nsec - start.tv_nsec);
-
-	loc = find_item(&tree, item);
-	if (loc == -1)
-		free(item);
-
-	item_kill_tree(&tree);
-
-	return nsec;
-}
-
-static void benchmark_join(unsigned long step)
-{
-	int i, j, idx;
-	long long nsec = 0;
-
-	for (idx = 0; idx < 1 << 10; idx += step) {
-		for (i = 1; i < 15; i++) {
-			for (j = 0; j < i; j++) {
-				nsec += __benchmark_join(idx, i, j);
-			}
-		}
-	}
-
-	printv(2, "Size %8d, step %8ld, join time %10lld ns\n",
-			1 << 10, step, nsec);
-}
-
 void benchmark(void)
 {
 	unsigned long size[] = {1 << 10, 1 << 20, 0};
@@ -242,16 +154,5 @@ void benchmark(void)
 
 	for (c = 0; size[c]; c++)
 		for (s = 0; step[s]; s++)
-			benchmark_size(size[c], step[s], 0);
-
-	for (c = 0; size[c]; c++)
-		for (s = 0; step[s]; s++)
-			benchmark_size(size[c], step[s] << 9, 9);
-
-	for (c = 0; size[c]; c++)
-		for (s = 0; step[s]; s++)
-			benchmark_split(size[c], step[s]);
-
-	for (s = 0; step[s]; s++)
-		benchmark_join(step[s]);
+			benchmark_size(size[c], step[s]);
 }
diff --git a/tools/testing/radix-tree/bitmap.c b/tools/testing/radix-tree/bitmap.c
new file mode 100644
index 000000000000..66ec4a24a203
--- /dev/null
+++ b/tools/testing/radix-tree/bitmap.c
@@ -0,0 +1,23 @@
+/* lib/bitmap.c pulls in at least two other files. */
+
+#include <linux/bitmap.h>
+
+void bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+	unsigned long *p = map + BIT_WORD(start);
+	const unsigned int size = start + len;
+	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
+	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
+
+	while (len - bits_to_clear >= 0) {
+		*p &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_LONG;
+		mask_to_clear = ~0UL;
+		p++;
+	}
+	if (len) {
+		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
+		*p &= ~mask_to_clear;
+	}
+}
diff --git a/tools/testing/radix-tree/generated/autoconf.h b/tools/testing/radix-tree/generated/autoconf.h
index cf88dc5b8832..2218b3cc184e 100644
--- a/tools/testing/radix-tree/generated/autoconf.h
+++ b/tools/testing/radix-tree/generated/autoconf.h
@@ -1 +1 @@
-#define CONFIG_RADIX_TREE_MULTIORDER 1
+#define CONFIG_XARRAY_MULTI 1
diff --git a/tools/testing/radix-tree/idr-test.c b/tools/testing/radix-tree/idr-test.c
index 321ba92c70d2..1b63bdb7688f 100644
--- a/tools/testing/radix-tree/idr-test.c
+++ b/tools/testing/radix-tree/idr-test.c
@@ -19,7 +19,7 @@
 
 #include "test.h"
 
-#define DUMMY_PTR	((void *)0x12)
+#define DUMMY_PTR	((void *)0x10)
 
 int item_idr_free(int id, void *p, void *data)
 {
@@ -227,6 +227,66 @@ void idr_u32_test(int base)
 	idr_u32_test1(&idr, 0xffffffff);
 }
 
+static void idr_align_test(struct idr *idr)
+{
+	char name[] = "Motorola 68000";
+	int i, id;
+	void *entry;
+
+	for (i = 0; i < 9; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 1; i < 10; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 1);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 2; i < 11; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 2);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 3; i < 12; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != i - 3);
+		idr_for_each_entry(idr, entry, id);
+	}
+	idr_destroy(idr);
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
+		BUG_ON(idr_alloc(idr, &name[i + 1], 0, 0, GFP_KERNEL) != 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 0);
+		BUG_ON(!idr_is_empty(idr));
+	}
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 0);
+		idr_for_each_entry(idr, entry, id);
+		idr_replace(idr, &name[i], 0);
+		idr_for_each_entry(idr, entry, id);
+		BUG_ON(idr_find(idr, 0) != &name[i]);
+		idr_remove(idr, 0);
+	}
+
+	for (i = 0; i < 8; i++) {
+		BUG_ON(idr_alloc(idr, &name[i], 0, 0, GFP_KERNEL) != 0);
+		BUG_ON(idr_alloc(idr, NULL, 0, 0, GFP_KERNEL) != 1);
+		idr_remove(idr, 1);
+		idr_for_each_entry(idr, entry, id);
+		idr_replace(idr, &name[i + 1], 0);
+		idr_for_each_entry(idr, entry, id);
+		idr_remove(idr, 0);
+	}
+}
+
 void idr_checks(void)
 {
 	unsigned long i;
@@ -307,6 +367,7 @@ void idr_checks(void)
 	idr_u32_test(4);
 	idr_u32_test(1);
 	idr_u32_test(0);
+	idr_align_test(&idr);
 }
 
 #define module_init(x)
@@ -344,16 +405,16 @@ void ida_check_conv_user(void)
 	DEFINE_IDA(ida);
 	unsigned long i;
 
-	radix_tree_cpu_dead(1);
 	for (i = 0; i < 1000000; i++) {
 		int id = ida_alloc(&ida, GFP_NOWAIT);
 		if (id == -ENOMEM) {
-			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) !=
-					BITS_PER_LONG - 2);
+			IDA_BUG_ON(&ida, ((i % IDA_BITMAP_BITS) !=
+					  BITS_PER_XA_VALUE) &&
+					 ((i % IDA_BITMAP_BITS) != 0));
 			id = ida_alloc(&ida, GFP_KERNEL);
 		} else {
 			IDA_BUG_ON(&ida, (i % IDA_BITMAP_BITS) ==
-					BITS_PER_LONG - 2);
+					BITS_PER_XA_VALUE);
 		}
 		IDA_BUG_ON(&ida, id != i);
 	}
diff --git a/tools/testing/radix-tree/iteration_check.c b/tools/testing/radix-tree/iteration_check.c
index a92bab513701..238db187aa15 100644
--- a/tools/testing/radix-tree/iteration_check.c
+++ b/tools/testing/radix-tree/iteration_check.c
@@ -1,5 +1,5 @@
 /*
- * iteration_check.c: test races having to do with radix tree iteration
+ * iteration_check.c: test races having to do with xarray iteration
  * Copyright (c) 2016 Intel Corporation
  * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
  *
@@ -12,41 +12,54 @@
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
-#include <linux/radix-tree.h>
 #include <pthread.h>
 #include "test.h"
 
 #define NUM_THREADS 5
 #define MAX_IDX 100
-#define TAG 0
-#define NEW_TAG 1
+#define TAG XA_MARK_0
+#define NEW_TAG XA_MARK_1
 
-static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_t threads[NUM_THREADS];
 static unsigned int seeds[3];
-static RADIX_TREE(tree, GFP_KERNEL);
+static DEFINE_XARRAY(array);
 static bool test_complete;
 static int max_order;
 
-/* relentlessly fill the tree with tagged entries */
+void my_item_insert(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	struct item *item = item_create(index, 0);
+	int order;
+
+retry:
+	xas_lock(&xas);
+	for (order = max_order; order >= 0; order--) {
+		xas_set_order(&xas, index, order);
+		item->order = order;
+		if (xas_find_conflict(&xas))
+			continue;
+		xas_store(&xas, item);
+		xas_set_mark(&xas, TAG);
+		break;
+	}
+	xas_unlock(&xas);
+	if (xas_nomem(&xas, GFP_KERNEL))
+		goto retry;
+	if (order < 0)
+		free(item);
+}
+
+/* relentlessly fill the array with tagged entries */
 static void *add_entries_fn(void *arg)
 {
 	rcu_register_thread();
 
 	while (!test_complete) {
 		unsigned long pgoff;
-		int order;
 
 		for (pgoff = 0; pgoff < MAX_IDX; pgoff++) {
-			pthread_mutex_lock(&tree_lock);
-			for (order = max_order; order >= 0; order--) {
-				if (item_insert_order(&tree, pgoff, order)
-						== 0) {
-					item_tag_set(&tree, pgoff, TAG);
-					break;
-				}
-			}
-			pthread_mutex_unlock(&tree_lock);
+			my_item_insert(&array, pgoff);
 		}
 	}
 
@@ -56,33 +69,25 @@ static void *add_entries_fn(void *arg)
 }
 
 /*
- * Iterate over the tagged entries, doing a radix_tree_iter_retry() as we find
- * things that have been removed and randomly resetting our iteration to the
- * next chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
- * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
- * NULL 'slot' variable.
+ * Iterate over tagged entries, retrying when we find ourselves in a deleted
+ * node and randomly pausing the iteration.
  */
 static void *tagged_iteration_fn(void *arg)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &array, 0);
+	void *entry;
 
 	rcu_register_thread();
 
 	while (!test_complete) {
+		xas_set(&xas, 0);
 		rcu_read_lock();
-		radix_tree_for_each_tagged(slot, &tree, &iter, 0, TAG) {
-			void *entry = radix_tree_deref_slot(slot);
-			if (unlikely(!entry))
+		xas_for_each_marked(&xas, entry, ULONG_MAX, TAG) {
+			if (xas_retry(&xas, entry))
 				continue;
 
-			if (radix_tree_deref_retry(entry)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
 			if (rand_r(&seeds[0]) % 50 == 0) {
-				slot = radix_tree_iter_resume(slot, &iter);
+				xas_pause(&xas);
 				rcu_read_unlock();
 				rcu_barrier();
 				rcu_read_lock();
@@ -97,33 +102,25 @@ static void *tagged_iteration_fn(void *arg)
 }
 
 /*
- * Iterate over the entries, doing a radix_tree_iter_retry() as we find things
- * that have been removed and randomly resetting our iteration to the next
- * chunk with radix_tree_iter_resume().  Both radix_tree_iter_retry() and
- * radix_tree_iter_resume() cause radix_tree_next_slot() to be called with a
- * NULL 'slot' variable.
+ * Iterate over the entries, retrying when we find ourselves in a deleted
+ * node and randomly pausing the iteration.
  */
 static void *untagged_iteration_fn(void *arg)
 {
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, &array, 0);
+	void *entry;
 
 	rcu_register_thread();
 
 	while (!test_complete) {
+		xas_set(&xas, 0);
 		rcu_read_lock();
-		radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-			void *entry = radix_tree_deref_slot(slot);
-			if (unlikely(!entry))
+		xas_for_each(&xas, entry, ULONG_MAX) {
+			if (xas_retry(&xas, entry))
 				continue;
 
-			if (radix_tree_deref_retry(entry)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
-
 			if (rand_r(&seeds[1]) % 50 == 0) {
-				slot = radix_tree_iter_resume(slot, &iter);
+				xas_pause(&xas);
 				rcu_read_unlock();
 				rcu_barrier();
 				rcu_read_lock();
@@ -138,7 +135,7 @@ static void *untagged_iteration_fn(void *arg)
 }
 
 /*
- * Randomly remove entries to help induce radix_tree_iter_retry() calls in the
+ * Randomly remove entries to help induce retries in the
  * two iteration functions.
  */
 static void *remove_entries_fn(void *arg)
@@ -147,12 +144,13 @@ static void *remove_entries_fn(void *arg)
 
 	while (!test_complete) {
 		int pgoff;
+		struct item *item;
 
 		pgoff = rand_r(&seeds[2]) % MAX_IDX;
 
-		pthread_mutex_lock(&tree_lock);
-		item_delete(&tree, pgoff);
-		pthread_mutex_unlock(&tree_lock);
+		item = xa_erase(&array, pgoff);
+		if (item)
+			item_free(item, pgoff);
 	}
 
 	rcu_unregister_thread();
@@ -165,8 +163,7 @@ static void *tag_entries_fn(void *arg)
 	rcu_register_thread();
 
 	while (!test_complete) {
-		tag_tagged_items(&tree, &tree_lock, 0, MAX_IDX, 10, TAG,
-				NEW_TAG);
+		tag_tagged_items(&array, 0, MAX_IDX, 10, TAG, NEW_TAG);
 	}
 	rcu_unregister_thread();
 	return NULL;
@@ -217,5 +214,5 @@ void iteration_test(unsigned order, unsigned test_duration)
 		}
 	}
 
-	item_kill_tree(&tree);
+	item_kill_tree(&array);
 }
diff --git a/tools/testing/radix-tree/linux/bug.h b/tools/testing/radix-tree/linux/bug.h
index 23b8ed52f8c8..03dc8a57eb99 100644
--- a/tools/testing/radix-tree/linux/bug.h
+++ b/tools/testing/radix-tree/linux/bug.h
@@ -1 +1,2 @@
+#include <stdio.h>
 #include "asm/bug.h"
diff --git a/tools/testing/radix-tree/linux/kconfig.h b/tools/testing/radix-tree/linux/kconfig.h
new file mode 100644
index 000000000000..6c8675859913
--- /dev/null
+++ b/tools/testing/radix-tree/linux/kconfig.h
@@ -0,0 +1 @@
+#include "../../../../include/linux/kconfig.h"
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 426f32f28547..4568248222ae 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -14,7 +14,12 @@
 #include "../../../include/linux/kconfig.h"
 
 #define printk printf
+#define pr_info printk
 #define pr_debug printk
 #define pr_cont printk
 
+#define __acquires(x)
+#define __releases(x)
+#define __must_hold(x)
+
 #endif /* _KERNEL_H */
diff --git a/tools/testing/radix-tree/linux/lockdep.h b/tools/testing/radix-tree/linux/lockdep.h
new file mode 100644
index 000000000000..565fccdfe6e9
--- /dev/null
+++ b/tools/testing/radix-tree/linux/lockdep.h
@@ -0,0 +1,11 @@
+#ifndef _LINUX_LOCKDEP_H
+#define _LINUX_LOCKDEP_H
+struct lock_class_key {
+	unsigned int a;
+};
+
+static inline void lockdep_set_class(spinlock_t *lock,
+					struct lock_class_key *key)
+{
+}
+#endif /* _LINUX_LOCKDEP_H */
diff --git a/tools/testing/radix-tree/linux/radix-tree.h b/tools/testing/radix-tree/linux/radix-tree.h
index 24f13d27a8da..d1635a5bef02 100644
--- a/tools/testing/radix-tree/linux/radix-tree.h
+++ b/tools/testing/radix-tree/linux/radix-tree.h
@@ -2,7 +2,6 @@
 #ifndef _TEST_RADIX_TREE_H
 #define _TEST_RADIX_TREE_H
 
-#include "generated/map-shift.h"
 #include "../../../../include/linux/radix-tree.h"
 
 extern int kmalloc_verbose;
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
index 73ed33658203..fd280b070fdb 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -6,5 +6,7 @@
 
 #define rcu_dereference_raw(p) rcu_dereference(p)
 #define rcu_dereference_protected(p, cond) rcu_dereference(p)
+#define rcu_dereference_check(p, cond) rcu_dereference(p)
+#define RCU_INIT_POINTER(p, v)	(p) = (v)
 
 #endif
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index b741686e53d6..77a44c54998f 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -214,7 +214,7 @@ void copy_tag_check(void)
 	}
 
 //	printf("\ncopying tags...\n");
-	tagged = tag_tagged_items(&tree, NULL, start, end, ITEMS, 0, 1);
+	tagged = tag_tagged_items(&tree, start, end, ITEMS, XA_MARK_0, XA_MARK_1);
 
 //	printf("checking copied tags\n");
 	assert(tagged == count);
@@ -223,7 +223,7 @@ void copy_tag_check(void)
 	/* Copy tags in several rounds */
 //	printf("\ncopying tags...\n");
 	tmp = rand() % (count / 10 + 2);
-	tagged = tag_tagged_items(&tree, NULL, start, end, tmp, 0, 2);
+	tagged = tag_tagged_items(&tree, start, end, tmp, XA_MARK_0, XA_MARK_2);
 	assert(tagged == count);
 
 //	printf("%lu %lu %lu\n", tagged, tmp, count);
@@ -236,63 +236,6 @@ void copy_tag_check(void)
 	item_kill_tree(&tree);
 }
 
-static void __locate_check(struct radix_tree_root *tree, unsigned long index,
-			unsigned order)
-{
-	struct item *item;
-	unsigned long index2;
-
-	item_insert_order(tree, index, order);
-	item = item_lookup(tree, index);
-	index2 = find_item(tree, item);
-	if (index != index2) {
-		printv(2, "index %ld order %d inserted; found %ld\n",
-		       index, order, index2);
-		abort();
-	}
-}
-
-static void __order_0_locate_check(void)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	int i;
-
-	for (i = 0; i < 50; i++)
-		__locate_check(&tree, rand() % INT_MAX, 0);
-
-	item_kill_tree(&tree);
-}
-
-static void locate_check(void)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	unsigned order;
-	unsigned long offset, index;
-
-	__order_0_locate_check();
-
-	for (order = 0; order < 20; order++) {
-		for (offset = 0; offset < (1 << (order + 3));
-		     offset += (1UL << order)) {
-			for (index = 0; index < (1UL << (order + 5));
-			     index += (1UL << order)) {
-				__locate_check(&tree, index + offset, order);
-			}
-			if (find_item(&tree, &tree) != -1)
-				abort();
-
-			item_kill_tree(&tree);
-		}
-	}
-
-	if (find_item(&tree, &tree) != -1)
-		abort();
-	__locate_check(&tree, -1, 0);
-	if (find_item(&tree, &tree) != -1)
-		abort();
-	item_kill_tree(&tree);
-}
-
 static void single_thread_tests(bool long_run)
 {
 	int i;
@@ -303,10 +246,6 @@ static void single_thread_tests(bool long_run)
 	rcu_barrier();
 	printv(2, "after multiorder_check: %d allocated, preempt %d\n",
 		nr_allocated, preempt_count);
-	locate_check();
-	rcu_barrier();
-	printv(2, "after locate_check: %d allocated, preempt %d\n",
-		nr_allocated, preempt_count);
 	tag_check();
 	rcu_barrier();
 	printv(2, "after tag_check: %d allocated, preempt %d\n",
@@ -365,6 +304,7 @@ int main(int argc, char **argv)
 	rcu_register_thread();
 	radix_tree_init();
 
+	xarray_tests();
 	regression1_test();
 	regression2_test();
 	regression3_test();
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 7bf405638b0b..ff27a74d9762 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -20,230 +20,39 @@
 
 #include "test.h"
 
-#define for_each_index(i, base, order) \
-		for (i = base; i < base + (1 << order); i++)
-
-static void __multiorder_tag_test(int index, int order)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	int base, err, i;
-
-	/* our canonical entry */
-	base = index & ~((1 << order) - 1);
-
-	printv(2, "Multiorder tag test with index %d, canonical entry %d\n",
-			index, base);
-
-	err = item_insert_order(&tree, index, order);
-	assert(!err);
-
-	/*
-	 * Verify we get collisions for covered indices.  We try and fail to
-	 * insert an exceptional entry so we don't leak memory via
-	 * item_insert_order().
-	 */
-	for_each_index(i, base, order) {
-		err = __radix_tree_insert(&tree, i, order,
-				(void *)(0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY));
-		assert(err == -EEXIST);
-	}
-
-	for_each_index(i, base, order) {
-		assert(!radix_tree_tag_get(&tree, i, 0));
-		assert(!radix_tree_tag_get(&tree, i, 1));
-	}
-
-	assert(radix_tree_tag_set(&tree, index, 0));
-
-	for_each_index(i, base, order) {
-		assert(radix_tree_tag_get(&tree, i, 0));
-		assert(!radix_tree_tag_get(&tree, i, 1));
-	}
-
-	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 1);
-	assert(radix_tree_tag_clear(&tree, index, 0));
-
-	for_each_index(i, base, order) {
-		assert(!radix_tree_tag_get(&tree, i, 0));
-		assert(radix_tree_tag_get(&tree, i, 1));
-	}
-
-	assert(radix_tree_tag_clear(&tree, index, 1));
-
-	assert(!radix_tree_tagged(&tree, 0));
-	assert(!radix_tree_tagged(&tree, 1));
-
-	item_kill_tree(&tree);
-}
-
-static void __multiorder_tag_test2(unsigned order, unsigned long index2)
+static int item_insert_order(struct xarray *xa, unsigned long index,
+			unsigned order)
 {
-	RADIX_TREE(tree, GFP_KERNEL);
-	unsigned long index = (1 << order);
-	index2 += index;
-
-	assert(item_insert_order(&tree, 0, order) == 0);
-	assert(item_insert(&tree, index2) == 0);
-
-	assert(radix_tree_tag_set(&tree, 0, 0));
-	assert(radix_tree_tag_set(&tree, index2, 0));
-
-	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, 10, 0, 1) == 2);
-
-	item_kill_tree(&tree);
-}
-
-static void multiorder_tag_tests(void)
-{
-	int i, j;
-
-	/* test multi-order entry for indices 0-7 with no sibling pointers */
-	__multiorder_tag_test(0, 3);
-	__multiorder_tag_test(5, 3);
-
-	/* test multi-order entry for indices 8-15 with no sibling pointers */
-	__multiorder_tag_test(8, 3);
-	__multiorder_tag_test(15, 3);
-
-	/*
-	 * Our order 5 entry covers indices 0-31 in a tree with height=2.
-	 * This is broken up as follows:
-	 * 0-7:		canonical entry
-	 * 8-15:	sibling 1
-	 * 16-23:	sibling 2
-	 * 24-31:	sibling 3
-	 */
-	__multiorder_tag_test(0, 5);
-	__multiorder_tag_test(29, 5);
-
-	/* same test, but with indices 32-63 */
-	__multiorder_tag_test(32, 5);
-	__multiorder_tag_test(44, 5);
-
-	/*
-	 * Our order 8 entry covers indices 0-255 in a tree with height=3.
-	 * This is broken up as follows:
-	 * 0-63:	canonical entry
-	 * 64-127:	sibling 1
-	 * 128-191:	sibling 2
-	 * 192-255:	sibling 3
-	 */
-	__multiorder_tag_test(0, 8);
-	__multiorder_tag_test(190, 8);
-
-	/* same test, but with indices 256-511 */
-	__multiorder_tag_test(256, 8);
-	__multiorder_tag_test(300, 8);
-
-	__multiorder_tag_test(0x12345678UL, 8);
-
-	for (i = 1; i < 10; i++)
-		for (j = 0; j < (10 << i); j++)
-			__multiorder_tag_test2(i, j);
-}
-
-static void multiorder_check(unsigned long index, int order)
-{
-	unsigned long i;
-	unsigned long min = index & ~((1UL << order) - 1);
-	unsigned long max = min + (1UL << order);
-	void **slot;
-	struct item *item2 = item_create(min, order);
-	RADIX_TREE(tree, GFP_KERNEL);
-
-	printv(2, "Multiorder index %ld, order %d\n", index, order);
-
-	assert(item_insert_order(&tree, index, order) == 0);
-
-	for (i = min; i < max; i++) {
-		struct item *item = item_lookup(&tree, i);
-		assert(item != 0);
-		assert(item->index == index);
-	}
-	for (i = 0; i < min; i++)
-		item_check_absent(&tree, i);
-	for (i = max; i < 2*max; i++)
-		item_check_absent(&tree, i);
-	for (i = min; i < max; i++)
-		assert(radix_tree_insert(&tree, i, item2) == -EEXIST);
-
-	slot = radix_tree_lookup_slot(&tree, index);
-	free(*slot);
-	radix_tree_replace_slot(&tree, slot, item2);
-	for (i = min; i < max; i++) {
-		struct item *item = item_lookup(&tree, i);
-		assert(item != 0);
-		assert(item->index == min);
-	}
-
-	assert(item_delete(&tree, min) != 0);
-
-	for (i = 0; i < 2*max; i++)
-		item_check_absent(&tree, i);
-}
-
-static void multiorder_shrink(unsigned long index, int order)
-{
-	unsigned long i;
-	unsigned long max = 1 << order;
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_node *node;
-
-	printv(2, "Multiorder shrink index %ld, order %d\n", index, order);
+	XA_STATE_ORDER(xas, xa, index, order);
+	struct item *item = item_create(index, order);
 
-	assert(item_insert_order(&tree, 0, order) == 0);
-
-	node = tree.rnode;
-
-	assert(item_insert(&tree, index) == 0);
-	assert(node != tree.rnode);
-
-	assert(item_delete(&tree, index) != 0);
-	assert(node == tree.rnode);
-
-	for (i = 0; i < max; i++) {
-		struct item *item = item_lookup(&tree, i);
-		assert(item != 0);
-		assert(item->index == 0);
-	}
-	for (i = max; i < 2*max; i++)
-		item_check_absent(&tree, i);
-
-	if (!item_delete(&tree, 0)) {
-		printv(2, "failed to delete index %ld (order %d)\n", index, order);
-		abort();
-	}
-
-	for (i = 0; i < 2*max; i++)
-		item_check_absent(&tree, i);
-}
-
-static void multiorder_insert_bug(void)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
+	do {
+		xas_lock(&xas);
+		xas_store(&xas, item);
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, GFP_KERNEL));
 
-	item_insert(&tree, 0);
-	radix_tree_tag_set(&tree, 0, 0);
-	item_insert_order(&tree, 3 << 6, 6);
+	if (!xas_error(&xas))
+		return 0;
 
-	item_kill_tree(&tree);
+	free(item);
+	return xas_error(&xas);
 }
 
-void multiorder_iteration(void)
+void multiorder_iteration(struct xarray *xa)
 {
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, xa, 0);
+	struct item *item;
 	int i, j, err;
 
-	printv(1, "Multiorder iteration test\n");
-
 #define NUM_ENTRIES 11
 	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
 	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};
 
+	printv(1, "Multiorder iteration test\n");
+
 	for (i = 0; i < NUM_ENTRIES; i++) {
-		err = item_insert_order(&tree, index[i], order[i]);
+		err = item_insert_order(xa, index[i], order[i]);
 		assert(!err);
 	}
 
@@ -252,14 +61,14 @@ void multiorder_iteration(void)
 		if (j <= (index[i] | ((1 << order[i]) - 1)))
 			break;
 
-		radix_tree_for_each_slot(slot, &tree, &iter, j) {
-			int height = order[i] / RADIX_TREE_MAP_SHIFT;
-			int shift = height * RADIX_TREE_MAP_SHIFT;
+		xas_set(&xas, j);
+		xas_for_each(&xas, item, ULONG_MAX) {
+			int height = order[i] / XA_CHUNK_SHIFT;
+			int shift = height * XA_CHUNK_SHIFT;
 			unsigned long mask = (1UL << order[i]) - 1;
-			struct item *item = *slot;
 
-			assert((iter.index | mask) == (index[i] | mask));
-			assert(iter.shift == shift);
+			assert((xas.xa_index | mask) == (index[i] | mask));
+			assert(xas.xa_node->shift == shift);
 			assert(!radix_tree_is_internal_node(item));
 			assert((item->index | mask) == (index[i] | mask));
 			assert(item->order == order[i]);
@@ -267,18 +76,15 @@ void multiorder_iteration(void)
 		}
 	}
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
 }
 
-void multiorder_tagged_iteration(void)
+void multiorder_tagged_iteration(struct xarray *xa)
 {
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, xa, 0);
+	struct item *item;
 	int i, j;
 
-	printv(1, "Multiorder tagged iteration test\n");
-
 #define MT_NUM_ENTRIES 9
 	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
 	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};
@@ -286,13 +92,15 @@ void multiorder_tagged_iteration(void)
 #define TAG_ENTRIES 7
 	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};
 
+	printv(1, "Multiorder tagged iteration test\n");
+
 	for (i = 0; i < MT_NUM_ENTRIES; i++)
-		assert(!item_insert_order(&tree, index[i], order[i]));
+		assert(!item_insert_order(xa, index[i], order[i]));
 
-	assert(!radix_tree_tagged(&tree, 1));
+	assert(!xa_marked(xa, XA_MARK_1));
 
 	for (i = 0; i < TAG_ENTRIES; i++)
-		assert(radix_tree_tag_set(&tree, tag_index[i], 1));
+		xa_set_mark(xa, tag_index[i], XA_MARK_1);
 
 	for (j = 0; j < 256; j++) {
 		int k;
@@ -304,23 +112,23 @@ void multiorder_tagged_iteration(void)
 				break;
 		}
 
-		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
+		xas_set(&xas, j);
+		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
 			unsigned long mask;
-			struct item *item = *slot;
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
 			mask = (1UL << order[k]) - 1;
 
-			assert((iter.index | mask) == (tag_index[i] | mask));
-			assert(!radix_tree_is_internal_node(item));
+			assert((xas.xa_index | mask) == (tag_index[i] | mask));
+			assert(!xa_is_internal(item));
 			assert((item->index | mask) == (tag_index[i] | mask));
 			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	assert(tag_tagged_items(&tree, NULL, 0, ~0UL, TAG_ENTRIES, 1, 2) ==
-			TAG_ENTRIES);
+	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
+				XA_MARK_2) == TAG_ENTRIES);
 
 	for (j = 0; j < 256; j++) {
 		int mask, k;
@@ -332,297 +140,31 @@ void multiorder_tagged_iteration(void)
 				break;
 		}
 
-		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
-			struct item *item = *slot;
+		xas_set(&xas, j);
+		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
 			mask = (1 << order[k]) - 1;
 
-			assert((iter.index | mask) == (tag_index[i] | mask));
-			assert(!radix_tree_is_internal_node(item));
+			assert((xas.xa_index | mask) == (tag_index[i] | mask));
+			assert(!xa_is_internal(item));
 			assert((item->index | mask) == (tag_index[i] | mask));
 			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	assert(tag_tagged_items(&tree, NULL, 1, ~0UL, MT_NUM_ENTRIES * 2, 1, 0)
-			== TAG_ENTRIES);
+	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
+				XA_MARK_0) == TAG_ENTRIES);
 	i = 0;
-	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
-		assert(iter.index == tag_index[i]);
+	xas_set(&xas, 0);
+	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
+		assert(xas.xa_index == tag_index[i]);
 		i++;
 	}
+	assert(i == TAG_ENTRIES);
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
-}
-
-/*
- * Basic join checks: make sure we can't find an entry in the tree after
- * a larger entry has replaced it
- */
-static void multiorder_join1(unsigned long index,
-				unsigned order1, unsigned order2)
-{
-	unsigned long loc;
-	void *item, *item2 = item_create(index + 1, order1);
-	RADIX_TREE(tree, GFP_KERNEL);
-
-	item_insert_order(&tree, index, order2);
-	item = radix_tree_lookup(&tree, index);
-	radix_tree_join(&tree, index + 1, order1, item2);
-	loc = find_item(&tree, item);
-	if (loc == -1)
-		free(item);
-	item = radix_tree_lookup(&tree, index + 1);
-	assert(item == item2);
-	item_kill_tree(&tree);
-}
-
-/*
- * Check that the accounting of exceptional entries is handled correctly
- * by joining an exceptional entry to a normal pointer.
- */
-static void multiorder_join2(unsigned order1, unsigned order2)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_node *node;
-	void *item1 = item_create(0, order1);
-	void *item2;
-
-	item_insert_order(&tree, 0, order2);
-	radix_tree_insert(&tree, 1 << order2, (void *)0x12UL);
-	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
-	assert(item2 == (void *)0x12UL);
-	assert(node->exceptional == 1);
-
-	item2 = radix_tree_lookup(&tree, 0);
-	free(item2);
-
-	radix_tree_join(&tree, 0, order1, item1);
-	item2 = __radix_tree_lookup(&tree, 1 << order2, &node, NULL);
-	assert(item2 == item1);
-	assert(node->exceptional == 0);
-	item_kill_tree(&tree);
-}
-
-/*
- * This test revealed an accounting bug for exceptional entries at one point.
- * Nodes were being freed back into the pool with an elevated exception count
- * by radix_tree_join() and then radix_tree_split() was failing to zero the
- * count of exceptional entries.
- */
-static void multiorder_join3(unsigned int order)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_node *node;
-	void **slot;
-	struct radix_tree_iter iter;
-	unsigned long i;
-
-	for (i = 0; i < (1 << order); i++) {
-		radix_tree_insert(&tree, i, (void *)0x12UL);
-	}
-
-	radix_tree_join(&tree, 0, order, (void *)0x16UL);
-	rcu_barrier();
-
-	radix_tree_split(&tree, 0, 0);
-
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x12UL);
-	}
-
-	__radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(node->exceptional == node->count);
-
-	item_kill_tree(&tree);
-}
-
-static void multiorder_join(void)
-{
-	int i, j, idx;
-
-	for (idx = 0; idx < 1024; idx = idx * 2 + 3) {
-		for (i = 1; i < 15; i++) {
-			for (j = 0; j < i; j++) {
-				multiorder_join1(idx, i, j);
-			}
-		}
-	}
-
-	for (i = 1; i < 15; i++) {
-		for (j = 0; j < i; j++) {
-			multiorder_join2(i, j);
-		}
-	}
-
-	for (i = 3; i < 10; i++) {
-		multiorder_join3(i);
-	}
-}
-
-static void check_mem(unsigned old_order, unsigned new_order, unsigned alloc)
-{
-	struct radix_tree_preload *rtp = &radix_tree_preloads;
-	if (rtp->nr != 0)
-		printv(2, "split(%u %u) remaining %u\n", old_order, new_order,
-				rtp->nr);
-	/*
-	 * Can't check for equality here as some nodes may have been
-	 * RCU-freed while we ran.  But we should never finish with more
-	 * nodes allocated since they should have all been preloaded.
-	 */
-	if (nr_allocated > alloc)
-		printv(2, "split(%u %u) allocated %u %u\n", old_order, new_order,
-				alloc, nr_allocated);
-}
-
-static void __multiorder_split(int old_order, int new_order)
-{
-	RADIX_TREE(tree, GFP_ATOMIC);
-	void **slot;
-	struct radix_tree_iter iter;
-	unsigned alloc;
-	struct item *item;
-
-	radix_tree_preload(GFP_KERNEL);
-	assert(item_insert_order(&tree, 0, old_order) == 0);
-	radix_tree_preload_end();
-
-	/* Wipe out the preloaded cache or it'll confuse check_mem() */
-	radix_tree_cpu_dead(0);
-
-	item = radix_tree_tag_set(&tree, 0, 2);
-
-	radix_tree_split_preload(old_order, new_order, GFP_KERNEL);
-	alloc = nr_allocated;
-	radix_tree_split(&tree, 0, new_order);
-	check_mem(old_order, new_order, alloc);
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		radix_tree_iter_replace(&tree, &iter, slot,
-					item_create(iter.index, new_order));
-	}
-	radix_tree_preload_end();
-
-	item_kill_tree(&tree);
-	free(item);
-}
-
-static void __multiorder_split2(int old_order, int new_order)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	void **slot;
-	struct radix_tree_iter iter;
-	struct radix_tree_node *node;
-	void *item;
-
-	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
-
-	item = __radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(item == (void *)0x12);
-	assert(node->exceptional > 0);
-
-	radix_tree_split(&tree, 0, new_order);
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		radix_tree_iter_replace(&tree, &iter, slot,
-					item_create(iter.index, new_order));
-	}
-
-	item = __radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(item != (void *)0x12);
-	assert(node->exceptional == 0);
-
-	item_kill_tree(&tree);
-}
-
-static void __multiorder_split3(int old_order, int new_order)
-{
-	RADIX_TREE(tree, GFP_KERNEL);
-	void **slot;
-	struct radix_tree_iter iter;
-	struct radix_tree_node *node;
-	void *item;
-
-	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
-
-	item = __radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(item == (void *)0x12);
-	assert(node->exceptional > 0);
-
-	radix_tree_split(&tree, 0, new_order);
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		radix_tree_iter_replace(&tree, &iter, slot, (void *)0x16);
-	}
-
-	item = __radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(item == (void *)0x16);
-	assert(node->exceptional > 0);
-
-	item_kill_tree(&tree);
-
-	__radix_tree_insert(&tree, 0, old_order, (void *)0x12);
-
-	item = __radix_tree_lookup(&tree, 0, &node, NULL);
-	assert(item == (void *)0x12);
-	assert(node->exceptional > 0);
-
-	radix_tree_split(&tree, 0, new_order);
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		if (iter.index == (1 << new_order))
-			radix_tree_iter_replace(&tree, &iter, slot,
-						(void *)0x16);
-		else
-			radix_tree_iter_replace(&tree, &iter, slot, NULL);
-	}
-
-	item = __radix_tree_lookup(&tree, 1 << new_order, &node, NULL);
-	assert(item == (void *)0x16);
-	assert(node->count == node->exceptional);
-	do {
-		node = node->parent;
-		if (!node
585 break;
586 assert(node->count == 1);
587 assert(node->exceptional == 0);
588 } while (1);
589
590 item_kill_tree(&tree);
591}
592
593static void multiorder_split(void)
594{
595 int i, j;
596
597 for (i = 3; i < 11; i++)
598 for (j = 0; j < i; j++) {
599 __multiorder_split(i, j);
600 __multiorder_split2(i, j);
601 __multiorder_split3(i, j);
602 }
603}
604
605static void multiorder_account(void)
606{
607 RADIX_TREE(tree, GFP_KERNEL);
608 struct radix_tree_node *node;
609 void **slot;
610
611 item_insert_order(&tree, 0, 5);
612
613 __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
614 __radix_tree_lookup(&tree, 0, &node, NULL);
615 assert(node->count == node->exceptional * 2);
616 radix_tree_delete(&tree, 1 << 5);
617 assert(node->exceptional == 0);
618
619 __radix_tree_insert(&tree, 1 << 5, 5, (void *)0x12);
620 __radix_tree_lookup(&tree, 1 << 5, &node, &slot);
621 assert(node->count == node->exceptional * 2);
622 __radix_tree_replace(&tree, node, slot, NULL, NULL);
623 assert(node->exceptional == 0);
624
625 item_kill_tree(&tree);
626} 168}
627 169
628bool stop_iteration = false; 170bool stop_iteration = false;
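The hunk above is the tagged-iteration half of the conversion: radix_tree_for_each_tagged() becomes xas_for_each_marked() driven by an XA_STATE cursor. A minimal sketch of that idiom follows; the array and its contents are illustrative, not part of this commit:

	#include <linux/xarray.h>

	static DEFINE_XARRAY(sketch_xa);	/* illustrative array */

	static unsigned long count_marked(unsigned long max)
	{
		XA_STATE(xas, &sketch_xa, 0);	/* cursor starting at index 0 */
		void *entry;
		unsigned long n = 0;

		rcu_read_lock();		/* lockless walks run under RCU */
		xas_for_each_marked(&xas, entry, max, XA_MARK_0)
			n++;			/* visits only entries with XA_MARK_0 set */
		rcu_read_unlock();
		return n;
	}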
@@ -645,68 +187,45 @@ static void *creator_func(void *ptr)
 
 static void *iterator_func(void *ptr)
 {
-	struct radix_tree_root *tree = ptr;
-	struct radix_tree_iter iter;
+	XA_STATE(xas, ptr, 0);
 	struct item *item;
-	void **slot;
 
 	while (!stop_iteration) {
 		rcu_read_lock();
-		radix_tree_for_each_slot(slot, tree, &iter, 0) {
-			item = radix_tree_deref_slot(slot);
-
-			if (!item)
+		xas_for_each(&xas, item, ULONG_MAX) {
+			if (xas_retry(&xas, item))
 				continue;
-			if (radix_tree_deref_retry(item)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
 
-			item_sanity(item, iter.index);
+			item_sanity(item, xas.xa_index);
 		}
 		rcu_read_unlock();
 	}
 	return NULL;
 }
 
-static void multiorder_iteration_race(void)
+static void multiorder_iteration_race(struct xarray *xa)
 {
 	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
 	pthread_t worker_thread[num_threads];
-	RADIX_TREE(tree, GFP_KERNEL);
 	int i;
 
-	pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
+	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
 	for (i = 1; i < num_threads; i++)
-		pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);
+		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
 
 	for (i = 0; i < num_threads; i++)
 		pthread_join(worker_thread[i], NULL);
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
 }
 
+static DEFINE_XARRAY(array);
+
 void multiorder_checks(void)
 {
-	int i;
-
-	for (i = 0; i < 20; i++) {
-		multiorder_check(200, i);
-		multiorder_check(0, i);
-		multiorder_check((1UL << i) + 1, i);
-	}
-
-	for (i = 0; i < 15; i++)
-		multiorder_shrink((1UL << (i + RADIX_TREE_MAP_SHIFT)), i);
-
-	multiorder_insert_bug();
-	multiorder_tag_tests();
-	multiorder_iteration();
-	multiorder_tagged_iteration();
-	multiorder_join();
-	multiorder_split();
-	multiorder_account();
-	multiorder_iteration_race();
+	multiorder_iteration(&array);
+	multiorder_tagged_iteration(&array);
+	multiorder_iteration_race(&array);
 
 	radix_tree_cpu_dead(0);
 }
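iterator_func() above shows the standard lockless-reader shape under the new API: a concurrent modification can leave the cursor on a transient retry entry, and xas_retry() both detects it and resets the cursor, replacing the old radix_tree_deref_retry()/radix_tree_iter_retry() pair. A sketch with illustrative names:

	static void walk_all(struct xarray *xa)
	{
		XA_STATE(xas, xa, 0);
		void *entry;

		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			if (xas_retry(&xas, entry))	/* transient entry: retry this index */
				continue;
			/* entry is safe to use until rcu_read_unlock() */
		}
		rcu_read_unlock();
	}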
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0aece092f40e..a61c7bcbc72d 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -44,7 +44,6 @@
 #include "regression.h"
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
-static pthread_mutex_t mt_lock = PTHREAD_MUTEX_INITIALIZER;
 
 struct page {
 	pthread_mutex_t lock;
@@ -53,12 +52,12 @@ struct page {
 	unsigned long index;
 };
 
-static struct page *page_alloc(void)
+static struct page *page_alloc(int index)
 {
 	struct page *p;
 	p = malloc(sizeof(struct page));
 	p->count = 1;
-	p->index = 1;
+	p->index = index;
 	pthread_mutex_init(&p->lock, NULL);
 
 	return p;
@@ -80,53 +79,33 @@ static void page_free(struct page *p)
 static unsigned find_get_pages(unsigned long start,
 			    unsigned int nr_pages, struct page **pages)
 {
-	unsigned int i;
-	unsigned int ret;
-	unsigned int nr_found;
+	XA_STATE(xas, &mt_tree, start);
+	struct page *page;
+	unsigned int ret = 0;
 
 	rcu_read_lock();
-restart:
-	nr_found = radix_tree_gang_lookup_slot(&mt_tree,
-				(void ***)pages, NULL, start, nr_pages);
-	ret = 0;
-	for (i = 0; i < nr_found; i++) {
-		struct page *page;
-repeat:
-		page = radix_tree_deref_slot((void **)pages[i]);
-		if (unlikely(!page))
+	xas_for_each(&xas, page, ULONG_MAX) {
+		if (xas_retry(&xas, page))
 			continue;
 
-		if (radix_tree_exception(page)) {
-			if (radix_tree_deref_retry(page)) {
-				/*
-				 * Transient condition which can only trigger
-				 * when entry at index 0 moves out of or back
-				 * to root: none yet gotten, safe to restart.
-				 */
-				assert((start | i) == 0);
-				goto restart;
-			}
-			/*
-			 * No exceptional entries are inserted in this test.
-			 */
-			assert(0);
-		}
-
 		pthread_mutex_lock(&page->lock);
-		if (!page->count) {
-			pthread_mutex_unlock(&page->lock);
-			goto repeat;
-		}
+		if (!page->count)
+			goto unlock;
+
 		/* don't actually update page refcount */
 		pthread_mutex_unlock(&page->lock);
 
 		/* Has the page moved? */
-		if (unlikely(page != *((void **)pages[i]))) {
-			goto repeat;
-		}
+		if (unlikely(page != xas_reload(&xas)))
+			goto put_page;
 
 		pages[ret] = page;
 		ret++;
+		continue;
+unlock:
+		pthread_mutex_unlock(&page->lock);
+put_page:
+		xas_reset(&xas);
 	}
 	rcu_read_unlock();
 	return ret;
@@ -145,30 +124,30 @@ static void *regression1_fn(void *arg)
 	for (j = 0; j < 1000000; j++) {
 		struct page *p;
 
-		p = page_alloc();
-		pthread_mutex_lock(&mt_lock);
+		p = page_alloc(0);
+		xa_lock(&mt_tree);
 		radix_tree_insert(&mt_tree, 0, p);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 
-		p = page_alloc();
-		pthread_mutex_lock(&mt_lock);
+		p = page_alloc(1);
+		xa_lock(&mt_tree);
 		radix_tree_insert(&mt_tree, 1, p);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 
-		pthread_mutex_lock(&mt_lock);
+		xa_lock(&mt_tree);
 		p = radix_tree_delete(&mt_tree, 1);
 		pthread_mutex_lock(&p->lock);
 		p->count--;
 		pthread_mutex_unlock(&p->lock);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 		page_free(p);
 
-		pthread_mutex_lock(&mt_lock);
+		xa_lock(&mt_tree);
 		p = radix_tree_delete(&mt_tree, 0);
 		pthread_mutex_lock(&p->lock);
 		p->count--;
 		pthread_mutex_unlock(&p->lock);
-		pthread_mutex_unlock(&mt_lock);
+		xa_unlock(&mt_tree);
 		page_free(p);
 	}
 } else {
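The mt_lock conversion above relies on the spinlock embedded in every XArray: xa_lock()/xa_unlock() take the array's own lock, so the external mutex becomes redundant. During the transition the test still calls radix_tree_insert() under xa_lock(); a pure-XArray store under that lock would follow the documented xas_nomem() retry idiom, sketched here with illustrative names:

	static void store_entry(struct xarray *xa, unsigned long index, void *entry)
	{
		XA_STATE(xas, xa, index);

		do {
			xas_lock(&xas);			/* the lock embedded in *xa */
			xas_store(&xas, entry);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));	/* allocate outside the lock, retry */
	}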
diff --git a/tools/testing/radix-tree/regression2.c b/tools/testing/radix-tree/regression2.c
index 424b91c77831..f2c7e640a919 100644
--- a/tools/testing/radix-tree/regression2.c
+++ b/tools/testing/radix-tree/regression2.c
@@ -53,9 +53,9 @@
 #include "regression.h"
 #include "test.h"
 
-#define PAGECACHE_TAG_DIRTY	0
-#define PAGECACHE_TAG_WRITEBACK	1
-#define PAGECACHE_TAG_TOWRITE	2
+#define PAGECACHE_TAG_DIRTY	XA_MARK_0
+#define PAGECACHE_TAG_WRITEBACK	XA_MARK_1
+#define PAGECACHE_TAG_TOWRITE	XA_MARK_2
 
 static RADIX_TREE(mt_tree, GFP_KERNEL);
 unsigned long page_count = 0;
@@ -92,7 +92,7 @@ void regression2_test(void)
 	/* 1. */
 	start = 0;
 	end = max_slots - 2;
-	tag_tagged_items(&mt_tree, NULL, start, end, 1,
+	tag_tagged_items(&mt_tree, start, end, 1,
 			PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
 
 	/* 2. */
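The three page cache tags map one-to-one onto the XArray's three per-entry marks, as the #define changes above show. A sketch of the plain (non-advanced) mark API, with illustrative names:

	static DEFINE_XARRAY(pages);

	static void set_dirty(unsigned long index)
	{
		xa_set_mark(&pages, index, XA_MARK_0);	/* no-op if nothing is stored at index */
	}

	static bool is_dirty(unsigned long index)
	{
		return xa_get_mark(&pages, index, XA_MARK_0);
	}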
diff --git a/tools/testing/radix-tree/regression3.c b/tools/testing/radix-tree/regression3.c
index ace2543c3eda..9f9a3b280f56 100644
--- a/tools/testing/radix-tree/regression3.c
+++ b/tools/testing/radix-tree/regression3.c
@@ -69,21 +69,6 @@ void regression3_test(void)
 			continue;
 		}
 	}
-	radix_tree_delete(&root, 1);
-
-	first = true;
-	radix_tree_for_each_contig(slot, &root, &iter, 0) {
-		printv(2, "contig %ld %p\n", iter.index, *slot);
-		if (first) {
-			radix_tree_insert(&root, 1, ptr);
-			first = false;
-		}
-		if (radix_tree_deref_retry(*slot)) {
-			printv(2, "retry at %ld\n", iter.index);
-			slot = radix_tree_iter_retry(&iter);
-			continue;
-		}
-	}
 
 	radix_tree_for_each_slot(slot, &root, &iter, 0) {
 		printv(2, "slot %ld %p\n", iter.index, *slot);
@@ -93,14 +78,6 @@ void regression3_test(void)
 		}
 	}
 
-	radix_tree_for_each_contig(slot, &root, &iter, 0) {
-		printv(2, "contig %ld %p\n", iter.index, *slot);
-		if (!iter.index) {
-			printv(2, "next at %ld\n", iter.index);
-			slot = radix_tree_iter_resume(slot, &iter);
-		}
-	}
-
 	radix_tree_tag_set(&root, 0, 0);
 	radix_tree_tag_set(&root, 1, 0);
 	radix_tree_for_each_tagged(slot, &root, &iter, 0, 0) {
diff --git a/tools/testing/radix-tree/tag_check.c b/tools/testing/radix-tree/tag_check.c
index 543181e4847b..f898957b1a19 100644
--- a/tools/testing/radix-tree/tag_check.c
+++ b/tools/testing/radix-tree/tag_check.c
@@ -24,7 +24,7 @@ __simple_checks(struct radix_tree_root *tree, unsigned long index, int tag)
 	item_tag_set(tree, index, tag);
 	ret = item_tag_get(tree, index, tag);
 	assert(ret != 0);
-	ret = tag_tagged_items(tree, NULL, first, ~0UL, 10, tag, !tag);
+	ret = tag_tagged_items(tree, first, ~0UL, 10, tag, !tag);
 	assert(ret == 1);
 	ret = item_tag_get(tree, index, !tag);
 	assert(ret != 0);
@@ -321,7 +321,7 @@ static void single_check(void)
 	assert(ret == 0);
 	verify_tag_consistency(&tree, 0);
 	verify_tag_consistency(&tree, 1);
-	ret = tag_tagged_items(&tree, NULL, first, 10, 10, 0, 1);
+	ret = tag_tagged_items(&tree, first, 10, 10, XA_MARK_0, XA_MARK_1);
 	assert(ret == 1);
 	ret = radix_tree_gang_lookup_tag(&tree, (void **)items, 0, BATCH, 1);
 	assert(ret == 1);
@@ -331,34 +331,6 @@ static void single_check(void)
 	item_kill_tree(&tree);
 }
 
-void radix_tree_clear_tags_test(void)
-{
-	unsigned long index;
-	struct radix_tree_node *node;
-	struct radix_tree_iter iter;
-	void **slot;
-
-	RADIX_TREE(tree, GFP_KERNEL);
-
-	item_insert(&tree, 0);
-	item_tag_set(&tree, 0, 0);
-	__radix_tree_lookup(&tree, 0, &node, &slot);
-	radix_tree_clear_tags(&tree, node, slot);
-	assert(item_tag_get(&tree, 0, 0) == 0);
-
-	for (index = 0; index < 1000; index++) {
-		item_insert(&tree, index);
-		item_tag_set(&tree, index, 0);
-	}
-
-	radix_tree_for_each_slot(slot, &tree, &iter, 0) {
-		radix_tree_clear_tags(&tree, iter.node, slot);
-		assert(item_tag_get(&tree, iter.index, 0) == 0);
-	}
-
-	item_kill_tree(&tree);
-}
-
 void tag_check(void)
 {
 	single_check();
@@ -376,5 +348,4 @@ void tag_check(void)
 	thrash_tags();
 	rcu_barrier();
 	printv(2, "after thrash_tags: %d allocated\n", nr_allocated);
-	radix_tree_clear_tags_test();
 }
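tag_tagged_items(), whose new signature the hunks above adopt, is essentially a "copy one mark to another" walk (its XArray implementation appears in test.c below). The core of that pattern, sketched with illustrative names:

	static void copy_marks(struct xarray *xa, unsigned long max)
	{
		XA_STATE(xas, xa, 0);
		void *entry;

		xas_lock_irq(&xas);
		xas_for_each_marked(&xas, entry, max, XA_MARK_0)
			xas_set_mark(&xas, XA_MARK_1);	/* marks the entry under the cursor */
		xas_unlock_irq(&xas);
	}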
diff --git a/tools/testing/radix-tree/test.c b/tools/testing/radix-tree/test.c
index def6015570b2..a15d0512e633 100644
--- a/tools/testing/radix-tree/test.c
+++ b/tools/testing/radix-tree/test.c
@@ -25,11 +25,6 @@ int item_tag_get(struct radix_tree_root *root, unsigned long index, int tag)
 	return radix_tree_tag_get(root, index, tag);
 }
 
-int __item_insert(struct radix_tree_root *root, struct item *item)
-{
-	return __radix_tree_insert(root, item->index, item->order, item);
-}
-
 struct item *item_create(unsigned long index, unsigned int order)
 {
 	struct item *ret = malloc(sizeof(*ret));
@@ -39,21 +34,15 @@ struct item *item_create(unsigned long index, unsigned int order)
 	return ret;
 }
 
-int item_insert_order(struct radix_tree_root *root, unsigned long index,
-			unsigned order)
+int item_insert(struct radix_tree_root *root, unsigned long index)
 {
-	struct item *item = item_create(index, order);
-	int err = __item_insert(root, item);
+	struct item *item = item_create(index, 0);
+	int err = radix_tree_insert(root, item->index, item);
 	if (err)
 		free(item);
 	return err;
 }
 
-int item_insert(struct radix_tree_root *root, unsigned long index)
-{
-	return item_insert_order(root, index, 0);
-}
-
 void item_sanity(struct item *item, unsigned long index)
 {
 	unsigned long mask;
@@ -63,16 +52,21 @@ void item_sanity(struct item *item, unsigned long index)
 	assert((item->index | mask) == (index | mask));
 }
 
+void item_free(struct item *item, unsigned long index)
+{
+	item_sanity(item, index);
+	free(item);
+}
+
 int item_delete(struct radix_tree_root *root, unsigned long index)
 {
 	struct item *item = radix_tree_delete(root, index);
 
-	if (item) {
-		item_sanity(item, index);
-		free(item);
-		return 1;
-	}
-	return 0;
+	if (!item)
+		return 0;
+
+	item_free(item, index);
+	return 1;
 }
 
 static void item_free_rcu(struct rcu_head *head)
@@ -82,9 +76,9 @@ static void item_free_rcu(struct rcu_head *head)
 	free(item);
 }
 
-int item_delete_rcu(struct radix_tree_root *root, unsigned long index)
+int item_delete_rcu(struct xarray *xa, unsigned long index)
 {
-	struct item *item = radix_tree_delete(root, index);
+	struct item *item = xa_erase(xa, index);
 
 	if (item) {
 		item_sanity(item, index);
@@ -176,59 +170,30 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
 }
 
 /* Use the same pattern as tag_pages_for_writeback() in mm/page-writeback.c */
-int tag_tagged_items(struct radix_tree_root *root, pthread_mutex_t *lock,
-			unsigned long start, unsigned long end, unsigned batch,
-			unsigned iftag, unsigned thentag)
+int tag_tagged_items(struct xarray *xa, unsigned long start, unsigned long end,
+		unsigned batch, xa_mark_t iftag, xa_mark_t thentag)
 {
-	unsigned long tagged = 0;
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, xa, start);
+	unsigned int tagged = 0;
+	struct item *item;
 
 	if (batch == 0)
 		batch = 1;
 
-	if (lock)
-		pthread_mutex_lock(lock);
-	radix_tree_for_each_tagged(slot, root, &iter, start, iftag) {
-		if (iter.index > end)
-			break;
-		radix_tree_iter_tag_set(root, &iter, thentag);
-		tagged++;
-		if ((tagged % batch) != 0)
+	xas_lock_irq(&xas);
+	xas_for_each_marked(&xas, item, end, iftag) {
+		xas_set_mark(&xas, thentag);
+		if (++tagged % batch)
 			continue;
-		slot = radix_tree_iter_resume(slot, &iter);
-		if (lock) {
-			pthread_mutex_unlock(lock);
-			rcu_barrier();
-			pthread_mutex_lock(lock);
-		}
-	}
-	if (lock)
-		pthread_mutex_unlock(lock);
-
-	return tagged;
-}
 
-/* Use the same pattern as find_swap_entry() in mm/shmem.c */
-unsigned long find_item(struct radix_tree_root *root, void *item)
-{
-	struct radix_tree_iter iter;
-	void **slot;
-	unsigned long found = -1;
-	unsigned long checked = 0;
-
-	radix_tree_for_each_slot(slot, root, &iter, 0) {
-		if (*slot == item) {
-			found = iter.index;
-			break;
-		}
-		checked++;
-		if ((checked % 4) != 0)
-			continue;
-		slot = radix_tree_iter_resume(slot, &iter);
+		xas_pause(&xas);
+		xas_unlock_irq(&xas);
+		rcu_barrier();
+		xas_lock_irq(&xas);
 	}
+	xas_unlock_irq(&xas);
 
-	return found;
+	return tagged;
 }
 
 static int verify_node(struct radix_tree_node *slot, unsigned int tag,
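The new tag_tagged_items() above demonstrates the xas_pause() batching idiom: every `batch` entries it parks the cursor, drops the lock so other threads can run (here to let rcu_barrier() make progress), and resumes where it left off. Reduced to its essentials, with illustrative names:

	static void walk_in_batches(struct xarray *xa, unsigned int batch)
	{
		XA_STATE(xas, xa, 0);
		void *entry;
		unsigned int n = 0;

		xas_lock_irq(&xas);
		xas_for_each(&xas, entry, ULONG_MAX) {
			if (++n % batch)
				continue;
			xas_pause(&xas);	/* park the cursor at a safe point */
			xas_unlock_irq(&xas);
			/* lock dropped: other writers may run here */
			xas_lock_irq(&xas);
		}
		xas_unlock_irq(&xas);
	}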
@@ -281,43 +246,31 @@ static int verify_node(struct radix_tree_node *slot, unsigned int tag,
 
 void verify_tag_consistency(struct radix_tree_root *root, unsigned int tag)
 {
-	struct radix_tree_node *node = root->rnode;
+	struct radix_tree_node *node = root->xa_head;
 	if (!radix_tree_is_internal_node(node))
 		return;
 	verify_node(node, tag, !!root_tag_get(root, tag));
 }
 
-void item_kill_tree(struct radix_tree_root *root)
+void item_kill_tree(struct xarray *xa)
 {
-	struct radix_tree_iter iter;
-	void **slot;
-	struct item *items[32];
-	int nfound;
-
-	radix_tree_for_each_slot(slot, root, &iter, 0) {
-		if (radix_tree_exceptional_entry(*slot))
-			radix_tree_delete(root, iter.index);
-	}
+	XA_STATE(xas, xa, 0);
+	void *entry;
 
-	while ((nfound = radix_tree_gang_lookup(root, (void **)items, 0, 32))) {
-		int i;
-
-		for (i = 0; i < nfound; i++) {
-			void *ret;
-
-			ret = radix_tree_delete(root, items[i]->index);
-			assert(ret == items[i]);
-			free(items[i]);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		if (!xa_is_value(entry)) {
+			item_free(entry, xas.xa_index);
 		}
+		xas_store(&xas, NULL);
 	}
-	assert(radix_tree_gang_lookup(root, (void **)items, 0, 32) == 0);
-	assert(root->rnode == NULL);
+
+	assert(xa_empty(xa));
 }
 
 void tree_verify_min_height(struct radix_tree_root *root, int maxindex)
 {
 	unsigned shift;
-	struct radix_tree_node *node = root->rnode;
+	struct radix_tree_node *node = root->xa_head;
 	if (!radix_tree_is_internal_node(node)) {
 		assert(maxindex == 0);
 		return;
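item_kill_tree() above skips free() for value entries because the XArray can store small integers directly in a slot, encoded so they are distinguishable from pointers (the successor of the radix tree's "exceptional entries"). A sketch of the round-trip:

	static void value_entry_demo(void)
	{
		void *entry = xa_mk_value(42);		/* encode the integer 42 as an entry */

		assert(xa_is_value(entry));		/* not a real pointer... */
		assert(xa_to_value(entry) == 42);	/* ...and it decodes back to 42 */
	}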
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 92d901eacf49..1ee4b2c0ad10 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -11,13 +11,11 @@ struct item {
 };
 
 struct item *item_create(unsigned long index, unsigned int order);
-int __item_insert(struct radix_tree_root *root, struct item *item);
 int item_insert(struct radix_tree_root *root, unsigned long index);
 void item_sanity(struct item *item, unsigned long index);
-int item_insert_order(struct radix_tree_root *root, unsigned long index,
-			unsigned order);
+void item_free(struct item *item, unsigned long index);
 int item_delete(struct radix_tree_root *root, unsigned long index);
-int item_delete_rcu(struct radix_tree_root *root, unsigned long index);
+int item_delete_rcu(struct xarray *xa, unsigned long index);
 struct item *item_lookup(struct radix_tree_root *root, unsigned long index);
 
 void item_check_present(struct radix_tree_root *root, unsigned long index);
@@ -29,11 +27,10 @@ void item_full_scan(struct radix_tree_root *root, unsigned long start,
 		unsigned long nr, int chunk);
 void item_kill_tree(struct radix_tree_root *root);
 
-int tag_tagged_items(struct radix_tree_root *, pthread_mutex_t *,
-		unsigned long start, unsigned long end, unsigned batch,
-		unsigned iftag, unsigned thentag);
-unsigned long find_item(struct radix_tree_root *, void *item);
+int tag_tagged_items(struct xarray *, unsigned long start, unsigned long end,
+		unsigned batch, xa_mark_t iftag, xa_mark_t thentag);
 
+void xarray_tests(void);
 void tag_check(void);
 void multiorder_checks(void);
 void iteration_test(unsigned order, unsigned duration);
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
new file mode 100644
index 000000000000..e61e43efe463
--- /dev/null
+++ b/tools/testing/radix-tree/xarray.c
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * xarray.c: Userspace shim for XArray test-suite
+ * Copyright (c) 2018 Matthew Wilcox <willy@infradead.org>
+ */
+
+#define XA_DEBUG
+#include "test.h"
+
+#define module_init(x)
+#define module_exit(x)
+#define MODULE_AUTHOR(x)
+#define MODULE_LICENSE(x)
+#define dump_stack()	assert(0)
+
+#include "../../../lib/xarray.c"
+#undef XA_DEBUG
+#include "../../../lib/test_xarray.c"
+
+void xarray_tests(void)
+{
+	xarray_checks();
+	xarray_exit();
+}
+
+int __weak main(void)
+{
+	radix_tree_init();
+	xarray_tests();
+	radix_tree_cpu_dead(1);
+	rcu_barrier();
+	if (nr_allocated)
+		printf("nr_allocated = %d\n", nr_allocated);
+	return 0;
+}
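The __weak main() lets this new shim serve double duty: linked into the full radix-tree test binary, a strong main() elsewhere takes precedence; built on its own, this default runner executes. A minimal userspace illustration of the same linker behaviour (illustrative, not from the commit):

	#include <stdio.h>

	/* Compiled alone, this prints the message below; link in another object
	 * file that defines a strong main() and that one wins instead. */
	int __attribute__((weak)) main(void)
	{
		puts("no strong main() linked in; running the weak default");
		return 0;
	}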