 tools/testing/radix-tree/multiorder.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------
 1 file changed, 49 insertions(+), 56 deletions(-)
diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c
index 8c41dca272b1..ff27a74d9762 100644
--- a/tools/testing/radix-tree/multiorder.c
+++ b/tools/testing/radix-tree/multiorder.c
@@ -39,21 +39,20 @@ static int item_insert_order(struct xarray *xa, unsigned long index,
 	return xas_error(&xas);
 }
 
-void multiorder_iteration(void)
+void multiorder_iteration(struct xarray *xa)
 {
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, xa, 0);
+	struct item *item;
 	int i, j, err;
 
-	printv(1, "Multiorder iteration test\n");
-
 #define NUM_ENTRIES 11
 	int index[NUM_ENTRIES] = {0, 2, 4, 8, 16, 32, 34, 36, 64, 72, 128};
 	int order[NUM_ENTRIES] = {1, 1, 2, 3, 4, 1, 0, 1, 3, 0, 7};
 
+	printv(1, "Multiorder iteration test\n");
+
 	for (i = 0; i < NUM_ENTRIES; i++) {
-		err = item_insert_order(&tree, index[i], order[i]);
+		err = item_insert_order(xa, index[i], order[i]);
 		assert(!err);
 	}
 
@@ -62,14 +61,14 @@ void multiorder_iteration(void)
 			if (j <= (index[i] | ((1 << order[i]) - 1)))
 				break;
 
-		radix_tree_for_each_slot(slot, &tree, &iter, j) {
-			int height = order[i] / RADIX_TREE_MAP_SHIFT;
-			int shift = height * RADIX_TREE_MAP_SHIFT;
+		xas_set(&xas, j);
+		xas_for_each(&xas, item, ULONG_MAX) {
+			int height = order[i] / XA_CHUNK_SHIFT;
+			int shift = height * XA_CHUNK_SHIFT;
 			unsigned long mask = (1UL << order[i]) - 1;
-			struct item *item = *slot;
 
-			assert((iter.index | mask) == (index[i] | mask));
-			assert(iter.shift == shift);
+			assert((xas.xa_index | mask) == (index[i] | mask));
+			assert(xas.xa_node->shift == shift);
 			assert(!radix_tree_is_internal_node(item));
 			assert((item->index | mask) == (index[i] | mask));
 			assert(item->order == order[i]);
@@ -77,18 +76,15 @@ void multiorder_iteration(void)
 		}
 	}
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
 }
 
-void multiorder_tagged_iteration(void)
+void multiorder_tagged_iteration(struct xarray *xa)
 {
-	RADIX_TREE(tree, GFP_KERNEL);
-	struct radix_tree_iter iter;
-	void **slot;
+	XA_STATE(xas, xa, 0);
+	struct item *item;
 	int i, j;
 
-	printv(1, "Multiorder tagged iteration test\n");
-
 #define MT_NUM_ENTRIES 9
 	int index[MT_NUM_ENTRIES] = {0, 2, 4, 16, 32, 40, 64, 72, 128};
 	int order[MT_NUM_ENTRIES] = {1, 0, 2, 4, 3, 1, 3, 0, 7};
@@ -96,13 +92,15 @@ void multiorder_tagged_iteration(void)
 #define TAG_ENTRIES 7
 	int tag_index[TAG_ENTRIES] = {0, 4, 16, 40, 64, 72, 128};
 
+	printv(1, "Multiorder tagged iteration test\n");
+
 	for (i = 0; i < MT_NUM_ENTRIES; i++)
-		assert(!item_insert_order(&tree, index[i], order[i]));
+		assert(!item_insert_order(xa, index[i], order[i]));
 
-	assert(!radix_tree_tagged(&tree, 1));
+	assert(!xa_marked(xa, XA_MARK_1));
 
 	for (i = 0; i < TAG_ENTRIES; i++)
-		assert(radix_tree_tag_set(&tree, tag_index[i], 1));
+		xa_set_mark(xa, tag_index[i], XA_MARK_1);
 
 	for (j = 0; j < 256; j++) {
 		int k;
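Radix-tree tags become XArray marks here. xa_set_mark() returns void, which is why the old assert() around radix_tree_tag_set() disappears rather than being translated, and xa_marked() asks whether any entry in the array carries a given mark. A minimal sketch of the mark API in play (the index is illustrative):

    xa_set_mark(xa, 4, XA_MARK_1);              /* mark the entry at index 4 */
    assert(xa_get_mark(xa, 4, XA_MARK_1));      /* per-index query */
    assert(xa_marked(xa, XA_MARK_1));           /* any XA_MARK_1 anywhere? */
    xa_clear_mark(xa, 4, XA_MARK_1);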
@@ -114,22 +112,22 @@ void multiorder_tagged_iteration(void)
 				break;
 		}
 
-		radix_tree_for_each_tagged(slot, &tree, &iter, j, 1) {
+		xas_set(&xas, j);
+		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1) {
 			unsigned long mask;
-			struct item *item = *slot;
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
 			mask = (1UL << order[k]) - 1;
 
-			assert((iter.index | mask) == (tag_index[i] | mask));
-			assert(!radix_tree_is_internal_node(item));
+			assert((xas.xa_index | mask) == (tag_index[i] | mask));
+			assert(!xa_is_internal(item));
 			assert((item->index | mask) == (tag_index[i] | mask));
 			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	assert(tag_tagged_items(&tree, 0, ~0UL, TAG_ENTRIES, XA_MARK_1,
+	assert(tag_tagged_items(xa, 0, ULONG_MAX, TAG_ENTRIES, XA_MARK_1,
 			XA_MARK_2) == TAG_ENTRIES);
 
 	for (j = 0; j < 256; j++) {
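Tagged iteration maps onto xas_for_each_marked(). The start index is no longer a macro argument: the cursor is positioned with xas_set() and the loop takes only an upper bound, with ~0UL spelled as the clearer ULONG_MAX. A sketch of the replacement pattern ('start' and process() are placeholders; kernel code would hold rcu_read_lock() or the xa_lock around the walk, which this single-threaded test omits):

    xas_set(&xas, start);                       /* begin the walk at 'start' */
    xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_1)
            process(item);                      /* placeholder per-entry work */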
@@ -142,29 +140,31 @@ void multiorder_tagged_iteration(void)
 				break;
 		}
 
-		radix_tree_for_each_tagged(slot, &tree, &iter, j, 2) {
-			struct item *item = *slot;
+		xas_set(&xas, j);
+		xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_2) {
 			for (k = i; index[k] < tag_index[i]; k++)
 				;
 			mask = (1 << order[k]) - 1;
 
-			assert((iter.index | mask) == (tag_index[i] | mask));
-			assert(!radix_tree_is_internal_node(item));
+			assert((xas.xa_index | mask) == (tag_index[i] | mask));
+			assert(!xa_is_internal(item));
 			assert((item->index | mask) == (tag_index[i] | mask));
 			assert(item->order == order[k]);
 			i++;
 		}
 	}
 
-	assert(tag_tagged_items(&tree, 1, ~0UL, MT_NUM_ENTRIES * 2, XA_MARK_1,
+	assert(tag_tagged_items(xa, 1, ULONG_MAX, MT_NUM_ENTRIES * 2, XA_MARK_1,
 			XA_MARK_0) == TAG_ENTRIES);
 	i = 0;
-	radix_tree_for_each_tagged(slot, &tree, &iter, 0, 0) {
-		assert(iter.index == tag_index[i]);
+	xas_set(&xas, 0);
+	xas_for_each_marked(&xas, item, ULONG_MAX, XA_MARK_0) {
+		assert(xas.xa_index == tag_index[i]);
 		i++;
 	}
+	assert(i == TAG_ENTRIES);
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
 }
 
 bool stop_iteration = false;
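Beyond the mechanical conversion, this hunk also tightens the test: the new assert(i == TAG_ENTRIES) after the final XA_MARK_0 walk verifies that the loop visited exactly the expected number of entries, so an iteration that silently returned nothing can no longer pass.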
@@ -187,52 +187,45 @@ static void *creator_func(void *ptr)
 
 static void *iterator_func(void *ptr)
 {
-	struct radix_tree_root *tree = ptr;
-	struct radix_tree_iter iter;
+	XA_STATE(xas, ptr, 0);
 	struct item *item;
-	void **slot;
 
 	while (!stop_iteration) {
 		rcu_read_lock();
-		radix_tree_for_each_slot(slot, tree, &iter, 0) {
-			item = radix_tree_deref_slot(slot);
-
-			if (!item)
+		xas_for_each(&xas, item, ULONG_MAX) {
+			if (xas_retry(&xas, item))
 				continue;
-			if (radix_tree_deref_retry(item)) {
-				slot = radix_tree_iter_retry(&iter);
-				continue;
-			}
 
-			item_sanity(item, iter.index);
+			item_sanity(item, xas.xa_index);
 		}
 		rcu_read_unlock();
 	}
 	return NULL;
 }
 
-static void multiorder_iteration_race(void)
+static void multiorder_iteration_race(struct xarray *xa)
 {
 	const int num_threads = sysconf(_SC_NPROCESSORS_ONLN);
 	pthread_t worker_thread[num_threads];
-	RADIX_TREE(tree, GFP_KERNEL);
 	int i;
 
-	pthread_create(&worker_thread[0], NULL, &creator_func, &tree);
+	pthread_create(&worker_thread[0], NULL, &creator_func, xa);
 	for (i = 1; i < num_threads; i++)
-		pthread_create(&worker_thread[i], NULL, &iterator_func, &tree);
+		pthread_create(&worker_thread[i], NULL, &iterator_func, xa);
 
 	for (i = 0; i < num_threads; i++)
 		pthread_join(worker_thread[i], NULL);
 
-	item_kill_tree(&tree);
+	item_kill_tree(xa);
 }
 
+static DEFINE_XARRAY(array);
+
 void multiorder_checks(void)
 {
-	multiorder_iteration();
-	multiorder_tagged_iteration();
-	multiorder_iteration_race();
+	multiorder_iteration(&array);
+	multiorder_tagged_iteration(&array);
+	multiorder_iteration_race(&array);
 
 	radix_tree_cpu_dead(0);
 }
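The lockless reader shrinks the most: radix_tree_deref_slot(), the NULL check, and the radix_tree_deref_retry()/radix_tree_iter_retry() pair collapse into one xas_retry() call, and xas_for_each() only returns present entries, so the explicit NULL test is no longer needed. Roughly, xas_retry() recognises the special retry entry a concurrent writer may leave behind and rewinds the cursor so the walk re-descends from the root. A simplified sketch of its behaviour (not the verbatim kernel implementation):

    static inline bool xas_retry_sketch(struct xa_state *xas, const void *entry)
    {
            if (!xa_is_retry(entry))        /* ordinary entry: caller keeps it */
                    return false;
            xas->xa_node = XAS_RESTART;     /* drop the stale node; re-descend */
            return true;                    /* caller should continue the loop */
    }

Finally, the per-function RADIX_TREE in multiorder_iteration_race() goes away: all three tests now share the single static DEFINE_XARRAY(array), relying on item_kill_tree() to empty it between runs.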