path: root/lib/test_xarray.c
author     Matthew Wilcox <willy@infradead.org>  2017-11-10 15:15:08 -0500
committer  Matthew Wilcox <willy@infradead.org>  2018-10-21 10:45:57 -0400
commit     58d6ea3085f2e53714810a513c61629f6d2be0a6
tree       b4b73ea07cd720063fd3a2f8b063422c5a0e698a  /lib/test_xarray.c
parent     9b89a0355144685a787b0dc5bcf7bdd6f2d02968
xarray: Add XArray unconditional store operations
xa_store() differs from radix_tree_insert() in that it will overwrite an
existing element in the array rather than returning an error. This is the
behaviour most users want, and those who want more complex behaviour
generally want to use the xas family of routines anyway.

For memory allocation, xa_store() will first attempt to request memory
from the slab allocator; if memory is not immediately available, it will
drop the xa_lock and allocate memory, keeping a pointer in the xa_state.
It does not use the per-CPU caches, although those will continue to exist
until all radix tree users are converted to the XArray.

This patch also includes xa_erase() and __xa_erase() for a streamlined
way to store NULL. Since there is no need to allocate memory in order to
store a NULL in the XArray, we do not need to trouble the user with
deciding what memory allocation flags to use.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
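As a quick illustration of the calling convention described above, here is
a minimal sketch of the normal API (not part of this patch; the array name
'example' and the function 'example_usage' are made up for illustration):

#include <linux/xarray.h>

static DEFINE_XARRAY(example);

static int example_usage(void)
{
	void *old;

	/* Unlike radix_tree_insert(), a second store to the same index
	 * silently replaces the earlier entry instead of returning an
	 * error; the previous entry is returned to the caller.
	 */
	old = xa_store(&example, 5, xa_mk_value(1), GFP_KERNEL);
	if (xa_is_err(old))
		return xa_err(old);
	old = xa_store(&example, 5, xa_mk_value(2), GFP_KERNEL);
	/* old == xa_mk_value(1) */

	/* Storing NULL never needs memory, so xa_erase() takes no gfp flags */
	old = xa_erase(&example, 5);
	/* old == xa_mk_value(2) */
	return 0;
}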
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--  lib/test_xarray.c | 177
1 file changed, 173 insertions(+), 4 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index f8b0e9ba80a4..b711030fbc32 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -30,13 +30,49 @@ void xa_dump(const struct xarray *xa) { }
 
 static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	radix_tree_insert(xa, index, xa_mk_value(index));
-	return NULL;
+	return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp);
 }
 
 static void xa_erase_index(struct xarray *xa, unsigned long index)
 {
-	radix_tree_delete(xa, index);
+	XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX));
+	XA_BUG_ON(xa, xa_load(xa, index) != NULL);
+}
+
+/*
+ * If anyone needs this, please move it to xarray.c. We have no current
+ * users outside the test suite because all current multislot users want
+ * to use the advanced API.
+ */
+static void *xa_store_order(struct xarray *xa, unsigned long index,
+		unsigned order, void *entry, gfp_t gfp)
+{
+	XA_STATE_ORDER(xas, xa, index, order);
+	void *curr;
+
+	do {
+		xas_lock(&xas);
+		curr = xas_store(&xas, entry);
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	return curr;
+}
+
+static noinline void check_xa_err(struct xarray *xa)
+{
+	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0);
+	XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0);
+#ifndef __KERNEL__
+	/* The kernel does not fail GFP_NOWAIT allocations */
+	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_NOWAIT)) != -ENOMEM);
+#endif
+	XA_BUG_ON(xa, xa_err(xa_store_index(xa, 1, GFP_KERNEL)) != 0);
+	XA_BUG_ON(xa, xa_err(xa_store(xa, 1, xa_mk_value(0), GFP_KERNEL)) != 0);
+	XA_BUG_ON(xa, xa_err(xa_erase(xa, 1)) != 0);
+// kills the test-suite :-(
+//	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 }
 
 static noinline void check_xa_load(struct xarray *xa)
@@ -69,6 +105,9 @@ static noinline void check_xa_load(struct xarray *xa)
 
 static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 {
+	unsigned int order;
+	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1;
+
 	/* NULL elements have no marks set */
 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 	xa_set_mark(xa, index, XA_MARK_0);
@@ -90,6 +129,37 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
 	xa_set_mark(xa, index, XA_MARK_0);
 	XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0));
+
+	/*
+	 * Storing a multi-index entry over entries with marks gives the
+	 * entire entry the union of the marks
+	 */
+	BUG_ON((index % 4) != 0);
+	for (order = 2; order < max_order; order++) {
+		unsigned long base = round_down(index, 1UL << order);
+		unsigned long next = base + (1UL << order);
+		unsigned long i;
+
+		XA_BUG_ON(xa, xa_store_index(xa, index + 1, GFP_KERNEL));
+		xa_set_mark(xa, index + 1, XA_MARK_0);
+		XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
+		xa_set_mark(xa, index + 2, XA_MARK_1);
+		XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
+		xa_store_order(xa, index, order, xa_mk_value(index),
+				GFP_KERNEL);
+		for (i = base; i < next; i++) {
+			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_1));
+			XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
+		}
+		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
+		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1));
+		XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2));
+		xa_erase_index(xa, index);
+		xa_erase_index(xa, next);
+		XA_BUG_ON(xa, !xa_empty(xa));
+	}
+	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
 static noinline void check_xa_mark(struct xarray *xa)
@@ -100,12 +170,111 @@ static noinline void check_xa_mark(struct xarray *xa)
 		check_xa_mark_1(xa, index);
 }
 
-static RADIX_TREE(array, GFP_KERNEL);
+static noinline void check_xa_shrink(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 1);
+	struct xa_node *node;
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+	XA_BUG_ON(xa, xa_store_index(xa, 0, GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+
+	/*
+	 * Check that erasing the entry at 1 shrinks the tree and properly
+	 * marks the node as being deleted.
+	 */
+	xas_lock(&xas);
+	XA_BUG_ON(xa, xas_load(&xas) != xa_mk_value(1));
+	node = xas.xa_node;
+	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != xa_mk_value(0));
+	XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+	XA_BUG_ON(xa, xa_load(xa, 1) != NULL);
+	XA_BUG_ON(xa, xas.xa_node != XAS_BOUNDS);
+	XA_BUG_ON(xa, xa_entry_locked(xa, node, 0) != XA_RETRY_ENTRY);
+	XA_BUG_ON(xa, xas_load(&xas) != NULL);
+	xas_unlock(&xas);
+	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+	xa_erase_index(xa, 0);
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_multi_store(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	unsigned long i, j, k;
+	unsigned int max_order = (sizeof(long) == 4) ? 30 : 60;
+
+	/* Loading from any position returns the same value */
+	xa_store_order(xa, 0, 1, xa_mk_value(0), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+	rcu_read_lock();
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 2);
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+	rcu_read_unlock();
+
+	/* Storing adjacent to the value does not alter the value */
+	xa_store(xa, 3, xa, GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0));
+	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(0));
+	XA_BUG_ON(xa, xa_load(xa, 2) != NULL);
+	rcu_read_lock();
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 3);
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 2);
+	rcu_read_unlock();
+
+	/* Overwriting multiple indexes works */
+	xa_store_order(xa, 0, 2, xa_mk_value(1), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(1));
+	XA_BUG_ON(xa, xa_load(xa, 1) != xa_mk_value(1));
+	XA_BUG_ON(xa, xa_load(xa, 2) != xa_mk_value(1));
+	XA_BUG_ON(xa, xa_load(xa, 3) != xa_mk_value(1));
+	XA_BUG_ON(xa, xa_load(xa, 4) != NULL);
+	rcu_read_lock();
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->count != 4);
+	XA_BUG_ON(xa, xa_to_node(xa_head(xa))->nr_values != 4);
+	rcu_read_unlock();
+
+	/* We can erase multiple values with a single store */
+	xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	/* Even when the first slot is empty but the others aren't */
+	xa_store_index(xa, 1, GFP_KERNEL);
+	xa_store_index(xa, 2, GFP_KERNEL);
+	xa_store_order(xa, 0, 2, NULL, GFP_KERNEL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = 0; i < max_order; i++) {
+		for (j = 0; j < max_order; j++) {
+			xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL);
+			xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL);
+
+			for (k = 0; k < max_order; k++) {
+				void *entry = xa_load(xa, (1UL << k) - 1);
+				if ((i < k) && (j < k))
+					XA_BUG_ON(xa, entry != NULL);
+				else
+					XA_BUG_ON(xa, entry != xa_mk_value(j));
+			}
+
+			xa_erase(xa, 0);
+			XA_BUG_ON(xa, !xa_empty(xa));
+		}
+	}
+#endif
+}
+
+static DEFINE_XARRAY(array);
 
 static int xarray_checks(void)
 {
+	check_xa_err(&array);
 	check_xa_load(&array);
 	check_xa_mark(&array);
+	check_xa_shrink(&array);
+	check_multi_store(&array);
 
 	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
 	return (tests_run == tests_passed) ? 0 : -EINVAL;
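
The xas_nomem() loop in xa_store_order() above is the general shape of the
allocation strategy the commit message describes: xas_store() records an
allocation failure in the xa_state, and xas_nomem() drops out of the lock,
allocates with the caller's gfp flags, keeps the memory in the xa_state and
returns true to request a retry. A minimal restatement of that idiom for a
single-index store (the function name 'store_retry' is hypothetical):

static void *store_retry(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	XA_STATE(xas, xa, index);	/* advanced-API cursor */
	void *curr;

	do {
		xas_lock(&xas);
		/* May fail for lack of memory; error kept in the xa_state */
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
		/* Allocates outside the lock; true means try the store again */
	} while (xas_nomem(&xas, gfp));

	return curr;
}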