author	Matthew Wilcox <willy@infradead.org>	2017-11-07 14:57:46 -0500
committer	Matthew Wilcox <willy@infradead.org>	2018-10-21 10:45:57 -0400
commit	ad3d6c7263e368abdc151e1cc13dc78aa39cc7a7 (patch)
tree	54279b42ac6fbede3abc75ea44fbe1554b633228
parent	992a8e60e3fea77c55589fc1c5af2304e78a5baa (diff)
xarray: Add XArray load operation
The xa_load function brings with it a lot of infrastructure; xa_empty(),
xa_is_err(), and large chunks of the XArray advanced API that are used
to implement xa_load.

As the test-suite demonstrates, it is possible to use the XArray functions
on a radix tree. The radix tree functions depend on the GFP flags being
stored in the root of the tree, so it's not possible to use the radix
tree functions on an XArray.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
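A minimal usage sketch (not part of the patch), mirroring check_xa_load() in lib/test_xarray.c below; the wrapper name xa_load_example() is illustrative. At this point in the series entries are still stored through the radix tree API (xa_store() arrives later), while the lookup, emptiness check and value entries use the XArray side added here:

#include <linux/radix-tree.h>
#include <linux/xarray.h>

/* In this series a radix tree root is usable as a struct xarray. */
static RADIX_TREE(array, GFP_KERNEL);

static void xa_load_example(void)
{
	void *entry;

	/* Populate via the radix tree API; xa_store() does not exist yet. */
	radix_tree_insert(&array, 5, xa_mk_value(5));

	/* xa_load() takes and releases the RCU lock internally. */
	entry = xa_load(&array, 5);
	if (entry)
		pr_info("index 5 holds value %lu\n", xa_to_value(entry));

	radix_tree_delete(&array, 5);
	WARN_ON(!xa_empty(&array));	/* nothing present any more */
}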
-rw-r--r--  include/linux/xarray.h                     | 336
-rw-r--r--  lib/Kconfig.debug                          |   3
-rw-r--r--  lib/Makefile                               |   1
-rw-r--r--  lib/radix-tree.c                           |  43
-rw-r--r--  lib/test_xarray.c                          |  87
-rw-r--r--  lib/xarray.c                               | 195
-rw-r--r--  tools/include/linux/kernel.h               |   1
-rw-r--r--  tools/testing/radix-tree/.gitignore        |   1
-rw-r--r--  tools/testing/radix-tree/Makefile          |   6
-rw-r--r--  tools/testing/radix-tree/linux/kernel.h    |   1
-rw-r--r--  tools/testing/radix-tree/linux/rcupdate.h  |   2
-rw-r--r--  tools/testing/radix-tree/main.c            |   1
-rw-r--r--  tools/testing/radix-tree/test.h            |   1
-rw-r--r--  tools/testing/radix-tree/xarray.c          |  28
14 files changed, 661 insertions, 45 deletions
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 52141dfc5a90..a0df8217068c 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -12,6 +12,8 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
@@ -30,6 +32,10 @@
  *
  * 0-62: Sibling entries
  * 256: Retry entry
+ *
+ * Errors are also represented as internal entries, but use the negative
+ * space (-4094 to -2). They're never stored in the slots array; only
+ * returned by the normal API.
  */
 
 #define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)
@@ -156,6 +162,42 @@ static inline bool xa_is_internal(const void *entry)
 }
 
 /**
+ * xa_is_err() - Report whether an XArray operation returned an error
+ * @entry: Result from calling an XArray function
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special value indicating an error. This function tells you
+ * whether an error occurred; xa_err() tells you which error occurred.
+ *
+ * Context: Any context.
+ * Return: %true if the entry indicates an error.
+ */
+static inline bool xa_is_err(const void *entry)
+{
+	return unlikely(xa_is_internal(entry));
+}
+
+/**
+ * xa_err() - Turn an XArray result into an errno.
+ * @entry: Result from calling an XArray function.
+ *
+ * If an XArray operation cannot complete an operation, it will return
+ * a special pointer value which encodes an errno. This function extracts
+ * the errno from the pointer value, or returns 0 if the pointer does not
+ * represent an errno.
+ *
+ * Context: Any context.
+ * Return: A negative errno or 0.
+ */
+static inline int xa_err(void *entry)
+{
+	/* xa_to_internal() would not do sign extension. */
+	if (xa_is_err(entry))
+		return (long)entry >> 2;
+	return 0;
+}
+
+/**
  * struct xarray - The anchor of the XArray.
  * @xa_lock: Lock that protects the contents of the XArray.
  *
@@ -209,6 +251,7 @@ struct xarray {
 #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)
 
 void xa_init_flags(struct xarray *, gfp_t flags);
+void *xa_load(struct xarray *, unsigned long index);
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -223,6 +266,18 @@ static inline void xa_init(struct xarray *xa)
 	xa_init_flags(xa, 0);
 }
 
+/**
+ * xa_empty() - Determine if an array has any present entries.
+ * @xa: XArray.
+ *
+ * Context: Any context.
+ * Return: %true if the array contains only NULL pointers.
+ */
+static inline bool xa_empty(const struct xarray *xa)
+{
+	return xa->xa_head == NULL;
+}
+
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
@@ -280,6 +335,65 @@ struct xa_node {
 	};
 };
 
+void xa_dump(const struct xarray *);
+void xa_dump_node(const struct xa_node *);
+
+#ifdef XA_DEBUG
+#define XA_BUG_ON(xa, x) do {			\
+		if (x) {			\
+			xa_dump(xa);		\
+			BUG();			\
+		}				\
+	} while (0)
+#define XA_NODE_BUG_ON(node, x) do {		\
+		if (x) {			\
+			if (node) xa_dump_node(node);	\
+			BUG();			\
+		}				\
+	} while (0)
+#else
+#define XA_BUG_ON(xa, x)	do { } while (0)
+#define XA_NODE_BUG_ON(node, x)	do { } while (0)
+#endif
+
+/* Private */
+static inline void *xa_head(const struct xarray *xa)
+{
+	return rcu_dereference_check(xa->xa_head,
+				lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_head_locked(const struct xarray *xa)
+{
+	return rcu_dereference_protected(xa->xa_head,
+				lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry(const struct xarray *xa,
+			const struct xa_node *node, unsigned int offset)
+{
+	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_check(node->slots[offset],
+				lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline void *xa_entry_locked(const struct xarray *xa,
+			const struct xa_node *node, unsigned int offset)
+{
+	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
+	return rcu_dereference_protected(node->slots[offset],
+				lockdep_is_held(&xa->xa_lock));
+}
+
+/* Private */
+static inline struct xa_node *xa_to_node(const void *entry)
+{
+	return (struct xa_node *)((unsigned long)entry - 2);
+}
+
 /* Private */
 static inline bool xa_is_node(const void *entry)
 {
@@ -312,4 +426,226 @@ static inline bool xa_is_sibling(const void *entry)
 
 #define XA_RETRY_ENTRY		xa_mk_internal(256)
 
+/**
+ * xa_is_retry() - Is the entry a retry entry?
+ * @entry: Entry retrieved from the XArray
+ *
+ * Return: %true if the entry is a retry entry.
+ */
+static inline bool xa_is_retry(const void *entry)
+{
+	return unlikely(entry == XA_RETRY_ENTRY);
+}
+
+/**
+ * typedef xa_update_node_t - A callback function from the XArray.
+ * @node: The node which is being processed
+ *
+ * This function is called every time the XArray updates the count of
+ * present and value entries in a node. It allows advanced users to
+ * maintain the private_list in the node.
+ *
+ * Context: The xa_lock is held and interrupts may be disabled.
+ * Implementations should not drop the xa_lock, nor re-enable
+ * interrupts.
+ */
+typedef void (*xa_update_node_t)(struct xa_node *node);
+
+/*
+ * The xa_state is opaque to its users. It contains various different pieces
+ * of state involved in the current operation on the XArray. It should be
+ * declared on the stack and passed between the various internal routines.
+ * The various elements in it should not be accessed directly, but only
+ * through the provided accessor functions. The below documentation is for
+ * the benefit of those working on the code, not for users of the XArray.
+ *
+ * @xa_node usually points to the xa_node containing the slot we're operating
+ * on (and @xa_offset is the offset in the slots array). If there is a
+ * single entry in the array at index 0, there are no allocated xa_nodes to
+ * point to, and so we store %NULL in @xa_node. @xa_node is set to
+ * the value %XAS_RESTART if the xa_state is not walked to the correct
+ * position in the tree of nodes for this operation. If an error occurs
+ * during an operation, it is set to an %XAS_ERROR value. If we run off the
+ * end of the allocated nodes, it is set to %XAS_BOUNDS.
+ */
+struct xa_state {
+	struct xarray *xa;
+	unsigned long xa_index;
+	unsigned char xa_shift;
+	unsigned char xa_sibs;
+	unsigned char xa_offset;
+	unsigned char xa_pad;		/* Helps gcc generate better code */
+	struct xa_node *xa_node;
+	struct xa_node *xa_alloc;
+	xa_update_node_t xa_update;
+};
+
+/*
+ * We encode errnos in the xas->xa_node. If an error has happened, we need to
+ * drop the lock to fix it, and once we've done so the xa_state is invalid.
+ */
+#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
+#define XAS_BOUNDS	((struct xa_node *)1UL)
+#define XAS_RESTART	((struct xa_node *)3UL)
+
+#define __XA_STATE(array, index, shift, sibs)  {	\
+	.xa = array,					\
+	.xa_index = index,				\
+	.xa_shift = shift,				\
+	.xa_sibs = sibs,				\
+	.xa_offset = 0,					\
+	.xa_pad = 0,					\
+	.xa_node = XAS_RESTART,				\
+	.xa_alloc = NULL,				\
+	.xa_update = NULL				\
+}
+
+/**
+ * XA_STATE() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @array: Array to operate on.
+ * @index: Initial index of interest.
+ *
+ * Declare and initialise an xa_state on the stack.
+ */
+#define XA_STATE(name, array, index)			\
+	struct xa_state name = __XA_STATE(array, index, 0, 0)
+
+/**
+ * XA_STATE_ORDER() - Declare an XArray operation state.
+ * @name: Name of this operation state (usually xas).
+ * @array: Array to operate on.
+ * @index: Initial index of interest.
+ * @order: Order of entry.
+ *
+ * Declare and initialise an xa_state on the stack. This variant of
+ * XA_STATE() allows you to specify the 'order' of the element you
+ * want to operate on.`
+ */
+#define XA_STATE_ORDER(name, array, index, order)	\
+	struct xa_state name = __XA_STATE(array,	\
+			(index >> order) << order,	\
+			order - (order % XA_CHUNK_SHIFT), \
+			(1U << (order % XA_CHUNK_SHIFT)) - 1)
+
+#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
+#define xas_trylock(xas)	xa_trylock((xas)->xa)
+#define xas_lock(xas)		xa_lock((xas)->xa)
+#define xas_unlock(xas)		xa_unlock((xas)->xa)
+#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
+#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
+#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
+#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
+#define xas_lock_irqsave(xas, flags) \
+				xa_lock_irqsave((xas)->xa, flags)
+#define xas_unlock_irqrestore(xas, flags) \
+				xa_unlock_irqrestore((xas)->xa, flags)
+
+/**
+ * xas_error() - Return an errno stored in the xa_state.
+ * @xas: XArray operation state.
+ *
+ * Return: 0 if no error has been noted. A negative errno if one has.
+ */
+static inline int xas_error(const struct xa_state *xas)
+{
+	return xa_err(xas->xa_node);
+}
+
+/**
+ * xas_set_err() - Note an error in the xa_state.
+ * @xas: XArray operation state.
+ * @err: Negative error number.
+ *
+ * Only call this function with a negative @err; zero or positive errors
+ * will probably not behave the way you think they should. If you want
+ * to clear the error from an xa_state, use xas_reset().
+ */
+static inline void xas_set_err(struct xa_state *xas, long err)
+{
+	xas->xa_node = XA_ERROR(err);
+}
+
+/**
+ * xas_invalid() - Is the xas in a retry or error state?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas cannot be used for operations.
+ */
+static inline bool xas_invalid(const struct xa_state *xas)
+{
+	return (unsigned long)xas->xa_node & 3;
+}
+
+/**
+ * xas_valid() - Is the xas a valid cursor into the array?
+ * @xas: XArray operation state.
+ *
+ * Return: %true if the xas can be used for operations.
+ */
+static inline bool xas_valid(const struct xa_state *xas)
+{
+	return !xas_invalid(xas);
+}
+
+/**
+ * xas_reset() - Reset an XArray operation state.
+ * @xas: XArray operation state.
+ *
+ * Resets the error or walk state of the @xas so future walks of the
+ * array will start from the root. Use this if you have dropped the
+ * xarray lock and want to reuse the xa_state.
+ *
+ * Context: Any context.
+ */
+static inline void xas_reset(struct xa_state *xas)
+{
+	xas->xa_node = XAS_RESTART;
+}
+
+/**
+ * xas_retry() - Retry the operation if appropriate.
+ * @xas: XArray operation state.
+ * @entry: Entry from xarray.
+ *
+ * The advanced functions may sometimes return an internal entry, such as
+ * a retry entry or a zero entry. This function sets up the @xas to restart
+ * the walk from the head of the array if needed.
+ *
+ * Context: Any context.
+ * Return: true if the operation needs to be retried.
+ */
+static inline bool xas_retry(struct xa_state *xas, const void *entry)
+{
+	if (!xa_is_retry(entry))
+		return false;
+	xas_reset(xas);
+	return true;
+}
+
+void *xas_load(struct xa_state *);
+
+/**
+ * xas_reload() - Refetch an entry from the xarray.
+ * @xas: XArray operation state.
+ *
+ * Use this function to check that a previously loaded entry still has
+ * the same value. This is useful for the lockless pagecache lookup where
+ * we walk the array with only the RCU lock to protect us, lock the page,
+ * then check that the page hasn't moved since we looked it up.
+ *
+ * The caller guarantees that @xas is still valid. If it may be in an
+ * error or restart state, call xas_load() instead.
+ *
+ * Return: The entry at this location in the xarray.
+ */
+static inline void *xas_reload(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (node)
+		return xa_entry(xas->xa, node, xas->xa_offset);
+	return xa_head(xas->xa);
+}
+
 #endif /* _LINUX_XARRAY_H */
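The xa_state and xas_*() declarations above make up the advanced API. As a sketch of the pattern they establish (it is essentially the body of xa_load() added to lib/xarray.c further down; the function name below is illustrative, not part of the patch):

/* Walk to @index under RCU, restarting whenever a retry entry is seen. */
static void *load_under_rcu(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);	/* on-stack cursor, starts at XAS_RESTART */
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_load(&xas);		/* never allocates or expands the tree */
	} while (xas_retry(&xas, entry));	/* a retry entry resets the walk */
	rcu_read_unlock();

	return entry;
}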
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4966c4fbe7f7..091155e12422 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1813,6 +1813,9 @@ config TEST_BITFIELD
 config TEST_UUID
 	tristate "Test functions located in the uuid module at runtime"
 
+config TEST_XARRAY
+	tristate "Test the XArray code at runtime"
+
 config TEST_OVERFLOW
 	tristate "Test check_*_overflow() functions at runtime"
 
diff --git a/lib/Makefile b/lib/Makefile
index 057385f1f145..e6f809556af5 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_TEST_PRINTF) += test_printf.o
 obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
 obj-$(CONFIG_TEST_BITFIELD) += test_bitfield.o
 obj-$(CONFIG_TEST_UUID) += test_uuid.o
+obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
 obj-$(CONFIG_TEST_PARMAN) += test_parman.o
 obj-$(CONFIG_TEST_KMOD) += test_kmod.o
 obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 8a568cea1237..b8e961428484 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -256,49 +256,6 @@ static unsigned long next_index(unsigned long index,
 }
 
 #ifndef __KERNEL__
-static void dump_node(struct radix_tree_node *node, unsigned long index)
-{
-	unsigned long i;
-
-	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d nr_values %d\n",
-		node, node->offset, index, index | node_maxindex(node),
-		node->parent,
-		node->tags[0][0], node->tags[1][0], node->tags[2][0],
-		node->shift, node->count, node->nr_values);
-
-	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
-		unsigned long first = index | (i << node->shift);
-		unsigned long last = first | ((1UL << node->shift) - 1);
-		void *entry = node->slots[i];
-		if (!entry)
-			continue;
-		if (entry == RADIX_TREE_RETRY) {
-			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
-					i, first, last, node);
-		} else if (!radix_tree_is_internal_node(entry)) {
-			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
-					entry, i, first, last, node);
-		} else if (xa_is_sibling(entry)) {
-			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
-					entry, i, first, last, node,
-					node->slots[xa_to_sibling(entry)]);
-		} else {
-			dump_node(entry_to_node(entry), first);
-		}
-	}
-}
-
-/* For debug */
-static void radix_tree_dump(struct radix_tree_root *root)
-{
-	pr_debug("radix root: %p xa_head %p tags %x\n",
-		root, root->xa_head,
-		root->xa_flags >> ROOT_TAG_SHIFT);
-	if (!radix_tree_is_internal_node(root->xa_head))
-		return;
-	dump_node(entry_to_node(root->xa_head), 0);
-}
-
 static void dump_ida_node(void *entry, unsigned long index)
 {
 	unsigned long i;
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
new file mode 100644
index 000000000000..a7248b87617f
--- /dev/null
+++ b/lib/test_xarray.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * test_xarray.c: Test the XArray API
+ * Copyright (c) 2017-2018 Microsoft Corporation
+ * Author: Matthew Wilcox <willy@infradead.org>
+ */
+
+#include <linux/xarray.h>
+#include <linux/module.h>
+
+static unsigned int tests_run;
+static unsigned int tests_passed;
+
+#ifndef XA_DEBUG
+# ifdef __KERNEL__
+void xa_dump(const struct xarray *xa) { }
+# endif
+#undef XA_BUG_ON
+#define XA_BUG_ON(xa, x) do {				\
+	tests_run++;					\
+	if (x) {					\
+		printk("BUG at %s:%d\n", __func__, __LINE__); \
+		xa_dump(xa);				\
+		dump_stack();				\
+	} else {					\
+		tests_passed++;				\
+	}						\
+} while (0)
+#endif
+
+static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+	radix_tree_insert(xa, index, xa_mk_value(index));
+	return NULL;
+}
+
+static void xa_erase_index(struct xarray *xa, unsigned long index)
+{
+	radix_tree_delete(xa, index);
+}
+
+static noinline void check_xa_load(struct xarray *xa)
+{
+	unsigned long i, j;
+
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 1024; j++) {
+			void *entry = xa_load(xa, j);
+			if (j < i)
+				XA_BUG_ON(xa, xa_to_value(entry) != j);
+			else
+				XA_BUG_ON(xa, entry);
+		}
+		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+	}
+
+	for (i = 0; i < 1024; i++) {
+		for (j = 0; j < 1024; j++) {
+			void *entry = xa_load(xa, j);
+			if (j >= i)
+				XA_BUG_ON(xa, xa_to_value(entry) != j);
+			else
+				XA_BUG_ON(xa, entry);
+		}
+		xa_erase_index(xa, i);
+	}
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static RADIX_TREE(array, GFP_KERNEL);
+
+static int xarray_checks(void)
+{
+	check_xa_load(&array);
+
+	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
+	return (tests_run == tests_passed) ? 0 : -EINVAL;
+}
+
+static void xarray_exit(void)
+{
+}
+
+module_init(xarray_checks);
+module_exit(xarray_exit);
+MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>");
+MODULE_LICENSE("GPL");
diff --git a/lib/xarray.c b/lib/xarray.c
index 862f4c64c754..19cfcbc69a68 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -24,6 +24,100 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+/* extracts the offset within this node from the index */
+static unsigned int get_offset(unsigned long index, struct xa_node *node)
+{
+	return (index >> node->shift) & XA_CHUNK_MASK;
+}
+
+/* move the index either forwards (find) or backwards (sibling slot) */
+static void xas_move_index(struct xa_state *xas, unsigned long offset)
+{
+	unsigned int shift = xas->xa_node->shift;
+	xas->xa_index &= ~XA_CHUNK_MASK << shift;
+	xas->xa_index += offset << shift;
+}
+
+static void *set_bounds(struct xa_state *xas)
+{
+	xas->xa_node = XAS_BOUNDS;
+	return NULL;
+}
+
+/*
+ * Starts a walk. If the @xas is already valid, we assume that it's on
+ * the right path and just return where we've got to. If we're in an
+ * error state, return NULL. If the index is outside the current scope
+ * of the xarray, return NULL without changing @xas->xa_node. Otherwise
+ * set @xas->xa_node to NULL and return the current head of the array.
+ */
+static void *xas_start(struct xa_state *xas)
+{
+	void *entry;
+
+	if (xas_valid(xas))
+		return xas_reload(xas);
+	if (xas_error(xas))
+		return NULL;
+
+	entry = xa_head(xas->xa);
+	if (!xa_is_node(entry)) {
+		if (xas->xa_index)
+			return set_bounds(xas);
+	} else {
+		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
+			return set_bounds(xas);
+	}
+
+	xas->xa_node = NULL;
+	return entry;
+}
+
+static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+{
+	unsigned int offset = get_offset(xas->xa_index, node);
+	void *entry = xa_entry(xas->xa, node, offset);
+
+	xas->xa_node = node;
+	if (xa_is_sibling(entry)) {
+		offset = xa_to_sibling(entry);
+		entry = xa_entry(xas->xa, node, offset);
+	}
+
+	xas->xa_offset = offset;
+	return entry;
+}
+
+/**
+ * xas_load() - Load an entry from the XArray (advanced).
+ * @xas: XArray operation state.
+ *
+ * Usually walks the @xas to the appropriate state to load the entry
+ * stored at xa_index. However, it will do nothing and return %NULL if
+ * @xas is in an error state. xas_load() will never expand the tree.
+ *
+ * If the xa_state is set up to operate on a multi-index entry, xas_load()
+ * may return %NULL or an internal entry, even if there are entries
+ * present within the range specified by @xas.
+ *
+ * Context: Any context. The caller should hold the xa_lock or the RCU lock.
+ * Return: Usually an entry in the XArray, but see description for exceptions.
+ */
+void *xas_load(struct xa_state *xas)
+{
+	void *entry = xas_start(xas);
+
+	while (xa_is_node(entry)) {
+		struct xa_node *node = xa_to_node(entry);
+
+		if (xas->xa_shift > node->shift)
+			break;
+		entry = xas_descend(xas, node);
+	}
+	return entry;
+}
+EXPORT_SYMBOL_GPL(xas_load);
+
 /**
  * xa_init_flags() - Initialise an empty XArray with flags.
  * @xa: XArray.
@@ -42,3 +136,104 @@ void xa_init_flags(struct xarray *xa, gfp_t flags)
 	xa->xa_head = NULL;
 }
 EXPORT_SYMBOL(xa_init_flags);
+
+/**
+ * xa_load() - Load an entry from an XArray.
+ * @xa: XArray.
+ * @index: index into array.
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry at @index in @xa.
+ */
+void *xa_load(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	return entry;
+}
+EXPORT_SYMBOL(xa_load);
+
+#ifdef XA_DEBUG
+void xa_dump_node(const struct xa_node *node)
+{
+	unsigned i, j;
+
+	if (!node)
+		return;
+	if ((unsigned long)node & 3) {
+		pr_cont("node %px\n", node);
+		return;
+	}
+
+	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
+		"array %px list %px %px marks",
+		node, node->parent ? "offset" : "max", node->offset,
+		node->parent, node->shift, node->count, node->nr_values,
+		node->array, node->private_list.prev, node->private_list.next);
+	for (i = 0; i < XA_MAX_MARKS; i++)
+		for (j = 0; j < XA_MARK_LONGS; j++)
+			pr_cont(" %lx", node->marks[i][j]);
+	pr_cont("\n");
+}
+
+void xa_dump_index(unsigned long index, unsigned int shift)
+{
+	if (!shift)
+		pr_info("%lu: ", index);
+	else if (shift >= BITS_PER_LONG)
+		pr_info("0-%lu: ", ~0UL);
+	else
+		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
+}
+
+void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
+{
+	if (!entry)
+		return;
+
+	xa_dump_index(index, shift);
+
+	if (xa_is_node(entry)) {
+		if (shift == 0) {
+			pr_cont("%px\n", entry);
+		} else {
+			unsigned long i;
+			struct xa_node *node = xa_to_node(entry);
+			xa_dump_node(node);
+			for (i = 0; i < XA_CHUNK_SIZE; i++)
+				xa_dump_entry(node->slots[i],
+					index + (i << node->shift), node->shift);
+		}
+	} else if (xa_is_value(entry))
+		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
+				xa_to_value(entry), entry);
+	else if (!xa_is_internal(entry))
+		pr_cont("%px\n", entry);
+	else if (xa_is_retry(entry))
+		pr_cont("retry (%ld)\n", xa_to_internal(entry));
+	else if (xa_is_sibling(entry))
+		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
+	else
+		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
+}
+
+void xa_dump(const struct xarray *xa)
+{
+	void *entry = xa->xa_head;
+	unsigned int shift = 0;
+
+	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
+			xa->xa_flags, radix_tree_tagged(xa, 0),
+			radix_tree_tagged(xa, 1), radix_tree_tagged(xa, 2));
+	if (xa_is_node(entry))
+		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
+	xa_dump_entry(entry, 0, shift);
+}
+#endif
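The error handling used above keeps errnos in xas->xa_node as internal entries in the negative space, as described in the xarray.h hunk earlier. A small sketch of how that state round-trips through the helpers (illustrative names, not part of the patch):

static DEFINE_XARRAY(err_demo);

static void xas_error_example(void)
{
	XA_STATE(xas, &err_demo, 0);

	xas_set_err(&xas, -ENOMEM);	/* xa_node becomes XA_ERROR(-ENOMEM) */
	BUG_ON(!xas_invalid(&xas));	/* the cursor can no longer be walked */
	BUG_ON(xas_error(&xas) != -ENOMEM);

	xas_reset(&xas);		/* back to XAS_RESTART; error cleared */
	BUG_ON(xas_error(&xas) != 0);
}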
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index 0ad884452c5c..6935ef94e77a 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -70,6 +70,7 @@
 #define BUG_ON(cond) assert(!(cond))
 #endif
 #endif
+#define BUG() BUG_ON(1)
 
 #if __BYTE_ORDER == __BIG_ENDIAN
 #define cpu_to_le16 bswap_16
diff --git a/tools/testing/radix-tree/.gitignore b/tools/testing/radix-tree/.gitignore
index d4706c0ffceb..3834899b6693 100644
--- a/tools/testing/radix-tree/.gitignore
+++ b/tools/testing/radix-tree/.gitignore
@@ -4,3 +4,4 @@ idr-test
 main
 multiorder
 radix-tree.c
+xarray
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile
index c0cf1c79efd5..1379f1d78d0b 100644
--- a/tools/testing/radix-tree/Makefile
+++ b/tools/testing/radix-tree/Makefile
@@ -4,7 +4,7 @@ CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
 	-fsanitize=undefined
 LDFLAGS += -fsanitize=address -fsanitize=undefined
 LDLIBS+= -lpthread -lurcu
-TARGETS = main idr-test multiorder
+TARGETS = main idr-test multiorder xarray
 CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o
 OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \
 	tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o
@@ -25,6 +25,8 @@ main: $(OFILES)
 idr-test.o: ../../../lib/test_ida.c
 idr-test: idr-test.o $(CORE_OFILES)
 
+xarray: $(CORE_OFILES)
+
 multiorder: multiorder.o $(CORE_OFILES)
 
 clean:
@@ -45,7 +47,7 @@ radix-tree.c: ../../../lib/radix-tree.c
 idr.c: ../../../lib/idr.c
 	sed -e 's/^static //' -e 's/__always_inline //' -e 's/inline //' < $< > $@
 
-xarray.o: ../../../lib/xarray.c
+xarray.o: ../../../lib/xarray.c ../../../lib/test_xarray.c
 
 generated/map-shift.h:
 	@if ! grep -qws $(SHIFT) generated/map-shift.h; then \
diff --git a/tools/testing/radix-tree/linux/kernel.h b/tools/testing/radix-tree/linux/kernel.h
index 426f32f28547..5d06ac75a14d 100644
--- a/tools/testing/radix-tree/linux/kernel.h
+++ b/tools/testing/radix-tree/linux/kernel.h
@@ -14,6 +14,7 @@
 #include "../../../include/linux/kconfig.h"
 
 #define printk printf
+#define pr_info printk
 #define pr_debug printk
 #define pr_cont printk
 
diff --git a/tools/testing/radix-tree/linux/rcupdate.h b/tools/testing/radix-tree/linux/rcupdate.h
index 73ed33658203..fd280b070fdb 100644
--- a/tools/testing/radix-tree/linux/rcupdate.h
+++ b/tools/testing/radix-tree/linux/rcupdate.h
@@ -6,5 +6,7 @@
 
 #define rcu_dereference_raw(p) rcu_dereference(p)
 #define rcu_dereference_protected(p, cond) rcu_dereference(p)
+#define rcu_dereference_check(p, cond) rcu_dereference(p)
+#define RCU_INIT_POINTER(p, v)	(p) = (v)
 
 #endif
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c
index b741686e53d6..09deaf4f0959 100644
--- a/tools/testing/radix-tree/main.c
+++ b/tools/testing/radix-tree/main.c
@@ -365,6 +365,7 @@ int main(int argc, char **argv)
 	rcu_register_thread();
 	radix_tree_init();
 
+	xarray_tests();
 	regression1_test();
 	regression2_test();
 	regression3_test();
diff --git a/tools/testing/radix-tree/test.h b/tools/testing/radix-tree/test.h
index 92d901eacf49..e3dc7a16f09b 100644
--- a/tools/testing/radix-tree/test.h
+++ b/tools/testing/radix-tree/test.h
@@ -34,6 +34,7 @@ int tag_tagged_items(struct radix_tree_root *, pthread_mutex_t *,
 		unsigned iftag, unsigned thentag);
 unsigned long find_item(struct radix_tree_root *, void *item);
 
+void xarray_tests(void);
 void tag_check(void);
 void multiorder_checks(void);
 void iteration_test(unsigned order, unsigned duration);
diff --git a/tools/testing/radix-tree/xarray.c b/tools/testing/radix-tree/xarray.c
index 9bbd667172a7..e61e43efe463 100644
--- a/tools/testing/radix-tree/xarray.c
+++ b/tools/testing/radix-tree/xarray.c
@@ -4,4 +4,32 @@
  * Copyright (c) 2018 Matthew Wilcox <willy@infradead.org>
  */
 
+#define XA_DEBUG
+#include "test.h"
+
+#define module_init(x)
+#define module_exit(x)
+#define MODULE_AUTHOR(x)
+#define MODULE_LICENSE(x)
+#define dump_stack()	assert(0)
+
 #include "../../../lib/xarray.c"
+#undef XA_DEBUG
+#include "../../../lib/test_xarray.c"
+
+void xarray_tests(void)
+{
+	xarray_checks();
+	xarray_exit();
+}
+
+int __weak main(void)
+{
+	radix_tree_init();
+	xarray_tests();
+	radix_tree_cpu_dead(1);
+	rcu_barrier();
+	if (nr_allocated)
+		printf("nr_allocated = %d\n", nr_allocated);
+	return 0;
+}