author    Matthew Wilcox <willy@infradead.org>  2017-11-14 08:30:11 -0500
committer Matthew Wilcox <willy@infradead.org>  2018-10-21 10:45:58 -0400
commit    b803b42823d0d9e8b6deccf01ffc2aba5d0738df (patch)
tree      a3ea387087a9f64258e0bc9300f92ee63d1313fa
parent    41aec91f55985e7f14ee75fe2f6e7bcfff0d0fae (diff)
xarray: Add XArray iterators
The xa_for_each iterator allows the user to efficiently walk a range of
the array, executing the loop body once for each entry in that range that
matches the filter. This commit also includes xa_find() and xa_find_after(),
which are helper functions for xa_for_each() but may also be useful in
their own right.

In the xas family of functions, we have xas_for_each(), xas_find(),
xas_next_entry(), xas_for_each_marked(), xas_find_marked(),
xas_next_marked() and xas_pause().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
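As a rough usage sketch (not part of this patch; the xarray pointer and
the stored entries here are hypothetical), walking every present entry
with the new iterator looks like this:

	unsigned long index = 0;	/* lowest index to retrieve */
	void *entry;

	/* XA_PRESENT matches any present entry; pass a mark such as
	 * XA_MARK_0 to filter on that mark instead. */
	xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
		/* entry is the value stored at index */
		pr_info("index %lu: %p\n", index, entry);
	}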
-rw-r--r--  include/linux/xarray.h  165
-rw-r--r--  lib/test_xarray.c       183
-rw-r--r--  lib/xarray.c            292
3 files changed, 640 insertions(+), 0 deletions(-)
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 7f740f675b96..66b10efa5a50 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -280,6 +280,10 @@ void *xa_cmpxchg(struct xarray *, unsigned long index,
 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
 void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
+void *xa_find(struct xarray *xa, unsigned long *index,
+		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
+void *xa_find_after(struct xarray *xa, unsigned long *index,
+		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
 
 /**
  * xa_init() - Initialise an empty XArray.
@@ -364,6 +368,35 @@ static inline int xa_insert(struct xarray *xa, unsigned long index,
 	return -EEXIST;
 }
 
+/**
+ * xa_for_each() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @entry: Entry retrieved from array.
+ * @index: Index of @entry.
+ * @max: Maximum index to retrieve from array.
+ * @filter: Selection criterion.
+ *
+ * Initialise @index to the lowest index you want to retrieve from the
+ * array. During the iteration, @entry will have the value of the entry
+ * stored in @xa at @index. The iteration will skip all entries in the
+ * array which do not match @filter. You may modify @index during the
+ * iteration if you want to skip or reprocess indices. It is safe to modify
+ * the array during the iteration. At the end of the iteration, @entry will
+ * be set to NULL and @index will have a value less than or equal to @max.
+ *
+ * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
+ * will spin if it hits a retry entry; if you intend to see retry entries,
+ * you should use the xas_for_each() iterator instead. The xas_for_each()
+ * iterator will expand into more inline code than xa_for_each().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each(xa, entry, index, max, filter) \
+	for (entry = xa_find(xa, &index, max, filter); entry; \
+	     entry = xa_find_after(xa, &index, max, filter))
+
 #define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
 #define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
 #define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
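The macro is a thin wrapper over the two helpers; a sketch of its
expansion for the XA_PRESENT filter (variable names are illustrative):

	unsigned long index = 0;
	void *entry;

	for (entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); entry;
	     entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) {
		/* loop body runs with index set to entry's position */
	}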
@@ -835,13 +868,16 @@ static inline bool xas_retry(struct xa_state *xas, const void *entry)
 
 void *xas_load(struct xa_state *);
 void *xas_store(struct xa_state *, void *entry);
+void *xas_find(struct xa_state *, unsigned long max);
 
 bool xas_get_mark(const struct xa_state *, xa_mark_t);
 void xas_set_mark(const struct xa_state *, xa_mark_t);
 void xas_clear_mark(const struct xa_state *, xa_mark_t);
+void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
 void xas_init_marks(const struct xa_state *);
 
 bool xas_nomem(struct xa_state *, gfp_t);
+void xas_pause(struct xa_state *);
 
 /**
  * xas_reload() - Refetch an entry from the xarray.
@@ -914,4 +950,133 @@ static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
 	xas->xa_update = update;
 }
 
+/**
+ * xas_next_entry() - Advance iterator to next present entry.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * xas_next_entry() is an inline function to optimise xarray traversal for
+ * speed. It is equivalent to calling xas_find(), and will call xas_find()
+ * for all the hard cases.
+ *
+ * Return: The next present entry after the one currently referred to by @xas.
+ */
+static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
+{
+	struct xa_node *node = xas->xa_node;
+	void *entry;
+
+	if (unlikely(xas_not_node(node) || node->shift ||
+			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
+		return xas_find(xas, max);
+
+	do {
+		if (unlikely(xas->xa_index >= max))
+			return xas_find(xas, max);
+		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
+			return xas_find(xas, max);
+		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
+		if (unlikely(xa_is_internal(entry)))
+			return xas_find(xas, max);
+		xas->xa_offset++;
+		xas->xa_index++;
+	} while (!entry);
+
+	return entry;
+}
+
+/* Private */
+static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
+		xa_mark_t mark)
+{
+	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
+	unsigned int offset = xas->xa_offset;
+
+	if (advance)
+		offset++;
+	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
+		if (offset < XA_CHUNK_SIZE) {
+			unsigned long data = *addr & (~0UL << offset);
+			if (data)
+				return __ffs(data);
+		}
+		return XA_CHUNK_SIZE;
+	}
+
+	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
+}
+
+/**
+ * xas_next_marked() - Advance iterator to next marked entry.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark to search for.
+ *
+ * xas_next_marked() is an inline function to optimise xarray traversal for
+ * speed. It is equivalent to calling xas_find_marked(), and will call
+ * xas_find_marked() for all the hard cases.
+ *
+ * Return: The next marked entry after the one currently referred to by @xas.
+ */
+static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
+		xa_mark_t mark)
+{
+	struct xa_node *node = xas->xa_node;
+	unsigned int offset;
+
+	if (unlikely(xas_not_node(node) || node->shift))
+		return xas_find_marked(xas, max, mark);
+	offset = xas_find_chunk(xas, true, mark);
+	xas->xa_offset = offset;
+	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
+	if (xas->xa_index > max)
+		return NULL;
+	if (offset == XA_CHUNK_SIZE)
+		return xas_find_marked(xas, max, mark);
+	return xa_entry(xas->xa, node, offset);
+}
+
+/*
+ * If iterating while holding a lock, drop the lock and reschedule
+ * every %XA_CHECK_SCHED loops.
+ */
+enum {
+	XA_CHECK_SCHED = 4096,
+};
+
+/**
+ * xas_for_each() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ * @max: Maximum index to retrieve from array.
+ *
+ * The loop body will be executed for each entry present in the xarray
+ * between the current xas position and @max. @entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each(xas, entry, max) \
+	for (entry = xas_find(xas, max); entry; \
+	     entry = xas_next_entry(xas, max))
+
+/**
+ * xas_for_each_marked() - Iterate over a range of an XArray.
+ * @xas: XArray operation state.
+ * @entry: Entry retrieved from the array.
+ * @max: Maximum index to retrieve from array.
+ * @mark: Mark to search for.
+ *
+ * The loop body will be executed for each marked entry in the xarray
+ * between the current xas position and @max. @entry will be set to
+ * the entry retrieved from the xarray. It is safe to delete entries
+ * from the array in the loop body. You should hold either the RCU lock
+ * or the xa_lock while iterating. If you need to drop the lock, call
+ * xas_pause() first.
+ */
+#define xas_for_each_marked(xas, entry, max, mark) \
+	for (entry = xas_find_marked(xas, max, mark); entry; \
+	     entry = xas_next_marked(xas, max, mark))
+
 #endif	/* _LINUX_XARRAY_H */
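Putting the pieces of this header together, the intended pattern for a
long modifying walk is to hold the xa_lock and combine xas_pause() with
XA_CHECK_SCHED so the lock is not held for too long. A sketch of that
idiom (assuming the walk starts at index 0):

	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned int checked = 0;

	xas_lock(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* process or delete entry here */
		if (++checked % XA_CHECK_SCHED)
			continue;
		/* drop the lock and reschedule every XA_CHECK_SCHED loops */
		xas_pause(&xas);
		xas_unlock(&xas);
		cond_resched();
		xas_lock(&xas);
	}
	xas_unlock(&xas);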
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index fb472258b639..e3c2d4d00b15 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -75,6 +75,48 @@ static noinline void check_xa_err(struct xarray *xa)
 //	XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL);
 }
 
+static noinline void check_xas_retry(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+	void *entry;
+
+	xa_store_index(xa, 0, GFP_KERNEL);
+	xa_store_index(xa, 1, GFP_KERNEL);
+
+	rcu_read_lock();
+	XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0));
+	xa_erase_index(xa, 1);
+	XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas)));
+	XA_BUG_ON(xa, xas_retry(&xas, NULL));
+	XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0)));
+	xas_reset(&xas);
+	XA_BUG_ON(xa, xas.xa_node != XAS_RESTART);
+	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+	XA_BUG_ON(xa, xas.xa_node != NULL);
+
+	XA_BUG_ON(xa, xa_store_index(xa, 1, GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, !xa_is_internal(xas_reload(&xas)));
+	xas.xa_node = XAS_RESTART;
+	XA_BUG_ON(xa, xas_next_entry(&xas, ULONG_MAX) != xa_mk_value(0));
+	rcu_read_unlock();
+
+	/* Make sure we can iterate through retry entries */
+	xas_lock(&xas);
+	xas_set(&xas, 0);
+	xas_store(&xas, XA_RETRY_ENTRY);
+	xas_set(&xas, 1);
+	xas_store(&xas, XA_RETRY_ENTRY);
+
+	xas_set(&xas, 0);
+	xas_for_each(&xas, entry, ULONG_MAX) {
+		xas_store(&xas, xa_mk_value(xas.xa_index));
+	}
+	xas_unlock(&xas);
+
+	xa_erase_index(xa, 0);
+	xa_erase_index(xa, 1);
+}
+
 static noinline void check_xa_load(struct xarray *xa)
 {
 	unsigned long i, j;
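As the xa_for_each() documentation above notes, the xas_for_each()
iterator delivers retry entries to the caller; the usual way to handle
them is the pattern this test exercises, roughly:

	XA_STATE(xas, xa, 0);
	void *entry;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX) {
		/* xas_retry() resets the walk; it restarts from xa_index */
		if (xas_retry(&xas, entry))
			continue;
		/* process entry */
	}
	rcu_read_unlock();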
@@ -217,6 +259,44 @@ static noinline void check_cmpxchg(struct xarray *xa)
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_xas_erase(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+	void *entry;
+	unsigned long i, j;
+
+	for (i = 0; i < 200; i++) {
+		for (j = i; j < 2 * i + 17; j++) {
+			xas_set(&xas, j);
+			do {
+				xas_lock(&xas);
+				xas_store(&xas, xa_mk_value(j));
+				xas_unlock(&xas);
+			} while (xas_nomem(&xas, GFP_KERNEL));
+		}
+
+		xas_set(&xas, ULONG_MAX);
+		do {
+			xas_lock(&xas);
+			xas_store(&xas, xa_mk_value(0));
+			xas_unlock(&xas);
+		} while (xas_nomem(&xas, GFP_KERNEL));
+
+		xas_lock(&xas);
+		xas_store(&xas, NULL);
+
+		xas_set(&xas, 0);
+		j = i;
+		xas_for_each(&xas, entry, ULONG_MAX) {
+			XA_BUG_ON(xa, entry != xa_mk_value(j));
+			xas_store(&xas, NULL);
+			j++;
+		}
+		xas_unlock(&xas);
+		XA_BUG_ON(xa, !xa_empty(xa));
+	}
+}
+
 static noinline void check_multi_store(struct xarray *xa)
 {
 #ifdef CONFIG_XARRAY_MULTI
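The store loop in check_xas_erase() is the standard idiom for storing
under a spinlock: xas_store() may fail for lack of memory, and
xas_nomem() then allocates with the lock dropped and asks for a retry.
In sketch form (index and item are placeholders):

	XA_STATE(xas, xa, index);

	do {
		xas_lock(&xas);
		xas_store(&xas, item);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));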
@@ -285,16 +365,119 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
+static noinline void check_multi_find(struct xarray *xa)
+{
+#ifdef CONFIG_XARRAY_MULTI
+	unsigned long index;
+
+	xa_store_order(xa, 12, 2, xa_mk_value(12), GFP_KERNEL);
+	XA_BUG_ON(xa, xa_store_index(xa, 16, GFP_KERNEL) != NULL);
+
+	index = 0;
+	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+			xa_mk_value(12));
+	XA_BUG_ON(xa, index != 12);
+	index = 13;
+	XA_BUG_ON(xa, xa_find(xa, &index, ULONG_MAX, XA_PRESENT) !=
+			xa_mk_value(12));
+	XA_BUG_ON(xa, (index < 12) || (index >= 16));
+	XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT) !=
+			xa_mk_value(16));
+	XA_BUG_ON(xa, index != 16);
+
+	xa_erase_index(xa, 12);
+	xa_erase_index(xa, 16);
+	XA_BUG_ON(xa, !xa_empty(xa));
+#endif
+}
+
+static noinline void check_multi_find_2(struct xarray *xa)
+{
+	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 10 : 1;
+	unsigned int i, j;
+	void *entry;
+
+	for (i = 0; i < max_order; i++) {
+		unsigned long index = 1UL << i;
+		for (j = 0; j < index; j++) {
+			XA_STATE(xas, xa, j + index);
+			xa_store_index(xa, index - 1, GFP_KERNEL);
+			xa_store_order(xa, index, i, xa_mk_value(index),
+					GFP_KERNEL);
+			rcu_read_lock();
+			xas_for_each(&xas, entry, ULONG_MAX) {
+				xa_erase_index(xa, index);
+			}
+			rcu_read_unlock();
+			xa_erase_index(xa, index - 1);
+			XA_BUG_ON(xa, !xa_empty(xa));
+		}
+	}
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+	unsigned long i, j, k;
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	/*
+	 * Check xa_find with all pairs between 0 and 99 inclusive,
+	 * starting at every index between 0 and 99
+	 */
+	for (i = 0; i < 100; i++) {
+		XA_BUG_ON(xa, xa_store_index(xa, i, GFP_KERNEL) != NULL);
+		xa_set_mark(xa, i, XA_MARK_0);
+		for (j = 0; j < i; j++) {
+			XA_BUG_ON(xa, xa_store_index(xa, j, GFP_KERNEL) !=
+					NULL);
+			xa_set_mark(xa, j, XA_MARK_0);
+			for (k = 0; k < 100; k++) {
+				unsigned long index = k;
+				void *entry = xa_find(xa, &index, ULONG_MAX,
+						XA_PRESENT);
+				if (k <= j)
+					XA_BUG_ON(xa, index != j);
+				else if (k <= i)
+					XA_BUG_ON(xa, index != i);
+				else
+					XA_BUG_ON(xa, entry != NULL);
+
+				index = k;
+				entry = xa_find(xa, &index, ULONG_MAX,
+						XA_MARK_0);
+				if (k <= j)
+					XA_BUG_ON(xa, index != j);
+				else if (k <= i)
+					XA_BUG_ON(xa, index != i);
+				else
+					XA_BUG_ON(xa, entry != NULL);
+			}
+			xa_erase_index(xa, j);
+			XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0));
+			XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0));
+		}
+		xa_erase_index(xa, i);
+		XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
+	}
+	XA_BUG_ON(xa, !xa_empty(xa));
+	check_multi_find(xa);
+	check_multi_find_2(xa);
+}
+
 static DEFINE_XARRAY(array);
 
 static int xarray_checks(void)
 {
 	check_xa_err(&array);
+	check_xas_retry(&array);
 	check_xa_load(&array);
 	check_xa_mark(&array);
 	check_xa_shrink(&array);
+	check_xas_erase(&array);
 	check_cmpxchg(&array);
 	check_multi_store(&array);
+	check_find(&array);
 
 	printk("XArray: %u of %u tests passed\n", tests_passed, tests_run);
 	return (tests_run == tests_passed) ? 0 : -EINVAL;
diff --git a/lib/xarray.c b/lib/xarray.c
index 2ba5a98ec618..24494f42daa6 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -128,6 +128,11 @@ static unsigned int get_offset(unsigned long index, struct xa_node *node)
 	return (index >> node->shift) & XA_CHUNK_MASK;
 }
 
+static void xas_set_offset(struct xa_state *xas)
+{
+	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
+}
+
 /* move the index either forwards (find) or backwards (sibling slot) */
 static void xas_move_index(struct xa_state *xas, unsigned long offset)
 {
@@ -136,6 +141,12 @@ static void xas_move_index(struct xa_state *xas, unsigned long offset)
 	xas->xa_index += offset << shift;
 }
 
+static void xas_advance(struct xa_state *xas)
+{
+	xas->xa_offset++;
+	xas_move_index(xas, xas->xa_offset);
+}
+
 static void *set_bounds(struct xa_state *xas)
 {
 	xas->xa_node = XAS_BOUNDS;
@@ -830,6 +841,202 @@ void xas_init_marks(const struct xa_state *xas)
 EXPORT_SYMBOL_GPL(xas_init_marks);
 
 /**
+ * xas_pause() - Pause a walk to drop a lock.
+ * @xas: XArray operation state.
+ *
+ * Some users need to pause a walk and drop the lock they're holding in
+ * order to yield to a higher priority thread or carry out an operation
+ * on an entry. Those users should call this function before they drop
+ * the lock. It resets the @xas to be suitable for the next iteration
+ * of the loop after the user has reacquired the lock. If most entries
+ * found during a walk require you to call xas_pause(), the xa_for_each()
+ * iterator may be more appropriate.
+ *
+ * Note that xas_pause() only works for forward iteration. If a user needs
+ * to pause a reverse iteration, we will need a xas_pause_rev().
+ */
+void xas_pause(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	if (xas_invalid(xas))
+		return;
+
+	if (node) {
+		unsigned int offset = xas->xa_offset;
+		while (++offset < XA_CHUNK_SIZE) {
+			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
+				break;
+		}
+		xas->xa_index += (offset - xas->xa_offset) << node->shift;
+	} else {
+		xas->xa_index++;
+	}
+	xas->xa_node = XAS_RESTART;
+}
+EXPORT_SYMBOL_GPL(xas_pause);
+
+/**
+ * xas_find() - Find the next present entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ *
+ * If the @xas has not yet been walked to an entry, return the entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we move to the
+ * next entry.
+ *
+ * If no entry is found and the array is smaller than @max, the iterator
+ * is set to the smallest index not yet in the array. This allows @xas
+ * to be immediately passed to xas_store().
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find(struct xa_state *xas, unsigned long max)
+{
+	void *entry;
+
+	if (xas_error(xas))
+		return NULL;
+
+	if (!xas->xa_node) {
+		xas->xa_index = 1;
+		return set_bounds(xas);
+	} else if (xas_top(xas->xa_node)) {
+		entry = xas_load(xas);
+		if (entry || xas_not_node(xas->xa_node))
+			return entry;
+	} else if (!xas->xa_node->shift &&
+		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
+		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
+	}
+
+	xas_advance(xas);
+
+	while (xas->xa_node && (xas->xa_index <= max)) {
+		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+			xas->xa_offset = xas->xa_node->offset + 1;
+			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+			continue;
+		}
+
+		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+		if (xa_is_node(entry)) {
+			xas->xa_node = xa_to_node(entry);
+			xas->xa_offset = 0;
+			continue;
+		}
+		if (entry && !xa_is_sibling(entry))
+			return entry;
+
+		xas_advance(xas);
+	}
+
+	if (!xas->xa_node)
+		xas->xa_node = XAS_BOUNDS;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find);
+
+/**
+ * xas_find_marked() - Find the next marked entry in the XArray.
+ * @xas: XArray operation state.
+ * @max: Highest index to return.
+ * @mark: Mark number to search for.
+ *
+ * If the @xas has not yet been walked to an entry, return the marked entry
+ * which has an index >= xas.xa_index. If it has been walked, the entry
+ * currently being pointed at has been processed, and so we return the
+ * first marked entry with an index > xas.xa_index.
+ *
+ * If no marked entry is found and the array is smaller than @max, @xas is
+ * set to the bounds state and xas->xa_index is set to the smallest index
+ * not yet in the array. This allows @xas to be immediately passed to
+ * xas_store().
+ *
+ * If no entry is found before @max is reached, @xas is set to the restart
+ * state.
+ *
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
+{
+	bool advance = true;
+	unsigned int offset;
+	void *entry;
+
+	if (xas_error(xas))
+		return NULL;
+
+	if (!xas->xa_node) {
+		xas->xa_index = 1;
+		goto out;
+	} else if (xas_top(xas->xa_node)) {
+		advance = false;
+		entry = xa_head(xas->xa);
+		xas->xa_node = NULL;
+		if (xas->xa_index > max_index(entry))
+			goto bounds;
+		if (!xa_is_node(entry)) {
+			if (xa_marked(xas->xa, mark))
+				return entry;
+			xas->xa_index = 1;
+			goto out;
+		}
+		xas->xa_node = xa_to_node(entry);
+		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
+	}
+
+	while (xas->xa_index <= max) {
+		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
+			xas->xa_offset = xas->xa_node->offset + 1;
+			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
+			if (!xas->xa_node)
+				break;
+			advance = false;
+			continue;
+		}
+
+		if (!advance) {
+			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+			if (xa_is_sibling(entry)) {
+				xas->xa_offset = xa_to_sibling(entry);
+				xas_move_index(xas, xas->xa_offset);
+			}
+		}
+
+		offset = xas_find_chunk(xas, advance, mark);
+		if (offset > xas->xa_offset) {
+			advance = false;
+			xas_move_index(xas, offset);
+			/* Mind the wrap */
+			if ((xas->xa_index - 1) >= max)
+				goto max;
+			xas->xa_offset = offset;
+			if (offset == XA_CHUNK_SIZE)
+				continue;
+		}
+
+		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
+		if (!xa_is_node(entry))
+			return entry;
+		xas->xa_node = xa_to_node(entry);
+		xas_set_offset(xas);
+	}
+
+out:
+	if (!max)
+		goto max;
+bounds:
+	xas->xa_node = XAS_BOUNDS;
+	return NULL;
+max:
+	xas->xa_node = XAS_RESTART;
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xas_find_marked);
+
+/**
  * xa_init_flags() - Initialise an empty XArray with flags.
  * @xa: XArray.
  * @flags: XA_FLAG values.
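xas_find_marked() pairs with the xas_for_each_marked() macro from the
header; a minimal sketch of a marked walk under RCU (the choice of mark
and the counting are illustrative, not part of this patch):

	static unsigned long count_marked(struct xarray *xa)
	{
		XA_STATE(xas, xa, 0);
		unsigned long count = 0;
		void *entry;

		rcu_read_lock();
		xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0) {
			if (xas_retry(&xas, entry))
				continue;
			count++;
		}
		rcu_read_unlock();
		return count;
	}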
@@ -1152,6 +1359,91 @@ void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
 }
 EXPORT_SYMBOL(xa_clear_mark);
 
+/**
+ * xa_find() - Search the XArray for an entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter, and has the lowest
+ * index that is at least @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may not find
+ * entries which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The entry, if found, otherwise %NULL.
+ */
+void *xa_find(struct xarray *xa, unsigned long *indexp,
+		unsigned long max, xa_mark_t filter)
+{
+	XA_STATE(xas, xa, *indexp);
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		if ((__force unsigned int)filter < XA_MAX_MARKS)
+			entry = xas_find_marked(&xas, max, filter);
+		else
+			entry = xas_find(&xas, max);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+
+	if (entry)
+		*indexp = xas.xa_index;
+	return entry;
+}
+EXPORT_SYMBOL(xa_find);
+
+/**
+ * xa_find_after() - Search the XArray for a present entry.
+ * @xa: XArray.
+ * @indexp: Pointer to an index.
+ * @max: Maximum index to search to.
+ * @filter: Selection criterion.
+ *
+ * Finds the entry in @xa which matches the @filter and has the lowest
+ * index that is above @indexp and no more than @max.
+ * If an entry is found, @indexp is updated to be the index of the entry.
+ * This function is protected by the RCU read lock, so it may miss entries
+ * which are being simultaneously added. It will not return an
+ * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ * Return: The pointer, if found, otherwise %NULL.
+ */
+void *xa_find_after(struct xarray *xa, unsigned long *indexp,
+		unsigned long max, xa_mark_t filter)
+{
+	XA_STATE(xas, xa, *indexp + 1);
+	void *entry;
+
+	rcu_read_lock();
+	for (;;) {
+		if ((__force unsigned int)filter < XA_MAX_MARKS)
+			entry = xas_find_marked(&xas, max, filter);
+		else
+			entry = xas_find(&xas, max);
+		if (xas.xa_shift) {
+			if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
+				continue;
+		} else {
+			if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK))
+				continue;
+		}
+		if (!xas_retry(&xas, entry))
+			break;
+	}
+	rcu_read_unlock();
+
+	if (entry)
+		*indexp = xas.xa_index;
+	return entry;
+}
+EXPORT_SYMBOL(xa_find_after);
+
 #ifdef XA_DEBUG
 void xa_dump_node(const struct xa_node *node)
 {
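For multi-index entries, check_multi_find() above pins down the contract
these two functions implement; in sketch form (assumes CONFIG_XARRAY_MULTI
and the order-2 entry the test stores at index 12):

	unsigned long index = 13;
	void *entry;

	/* The entry spans indices 12-15, so a search starting inside it
	 * still finds it, leaving index somewhere in that range. */
	entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT);

	/* xa_find_after() treats the entry as already seen and skips
	 * past the whole range to the next entry at index 16. */
	entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT);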