path: root/lib/xarray.c
Diffstat (limited to 'lib/xarray.c')
 lib/xarray.c | 693 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 693 insertions(+), 0 deletions(-)
diff --git a/lib/xarray.c b/lib/xarray.c
index aa86c47e532f..4596a95ed9cd 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -7,6 +7,8 @@
 
 #include <linux/bitmap.h>
 #include <linux/export.h>
+#include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/xarray.h>
 
 /*
@@ -25,6 +27,31 @@
  * @entry refers to something stored in a slot in the xarray
  */
 
+static inline unsigned int xa_lock_type(const struct xarray *xa)
+{
+	return (__force unsigned int)xa->xa_flags & 3;
+}
+
+static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
+{
+	if (lock_type == XA_LOCK_IRQ)
+		xas_lock_irq(xas);
+	else if (lock_type == XA_LOCK_BH)
+		xas_lock_bh(xas);
+	else
+		xas_lock(xas);
+}
+
+static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
+{
+	if (lock_type == XA_LOCK_IRQ)
+		xas_unlock_irq(xas);
+	else if (lock_type == XA_LOCK_BH)
+		xas_unlock_bh(xas);
+	else
+		xas_unlock(xas);
+}
+
 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
 {
 	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
@@ -67,6 +94,34 @@ static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
 	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
 }
 
+#define mark_inc(mark) do { \
+	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
+} while (0)
+
+/*
+ * xas_squash_marks() - Merge all marks to the first entry
+ * @xas: Array operation state.
+ *
+ * Set a mark on the first entry if any entry has it set. Clear marks on
+ * all sibling entries.
+ */
+static void xas_squash_marks(const struct xa_state *xas)
+{
+	unsigned int mark = 0;
+	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
+
+	if (!xas->xa_sibs)
+		return;
+
+	do {
+		unsigned long *marks = xas->xa_node->marks[mark];
+		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
+			continue;
+		__set_bit(xas->xa_offset, marks);
+		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
+	} while (mark++ != (__force unsigned)XA_MARK_MAX);
+}
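A worked illustration of the squash (the slot numbers are arbitrary and only for this example): with xa_offset = 4 and xa_sibs = 3 the store covers slots 4-7 of the node, so if XA_MARK_0 was previously set only on slot 6, the loop above sets it on slot 4 and clears it from slots 5-7:

	before: marks[XA_MARK_0] bits 7..0 = 0100 0000   (bit 6 set)
	after:  marks[XA_MARK_0] bits 7..0 = 0001 0000   (bit 4 set, bits 5-7 clear)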
+
 /* extracts the offset within this node from the index */
 static unsigned int get_offset(unsigned long index, struct xa_node *node)
 {
@@ -161,6 +216,516 @@ void *xas_load(struct xa_state *xas)
 }
 EXPORT_SYMBOL_GPL(xas_load);
 
+/* Move the radix tree node cache here */
+extern struct kmem_cache *radix_tree_node_cachep;
+extern void radix_tree_node_rcu_free(struct rcu_head *head);
+
+#define XA_RCU_FREE	((struct xarray *)1)
+
+static void xa_node_free(struct xa_node *node)
+{
+	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+	node->array = XA_RCU_FREE;
+	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
+}
+
+/*
+ * xas_destroy() - Free any resources allocated during the XArray operation.
+ * @xas: XArray operation state.
+ *
+ * This function is now internal-only.
+ */
+static void xas_destroy(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_alloc;
+
+	if (!node)
+		return;
+	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+	kmem_cache_free(radix_tree_node_cachep, node);
+	xas->xa_alloc = NULL;
+}
+
+/**
+ * xas_nomem() - Allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * If we need to add new nodes to the XArray, we try to allocate memory
+ * with GFP_NOWAIT while holding the lock, which will usually succeed.
+ * If it fails, @xas is flagged as needing memory to continue. The caller
+ * should drop the lock and call xas_nomem(). If xas_nomem() succeeds,
+ * the caller should retry the operation.
+ *
+ * Forward progress is guaranteed as one node is allocated here and
+ * stored in the xa_state where it will be found by xas_alloc(). More
+ * nodes will likely be found in the slab allocator, but we do not tie
+ * them up here.
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+bool xas_nomem(struct xa_state *xas, gfp_t gfp)
+{
+	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+		xas_destroy(xas);
+		return false;
+	}
+	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	if (!xas->xa_alloc)
+		return false;
+	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+	xas->xa_node = XAS_RESTART;
+	return true;
+}
+EXPORT_SYMBOL_GPL(xas_nomem);
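The drop-the-lock-and-retry convention this comment describes is the one xa_store() follows later in this patch; a minimal sketch (the array, index, entry and error handling are illustrative, not part of the patch):

	XA_STATE(xas, &array, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));

	if (xas_error(&xas))
		return xas_error(&xas);	/* allocation failed even with GFP_KERNEL */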
+
+/*
+ * __xas_nomem() - Drop locks and allocate memory if needed.
+ * @xas: XArray operation state.
+ * @gfp: Memory allocation flags.
+ *
+ * Internal variant of xas_nomem().
+ *
+ * Return: true if memory was needed, and was successfully allocated.
+ */
+static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
+	__must_hold(xas->xa->xa_lock)
+{
+	unsigned int lock_type = xa_lock_type(xas->xa);
+
+	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
+		xas_destroy(xas);
+		return false;
+	}
+	if (gfpflags_allow_blocking(gfp)) {
+		xas_unlock_type(xas, lock_type);
+		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas_lock_type(xas, lock_type);
+	} else {
+		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	}
+	if (!xas->xa_alloc)
+		return false;
+	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
+	xas->xa_node = XAS_RESTART;
+	return true;
+}
+
+static void xas_update(struct xa_state *xas, struct xa_node *node)
+{
+	if (xas->xa_update)
+		xas->xa_update(node);
+	else
+		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+}
+
+static void *xas_alloc(struct xa_state *xas, unsigned int shift)
+{
+	struct xa_node *parent = xas->xa_node;
+	struct xa_node *node = xas->xa_alloc;
+
+	if (xas_invalid(xas))
+		return NULL;
+
+	if (node) {
+		xas->xa_alloc = NULL;
+	} else {
+		node = kmem_cache_alloc(radix_tree_node_cachep,
+					GFP_NOWAIT | __GFP_NOWARN);
+		if (!node) {
+			xas_set_err(xas, -ENOMEM);
+			return NULL;
+		}
+	}
+
+	if (parent) {
+		node->offset = xas->xa_offset;
+		parent->count++;
+		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
+		xas_update(xas, parent);
+	}
+	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
+	node->shift = shift;
+	node->count = 0;
+	node->nr_values = 0;
+	RCU_INIT_POINTER(node->parent, xas->xa_node);
+	node->array = xas->xa;
+
+	return node;
+}
+
+/*
+ * Use this to calculate the maximum index that will need to be created
+ * in order to add the entry described by @xas. Because we cannot store a
+ * multiple-index entry at index 0, the calculation is a little more complex
+ * than you might expect.
+ */
+static unsigned long xas_max(struct xa_state *xas)
+{
+	unsigned long max = xas->xa_index;
+
+#ifdef CONFIG_XARRAY_MULTI
+	if (xas->xa_shift || xas->xa_sibs) {
+		unsigned long mask;
+		mask = (((xas->xa_sibs + 1UL) << xas->xa_shift) - 1);
+		max |= mask;
+		if (mask == max)
+			max++;
+	}
+#endif
+
+	return max;
+}
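As a worked example of the index-0 complication mentioned above: a four-slot multi-index store at index 0 has xa_index = 0, xa_shift = 0 and xa_sibs = 3, so mask = ((3 + 1) << 0) - 1 = 3 and max |= mask yields 3; because mask == max, the function returns 4 instead, which is enough to make xas_expand() allocate a node rather than leave a multi-index entry in the single head slot.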
+
+/* The maximum index that can be contained in the array without expanding it */
+static unsigned long max_index(void *entry)
+{
+	if (!xa_is_node(entry))
+		return 0;
+	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
+}
+
+static void xas_shrink(struct xa_state *xas)
+{
+	struct xarray *xa = xas->xa;
+	struct xa_node *node = xas->xa_node;
+
+	for (;;) {
+		void *entry;
+
+		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+		if (node->count != 1)
+			break;
+		entry = xa_entry_locked(xa, node, 0);
+		if (!entry)
+			break;
+		if (!xa_is_node(entry) && node->shift)
+			break;
+		xas->xa_node = XAS_BOUNDS;
+
+		RCU_INIT_POINTER(xa->xa_head, entry);
+
+		node->count = 0;
+		node->nr_values = 0;
+		if (!xa_is_node(entry))
+			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
+		xas_update(xas, node);
+		xa_node_free(node);
+		if (!xa_is_node(entry))
+			break;
+		node = xa_to_node(entry);
+		node->parent = NULL;
+	}
+}
+
+/*
+ * xas_delete_node() - Attempt to delete an xa_node
+ * @xas: Array operation state.
+ *
+ * Attempts to delete the @xas->xa_node. This will fail if xa->node has
+ * a non-zero reference count.
+ */
+static void xas_delete_node(struct xa_state *xas)
+{
+	struct xa_node *node = xas->xa_node;
+
+	for (;;) {
+		struct xa_node *parent;
+
+		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+		if (node->count)
+			break;
+
+		parent = xa_parent_locked(xas->xa, node);
+		xas->xa_node = parent;
+		xas->xa_offset = node->offset;
+		xa_node_free(node);
+
+		if (!parent) {
+			xas->xa->xa_head = NULL;
+			xas->xa_node = XAS_BOUNDS;
+			return;
+		}
+
+		parent->slots[xas->xa_offset] = NULL;
+		parent->count--;
+		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
+		node = parent;
+		xas_update(xas, node);
+	}
+
+	if (!node->parent)
+		xas_shrink(xas);
+}
+
+/**
+ * xas_free_nodes() - Free this node and all nodes that it references
+ * @xas: Array operation state.
+ * @top: Node to free
+ *
+ * This node has been removed from the tree. We must now free it and all
+ * of its subnodes. There may be RCU walkers with references into the tree,
+ * so we must replace all entries with retry markers.
+ */
+static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
+{
+	unsigned int offset = 0;
+	struct xa_node *node = top;
+
+	for (;;) {
+		void *entry = xa_entry_locked(xas->xa, node, offset);
+
+		if (xa_is_node(entry)) {
+			node = xa_to_node(entry);
+			offset = 0;
+			continue;
+		}
+		if (entry)
+			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
+		offset++;
+		while (offset == XA_CHUNK_SIZE) {
+			struct xa_node *parent;
+
+			parent = xa_parent_locked(xas->xa, node);
+			offset = node->offset + 1;
+			node->count = 0;
+			node->nr_values = 0;
+			xas_update(xas, node);
+			xa_node_free(node);
+			if (node == top)
+				return;
+			node = parent;
+		}
+	}
+}
+
+/*
+ * xas_expand adds nodes to the head of the tree until it has reached
+ * sufficient height to be able to contain @xas->xa_index
+ */
+static int xas_expand(struct xa_state *xas, void *head)
+{
+	struct xarray *xa = xas->xa;
+	struct xa_node *node = NULL;
+	unsigned int shift = 0;
+	unsigned long max = xas_max(xas);
+
+	if (!head) {
+		if (max == 0)
+			return 0;
+		while ((max >> shift) >= XA_CHUNK_SIZE)
+			shift += XA_CHUNK_SHIFT;
+		return shift + XA_CHUNK_SHIFT;
+	} else if (xa_is_node(head)) {
+		node = xa_to_node(head);
+		shift = node->shift + XA_CHUNK_SHIFT;
+	}
+	xas->xa_node = NULL;
+
+	while (max > max_index(head)) {
+		xa_mark_t mark = 0;
+
+		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
+		node = xas_alloc(xas, shift);
+		if (!node)
+			return -ENOMEM;
+
+		node->count = 1;
+		if (xa_is_value(head))
+			node->nr_values = 1;
+		RCU_INIT_POINTER(node->slots[0], head);
+
+		/* Propagate the aggregated mark info to the new child */
+		for (;;) {
+			if (xa_marked(xa, mark))
+				node_set_mark(node, 0, mark);
+			if (mark == XA_MARK_MAX)
+				break;
+			mark_inc(mark);
+		}
+
+		/*
+		 * Now that the new node is fully initialised, we can add
+		 * it to the tree
+		 */
+		if (xa_is_node(head)) {
+			xa_to_node(head)->offset = 0;
+			rcu_assign_pointer(xa_to_node(head)->parent, node);
+		}
+		head = xa_mk_node(node);
+		rcu_assign_pointer(xa->xa_head, head);
+		xas_update(xas, node);
+
+		shift += XA_CHUNK_SHIFT;
+	}
+
+	xas->xa_node = node;
+	return shift;
+}
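For example, assuming XA_CHUNK_SHIFT is 6 (64-slot nodes): if the tree is currently a single non-node entry at the head and xas_max() comes back as 70, max_index(head) starts at 0, so the first pass wraps the old head in a shift-0 node (max_index becomes 63) and the second pass stacks a shift-6 node on top (max_index becomes 4095), at which point index 70 fits and the loop stops.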
+
+/*
+ * xas_create() - Create a slot to store an entry in.
+ * @xas: XArray operation state.
+ *
+ * Most users will not need to call this function directly, as it is called
+ * by xas_store(). It is useful for doing conditional store operations
+ * (see the xa_cmpxchg() implementation for an example).
+ *
+ * Return: If the slot already existed, returns the contents of this slot.
+ * If the slot was newly created, returns NULL. If it failed to create the
+ * slot, returns NULL and indicates the error in @xas.
+ */
+static void *xas_create(struct xa_state *xas)
+{
+	struct xarray *xa = xas->xa;
+	void *entry;
+	void __rcu **slot;
+	struct xa_node *node = xas->xa_node;
+	int shift;
+	unsigned int order = xas->xa_shift;
+
+	if (xas_top(node)) {
+		entry = xa_head_locked(xa);
+		xas->xa_node = NULL;
+		shift = xas_expand(xas, entry);
+		if (shift < 0)
+			return NULL;
+		entry = xa_head_locked(xa);
+		slot = &xa->xa_head;
+	} else if (xas_error(xas)) {
+		return NULL;
+	} else if (node) {
+		unsigned int offset = xas->xa_offset;
+
+		shift = node->shift;
+		entry = xa_entry_locked(xa, node, offset);
+		slot = &node->slots[offset];
+	} else {
+		shift = 0;
+		entry = xa_head_locked(xa);
+		slot = &xa->xa_head;
+	}
+
+	while (shift > order) {
+		shift -= XA_CHUNK_SHIFT;
+		if (!entry) {
+			node = xas_alloc(xas, shift);
+			if (!node)
+				break;
+			rcu_assign_pointer(*slot, xa_mk_node(node));
+		} else if (xa_is_node(entry)) {
+			node = xa_to_node(entry);
+		} else {
+			break;
+		}
+		entry = xas_descend(xas, node);
+		slot = &node->slots[xas->xa_offset];
+	}
+
+	return entry;
+}
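A conditional store of the kind referred to above might look roughly like the sketch below. This is not the actual xa_cmpxchg() code; old, entry, index and gfp are placeholders:

	XA_STATE(xas, xa, index);
	void *curr;

	do {
		xas_lock(&xas);
		curr = xas_create(&xas);
		if (curr == old)
			xas_store(&xas, entry);
		xas_unlock(&xas);
	} while (xas_nomem(&xas, gfp));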
+
+static void update_node(struct xa_state *xas, struct xa_node *node,
+		int count, int values)
+{
+	if (!node || (!count && !values))
+		return;
+
+	node->count += count;
+	node->nr_values += values;
+	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
+	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
+	xas_update(xas, node);
+	if (count < 0)
+		xas_delete_node(xas);
+}
+
+/**
+ * xas_store() - Store this entry in the XArray.
+ * @xas: XArray operation state.
+ * @entry: New entry.
+ *
+ * If @xas is operating on a multi-index entry, the entry returned by this
+ * function is essentially meaningless (it may be an internal entry or it
+ * may be %NULL, even if there are non-NULL entries at some of the indices
+ * covered by the range). This is not a problem for any current users,
+ * and can be changed if needed.
+ *
+ * Return: The old entry at this index.
+ */
+void *xas_store(struct xa_state *xas, void *entry)
+{
+	struct xa_node *node;
+	void __rcu **slot = &xas->xa->xa_head;
+	unsigned int offset, max;
+	int count = 0;
+	int values = 0;
+	void *first, *next;
+	bool value = xa_is_value(entry);
+
+	if (entry)
+		first = xas_create(xas);
+	else
+		first = xas_load(xas);
+
+	if (xas_invalid(xas))
+		return first;
+	node = xas->xa_node;
+	if (node && (xas->xa_shift < node->shift))
+		xas->xa_sibs = 0;
+	if ((first == entry) && !xas->xa_sibs)
+		return first;
+
+	next = first;
+	offset = xas->xa_offset;
+	max = xas->xa_offset + xas->xa_sibs;
+	if (node) {
+		slot = &node->slots[offset];
+		if (xas->xa_sibs)
+			xas_squash_marks(xas);
+	}
+	if (!entry)
+		xas_init_marks(xas);
+
+	for (;;) {
+		/*
+		 * Must clear the marks before setting the entry to NULL,
+		 * otherwise xas_for_each_marked may find a NULL entry and
+		 * stop early. rcu_assign_pointer contains a release barrier
+		 * so the mark clearing will appear to happen before the
+		 * entry is set to NULL.
+		 */
+		rcu_assign_pointer(*slot, entry);
+		if (xa_is_node(next))
+			xas_free_nodes(xas, xa_to_node(next));
+		if (!node)
+			break;
+		count += !next - !entry;
+		values += !xa_is_value(first) - !value;
+		if (entry) {
+			if (offset == max)
+				break;
+			if (!xa_is_sibling(entry))
+				entry = xa_mk_sibling(xas->xa_offset);
+		} else {
+			if (offset == XA_CHUNK_MASK)
+				break;
+		}
+		next = xa_entry_locked(xas->xa, node, ++offset);
+		if (!xa_is_sibling(next)) {
+			if (!entry && (offset > max))
+				break;
+			first = next;
+		}
+		slot++;
+	}
+
+	update_node(xas, node, count, values);
+	return first;
+}
+EXPORT_SYMBOL_GPL(xas_store);
+
 /**
  * xas_get_mark() - Returns the state of this mark.
  * @xas: XArray operation state.
@@ -241,6 +806,30 @@ void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
 EXPORT_SYMBOL_GPL(xas_clear_mark);
 
 /**
+ * xas_init_marks() - Initialise all marks for the entry
+ * @xas: Array operations state.
+ *
+ * Initialise all marks for the entry specified by @xas. If we're tracking
+ * free entries with a mark, we need to set it on all entries. All other
+ * marks are cleared.
+ *
+ * This implementation is not as efficient as it could be; we may walk
+ * up the tree multiple times.
+ */
+void xas_init_marks(const struct xa_state *xas)
+{
+	xa_mark_t mark = 0;
+
+	for (;;) {
+		xas_clear_mark(xas, mark);
+		if (mark == XA_MARK_MAX)
+			break;
+		mark_inc(mark);
+	}
+}
+EXPORT_SYMBOL_GPL(xas_init_marks);
+
+/**
  * xa_init_flags() - Initialise an empty XArray with flags.
  * @xa: XArray.
  * @flags: XA_FLAG values.
@@ -253,9 +842,19 @@ EXPORT_SYMBOL_GPL(xas_clear_mark);
  */
 void xa_init_flags(struct xarray *xa, gfp_t flags)
 {
+	unsigned int lock_type;
+	static struct lock_class_key xa_lock_irq;
+	static struct lock_class_key xa_lock_bh;
+
 	spin_lock_init(&xa->xa_lock);
 	xa->xa_flags = flags;
 	xa->xa_head = NULL;
+
+	lock_type = xa_lock_type(xa);
+	if (lock_type == XA_LOCK_IRQ)
+		lockdep_set_class(&xa->xa_lock, &xa_lock_irq);
+	else if (lock_type == XA_LOCK_BH)
+		lockdep_set_class(&xa->xa_lock, &xa_lock_bh);
 }
 EXPORT_SYMBOL(xa_init_flags);
 
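An array whose lock is always taken with interrupts disabled would declare that here so lockdep can check it against the helpers added above (XA_FLAGS_LOCK_IRQ comes from xarray.h; the array name is illustrative):

	struct xarray irq_array;

	xa_init_flags(&irq_array, XA_FLAGS_LOCK_IRQ);
	/* xa_lock_type() now returns XA_LOCK_IRQ, so __xas_nomem() drops and
	 * retakes the lock with xas_unlock_irq()/xas_lock_irq(). */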
@@ -282,6 +881,100 @@ void *xa_load(struct xarray *xa, unsigned long index)
 }
 EXPORT_SYMBOL(xa_load);
 
+static void *xas_result(struct xa_state *xas, void *curr)
+{
+	XA_NODE_BUG_ON(xas->xa_node, xa_is_internal(curr));
+	if (xas_error(xas))
+		curr = xas->xa_node;
+	return curr;
+}
+
+/**
+ * __xa_erase() - Erase this entry from the XArray while locked.
+ * @xa: XArray.
+ * @index: Index into array.
+ *
+ * If the entry at this index is a multi-index entry then all indices will
+ * be erased, and the entry will no longer be a multi-index entry.
+ * This function expects the xa_lock to be held on entry.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry.
+ * Return: The old entry at this index.
+ */
+void *__xa_erase(struct xarray *xa, unsigned long index)
+{
+	XA_STATE(xas, xa, index);
+	return xas_result(&xas, xas_store(&xas, NULL));
+}
+EXPORT_SYMBOL_GPL(__xa_erase);
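In use, the caller supplies the locking; xa_lock()/xa_unlock() are the wrappers from xarray.h and the array name is illustrative:

	xa_lock(&array);
	old = __xa_erase(&array, index);
	xa_unlock(&array);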
+
+/**
+ * xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
+ *
+ * Context: Process context. Takes and releases the xa_lock. May sleep
+ * if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
+ */
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_internal(entry)))
+		return XA_ERROR(-EINVAL);
+
+	do {
+		xas_lock(&xas);
+		curr = xas_store(&xas, entry);
+		xas_unlock(&xas);
+	} while (xas_nomem(&xas, gfp));
+
+	return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(xa_store);
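A minimal example of this process-context API (the array definition and error handling are illustrative):

	static DEFINE_XARRAY(array);

	void *old = xa_store(&array, 5, item, GFP_KERNEL);
	if (xa_err(old))
		return xa_err(old);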
+
+/**
+ * __xa_store() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * You must already be holding the xa_lock when calling this function.
+ * It will drop the lock if needed to allocate memory, and then reacquire
+ * it afterwards.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: The old entry at this index or xa_err() if an error happened.
+ */
+void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+{
+	XA_STATE(xas, xa, index);
+	void *curr;
+
+	if (WARN_ON_ONCE(xa_is_internal(entry)))
+		return XA_ERROR(-EINVAL);
+
+	do {
+		curr = xas_store(&xas, entry);
+	} while (__xas_nomem(&xas, gfp));
+
+	return xas_result(&xas, curr);
+}
+EXPORT_SYMBOL(__xa_store);
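Under an externally held lock the same store looks like the sketch below; as the comment says, the lock may be dropped and retaken internally if the allocation needs to sleep (array, index and item are illustrative):

	xa_lock(&array);
	curr = __xa_store(&array, index, item, GFP_KERNEL);
	xa_unlock(&array);
	if (xa_err(curr))
		return xa_err(curr);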
+
 /**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.