author		Matthew Wilcox <willy@infradead.org>	2018-12-31 10:41:01 -0500
committer	Matthew Wilcox <willy@infradead.org>	2019-02-06 13:32:23 -0500
commit		a3e4d3f97ec844de005a679585c04c5c03dfbdb6
tree		c4cda3a98cba2d9923e7356e587f6a958b2971d7
parent		3ccaf57a6a63ad171a951dcaddffc453b2414c7b
XArray: Redesign xa_alloc API
It was too easy to forget to initialise the start index. Add an
xa_limit data structure which can be used to pass min & max, and
define a couple of special values for common cases. Also add some
more tests cribbed from the IDR test suite. Change the return value
from -ENOSPC to -EBUSY to match xa_insert().
Signed-off-by: Matthew Wilcox <willy@infradead.org>
 include/linux/xarray.h | 80
 lib/test_xarray.c      | 86
 lib/xarray.c           | 29
 3 files changed, 135 insertions(+), 60 deletions(-)
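For callers converting to the new signature, the shape of the change looks roughly like the sketch below. This is a minimal illustration rather than part of the patch: the array and function names are invented, and only xa_alloc() is shown (xa_alloc_bh() and xa_alloc_irq() change in the same way).

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(example_xa);	/* hypothetical allocating XArray */

/*
 * Old API (before this patch):
 *
 *	u32 id = 0;	(easy to forget to initialise)
 *	err = xa_alloc(&example_xa, &id, UINT_MAX, entry, GFP_KERNEL);
 *	(returned -ENOSPC once no free index remained)
 */
static int example_alloc(void *entry, u32 *id)
{
	/*
	 * New API: the range travels in a struct xa_limit (here the
	 * predefined xa_limit_32b, i.e. [0, UINT_MAX]) and exhaustion
	 * is reported as -EBUSY, matching xa_insert().
	 */
	return xa_alloc(&example_xa, id, entry, xa_limit_32b, GFP_KERNEL);
}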
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 99dd0838b4ba..883bb958e462 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -200,6 +200,27 @@ static inline int xa_err(void *entry)
 	return 0;
 }
 
+/**
+ * struct xa_limit - Represents a range of IDs.
+ * @min: The lowest ID to allocate (inclusive).
+ * @max: The maximum ID to allocate (inclusive).
+ *
+ * This structure is used either directly or via the XA_LIMIT() macro
+ * to communicate the range of IDs that are valid for allocation.
+ * Two common ranges are predefined for you:
+ * * xa_limit_32b	- [0 - UINT_MAX]
+ * * xa_limit_31b	- [0 - INT_MAX]
+ */
+struct xa_limit {
+	u32 max;
+	u32 min;
+};
+
+#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }
+
+#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
+#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
+
 typedef unsigned __bitwise xa_mark_t;
 #define XA_MARK_0	((__force xa_mark_t)0U)
 #define XA_MARK_1	((__force xa_mark_t)1U)
@@ -476,7 +497,8 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 		void *entry, gfp_t);
 int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
-int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
+		struct xa_limit, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -753,26 +775,26 @@ static inline int xa_insert_irq(struct xarray *xa, unsigned long index,
  * xa_alloc() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
  * @entry: New entry.
+ * @limit: Range of ID to allocate.
  * @gfp: Memory allocation flags.
  *
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
  *
- * Context: Process context. Takes and releases the xa_lock. May sleep if
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
  * the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
  */
-static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
-		gfp_t gfp)
+static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
+		void *entry, struct xa_limit limit, gfp_t gfp)
 {
 	int err;
 
 	xa_lock(xa);
-	err = __xa_alloc(xa, id, max, entry, gfp);
+	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock(xa);
 
 	return err;
@@ -782,26 +804,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
  * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
  * @entry: New entry.
+ * @limit: Range of ID to allocate.
  * @gfp: Memory allocation flags.
 *
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
 */
-static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
-		gfp_t gfp)
+static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
+		void *entry, struct xa_limit limit, gfp_t gfp)
 {
 	int err;
 
 	xa_lock_bh(xa);
-	err = __xa_alloc(xa, id, max, entry, gfp);
+	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_bh(xa);
 
 	return err;
@@ -811,26 +833,26 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry,
  * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
  * @entry: New entry.
+ * @limit: Range of ID to allocate.
  * @gfp: Memory allocation flags.
 *
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
 */
-static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
-		gfp_t gfp)
+static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
+		void *entry, struct xa_limit limit, gfp_t gfp)
 {
 	int err;
 
 	xa_lock_irq(xa);
-	err = __xa_alloc(xa, id, max, entry, gfp);
+	err = __xa_alloc(xa, id, entry, limit, gfp);
 	xa_unlock_irq(xa);
 
 	return err;
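Beyond the predefined xa_limit_32b and xa_limit_31b ranges added above, a caller can build its own window with XA_LIMIT(). The following is a hedged sketch only: the array, function, and range are invented for illustration and do not appear in this patch.

#include <linux/printk.h>
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(minor_xa);	/* hypothetical: device minor numbers */

static int alloc_minor(void *dev, u32 *minor)
{
	/* Pretend only minors 1..255 are valid for this imaginary driver. */
	int err = xa_alloc(&minor_xa, minor, dev, XA_LIMIT(1, 255), GFP_KERNEL);

	if (err == -EBUSY)	/* every index in [1, 255] is already in use */
		pr_warn("out of minor numbers\n");
	return err;
}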
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index cd74f8f32abe..b5a6b981454d 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -40,9 +40,9 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 
 static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
-	u32 id = 0;
+	u32 id;
 
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(index), xa_limit_32b,
 				gfp) != 0);
 	XA_BUG_ON(xa, id != index);
 }
@@ -640,28 +640,81 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 	xa_destroy(xa);
 
 	/* Check that we fail properly at the limit of allocation */
-	id = 0xfffffffeU;
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX - 1),
+				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, id != 0xfffffffeU);
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(UINT_MAX),
+				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
 				GFP_KERNEL) != 0);
 	XA_BUG_ON(xa, id != 0xffffffffU);
-	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
-				GFP_KERNEL) != -ENOSPC);
-	XA_BUG_ON(xa, id != 0xffffffffU);
+	id = 3;
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(0),
+				XA_LIMIT(UINT_MAX - 1, UINT_MAX),
+				GFP_KERNEL) != -EBUSY);
+	XA_BUG_ON(xa, id != 3);
 	xa_destroy(xa);
 
-	id = 10;
-	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
-				GFP_KERNEL) != -ENOSPC);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+				GFP_KERNEL) != -EBUSY);
 	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
-	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
-				GFP_KERNEL) != -ENOSPC);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5),
+				GFP_KERNEL) != -EBUSY);
 	xa_erase_index(xa, 3);
 	XA_BUG_ON(xa, !xa_empty(xa));
 }
 
+static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
+{
+	unsigned int i, id;
+	unsigned long index;
+	void *entry;
+
+	/* Allocate and free a NULL and check xa_empty() behaves */
+	XA_BUG_ON(xa, !xa_empty(xa));
+	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_empty(xa));
+	XA_BUG_ON(xa, xa_erase(xa, id) != NULL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	/* Ditto, but check destroy instead of erase */
+	XA_BUG_ON(xa, !xa_empty(xa));
+	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_empty(xa));
+	xa_destroy(xa);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = base; i < base + 10; i++) {
+		XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b,
+					GFP_KERNEL) != 0);
+		XA_BUG_ON(xa, id != i);
+	}
+
+	XA_BUG_ON(xa, xa_store(xa, 3, xa_mk_index(3), GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_store(xa, 4, xa_mk_index(4), GFP_KERNEL) != NULL);
+	XA_BUG_ON(xa, xa_store(xa, 4, NULL, GFP_KERNEL) != xa_mk_index(4));
+	XA_BUG_ON(xa, xa_erase(xa, 5) != NULL);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, NULL, xa_limit_32b, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 5);
+
+	xa_for_each(xa, index, entry) {
+		xa_erase_index(xa, index);
+	}
+
+	for (i = base; i < base + 9; i++) {
+		XA_BUG_ON(xa, xa_erase(xa, i) != NULL);
+		XA_BUG_ON(xa, xa_empty(xa));
+	}
+	XA_BUG_ON(xa, xa_erase(xa, 8) != NULL);
+	XA_BUG_ON(xa, xa_empty(xa));
+	XA_BUG_ON(xa, xa_erase(xa, base + 9) != NULL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	xa_destroy(xa);
+}
+
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
@@ -669,6 +722,8 @@ static noinline void check_xa_alloc(void)
 {
 	check_xa_alloc_1(&xa0, 0);
 	check_xa_alloc_1(&xa1, 1);
+	check_xa_alloc_2(&xa0, 0);
+	check_xa_alloc_2(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -1219,9 +1274,8 @@ static void check_align_1(struct xarray *xa, char *name)
 	void *entry;
 
 	for (i = 0; i < 8; i++) {
-		id = 0;
-		XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, name + i, GFP_KERNEL)
-				!= 0);
+		XA_BUG_ON(xa, xa_alloc(xa, &id, name + i, xa_limit_32b,
+					GFP_KERNEL) != 0);
 		XA_BUG_ON(xa, id != i);
 	}
 	xa_for_each(xa, index, entry)
diff --git a/lib/xarray.c b/lib/xarray.c
index 468fb7b7963f..c707388fb05e 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1615,23 +1615,23 @@ EXPORT_SYMBOL(xa_store_range);
  * __xa_alloc() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
  * @id: Pointer to ID.
- * @max: Maximum ID to allocate (inclusive).
+ * @limit: Range for allocated ID.
  * @entry: New entry.
  * @gfp: Memory allocation flags.
 *
- * Allocates an unused ID in the range specified by @id and @max.
- * Updates the @id pointer with the index, then stores the entry at that
- * index. A concurrent lookup will not see an uninitialised @id.
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
- * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
- * there is no more space in the XArray.
+ * Return: 0 on success, -ENOMEM if memory could not be allocated or
+ * -EBUSY if there are no free entries in @limit.
 */
-int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
+int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, gfp_t gfp)
 {
 	XA_STATE(xas, xa, 0);
-	int err;
 
 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
 		return -EINVAL;
@@ -1642,18 +1642,17 @@ int __xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, gfp_t gfp)
 		entry = XA_ZERO_ENTRY;
 
 	do {
-		xas.xa_index = *id;
-		xas_find_marked(&xas, max, XA_FREE_MARK);
+		xas.xa_index = limit.min;
+		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
 		if (xas.xa_node == XAS_RESTART)
-			xas_set_err(&xas, -ENOSPC);
+			xas_set_err(&xas, -EBUSY);
+		else
+			*id = xas.xa_index;
 		xas_store(&xas, entry);
 		xas_clear_mark(&xas, XA_FREE_MARK);
 	} while (__xas_nomem(&xas, gfp));
 
-	err = xas_error(&xas);
-	if (!err)
-		*id = xas.xa_index;
-	return err;
+	return xas_error(&xas);
 }
 EXPORT_SYMBOL(__xa_alloc);
 