 Documentation/core-api/xarray.rst |   4 +-
 include/linux/xarray.h            | 102 ++++++++++++++++++
 lib/test_xarray.c                 |  53 +++++++++
 lib/xarray.c                      |  50 ++++++++
 4 files changed, 208 insertions(+), 1 deletion(-)
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst
index e90c4925cd37..c7436da5c4ad 100644
--- a/Documentation/core-api/xarray.rst
+++ b/Documentation/core-api/xarray.rst
@@ -144,7 +144,9 @@ you only want to free the entry if it's ``NULL``).
 
 By default, the lowest free entry is allocated starting from 0. If you
 want to allocate entries starting at 1, it is more efficient to use
-:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``.
+:c:func:`DEFINE_XARRAY_ALLOC1` or ``XA_FLAGS_ALLOC1``. If you want to
+allocate IDs up to a maximum, then wrap back around to the lowest free
+ID, you can use :c:func:`xa_alloc_cyclic`.
 
 You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
 is used to track whether an entry is free or not. The other marks are
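
[Annotation, not part of the patch: a minimal usage sketch of the xa_alloc_cyclic() call the paragraph above describes. The struct session type, the sessions array, and the 16-bit ID range are illustrative assumptions.]

	#include <linux/xarray.h>

	/* Hypothetical object type, for illustration only. */
	struct session {
		int data;
	};

	static DEFINE_XARRAY_ALLOC(sessions);	/* allocating XArray, IDs from 0 */
	static u32 next_session_id;		/* cursor advanced by each allocation */

	static int session_register(struct session *s, u32 *out_id)
	{
		/* Allocate the lowest free ID at or above next_session_id
		 * within [0, 0xffff], wrapping back to the lowest free ID
		 * once the top of the range is exhausted.  Returns 0 or 1
		 * on success, -EBUSY if every ID in the range is in use.
		 */
		int err = xa_alloc_cyclic(&sessions, out_id, s,
					  XA_LIMIT(0, 0xffff),
					  &next_session_id, GFP_KERNEL);

		return err < 0 ? err : 0;
	}

The cyclic style is useful when an ID should not be reused immediately after being freed, e.g. so that stale handles are likely to be caught.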
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 883bb958e462..5ed6b462e754 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -242,6 +242,7 @@ enum xa_lock_type {
 #define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
 #define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
 #define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
+#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
 #define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
 				(__force unsigned)(mark)))
 
@@ -499,6 +500,8 @@ void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
 int __xa_insert(struct xarray *, unsigned long index, void *entry, gfp_t);
 int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
 		struct xa_limit, gfp_t);
+int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
+		struct xa_limit, u32 *next, gfp_t);
 int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -859,6 +862,105 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
 }
 
 /**
+ * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock. May sleep if
+ * the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Takes and releases the xa_lock while
+ * disabling softirqs. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_bh(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock_bh(xa);
+
+	return err;
+}
+
+/**
+ * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Process context. Takes and releases the xa_lock while
+ * disabling interrupts. May sleep if the @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	int err;
+
+	xa_lock_irq(xa);
+	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
+	xa_unlock_irq(xa);
+
+	return err;
+}
+
+/**
  * xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
  * @index: Index into array.
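
[Annotation, not part of the patch: the three wrappers above differ only in how the xa_lock is taken, mirroring xa_alloc()/xa_alloc_bh()/xa_alloc_irq(). A sketch of when the _irq variant applies; the tags array, next_tag cursor, and 8-bit tag range are illustrative assumptions.]

	#include <linux/xarray.h>

	/* Hypothetical: request tags that are also erased from a hardirq
	 * handler, so the lock must be taken with interrupts disabled.
	 */
	static DEFINE_XARRAY_FLAGS(tags, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	static u32 next_tag;	/* cyclic cursor */

	/* Process context: allocate the next free tag in [0, 255]. */
	static int tag_assign(void *req, u32 *tag)
	{
		int err = xa_alloc_cyclic_irq(&tags, tag, req,
					      XA_LIMIT(0, 255),
					      &next_tag, GFP_KERNEL);

		return err < 0 ? err : 0;	/* 1 (wrapped) is still success */
	}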
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index b5a6b981454d..eaf53f742c72 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+	u32 next = 0;
+	unsigned int i, id;
+	unsigned long index;
+	void *entry;
+
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 1);
+
+	next = 0x3ffd;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 0x3ffd);
+	xa_erase_index(xa, 0x3ffd);
+	xa_erase_index(xa, 1);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = 0x3ffe; i < 0x4003; i++) {
+		if (i < 0x4000)
+			entry = xa_mk_index(i);
+		else
+			entry = xa_mk_index(i - 0x3fff);
+		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+					&next, GFP_KERNEL) != (id == 1));
+		XA_BUG_ON(xa, xa_mk_index(id) != entry);
+	}
+
+	/* Check wrap-around is handled correctly */
+	if (base != 0)
+		xa_erase_index(xa, base);
+	xa_erase_index(xa, base + 1);
+	next = UINT_MAX;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != UINT_MAX);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+				xa_limit_32b, &next, GFP_KERNEL) != 1);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base + 1);
+
+	xa_for_each(xa, index, entry)
+		xa_erase_index(xa, index);
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
@@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void)
 	check_xa_alloc_1(&xa1, 1);
 	check_xa_alloc_2(&xa0, 0);
 	check_xa_alloc_2(&xa1, 1);
+	check_xa_alloc_3(&xa0, 0);
+	check_xa_alloc_3(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
diff --git a/lib/xarray.c b/lib/xarray.c
index c707388fb05e..89e37ac50850 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1657,6 +1657,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
 EXPORT_SYMBOL(__xa_alloc);
 
 /**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	u32 min = limit.min;
+	int ret;
+
+	limit.min = max(min, *next);
+	ret = __xa_alloc(xa, id, entry, limit, gfp);
+	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+		ret = 1;
+	}
+
+	if (ret < 0 && limit.min > min) {
+		limit.min = min;
+		ret = __xa_alloc(xa, id, entry, limit, gfp);
+		if (ret == 0)
+			ret = 1;
+	}
+
+	if (ret >= 0) {
+		*next = *id + 1;
+		if (*next == 0)
+			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
+
+/**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.
  * @index: Index of entry.
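
[Annotation, not part of the patch: a worked trace of the wrap logic in __xa_alloc_cyclic() above, with values chosen purely for illustration.]

	/* Suppose limit = XA_LIMIT(1, 3), *next = 3, array empty.
	 *
	 * call 1: limit.min = max(1, 3) = 3 -> allocates ID 3,
	 *         returns 0, *next becomes 4.
	 * call 2: limit.min = max(1, 4) = 4 > limit.max, so the first
	 *         __xa_alloc() fails with -EBUSY; since limit.min was
	 *         raised above min, the allocation retries from 1 and
	 *         allocates ID 1, returning 1 to signal the wrap;
	 *         *next becomes 2.
	 * call 3: allocates ID 2, returns 0, *next becomes 3.
	 * call 4: ID 3 is taken and the retry from 1 finds nothing
	 *         free, so -EBUSY is returned and nothing is stored.
	 *
	 * XA_FLAGS_ALLOC_WRAPPED covers the one case the retry path
	 * cannot see: after ID UINT_MAX is allocated, *next overflows
	 * to 0, so the next call starts from limit.min directly and
	 * would otherwise report 0 instead of 1.
	 */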