author    Matthew Wilcox <willy@infradead.org>  2018-11-06 14:13:35 -0500
committer Matthew Wilcox <willy@infradead.org>  2019-02-06 13:32:25 -0500
commit    2fa044e51a1f35d7b04cbde07ec513b0ba195e38 (patch)
tree      ca7f9f39820ca4f8241caf7a6eef8f044db5d38a /lib
parent    a3e4d3f97ec844de005a679585c04c5c03dfbdb6 (diff)
XArray: Add cyclic allocation
This differs slightly from the IDR equivalent in five ways.

1. It can allocate up to UINT_MAX instead of being limited to INT_MAX,
   like xa_alloc(). Also like xa_alloc(), it will write to the 'id'
   pointer before placing the entry in the XArray.
2. The 'next' cursor is allocated separately from the XArray instead
   of being part of the IDR. This saves memory for all the users which
   do not use the cyclic allocation API and suits some users better.
3. It returns -EBUSY instead of -ENOSPC.
4. It will attempt to wrap back to the minimum value on memory
   allocation failure as well as on an -EBUSY error, assuming that a
   user would rather allocate a small ID than suffer an ID allocation
   failure.
5. It reports whether it has wrapped, which is important to some users.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
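As a caller-level sketch of points 1, 2 and 5 above (illustrative only:
'sessions', 'next_sid' and 'session_create' are hypothetical names, and
xa_alloc_cyclic() is the locking wrapper around the __xa_alloc_cyclic()
added below, as exercised by the new test):

/* Hypothetical caller -- not part of this patch.  The 'next' cursor
 * lives with the caller rather than in the data structure (point 2),
 * IDs may range over the full u32 space (point 1), and a return of 1
 * reports that the cursor wrapped (point 5). */
static DEFINE_XARRAY_ALLOC(sessions);
static u32 next_sid;

static int session_create(void *session, u32 *sid)
{
	int err = xa_alloc_cyclic(&sessions, sid, session,
				  XA_LIMIT(1, UINT_MAX), &next_sid,
				  GFP_KERNEL);
	if (err < 0)
		return err;	/* -ENOMEM, or -EBUSY rather than -ENOSPC */
	if (err == 1)
		pr_debug("session IDs wrapped\n");
	return 0;
}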
Diffstat (limited to 'lib')
-rw-r--r--  lib/test_xarray.c  53
-rw-r--r--  lib/xarray.c       50
2 files changed, 103 insertions(+), 0 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index b5a6b981454d..eaf53f742c72 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+	u32 next = 0;
+	unsigned int i, id;
+	unsigned long index;
+	void *entry;
+
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 1);
+
+	next = 0x3ffd;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 0x3ffd);
+	xa_erase_index(xa, 0x3ffd);
+	xa_erase_index(xa, 1);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = 0x3ffe; i < 0x4003; i++) {
+		if (i < 0x4000)
+			entry = xa_mk_index(i);
+		else
+			entry = xa_mk_index(i - 0x3fff);
+		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+					&next, GFP_KERNEL) != (id == 1));
+		XA_BUG_ON(xa, xa_mk_index(id) != entry);
+	}
+
+	/* Check wrap-around is handled correctly */
+	if (base != 0)
+		xa_erase_index(xa, base);
+	xa_erase_index(xa, base + 1);
+	next = UINT_MAX;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != UINT_MAX);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+				xa_limit_32b, &next, GFP_KERNEL) != 1);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base + 1);
+
+	xa_for_each(xa, index, entry)
+		xa_erase_index(xa, index);
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
@@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void)
 	check_xa_alloc_1(&xa1, 1);
 	check_xa_alloc_2(&xa0, 0);
 	check_xa_alloc_2(&xa1, 1);
+	check_xa_alloc_3(&xa0, 0);
+	check_xa_alloc_3(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
diff --git a/lib/xarray.c b/lib/xarray.c
index c707388fb05e..89e37ac50850 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1657,6 +1657,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
 EXPORT_SYMBOL(__xa_alloc);
 
 /**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index. A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context. Expects xa_lock to be held on entry. May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping. 1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	u32 min = limit.min;
+	int ret;
+
+	limit.min = max(min, *next);
+	ret = __xa_alloc(xa, id, entry, limit, gfp);
+	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+		ret = 1;
+	}
+
+	if (ret < 0 && limit.min > min) {
+		limit.min = min;
+		ret = __xa_alloc(xa, id, entry, limit, gfp);
+		if (ret == 0)
+			ret = 1;
+	}
+
+	if (ret >= 0) {
+		*next = *id + 1;
+		if (*next == 0)
+			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
+
+/**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.
  * @index: Index of entry.
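To make the Return: contract above concrete, here is a minimal sketch of
the wrap sequence that check_xa_alloc_3() pins down (assumptions: an
XArray 'xa' defined with DEFINE_XARRAY_ALLOC, so the lowest free index
is 0, and 'entry' is any valid pointer):

	u32 id, next = UINT_MAX;

	/* Allocates id == UINT_MAX and returns 0: the caller has not
	 * wrapped yet, but *next advances to 0, so the implementation
	 * records XA_FLAGS_ALLOC_WRAPPED internally. */
	xa_alloc_cyclic(&xa, &id, entry, xa_limit_32b, &next, GFP_KERNEL);

	/* This allocation restarts at the bottom of the range (id == 0)
	 * and consumes the pending flag, so it returns 1 instead of 0. */
	xa_alloc_cyclic(&xa, &id, entry, xa_limit_32b, &next, GFP_KERNEL);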