path: root/lib/xarray.c
author	Matthew Wilcox <willy@infradead.org>	2018-11-06 14:13:35 -0500
committer	Matthew Wilcox <willy@infradead.org>	2019-02-06 13:32:25 -0500
commit	2fa044e51a1f35d7b04cbde07ec513b0ba195e38 (patch)
tree	ca7f9f39820ca4f8241caf7a6eef8f044db5d38a /lib/xarray.c
parent	a3e4d3f97ec844de005a679585c04c5c03dfbdb6 (diff)
XArray: Add cyclic allocation
This differs slightly from the IDR equivalent in five ways.

1. It can allocate up to UINT_MAX instead of being limited to INT_MAX,
   like xa_alloc().  Also like xa_alloc(), it will write to the 'id'
   pointer before placing the entry in the XArray.
2. The 'next' cursor is allocated separately from the XArray instead
   of being part of the IDR.  This saves memory for all the users which
   do not use the cyclic allocation API and suits some users better.
3. It returns -EBUSY instead of -ENOSPC.
4. It will attempt to wrap back to the minimum value on memory
   allocation failure as well as on an -EBUSY error, assuming that a
   user would rather allocate a small ID than suffer an ID allocation
   failure.
5. It reports whether it has wrapped, which is important to some users.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
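As a rough sketch of how a caller might use the new interface (not part of
this patch): struct foo, foo_ids, foo_next and foo_assign_id() below are
hypothetical names; only xa_alloc_cyclic(), DEFINE_XARRAY_ALLOC(),
xa_limit_32b and the return values described above are assumed from the
XArray API.

	#include <linux/printk.h>
	#include <linux/xarray.h>

	struct foo {
		u32 id;
	};

	/* XArray marked for ID allocation; the cursor lives with the caller. */
	static DEFINE_XARRAY_ALLOC(foo_ids);
	static u32 foo_next;

	static int foo_assign_id(struct foo *foo)
	{
		int err;

		/* Search starts at foo_next and wraps back to the minimum if needed. */
		err = xa_alloc_cyclic(&foo_ids, &foo->id, foo, xa_limit_32b,
				      &foo_next, GFP_KERNEL);
		if (err < 0)
			return err;	/* -ENOMEM, or -EBUSY if the range is full */
		if (err == 1)
			pr_debug("foo ID space wrapped\n");
		return 0;
	}

Because the cursor is passed in by the caller, only users that actually want
cyclic allocation pay for it (point 2 above).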
Diffstat (limited to 'lib/xarray.c')
-rw-r--r--	lib/xarray.c	50
1 files changed, 50 insertions, 0 deletions
diff --git a/lib/xarray.c b/lib/xarray.c
index c707388fb05e..89e37ac50850 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -1657,6 +1657,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
 EXPORT_SYMBOL(__xa_alloc);
 
 /**
+ * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
+ * @xa: XArray.
+ * @id: Pointer to ID.
+ * @entry: New entry.
+ * @limit: Range of allocated ID.
+ * @next: Pointer to next ID to allocate.
+ * @gfp: Memory allocation flags.
+ *
+ * Finds an empty entry in @xa between @limit.min and @limit.max,
+ * stores the index into the @id pointer, then stores the entry at
+ * that index.  A concurrent lookup will not see an uninitialised @id.
+ * The search for an empty entry will start at @next and will wrap
+ * around if necessary.
+ *
+ * Context: Any context.  Expects xa_lock to be held on entry.  May
+ * release and reacquire xa_lock if @gfp flags permit.
+ * Return: 0 if the allocation succeeded without wrapping.  1 if the
+ * allocation succeeded after wrapping, -ENOMEM if memory could not be
+ * allocated or -EBUSY if there are no free entries in @limit.
+ */
+int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
+		struct xa_limit limit, u32 *next, gfp_t gfp)
+{
+	u32 min = limit.min;
+	int ret;
+
+	limit.min = max(min, *next);
+	ret = __xa_alloc(xa, id, entry, limit, gfp);
+	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
+		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
+		ret = 1;
+	}
+
+	if (ret < 0 && limit.min > min) {
+		limit.min = min;
+		ret = __xa_alloc(xa, id, entry, limit, gfp);
+		if (ret == 0)
+			ret = 1;
+	}
+
+	if (ret >= 0) {
+		*next = *id + 1;
+		if (*next == 0)
+			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__xa_alloc_cyclic);
+
 /**
  * __xa_set_mark() - Set this mark on this entry while locked.
  * @xa: XArray.
  * @index: Index of entry.