diff options
Diffstat (limited to 'lib')
-rw-r--r-- | lib/test_xarray.c | 53 |
-rw-r--r-- | lib/xarray.c      | 50 |
2 files changed, 103 insertions(+), 0 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c index b5a6b981454d..eaf53f742c72 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c | |||
@@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) | |||
715 | xa_destroy(xa); | 715 | xa_destroy(xa); |
716 | } | 716 | } |
717 | 717 | ||
/*
 * Exercise cyclic ID allocation (xa_alloc_cyclic): allocation resumes
 * from @next rather than the limit's minimum, wraps back to the minimum
 * when the maximum is exhausted, and the return value distinguishes a
 * wrapped allocation (1) from a non-wrapped one (0).
 */
static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
{
	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
	u32 next = 0;
	unsigned int i, id;
	unsigned long index;
	void *entry;

	/* With next == 0, the first ID comes from the limit's minimum (1). */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 1);

	/* Allocation starts from @next, not from the minimum. */
	next = 0x3ffd;
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
				&next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != 0x3ffd);
	xa_erase_index(xa, 0x3ffd);
	xa_erase_index(xa, 1);
	XA_BUG_ON(xa, !xa_empty(xa));

	/*
	 * Walk across the limit's maximum: 0x3ffe and 0x3fff allocate
	 * normally, then the allocator wraps to the minimum and hands out
	 * 1, 2, 3.  Exactly the call that wraps (id == 1) must return 1.
	 */
	for (i = 0x3ffe; i < 0x4003; i++) {
		if (i < 0x4000)
			entry = xa_mk_index(i);
		else
			entry = xa_mk_index(i - 0x3fff);
		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
					&next, GFP_KERNEL) != (id == 1));
		XA_BUG_ON(xa, xa_mk_index(id) != entry);
	}

	/* Check wrap-around is handled correctly */
	if (base != 0)
		xa_erase_index(xa, base);
	xa_erase_index(xa, base + 1);
	next = UINT_MAX;
	/* Allocating UINT_MAX itself is not yet a wrap... */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != UINT_MAX);
	/* ...the next allocation wraps to @base and reports it with 1... */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
				xa_limit_32b, &next, GFP_KERNEL) != 1);
	XA_BUG_ON(xa, id != base);
	/* ...and subsequent allocations return 0 again. */
	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
				xa_limit_32b, &next, GFP_KERNEL) != 0);
	XA_BUG_ON(xa, id != base + 1);

	xa_for_each(xa, index, entry)
		xa_erase_index(xa, index);

	XA_BUG_ON(xa, !xa_empty(xa));
}
768 | |||
/* Test arrays: xa0 hands out IDs starting at 0, xa1 starting at 1. */
static DEFINE_XARRAY_ALLOC(xa0);
static DEFINE_XARRAY_ALLOC1(xa1);
720 | 771 | ||
@@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void) | |||
724 | check_xa_alloc_1(&xa1, 1); | 775 | check_xa_alloc_1(&xa1, 1); |
725 | check_xa_alloc_2(&xa0, 0); | 776 | check_xa_alloc_2(&xa0, 0); |
726 | check_xa_alloc_2(&xa1, 1); | 777 | check_xa_alloc_2(&xa1, 1); |
778 | check_xa_alloc_3(&xa0, 0); | ||
779 | check_xa_alloc_3(&xa1, 1); | ||
727 | } | 780 | } |
728 | 781 | ||
729 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, | 782 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, |
diff --git a/lib/xarray.c b/lib/xarray.c index c707388fb05e..89e37ac50850 100644 --- a/lib/xarray.c +++ b/lib/xarray.c | |||
@@ -1657,6 +1657,56 @@ int __xa_alloc(struct xarray *xa, u32 *id, void *entry, | |||
1657 | EXPORT_SYMBOL(__xa_alloc); | 1657 | EXPORT_SYMBOL(__xa_alloc); |
1658 | 1658 | ||
/**
 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Context: Any context. Expects xa_lock to be held on entry. May
 * release and reacquire xa_lock if @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	u32 min = limit.min;
	int ret;

	/* First try to allocate at or above *next (clamped to the limit). */
	limit.min = max(min, *next);
	ret = __xa_alloc(xa, id, entry, limit, gfp);
	/*
	 * A previous successful allocation overflowed *next to 0, so this
	 * allocation is the one that wrapped: report 1 and clear the flag
	 * so the wrap is only reported once.
	 */
	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}

	/* No free entry at or above *next: wrap and retry from limit.min. */
	if (ret < 0 && limit.min > min) {
		limit.min = min;
		ret = __xa_alloc(xa, id, entry, limit, gfp);
		if (ret == 0)
			ret = 1;	/* Succeeded, but only after wrapping. */
	}

	if (ret >= 0) {
		*next = *id + 1;
		/*
		 * *next overflowed past UINT_MAX to 0; remember so the next
		 * successful allocation is reported as a wrap.
		 */
		if (*next == 0)
			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
	}
	return ret;
}
EXPORT_SYMBOL(__xa_alloc_cyclic);
1708 | |||
1709 | /** | ||
1660 | * __xa_set_mark() - Set this mark on this entry while locked. | 1710 | * __xa_set_mark() - Set this mark on this entry while locked. |
1661 | * @xa: XArray. | 1711 | * @xa: XArray. |
1662 | * @index: Index of entry. | 1712 | * @index: Index of entry. |