path: root/lib/test_xarray.c
author    Matthew Wilcox <willy@infradead.org>  2018-11-06 14:13:35 -0500
committer Matthew Wilcox <willy@infradead.org>  2019-02-06 13:32:25 -0500
commit    2fa044e51a1f35d7b04cbde07ec513b0ba195e38 (patch)
tree      ca7f9f39820ca4f8241caf7a6eef8f044db5d38a /lib/test_xarray.c
parent    a3e4d3f97ec844de005a679585c04c5c03dfbdb6 (diff)
XArray: Add cyclic allocation
This differs slightly from the IDR equivalent in five ways.

1. It can allocate up to UINT_MAX instead of being limited to INT_MAX,
   like xa_alloc().  Also like xa_alloc(), it will write to the 'id'
   pointer before placing the entry in the XArray.
2. The 'next' cursor is allocated separately from the XArray instead
   of being part of the IDR.  This saves memory for all the users
   which do not use the cyclic allocation API and suits some users
   better.
3. It returns -EBUSY instead of -ENOSPC.
4. It will attempt to wrap back to the minimum value on memory
   allocation failure as well as on an -EBUSY error, assuming that a
   user would rather allocate a small ID than suffer an ID allocation
   failure.
5. It reports whether it has wrapped, which is important to some users.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
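For context, a minimal sketch of how a caller might use the new interface, following the behaviour described above (0 on success, 1 on wrap, negative errno on failure). The struct session type, the my_sessions array, the my_next_id cursor and my_create_session() are illustrative assumptions, not part of this patch:

	/* Illustrative sketch only -- not code from this patch. */
	#include <linux/xarray.h>
	#include <linux/slab.h>
	#include <linux/printk.h>

	static DEFINE_XARRAY_ALLOC(my_sessions);	/* IDs allocated from 0 */
	static u32 my_next_id;				/* cyclic cursor kept by the caller */

	struct session {
		u32 id;
		/* ... */
	};

	static int my_create_session(gfp_t gfp)
	{
		struct session *s = kzalloc(sizeof(*s), gfp);
		int err;

		if (!s)
			return -ENOMEM;

		/*
		 * Allocate the next free ID at or above the cursor, wrapping
		 * back to the minimum of the limit when the space above the
		 * cursor is exhausted.  's->id' is written before the entry
		 * becomes visible in the XArray.
		 */
		err = xa_alloc_cyclic(&my_sessions, &s->id, s, xa_limit_32b,
				      &my_next_id, gfp);
		if (err < 0) {		/* e.g. -EBUSY: no free ID anywhere */
			kfree(s);
			return err;
		}
		if (err == 1)		/* allocation succeeded but wrapped */
			pr_debug("session IDs wrapped\n");

		return 0;
	}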
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--  lib/test_xarray.c | 53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index b5a6b981454d..eaf53f742c72 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -715,6 +715,57 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base)
 	xa_destroy(xa);
 }
 
+static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base)
+{
+	struct xa_limit limit = XA_LIMIT(1, 0x3fff);
+	u32 next = 0;
+	unsigned int i, id;
+	unsigned long index;
+	void *entry;
+
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(1), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 1);
+
+	next = 0x3ffd;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit,
+				&next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != 0x3ffd);
+	xa_erase_index(xa, 0x3ffd);
+	xa_erase_index(xa, 1);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	for (i = 0x3ffe; i < 0x4003; i++) {
+		if (i < 0x4000)
+			entry = xa_mk_index(i);
+		else
+			entry = xa_mk_index(i - 0x3fff);
+		XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, entry, limit,
+					&next, GFP_KERNEL) != (id == 1));
+		XA_BUG_ON(xa, xa_mk_index(id) != entry);
+	}
+
+	/* Check wrap-around is handled correctly */
+	if (base != 0)
+		xa_erase_index(xa, base);
+	xa_erase_index(xa, base + 1);
+	next = UINT_MAX;
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != UINT_MAX);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base),
+				xa_limit_32b, &next, GFP_KERNEL) != 1);
+	XA_BUG_ON(xa, id != base);
+	XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(base + 1),
+				xa_limit_32b, &next, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, id != base + 1);
+
+	xa_for_each(xa, index, entry)
+		xa_erase_index(xa, index);
+
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
 static DEFINE_XARRAY_ALLOC(xa0);
 static DEFINE_XARRAY_ALLOC1(xa1);
 
@@ -724,6 +775,8 @@ static noinline void check_xa_alloc(void)
 	check_xa_alloc_1(&xa1, 1);
 	check_xa_alloc_2(&xa0, 0);
 	check_xa_alloc_2(&xa1, 1);
+	check_xa_alloc_3(&xa0, 0);
+	check_xa_alloc_3(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,