path: root/lib/test_xarray.c
author	Matthew Wilcox <willy@infradead.org>	2018-10-26 14:43:22 -0400
committer	Matthew Wilcox <willy@infradead.org>	2019-02-06 13:13:24 -0500
commit	3ccaf57a6a63ad171a951dcaddffc453b2414c7b
tree	fc2202432a5b50ee5507a2e240b439b2993c2c3f	/lib/test_xarray.c
parent	fd9dc93e36231fb6d520e0edd467058fad4fd12d
XArray: Add support for 1s-based allocation
A lot of places want to allocate IDs starting at 1 instead of 0. While the xa_alloc() API supports this, it's not very efficient if lots of IDs are allocated, due to having to walk down to the bottom of the tree to see if ID 1 is available, then all the way over to the next non-allocated ID. This method marks ID 0 as being occupied, which wastes one slot in the XArray but preserves xa_empty() as working.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
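To illustrate the behaviour described above, here is a minimal sketch (not part of this patch) of how a caller might use the 1s-based allocator. The names foo_ids, foo_register() and foo_unregister() are invented for the example; xa_alloc() is used with the (id, max, entry, gfp) signature that appears in the diff below.

#include <linux/xarray.h>
#include <linux/kernel.h>

/* IDs handed out from this array start at 1; index 0 is marked occupied internally. */
static DEFINE_XARRAY_ALLOC1(foo_ids);

/* Store @entry and return its ID (always >= 1) through @id, or a negative errno. */
static int foo_register(void *entry, u32 *id)
{
	return xa_alloc(&foo_ids, id, UINT_MAX, entry, GFP_KERNEL);
}

static void foo_unregister(u32 id)
{
	xa_erase(&foo_ids, id);
	/* Once every registered entry is erased, xa_empty(&foo_ids) is true again. */
}

Had the array been declared with DEFINE_XARRAY_ALLOC() instead, the same foo_register() call would hand out ID 0 first; the ALLOC1 variant simply shifts the first available ID to 1 while keeping xa_empty() accurate.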
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--	lib/test_xarray.c	88
1 file changed, 55 insertions(+), 33 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 9d894e93456c..cd74f8f32abe 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -589,64 +589,86 @@ static noinline void check_multi_store(struct xarray *xa)
 #endif
 }
 
-static DEFINE_XARRAY_ALLOC(xa0);
-
-static noinline void check_xa_alloc(void)
+static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base)
 {
 	int i;
 	u32 id;
 
-	/* An empty array should assign 0 to the first alloc */
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	XA_BUG_ON(xa, !xa_empty(xa));
+	/* An empty array should assign %base to the first alloc */
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
 	/* Erasing it should make the array empty again */
-	xa_erase_index(&xa0, 0);
-	XA_BUG_ON(&xa0, !xa_empty(&xa0));
+	xa_erase_index(xa, base);
+	XA_BUG_ON(xa, !xa_empty(xa));
+
+	/* And it should assign %base again */
+	xa_alloc_index(xa, base, GFP_KERNEL);
+
+	/* Allocating and then erasing a lot should not lose base */
+	for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++)
+		xa_alloc_index(xa, i, GFP_KERNEL);
+	for (i = base; i < 2 * XA_CHUNK_SIZE; i++)
+		xa_erase_index(xa, i);
+	xa_alloc_index(xa, base, GFP_KERNEL);
+
+	/* Destroying the array should do the same as erasing */
+	xa_destroy(xa);
 
-	/* And it should assign 0 again */
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	/* And it should assign %base again */
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
-	/* The next assigned ID should be 1 */
-	xa_alloc_index(&xa0, 1, GFP_KERNEL);
-	xa_erase_index(&xa0, 1);
+	/* The next assigned ID should be base+1 */
+	xa_alloc_index(xa, base + 1, GFP_KERNEL);
+	xa_erase_index(xa, base + 1);
 
 	/* Storing a value should mark it used */
-	xa_store_index(&xa0, 1, GFP_KERNEL);
-	xa_alloc_index(&xa0, 2, GFP_KERNEL);
+	xa_store_index(xa, base + 1, GFP_KERNEL);
+	xa_alloc_index(xa, base + 2, GFP_KERNEL);
 
-	/* If we then erase 0, it should be free */
-	xa_erase_index(&xa0, 0);
-	xa_alloc_index(&xa0, 0, GFP_KERNEL);
+	/* If we then erase base, it should be free */
+	xa_erase_index(xa, base);
+	xa_alloc_index(xa, base, GFP_KERNEL);
 
-	xa_erase_index(&xa0, 1);
-	xa_erase_index(&xa0, 2);
+	xa_erase_index(xa, base + 1);
+	xa_erase_index(xa, base + 2);
 
 	for (i = 1; i < 5000; i++) {
-		xa_alloc_index(&xa0, i, GFP_KERNEL);
+		xa_alloc_index(xa, base + i, GFP_KERNEL);
 	}
 
-	xa_destroy(&xa0);
+	xa_destroy(xa);
 
+	/* Check that we fail properly at the limit of allocation */
 	id = 0xfffffffeU;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, id != 0xfffffffeU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, id != 0xfffffffeU);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
+	XA_BUG_ON(xa, id != 0xffffffffU);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	XA_BUG_ON(&xa0, id != 0xffffffffU);
-	xa_destroy(&xa0);
+	XA_BUG_ON(xa, id != 0xffffffffU);
+	xa_destroy(xa);
 
 	id = 10;
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
-	XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
+	XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0);
+	XA_BUG_ON(xa, xa_alloc(xa, &id, 5, xa_mk_index(id),
 				GFP_KERNEL) != -ENOSPC);
-	xa_erase_index(&xa0, 3);
-	XA_BUG_ON(&xa0, !xa_empty(&xa0));
+	xa_erase_index(xa, 3);
+	XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static DEFINE_XARRAY_ALLOC(xa0);
+static DEFINE_XARRAY_ALLOC1(xa1);
+
+static noinline void check_xa_alloc(void)
+{
+	check_xa_alloc_1(&xa0, 0);
+	check_xa_alloc_1(&xa1, 1);
 }
 
 static noinline void __check_store_iter(struct xarray *xa, unsigned long start,