summary | refs | log | tree | commit | diff | stats
path: root/lib/test_xarray.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-12-20 13:53:28 -0500
committerDavid S. Miller <davem@davemloft.net>2018-12-20 14:53:36 -0500
commit2be09de7d6a06f58e768de1255a687c9aaa66606 (patch)
tree298f9e04caf105873d987e807eccba27710a49cc /lib/test_xarray.c
parent44a7b3b6e3a458f9549c2cc28e74ecdc470e42f1 (diff)
parent1d51b4b1d3f2db0d6d144175e31a84e472fbd99a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Lots of conflicts, but happily all cases of overlapping changes, parallel adds, things of that nature. Thanks to Stephen Rothwell, Saeed Mahameed, and others for their guidance in these resolutions. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib/test_xarray.c')
-rw-r--r--  lib/test_xarray.c | 155
1 file changed, 112 insertions(+), 43 deletions(-)
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index 0598e86af8fc..4676c0a1eeca 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { }
28} while (0) 28} while (0)
29#endif 29#endif
30 30
31static void *xa_mk_index(unsigned long index)
32{
33 return xa_mk_value(index & LONG_MAX);
34}
35
31static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) 36static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp)
32{ 37{
33 return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp); 38 return xa_store(xa, index, xa_mk_index(index), gfp);
34} 39}
35 40
36static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) 41static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp)
37{ 42{
38 u32 id = 0; 43 u32 id = 0;
39 44
40 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX), 45 XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index),
41 gfp) != 0); 46 gfp) != 0);
42 XA_BUG_ON(xa, id != index); 47 XA_BUG_ON(xa, id != index);
43} 48}
44 49
45static void xa_erase_index(struct xarray *xa, unsigned long index) 50static void xa_erase_index(struct xarray *xa, unsigned long index)
46{ 51{
47 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX)); 52 XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index));
48 XA_BUG_ON(xa, xa_load(xa, index) != NULL); 53 XA_BUG_ON(xa, xa_load(xa, index) != NULL);
49} 54}
50 55
@@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa)
118 123
119 xas_set(&xas, 0); 124 xas_set(&xas, 0);
120 xas_for_each(&xas, entry, ULONG_MAX) { 125 xas_for_each(&xas, entry, ULONG_MAX) {
121 xas_store(&xas, xa_mk_value(xas.xa_index)); 126 xas_store(&xas, xa_mk_index(xas.xa_index));
122 } 127 }
123 xas_unlock(&xas); 128 xas_unlock(&xas);
124 129
@@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
196 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); 201 XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL));
197 xa_set_mark(xa, index + 2, XA_MARK_1); 202 xa_set_mark(xa, index + 2, XA_MARK_1);
198 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); 203 XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL));
199 xa_store_order(xa, index, order, xa_mk_value(index), 204 xa_store_order(xa, index, order, xa_mk_index(index),
200 GFP_KERNEL); 205 GFP_KERNEL);
201 for (i = base; i < next; i++) { 206 for (i = base; i < next; i++) {
202 XA_STATE(xas, xa, i); 207 XA_STATE(xas, xa, i);
@@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa)
405 xas_set(&xas, j); 410 xas_set(&xas, j);
406 do { 411 do {
407 xas_lock(&xas); 412 xas_lock(&xas);
408 xas_store(&xas, xa_mk_value(j)); 413 xas_store(&xas, xa_mk_index(j));
409 xas_unlock(&xas); 414 xas_unlock(&xas);
410 } while (xas_nomem(&xas, GFP_KERNEL)); 415 } while (xas_nomem(&xas, GFP_KERNEL));
411 } 416 }
@@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa)
423 xas_set(&xas, 0); 428 xas_set(&xas, 0);
424 j = i; 429 j = i;
425 xas_for_each(&xas, entry, ULONG_MAX) { 430 xas_for_each(&xas, entry, ULONG_MAX) {
426 XA_BUG_ON(xa, entry != xa_mk_value(j)); 431 XA_BUG_ON(xa, entry != xa_mk_index(j));
427 xas_store(&xas, NULL); 432 xas_store(&xas, NULL);
428 j++; 433 j++;
429 } 434 }
@@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
440 unsigned long min = index & ~((1UL << order) - 1); 445 unsigned long min = index & ~((1UL << order) - 1);
441 unsigned long max = min + (1UL << order); 446 unsigned long max = min + (1UL << order);
442 447
443 xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL); 448 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
444 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index)); 449 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index));
445 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index)); 450 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index));
446 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 451 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
447 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 452 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
448 453
449 xas_lock(&xas); 454 xas_lock(&xas);
450 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); 455 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index));
451 xas_unlock(&xas); 456 xas_unlock(&xas);
452 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); 457 XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min));
453 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); 458 XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min));
454 XA_BUG_ON(xa, xa_load(xa, max) != NULL); 459 XA_BUG_ON(xa, xa_load(xa, max) != NULL);
455 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); 460 XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
456 461
@@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
471 xas_unlock(&xas); 476 xas_unlock(&xas);
472 XA_BUG_ON(xa, !xa_empty(xa)); 477 XA_BUG_ON(xa, !xa_empty(xa));
473} 478}
479
480static noinline void check_multi_store_3(struct xarray *xa, unsigned long index,
481 unsigned int order)
482{
483 XA_STATE(xas, xa, 0);
484 void *entry;
485 int n = 0;
486
487 xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL);
488
489 xas_lock(&xas);
490 xas_for_each(&xas, entry, ULONG_MAX) {
491 XA_BUG_ON(xa, entry != xa_mk_index(index));
492 n++;
493 }
494 XA_BUG_ON(xa, n != 1);
495 xas_set(&xas, index + 1);
496 xas_for_each(&xas, entry, ULONG_MAX) {
497 XA_BUG_ON(xa, entry != xa_mk_index(index));
498 n++;
499 }
500 XA_BUG_ON(xa, n != 2);
501 xas_unlock(&xas);
502
503 xa_destroy(xa);
504}
474#endif 505#endif
475 506
476static noinline void check_multi_store(struct xarray *xa) 507static noinline void check_multi_store(struct xarray *xa)
@@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa)
523 554
524 for (i = 0; i < max_order; i++) { 555 for (i = 0; i < max_order; i++) {
525 for (j = 0; j < max_order; j++) { 556 for (j = 0; j < max_order; j++) {
526 xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL); 557 xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL);
527 xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL); 558 xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL);
528 559
529 for (k = 0; k < max_order; k++) { 560 for (k = 0; k < max_order; k++) {
530 void *entry = xa_load(xa, (1UL << k) - 1); 561 void *entry = xa_load(xa, (1UL << k) - 1);
531 if ((i < k) && (j < k)) 562 if ((i < k) && (j < k))
532 XA_BUG_ON(xa, entry != NULL); 563 XA_BUG_ON(xa, entry != NULL);
533 else 564 else
534 XA_BUG_ON(xa, entry != xa_mk_value(j)); 565 XA_BUG_ON(xa, entry != xa_mk_index(j));
535 } 566 }
536 567
537 xa_erase(xa, 0); 568 xa_erase(xa, 0);
@@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa)
545 check_multi_store_1(xa, (1UL << i) + 1, i); 576 check_multi_store_1(xa, (1UL << i) + 1, i);
546 } 577 }
547 check_multi_store_2(xa, 4095, 9); 578 check_multi_store_2(xa, 4095, 9);
579
580 for (i = 1; i < 20; i++) {
581 check_multi_store_3(xa, 0, i);
582 check_multi_store_3(xa, 1UL << i, i);
583 }
548#endif 584#endif
549} 585}
550 586
@@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void)
587 xa_destroy(&xa0); 623 xa_destroy(&xa0);
588 624
589 id = 0xfffffffeU; 625 id = 0xfffffffeU;
590 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 626 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
591 GFP_KERNEL) != 0); 627 GFP_KERNEL) != 0);
592 XA_BUG_ON(&xa0, id != 0xfffffffeU); 628 XA_BUG_ON(&xa0, id != 0xfffffffeU);
593 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 629 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
594 GFP_KERNEL) != 0); 630 GFP_KERNEL) != 0);
595 XA_BUG_ON(&xa0, id != 0xffffffffU); 631 XA_BUG_ON(&xa0, id != 0xffffffffU);
596 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), 632 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id),
597 GFP_KERNEL) != -ENOSPC); 633 GFP_KERNEL) != -ENOSPC);
598 XA_BUG_ON(&xa0, id != 0xffffffffU); 634 XA_BUG_ON(&xa0, id != 0xffffffffU);
599 xa_destroy(&xa0); 635 xa_destroy(&xa0);
636
637 id = 10;
638 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
639 GFP_KERNEL) != -ENOSPC);
640 XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0);
641 XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id),
642 GFP_KERNEL) != -ENOSPC);
643 xa_erase_index(&xa0, 3);
644 XA_BUG_ON(&xa0, !xa_empty(&xa0));
600} 645}
601 646
602static noinline void __check_store_iter(struct xarray *xa, unsigned long start, 647static noinline void __check_store_iter(struct xarray *xa, unsigned long start,
@@ -610,11 +655,11 @@ retry:
610 xas_lock(&xas); 655 xas_lock(&xas);
611 xas_for_each_conflict(&xas, entry) { 656 xas_for_each_conflict(&xas, entry) {
612 XA_BUG_ON(xa, !xa_is_value(entry)); 657 XA_BUG_ON(xa, !xa_is_value(entry));
613 XA_BUG_ON(xa, entry < xa_mk_value(start)); 658 XA_BUG_ON(xa, entry < xa_mk_index(start));
614 XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1)); 659 XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1));
615 count++; 660 count++;
616 } 661 }
617 xas_store(&xas, xa_mk_value(start)); 662 xas_store(&xas, xa_mk_index(start));
618 xas_unlock(&xas); 663 xas_unlock(&xas);
619 if (xas_nomem(&xas, GFP_KERNEL)) { 664 if (xas_nomem(&xas, GFP_KERNEL)) {
620 count = 0; 665 count = 0;
@@ -622,9 +667,9 @@ retry:
622 } 667 }
623 XA_BUG_ON(xa, xas_error(&xas)); 668 XA_BUG_ON(xa, xas_error(&xas));
624 XA_BUG_ON(xa, count != present); 669 XA_BUG_ON(xa, count != present);
625 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start)); 670 XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start));
626 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != 671 XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) !=
627 xa_mk_value(start)); 672 xa_mk_index(start));
628 xa_erase_index(xa, start); 673 xa_erase_index(xa, start);
629} 674}
630 675
@@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
703 for (j = 0; j < index; j++) { 748 for (j = 0; j < index; j++) {
704 XA_STATE(xas, xa, j + index); 749 XA_STATE(xas, xa, j + index);
705 xa_store_index(xa, index - 1, GFP_KERNEL); 750 xa_store_index(xa, index - 1, GFP_KERNEL);
706 xa_store_order(xa, index, i, xa_mk_value(index), 751 xa_store_order(xa, index, i, xa_mk_index(index),
707 GFP_KERNEL); 752 GFP_KERNEL);
708 rcu_read_lock(); 753 rcu_read_lock();
709 xas_for_each(&xas, entry, ULONG_MAX) { 754 xas_for_each(&xas, entry, ULONG_MAX) {
@@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa)
778 j = 0; 823 j = 0;
779 index = 0; 824 index = 0;
780 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { 825 xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
781 XA_BUG_ON(xa, xa_mk_value(index) != entry); 826 XA_BUG_ON(xa, xa_mk_index(index) != entry);
782 XA_BUG_ON(xa, index != j++); 827 XA_BUG_ON(xa, index != j++);
783 } 828 }
784 } 829 }
@@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa)
786 xa_destroy(xa); 831 xa_destroy(xa);
787} 832}
788 833
834static noinline void check_find_3(struct xarray *xa)
835{
836 XA_STATE(xas, xa, 0);
837 unsigned long i, j, k;
838 void *entry;
839
840 for (i = 0; i < 100; i++) {
841 for (j = 0; j < 100; j++) {
842 for (k = 0; k < 100; k++) {
843 xas_set(&xas, j);
844 xas_for_each_marked(&xas, entry, k, XA_MARK_0)
845 ;
846 if (j > k)
847 XA_BUG_ON(xa,
848 xas.xa_node != XAS_RESTART);
849 }
850 }
851 xa_store_index(xa, i, GFP_KERNEL);
852 xa_set_mark(xa, i, XA_MARK_0);
853 }
854 xa_destroy(xa);
855}
856
789static noinline void check_find(struct xarray *xa) 857static noinline void check_find(struct xarray *xa)
790{ 858{
791 check_find_1(xa); 859 check_find_1(xa);
792 check_find_2(xa); 860 check_find_2(xa);
861 check_find_3(xa);
793 check_multi_find(xa); 862 check_multi_find(xa);
794 check_multi_find_2(xa); 863 check_multi_find_2(xa);
795} 864}
@@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa)
829 for (index = 0; index < (1UL << (order + 5)); 898 for (index = 0; index < (1UL << (order + 5));
830 index += (1UL << order)) { 899 index += (1UL << order)) {
831 xa_store_order(xa, index, order, 900 xa_store_order(xa, index, order,
832 xa_mk_value(index), GFP_KERNEL); 901 xa_mk_index(index), GFP_KERNEL);
833 XA_BUG_ON(xa, xa_load(xa, index) != 902 XA_BUG_ON(xa, xa_load(xa, index) !=
834 xa_mk_value(index)); 903 xa_mk_index(index));
835 XA_BUG_ON(xa, xa_find_entry(xa, 904 XA_BUG_ON(xa, xa_find_entry(xa,
836 xa_mk_value(index)) != index); 905 xa_mk_index(index)) != index);
837 } 906 }
838 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 907 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
839 xa_destroy(xa); 908 xa_destroy(xa);
@@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa)
844 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 913 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
845 xa_store_index(xa, ULONG_MAX, GFP_KERNEL); 914 xa_store_index(xa, ULONG_MAX, GFP_KERNEL);
846 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); 915 XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1);
847 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1); 916 XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1);
848 xa_erase_index(xa, ULONG_MAX); 917 xa_erase_index(xa, ULONG_MAX);
849 XA_BUG_ON(xa, !xa_empty(xa)); 918 XA_BUG_ON(xa, !xa_empty(xa));
850} 919}
@@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
864 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 933 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
865 XA_BUG_ON(xa, xas.xa_index != i); 934 XA_BUG_ON(xa, xas.xa_index != i);
866 if (i == 0 || i == idx) 935 if (i == 0 || i == idx)
867 XA_BUG_ON(xa, entry != xa_mk_value(i)); 936 XA_BUG_ON(xa, entry != xa_mk_index(i));
868 else 937 else
869 XA_BUG_ON(xa, entry != NULL); 938 XA_BUG_ON(xa, entry != NULL);
870 } 939 }
@@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx)
878 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); 947 XA_BUG_ON(xa, xas.xa_node == XAS_RESTART);
879 XA_BUG_ON(xa, xas.xa_index != i); 948 XA_BUG_ON(xa, xas.xa_index != i);
880 if (i == 0 || i == idx) 949 if (i == 0 || i == idx)
881 XA_BUG_ON(xa, entry != xa_mk_value(i)); 950 XA_BUG_ON(xa, entry != xa_mk_index(i));
882 else 951 else
883 XA_BUG_ON(xa, entry != NULL); 952 XA_BUG_ON(xa, entry != NULL);
884 } while (i > 0); 953 } while (i > 0);
@@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa)
909 do { 978 do {
910 void *entry = xas_prev(&xas); 979 void *entry = xas_prev(&xas);
911 i--; 980 i--;
912 XA_BUG_ON(xa, entry != xa_mk_value(i)); 981 XA_BUG_ON(xa, entry != xa_mk_index(i));
913 XA_BUG_ON(xa, i != xas.xa_index); 982 XA_BUG_ON(xa, i != xas.xa_index);
914 } while (i != 0); 983 } while (i != 0);
915 984
@@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa)
918 987
919 do { 988 do {
920 void *entry = xas_next(&xas); 989 void *entry = xas_next(&xas);
921 XA_BUG_ON(xa, entry != xa_mk_value(i)); 990 XA_BUG_ON(xa, entry != xa_mk_index(i));
922 XA_BUG_ON(xa, i != xas.xa_index); 991 XA_BUG_ON(xa, i != xas.xa_index);
923 i++; 992 i++;
924 } while (i < (1 << 16)); 993 } while (i < (1 << 16));
@@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa)
934 void *entry = xas_prev(&xas); 1003 void *entry = xas_prev(&xas);
935 i--; 1004 i--;
936 if ((i < (1 << 8)) || (i >= (1 << 15))) 1005 if ((i < (1 << 8)) || (i >= (1 << 15)))
937 XA_BUG_ON(xa, entry != xa_mk_value(i)); 1006 XA_BUG_ON(xa, entry != xa_mk_index(i));
938 else 1007 else
939 XA_BUG_ON(xa, entry != NULL); 1008 XA_BUG_ON(xa, entry != NULL);
940 XA_BUG_ON(xa, i != xas.xa_index); 1009 XA_BUG_ON(xa, i != xas.xa_index);
@@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa)
946 do { 1015 do {
947 void *entry = xas_next(&xas); 1016 void *entry = xas_next(&xas);
948 if ((i < (1 << 8)) || (i >= (1 << 15))) 1017 if ((i < (1 << 8)) || (i >= (1 << 15)))
949 XA_BUG_ON(xa, entry != xa_mk_value(i)); 1018 XA_BUG_ON(xa, entry != xa_mk_index(i));
950 else 1019 else
951 XA_BUG_ON(xa, entry != NULL); 1020 XA_BUG_ON(xa, entry != NULL);
952 XA_BUG_ON(xa, i != xas.xa_index); 1021 XA_BUG_ON(xa, i != xas.xa_index);
@@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa,
976 if (xas_error(&xas)) 1045 if (xas_error(&xas))
977 goto unlock; 1046 goto unlock;
978 for (i = 0; i < (1U << order); i++) { 1047 for (i = 0; i < (1U << order); i++) {
979 XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i))); 1048 XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i)));
980 xas_next(&xas); 1049 xas_next(&xas);
981 } 1050 }
982unlock: 1051unlock:
@@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa,
1031 if (xas_error(&xas)) 1100 if (xas_error(&xas))
1032 goto unlock; 1101 goto unlock;
1033 for (i = 0; i < (1UL << order); i++) { 1102 for (i = 0; i < (1UL << order); i++) {
1034 void *old = xas_store(&xas, xa_mk_value(base + i)); 1103 void *old = xas_store(&xas, xa_mk_index(base + i));
1035 if (xas.xa_index == index) 1104 if (xas.xa_index == index)
1036 XA_BUG_ON(xa, old != xa_mk_value(base + i)); 1105 XA_BUG_ON(xa, old != xa_mk_index(base + i));
1037 else 1106 else
1038 XA_BUG_ON(xa, old != NULL); 1107 XA_BUG_ON(xa, old != NULL);
1039 xas_next(&xas); 1108 xas_next(&xas);
@@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first,
1085 unsigned long last) 1154 unsigned long last)
1086{ 1155{
1087#ifdef CONFIG_XARRAY_MULTI 1156#ifdef CONFIG_XARRAY_MULTI
1088 xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL); 1157 xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL);
1089 1158
1090 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first)); 1159 XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first));
1091 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first)); 1160 XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first));
1092 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); 1161 XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL);
1093 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); 1162 XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL);
1094 1163
@@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa)
1195 XA_BUG_ON(xa, xas.xa_node->nr_values != 0); 1264 XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
1196 rcu_read_unlock(); 1265 rcu_read_unlock();
1197 1266
1198 xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), 1267 xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order),
1199 GFP_KERNEL); 1268 GFP_KERNEL);
1200 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); 1269 XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2);
1201 1270