diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-13 19:35:58 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-12-13 19:35:58 -0500 |
| commit | 880b9df1bf157dc28a2e65beea6183d095e0ccb0 (patch) | |
| tree | 4b02c486ad69782a0472e2253e33c322a7b960db | |
| parent | 65e08c5e86311143f45c3e4389561af3107fc8f6 (diff) | |
| parent | 48483614de97c4f5219abeda630e62b2bebdce62 (diff) | |
Merge tag 'xarray-4.20-rc7' of git://git.infradead.org/users/willy/linux-dax
Pull XArray fixes from Matthew Wilcox:
"Two bugfixes, each with test-suite updates, two improvements to the
test-suite without associated bugs, and one patch adding a missing
API"
* tag 'xarray-4.20-rc7' of git://git.infradead.org/users/willy/linux-dax:
XArray: Fix xa_alloc when id exceeds max
XArray tests: Check iterating over multiorder entries
XArray tests: Handle larger indices more elegantly
XArray: Add xa_cmpxchg_irq and xa_cmpxchg_bh
radix tree: Don't return retry entries from lookup
| -rw-r--r-- | Documentation/core-api/xarray.rst | 5 | ||||
| -rw-r--r-- | include/linux/xarray.h | 54 | ||||
| -rw-r--r-- | lib/radix-tree.c | 4 | ||||
| -rw-r--r-- | lib/test_xarray.c | 155 | ||||
| -rw-r--r-- | lib/xarray.c | 8 | ||||
| -rw-r--r-- | mm/shmem.c | 4 | ||||
| -rw-r--r-- | tools/testing/radix-tree/Makefile | 1 | ||||
| -rw-r--r-- | tools/testing/radix-tree/main.c | 1 | ||||
| -rw-r--r-- | tools/testing/radix-tree/regression.h | 1 | ||||
| -rw-r--r-- | tools/testing/radix-tree/regression4.c | 79 |
10 files changed, 258 insertions(+), 54 deletions(-)
diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index dbe96cb5558e..6a6d67acaf69 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst | |||
| @@ -187,6 +187,8 @@ Takes xa_lock internally: | |||
| 187 | * :c:func:`xa_erase_bh` | 187 | * :c:func:`xa_erase_bh` |
| 188 | * :c:func:`xa_erase_irq` | 188 | * :c:func:`xa_erase_irq` |
| 189 | * :c:func:`xa_cmpxchg` | 189 | * :c:func:`xa_cmpxchg` |
| 190 | * :c:func:`xa_cmpxchg_bh` | ||
| 191 | * :c:func:`xa_cmpxchg_irq` | ||
| 190 | * :c:func:`xa_store_range` | 192 | * :c:func:`xa_store_range` |
| 191 | * :c:func:`xa_alloc` | 193 | * :c:func:`xa_alloc` |
| 192 | * :c:func:`xa_alloc_bh` | 194 | * :c:func:`xa_alloc_bh` |
| @@ -263,7 +265,8 @@ using :c:func:`xa_lock_irqsave` in both the interrupt handler and process | |||
| 263 | context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` | 265 | context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock` |
| 264 | in the interrupt handler. Some of the more common patterns have helper | 266 | in the interrupt handler. Some of the more common patterns have helper |
| 265 | functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, | 267 | functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`, |
| 266 | :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`. | 268 | :c:func:`xa_erase_bh`, :c:func:`xa_erase_irq`, :c:func:`xa_cmpxchg_bh` |
| 269 | and :c:func:`xa_cmpxchg_irq`. | ||
| 267 | 270 | ||
| 268 | Sometimes you need to protect access to the XArray with a mutex because | 271 | Sometimes you need to protect access to the XArray with a mutex because |
| 269 | that lock sits above another mutex in the locking hierarchy. That does | 272 | that lock sits above another mutex in the locking hierarchy. That does |
diff --git a/include/linux/xarray.h b/include/linux/xarray.h index 564892e19f8c..f492e21c4aa2 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h | |||
| @@ -554,6 +554,60 @@ static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index, | |||
| 554 | } | 554 | } |
| 555 | 555 | ||
| 556 | /** | 556 | /** |
| 557 | * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray. | ||
| 558 | * @xa: XArray. | ||
| 559 | * @index: Index into array. | ||
| 560 | * @old: Old value to test against. | ||
| 561 | * @entry: New value to place in array. | ||
| 562 | * @gfp: Memory allocation flags. | ||
| 563 | * | ||
| 564 | * This function is like calling xa_cmpxchg() except it disables softirqs | ||
| 565 | * while holding the array lock. | ||
| 566 | * | ||
| 567 | * Context: Any context. Takes and releases the xa_lock while | ||
| 568 | * disabling softirqs. May sleep if the @gfp flags permit. | ||
| 569 | * Return: The old value at this index or xa_err() if an error happened. | ||
| 570 | */ | ||
| 571 | static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index, | ||
| 572 | void *old, void *entry, gfp_t gfp) | ||
| 573 | { | ||
| 574 | void *curr; | ||
| 575 | |||
| 576 | xa_lock_bh(xa); | ||
| 577 | curr = __xa_cmpxchg(xa, index, old, entry, gfp); | ||
| 578 | xa_unlock_bh(xa); | ||
| 579 | |||
| 580 | return curr; | ||
| 581 | } | ||
| 582 | |||
| 583 | /** | ||
| 584 | * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray. | ||
| 585 | * @xa: XArray. | ||
| 586 | * @index: Index into array. | ||
| 587 | * @old: Old value to test against. | ||
| 588 | * @entry: New value to place in array. | ||
| 589 | * @gfp: Memory allocation flags. | ||
| 590 | * | ||
| 591 | * This function is like calling xa_cmpxchg() except it disables interrupts | ||
| 592 | * while holding the array lock. | ||
| 593 | * | ||
| 594 | * Context: Process context. Takes and releases the xa_lock while | ||
| 595 | * disabling interrupts. May sleep if the @gfp flags permit. | ||
| 596 | * Return: The old value at this index or xa_err() if an error happened. | ||
| 597 | */ | ||
| 598 | static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, | ||
| 599 | void *old, void *entry, gfp_t gfp) | ||
| 600 | { | ||
| 601 | void *curr; | ||
| 602 | |||
| 603 | xa_lock_irq(xa); | ||
| 604 | curr = __xa_cmpxchg(xa, index, old, entry, gfp); | ||
| 605 | xa_unlock_irq(xa); | ||
| 606 | |||
| 607 | return curr; | ||
| 608 | } | ||
| 609 | |||
| 610 | /** | ||
| 557 | * xa_insert() - Store this entry in the XArray unless another entry is | 611 | * xa_insert() - Store this entry in the XArray unless another entry is |
| 558 | * already present. | 612 | * already present. |
| 559 | * @xa: XArray. | 613 | * @xa: XArray. |
diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1106bb6aa01e..14d51548bea6 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c | |||
| @@ -784,11 +784,11 @@ void *__radix_tree_lookup(const struct radix_tree_root *root, | |||
| 784 | while (radix_tree_is_internal_node(node)) { | 784 | while (radix_tree_is_internal_node(node)) { |
| 785 | unsigned offset; | 785 | unsigned offset; |
| 786 | 786 | ||
| 787 | if (node == RADIX_TREE_RETRY) | ||
| 788 | goto restart; | ||
| 789 | parent = entry_to_node(node); | 787 | parent = entry_to_node(node); |
| 790 | offset = radix_tree_descend(parent, &node, index); | 788 | offset = radix_tree_descend(parent, &node, index); |
| 791 | slot = parent->slots + offset; | 789 | slot = parent->slots + offset; |
| 790 | if (node == RADIX_TREE_RETRY) | ||
| 791 | goto restart; | ||
| 792 | if (parent->shift == 0) | 792 | if (parent->shift == 0) |
| 793 | break; | 793 | break; |
| 794 | } | 794 | } |
diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 0598e86af8fc..4676c0a1eeca 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c | |||
| @@ -28,23 +28,28 @@ void xa_dump(const struct xarray *xa) { } | |||
| 28 | } while (0) | 28 | } while (0) |
| 29 | #endif | 29 | #endif |
| 30 | 30 | ||
| 31 | static void *xa_mk_index(unsigned long index) | ||
| 32 | { | ||
| 33 | return xa_mk_value(index & LONG_MAX); | ||
| 34 | } | ||
| 35 | |||
| 31 | static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) | 36 | static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 32 | { | 37 | { |
| 33 | return xa_store(xa, index, xa_mk_value(index & LONG_MAX), gfp); | 38 | return xa_store(xa, index, xa_mk_index(index), gfp); |
| 34 | } | 39 | } |
| 35 | 40 | ||
| 36 | static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) | 41 | static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 37 | { | 42 | { |
| 38 | u32 id = 0; | 43 | u32 id = 0; |
| 39 | 44 | ||
| 40 | XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_value(index & LONG_MAX), | 45 | XA_BUG_ON(xa, xa_alloc(xa, &id, UINT_MAX, xa_mk_index(index), |
| 41 | gfp) != 0); | 46 | gfp) != 0); |
| 42 | XA_BUG_ON(xa, id != index); | 47 | XA_BUG_ON(xa, id != index); |
| 43 | } | 48 | } |
| 44 | 49 | ||
| 45 | static void xa_erase_index(struct xarray *xa, unsigned long index) | 50 | static void xa_erase_index(struct xarray *xa, unsigned long index) |
| 46 | { | 51 | { |
| 47 | XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_value(index & LONG_MAX)); | 52 | XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); |
| 48 | XA_BUG_ON(xa, xa_load(xa, index) != NULL); | 53 | XA_BUG_ON(xa, xa_load(xa, index) != NULL); |
| 49 | } | 54 | } |
| 50 | 55 | ||
| @@ -118,7 +123,7 @@ static noinline void check_xas_retry(struct xarray *xa) | |||
| 118 | 123 | ||
| 119 | xas_set(&xas, 0); | 124 | xas_set(&xas, 0); |
| 120 | xas_for_each(&xas, entry, ULONG_MAX) { | 125 | xas_for_each(&xas, entry, ULONG_MAX) { |
| 121 | xas_store(&xas, xa_mk_value(xas.xa_index)); | 126 | xas_store(&xas, xa_mk_index(xas.xa_index)); |
| 122 | } | 127 | } |
| 123 | xas_unlock(&xas); | 128 | xas_unlock(&xas); |
| 124 | 129 | ||
| @@ -196,7 +201,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) | |||
| 196 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); | 201 | XA_BUG_ON(xa, xa_store_index(xa, index + 2, GFP_KERNEL)); |
| 197 | xa_set_mark(xa, index + 2, XA_MARK_1); | 202 | xa_set_mark(xa, index + 2, XA_MARK_1); |
| 198 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); | 203 | XA_BUG_ON(xa, xa_store_index(xa, next, GFP_KERNEL)); |
| 199 | xa_store_order(xa, index, order, xa_mk_value(index), | 204 | xa_store_order(xa, index, order, xa_mk_index(index), |
| 200 | GFP_KERNEL); | 205 | GFP_KERNEL); |
| 201 | for (i = base; i < next; i++) { | 206 | for (i = base; i < next; i++) { |
| 202 | XA_STATE(xas, xa, i); | 207 | XA_STATE(xas, xa, i); |
| @@ -405,7 +410,7 @@ static noinline void check_xas_erase(struct xarray *xa) | |||
| 405 | xas_set(&xas, j); | 410 | xas_set(&xas, j); |
| 406 | do { | 411 | do { |
| 407 | xas_lock(&xas); | 412 | xas_lock(&xas); |
| 408 | xas_store(&xas, xa_mk_value(j)); | 413 | xas_store(&xas, xa_mk_index(j)); |
| 409 | xas_unlock(&xas); | 414 | xas_unlock(&xas); |
| 410 | } while (xas_nomem(&xas, GFP_KERNEL)); | 415 | } while (xas_nomem(&xas, GFP_KERNEL)); |
| 411 | } | 416 | } |
| @@ -423,7 +428,7 @@ static noinline void check_xas_erase(struct xarray *xa) | |||
| 423 | xas_set(&xas, 0); | 428 | xas_set(&xas, 0); |
| 424 | j = i; | 429 | j = i; |
| 425 | xas_for_each(&xas, entry, ULONG_MAX) { | 430 | xas_for_each(&xas, entry, ULONG_MAX) { |
| 426 | XA_BUG_ON(xa, entry != xa_mk_value(j)); | 431 | XA_BUG_ON(xa, entry != xa_mk_index(j)); |
| 427 | xas_store(&xas, NULL); | 432 | xas_store(&xas, NULL); |
| 428 | j++; | 433 | j++; |
| 429 | } | 434 | } |
| @@ -440,17 +445,17 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, | |||
| 440 | unsigned long min = index & ~((1UL << order) - 1); | 445 | unsigned long min = index & ~((1UL << order) - 1); |
| 441 | unsigned long max = min + (1UL << order); | 446 | unsigned long max = min + (1UL << order); |
| 442 | 447 | ||
| 443 | xa_store_order(xa, index, order, xa_mk_value(index), GFP_KERNEL); | 448 | xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); |
| 444 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(index)); | 449 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(index)); |
| 445 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(index)); | 450 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(index)); |
| 446 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); | 451 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); |
| 447 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); | 452 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); |
| 448 | 453 | ||
| 449 | xas_lock(&xas); | 454 | xas_lock(&xas); |
| 450 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index)); | 455 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(min)) != xa_mk_index(index)); |
| 451 | xas_unlock(&xas); | 456 | xas_unlock(&xas); |
| 452 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min)); | 457 | XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_index(min)); |
| 453 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min)); | 458 | XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_index(min)); |
| 454 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); | 459 | XA_BUG_ON(xa, xa_load(xa, max) != NULL); |
| 455 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); | 460 | XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); |
| 456 | 461 | ||
| @@ -471,6 +476,32 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, | |||
| 471 | xas_unlock(&xas); | 476 | xas_unlock(&xas); |
| 472 | XA_BUG_ON(xa, !xa_empty(xa)); | 477 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 473 | } | 478 | } |
| 479 | |||
| 480 | static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, | ||
| 481 | unsigned int order) | ||
| 482 | { | ||
| 483 | XA_STATE(xas, xa, 0); | ||
| 484 | void *entry; | ||
| 485 | int n = 0; | ||
| 486 | |||
| 487 | xa_store_order(xa, index, order, xa_mk_index(index), GFP_KERNEL); | ||
| 488 | |||
| 489 | xas_lock(&xas); | ||
| 490 | xas_for_each(&xas, entry, ULONG_MAX) { | ||
| 491 | XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||
| 492 | n++; | ||
| 493 | } | ||
| 494 | XA_BUG_ON(xa, n != 1); | ||
| 495 | xas_set(&xas, index + 1); | ||
| 496 | xas_for_each(&xas, entry, ULONG_MAX) { | ||
| 497 | XA_BUG_ON(xa, entry != xa_mk_index(index)); | ||
| 498 | n++; | ||
| 499 | } | ||
| 500 | XA_BUG_ON(xa, n != 2); | ||
| 501 | xas_unlock(&xas); | ||
| 502 | |||
| 503 | xa_destroy(xa); | ||
| 504 | } | ||
| 474 | #endif | 505 | #endif |
| 475 | 506 | ||
| 476 | static noinline void check_multi_store(struct xarray *xa) | 507 | static noinline void check_multi_store(struct xarray *xa) |
| @@ -523,15 +554,15 @@ static noinline void check_multi_store(struct xarray *xa) | |||
| 523 | 554 | ||
| 524 | for (i = 0; i < max_order; i++) { | 555 | for (i = 0; i < max_order; i++) { |
| 525 | for (j = 0; j < max_order; j++) { | 556 | for (j = 0; j < max_order; j++) { |
| 526 | xa_store_order(xa, 0, i, xa_mk_value(i), GFP_KERNEL); | 557 | xa_store_order(xa, 0, i, xa_mk_index(i), GFP_KERNEL); |
| 527 | xa_store_order(xa, 0, j, xa_mk_value(j), GFP_KERNEL); | 558 | xa_store_order(xa, 0, j, xa_mk_index(j), GFP_KERNEL); |
| 528 | 559 | ||
| 529 | for (k = 0; k < max_order; k++) { | 560 | for (k = 0; k < max_order; k++) { |
| 530 | void *entry = xa_load(xa, (1UL << k) - 1); | 561 | void *entry = xa_load(xa, (1UL << k) - 1); |
| 531 | if ((i < k) && (j < k)) | 562 | if ((i < k) && (j < k)) |
| 532 | XA_BUG_ON(xa, entry != NULL); | 563 | XA_BUG_ON(xa, entry != NULL); |
| 533 | else | 564 | else |
| 534 | XA_BUG_ON(xa, entry != xa_mk_value(j)); | 565 | XA_BUG_ON(xa, entry != xa_mk_index(j)); |
| 535 | } | 566 | } |
| 536 | 567 | ||
| 537 | xa_erase(xa, 0); | 568 | xa_erase(xa, 0); |
| @@ -545,6 +576,11 @@ static noinline void check_multi_store(struct xarray *xa) | |||
| 545 | check_multi_store_1(xa, (1UL << i) + 1, i); | 576 | check_multi_store_1(xa, (1UL << i) + 1, i); |
| 546 | } | 577 | } |
| 547 | check_multi_store_2(xa, 4095, 9); | 578 | check_multi_store_2(xa, 4095, 9); |
| 579 | |||
| 580 | for (i = 1; i < 20; i++) { | ||
| 581 | check_multi_store_3(xa, 0, i); | ||
| 582 | check_multi_store_3(xa, 1UL << i, i); | ||
| 583 | } | ||
| 548 | #endif | 584 | #endif |
| 549 | } | 585 | } |
| 550 | 586 | ||
| @@ -587,16 +623,25 @@ static noinline void check_xa_alloc(void) | |||
| 587 | xa_destroy(&xa0); | 623 | xa_destroy(&xa0); |
| 588 | 624 | ||
| 589 | id = 0xfffffffeU; | 625 | id = 0xfffffffeU; |
| 590 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 626 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 591 | GFP_KERNEL) != 0); | 627 | GFP_KERNEL) != 0); |
| 592 | XA_BUG_ON(&xa0, id != 0xfffffffeU); | 628 | XA_BUG_ON(&xa0, id != 0xfffffffeU); |
| 593 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 629 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 594 | GFP_KERNEL) != 0); | 630 | GFP_KERNEL) != 0); |
| 595 | XA_BUG_ON(&xa0, id != 0xffffffffU); | 631 | XA_BUG_ON(&xa0, id != 0xffffffffU); |
| 596 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_value(0), | 632 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, UINT_MAX, xa_mk_index(id), |
| 597 | GFP_KERNEL) != -ENOSPC); | 633 | GFP_KERNEL) != -ENOSPC); |
| 598 | XA_BUG_ON(&xa0, id != 0xffffffffU); | 634 | XA_BUG_ON(&xa0, id != 0xffffffffU); |
| 599 | xa_destroy(&xa0); | 635 | xa_destroy(&xa0); |
| 636 | |||
| 637 | id = 10; | ||
| 638 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), | ||
| 639 | GFP_KERNEL) != -ENOSPC); | ||
| 640 | XA_BUG_ON(&xa0, xa_store_index(&xa0, 3, GFP_KERNEL) != 0); | ||
| 641 | XA_BUG_ON(&xa0, xa_alloc(&xa0, &id, 5, xa_mk_index(id), | ||
| 642 | GFP_KERNEL) != -ENOSPC); | ||
| 643 | xa_erase_index(&xa0, 3); | ||
| 644 | XA_BUG_ON(&xa0, !xa_empty(&xa0)); | ||
| 600 | } | 645 | } |
| 601 | 646 | ||
| 602 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, | 647 | static noinline void __check_store_iter(struct xarray *xa, unsigned long start, |
| @@ -610,11 +655,11 @@ retry: | |||
| 610 | xas_lock(&xas); | 655 | xas_lock(&xas); |
| 611 | xas_for_each_conflict(&xas, entry) { | 656 | xas_for_each_conflict(&xas, entry) { |
| 612 | XA_BUG_ON(xa, !xa_is_value(entry)); | 657 | XA_BUG_ON(xa, !xa_is_value(entry)); |
| 613 | XA_BUG_ON(xa, entry < xa_mk_value(start)); | 658 | XA_BUG_ON(xa, entry < xa_mk_index(start)); |
| 614 | XA_BUG_ON(xa, entry > xa_mk_value(start + (1UL << order) - 1)); | 659 | XA_BUG_ON(xa, entry > xa_mk_index(start + (1UL << order) - 1)); |
| 615 | count++; | 660 | count++; |
| 616 | } | 661 | } |
| 617 | xas_store(&xas, xa_mk_value(start)); | 662 | xas_store(&xas, xa_mk_index(start)); |
| 618 | xas_unlock(&xas); | 663 | xas_unlock(&xas); |
| 619 | if (xas_nomem(&xas, GFP_KERNEL)) { | 664 | if (xas_nomem(&xas, GFP_KERNEL)) { |
| 620 | count = 0; | 665 | count = 0; |
| @@ -622,9 +667,9 @@ retry: | |||
| 622 | } | 667 | } |
| 623 | XA_BUG_ON(xa, xas_error(&xas)); | 668 | XA_BUG_ON(xa, xas_error(&xas)); |
| 624 | XA_BUG_ON(xa, count != present); | 669 | XA_BUG_ON(xa, count != present); |
| 625 | XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_value(start)); | 670 | XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); |
| 626 | XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != | 671 | XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != |
| 627 | xa_mk_value(start)); | 672 | xa_mk_index(start)); |
| 628 | xa_erase_index(xa, start); | 673 | xa_erase_index(xa, start); |
| 629 | } | 674 | } |
| 630 | 675 | ||
| @@ -703,7 +748,7 @@ static noinline void check_multi_find_2(struct xarray *xa) | |||
| 703 | for (j = 0; j < index; j++) { | 748 | for (j = 0; j < index; j++) { |
| 704 | XA_STATE(xas, xa, j + index); | 749 | XA_STATE(xas, xa, j + index); |
| 705 | xa_store_index(xa, index - 1, GFP_KERNEL); | 750 | xa_store_index(xa, index - 1, GFP_KERNEL); |
| 706 | xa_store_order(xa, index, i, xa_mk_value(index), | 751 | xa_store_order(xa, index, i, xa_mk_index(index), |
| 707 | GFP_KERNEL); | 752 | GFP_KERNEL); |
| 708 | rcu_read_lock(); | 753 | rcu_read_lock(); |
| 709 | xas_for_each(&xas, entry, ULONG_MAX) { | 754 | xas_for_each(&xas, entry, ULONG_MAX) { |
| @@ -778,7 +823,7 @@ static noinline void check_find_2(struct xarray *xa) | |||
| 778 | j = 0; | 823 | j = 0; |
| 779 | index = 0; | 824 | index = 0; |
| 780 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { | 825 | xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) { |
| 781 | XA_BUG_ON(xa, xa_mk_value(index) != entry); | 826 | XA_BUG_ON(xa, xa_mk_index(index) != entry); |
| 782 | XA_BUG_ON(xa, index != j++); | 827 | XA_BUG_ON(xa, index != j++); |
| 783 | } | 828 | } |
| 784 | } | 829 | } |
| @@ -786,10 +831,34 @@ static noinline void check_find_2(struct xarray *xa) | |||
| 786 | xa_destroy(xa); | 831 | xa_destroy(xa); |
| 787 | } | 832 | } |
| 788 | 833 | ||
| 834 | static noinline void check_find_3(struct xarray *xa) | ||
| 835 | { | ||
| 836 | XA_STATE(xas, xa, 0); | ||
| 837 | unsigned long i, j, k; | ||
| 838 | void *entry; | ||
| 839 | |||
| 840 | for (i = 0; i < 100; i++) { | ||
| 841 | for (j = 0; j < 100; j++) { | ||
| 842 | for (k = 0; k < 100; k++) { | ||
| 843 | xas_set(&xas, j); | ||
| 844 | xas_for_each_marked(&xas, entry, k, XA_MARK_0) | ||
| 845 | ; | ||
| 846 | if (j > k) | ||
| 847 | XA_BUG_ON(xa, | ||
| 848 | xas.xa_node != XAS_RESTART); | ||
| 849 | } | ||
| 850 | } | ||
| 851 | xa_store_index(xa, i, GFP_KERNEL); | ||
| 852 | xa_set_mark(xa, i, XA_MARK_0); | ||
| 853 | } | ||
| 854 | xa_destroy(xa); | ||
| 855 | } | ||
| 856 | |||
| 789 | static noinline void check_find(struct xarray *xa) | 857 | static noinline void check_find(struct xarray *xa) |
| 790 | { | 858 | { |
| 791 | check_find_1(xa); | 859 | check_find_1(xa); |
| 792 | check_find_2(xa); | 860 | check_find_2(xa); |
| 861 | check_find_3(xa); | ||
| 793 | check_multi_find(xa); | 862 | check_multi_find(xa); |
| 794 | check_multi_find_2(xa); | 863 | check_multi_find_2(xa); |
| 795 | } | 864 | } |
| @@ -829,11 +898,11 @@ static noinline void check_find_entry(struct xarray *xa) | |||
| 829 | for (index = 0; index < (1UL << (order + 5)); | 898 | for (index = 0; index < (1UL << (order + 5)); |
| 830 | index += (1UL << order)) { | 899 | index += (1UL << order)) { |
| 831 | xa_store_order(xa, index, order, | 900 | xa_store_order(xa, index, order, |
| 832 | xa_mk_value(index), GFP_KERNEL); | 901 | xa_mk_index(index), GFP_KERNEL); |
| 833 | XA_BUG_ON(xa, xa_load(xa, index) != | 902 | XA_BUG_ON(xa, xa_load(xa, index) != |
| 834 | xa_mk_value(index)); | 903 | xa_mk_index(index)); |
| 835 | XA_BUG_ON(xa, xa_find_entry(xa, | 904 | XA_BUG_ON(xa, xa_find_entry(xa, |
| 836 | xa_mk_value(index)) != index); | 905 | xa_mk_index(index)) != index); |
| 837 | } | 906 | } |
| 838 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 907 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 839 | xa_destroy(xa); | 908 | xa_destroy(xa); |
| @@ -844,7 +913,7 @@ static noinline void check_find_entry(struct xarray *xa) | |||
| 844 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 913 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 845 | xa_store_index(xa, ULONG_MAX, GFP_KERNEL); | 914 | xa_store_index(xa, ULONG_MAX, GFP_KERNEL); |
| 846 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); | 915 | XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); |
| 847 | XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_value(LONG_MAX)) != -1); | 916 | XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); |
| 848 | xa_erase_index(xa, ULONG_MAX); | 917 | xa_erase_index(xa, ULONG_MAX); |
| 849 | XA_BUG_ON(xa, !xa_empty(xa)); | 918 | XA_BUG_ON(xa, !xa_empty(xa)); |
| 850 | } | 919 | } |
| @@ -864,7 +933,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) | |||
| 864 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); | 933 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); |
| 865 | XA_BUG_ON(xa, xas.xa_index != i); | 934 | XA_BUG_ON(xa, xas.xa_index != i); |
| 866 | if (i == 0 || i == idx) | 935 | if (i == 0 || i == idx) |
| 867 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 936 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 868 | else | 937 | else |
| 869 | XA_BUG_ON(xa, entry != NULL); | 938 | XA_BUG_ON(xa, entry != NULL); |
| 870 | } | 939 | } |
| @@ -878,7 +947,7 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) | |||
| 878 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); | 947 | XA_BUG_ON(xa, xas.xa_node == XAS_RESTART); |
| 879 | XA_BUG_ON(xa, xas.xa_index != i); | 948 | XA_BUG_ON(xa, xas.xa_index != i); |
| 880 | if (i == 0 || i == idx) | 949 | if (i == 0 || i == idx) |
| 881 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 950 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 882 | else | 951 | else |
| 883 | XA_BUG_ON(xa, entry != NULL); | 952 | XA_BUG_ON(xa, entry != NULL); |
| 884 | } while (i > 0); | 953 | } while (i > 0); |
| @@ -909,7 +978,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 909 | do { | 978 | do { |
| 910 | void *entry = xas_prev(&xas); | 979 | void *entry = xas_prev(&xas); |
| 911 | i--; | 980 | i--; |
| 912 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 981 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 913 | XA_BUG_ON(xa, i != xas.xa_index); | 982 | XA_BUG_ON(xa, i != xas.xa_index); |
| 914 | } while (i != 0); | 983 | } while (i != 0); |
| 915 | 984 | ||
| @@ -918,7 +987,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 918 | 987 | ||
| 919 | do { | 988 | do { |
| 920 | void *entry = xas_next(&xas); | 989 | void *entry = xas_next(&xas); |
| 921 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 990 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 922 | XA_BUG_ON(xa, i != xas.xa_index); | 991 | XA_BUG_ON(xa, i != xas.xa_index); |
| 923 | i++; | 992 | i++; |
| 924 | } while (i < (1 << 16)); | 993 | } while (i < (1 << 16)); |
| @@ -934,7 +1003,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 934 | void *entry = xas_prev(&xas); | 1003 | void *entry = xas_prev(&xas); |
| 935 | i--; | 1004 | i--; |
| 936 | if ((i < (1 << 8)) || (i >= (1 << 15))) | 1005 | if ((i < (1 << 8)) || (i >= (1 << 15))) |
| 937 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 1006 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 938 | else | 1007 | else |
| 939 | XA_BUG_ON(xa, entry != NULL); | 1008 | XA_BUG_ON(xa, entry != NULL); |
| 940 | XA_BUG_ON(xa, i != xas.xa_index); | 1009 | XA_BUG_ON(xa, i != xas.xa_index); |
| @@ -946,7 +1015,7 @@ static noinline void check_move(struct xarray *xa) | |||
| 946 | do { | 1015 | do { |
| 947 | void *entry = xas_next(&xas); | 1016 | void *entry = xas_next(&xas); |
| 948 | if ((i < (1 << 8)) || (i >= (1 << 15))) | 1017 | if ((i < (1 << 8)) || (i >= (1 << 15))) |
| 949 | XA_BUG_ON(xa, entry != xa_mk_value(i)); | 1018 | XA_BUG_ON(xa, entry != xa_mk_index(i)); |
| 950 | else | 1019 | else |
| 951 | XA_BUG_ON(xa, entry != NULL); | 1020 | XA_BUG_ON(xa, entry != NULL); |
| 952 | XA_BUG_ON(xa, i != xas.xa_index); | 1021 | XA_BUG_ON(xa, i != xas.xa_index); |
| @@ -976,7 +1045,7 @@ static noinline void xa_store_many_order(struct xarray *xa, | |||
| 976 | if (xas_error(&xas)) | 1045 | if (xas_error(&xas)) |
| 977 | goto unlock; | 1046 | goto unlock; |
| 978 | for (i = 0; i < (1U << order); i++) { | 1047 | for (i = 0; i < (1U << order); i++) { |
| 979 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(index + i))); | 1048 | XA_BUG_ON(xa, xas_store(&xas, xa_mk_index(index + i))); |
| 980 | xas_next(&xas); | 1049 | xas_next(&xas); |
| 981 | } | 1050 | } |
| 982 | unlock: | 1051 | unlock: |
| @@ -1031,9 +1100,9 @@ static noinline void check_create_range_4(struct xarray *xa, | |||
| 1031 | if (xas_error(&xas)) | 1100 | if (xas_error(&xas)) |
| 1032 | goto unlock; | 1101 | goto unlock; |
| 1033 | for (i = 0; i < (1UL << order); i++) { | 1102 | for (i = 0; i < (1UL << order); i++) { |
| 1034 | void *old = xas_store(&xas, xa_mk_value(base + i)); | 1103 | void *old = xas_store(&xas, xa_mk_index(base + i)); |
| 1035 | if (xas.xa_index == index) | 1104 | if (xas.xa_index == index) |
| 1036 | XA_BUG_ON(xa, old != xa_mk_value(base + i)); | 1105 | XA_BUG_ON(xa, old != xa_mk_index(base + i)); |
| 1037 | else | 1106 | else |
| 1038 | XA_BUG_ON(xa, old != NULL); | 1107 | XA_BUG_ON(xa, old != NULL); |
| 1039 | xas_next(&xas); | 1108 | xas_next(&xas); |
| @@ -1085,10 +1154,10 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first, | |||
| 1085 | unsigned long last) | 1154 | unsigned long last) |
| 1086 | { | 1155 | { |
| 1087 | #ifdef CONFIG_XARRAY_MULTI | 1156 | #ifdef CONFIG_XARRAY_MULTI |
| 1088 | xa_store_range(xa, first, last, xa_mk_value(first), GFP_KERNEL); | 1157 | xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); |
| 1089 | 1158 | ||
| 1090 | XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_value(first)); | 1159 | XA_BUG_ON(xa, xa_load(xa, first) != xa_mk_index(first)); |
| 1091 | XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_value(first)); | 1160 | XA_BUG_ON(xa, xa_load(xa, last) != xa_mk_index(first)); |
| 1092 | XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); | 1161 | XA_BUG_ON(xa, xa_load(xa, first - 1) != NULL); |
| 1093 | XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); | 1162 | XA_BUG_ON(xa, xa_load(xa, last + 1) != NULL); |
| 1094 | 1163 | ||
| @@ -1195,7 +1264,7 @@ static noinline void check_account(struct xarray *xa) | |||
| 1195 | XA_BUG_ON(xa, xas.xa_node->nr_values != 0); | 1264 | XA_BUG_ON(xa, xas.xa_node->nr_values != 0); |
| 1196 | rcu_read_unlock(); | 1265 | rcu_read_unlock(); |
| 1197 | 1266 | ||
| 1198 | xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order), | 1267 | xa_store_order(xa, 1 << order, order, xa_mk_index(1UL << order), |
| 1199 | GFP_KERNEL); | 1268 | GFP_KERNEL); |
| 1200 | XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); | 1269 | XA_BUG_ON(xa, xas.xa_node->count != xas.xa_node->nr_values * 2); |
| 1201 | 1270 | ||
diff --git a/lib/xarray.c b/lib/xarray.c index bbacca576593..5f3f9311de89 100644 --- a/lib/xarray.c +++ b/lib/xarray.c | |||
| @@ -1131,7 +1131,7 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |||
| 1131 | entry = xa_head(xas->xa); | 1131 | entry = xa_head(xas->xa); |
| 1132 | xas->xa_node = NULL; | 1132 | xas->xa_node = NULL; |
| 1133 | if (xas->xa_index > max_index(entry)) | 1133 | if (xas->xa_index > max_index(entry)) |
| 1134 | goto bounds; | 1134 | goto out; |
| 1135 | if (!xa_is_node(entry)) { | 1135 | if (!xa_is_node(entry)) { |
| 1136 | if (xa_marked(xas->xa, mark)) | 1136 | if (xa_marked(xas->xa, mark)) |
| 1137 | return entry; | 1137 | return entry; |
| @@ -1180,11 +1180,9 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) | |||
| 1180 | } | 1180 | } |
| 1181 | 1181 | ||
| 1182 | out: | 1182 | out: |
| 1183 | if (!max) | 1183 | if (xas->xa_index > max) |
| 1184 | goto max; | 1184 | goto max; |
| 1185 | bounds: | 1185 | return set_bounds(xas); |
| 1186 | xas->xa_node = XAS_BOUNDS; | ||
| 1187 | return NULL; | ||
| 1188 | max: | 1186 | max: |
| 1189 | xas->xa_node = XAS_RESTART; | 1187 | xas->xa_node = XAS_RESTART; |
| 1190 | return NULL; | 1188 | return NULL; |
diff --git a/mm/shmem.c b/mm/shmem.c index 921f80488bb3..5d07e0b1352f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
| @@ -661,9 +661,7 @@ static int shmem_free_swap(struct address_space *mapping, | |||
| 661 | { | 661 | { |
| 662 | void *old; | 662 | void *old; |
| 663 | 663 | ||
| 664 | xa_lock_irq(&mapping->i_pages); | 664 | old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); |
| 665 | old = __xa_cmpxchg(&mapping->i_pages, index, radswap, NULL, 0); | ||
| 666 | xa_unlock_irq(&mapping->i_pages); | ||
| 667 | if (old != radswap) | 665 | if (old != radswap) |
| 668 | return -ENOENT; | 666 | return -ENOENT; |
| 669 | free_swap_and_cache(radix_to_swp_entry(radswap)); | 667 | free_swap_and_cache(radix_to_swp_entry(radswap)); |
diff --git a/tools/testing/radix-tree/Makefile b/tools/testing/radix-tree/Makefile index acf1afa01c5b..397d6b612502 100644 --- a/tools/testing/radix-tree/Makefile +++ b/tools/testing/radix-tree/Makefile | |||
| @@ -7,6 +7,7 @@ LDLIBS+= -lpthread -lurcu | |||
| 7 | TARGETS = main idr-test multiorder xarray | 7 | TARGETS = main idr-test multiorder xarray |
| 8 | CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o | 8 | CORE_OFILES := xarray.o radix-tree.o idr.o linux.o test.o find_bit.o bitmap.o |
| 9 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ | 9 | OFILES = main.o $(CORE_OFILES) regression1.o regression2.o regression3.o \ |
| 10 | regression4.o \ | ||
| 10 | tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o | 11 | tag_check.o multiorder.o idr-test.o iteration_check.o benchmark.o |
| 11 | 12 | ||
| 12 | ifndef SHIFT | 13 | ifndef SHIFT |
diff --git a/tools/testing/radix-tree/main.c b/tools/testing/radix-tree/main.c index 77a44c54998f..7a22d6e3732e 100644 --- a/tools/testing/radix-tree/main.c +++ b/tools/testing/radix-tree/main.c | |||
| @@ -308,6 +308,7 @@ int main(int argc, char **argv) | |||
| 308 | regression1_test(); | 308 | regression1_test(); |
| 309 | regression2_test(); | 309 | regression2_test(); |
| 310 | regression3_test(); | 310 | regression3_test(); |
| 311 | regression4_test(); | ||
| 311 | iteration_test(0, 10 + 90 * long_run); | 312 | iteration_test(0, 10 + 90 * long_run); |
| 312 | iteration_test(7, 10 + 90 * long_run); | 313 | iteration_test(7, 10 + 90 * long_run); |
| 313 | single_thread_tests(long_run); | 314 | single_thread_tests(long_run); |
diff --git a/tools/testing/radix-tree/regression.h b/tools/testing/radix-tree/regression.h index 3c8a1584e9ee..135145af18b7 100644 --- a/tools/testing/radix-tree/regression.h +++ b/tools/testing/radix-tree/regression.h | |||
| @@ -5,5 +5,6 @@ | |||
| 5 | void regression1_test(void); | 5 | void regression1_test(void); |
| 6 | void regression2_test(void); | 6 | void regression2_test(void); |
| 7 | void regression3_test(void); | 7 | void regression3_test(void); |
| 8 | void regression4_test(void); | ||
| 8 | 9 | ||
| 9 | #endif | 10 | #endif |
diff --git a/tools/testing/radix-tree/regression4.c b/tools/testing/radix-tree/regression4.c new file mode 100644 index 000000000000..cf4e5aba6b08 --- /dev/null +++ b/tools/testing/radix-tree/regression4.c | |||
| @@ -0,0 +1,79 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | #include <linux/kernel.h> | ||
| 3 | #include <linux/gfp.h> | ||
| 4 | #include <linux/slab.h> | ||
| 5 | #include <linux/radix-tree.h> | ||
| 6 | #include <linux/rcupdate.h> | ||
| 7 | #include <stdlib.h> | ||
| 8 | #include <pthread.h> | ||
| 9 | #include <stdio.h> | ||
| 10 | #include <assert.h> | ||
| 11 | |||
| 12 | #include "regression.h" | ||
| 13 | |||
| 14 | static pthread_barrier_t worker_barrier; | ||
| 15 | static int obj0, obj1; | ||
| 16 | static RADIX_TREE(mt_tree, GFP_KERNEL); | ||
| 17 | |||
| 18 | static void *reader_fn(void *arg) | ||
| 19 | { | ||
| 20 | int i; | ||
| 21 | void *entry; | ||
| 22 | |||
| 23 | rcu_register_thread(); | ||
| 24 | pthread_barrier_wait(&worker_barrier); | ||
| 25 | |||
| 26 | for (i = 0; i < 1000000; i++) { | ||
| 27 | rcu_read_lock(); | ||
| 28 | entry = radix_tree_lookup(&mt_tree, 0); | ||
| 29 | rcu_read_unlock(); | ||
| 30 | if (entry != &obj0) { | ||
| 31 | printf("iteration %d bad entry = %p\n", i, entry); | ||
| 32 | abort(); | ||
| 33 | } | ||
| 34 | } | ||
| 35 | |||
| 36 | rcu_unregister_thread(); | ||
| 37 | |||
| 38 | return NULL; | ||
| 39 | } | ||
| 40 | |||
| 41 | static void *writer_fn(void *arg) | ||
| 42 | { | ||
| 43 | int i; | ||
| 44 | |||
| 45 | rcu_register_thread(); | ||
| 46 | pthread_barrier_wait(&worker_barrier); | ||
| 47 | |||
| 48 | for (i = 0; i < 1000000; i++) { | ||
| 49 | radix_tree_insert(&mt_tree, 1, &obj1); | ||
| 50 | radix_tree_delete(&mt_tree, 1); | ||
| 51 | } | ||
| 52 | |||
| 53 | rcu_unregister_thread(); | ||
| 54 | |||
| 55 | return NULL; | ||
| 56 | } | ||
| 57 | |||
| 58 | void regression4_test(void) | ||
| 59 | { | ||
| 60 | pthread_t reader, writer; | ||
| 61 | |||
| 62 | printv(1, "regression test 4 starting\n"); | ||
| 63 | |||
| 64 | radix_tree_insert(&mt_tree, 0, &obj0); | ||
| 65 | pthread_barrier_init(&worker_barrier, NULL, 2); | ||
| 66 | |||
| 67 | if (pthread_create(&reader, NULL, reader_fn, NULL) || | ||
| 68 | pthread_create(&writer, NULL, writer_fn, NULL)) { | ||
| 69 | perror("pthread_create"); | ||
| 70 | exit(1); | ||
| 71 | } | ||
| 72 | |||
| 73 | if (pthread_join(reader, NULL) || pthread_join(writer, NULL)) { | ||
| 74 | perror("pthread_join"); | ||
| 75 | exit(1); | ||
| 76 | } | ||
| 77 | |||
| 78 | printv(1, "regression test 4 passed\n"); | ||
| 79 | } | ||
