diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2011-06-07 19:59:35 -0400 |
---|---|---|
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2011-09-29 00:36:43 -0400 |
commit | bdf2a4364904d6cf2f59b16b0bd86fdc5a2c6152 (patch) | |
tree | ee8343d4a81757d75f1d51f54d3762f00525e7a9 /kernel/rcutorture.c | |
parent | 2c42818e962e2858334bf45bfc56662b3752df34 (diff) |
rcu: Catch rcutorture up to new RCU API additions
Now that the RCU API contains synchronize_rcu_bh(), synchronize_sched(),
call_rcu_sched(), and synchronize_rcu_bh_expedited()...
Make rcutorture test synchronize_rcu_bh(), getting rid of the old
rcu_bh_torture_synchronize() workaround. Similarly, make rcutorture test
synchronize_sched(), getting rid of the old sched_torture_synchronize()
workaround. Make rcutorture test call_rcu_sched() instead of wrappering
synchronize_sched(). Also add testing of synchronize_rcu_bh_expedited().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r-- | kernel/rcutorture.c | 55 |
1 files changed, 21 insertions, 34 deletions
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 1d2415046154..75fca518888c 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -480,30 +480,6 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p) | |||
480 | call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); | 480 | call_rcu_bh(&p->rtort_rcu, rcu_torture_cb); |
481 | } | 481 | } |
482 | 482 | ||
483 | struct rcu_bh_torture_synchronize { | ||
484 | struct rcu_head head; | ||
485 | struct completion completion; | ||
486 | }; | ||
487 | |||
488 | static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head) | ||
489 | { | ||
490 | struct rcu_bh_torture_synchronize *rcu; | ||
491 | |||
492 | rcu = container_of(head, struct rcu_bh_torture_synchronize, head); | ||
493 | complete(&rcu->completion); | ||
494 | } | ||
495 | |||
496 | static void rcu_bh_torture_synchronize(void) | ||
497 | { | ||
498 | struct rcu_bh_torture_synchronize rcu; | ||
499 | |||
500 | init_rcu_head_on_stack(&rcu.head); | ||
501 | init_completion(&rcu.completion); | ||
502 | call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb); | ||
503 | wait_for_completion(&rcu.completion); | ||
504 | destroy_rcu_head_on_stack(&rcu.head); | ||
505 | } | ||
506 | |||
507 | static struct rcu_torture_ops rcu_bh_ops = { | 483 | static struct rcu_torture_ops rcu_bh_ops = { |
508 | .init = NULL, | 484 | .init = NULL, |
509 | .cleanup = NULL, | 485 | .cleanup = NULL, |
@@ -512,7 +488,7 @@ static struct rcu_torture_ops rcu_bh_ops = { | |||
512 | .readunlock = rcu_bh_torture_read_unlock, | 488 | .readunlock = rcu_bh_torture_read_unlock, |
513 | .completed = rcu_bh_torture_completed, | 489 | .completed = rcu_bh_torture_completed, |
514 | .deferred_free = rcu_bh_torture_deferred_free, | 490 | .deferred_free = rcu_bh_torture_deferred_free, |
515 | .sync = rcu_bh_torture_synchronize, | 491 | .sync = synchronize_rcu_bh, |
516 | .cb_barrier = rcu_barrier_bh, | 492 | .cb_barrier = rcu_barrier_bh, |
517 | .fqs = rcu_bh_force_quiescent_state, | 493 | .fqs = rcu_bh_force_quiescent_state, |
518 | .stats = NULL, | 494 | .stats = NULL, |
@@ -528,7 +504,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = { | |||
528 | .readunlock = rcu_bh_torture_read_unlock, | 504 | .readunlock = rcu_bh_torture_read_unlock, |
529 | .completed = rcu_bh_torture_completed, | 505 | .completed = rcu_bh_torture_completed, |
530 | .deferred_free = rcu_sync_torture_deferred_free, | 506 | .deferred_free = rcu_sync_torture_deferred_free, |
531 | .sync = rcu_bh_torture_synchronize, | 507 | .sync = synchronize_rcu_bh, |
532 | .cb_barrier = NULL, | 508 | .cb_barrier = NULL, |
533 | .fqs = rcu_bh_force_quiescent_state, | 509 | .fqs = rcu_bh_force_quiescent_state, |
534 | .stats = NULL, | 510 | .stats = NULL, |
@@ -536,6 +512,22 @@ static struct rcu_torture_ops rcu_bh_sync_ops = { | |||
536 | .name = "rcu_bh_sync" | 512 | .name = "rcu_bh_sync" |
537 | }; | 513 | }; |
538 | 514 | ||
515 | static struct rcu_torture_ops rcu_bh_expedited_ops = { | ||
516 | .init = rcu_sync_torture_init, | ||
517 | .cleanup = NULL, | ||
518 | .readlock = rcu_bh_torture_read_lock, | ||
519 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
520 | .readunlock = rcu_bh_torture_read_unlock, | ||
521 | .completed = rcu_bh_torture_completed, | ||
522 | .deferred_free = rcu_sync_torture_deferred_free, | ||
523 | .sync = synchronize_rcu_bh_expedited, | ||
524 | .cb_barrier = NULL, | ||
525 | .fqs = rcu_bh_force_quiescent_state, | ||
526 | .stats = NULL, | ||
527 | .irq_capable = 1, | ||
528 | .name = "rcu_bh_expedited" | ||
529 | }; | ||
530 | |||
539 | /* | 531 | /* |
540 | * Definitions for srcu torture testing. | 532 | * Definitions for srcu torture testing. |
541 | */ | 533 | */ |
@@ -659,11 +651,6 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p) | |||
659 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); | 651 | call_rcu_sched(&p->rtort_rcu, rcu_torture_cb); |
660 | } | 652 | } |
661 | 653 | ||
662 | static void sched_torture_synchronize(void) | ||
663 | { | ||
664 | synchronize_sched(); | ||
665 | } | ||
666 | |||
667 | static struct rcu_torture_ops sched_ops = { | 654 | static struct rcu_torture_ops sched_ops = { |
668 | .init = rcu_sync_torture_init, | 655 | .init = rcu_sync_torture_init, |
669 | .cleanup = NULL, | 656 | .cleanup = NULL, |
@@ -672,7 +659,7 @@ static struct rcu_torture_ops sched_ops = { | |||
672 | .readunlock = sched_torture_read_unlock, | 659 | .readunlock = sched_torture_read_unlock, |
673 | .completed = rcu_no_completed, | 660 | .completed = rcu_no_completed, |
674 | .deferred_free = rcu_sched_torture_deferred_free, | 661 | .deferred_free = rcu_sched_torture_deferred_free, |
675 | .sync = sched_torture_synchronize, | 662 | .sync = synchronize_sched, |
676 | .cb_barrier = rcu_barrier_sched, | 663 | .cb_barrier = rcu_barrier_sched, |
677 | .fqs = rcu_sched_force_quiescent_state, | 664 | .fqs = rcu_sched_force_quiescent_state, |
678 | .stats = NULL, | 665 | .stats = NULL, |
@@ -688,7 +675,7 @@ static struct rcu_torture_ops sched_sync_ops = { | |||
688 | .readunlock = sched_torture_read_unlock, | 675 | .readunlock = sched_torture_read_unlock, |
689 | .completed = rcu_no_completed, | 676 | .completed = rcu_no_completed, |
690 | .deferred_free = rcu_sync_torture_deferred_free, | 677 | .deferred_free = rcu_sync_torture_deferred_free, |
691 | .sync = sched_torture_synchronize, | 678 | .sync = synchronize_sched, |
692 | .cb_barrier = NULL, | 679 | .cb_barrier = NULL, |
693 | .fqs = rcu_sched_force_quiescent_state, | 680 | .fqs = rcu_sched_force_quiescent_state, |
694 | .stats = NULL, | 681 | .stats = NULL, |
@@ -1425,7 +1412,7 @@ rcu_torture_init(void) | |||
1425 | int firsterr = 0; | 1412 | int firsterr = 0; |
1426 | static struct rcu_torture_ops *torture_ops[] = | 1413 | static struct rcu_torture_ops *torture_ops[] = |
1427 | { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, | 1414 | { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops, |
1428 | &rcu_bh_ops, &rcu_bh_sync_ops, | 1415 | &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops, |
1429 | &srcu_ops, &srcu_expedited_ops, | 1416 | &srcu_ops, &srcu_expedited_ops, |
1430 | &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; | 1417 | &sched_ops, &sched_sync_ops, &sched_expedited_ops, }; |
1431 | 1418 | ||