Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--	kernel/rcutorture.c	77
1 file changed, 34 insertions(+), 43 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 98f51b13bb7e..764825c2685c 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -73,7 +73,7 @@ module_param(nreaders, int, 0444);
 MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
 module_param(nfakewriters, int, 0444);
 MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0444);
+module_param(stat_interval, int, 0644);
 MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
 module_param(verbose, bool, 0444);
 MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
@@ -480,30 +480,6 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
 	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
 }
 
-struct rcu_bh_torture_synchronize {
-	struct rcu_head head;
-	struct completion completion;
-};
-
-static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
-{
-	struct rcu_bh_torture_synchronize *rcu;
-
-	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
-	complete(&rcu->completion);
-}
-
-static void rcu_bh_torture_synchronize(void)
-{
-	struct rcu_bh_torture_synchronize rcu;
-
-	init_rcu_head_on_stack(&rcu.head);
-	init_completion(&rcu.completion);
-	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
-	wait_for_completion(&rcu.completion);
-	destroy_rcu_head_on_stack(&rcu.head);
-}
-
 static struct rcu_torture_ops rcu_bh_ops = {
 	.init		= NULL,
 	.cleanup	= NULL,
@@ -512,7 +488,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.readunlock	= rcu_bh_torture_read_unlock,
 	.completed	= rcu_bh_torture_completed,
 	.deferred_free	= rcu_bh_torture_deferred_free,
-	.sync		= rcu_bh_torture_synchronize,
+	.sync		= synchronize_rcu_bh,
 	.cb_barrier	= rcu_barrier_bh,
 	.fqs		= rcu_bh_force_quiescent_state,
 	.stats		= NULL,
@@ -528,7 +504,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
 	.readunlock	= rcu_bh_torture_read_unlock,
 	.completed	= rcu_bh_torture_completed,
 	.deferred_free	= rcu_sync_torture_deferred_free,
-	.sync		= rcu_bh_torture_synchronize,
+	.sync		= synchronize_rcu_bh,
 	.cb_barrier	= NULL,
 	.fqs		= rcu_bh_force_quiescent_state,
 	.stats		= NULL,
@@ -536,6 +512,22 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
 	.name		= "rcu_bh_sync"
 };
 
+static struct rcu_torture_ops rcu_bh_expedited_ops = {
+	.init		= rcu_sync_torture_init,
+	.cleanup	= NULL,
+	.readlock	= rcu_bh_torture_read_lock,
+	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
+	.readunlock	= rcu_bh_torture_read_unlock,
+	.completed	= rcu_bh_torture_completed,
+	.deferred_free	= rcu_sync_torture_deferred_free,
+	.sync		= synchronize_rcu_bh_expedited,
+	.cb_barrier	= NULL,
+	.fqs		= rcu_bh_force_quiescent_state,
+	.stats		= NULL,
+	.irq_capable	= 1,
+	.name		= "rcu_bh_expedited"
+};
+
 /*
  * Definitions for srcu torture testing.
  */
@@ -659,11 +651,6 @@ static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
 	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
 }
 
-static void sched_torture_synchronize(void)
-{
-	synchronize_sched();
-}
-
 static struct rcu_torture_ops sched_ops = {
 	.init		= rcu_sync_torture_init,
 	.cleanup	= NULL,
@@ -672,7 +659,7 @@ static struct rcu_torture_ops sched_ops = {
 	.readunlock	= sched_torture_read_unlock,
 	.completed	= rcu_no_completed,
 	.deferred_free	= rcu_sched_torture_deferred_free,
-	.sync		= sched_torture_synchronize,
+	.sync		= synchronize_sched,
 	.cb_barrier	= rcu_barrier_sched,
 	.fqs		= rcu_sched_force_quiescent_state,
 	.stats		= NULL,
@@ -688,7 +675,7 @@ static struct rcu_torture_ops sched_sync_ops = {
 	.readunlock	= sched_torture_read_unlock,
 	.completed	= rcu_no_completed,
 	.deferred_free	= rcu_sync_torture_deferred_free,
-	.sync		= sched_torture_synchronize,
+	.sync		= synchronize_sched,
 	.cb_barrier	= NULL,
 	.fqs		= rcu_sched_force_quiescent_state,
 	.stats		= NULL,
@@ -754,7 +741,7 @@ static int rcu_torture_boost(void *arg)
 	do {
 		/* Wait for the next test interval. */
 		oldstarttime = boost_starttime;
-		while (jiffies - oldstarttime > ULONG_MAX / 2) {
+		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
 			schedule_timeout_uninterruptible(1);
 			rcu_stutter_wait("rcu_torture_boost");
 			if (kthread_should_stop() ||
@@ -765,7 +752,7 @@ static int rcu_torture_boost(void *arg)
 		/* Do one boost-test interval. */
 		endtime = oldstarttime + test_boost_duration * HZ;
 		call_rcu_time = jiffies;
-		while (jiffies - endtime > ULONG_MAX / 2) {
+		while (ULONG_CMP_LT(jiffies, endtime)) {
 			/* If we don't have a callback in flight, post one. */
 			if (!rbi.inflight) {
 				smp_mb(); /* RCU core before ->inflight = 1. */
@@ -792,7 +779,8 @@ static int rcu_torture_boost(void *arg)
 		 * interval.  Besides, we are running at RT priority,
 		 * so delays should be relatively rare.
 		 */
-		while (oldstarttime == boost_starttime) {
+		while (oldstarttime == boost_starttime &&
+		       !kthread_should_stop()) {
 			if (mutex_trylock(&boost_mutex)) {
 				boost_starttime = jiffies +
 					test_boost_interval * HZ;
@@ -809,11 +797,11 @@ checkwait: rcu_stutter_wait("rcu_torture_boost");
 
 	/* Clean up and exit. */
 	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
-	destroy_rcu_head_on_stack(&rbi.rcu);
 	rcutorture_shutdown_absorb("rcu_torture_boost");
 	while (!kthread_should_stop() || rbi.inflight)
 		schedule_timeout_uninterruptible(1);
 	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
+	destroy_rcu_head_on_stack(&rbi.rcu);
 	return 0;
 }
 
@@ -831,11 +819,13 @@ rcu_torture_fqs(void *arg)
 	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
 	do {
 		fqs_resume_time = jiffies + fqs_stutter * HZ;
-		while (jiffies - fqs_resume_time > LONG_MAX) {
+		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
+		       !kthread_should_stop()) {
 			schedule_timeout_interruptible(1);
 		}
 		fqs_burst_remaining = fqs_duration;
-		while (fqs_burst_remaining > 0) {
+		while (fqs_burst_remaining > 0 &&
+		       !kthread_should_stop()) {
 			cur_ops->fqs();
 			udelay(fqs_holdoff);
 			fqs_burst_remaining -= fqs_holdoff;
@@ -1280,8 +1270,9 @@ static int rcutorture_booster_init(int cpu)
 	/* Don't allow time recalculation while creating a new task. */
 	mutex_lock(&boost_mutex);
 	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
-	boost_tasks[cpu] = kthread_create(rcu_torture_boost, NULL,
-					  "rcu_torture_boost");
+	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
+						  cpu_to_node(cpu),
+						  "rcu_torture_boost");
 	if (IS_ERR(boost_tasks[cpu])) {
 		retval = PTR_ERR(boost_tasks[cpu]);
 		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
@@ -1424,7 +1415,7 @@ rcu_torture_init(void)
 	int firsterr = 0;
 	static struct rcu_torture_ops *torture_ops[] =
 		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
-		  &rcu_bh_ops, &rcu_bh_sync_ops,
+		  &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
 		  &srcu_ops, &srcu_expedited_ops,
 		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
 
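
Note on the converted wait loops above: they rely on a wraparound-safe comparison of free-running counters such as jiffies. The sketch below is a minimal, standalone userspace illustration of the idea; the macro mirrors the usual kernel definition of ULONG_CMP_LT, while the test values and main() harness are purely illustrative and not part of this patch.

#include <limits.h>
#include <stdio.h>

/*
 * Treat a and b as free-running unsigned counters that may wrap.
 * "a precedes b" exactly when the unsigned difference a - b lands
 * in the upper half of the unsigned long range.
 */
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long now = ULONG_MAX - 5;	/* counter about to wrap */
	unsigned long deadline = now + 10;	/* wraps past zero to a small value */

	/* Despite the wrap, "now" still compares as earlier than "deadline". */
	printf("%d\n", ULONG_CMP_LT(now, deadline));	/* prints 1 */
	printf("%d\n", ULONG_CMP_LT(deadline, now));	/* prints 0 */
	return 0;
}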