path: root/kernel/rcutorture.c
author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /kernel/rcutorture.c
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--  kernel/rcutorture.c  167
1 file changed, 145 insertions(+), 22 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 697c0a0229d4..58df55bf83ed 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,6 +61,9 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5; /* Start/stop testing interval (in sec) */
 static int irqreader = 1; /* RCU readers from irq (timers). */
+static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff = 0; /* Hold time within burst (us). */
+static int fqs_stutter = 3; /* Wait time between bursts (s). */
 static char *torture_type = "rcu"; /* What RCU implementation to torture. */

 module_param(nreaders, int, 0444);
@@ -79,6 +82,12 @@ module_param(stutter, int, 0444);
 MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
 module_param(irqreader, int, 0444);
 MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+module_param(fqs_duration, int, 0444);
+MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
+module_param(fqs_holdoff, int, 0444);
+MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
+module_param(fqs_stutter, int, 0444);
+MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
 module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
 
@@ -99,6 +108,7 @@ static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
 static struct task_struct *shuffler_task;
 static struct task_struct *stutter_task;
+static struct task_struct *fqs_task;

 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -263,6 +273,7 @@ struct rcu_torture_ops {
         void (*deferred_free)(struct rcu_torture *p);
         void (*sync)(void);
         void (*cb_barrier)(void);
+        void (*fqs)(void);
         int (*stats)(char *page);
         int irq_capable;
         char *name;
@@ -327,6 +338,11 @@ rcu_torture_cb(struct rcu_head *p)
         cur_ops->deferred_free(rp);
 }

+static int rcu_no_completed(void)
+{
+        return 0;
+}
+
 static void rcu_torture_deferred_free(struct rcu_torture *p)
 {
         call_rcu(&p->rtort_rcu, rcu_torture_cb);
@@ -342,6 +358,7 @@ static struct rcu_torture_ops rcu_ops = {
         .deferred_free = rcu_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = rcu_barrier,
+        .fqs = rcu_force_quiescent_state,
         .stats = NULL,
         .irq_capable = 1,
         .name = "rcu"
@@ -383,11 +400,28 @@ static struct rcu_torture_ops rcu_sync_ops = {
         .deferred_free = rcu_sync_torture_deferred_free,
         .sync = synchronize_rcu,
         .cb_barrier = NULL,
+        .fqs = rcu_force_quiescent_state,
         .stats = NULL,
         .irq_capable = 1,
         .name = "rcu_sync"
 };

+static struct rcu_torture_ops rcu_expedited_ops = {
+        .init = rcu_sync_torture_init,
+        .cleanup = NULL,
+        .readlock = rcu_torture_read_lock,
+        .read_delay = rcu_read_delay, /* just reuse rcu's version. */
+        .readunlock = rcu_torture_read_unlock,
+        .completed = rcu_no_completed,
+        .deferred_free = rcu_sync_torture_deferred_free,
+        .sync = synchronize_rcu_expedited,
+        .cb_barrier = NULL,
+        .fqs = rcu_force_quiescent_state,
+        .stats = NULL,
+        .irq_capable = 1,
+        .name = "rcu_expedited"
+};
+
 /*
  * Definitions for rcu_bh torture testing.
  */
@@ -445,6 +479,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
         .deferred_free = rcu_bh_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = rcu_barrier_bh,
+        .fqs = rcu_bh_force_quiescent_state,
         .stats = NULL,
         .irq_capable = 1,
         .name = "rcu_bh"
@@ -460,6 +495,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
         .deferred_free = rcu_sync_torture_deferred_free,
         .sync = rcu_bh_torture_synchronize,
         .cb_barrier = NULL,
+        .fqs = rcu_bh_force_quiescent_state,
         .stats = NULL,
         .irq_capable = 1,
         .name = "rcu_bh_sync"
@@ -547,6 +583,25 @@ static struct rcu_torture_ops srcu_ops = {
         .name = "srcu"
 };

+static void srcu_torture_synchronize_expedited(void)
+{
+        synchronize_srcu_expedited(&srcu_ctl);
+}
+
+static struct rcu_torture_ops srcu_expedited_ops = {
+        .init = srcu_torture_init,
+        .cleanup = srcu_torture_cleanup,
+        .readlock = srcu_torture_read_lock,
+        .read_delay = srcu_read_delay,
+        .readunlock = srcu_torture_read_unlock,
+        .completed = srcu_torture_completed,
+        .deferred_free = rcu_sync_torture_deferred_free,
+        .sync = srcu_torture_synchronize_expedited,
+        .cb_barrier = NULL,
+        .stats = srcu_torture_stats,
+        .name = "srcu_expedited"
+};
+
 /*
  * Definitions for sched torture testing.
  */
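For readers not familiar with the SRCU flavor that the new srcu_expedited_ops target exercises: srcu_read_lock() returns an index that must be handed back to the matching srcu_read_unlock(), and synchronize_srcu_expedited() is the expedited grace-period wait that srcu_torture_synchronize_expedited() wraps above. A minimal kernel-style sketch of that pattern, with hypothetical names (demo_srcu, demo_reader, demo_update), not part of the patch:

/* Sketch only: the SRCU read/update pattern behind srcu_expedited_ops. */
#include <linux/srcu.h>

static struct srcu_struct demo_srcu;    /* init_srcu_struct() at module init,
                                           cleanup_srcu_struct() at exit */

static int demo_reader(int *shared)
{
        int idx, v;

        idx = srcu_read_lock(&demo_srcu);       /* returns an index... */
        v = *shared;
        srcu_read_unlock(&demo_srcu, idx);      /* ...passed back on unlock */
        return v;
}

static void demo_update(void)
{
        /* Waits for all SRCU readers of demo_srcu via the expedited path. */
        synchronize_srcu_expedited(&demo_srcu);
}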
@@ -562,11 +617,6 @@ static void sched_torture_read_unlock(int idx)
         preempt_enable();
 }

-static int sched_torture_completed(void)
-{
-        return 0;
-}
-
 static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
 {
         call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
@@ -583,25 +633,27 @@ static struct rcu_torture_ops sched_ops = {
         .readlock = sched_torture_read_lock,
         .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
-        .completed = sched_torture_completed,
+        .completed = rcu_no_completed,
         .deferred_free = rcu_sched_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = rcu_barrier_sched,
+        .fqs = rcu_sched_force_quiescent_state,
         .stats = NULL,
         .irq_capable = 1,
         .name = "sched"
 };

-static struct rcu_torture_ops sched_ops_sync = {
+static struct rcu_torture_ops sched_sync_ops = {
         .init = rcu_sync_torture_init,
         .cleanup = NULL,
         .readlock = sched_torture_read_lock,
         .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
-        .completed = sched_torture_completed,
+        .completed = rcu_no_completed,
         .deferred_free = rcu_sync_torture_deferred_free,
         .sync = sched_torture_synchronize,
         .cb_barrier = NULL,
+        .fqs = rcu_sched_force_quiescent_state,
         .stats = NULL,
         .name = "sched_sync"
 };
@@ -612,16 +664,49 @@ static struct rcu_torture_ops sched_expedited_ops = {
         .readlock = sched_torture_read_lock,
         .read_delay = rcu_read_delay, /* just reuse rcu's version. */
         .readunlock = sched_torture_read_unlock,
-        .completed = sched_torture_completed,
+        .completed = rcu_no_completed,
         .deferred_free = rcu_sync_torture_deferred_free,
         .sync = synchronize_sched_expedited,
         .cb_barrier = NULL,
+        .fqs = rcu_sched_force_quiescent_state,
         .stats = rcu_expedited_torture_stats,
         .irq_capable = 1,
         .name = "sched_expedited"
 };

 /*
+ * RCU torture force-quiescent-state kthread. Repeatedly induces
+ * bursts of calls to force_quiescent_state(), increasing the probability
+ * of occurrence of some important types of race conditions.
+ */
+static int
+rcu_torture_fqs(void *arg)
+{
+        unsigned long fqs_resume_time;
+        int fqs_burst_remaining;
+
+        VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
+        do {
+                fqs_resume_time = jiffies + fqs_stutter * HZ;
+                while (jiffies - fqs_resume_time > LONG_MAX) {
+                        schedule_timeout_interruptible(1);
+                }
+                fqs_burst_remaining = fqs_duration;
+                while (fqs_burst_remaining > 0) {
+                        cur_ops->fqs();
+                        udelay(fqs_holdoff);
+                        fqs_burst_remaining -= fqs_holdoff;
+                }
+                rcu_stutter_wait("rcu_torture_fqs");
+        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
+        VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
+        rcutorture_shutdown_absorb("rcu_torture_fqs");
+        while (!kthread_should_stop())
+                schedule_timeout_uninterruptible(1);
+        return 0;
+}
+
+/*
  * RCU torture writer kthread. Repeatedly substitutes a new structure
  * for that pointed to by rcu_torture_current, freeing the old structure
  * after a series of grace periods (the "pipeline").
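Two details of rcu_torture_fqs() above are easy to misread. The wait loop relies on unsigned wrap-around: jiffies - fqs_resume_time exceeds LONG_MAX exactly while fqs_resume_time still lies in the future, so the loop sleeps one tick at a time until the next burst is due (the same comparison the kernel's time_before() macro performs). Within a burst, cur_ops->fqs() then runs roughly fqs_duration / fqs_holdoff times, once every fqs_holdoff microseconds. A small self-contained user-space illustration of both points, using made-up parameter values rather than the module defaults:

/* Illustration only: mimics rcu_torture_fqs() timing with fake jiffies. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
        unsigned long fqs_duration = 100;   /* hypothetical burst length (us) */
        unsigned long fqs_holdoff = 10;     /* hypothetical holdoff (us) */

        /* Wrap-safe "is the resume time still in the future?" test. */
        unsigned long jiffies = ULONG_MAX - 5;          /* about to wrap */
        unsigned long fqs_resume_time = jiffies + 10;   /* wraps past zero */
        while (jiffies - fqs_resume_time > LONG_MAX)
                jiffies++;                  /* stands in for sleeping a tick */
        printf("resumed at jiffies=%lu\n", jiffies);

        /* Each burst issues roughly fqs_duration / fqs_holdoff calls. */
        int calls = 0, remaining = (int)fqs_duration;
        while (remaining > 0) {
                calls++;                        /* cur_ops->fqs() in the patch */
                remaining -= (int)fqs_holdoff;  /* udelay(fqs_holdoff) elapses */
        }
        printf("%d force_quiescent_state() calls per burst\n", calls);
        return 0;
}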
@@ -711,7 +796,11 @@ static void rcu_torture_timer(unsigned long unused)
 
         idx = cur_ops->readlock();
         completed = cur_ops->completed();
-        p = rcu_dereference(rcu_torture_current);
+        p = rcu_dereference_check(rcu_torture_current,
+                                  rcu_read_lock_held() ||
+                                  rcu_read_lock_bh_held() ||
+                                  rcu_read_lock_sched_held() ||
+                                  srcu_read_lock_held(&srcu_ctl));
         if (p == NULL) {
                 /* Leave because rcu_torture_writer is not yet underway */
                 cur_ops->readunlock(idx);
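The switch from rcu_dereference() to rcu_dereference_check() matters here because the torture reader acquires its read-side lock through cur_ops->readlock(), so lockdep cannot know which RCU flavor protects the access; the condition therefore lists every flavor the module may be configured to use, and a warning fires only if none of them is held. The general pattern, shown as a minimal sketch with hypothetical names (demo_ptr, demo_read) rather than code from this file:

/* Sketch only: rcu_dereference_check() with an explicit lockdep condition. */
#include <linux/rcupdate.h>

struct demo {
        int val;
};

static struct demo *demo_ptr;           /* hypothetical RCU-protected pointer */

static int demo_read(void)
{
        struct demo *p;
        int v = -1;

        rcu_read_lock();
        /* The second argument documents which read-side locks make this
         * dereference legal; here plain rcu_read_lock() is enough. */
        p = rcu_dereference_check(demo_ptr, rcu_read_lock_held());
        if (p)
                v = p->val;
        rcu_read_unlock();
        return v;
}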
@@ -729,13 +818,13 @@ static void rcu_torture_timer(unsigned long unused)
                 /* Should not happen, but... */
                 pipe_count = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_count)[pipe_count];
+        __this_cpu_inc(rcu_torture_count[pipe_count]);
         completed = cur_ops->completed() - completed;
         if (completed > RCU_TORTURE_PIPE_LEN) {
                 /* Should not happen, but... */
                 completed = RCU_TORTURE_PIPE_LEN;
         }
-        ++__get_cpu_var(rcu_torture_batch)[completed];
+        __this_cpu_inc(rcu_torture_batch[completed]);
         preempt_enable();
         cur_ops->readunlock(idx);
 }
@@ -764,11 +853,15 @@ rcu_torture_reader(void *arg)
         do {
                 if (irqreader && cur_ops->irq_capable) {
                         if (!timer_pending(&t))
-                                mod_timer(&t, 1);
+                                mod_timer(&t, jiffies + 1);
                 }
                 idx = cur_ops->readlock();
                 completed = cur_ops->completed();
-                p = rcu_dereference(rcu_torture_current);
+                p = rcu_dereference_check(rcu_torture_current,
+                                          rcu_read_lock_held() ||
+                                          rcu_read_lock_bh_held() ||
+                                          rcu_read_lock_sched_held() ||
+                                          srcu_read_lock_held(&srcu_ctl));
                 if (p == NULL) {
                         /* Wait for rcu_torture_writer to get underway */
                         cur_ops->readunlock(idx);
@@ -784,13 +877,13 @@ rcu_torture_reader(void *arg)
                         /* Should not happen, but... */
                         pipe_count = RCU_TORTURE_PIPE_LEN;
                 }
-                ++__get_cpu_var(rcu_torture_count)[pipe_count];
+                __this_cpu_inc(rcu_torture_count[pipe_count]);
                 completed = cur_ops->completed() - completed;
                 if (completed > RCU_TORTURE_PIPE_LEN) {
                         /* Should not happen, but... */
                         completed = RCU_TORTURE_PIPE_LEN;
                 }
-                ++__get_cpu_var(rcu_torture_batch)[completed];
+                __this_cpu_inc(rcu_torture_batch[completed]);
                 preempt_enable();
                 cur_ops->readunlock(idx);
                 schedule();
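The counter updates above also move from ++__get_cpu_var(...)[i] to __this_cpu_inc(...), which bumps the current CPU's array slot in a single operation; the double-underscore variant is legitimate here only because the callers already run between preempt_disable() and preempt_enable(). A minimal kernel-style sketch of the same idiom, with a hypothetical counter name (demo_count), not part of the patch:

/* Sketch only: per-CPU counter array bumped the way rcutorture now does. */
#include <linux/percpu.h>
#include <linux/preempt.h>

#define DEMO_PIPE_LEN 10

static DEFINE_PER_CPU(long [DEMO_PIPE_LEN + 1], demo_count);

static void demo_record(int pipe_stage)
{
        preempt_disable();                       /* stay on this CPU */
        __this_cpu_inc(demo_count[pipe_stage]);  /* no cross-CPU race while pinned */
        preempt_enable();
}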
@@ -996,10 +1089,11 @@ rcu_torture_print_module_parms(char *tag)
         printk(KERN_ALERT "%s" TORTURE_FLAG
                "--- %s: nreaders=%d nfakewriters=%d "
                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
-               "shuffle_interval=%d stutter=%d irqreader=%d\n",
+               "shuffle_interval=%d stutter=%d irqreader=%d "
+               "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
                torture_type, tag, nrealreaders, nfakewriters,
                stat_interval, verbose, test_no_idle_hz, shuffle_interval,
-               stutter, irqreader);
+               stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
 }

 static struct notifier_block rcutorture_nb = {
@@ -1075,6 +1169,12 @@ rcu_torture_cleanup(void)
         }
         stats_task = NULL;

+        if (fqs_task) {
+                VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
+                kthread_stop(fqs_task);
+        }
+        fqs_task = NULL;
+
         /* Wait for all RCU callbacks to fire. */

         if (cur_ops->cb_barrier != NULL)
@@ -1097,9 +1197,10 @@ rcu_torture_init(void)
         int cpu;
         int firsterr = 0;
         static struct rcu_torture_ops *torture_ops[] =
-                { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
-                  &sched_expedited_ops,
-                  &srcu_ops, &sched_ops, &sched_ops_sync, };
+                { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
+                  &rcu_bh_ops, &rcu_bh_sync_ops,
+                  &srcu_ops, &srcu_expedited_ops,
+                  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };

         mutex_lock(&fullstop_mutex);
 
@@ -1110,11 +1211,20 @@ rcu_torture_init(void)
                 break;
         }
         if (i == ARRAY_SIZE(torture_ops)) {
-                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
+                printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
                        torture_type);
+                printk(KERN_ALERT "rcu-torture types:");
+                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
+                        printk(KERN_ALERT " %s", torture_ops[i]->name);
+                printk(KERN_ALERT "\n");
                 mutex_unlock(&fullstop_mutex);
                 return -EINVAL;
         }
+        if (cur_ops->fqs == NULL && fqs_duration != 0) {
+                printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
+                                  "fqs_duration, fqs disabled.\n");
+                fqs_duration = 0;
+        }
         if (cur_ops->init)
                 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
 
@@ -1243,6 +1353,19 @@ rcu_torture_init(void)
                         goto unwind;
                 }
         }
+        if (fqs_duration < 0)
+                fqs_duration = 0;
+        if (fqs_duration) {
+                /* Create the stutter thread */
+                fqs_task = kthread_run(rcu_torture_fqs, NULL,
+                                       "rcu_torture_fqs");
+                if (IS_ERR(fqs_task)) {
+                        firsterr = PTR_ERR(fqs_task);
+                        VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
+                        fqs_task = NULL;
+                        goto unwind;
+                }
+        }
         register_reboot_notifier(&rcutorture_nb);
         mutex_unlock(&fullstop_mutex);
         return 0;