commit bf66f18e79e34c421bbd8f6511e2c556b779df2f
tree   2348e3bc8392addf7959fbe94003f7e0da2a1c8b
parent 46a1e34eda805501a8b32f26394faa435149f6d1
author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-01-04 18:09:10 -0500
committer Ingo Molnar <mingo@elte.hu>                    2010-01-13 03:06:05 -0500
rcu: Add force_quiescent_state() testing to rcutorture
Add force_quiescent_state() testing to rcutorture, with a
separate thread that repeatedly invokes force_quiescent_state()
in bursts. This can greatly increase the probability of
encountering certain types of race conditions.
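The burst behavior is controlled by three new module parameters: fqs_duration
(length of each burst, in microseconds; zero, the default, disables the new
testing), fqs_holdoff (delay between consecutive force_quiescent_state() calls
within a burst, in microseconds), and fqs_stutter (wait between bursts, in
seconds). As a rough illustration, the loop run by the new rcu_torture_fqs()
kthread boils down to the simplified sketch below; the real code in the
kernel/rcutorture.c hunk additionally uses a jiffies-wraparound comparison for
the inter-burst wait and honors rcutorture's stutter/shutdown machinery:

	/*
	 * Simplified sketch only; assumes fqs_holdoff > 0 whenever
	 * fqs_duration > 0, since the burst countdown decrements by
	 * fqs_holdoff each iteration.
	 */
	do {
		int remaining;

		schedule_timeout_interruptible(fqs_stutter * HZ); /* idle between bursts */
		for (remaining = fqs_duration; remaining > 0; remaining -= fqs_holdoff) {
			cur_ops->fqs();      /* flavor-specific force_quiescent_state() */
			udelay(fqs_holdoff); /* spacing between calls within a burst */
		}
	} while (!kthread_should_stop());

For example (illustrative values only), "modprobe rcutorture fqs_duration=100
fqs_holdoff=10 fqs_stutter=3" would produce a roughly 100-microsecond burst of
force_quiescent_state() calls spaced about 10 microseconds apart once every
three seconds.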
Suggested-by: Josh Triplett <josh@joshtriplett.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1262646551116-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/rcutiny.h  | 12
 include/linux/rcutree.h  |  3
 kernel/rcutorture.c      | 80
 kernel/rcutree.c         | 18
 kernel/rcutree_plugin.h  | 19
 5 files changed, 130 insertions(+), 2 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 96cc307ed9f4..2b70d4e37383 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -62,6 +62,18 @@ static inline long rcu_batches_completed_bh(void)
 
 extern int rcu_expedited_torture_stats(char *page);
 
+static inline void rcu_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
+
+static inline void rcu_sched_force_quiescent_state(void)
+{
+}
+
 #define synchronize_rcu synchronize_sched
 
 static inline void synchronize_rcu_expedited(void)
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 8044b1b94333..704a010f686c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -99,6 +99,9 @@ extern void rcu_check_callbacks(int cpu, int user);
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
 extern long rcu_batches_completed_sched(void);
+extern void rcu_force_quiescent_state(void);
+extern void rcu_bh_force_quiescent_state(void);
+extern void rcu_sched_force_quiescent_state(void);
 
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9bb52177af02..adda92bfafac 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -61,6 +61,9 @@ static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5; /* Start/stop testing interval (in sec) */
 static int irqreader = 1; /* RCU readers from irq (timers). */
+static int fqs_duration = 0; /* Duration of bursts (us), 0 to disable. */
+static int fqs_holdoff = 0; /* Hold time within burst (us). */
+static int fqs_stutter = 3; /* Wait time between bursts (s). */
 static char *torture_type = "rcu"; /* What RCU implementation to torture. */
 
 module_param(nreaders, int, 0444);
@@ -79,6 +82,12 @@ module_param(stutter, int, 0444);
 MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
 module_param(irqreader, int, 0444);
 MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+module_param(fqs_duration, int, 0444);
+MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
+module_param(fqs_holdoff, int, 0444);
+MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
+module_param(fqs_stutter, int, 0444);
+MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
 module_param(torture_type, charp, 0444);
 MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
 
@@ -99,6 +108,7 @@ static struct task_struct **reader_tasks;
 static struct task_struct *stats_task;
 static struct task_struct *shuffler_task;
 static struct task_struct *stutter_task;
+static struct task_struct *fqs_task;
 
 #define RCU_TORTURE_PIPE_LEN 10
 
@@ -263,6 +273,7 @@ struct rcu_torture_ops {
 	void (*deferred_free)(struct rcu_torture *p);
 	void (*sync)(void);
 	void (*cb_barrier)(void);
+	void (*fqs)(void);
 	int (*stats)(char *page);
 	int irq_capable;
 	char *name;
@@ -347,6 +358,7 @@ static struct rcu_torture_ops rcu_ops = {
 	.deferred_free = rcu_torture_deferred_free,
 	.sync = synchronize_rcu,
 	.cb_barrier = rcu_barrier,
+	.fqs = rcu_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "rcu"
@@ -388,6 +400,7 @@ static struct rcu_torture_ops rcu_sync_ops = {
 	.deferred_free = rcu_sync_torture_deferred_free,
 	.sync = synchronize_rcu,
 	.cb_barrier = NULL,
+	.fqs = rcu_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "rcu_sync"
@@ -403,6 +416,7 @@ static struct rcu_torture_ops rcu_expedited_ops = {
 	.deferred_free = rcu_sync_torture_deferred_free,
 	.sync = synchronize_rcu_expedited,
 	.cb_barrier = NULL,
+	.fqs = rcu_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "rcu_expedited"
@@ -465,6 +479,7 @@ static struct rcu_torture_ops rcu_bh_ops = {
 	.deferred_free = rcu_bh_torture_deferred_free,
 	.sync = rcu_bh_torture_synchronize,
 	.cb_barrier = rcu_barrier_bh,
+	.fqs = rcu_bh_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "rcu_bh"
@@ -480,6 +495,7 @@ static struct rcu_torture_ops rcu_bh_sync_ops = {
 	.deferred_free = rcu_sync_torture_deferred_free,
 	.sync = rcu_bh_torture_synchronize,
 	.cb_barrier = NULL,
+	.fqs = rcu_bh_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "rcu_bh_sync"
@@ -621,6 +637,7 @@ static struct rcu_torture_ops sched_ops = {
 	.deferred_free = rcu_sched_torture_deferred_free,
 	.sync = sched_torture_synchronize,
 	.cb_barrier = rcu_barrier_sched,
+	.fqs = rcu_sched_force_quiescent_state,
 	.stats = NULL,
 	.irq_capable = 1,
 	.name = "sched"
@@ -636,6 +653,7 @@ static struct rcu_torture_ops sched_sync_ops = {
 	.deferred_free = rcu_sync_torture_deferred_free,
 	.sync = sched_torture_synchronize,
 	.cb_barrier = NULL,
+	.fqs = rcu_sched_force_quiescent_state,
 	.stats = NULL,
 	.name = "sched_sync"
 };
@@ -650,12 +668,45 @@ static struct rcu_torture_ops sched_expedited_ops = {
 	.deferred_free = rcu_sync_torture_deferred_free,
 	.sync = synchronize_sched_expedited,
 	.cb_barrier = NULL,
+	.fqs = rcu_sched_force_quiescent_state,
 	.stats = rcu_expedited_torture_stats,
 	.irq_capable = 1,
 	.name = "sched_expedited"
 };
 
 /*
+ * RCU torture force-quiescent-state kthread.  Repeatedly induces
+ * bursts of calls to force_quiescent_state(), increasing the probability
+ * of occurrence of some important types of race conditions.
+ */
+static int
+rcu_torture_fqs(void *arg)
+{
+	unsigned long fqs_resume_time;
+	int fqs_burst_remaining;
+
+	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
+	do {
+		fqs_resume_time = jiffies + fqs_stutter * HZ;
+		while (jiffies - fqs_resume_time > LONG_MAX) {
+			schedule_timeout_interruptible(1);
+		}
+		fqs_burst_remaining = fqs_duration;
+		while (fqs_burst_remaining > 0) {
+			cur_ops->fqs();
+			udelay(fqs_holdoff);
+			fqs_burst_remaining -= fqs_holdoff;
+		}
+		rcu_stutter_wait("rcu_torture_fqs");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
+	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
+	rcutorture_shutdown_absorb("rcu_torture_fqs");
+	while (!kthread_should_stop())
+		schedule_timeout_uninterruptible(1);
+	return 0;
+}
+
+/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
@@ -1030,10 +1081,11 @@ rcu_torture_print_module_parms(char *tag)
 	printk(KERN_ALERT "%s" TORTURE_FLAG
 		"--- %s: nreaders=%d nfakewriters=%d "
 		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
-		"shuffle_interval=%d stutter=%d irqreader=%d\n",
+		"shuffle_interval=%d stutter=%d irqreader=%d "
+		"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d\n",
 		torture_type, tag, nrealreaders, nfakewriters,
 		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
-		stutter, irqreader);
+		stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter);
 }
 
 static struct notifier_block rcutorture_nb = {
@@ -1109,6 +1161,12 @@ rcu_torture_cleanup(void)
 	}
 	stats_task = NULL;
 
+	if (fqs_task) {
+		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
+		kthread_stop(fqs_task);
+	}
+	fqs_task = NULL;
+
 	/* Wait for all RCU callbacks to fire. */
 
 	if (cur_ops->cb_barrier != NULL)
@@ -1154,6 +1212,11 @@ rcu_torture_init(void)
 		mutex_unlock(&fullstop_mutex);
 		return -EINVAL;
 	}
+	if (cur_ops->fqs == NULL && fqs_duration != 0) {
+		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
+		       "fqs_duration, fqs disabled.\n");
+		fqs_duration = 0;
+	}
 	if (cur_ops->init)
 		cur_ops->init(); /* no "goto unwind" prior to this point!!! */
 
@@ -1282,6 +1345,19 @@ rcu_torture_init(void)
 			goto unwind;
 		}
 	}
+	if (fqs_duration < 0)
+		fqs_duration = 0;
+	if (fqs_duration) {
+		/* Create the stutter thread */
+		fqs_task = kthread_run(rcu_torture_fqs, NULL,
+				       "rcu_torture_fqs");
+		if (IS_ERR(fqs_task)) {
+			firsterr = PTR_ERR(fqs_task);
+			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
+			fqs_task = NULL;
+			goto unwind;
+		}
+	}
 	register_reboot_notifier(&rcutorture_nb);
 	mutex_unlock(&fullstop_mutex);
 	return 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 55e8f6ef8195..0a4c32879398 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -157,6 +157,24 @@ long rcu_batches_completed_bh(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 
 /*
+ * Force a quiescent state for RCU BH.
+ */
+void rcu_bh_force_quiescent_state(void)
+{
+	force_quiescent_state(&rcu_bh_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
+
+/*
+ * Force a quiescent state for RCU-sched.
+ */
+void rcu_sched_force_quiescent_state(void)
+{
+	force_quiescent_state(&rcu_sched_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
+
+/*
 * Does the CPU have callbacks ready to be invoked?
 */
 static int
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 37fbccdf41d5..f11ebd44b454 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -62,6 +62,15 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
+ * Force a quiescent state for preemptible RCU.
+ */
+void rcu_force_quiescent_state(void)
+{
+	force_quiescent_state(&rcu_preempt_state, 0);
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
+/*
 * Record a preemptable-RCU quiescent state for the specified CPU.  Note
 * that this just means that the task currently running on the CPU is
 * not in a quiescent state.  There might be any number of tasks blocked
@@ -713,6 +722,16 @@ long rcu_batches_completed(void)
 EXPORT_SYMBOL_GPL(rcu_batches_completed);
 
 /*
+ * Force a quiescent state for RCU, which, because there is no preemptible
+ * RCU, becomes the same as rcu-sched.
+ */
+void rcu_force_quiescent_state(void)
+{
+	rcu_sched_force_quiescent_state();
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+
+/*
 * Because preemptable RCU does not exist, we never have to check for
 * CPUs being in quiescent states.
 */
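
Note on where the new entry points live: with CONFIG_TREE_PREEMPT_RCU,
rcu_force_quiescent_state() (kernel/rcutree_plugin.h) forces a quiescent state
for rcu_preempt_state; without preemptible RCU it simply forwards to
rcu_sched_force_quiescent_state(). rcu_bh_force_quiescent_state() and
rcu_sched_force_quiescent_state() (kernel/rcutree.c) act on rcu_bh_state and
rcu_sched_state, and TINY_RCU gets empty static-inline stubs so rcutorture
still builds there. The net effect in rcutorture is that fqs testing runs only
when fqs_duration is positive and the selected torture type supplies a ->fqs
method; a minimal sketch of that combined condition, using only names from
this patch:

	/* Sketch: fqs testing is active only when both of these hold. */
	if (fqs_duration > 0 && cur_ops->fqs != NULL)
		fqs_task = kthread_run(rcu_torture_fqs, NULL, "rcu_torture_fqs");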