author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-02-26 09:38:59 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-02-26 09:38:59 -0500
commit     f5604f67fe8cbd6f2088b20b9463f721aa613d4b (patch)
tree       27b16f4c8415d60c796c24472c29968bc8d39b04 /kernel/rcu
parent     322efba5b6442f331ac8aa24e92a817d804cc938 (diff)
parent     73fa867e2c705cf5cf0a38df8618fa20eee3d75a (diff)
Merge branch 'torture.2014.02.23a' into HEAD
torture.2014.02.23a: locktorture addition and rcutorture changes
Diffstat (limited to 'kernel/rcu')
-rw-r--r--   kernel/rcu/Makefile                                             2
-rw-r--r--   kernel/rcu/rcutorture.c (renamed from kernel/rcu/torture.c)   992
2 files changed, 214 insertions, 780 deletions
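Most of the rcutorture.c churn below comes from switching to the shared torture-test infrastructure: the module parameters move to the torture_param() helper and the per-kthread stop/stutter plumbing moves to common code. As a reference, here is a minimal sketch of what torture_param() does, inferred from the before/after halves of the first hunk; the macro body is an assumption for illustration, and the real definition lives in include/linux/torture.h.

```c
/*
 * Minimal sketch of the torture_param() helper used throughout the new
 * code below: it collapses the old three-line "static variable +
 * module_param() + MODULE_PARM_DESC()" pattern into a single declaration.
 * The body here is an assumption for illustration; the real macro is
 * defined in include/linux/torture.h.
 */
#define torture_param(type, name, init, msg) \
	static type name = init; \
	module_param(name, type, 0444); \
	MODULE_PARM_DESC(name, msg)

/* For example, the old fqs_duration declaration block becomes: */
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
```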
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile index 01e9ec37a3e3..807ccfbf69b3 100644 --- a/kernel/rcu/Makefile +++ b/kernel/rcu/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y += update.o srcu.o | 1 | obj-y += update.o srcu.o |
2 | obj-$(CONFIG_RCU_TORTURE_TEST) += torture.o | 2 | obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o |
3 | obj-$(CONFIG_TREE_RCU) += tree.o | 3 | obj-$(CONFIG_TREE_RCU) += tree.o |
4 | obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o | 4 | obj-$(CONFIG_TREE_PREEMPT_RCU) += tree.o |
5 | obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o | 5 | obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o |
diff --git a/kernel/rcu/torture.c b/kernel/rcu/rcutorture.c index 022c5312b725..f59d48597dde 100644 --- a/kernel/rcu/torture.c +++ b/kernel/rcu/rcutorture.c | |||
@@ -48,110 +48,58 @@ | |||
48 | #include <linux/slab.h> | 48 | #include <linux/slab.h> |
49 | #include <linux/trace_clock.h> | 49 | #include <linux/trace_clock.h> |
50 | #include <asm/byteorder.h> | 50 | #include <asm/byteorder.h> |
51 | #include <linux/torture.h> | ||
51 | 52 | ||
52 | MODULE_LICENSE("GPL"); | 53 | MODULE_LICENSE("GPL"); |
53 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); | 54 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>"); |
54 | 55 | ||
55 | MODULE_ALIAS("rcutorture"); | 56 | |
56 | #ifdef MODULE_PARAM_PREFIX | 57 | torture_param(int, fqs_duration, 0, |
57 | #undef MODULE_PARAM_PREFIX | 58 | "Duration of fqs bursts (us), 0 to disable"); |
58 | #endif | 59 | torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)"); |
59 | #define MODULE_PARAM_PREFIX "rcutorture." | 60 | torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)"); |
60 | 61 | torture_param(bool, gp_exp, false, "Use expedited GP wait primitives"); | |
61 | static int fqs_duration; | 62 | torture_param(bool, gp_normal, false, |
62 | module_param(fqs_duration, int, 0444); | 63 | "Use normal (non-expedited) GP wait primitives"); |
63 | MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable"); | 64 | torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers"); |
64 | static int fqs_holdoff; | 65 | torture_param(int, n_barrier_cbs, 0, |
65 | module_param(fqs_holdoff, int, 0444); | 66 | "# of callbacks/kthreads for barrier testing"); |
66 | MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)"); | 67 | torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads"); |
67 | static int fqs_stutter = 3; | 68 | torture_param(int, nreaders, -1, "Number of RCU reader threads"); |
68 | module_param(fqs_stutter, int, 0444); | 69 | torture_param(int, object_debug, 0, |
69 | MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)"); | 70 | "Enable debug-object double call_rcu() testing"); |
70 | static bool gp_exp; | 71 | torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)"); |
71 | module_param(gp_exp, bool, 0444); | 72 | torture_param(int, onoff_interval, 0, |
72 | MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives"); | 73 | "Time between CPU hotplugs (s), 0=disable"); |
73 | static bool gp_normal; | 74 | torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles"); |
74 | module_param(gp_normal, bool, 0444); | 75 | torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable."); |
75 | MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives"); | 76 | torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable."); |
76 | static int irqreader = 1; | 77 | torture_param(int, stall_cpu_holdoff, 10, |
77 | module_param(irqreader, int, 0444); | 78 | "Time to wait before starting stall (s)."); |
78 | MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers"); | 79 | torture_param(int, stat_interval, 60, |
79 | static int n_barrier_cbs; | 80 | "Number of seconds between stats printk()s"); |
80 | module_param(n_barrier_cbs, int, 0444); | 81 | torture_param(int, stutter, 5, "Number of seconds to run/halt test"); |
81 | MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing"); | 82 | torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); |
82 | static int nfakewriters = 4; | 83 | torture_param(int, test_boost_duration, 4, |
83 | module_param(nfakewriters, int, 0444); | 84 | "Duration of each boost test, seconds."); |
84 | MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads"); | 85 | torture_param(int, test_boost_interval, 7, |
85 | static int nreaders = -1; | 86 | "Interval between boost tests, seconds."); |
86 | module_param(nreaders, int, 0444); | 87 | torture_param(bool, test_no_idle_hz, true, |
87 | MODULE_PARM_DESC(nreaders, "Number of RCU reader threads"); | 88 | "Test support for tickless idle CPUs"); |
88 | static int object_debug; | 89 | torture_param(bool, verbose, true, |
89 | module_param(object_debug, int, 0444); | 90 | "Enable verbose debugging printk()s"); |
90 | MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing"); | 91 | |
91 | static int onoff_holdoff; | ||
92 | module_param(onoff_holdoff, int, 0444); | ||
93 | MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)"); | ||
94 | static int onoff_interval; | ||
95 | module_param(onoff_interval, int, 0444); | ||
96 | MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable"); | ||
97 | static int shuffle_interval = 3; | ||
98 | module_param(shuffle_interval, int, 0444); | ||
99 | MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles"); | ||
100 | static int shutdown_secs; | ||
101 | module_param(shutdown_secs, int, 0444); | ||
102 | MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable."); | ||
103 | static int stall_cpu; | ||
104 | module_param(stall_cpu, int, 0444); | ||
105 | MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable."); | ||
106 | static int stall_cpu_holdoff = 10; | ||
107 | module_param(stall_cpu_holdoff, int, 0444); | ||
108 | MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s)."); | ||
109 | static int stat_interval = 60; | ||
110 | module_param(stat_interval, int, 0644); | ||
111 | MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s"); | ||
112 | static int stutter = 5; | ||
113 | module_param(stutter, int, 0444); | ||
114 | MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test"); | ||
115 | static int test_boost = 1; | ||
116 | module_param(test_boost, int, 0444); | ||
117 | MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes."); | ||
118 | static int test_boost_duration = 4; | ||
119 | module_param(test_boost_duration, int, 0444); | ||
120 | MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds."); | ||
121 | static int test_boost_interval = 7; | ||
122 | module_param(test_boost_interval, int, 0444); | ||
123 | MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds."); | ||
124 | static bool test_no_idle_hz = true; | ||
125 | module_param(test_no_idle_hz, bool, 0444); | ||
126 | MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs"); | ||
127 | static char *torture_type = "rcu"; | 92 | static char *torture_type = "rcu"; |
128 | module_param(torture_type, charp, 0444); | 93 | module_param(torture_type, charp, 0444); |
129 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); | 94 | MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)"); |
130 | static bool verbose; | ||
131 | module_param(verbose, bool, 0444); | ||
132 | MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s"); | ||
133 | |||
134 | #define TORTURE_FLAG "-torture:" | ||
135 | #define PRINTK_STRING(s) \ | ||
136 | do { pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) | ||
137 | #define VERBOSE_PRINTK_STRING(s) \ | ||
138 | do { if (verbose) pr_alert("%s" TORTURE_FLAG s "\n", torture_type); } while (0) | ||
139 | #define VERBOSE_PRINTK_ERRSTRING(s) \ | ||
140 | do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0) | ||
141 | 95 | ||
142 | static int nrealreaders; | 96 | static int nrealreaders; |
143 | static struct task_struct *writer_task; | 97 | static struct task_struct *writer_task; |
144 | static struct task_struct **fakewriter_tasks; | 98 | static struct task_struct **fakewriter_tasks; |
145 | static struct task_struct **reader_tasks; | 99 | static struct task_struct **reader_tasks; |
146 | static struct task_struct *stats_task; | 100 | static struct task_struct *stats_task; |
147 | static struct task_struct *shuffler_task; | ||
148 | static struct task_struct *stutter_task; | ||
149 | static struct task_struct *fqs_task; | 101 | static struct task_struct *fqs_task; |
150 | static struct task_struct *boost_tasks[NR_CPUS]; | 102 | static struct task_struct *boost_tasks[NR_CPUS]; |
151 | static struct task_struct *shutdown_task; | ||
152 | #ifdef CONFIG_HOTPLUG_CPU | ||
153 | static struct task_struct *onoff_task; | ||
154 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
155 | static struct task_struct *stall_task; | 103 | static struct task_struct *stall_task; |
156 | static struct task_struct **barrier_cbs_tasks; | 104 | static struct task_struct **barrier_cbs_tasks; |
157 | static struct task_struct *barrier_task; | 105 | static struct task_struct *barrier_task; |
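The remainder of this hunk, and most hunks that follow, apply the same mechanical conversion to each torture kthread: rcu_stutter_wait() becomes stutter_wait(), the "kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP" exit test becomes torture_must_stop(), and the open-coded shutdown-absorb loop at thread exit becomes torture_kthread_stopping(). The skeleton below is a sketch pieced together from the converted loops later in this diff; the "example" identifiers are placeholders, not part of the patch.

```c
/*
 * Skeleton of a torture kthread after this conversion, pieced together
 * from rcu_torture_writer(), rcu_torture_fqs(), and friends below.
 * The "example" identifiers are placeholders for illustration only.
 */
static int example_torture_kthread(void *unused)
{
	VERBOSE_TOROUT_STRING("example task started");
	do {
		/* ... one iteration of the test's real work ... */

		/* Pause here whenever the shared stutter logic says so. */
		stutter_wait("example");

		/*
		 * torture_must_stop() replaces the old check for
		 * kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP.
		 */
	} while (!torture_must_stop());

	/*
	 * Replaces the open-coded rcutorture_shutdown_absorb() plus
	 * schedule_timeout_uninterruptible() spin at thread exit.
	 */
	torture_kthread_stopping("example");
	return 0;
}
```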
@@ -170,10 +118,10 @@ static struct rcu_torture __rcu *rcu_torture_current; | |||
170 | static unsigned long rcu_torture_current_version; | 118 | static unsigned long rcu_torture_current_version; |
171 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; | 119 | static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN]; |
172 | static DEFINE_SPINLOCK(rcu_torture_lock); | 120 | static DEFINE_SPINLOCK(rcu_torture_lock); |
173 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = | 121 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], |
174 | { 0 }; | 122 | rcu_torture_count) = { 0 }; |
175 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = | 123 | static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], |
176 | { 0 }; | 124 | rcu_torture_batch) = { 0 }; |
177 | static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; | 125 | static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1]; |
178 | static atomic_t n_rcu_torture_alloc; | 126 | static atomic_t n_rcu_torture_alloc; |
179 | static atomic_t n_rcu_torture_alloc_fail; | 127 | static atomic_t n_rcu_torture_alloc_fail; |
@@ -186,22 +134,9 @@ static long n_rcu_torture_boost_rterror; | |||
186 | static long n_rcu_torture_boost_failure; | 134 | static long n_rcu_torture_boost_failure; |
187 | static long n_rcu_torture_boosts; | 135 | static long n_rcu_torture_boosts; |
188 | static long n_rcu_torture_timers; | 136 | static long n_rcu_torture_timers; |
189 | static long n_offline_attempts; | ||
190 | static long n_offline_successes; | ||
191 | static unsigned long sum_offline; | ||
192 | static int min_offline = -1; | ||
193 | static int max_offline; | ||
194 | static long n_online_attempts; | ||
195 | static long n_online_successes; | ||
196 | static unsigned long sum_online; | ||
197 | static int min_online = -1; | ||
198 | static int max_online; | ||
199 | static long n_barrier_attempts; | 137 | static long n_barrier_attempts; |
200 | static long n_barrier_successes; | 138 | static long n_barrier_successes; |
201 | static struct list_head rcu_torture_removed; | 139 | static struct list_head rcu_torture_removed; |
202 | static cpumask_var_t shuffle_tmp_mask; | ||
203 | |||
204 | static int stutter_pause_test; | ||
205 | 140 | ||
206 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) | 141 | #if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE) |
207 | #define RCUTORTURE_RUNNABLE_INIT 1 | 142 | #define RCUTORTURE_RUNNABLE_INIT 1 |
@@ -232,7 +167,6 @@ static u64 notrace rcu_trace_clock_local(void) | |||
232 | } | 167 | } |
233 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ | 168 | #endif /* #else #ifdef CONFIG_RCU_TRACE */ |
234 | 169 | ||
235 | static unsigned long shutdown_time; /* jiffies to system shutdown. */ | ||
236 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ | 170 | static unsigned long boost_starttime; /* jiffies of next boost test start. */ |
237 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ | 171 | DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */ |
238 | /* and boost task create/destroy. */ | 172 | /* and boost task create/destroy. */ |
@@ -242,51 +176,6 @@ static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */ | |||
242 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ | 176 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */ |
243 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); | 177 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq); |
244 | 178 | ||
245 | /* Mediate rmmod and system shutdown. Concurrent rmmod & shutdown illegal! */ | ||
246 | |||
247 | #define FULLSTOP_DONTSTOP 0 /* Normal operation. */ | ||
248 | #define FULLSTOP_SHUTDOWN 1 /* System shutdown with rcutorture running. */ | ||
249 | #define FULLSTOP_RMMOD 2 /* Normal rmmod of rcutorture. */ | ||
250 | static int fullstop = FULLSTOP_RMMOD; | ||
251 | /* | ||
252 | * Protect fullstop transitions and spawning of kthreads. | ||
253 | */ | ||
254 | static DEFINE_MUTEX(fullstop_mutex); | ||
255 | |||
256 | /* Forward reference. */ | ||
257 | static void rcu_torture_cleanup(void); | ||
258 | |||
259 | /* | ||
260 | * Detect and respond to a system shutdown. | ||
261 | */ | ||
262 | static int | ||
263 | rcutorture_shutdown_notify(struct notifier_block *unused1, | ||
264 | unsigned long unused2, void *unused3) | ||
265 | { | ||
266 | mutex_lock(&fullstop_mutex); | ||
267 | if (fullstop == FULLSTOP_DONTSTOP) | ||
268 | fullstop = FULLSTOP_SHUTDOWN; | ||
269 | else | ||
270 | pr_warn(/* but going down anyway, so... */ | ||
271 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
272 | mutex_unlock(&fullstop_mutex); | ||
273 | return NOTIFY_DONE; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * Absorb kthreads into a kernel function that won't return, so that | ||
278 | * they won't ever access module text or data again. | ||
279 | */ | ||
280 | static void rcutorture_shutdown_absorb(const char *title) | ||
281 | { | ||
282 | if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { | ||
283 | pr_notice( | ||
284 | "rcutorture thread %s parking due to system shutdown\n", | ||
285 | title); | ||
286 | schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT); | ||
287 | } | ||
288 | } | ||
289 | |||
290 | /* | 179 | /* |
291 | * Allocate an element from the rcu_tortures pool. | 180 | * Allocate an element from the rcu_tortures pool. |
292 | */ | 181 | */ |
@@ -320,44 +209,6 @@ rcu_torture_free(struct rcu_torture *p) | |||
320 | spin_unlock_bh(&rcu_torture_lock); | 209 | spin_unlock_bh(&rcu_torture_lock); |
321 | } | 210 | } |
322 | 211 | ||
323 | struct rcu_random_state { | ||
324 | unsigned long rrs_state; | ||
325 | long rrs_count; | ||
326 | }; | ||
327 | |||
328 | #define RCU_RANDOM_MULT 39916801 /* prime */ | ||
329 | #define RCU_RANDOM_ADD 479001701 /* prime */ | ||
330 | #define RCU_RANDOM_REFRESH 10000 | ||
331 | |||
332 | #define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 } | ||
333 | |||
334 | /* | ||
335 | * Crude but fast random-number generator. Uses a linear congruential | ||
336 | * generator, with occasional help from cpu_clock(). | ||
337 | */ | ||
338 | static unsigned long | ||
339 | rcu_random(struct rcu_random_state *rrsp) | ||
340 | { | ||
341 | if (--rrsp->rrs_count < 0) { | ||
342 | rrsp->rrs_state += (unsigned long)local_clock(); | ||
343 | rrsp->rrs_count = RCU_RANDOM_REFRESH; | ||
344 | } | ||
345 | rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD; | ||
346 | return swahw32(rrsp->rrs_state); | ||
347 | } | ||
348 | |||
349 | static void | ||
350 | rcu_stutter_wait(const char *title) | ||
351 | { | ||
352 | while (stutter_pause_test || !rcutorture_runnable) { | ||
353 | if (rcutorture_runnable) | ||
354 | schedule_timeout_interruptible(1); | ||
355 | else | ||
356 | schedule_timeout_interruptible(round_jiffies_relative(HZ)); | ||
357 | rcutorture_shutdown_absorb(title); | ||
358 | } | ||
359 | } | ||
360 | |||
361 | /* | 212 | /* |
362 | * Operations vector for selecting different types of tests. | 213 | * Operations vector for selecting different types of tests. |
363 | */ | 214 | */ |
@@ -365,7 +216,7 @@ rcu_stutter_wait(const char *title) | |||
365 | struct rcu_torture_ops { | 216 | struct rcu_torture_ops { |
366 | void (*init)(void); | 217 | void (*init)(void); |
367 | int (*readlock)(void); | 218 | int (*readlock)(void); |
368 | void (*read_delay)(struct rcu_random_state *rrsp); | 219 | void (*read_delay)(struct torture_random_state *rrsp); |
369 | void (*readunlock)(int idx); | 220 | void (*readunlock)(int idx); |
370 | int (*completed)(void); | 221 | int (*completed)(void); |
371 | void (*deferred_free)(struct rcu_torture *p); | 222 | void (*deferred_free)(struct rcu_torture *p); |
@@ -392,7 +243,7 @@ static int rcu_torture_read_lock(void) __acquires(RCU) | |||
392 | return 0; | 243 | return 0; |
393 | } | 244 | } |
394 | 245 | ||
395 | static void rcu_read_delay(struct rcu_random_state *rrsp) | 246 | static void rcu_read_delay(struct torture_random_state *rrsp) |
396 | { | 247 | { |
397 | const unsigned long shortdelay_us = 200; | 248 | const unsigned long shortdelay_us = 200; |
398 | const unsigned long longdelay_ms = 50; | 249 | const unsigned long longdelay_ms = 50; |
@@ -401,12 +252,13 @@ static void rcu_read_delay(struct rcu_random_state *rrsp) | |||
401 | * period, and we want a long delay occasionally to trigger | 252 | * period, and we want a long delay occasionally to trigger |
402 | * force_quiescent_state. */ | 253 | * force_quiescent_state. */ |
403 | 254 | ||
404 | if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) | 255 | if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) |
405 | mdelay(longdelay_ms); | 256 | mdelay(longdelay_ms); |
406 | if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) | 257 | if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) |
407 | udelay(shortdelay_us); | 258 | udelay(shortdelay_us); |
408 | #ifdef CONFIG_PREEMPT | 259 | #ifdef CONFIG_PREEMPT |
409 | if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000))) | 260 | if (!preempt_count() && |
261 | !(torture_random(rrsp) % (nrealreaders * 20000))) | ||
410 | preempt_schedule(); /* No QS if preempt_disable() in effect */ | 262 | preempt_schedule(); /* No QS if preempt_disable() in effect */ |
411 | #endif | 263 | #endif |
412 | } | 264 | } |
@@ -427,7 +279,7 @@ rcu_torture_cb(struct rcu_head *p) | |||
427 | int i; | 279 | int i; |
428 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); | 280 | struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu); |
429 | 281 | ||
430 | if (fullstop != FULLSTOP_DONTSTOP) { | 282 | if (torture_must_stop_irq()) { |
431 | /* Test is ending, just drop callbacks on the floor. */ | 283 | /* Test is ending, just drop callbacks on the floor. */ |
432 | /* The next initialization will pick up the pieces. */ | 284 | /* The next initialization will pick up the pieces. */ |
433 | return; | 285 | return; |
@@ -520,6 +372,48 @@ static struct rcu_torture_ops rcu_bh_ops = { | |||
520 | }; | 372 | }; |
521 | 373 | ||
522 | /* | 374 | /* |
375 | * Don't even think about trying any of these in real life!!! | ||
376 | * The names includes "busted", and they really means it! | ||
377 | * The only purpose of these functions is to provide a buggy RCU | ||
378 | * implementation to make sure that rcutorture correctly emits | ||
379 | * buggy-RCU error messages. | ||
380 | */ | ||
381 | static void rcu_busted_torture_deferred_free(struct rcu_torture *p) | ||
382 | { | ||
383 | /* This is a deliberate bug for testing purposes only! */ | ||
384 | rcu_torture_cb(&p->rtort_rcu); | ||
385 | } | ||
386 | |||
387 | static void synchronize_rcu_busted(void) | ||
388 | { | ||
389 | /* This is a deliberate bug for testing purposes only! */ | ||
390 | } | ||
391 | |||
392 | static void | ||
393 | call_rcu_busted(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | ||
394 | { | ||
395 | /* This is a deliberate bug for testing purposes only! */ | ||
396 | func(head); | ||
397 | } | ||
398 | |||
399 | static struct rcu_torture_ops rcu_busted_ops = { | ||
400 | .init = rcu_sync_torture_init, | ||
401 | .readlock = rcu_torture_read_lock, | ||
402 | .read_delay = rcu_read_delay, /* just reuse rcu's version. */ | ||
403 | .readunlock = rcu_torture_read_unlock, | ||
404 | .completed = rcu_no_completed, | ||
405 | .deferred_free = rcu_busted_torture_deferred_free, | ||
406 | .sync = synchronize_rcu_busted, | ||
407 | .exp_sync = synchronize_rcu_busted, | ||
408 | .call = call_rcu_busted, | ||
409 | .cb_barrier = NULL, | ||
410 | .fqs = NULL, | ||
411 | .stats = NULL, | ||
412 | .irq_capable = 1, | ||
413 | .name = "rcu_busted" | ||
414 | }; | ||
415 | |||
416 | /* | ||
523 | * Definitions for srcu torture testing. | 417 | * Definitions for srcu torture testing. |
524 | */ | 418 | */ |
525 | 419 | ||
@@ -530,7 +424,7 @@ static int srcu_torture_read_lock(void) __acquires(&srcu_ctl) | |||
530 | return srcu_read_lock(&srcu_ctl); | 424 | return srcu_read_lock(&srcu_ctl); |
531 | } | 425 | } |
532 | 426 | ||
533 | static void srcu_read_delay(struct rcu_random_state *rrsp) | 427 | static void srcu_read_delay(struct torture_random_state *rrsp) |
534 | { | 428 | { |
535 | long delay; | 429 | long delay; |
536 | const long uspertick = 1000000 / HZ; | 430 | const long uspertick = 1000000 / HZ; |
@@ -538,7 +432,8 @@ static void srcu_read_delay(struct rcu_random_state *rrsp) | |||
538 | 432 | ||
539 | /* We want there to be long-running readers, but not all the time. */ | 433 | /* We want there to be long-running readers, but not all the time. */ |
540 | 434 | ||
541 | delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick); | 435 | delay = torture_random(rrsp) % |
436 | (nrealreaders * 2 * longdelay * uspertick); | ||
542 | if (!delay) | 437 | if (!delay) |
543 | schedule_timeout_interruptible(longdelay); | 438 | schedule_timeout_interruptible(longdelay); |
544 | else | 439 | else |
@@ -677,12 +572,12 @@ static int rcu_torture_boost(void *arg) | |||
677 | struct rcu_boost_inflight rbi = { .inflight = 0 }; | 572 | struct rcu_boost_inflight rbi = { .inflight = 0 }; |
678 | struct sched_param sp; | 573 | struct sched_param sp; |
679 | 574 | ||
680 | VERBOSE_PRINTK_STRING("rcu_torture_boost started"); | 575 | VERBOSE_TOROUT_STRING("rcu_torture_boost started"); |
681 | 576 | ||
682 | /* Set real-time priority. */ | 577 | /* Set real-time priority. */ |
683 | sp.sched_priority = 1; | 578 | sp.sched_priority = 1; |
684 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { | 579 | if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) { |
685 | VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!"); | 580 | VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!"); |
686 | n_rcu_torture_boost_rterror++; | 581 | n_rcu_torture_boost_rterror++; |
687 | } | 582 | } |
688 | 583 | ||
@@ -693,9 +588,8 @@ static int rcu_torture_boost(void *arg) | |||
693 | oldstarttime = boost_starttime; | 588 | oldstarttime = boost_starttime; |
694 | while (ULONG_CMP_LT(jiffies, oldstarttime)) { | 589 | while (ULONG_CMP_LT(jiffies, oldstarttime)) { |
695 | schedule_timeout_interruptible(oldstarttime - jiffies); | 590 | schedule_timeout_interruptible(oldstarttime - jiffies); |
696 | rcu_stutter_wait("rcu_torture_boost"); | 591 | stutter_wait("rcu_torture_boost"); |
697 | if (kthread_should_stop() || | 592 | if (torture_must_stop()) |
698 | fullstop != FULLSTOP_DONTSTOP) | ||
699 | goto checkwait; | 593 | goto checkwait; |
700 | } | 594 | } |
701 | 595 | ||
@@ -710,15 +604,14 @@ static int rcu_torture_boost(void *arg) | |||
710 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); | 604 | call_rcu(&rbi.rcu, rcu_torture_boost_cb); |
711 | if (jiffies - call_rcu_time > | 605 | if (jiffies - call_rcu_time > |
712 | test_boost_duration * HZ - HZ / 2) { | 606 | test_boost_duration * HZ - HZ / 2) { |
713 | VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed"); | 607 | VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed"); |
714 | n_rcu_torture_boost_failure++; | 608 | n_rcu_torture_boost_failure++; |
715 | } | 609 | } |
716 | call_rcu_time = jiffies; | 610 | call_rcu_time = jiffies; |
717 | } | 611 | } |
718 | cond_resched(); | 612 | cond_resched(); |
719 | rcu_stutter_wait("rcu_torture_boost"); | 613 | stutter_wait("rcu_torture_boost"); |
720 | if (kthread_should_stop() || | 614 | if (torture_must_stop()) |
721 | fullstop != FULLSTOP_DONTSTOP) | ||
722 | goto checkwait; | 615 | goto checkwait; |
723 | } | 616 | } |
724 | 617 | ||
@@ -742,16 +635,17 @@ static int rcu_torture_boost(void *arg) | |||
742 | } | 635 | } |
743 | 636 | ||
744 | /* Go do the stutter. */ | 637 | /* Go do the stutter. */ |
745 | checkwait: rcu_stutter_wait("rcu_torture_boost"); | 638 | checkwait: stutter_wait("rcu_torture_boost"); |
746 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 639 | } while (!torture_must_stop()); |
747 | 640 | ||
748 | /* Clean up and exit. */ | 641 | /* Clean up and exit. */ |
749 | VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping"); | 642 | while (!kthread_should_stop() || rbi.inflight) { |
750 | rcutorture_shutdown_absorb("rcu_torture_boost"); | 643 | torture_shutdown_absorb("rcu_torture_boost"); |
751 | while (!kthread_should_stop() || rbi.inflight) | ||
752 | schedule_timeout_uninterruptible(1); | 644 | schedule_timeout_uninterruptible(1); |
645 | } | ||
753 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ | 646 | smp_mb(); /* order accesses to ->inflight before stack-frame death. */ |
754 | destroy_rcu_head_on_stack(&rbi.rcu); | 647 | destroy_rcu_head_on_stack(&rbi.rcu); |
648 | torture_kthread_stopping("rcu_torture_boost"); | ||
755 | return 0; | 649 | return 0; |
756 | } | 650 | } |
757 | 651 | ||
@@ -766,7 +660,7 @@ rcu_torture_fqs(void *arg) | |||
766 | unsigned long fqs_resume_time; | 660 | unsigned long fqs_resume_time; |
767 | int fqs_burst_remaining; | 661 | int fqs_burst_remaining; |
768 | 662 | ||
769 | VERBOSE_PRINTK_STRING("rcu_torture_fqs task started"); | 663 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started"); |
770 | do { | 664 | do { |
771 | fqs_resume_time = jiffies + fqs_stutter * HZ; | 665 | fqs_resume_time = jiffies + fqs_stutter * HZ; |
772 | while (ULONG_CMP_LT(jiffies, fqs_resume_time) && | 666 | while (ULONG_CMP_LT(jiffies, fqs_resume_time) && |
@@ -780,12 +674,9 @@ rcu_torture_fqs(void *arg) | |||
780 | udelay(fqs_holdoff); | 674 | udelay(fqs_holdoff); |
781 | fqs_burst_remaining -= fqs_holdoff; | 675 | fqs_burst_remaining -= fqs_holdoff; |
782 | } | 676 | } |
783 | rcu_stutter_wait("rcu_torture_fqs"); | 677 | stutter_wait("rcu_torture_fqs"); |
784 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 678 | } while (!torture_must_stop()); |
785 | VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping"); | 679 | torture_kthread_stopping("rcu_torture_fqs"); |
786 | rcutorture_shutdown_absorb("rcu_torture_fqs"); | ||
787 | while (!kthread_should_stop()) | ||
788 | schedule_timeout_uninterruptible(1); | ||
789 | return 0; | 680 | return 0; |
790 | } | 681 | } |
791 | 682 | ||
@@ -802,9 +693,9 @@ rcu_torture_writer(void *arg) | |||
802 | struct rcu_torture *rp; | 693 | struct rcu_torture *rp; |
803 | struct rcu_torture *rp1; | 694 | struct rcu_torture *rp1; |
804 | struct rcu_torture *old_rp; | 695 | struct rcu_torture *old_rp; |
805 | static DEFINE_RCU_RANDOM(rand); | 696 | static DEFINE_TORTURE_RANDOM(rand); |
806 | 697 | ||
807 | VERBOSE_PRINTK_STRING("rcu_torture_writer task started"); | 698 | VERBOSE_TOROUT_STRING("rcu_torture_writer task started"); |
808 | set_user_nice(current, 19); | 699 | set_user_nice(current, 19); |
809 | 700 | ||
810 | do { | 701 | do { |
@@ -813,7 +704,7 @@ rcu_torture_writer(void *arg) | |||
813 | if (rp == NULL) | 704 | if (rp == NULL) |
814 | continue; | 705 | continue; |
815 | rp->rtort_pipe_count = 0; | 706 | rp->rtort_pipe_count = 0; |
816 | udelay(rcu_random(&rand) & 0x3ff); | 707 | udelay(torture_random(&rand) & 0x3ff); |
817 | old_rp = rcu_dereference_check(rcu_torture_current, | 708 | old_rp = rcu_dereference_check(rcu_torture_current, |
818 | current == writer_task); | 709 | current == writer_task); |
819 | rp->rtort_mbtest = 1; | 710 | rp->rtort_mbtest = 1; |
@@ -826,7 +717,7 @@ rcu_torture_writer(void *arg) | |||
826 | atomic_inc(&rcu_torture_wcount[i]); | 717 | atomic_inc(&rcu_torture_wcount[i]); |
827 | old_rp->rtort_pipe_count++; | 718 | old_rp->rtort_pipe_count++; |
828 | if (gp_normal == gp_exp) | 719 | if (gp_normal == gp_exp) |
829 | exp = !!(rcu_random(&rand) & 0x80); | 720 | exp = !!(torture_random(&rand) & 0x80); |
830 | else | 721 | else |
831 | exp = gp_exp; | 722 | exp = gp_exp; |
832 | if (!exp) { | 723 | if (!exp) { |
@@ -852,12 +743,9 @@ rcu_torture_writer(void *arg) | |||
852 | } | 743 | } |
853 | } | 744 | } |
854 | rcutorture_record_progress(++rcu_torture_current_version); | 745 | rcutorture_record_progress(++rcu_torture_current_version); |
855 | rcu_stutter_wait("rcu_torture_writer"); | 746 | stutter_wait("rcu_torture_writer"); |
856 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 747 | } while (!torture_must_stop()); |
857 | VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping"); | 748 | torture_kthread_stopping("rcu_torture_writer"); |
858 | rcutorture_shutdown_absorb("rcu_torture_writer"); | ||
859 | while (!kthread_should_stop()) | ||
860 | schedule_timeout_uninterruptible(1); | ||
861 | return 0; | 749 | return 0; |
862 | } | 750 | } |
863 | 751 | ||
@@ -868,19 +756,19 @@ rcu_torture_writer(void *arg) | |||
868 | static int | 756 | static int |
869 | rcu_torture_fakewriter(void *arg) | 757 | rcu_torture_fakewriter(void *arg) |
870 | { | 758 | { |
871 | DEFINE_RCU_RANDOM(rand); | 759 | DEFINE_TORTURE_RANDOM(rand); |
872 | 760 | ||
873 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started"); | 761 | VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started"); |
874 | set_user_nice(current, 19); | 762 | set_user_nice(current, 19); |
875 | 763 | ||
876 | do { | 764 | do { |
877 | schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10); | 765 | schedule_timeout_uninterruptible(1 + torture_random(&rand)%10); |
878 | udelay(rcu_random(&rand) & 0x3ff); | 766 | udelay(torture_random(&rand) & 0x3ff); |
879 | if (cur_ops->cb_barrier != NULL && | 767 | if (cur_ops->cb_barrier != NULL && |
880 | rcu_random(&rand) % (nfakewriters * 8) == 0) { | 768 | torture_random(&rand) % (nfakewriters * 8) == 0) { |
881 | cur_ops->cb_barrier(); | 769 | cur_ops->cb_barrier(); |
882 | } else if (gp_normal == gp_exp) { | 770 | } else if (gp_normal == gp_exp) { |
883 | if (rcu_random(&rand) & 0x80) | 771 | if (torture_random(&rand) & 0x80) |
884 | cur_ops->sync(); | 772 | cur_ops->sync(); |
885 | else | 773 | else |
886 | cur_ops->exp_sync(); | 774 | cur_ops->exp_sync(); |
@@ -889,13 +777,10 @@ rcu_torture_fakewriter(void *arg) | |||
889 | } else { | 777 | } else { |
890 | cur_ops->exp_sync(); | 778 | cur_ops->exp_sync(); |
891 | } | 779 | } |
892 | rcu_stutter_wait("rcu_torture_fakewriter"); | 780 | stutter_wait("rcu_torture_fakewriter"); |
893 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 781 | } while (!torture_must_stop()); |
894 | 782 | ||
895 | VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping"); | 783 | torture_kthread_stopping("rcu_torture_fakewriter"); |
896 | rcutorture_shutdown_absorb("rcu_torture_fakewriter"); | ||
897 | while (!kthread_should_stop()) | ||
898 | schedule_timeout_uninterruptible(1); | ||
899 | return 0; | 784 | return 0; |
900 | } | 785 | } |
901 | 786 | ||
@@ -921,7 +806,7 @@ static void rcu_torture_timer(unsigned long unused) | |||
921 | int idx; | 806 | int idx; |
922 | int completed; | 807 | int completed; |
923 | int completed_end; | 808 | int completed_end; |
924 | static DEFINE_RCU_RANDOM(rand); | 809 | static DEFINE_TORTURE_RANDOM(rand); |
925 | static DEFINE_SPINLOCK(rand_lock); | 810 | static DEFINE_SPINLOCK(rand_lock); |
926 | struct rcu_torture *p; | 811 | struct rcu_torture *p; |
927 | int pipe_count; | 812 | int pipe_count; |
@@ -980,13 +865,13 @@ rcu_torture_reader(void *arg) | |||
980 | int completed; | 865 | int completed; |
981 | int completed_end; | 866 | int completed_end; |
982 | int idx; | 867 | int idx; |
983 | DEFINE_RCU_RANDOM(rand); | 868 | DEFINE_TORTURE_RANDOM(rand); |
984 | struct rcu_torture *p; | 869 | struct rcu_torture *p; |
985 | int pipe_count; | 870 | int pipe_count; |
986 | struct timer_list t; | 871 | struct timer_list t; |
987 | unsigned long long ts; | 872 | unsigned long long ts; |
988 | 873 | ||
989 | VERBOSE_PRINTK_STRING("rcu_torture_reader task started"); | 874 | VERBOSE_TOROUT_STRING("rcu_torture_reader task started"); |
990 | set_user_nice(current, 19); | 875 | set_user_nice(current, 19); |
991 | if (irqreader && cur_ops->irq_capable) | 876 | if (irqreader && cur_ops->irq_capable) |
992 | setup_timer_on_stack(&t, rcu_torture_timer, 0); | 877 | setup_timer_on_stack(&t, rcu_torture_timer, 0); |
@@ -1034,14 +919,11 @@ rcu_torture_reader(void *arg) | |||
1034 | preempt_enable(); | 919 | preempt_enable(); |
1035 | cur_ops->readunlock(idx); | 920 | cur_ops->readunlock(idx); |
1036 | schedule(); | 921 | schedule(); |
1037 | rcu_stutter_wait("rcu_torture_reader"); | 922 | stutter_wait("rcu_torture_reader"); |
1038 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 923 | } while (!torture_must_stop()); |
1039 | VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping"); | ||
1040 | rcutorture_shutdown_absorb("rcu_torture_reader"); | ||
1041 | if (irqreader && cur_ops->irq_capable) | 924 | if (irqreader && cur_ops->irq_capable) |
1042 | del_timer_sync(&t); | 925 | del_timer_sync(&t); |
1043 | while (!kthread_should_stop()) | 926 | torture_kthread_stopping("rcu_torture_reader"); |
1044 | schedule_timeout_uninterruptible(1); | ||
1045 | return 0; | 927 | return 0; |
1046 | } | 928 | } |
1047 | 929 | ||
@@ -1083,13 +965,7 @@ rcu_torture_printk(char *page) | |||
1083 | n_rcu_torture_boost_failure, | 965 | n_rcu_torture_boost_failure, |
1084 | n_rcu_torture_boosts, | 966 | n_rcu_torture_boosts, |
1085 | n_rcu_torture_timers); | 967 | n_rcu_torture_timers); |
1086 | page += sprintf(page, | 968 | page = torture_onoff_stats(page); |
1087 | "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ", | ||
1088 | n_online_successes, n_online_attempts, | ||
1089 | n_offline_successes, n_offline_attempts, | ||
1090 | min_online, max_online, | ||
1091 | min_offline, max_offline, | ||
1092 | sum_online, sum_offline, HZ); | ||
1093 | page += sprintf(page, "barrier: %ld/%ld:%ld", | 969 | page += sprintf(page, "barrier: %ld/%ld:%ld", |
1094 | n_barrier_successes, | 970 | n_barrier_successes, |
1095 | n_barrier_attempts, | 971 | n_barrier_attempts, |
@@ -1150,123 +1026,17 @@ rcu_torture_stats_print(void) | |||
1150 | /* | 1026 | /* |
1151 | * Periodically prints torture statistics, if periodic statistics printing | 1027 | * Periodically prints torture statistics, if periodic statistics printing |
1152 | * was specified via the stat_interval module parameter. | 1028 | * was specified via the stat_interval module parameter. |
1153 | * | ||
1154 | * No need to worry about fullstop here, since this one doesn't reference | ||
1155 | * volatile state or register callbacks. | ||
1156 | */ | 1029 | */ |
1157 | static int | 1030 | static int |
1158 | rcu_torture_stats(void *arg) | 1031 | rcu_torture_stats(void *arg) |
1159 | { | 1032 | { |
1160 | VERBOSE_PRINTK_STRING("rcu_torture_stats task started"); | 1033 | VERBOSE_TOROUT_STRING("rcu_torture_stats task started"); |
1161 | do { | 1034 | do { |
1162 | schedule_timeout_interruptible(stat_interval * HZ); | 1035 | schedule_timeout_interruptible(stat_interval * HZ); |
1163 | rcu_torture_stats_print(); | 1036 | rcu_torture_stats_print(); |
1164 | rcutorture_shutdown_absorb("rcu_torture_stats"); | 1037 | torture_shutdown_absorb("rcu_torture_stats"); |
1165 | } while (!kthread_should_stop()); | 1038 | } while (!torture_must_stop()); |
1166 | VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping"); | 1039 | torture_kthread_stopping("rcu_torture_stats"); |
1167 | return 0; | ||
1168 | } | ||
1169 | |||
1170 | static int rcu_idle_cpu; /* Force all torture tasks off this CPU */ | ||
1171 | |||
1172 | /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case | ||
1173 | * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs. | ||
1174 | */ | ||
1175 | static void rcu_torture_shuffle_tasks(void) | ||
1176 | { | ||
1177 | int i; | ||
1178 | |||
1179 | cpumask_setall(shuffle_tmp_mask); | ||
1180 | get_online_cpus(); | ||
1181 | |||
1182 | /* No point in shuffling if there is only one online CPU (ex: UP) */ | ||
1183 | if (num_online_cpus() == 1) { | ||
1184 | put_online_cpus(); | ||
1185 | return; | ||
1186 | } | ||
1187 | |||
1188 | if (rcu_idle_cpu != -1) | ||
1189 | cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask); | ||
1190 | |||
1191 | set_cpus_allowed_ptr(current, shuffle_tmp_mask); | ||
1192 | |||
1193 | if (reader_tasks) { | ||
1194 | for (i = 0; i < nrealreaders; i++) | ||
1195 | if (reader_tasks[i]) | ||
1196 | set_cpus_allowed_ptr(reader_tasks[i], | ||
1197 | shuffle_tmp_mask); | ||
1198 | } | ||
1199 | if (fakewriter_tasks) { | ||
1200 | for (i = 0; i < nfakewriters; i++) | ||
1201 | if (fakewriter_tasks[i]) | ||
1202 | set_cpus_allowed_ptr(fakewriter_tasks[i], | ||
1203 | shuffle_tmp_mask); | ||
1204 | } | ||
1205 | if (writer_task) | ||
1206 | set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask); | ||
1207 | if (stats_task) | ||
1208 | set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask); | ||
1209 | if (stutter_task) | ||
1210 | set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask); | ||
1211 | if (fqs_task) | ||
1212 | set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask); | ||
1213 | if (shutdown_task) | ||
1214 | set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask); | ||
1215 | #ifdef CONFIG_HOTPLUG_CPU | ||
1216 | if (onoff_task) | ||
1217 | set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask); | ||
1218 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1219 | if (stall_task) | ||
1220 | set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask); | ||
1221 | if (barrier_cbs_tasks) | ||
1222 | for (i = 0; i < n_barrier_cbs; i++) | ||
1223 | if (barrier_cbs_tasks[i]) | ||
1224 | set_cpus_allowed_ptr(barrier_cbs_tasks[i], | ||
1225 | shuffle_tmp_mask); | ||
1226 | if (barrier_task) | ||
1227 | set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask); | ||
1228 | |||
1229 | if (rcu_idle_cpu == -1) | ||
1230 | rcu_idle_cpu = num_online_cpus() - 1; | ||
1231 | else | ||
1232 | rcu_idle_cpu--; | ||
1233 | |||
1234 | put_online_cpus(); | ||
1235 | } | ||
1236 | |||
1237 | /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the | ||
1238 | * system to become idle at a time and cut off its timer ticks. This is meant | ||
1239 | * to test the support for such tickless idle CPU in RCU. | ||
1240 | */ | ||
1241 | static int | ||
1242 | rcu_torture_shuffle(void *arg) | ||
1243 | { | ||
1244 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started"); | ||
1245 | do { | ||
1246 | schedule_timeout_interruptible(shuffle_interval * HZ); | ||
1247 | rcu_torture_shuffle_tasks(); | ||
1248 | rcutorture_shutdown_absorb("rcu_torture_shuffle"); | ||
1249 | } while (!kthread_should_stop()); | ||
1250 | VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping"); | ||
1251 | return 0; | ||
1252 | } | ||
1253 | |||
1254 | /* Cause the rcutorture test to "stutter", starting and stopping all | ||
1255 | * threads periodically. | ||
1256 | */ | ||
1257 | static int | ||
1258 | rcu_torture_stutter(void *arg) | ||
1259 | { | ||
1260 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task started"); | ||
1261 | do { | ||
1262 | schedule_timeout_interruptible(stutter * HZ); | ||
1263 | stutter_pause_test = 1; | ||
1264 | if (!kthread_should_stop()) | ||
1265 | schedule_timeout_interruptible(stutter * HZ); | ||
1266 | stutter_pause_test = 0; | ||
1267 | rcutorture_shutdown_absorb("rcu_torture_stutter"); | ||
1268 | } while (!kthread_should_stop()); | ||
1269 | VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping"); | ||
1270 | return 0; | 1040 | return 0; |
1271 | } | 1041 | } |
1272 | 1042 | ||
@@ -1293,10 +1063,6 @@ rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag) | |||
1293 | onoff_interval, onoff_holdoff); | 1063 | onoff_interval, onoff_holdoff); |
1294 | } | 1064 | } |
1295 | 1065 | ||
1296 | static struct notifier_block rcutorture_shutdown_nb = { | ||
1297 | .notifier_call = rcutorture_shutdown_notify, | ||
1298 | }; | ||
1299 | |||
1300 | static void rcutorture_booster_cleanup(int cpu) | 1066 | static void rcutorture_booster_cleanup(int cpu) |
1301 | { | 1067 | { |
1302 | struct task_struct *t; | 1068 | struct task_struct *t; |
@@ -1304,14 +1070,12 @@ static void rcutorture_booster_cleanup(int cpu) | |||
1304 | if (boost_tasks[cpu] == NULL) | 1070 | if (boost_tasks[cpu] == NULL) |
1305 | return; | 1071 | return; |
1306 | mutex_lock(&boost_mutex); | 1072 | mutex_lock(&boost_mutex); |
1307 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task"); | ||
1308 | t = boost_tasks[cpu]; | 1073 | t = boost_tasks[cpu]; |
1309 | boost_tasks[cpu] = NULL; | 1074 | boost_tasks[cpu] = NULL; |
1310 | mutex_unlock(&boost_mutex); | 1075 | mutex_unlock(&boost_mutex); |
1311 | 1076 | ||
1312 | /* This must be outside of the mutex, otherwise deadlock! */ | 1077 | /* This must be outside of the mutex, otherwise deadlock! */ |
1313 | kthread_stop(t); | 1078 | torture_stop_kthread(rcu_torture_boost, t); |
1314 | boost_tasks[cpu] = NULL; | ||
1315 | } | 1079 | } |
1316 | 1080 | ||
1317 | static int rcutorture_booster_init(int cpu) | 1081 | static int rcutorture_booster_init(int cpu) |
@@ -1323,13 +1087,13 @@ static int rcutorture_booster_init(int cpu) | |||
1323 | 1087 | ||
1324 | /* Don't allow time recalculation while creating a new task. */ | 1088 | /* Don't allow time recalculation while creating a new task. */ |
1325 | mutex_lock(&boost_mutex); | 1089 | mutex_lock(&boost_mutex); |
1326 | VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task"); | 1090 | VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task"); |
1327 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, | 1091 | boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL, |
1328 | cpu_to_node(cpu), | 1092 | cpu_to_node(cpu), |
1329 | "rcu_torture_boost"); | 1093 | "rcu_torture_boost"); |
1330 | if (IS_ERR(boost_tasks[cpu])) { | 1094 | if (IS_ERR(boost_tasks[cpu])) { |
1331 | retval = PTR_ERR(boost_tasks[cpu]); | 1095 | retval = PTR_ERR(boost_tasks[cpu]); |
1332 | VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed"); | 1096 | VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed"); |
1333 | n_rcu_torture_boost_ktrerror++; | 1097 | n_rcu_torture_boost_ktrerror++; |
1334 | boost_tasks[cpu] = NULL; | 1098 | boost_tasks[cpu] = NULL; |
1335 | mutex_unlock(&boost_mutex); | 1099 | mutex_unlock(&boost_mutex); |
@@ -1342,175 +1106,6 @@ static int rcutorture_booster_init(int cpu) | |||
1342 | } | 1106 | } |
1343 | 1107 | ||
1344 | /* | 1108 | /* |
1345 | * Cause the rcutorture test to shutdown the system after the test has | ||
1346 | * run for the time specified by the shutdown_secs module parameter. | ||
1347 | */ | ||
1348 | static int | ||
1349 | rcu_torture_shutdown(void *arg) | ||
1350 | { | ||
1351 | long delta; | ||
1352 | unsigned long jiffies_snap; | ||
1353 | |||
1354 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started"); | ||
1355 | jiffies_snap = jiffies; | ||
1356 | while (ULONG_CMP_LT(jiffies_snap, shutdown_time) && | ||
1357 | !kthread_should_stop()) { | ||
1358 | delta = shutdown_time - jiffies_snap; | ||
1359 | if (verbose) | ||
1360 | pr_alert("%s" TORTURE_FLAG | ||
1361 | "rcu_torture_shutdown task: %lu jiffies remaining\n", | ||
1362 | torture_type, delta); | ||
1363 | schedule_timeout_interruptible(delta); | ||
1364 | jiffies_snap = jiffies; | ||
1365 | } | ||
1366 | if (kthread_should_stop()) { | ||
1367 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping"); | ||
1368 | return 0; | ||
1369 | } | ||
1370 | |||
1371 | /* OK, shut down the system. */ | ||
1372 | |||
1373 | VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system"); | ||
1374 | shutdown_task = NULL; /* Avoid self-kill deadlock. */ | ||
1375 | rcu_torture_cleanup(); /* Get the success/failure message. */ | ||
1376 | kernel_power_off(); /* Shut down the system. */ | ||
1377 | return 0; | ||
1378 | } | ||
1379 | |||
1380 | #ifdef CONFIG_HOTPLUG_CPU | ||
1381 | |||
1382 | /* | ||
1383 | * Execute random CPU-hotplug operations at the interval specified | ||
1384 | * by the onoff_interval. | ||
1385 | */ | ||
1386 | static int | ||
1387 | rcu_torture_onoff(void *arg) | ||
1388 | { | ||
1389 | int cpu; | ||
1390 | unsigned long delta; | ||
1391 | int maxcpu = -1; | ||
1392 | DEFINE_RCU_RANDOM(rand); | ||
1393 | int ret; | ||
1394 | unsigned long starttime; | ||
1395 | |||
1396 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); | ||
1397 | for_each_online_cpu(cpu) | ||
1398 | maxcpu = cpu; | ||
1399 | WARN_ON(maxcpu < 0); | ||
1400 | if (onoff_holdoff > 0) { | ||
1401 | VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff"); | ||
1402 | schedule_timeout_interruptible(onoff_holdoff * HZ); | ||
1403 | VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff"); | ||
1404 | } | ||
1405 | while (!kthread_should_stop()) { | ||
1406 | cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); | ||
1407 | if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { | ||
1408 | if (verbose) | ||
1409 | pr_alert("%s" TORTURE_FLAG | ||
1410 | "rcu_torture_onoff task: offlining %d\n", | ||
1411 | torture_type, cpu); | ||
1412 | starttime = jiffies; | ||
1413 | n_offline_attempts++; | ||
1414 | ret = cpu_down(cpu); | ||
1415 | if (ret) { | ||
1416 | if (verbose) | ||
1417 | pr_alert("%s" TORTURE_FLAG | ||
1418 | "rcu_torture_onoff task: offline %d failed: errno %d\n", | ||
1419 | torture_type, cpu, ret); | ||
1420 | } else { | ||
1421 | if (verbose) | ||
1422 | pr_alert("%s" TORTURE_FLAG | ||
1423 | "rcu_torture_onoff task: offlined %d\n", | ||
1424 | torture_type, cpu); | ||
1425 | n_offline_successes++; | ||
1426 | delta = jiffies - starttime; | ||
1427 | sum_offline += delta; | ||
1428 | if (min_offline < 0) { | ||
1429 | min_offline = delta; | ||
1430 | max_offline = delta; | ||
1431 | } | ||
1432 | if (min_offline > delta) | ||
1433 | min_offline = delta; | ||
1434 | if (max_offline < delta) | ||
1435 | max_offline = delta; | ||
1436 | } | ||
1437 | } else if (cpu_is_hotpluggable(cpu)) { | ||
1438 | if (verbose) | ||
1439 | pr_alert("%s" TORTURE_FLAG | ||
1440 | "rcu_torture_onoff task: onlining %d\n", | ||
1441 | torture_type, cpu); | ||
1442 | starttime = jiffies; | ||
1443 | n_online_attempts++; | ||
1444 | ret = cpu_up(cpu); | ||
1445 | if (ret) { | ||
1446 | if (verbose) | ||
1447 | pr_alert("%s" TORTURE_FLAG | ||
1448 | "rcu_torture_onoff task: online %d failed: errno %d\n", | ||
1449 | torture_type, cpu, ret); | ||
1450 | } else { | ||
1451 | if (verbose) | ||
1452 | pr_alert("%s" TORTURE_FLAG | ||
1453 | "rcu_torture_onoff task: onlined %d\n", | ||
1454 | torture_type, cpu); | ||
1455 | n_online_successes++; | ||
1456 | delta = jiffies - starttime; | ||
1457 | sum_online += delta; | ||
1458 | if (min_online < 0) { | ||
1459 | min_online = delta; | ||
1460 | max_online = delta; | ||
1461 | } | ||
1462 | if (min_online > delta) | ||
1463 | min_online = delta; | ||
1464 | if (max_online < delta) | ||
1465 | max_online = delta; | ||
1466 | } | ||
1467 | } | ||
1468 | schedule_timeout_interruptible(onoff_interval * HZ); | ||
1469 | } | ||
1470 | VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping"); | ||
1471 | return 0; | ||
1472 | } | ||
1473 | |||
1474 | static int | ||
1475 | rcu_torture_onoff_init(void) | ||
1476 | { | ||
1477 | int ret; | ||
1478 | |||
1479 | if (onoff_interval <= 0) | ||
1480 | return 0; | ||
1481 | onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff"); | ||
1482 | if (IS_ERR(onoff_task)) { | ||
1483 | ret = PTR_ERR(onoff_task); | ||
1484 | onoff_task = NULL; | ||
1485 | return ret; | ||
1486 | } | ||
1487 | return 0; | ||
1488 | } | ||
1489 | |||
1490 | static void rcu_torture_onoff_cleanup(void) | ||
1491 | { | ||
1492 | if (onoff_task == NULL) | ||
1493 | return; | ||
1494 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task"); | ||
1495 | kthread_stop(onoff_task); | ||
1496 | onoff_task = NULL; | ||
1497 | } | ||
1498 | |||
1499 | #else /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
1500 | |||
1501 | static int | ||
1502 | rcu_torture_onoff_init(void) | ||
1503 | { | ||
1504 | return 0; | ||
1505 | } | ||
1506 | |||
1507 | static void rcu_torture_onoff_cleanup(void) | ||
1508 | { | ||
1509 | } | ||
1510 | |||
1511 | #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */ | ||
1512 | |||
1513 | /* | ||
1514 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then | 1109 | * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then |
1515 | * induces a CPU stall for the time specified by stall_cpu. | 1110 | * induces a CPU stall for the time specified by stall_cpu. |
1516 | */ | 1111 | */ |
@@ -1518,11 +1113,11 @@ static int rcu_torture_stall(void *args) | |||
1518 | { | 1113 | { |
1519 | unsigned long stop_at; | 1114 | unsigned long stop_at; |
1520 | 1115 | ||
1521 | VERBOSE_PRINTK_STRING("rcu_torture_stall task started"); | 1116 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started"); |
1522 | if (stall_cpu_holdoff > 0) { | 1117 | if (stall_cpu_holdoff > 0) { |
1523 | VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff"); | 1118 | VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff"); |
1524 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); | 1119 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ); |
1525 | VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff"); | 1120 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff"); |
1526 | } | 1121 | } |
1527 | if (!kthread_should_stop()) { | 1122 | if (!kthread_should_stop()) { |
1528 | stop_at = get_seconds() + stall_cpu; | 1123 | stop_at = get_seconds() + stall_cpu; |
@@ -1536,7 +1131,7 @@ static int rcu_torture_stall(void *args) | |||
1536 | rcu_read_unlock(); | 1131 | rcu_read_unlock(); |
1537 | pr_alert("rcu_torture_stall end.\n"); | 1132 | pr_alert("rcu_torture_stall end.\n"); |
1538 | } | 1133 | } |
1539 | rcutorture_shutdown_absorb("rcu_torture_stall"); | 1134 | torture_shutdown_absorb("rcu_torture_stall"); |
1540 | while (!kthread_should_stop()) | 1135 | while (!kthread_should_stop()) |
1541 | schedule_timeout_interruptible(10 * HZ); | 1136 | schedule_timeout_interruptible(10 * HZ); |
1542 | return 0; | 1137 | return 0; |
@@ -1545,27 +1140,9 @@ static int rcu_torture_stall(void *args) | |||
1545 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ | 1140 | /* Spawn CPU-stall kthread, if stall_cpu specified. */ |
1546 | static int __init rcu_torture_stall_init(void) | 1141 | static int __init rcu_torture_stall_init(void) |
1547 | { | 1142 | { |
1548 | int ret; | ||
1549 | |||
1550 | if (stall_cpu <= 0) | 1143 | if (stall_cpu <= 0) |
1551 | return 0; | 1144 | return 0; |
1552 | stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall"); | 1145 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task); |
1553 | if (IS_ERR(stall_task)) { | ||
1554 | ret = PTR_ERR(stall_task); | ||
1555 | stall_task = NULL; | ||
1556 | return ret; | ||
1557 | } | ||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
1561 | /* Clean up after the CPU-stall kthread, if one was spawned. */ | ||
1562 | static void rcu_torture_stall_cleanup(void) | ||
1563 | { | ||
1564 | if (stall_task == NULL) | ||
1565 | return; | ||
1566 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall_task."); | ||
1567 | kthread_stop(stall_task); | ||
1568 | stall_task = NULL; | ||
1569 | } | 1146 | } |
1570 | 1147 | ||
1571 | /* Callback function for RCU barrier testing. */ | 1148 | /* Callback function for RCU barrier testing. */ |
@@ -1583,28 +1160,24 @@ static int rcu_torture_barrier_cbs(void *arg) | |||
1583 | struct rcu_head rcu; | 1160 | struct rcu_head rcu; |
1584 | 1161 | ||
1585 | init_rcu_head_on_stack(&rcu); | 1162 | init_rcu_head_on_stack(&rcu); |
1586 | VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task started"); | 1163 | VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started"); |
1587 | set_user_nice(current, 19); | 1164 | set_user_nice(current, 19); |
1588 | do { | 1165 | do { |
1589 | wait_event(barrier_cbs_wq[myid], | 1166 | wait_event(barrier_cbs_wq[myid], |
1590 | (newphase = | 1167 | (newphase = |
1591 | ACCESS_ONCE(barrier_phase)) != lastphase || | 1168 | ACCESS_ONCE(barrier_phase)) != lastphase || |
1592 | kthread_should_stop() || | 1169 | torture_must_stop()); |
1593 | fullstop != FULLSTOP_DONTSTOP); | ||
1594 | lastphase = newphase; | 1170 | lastphase = newphase; |
1595 | smp_mb(); /* ensure barrier_phase load before ->call(). */ | 1171 | smp_mb(); /* ensure barrier_phase load before ->call(). */ |
1596 | if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) | 1172 | if (torture_must_stop()) |
1597 | break; | 1173 | break; |
1598 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); | 1174 | cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
1599 | if (atomic_dec_and_test(&barrier_cbs_count)) | 1175 | if (atomic_dec_and_test(&barrier_cbs_count)) |
1600 | wake_up(&barrier_wq); | 1176 | wake_up(&barrier_wq); |
1601 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 1177 | } while (!torture_must_stop()); |
1602 | VERBOSE_PRINTK_STRING("rcu_torture_barrier_cbs task stopping"); | ||
1603 | rcutorture_shutdown_absorb("rcu_torture_barrier_cbs"); | ||
1604 | while (!kthread_should_stop()) | ||
1605 | schedule_timeout_interruptible(1); | ||
1606 | cur_ops->cb_barrier(); | 1178 | cur_ops->cb_barrier(); |
1607 | destroy_rcu_head_on_stack(&rcu); | 1179 | destroy_rcu_head_on_stack(&rcu); |
1180 | torture_kthread_stopping("rcu_torture_barrier_cbs"); | ||
1608 | return 0; | 1181 | return 0; |
1609 | } | 1182 | } |
1610 | 1183 | ||
@@ -1613,7 +1186,7 @@ static int rcu_torture_barrier(void *arg) | |||
1613 | { | 1186 | { |
1614 | int i; | 1187 | int i; |
1615 | 1188 | ||
1616 | VERBOSE_PRINTK_STRING("rcu_torture_barrier task starting"); | 1189 | VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting"); |
1617 | do { | 1190 | do { |
1618 | atomic_set(&barrier_cbs_invoked, 0); | 1191 | atomic_set(&barrier_cbs_invoked, 0); |
1619 | atomic_set(&barrier_cbs_count, n_barrier_cbs); | 1192 | atomic_set(&barrier_cbs_count, n_barrier_cbs); |
@@ -1623,9 +1196,8 @@ static int rcu_torture_barrier(void *arg) | |||
1623 | wake_up(&barrier_cbs_wq[i]); | 1196 | wake_up(&barrier_cbs_wq[i]); |
1624 | wait_event(barrier_wq, | 1197 | wait_event(barrier_wq, |
1625 | atomic_read(&barrier_cbs_count) == 0 || | 1198 | atomic_read(&barrier_cbs_count) == 0 || |
1626 | kthread_should_stop() || | 1199 | torture_must_stop()); |
1627 | fullstop != FULLSTOP_DONTSTOP); | 1200 | if (torture_must_stop()) |
1628 | if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP) | ||
1629 | break; | 1201 | break; |
1630 | n_barrier_attempts++; | 1202 | n_barrier_attempts++; |
1631 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ | 1203 | cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */ |
@@ -1635,11 +1207,8 @@ static int rcu_torture_barrier(void *arg) | |||
1635 | } | 1207 | } |
1636 | n_barrier_successes++; | 1208 | n_barrier_successes++; |
1637 | schedule_timeout_interruptible(HZ / 10); | 1209 | schedule_timeout_interruptible(HZ / 10); |
1638 | } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP); | 1210 | } while (!torture_must_stop()); |
1639 | VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping"); | 1211 | torture_kthread_stopping("rcu_torture_barrier"); |
1640 | rcutorture_shutdown_absorb("rcu_torture_barrier"); | ||
1641 | while (!kthread_should_stop()) | ||
1642 | schedule_timeout_interruptible(1); | ||
1643 | return 0; | 1212 | return 0; |
1644 | } | 1213 | } |
1645 | 1214 | ||
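Editorial note: the rcu_torture_barrier() hunk also shows the handshake that survives the conversion unchanged, the flag task arms an atomic count of outstanding callbacks, wakes each callback kthread, and sleeps until the last one decrements the count to zero, with torture_must_stop() folded into both waits. A condensed sketch of that master/worker handshake, using hypothetical names rather than the rcutorture symbols:

    #include <linux/wait.h>
    #include <linux/atomic.h>
    #include <linux/torture.h>

    static DECLARE_WAIT_QUEUE_HEAD(master_wq);
    static atomic_t workers_left;

    /* Worker side: after finishing one unit of work, report completion. */
    static void example_worker_done(void)
    {
        if (atomic_dec_and_test(&workers_left))
            wake_up(&master_wq);    /* last worker wakes the master */
    }

    /* Master side: arm the counter, kick the workers, wait for all of them. */
    static bool example_run_one_round(int nworkers)
    {
        atomic_set(&workers_left, nworkers);
        /* ...advance the phase and wake each worker's wait queue here... */
        wait_event(master_wq,
                   atomic_read(&workers_left) == 0 || torture_must_stop());
        return !torture_must_stop();    /* true if the round completed */
    }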
@@ -1672,24 +1241,13 @@ static int rcu_torture_barrier_init(void) | |||
1672 | return -ENOMEM; | 1241 | return -ENOMEM; |
1673 | for (i = 0; i < n_barrier_cbs; i++) { | 1242 | for (i = 0; i < n_barrier_cbs; i++) { |
1674 | init_waitqueue_head(&barrier_cbs_wq[i]); | 1243 | init_waitqueue_head(&barrier_cbs_wq[i]); |
1675 | barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs, | 1244 | ret = torture_create_kthread(rcu_torture_barrier_cbs, |
1676 | (void *)(long)i, | 1245 | (void *)(long)i, |
1677 | "rcu_torture_barrier_cbs"); | 1246 | barrier_cbs_tasks[i]); |
1678 | if (IS_ERR(barrier_cbs_tasks[i])) { | 1247 | if (ret) |
1679 | ret = PTR_ERR(barrier_cbs_tasks[i]); | ||
1680 | VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier_cbs"); | ||
1681 | barrier_cbs_tasks[i] = NULL; | ||
1682 | return ret; | 1248 | return ret; |
1683 | } | ||
1684 | } | 1249 | } |
1685 | barrier_task = kthread_run(rcu_torture_barrier, NULL, | 1250 | return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task); |
1686 | "rcu_torture_barrier"); | ||
1687 | if (IS_ERR(barrier_task)) { | ||
1688 | ret = PTR_ERR(barrier_task); | ||
1689 | VERBOSE_PRINTK_ERRSTRING("Failed to create rcu_torture_barrier"); | ||
1690 | barrier_task = NULL; | ||
1691 | } | ||
1692 | return 0; | ||
1693 | } | 1251 | } |
1694 | 1252 | ||
1695 | /* Clean up after RCU barrier testing. */ | 1253 | /* Clean up after RCU barrier testing. */ |
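Editorial note: in rcu_torture_barrier_init() above, torture_create_kthread() collapses the former kthread_run()/IS_ERR()/PTR_ERR() block: the macro logs the creation, starts the kthread, stores it in the task pointer named by its last argument (clearing it on failure), and returns zero or a negative errno. A hedged sketch of spawning an array of workers this way; example_worker() and example_tasks are illustrative names only.

    #include <linux/slab.h>
    #include <linux/torture.h>

    static struct task_struct **example_tasks;
    static int example_worker(void *arg);    /* hypothetical kthread function */

    static int example_workers_init(int nworkers)
    {
        int i, ret;

        example_tasks = kcalloc(nworkers, sizeof(example_tasks[0]), GFP_KERNEL);
        if (example_tasks == NULL)
            return -ENOMEM;
        for (i = 0; i < nworkers; i++) {
            /* Logs, runs the kthread, fills example_tasks[i], or returns -errno. */
            ret = torture_create_kthread(example_worker,
                                         (void *)(long)i, example_tasks[i]);
            if (ret)
                return ret;
        }
        return 0;
    }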
@@ -1697,19 +1255,11 @@ static void rcu_torture_barrier_cleanup(void) | |||
1697 | { | 1255 | { |
1698 | int i; | 1256 | int i; |
1699 | 1257 | ||
1700 | if (barrier_task != NULL) { | 1258 | torture_stop_kthread(rcu_torture_barrier, barrier_task); |
1701 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier task"); | ||
1702 | kthread_stop(barrier_task); | ||
1703 | barrier_task = NULL; | ||
1704 | } | ||
1705 | if (barrier_cbs_tasks != NULL) { | 1259 | if (barrier_cbs_tasks != NULL) { |
1706 | for (i = 0; i < n_barrier_cbs; i++) { | 1260 | for (i = 0; i < n_barrier_cbs; i++) |
1707 | if (barrier_cbs_tasks[i] != NULL) { | 1261 | torture_stop_kthread(rcu_torture_barrier_cbs, |
1708 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_barrier_cbs task"); | 1262 | barrier_cbs_tasks[i]); |
1709 | kthread_stop(barrier_cbs_tasks[i]); | ||
1710 | barrier_cbs_tasks[i] = NULL; | ||
1711 | } | ||
1712 | } | ||
1713 | kfree(barrier_cbs_tasks); | 1263 | kfree(barrier_cbs_tasks); |
1714 | barrier_cbs_tasks = NULL; | 1264 | barrier_cbs_tasks = NULL; |
1715 | } | 1265 | } |
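Editorial note: the cleanup hunk is the mirror image, torture_stop_kthread() checks the task pointer, logs, calls kthread_stop(), and clears the pointer, so stopping a thread is again a single line. Continuing the hypothetical names from the previous sketch:

    static void example_workers_cleanup(int nworkers)
    {
        int i;

        if (example_tasks == NULL)
            return;
        for (i = 0; i < nworkers; i++)
            torture_stop_kthread(example_worker, example_tasks[i]);  /* no-op if never created */
        kfree(example_tasks);
        example_tasks = NULL;
    }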
@@ -1747,90 +1297,42 @@ rcu_torture_cleanup(void) | |||
1747 | { | 1297 | { |
1748 | int i; | 1298 | int i; |
1749 | 1299 | ||
1750 | mutex_lock(&fullstop_mutex); | ||
1751 | rcutorture_record_test_transition(); | 1300 | rcutorture_record_test_transition(); |
1752 | if (fullstop == FULLSTOP_SHUTDOWN) { | 1301 | if (torture_cleanup()) { |
1753 | pr_warn(/* but going down anyway, so... */ | ||
1754 | "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); | ||
1755 | mutex_unlock(&fullstop_mutex); | ||
1756 | schedule_timeout_uninterruptible(10); | ||
1757 | if (cur_ops->cb_barrier != NULL) | 1302 | if (cur_ops->cb_barrier != NULL) |
1758 | cur_ops->cb_barrier(); | 1303 | cur_ops->cb_barrier(); |
1759 | return; | 1304 | return; |
1760 | } | 1305 | } |
1761 | fullstop = FULLSTOP_RMMOD; | ||
1762 | mutex_unlock(&fullstop_mutex); | ||
1763 | unregister_reboot_notifier(&rcutorture_shutdown_nb); | ||
1764 | rcu_torture_barrier_cleanup(); | ||
1765 | rcu_torture_stall_cleanup(); | ||
1766 | if (stutter_task) { | ||
1767 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task"); | ||
1768 | kthread_stop(stutter_task); | ||
1769 | } | ||
1770 | stutter_task = NULL; | ||
1771 | if (shuffler_task) { | ||
1772 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); | ||
1773 | kthread_stop(shuffler_task); | ||
1774 | free_cpumask_var(shuffle_tmp_mask); | ||
1775 | } | ||
1776 | shuffler_task = NULL; | ||
1777 | 1306 | ||
1778 | if (writer_task) { | 1307 | rcu_torture_barrier_cleanup(); |
1779 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task"); | 1308 | torture_stop_kthread(rcu_torture_stall, stall_task); |
1780 | kthread_stop(writer_task); | 1309 | torture_stop_kthread(rcu_torture_writer, writer_task); |
1781 | } | ||
1782 | writer_task = NULL; | ||
1783 | 1310 | ||
1784 | if (reader_tasks) { | 1311 | if (reader_tasks) { |
1785 | for (i = 0; i < nrealreaders; i++) { | 1312 | for (i = 0; i < nrealreaders; i++) |
1786 | if (reader_tasks[i]) { | 1313 | torture_stop_kthread(rcu_torture_reader, |
1787 | VERBOSE_PRINTK_STRING( | 1314 | reader_tasks[i]); |
1788 | "Stopping rcu_torture_reader task"); | ||
1789 | kthread_stop(reader_tasks[i]); | ||
1790 | } | ||
1791 | reader_tasks[i] = NULL; | ||
1792 | } | ||
1793 | kfree(reader_tasks); | 1315 | kfree(reader_tasks); |
1794 | reader_tasks = NULL; | ||
1795 | } | 1316 | } |
1796 | rcu_torture_current = NULL; | 1317 | rcu_torture_current = NULL; |
1797 | 1318 | ||
1798 | if (fakewriter_tasks) { | 1319 | if (fakewriter_tasks) { |
1799 | for (i = 0; i < nfakewriters; i++) { | 1320 | for (i = 0; i < nfakewriters; i++) { |
1800 | if (fakewriter_tasks[i]) { | 1321 | torture_stop_kthread(rcu_torture_fakewriter, |
1801 | VERBOSE_PRINTK_STRING( | 1322 | fakewriter_tasks[i]); |
1802 | "Stopping rcu_torture_fakewriter task"); | ||
1803 | kthread_stop(fakewriter_tasks[i]); | ||
1804 | } | ||
1805 | fakewriter_tasks[i] = NULL; | ||
1806 | } | 1323 | } |
1807 | kfree(fakewriter_tasks); | 1324 | kfree(fakewriter_tasks); |
1808 | fakewriter_tasks = NULL; | 1325 | fakewriter_tasks = NULL; |
1809 | } | 1326 | } |
1810 | 1327 | ||
1811 | if (stats_task) { | 1328 | torture_stop_kthread(rcu_torture_stats, stats_task); |
1812 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); | 1329 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
1813 | kthread_stop(stats_task); | ||
1814 | } | ||
1815 | stats_task = NULL; | ||
1816 | |||
1817 | if (fqs_task) { | ||
1818 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task"); | ||
1819 | kthread_stop(fqs_task); | ||
1820 | } | ||
1821 | fqs_task = NULL; | ||
1822 | if ((test_boost == 1 && cur_ops->can_boost) || | 1330 | if ((test_boost == 1 && cur_ops->can_boost) || |
1823 | test_boost == 2) { | 1331 | test_boost == 2) { |
1824 | unregister_cpu_notifier(&rcutorture_cpu_nb); | 1332 | unregister_cpu_notifier(&rcutorture_cpu_nb); |
1825 | for_each_possible_cpu(i) | 1333 | for_each_possible_cpu(i) |
1826 | rcutorture_booster_cleanup(i); | 1334 | rcutorture_booster_cleanup(i); |
1827 | } | 1335 | } |
1828 | if (shutdown_task != NULL) { | ||
1829 | VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task"); | ||
1830 | kthread_stop(shutdown_task); | ||
1831 | } | ||
1832 | shutdown_task = NULL; | ||
1833 | rcu_torture_onoff_cleanup(); | ||
1834 | 1336 | ||
1835 | /* Wait for all RCU callbacks to fire. */ | 1337 | /* Wait for all RCU callbacks to fire. */ |
1836 | 1338 | ||
@@ -1841,8 +1343,7 @@ rcu_torture_cleanup(void) | |||
1841 | 1343 | ||
1842 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) | 1344 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
1843 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); | 1345 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
1844 | else if (n_online_successes != n_online_attempts || | 1346 | else if (torture_onoff_failures()) |
1845 | n_offline_successes != n_offline_attempts) | ||
1846 | rcu_torture_print_module_parms(cur_ops, | 1347 | rcu_torture_print_module_parms(cur_ops, |
1847 | "End of test: RCU_HOTPLUG"); | 1348 | "End of test: RCU_HOTPLUG"); |
1848 | else | 1349 | else |
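Editorial note: in rcu_torture_cleanup() above, the fullstop state machine, its mutex, and the reboot-notifier teardown are folded into one torture_cleanup() call, which reports whether a concurrent system shutdown already owns the teardown, while torture_onoff_failures() replaces the hand-rolled comparison of online/offline attempt and success counters. A sketch of the resulting module-exit shape, again with hypothetical example_* names:

    static void example_torture_cleanup(void)
    {
        /* True means a system shutdown is already tearing us down. */
        if (torture_cleanup())
            return;

        example_workers_cleanup(4);    /* stop this module's own kthreads */

        if (torture_onoff_failures())
            pr_alert("example-torture: end of test, CPU-hotplug failures seen\n");
        else
            pr_alert("example-torture: end of test, SUCCESS\n");
    }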
@@ -1911,12 +1412,11 @@ rcu_torture_init(void) | |||
1911 | int i; | 1412 | int i; |
1912 | int cpu; | 1413 | int cpu; |
1913 | int firsterr = 0; | 1414 | int firsterr = 0; |
1914 | int retval; | ||
1915 | static struct rcu_torture_ops *torture_ops[] = { | 1415 | static struct rcu_torture_ops *torture_ops[] = { |
1916 | &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops, | 1416 | &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops, |
1917 | }; | 1417 | }; |
1918 | 1418 | ||
1919 | mutex_lock(&fullstop_mutex); | 1419 | torture_init_begin(torture_type, verbose, &rcutorture_runnable); |
1920 | 1420 | ||
1921 | /* Process args and tell the world that the torturer is on the job. */ | 1421 | /* Process args and tell the world that the torturer is on the job. */ |
1922 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { | 1422 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) { |
@@ -1931,7 +1431,7 @@ rcu_torture_init(void) | |||
1931 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) | 1431 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
1932 | pr_alert(" %s", torture_ops[i]->name); | 1432 | pr_alert(" %s", torture_ops[i]->name); |
1933 | pr_alert("\n"); | 1433 | pr_alert("\n"); |
1934 | mutex_unlock(&fullstop_mutex); | 1434 | torture_init_end(); |
1935 | return -EINVAL; | 1435 | return -EINVAL; |
1936 | } | 1436 | } |
1937 | if (cur_ops->fqs == NULL && fqs_duration != 0) { | 1437 | if (cur_ops->fqs == NULL && fqs_duration != 0) { |
@@ -1946,7 +1446,6 @@ rcu_torture_init(void) | |||
1946 | else | 1446 | else |
1947 | nrealreaders = 2 * num_online_cpus(); | 1447 | nrealreaders = 2 * num_online_cpus(); |
1948 | rcu_torture_print_module_parms(cur_ops, "Start of test"); | 1448 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
1949 | fullstop = FULLSTOP_DONTSTOP; | ||
1950 | 1449 | ||
1951 | /* Set up the freelist. */ | 1450 | /* Set up the freelist. */ |
1952 | 1451 | ||
@@ -1982,108 +1481,61 @@ rcu_torture_init(void) | |||
1982 | 1481 | ||
1983 | /* Start up the kthreads. */ | 1482 | /* Start up the kthreads. */ |
1984 | 1483 | ||
1985 | VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); | 1484 | firsterr = torture_create_kthread(rcu_torture_writer, NULL, |
1986 | writer_task = kthread_create(rcu_torture_writer, NULL, | 1485 | writer_task); |
1987 | "rcu_torture_writer"); | 1486 | if (firsterr) |
1988 | if (IS_ERR(writer_task)) { | ||
1989 | firsterr = PTR_ERR(writer_task); | ||
1990 | VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); | ||
1991 | writer_task = NULL; | ||
1992 | goto unwind; | 1487 | goto unwind; |
1993 | } | ||
1994 | wake_up_process(writer_task); | ||
1995 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), | 1488 | fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), |
1996 | GFP_KERNEL); | 1489 | GFP_KERNEL); |
1997 | if (fakewriter_tasks == NULL) { | 1490 | if (fakewriter_tasks == NULL) { |
1998 | VERBOSE_PRINTK_ERRSTRING("out of memory"); | 1491 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
1999 | firsterr = -ENOMEM; | 1492 | firsterr = -ENOMEM; |
2000 | goto unwind; | 1493 | goto unwind; |
2001 | } | 1494 | } |
2002 | for (i = 0; i < nfakewriters; i++) { | 1495 | for (i = 0; i < nfakewriters; i++) { |
2003 | VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task"); | 1496 | firsterr = torture_create_kthread(rcu_torture_fakewriter, |
2004 | fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL, | 1497 | NULL, fakewriter_tasks[i]); |
2005 | "rcu_torture_fakewriter"); | 1498 | if (firsterr) |
2006 | if (IS_ERR(fakewriter_tasks[i])) { | ||
2007 | firsterr = PTR_ERR(fakewriter_tasks[i]); | ||
2008 | VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter"); | ||
2009 | fakewriter_tasks[i] = NULL; | ||
2010 | goto unwind; | 1499 | goto unwind; |
2011 | } | ||
2012 | } | 1500 | } |
2013 | reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), | 1501 | reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]), |
2014 | GFP_KERNEL); | 1502 | GFP_KERNEL); |
2015 | if (reader_tasks == NULL) { | 1503 | if (reader_tasks == NULL) { |
2016 | VERBOSE_PRINTK_ERRSTRING("out of memory"); | 1504 | VERBOSE_TOROUT_ERRSTRING("out of memory"); |
2017 | firsterr = -ENOMEM; | 1505 | firsterr = -ENOMEM; |
2018 | goto unwind; | 1506 | goto unwind; |
2019 | } | 1507 | } |
2020 | for (i = 0; i < nrealreaders; i++) { | 1508 | for (i = 0; i < nrealreaders; i++) { |
2021 | VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task"); | 1509 | firsterr = torture_create_kthread(rcu_torture_reader, NULL, |
2022 | reader_tasks[i] = kthread_run(rcu_torture_reader, NULL, | 1510 | reader_tasks[i]); |
2023 | "rcu_torture_reader"); | 1511 | if (firsterr) |
2024 | if (IS_ERR(reader_tasks[i])) { | ||
2025 | firsterr = PTR_ERR(reader_tasks[i]); | ||
2026 | VERBOSE_PRINTK_ERRSTRING("Failed to create reader"); | ||
2027 | reader_tasks[i] = NULL; | ||
2028 | goto unwind; | 1512 | goto unwind; |
2029 | } | ||
2030 | } | 1513 | } |
2031 | if (stat_interval > 0) { | 1514 | if (stat_interval > 0) { |
2032 | VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task"); | 1515 | firsterr = torture_create_kthread(rcu_torture_stats, NULL, |
2033 | stats_task = kthread_run(rcu_torture_stats, NULL, | 1516 | stats_task); |
2034 | "rcu_torture_stats"); | 1517 | if (firsterr) |
2035 | if (IS_ERR(stats_task)) { | ||
2036 | firsterr = PTR_ERR(stats_task); | ||
2037 | VERBOSE_PRINTK_ERRSTRING("Failed to create stats"); | ||
2038 | stats_task = NULL; | ||
2039 | goto unwind; | 1518 | goto unwind; |
2040 | } | ||
2041 | } | 1519 | } |
2042 | if (test_no_idle_hz) { | 1520 | if (test_no_idle_hz) { |
2043 | rcu_idle_cpu = num_online_cpus() - 1; | 1521 | firsterr = torture_shuffle_init(shuffle_interval * HZ); |
2044 | 1522 | if (firsterr) | |
2045 | if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) { | ||
2046 | firsterr = -ENOMEM; | ||
2047 | VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask"); | ||
2048 | goto unwind; | ||
2049 | } | ||
2050 | |||
2051 | /* Create the shuffler thread */ | ||
2052 | shuffler_task = kthread_run(rcu_torture_shuffle, NULL, | ||
2053 | "rcu_torture_shuffle"); | ||
2054 | if (IS_ERR(shuffler_task)) { | ||
2055 | free_cpumask_var(shuffle_tmp_mask); | ||
2056 | firsterr = PTR_ERR(shuffler_task); | ||
2057 | VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); | ||
2058 | shuffler_task = NULL; | ||
2059 | goto unwind; | 1523 | goto unwind; |
2060 | } | ||
2061 | } | 1524 | } |
2062 | if (stutter < 0) | 1525 | if (stutter < 0) |
2063 | stutter = 0; | 1526 | stutter = 0; |
2064 | if (stutter) { | 1527 | if (stutter) { |
2065 | /* Create the stutter thread */ | 1528 | firsterr = torture_stutter_init(stutter * HZ); |
2066 | stutter_task = kthread_run(rcu_torture_stutter, NULL, | 1529 | if (firsterr) |
2067 | "rcu_torture_stutter"); | ||
2068 | if (IS_ERR(stutter_task)) { | ||
2069 | firsterr = PTR_ERR(stutter_task); | ||
2070 | VERBOSE_PRINTK_ERRSTRING("Failed to create stutter"); | ||
2071 | stutter_task = NULL; | ||
2072 | goto unwind; | 1530 | goto unwind; |
2073 | } | ||
2074 | } | 1531 | } |
2075 | if (fqs_duration < 0) | 1532 | if (fqs_duration < 0) |
2076 | fqs_duration = 0; | 1533 | fqs_duration = 0; |
2077 | if (fqs_duration) { | 1534 | if (fqs_duration) { |
2078 | /* Create the stutter thread */ | 1535 | /* Create the fqs thread */ |
2079 | fqs_task = kthread_run(rcu_torture_fqs, NULL, | 1536 | firsterr = torture_create_kthread(rcu_torture_fqs, NULL, fqs_task); |
2080 | "rcu_torture_fqs"); | 1537 | if (firsterr) |
2081 | if (IS_ERR(fqs_task)) { | ||
2082 | firsterr = PTR_ERR(fqs_task); | ||
2083 | VERBOSE_PRINTK_ERRSTRING("Failed to create fqs"); | ||
2084 | fqs_task = NULL; | ||
2085 | goto unwind; | 1538 | goto unwind; |
2086 | } | ||
2087 | } | 1539 | } |
2088 | if (test_boost_interval < 1) | 1540 | if (test_boost_interval < 1) |
2089 | test_boost_interval = 1; | 1541 | test_boost_interval = 1; |
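Editorial note: the same init hunk swaps the open-coded shuffler and stutter threads for torture_shuffle_init() and torture_stutter_init(), both of which take an interval in jiffies and return zero or a negative errno, so they drop straight into the firsterr/goto-unwind idiom. Illustrative calls with arbitrary example values (real callers scale module parameters by HZ):

    firsterr = torture_shuffle_init(3 * HZ);    /* re-shuffle task affinity every 3 s */
    if (firsterr)
        goto unwind;
    firsterr = torture_stutter_init(5 * HZ);    /* pause and resume the test in 5 s bursts */
    if (firsterr)
        goto unwind;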
@@ -2097,49 +1549,31 @@ rcu_torture_init(void) | |||
2097 | for_each_possible_cpu(i) { | 1549 | for_each_possible_cpu(i) { |
2098 | if (cpu_is_offline(i)) | 1550 | if (cpu_is_offline(i)) |
2099 | continue; /* Heuristic: CPU can go offline. */ | 1551 | continue; /* Heuristic: CPU can go offline. */ |
2100 | retval = rcutorture_booster_init(i); | 1552 | firsterr = rcutorture_booster_init(i); |
2101 | if (retval < 0) { | 1553 | if (firsterr) |
2102 | firsterr = retval; | ||
2103 | goto unwind; | 1554 | goto unwind; |
2104 | } | ||
2105 | } | 1555 | } |
2106 | } | 1556 | } |
2107 | if (shutdown_secs > 0) { | 1557 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
2108 | shutdown_time = jiffies + shutdown_secs * HZ; | 1558 | if (firsterr) |
2109 | shutdown_task = kthread_create(rcu_torture_shutdown, NULL, | ||
2110 | "rcu_torture_shutdown"); | ||
2111 | if (IS_ERR(shutdown_task)) { | ||
2112 | firsterr = PTR_ERR(shutdown_task); | ||
2113 | VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); | ||
2114 | shutdown_task = NULL; | ||
2115 | goto unwind; | ||
2116 | } | ||
2117 | wake_up_process(shutdown_task); | ||
2118 | } | ||
2119 | i = rcu_torture_onoff_init(); | ||
2120 | if (i != 0) { | ||
2121 | firsterr = i; | ||
2122 | goto unwind; | 1559 | goto unwind; |
2123 | } | 1560 | firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval * HZ); |
2124 | register_reboot_notifier(&rcutorture_shutdown_nb); | 1561 | if (firsterr) |
2125 | i = rcu_torture_stall_init(); | ||
2126 | if (i != 0) { | ||
2127 | firsterr = i; | ||
2128 | goto unwind; | 1562 | goto unwind; |
2129 | } | 1563 | firsterr = rcu_torture_stall_init(); |
2130 | retval = rcu_torture_barrier_init(); | 1564 | if (firsterr) |
2131 | if (retval != 0) { | 1565 | goto unwind; |
2132 | firsterr = retval; | 1566 | firsterr = rcu_torture_barrier_init(); |
1567 | if (firsterr) | ||
2133 | goto unwind; | 1568 | goto unwind; |
2134 | } | ||
2135 | if (object_debug) | 1569 | if (object_debug) |
2136 | rcu_test_debug_objects(); | 1570 | rcu_test_debug_objects(); |
2137 | rcutorture_record_test_transition(); | 1571 | rcutorture_record_test_transition(); |
2138 | mutex_unlock(&fullstop_mutex); | 1572 | torture_init_end(); |
2139 | return 0; | 1573 | return 0; |
2140 | 1574 | ||
2141 | unwind: | 1575 | unwind: |
2142 | mutex_unlock(&fullstop_mutex); | 1576 | torture_init_end(); |
2143 | rcu_torture_cleanup(); | 1577 | rcu_torture_cleanup(); |
2144 | return firsterr; | 1578 | return firsterr; |
2145 | } | 1579 | } |
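Editorial note: taken together, the converted rcu_torture_init() settles into a skeleton that other torture clients can copy: bracket initialization with torture_init_begin()/torture_init_end(), hand shutdown and CPU-hotplug duties to torture_shutdown_init() (seconds plus a cleanup callback) and torture_onoff_init() (holdoff and interval in jiffies), and funnel every failure through a single unwind label. A compressed, hypothetical sketch of that skeleton, not the rcutorture code itself:

    static char *example_type = "example";
    static int example_runnable = 1;

    static int __init example_torture_init(void)
    {
        int firsterr = 0;

        /* Serializes init against shutdown; records type, verbosity, runnable flag. */
        torture_init_begin(example_type, true, &example_runnable);

        firsterr = example_workers_init(4);
        if (firsterr)
            goto unwind;
        firsterr = torture_shutdown_init(120, example_torture_cleanup);  /* auto-shutdown after 120 s */
        if (firsterr)
            goto unwind;
        firsterr = torture_onoff_init(30 * HZ, 3 * HZ);  /* hotplug: 30 s holdoff, every 3 s */
        if (firsterr)
            goto unwind;

        torture_init_end();
        return 0;

    unwind:
        torture_init_end();
        example_torture_cleanup();
        return firsterr;
    }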