author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-12-31 21:33:22 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2016-03-31 16:37:38 -0400
commit    8704baab9bc848b58c129fed6b591bb84ec02f41 (patch)
tree      098899e1e6a460227348ae6012b573df4f38266b
parent    291783b8ad77a83a6fdf91d55eee7f1ad72ed4d1 (diff)
rcutorture: Add RCU grace-period performance tests
This commit adds a new rcuperf module that carries out simple performance tests of RCU grace periods.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--  kernel/rcu/Makefile  |   1
-rw-r--r--  kernel/rcu/rcuperf.c | 637
-rw-r--r--  lib/Kconfig.debug    |  33
3 files changed, 671 insertions(+), 0 deletions(-)
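For context, a minimal usage sketch (not taken from the commit itself): once the kernel under test is built with the Kconfig option added below, the module would presumably be loaded with the desired parameters, whose names come from the module_param() and torture_param() declarations in rcuperf.c:

    # Kernel under test built with CONFIG_RCU_PERF_TEST=m, then:
    modprobe rcuperf perf_type=rcu nwriters=4 gp_exp=0 shutdown=1

With shutdown=1, the rcu_perf_shutdown() kthread powers the system off once every writer kthread has completed at least MIN_MEAS (100) timed grace periods; results appear on the console via pr_alert().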
diff --git a/kernel/rcu/Makefile b/kernel/rcu/Makefile
index 032b2c015beb..18dfc485225c 100644
--- a/kernel/rcu/Makefile
+++ b/kernel/rcu/Makefile
@@ -5,6 +5,7 @@ KCOV_INSTRUMENT := n
 obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
new file mode 100644
index 000000000000..9d54a57bee7d
--- /dev/null
+++ b/kernel/rcu/rcuperf.c
@@ -0,0 +1,637 @@
/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
        pr_alert("%s" PERF_FLAG s "\n", perf_type)
#define VERBOSE_PERFOUT_STRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;

static int rcu_perf_writer_state; /* Written for debugging, never read. */
#define RTWS_INIT 0
#define RTWS_EXP_SYNC 1
#define RTWS_SYNC 2
#define RTWS_IDLE 3
#define RTWS_STOPPING 4

#define MAX_MEAS 10000
#define MIN_MEAS 100
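/*
 * Bounds on per-writer measurements: each writer must record at least
 * MIN_MEAS grace-period durations before the test can complete, and
 * records at most MAX_MEAS of them.
 */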

#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
#define RCUPERF_RUNNABLE_INIT 1
#else
#define RCUPERF_RUNNABLE_INIT 0
#endif
static int perf_runnable = RCUPERF_RUNNABLE_INIT;
module_param(perf_runnable, int, 0444);
MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
        int ptype;                      /* RCU flavor (RCU_FLAVOR, SRCU_FLAVOR, ...). */
        void (*init)(void);             /* Flavor-specific initialization, if any. */
        void (*cleanup)(void);          /* Flavor-specific cleanup, if any. */
        int (*readlock)(void);          /* Enter read-side critical section, return index. */
        void (*readunlock)(int idx);    /* Exit critical section entered with given index. */
        unsigned long (*started)(void); /* Grace-period batches started. */
        unsigned long (*completed)(void); /* Grace-period batches completed. */
        unsigned long (*exp_completed)(void); /* Expedited grace periods completed. */
        void (*sync)(void);             /* Wait for a normal grace period. */
        void (*exp_sync)(void);         /* Wait for an expedited grace period. */
        const char *name;               /* Matches the perf_type module parameter. */
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
        return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
        .ptype = RCU_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = rcu_perf_read_lock,
        .readunlock = rcu_perf_read_unlock,
        .started = rcu_batches_started,
        .completed = rcu_batches_completed,
        .exp_completed = rcu_exp_batches_completed,
        .sync = synchronize_rcu,
        .exp_sync = synchronize_rcu_expedited,
        .name = "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
        .ptype = RCU_BH_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = rcu_bh_perf_read_lock,
        .readunlock = rcu_bh_perf_read_unlock,
        .started = rcu_batches_started_bh,
        .completed = rcu_batches_completed_bh,
        .exp_completed = rcu_exp_batches_completed_sched,
        .sync = synchronize_rcu_bh,
        .exp_sync = synchronize_rcu_bh_expedited,
        .name = "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
        .ptype = SRCU_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = srcu_perf_read_lock,
        .readunlock = srcu_perf_read_unlock,
        .started = NULL,
        .completed = srcu_perf_completed,
        .exp_completed = srcu_perf_completed,
        .sync = srcu_perf_synchronize,
        .exp_sync = srcu_perf_synchronize_expedited,
        .name = "srcu"
};

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_perf_read_unlock(int idx)
{
        preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
        .ptype = RCU_SCHED_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = sched_perf_read_lock,
        .readunlock = sched_perf_read_unlock,
        .started = rcu_batches_started_sched,
        .completed = rcu_batches_completed_sched,
        .exp_completed = rcu_exp_batches_completed_sched,
        .sync = synchronize_sched,
        .exp_sync = synchronize_sched_expedited,
        .name = "sched"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
        return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
        .ptype = RCU_TASKS_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = tasks_perf_read_lock,
        .readunlock = tasks_perf_read_unlock,
        .started = rcu_no_completed,
        .completed = rcu_no_completed,
        .sync = synchronize_rcu_tasks,
        .exp_sync = synchronize_rcu_tasks,
        .name = "tasks"
};

#define RCUPERF_TASKS_OPS &tasks_ops,

static bool __maybe_unused torturing_tasks(void)
{
        return cur_ops == &tasks_ops;
}

#else /* #ifdef CONFIG_TASKS_RCU */

#define RCUPERF_TASKS_OPS

static bool __maybe_unused torturing_tasks(void)
{
        return false;
}

#endif /* #else #ifdef CONFIG_TASKS_RCU */

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
        cond_resched_rcu_qs();
        if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
        unsigned long flags;
        int idx;

        VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_perf_reader_started);

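        /*
         * Tight load loop: enter and exit an empty read-side critical
         * section on each pass, with interrupts disabled, presumably to
         * keep interrupt handlers from inflating individual read-side
         * critical sections.
         */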
        do {
                local_irq_save(flags);
                idx = cur_ops->readlock();
                cur_ops->readunlock(idx);
                local_irq_restore(flags);
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_perf_reader");
        return 0;
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
        int i = 0;
        int i_max;
        long me = (long)arg;
        bool started = false, done = false, alldone = false;
        u64 t;
        u64 *wdp;
        u64 *wdpp = writer_durations[me];

        VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
        WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
        WARN_ON(rcu_gp_is_normal() && gp_exp);
        WARN_ON(!wdpp);
        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
                t_rcu_perf_writer_started = t;
                if (gp_exp) {
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
                        b_rcu_perf_writer_started =
                                cur_ops->completed();
                }
        }

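        /*
         * Measurement loop: each pass times one synchronous grace period,
         * expedited or normal as selected by gp_exp, and records the
         * duration in this writer's writer_durations[] array.  The last
         * writer to start (above) and the last to finish (below) bracket
         * the interval used for the summary statistics printed at
         * cleanup time.
         */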
        do {
                wdp = &wdpp[i];
                *wdp = ktime_get_mono_fast_ns();
                if (gp_exp) {
                        rcu_perf_writer_state = RTWS_EXP_SYNC;
                        cur_ops->exp_sync();
                } else {
                        rcu_perf_writer_state = RTWS_SYNC;
                        cur_ops->sync();
                }
                rcu_perf_writer_state = RTWS_IDLE;
                t = ktime_get_mono_fast_ns();
                *wdp = t - *wdp;
                i_max = i;
                if (!started &&
                    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
                        started = true;
                if (!done && i >= MIN_MEAS) {
                        done = true;
                        pr_alert("%s" PERF_FLAG
                                 "rcu_perf_writer %ld has %d measurements\n",
                                 perf_type, me, MIN_MEAS);
                        if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
                            nrealwriters) {
                                PERFOUT_STRING("Test complete");
                                t_rcu_perf_writer_finished = t;
                                if (gp_exp) {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->completed();
                                }
                                smp_mb(); /* Assign before wake. */
                                wake_up(&shutdown_wq);
                        }
                }
                if (done && !alldone &&
                    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
                        alldone = true;
                if (started && !alldone && i < MAX_MEAS - 1)
                        i++;
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        rcu_perf_writer_state = RTWS_STOPPING;
        writer_n_durations[me] = i_max;
        torture_kthread_stopping("rcu_perf_writer");
        return 0;
}

static inline void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
        pr_alert("%s" PERF_FLAG
                 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
                 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
        int i;
        int j;
        int ngps = 0;
        u64 *wdp;
        u64 *wdpp;

        if (torture_cleanup_begin())
                return;

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_perf_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
        }

        if (writer_tasks) {
                for (i = 0; i < nrealwriters; i++) {
                        torture_stop_kthread(rcu_perf_writer,
                                             writer_tasks[i]);
                        if (!writer_n_durations)
                                continue;
                        j = writer_n_durations[i];
                        pr_alert("%s%s writer %d gps: %d\n",
                                 perf_type, PERF_FLAG, i, j);
                        ngps += j;
                }
                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
                         perf_type, PERF_FLAG,
                         t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
                         b_rcu_perf_writer_finished -
                         b_rcu_perf_writer_started);
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
                        if (!writer_n_durations)
                                continue;
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
                        for (j = 0; j <= writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                         perf_type, PERF_FLAG,
                                         i, j, *wdp);
                                if (j % 100 == 0)
                                        schedule_timeout_uninterruptible(1);
                        }
                        kfree(writer_durations[i]);
                }
                kfree(writer_tasks);
                kfree(writer_durations);
                kfree(writer_n_durations);
        }

        /* Do flavor-specific cleanup operations. */
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.  For example, with eight CPUs online: n = 4 yields 4,
 * n = -1 yields 8, and n = -3 yields 6.
 */
static int compute_real(int n)
{
        int nr;

        if (n >= 0) {
                nr = n;
        } else {
                nr = num_online_cpus() + 1 + n;
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
        do {
                wait_event(shutdown_wq,
                           atomic_read(&n_rcu_perf_writer_finished) >=
                           nrealwriters);
        } while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
        smp_mb(); /* Wake before output. */
        rcu_perf_cleanup();
        kernel_power_off();
        return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
        long i;
        int firsterr = 0;
        static struct rcu_perf_ops *perf_ops[] = {
                &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
                RCUPERF_TASKS_OPS
        };

        if (!torture_init_begin(perf_type, verbose, &perf_runnable))
                return -EBUSY;

        /* Process args and tell the world that the perf'er is on the job. */
        for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
                cur_ops = perf_ops[i];
                if (strcmp(perf_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
                pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
                         perf_type);
                pr_alert("rcu-perf types:");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
                        pr_alert(" %s", perf_ops[i]->name);
                pr_alert("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
        if (cur_ops->init)
                cur_ops->init();

        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_perf_reader_started, 0);
        atomic_set(&n_rcu_perf_writer_started, 0);
        atomic_set(&n_rcu_perf_writer_finished, 0);
        rcu_perf_print_module_parms(cur_ops, "Start of test");

        /* Start up the kthreads. */

        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
                                                  shutdown_task);
                if (firsterr)
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_perf_reader, NULL,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
                schedule_timeout_uninterruptible(1);
        writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
                               GFP_KERNEL);
        writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
                                   GFP_KERNEL);
        writer_n_durations =
                kcalloc(nrealwriters, sizeof(*writer_n_durations),
                        GFP_KERNEL);
        if (!writer_tasks || !writer_durations || !writer_n_durations) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
                if (!writer_durations[i]) {
                        firsterr = -ENOMEM;
                        goto unwind;
                }
                firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
                                                  writer_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_perf_cleanup();
        return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1e9a607534ca..f4b797a690ba 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1289,6 +1289,39 @@ config TORTURE_TEST
 	tristate
 	default n
 
+config RCU_PERF_TEST
+	tristate "performance tests for RCU"
+	depends on DEBUG_KERNEL
+	select TORTURE_TEST
+	select SRCU
+	select TASKS_RCU
+	default n
+	help
+	  This option provides a kernel module that runs performance
+	  tests on the RCU infrastructure.  The kernel module may be built
+	  after the fact on the running kernel to be tested, if desired.
+
+	  Say Y here if you want RCU performance tests to be built into
+	  the kernel.
+	  Say M if you want the RCU performance tests to build as a module.
+	  Say N if you are unsure.
+
+config RCU_PERF_TEST_RUNNABLE
+	bool "performance tests for RCU runnable by default"
+	depends on RCU_PERF_TEST = y
+	default n
+	help
+	  This option provides a way to build the RCU performance tests
+	  directly into the kernel without them starting up at boot time.
+	  You can use /sys/module to manually override this setting.
+	  The corresponding /sys/module parameter is available only when
+	  the RCU performance tests have been built into the kernel.
+
+	  Say Y here if you want the RCU performance tests to start during
+	  boot (you probably don't).
+	  Say N here if you want the RCU performance tests to start only
+	  after being manually enabled via /sys/module.
+
 config RCU_TORTURE_TEST
 	tristate "torture tests for RCU"
 	depends on DEBUG_KERNEL