author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-01-30 23:56:38 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2016-03-31 16:38:58 -0400
commit	df37e66bfdbb57e8cae7dbf39a0c66b1b8701338 (patch)
tree	76c3d3915b98d79870a54593530aa029f77a976e /kernel/rcu
parent	2b03d038457fc8d694d34981cb0a2f1702ba35d6 (diff)
rcutorture: Add rcuperf holdoff boot parameter to reduce interference
Boot-time activity can legitimately grab CPUs for extended time periods, so this commit adds a boot parameter that delays the start of the performance test until boot has completed. The holdoff defaults to 10 seconds.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
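Usage note (assuming the usual module-parameter plumbing that torture_param() provides): when rcuperf is built in, the new knob can be set on the kernel command line; when rcuperf is built as a module, it can be passed at load time instead. For example:

	rcuperf.holdoff=30               (boot parameter, built-in case)
	modprobe rcuperf holdoff=30      (module-load case)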
Diffstat (limited to 'kernel/rcu')
 kernel/rcu/rcuperf.c | 5 +++++
 1 file changed, 5 insertions(+)
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 12561f96f0a2..278600143bb6 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -59,6 +59,7 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
 
 torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
 torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
@@ -368,6 +369,10 @@ rcu_perf_writer(void *arg)
 	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
 	sp.sched_priority = 1;
 	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+	if (holdoff)
+		schedule_timeout_uninterruptible(holdoff * HZ);
+
 	t = ktime_get_mono_fast_ns();
 	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
 		t_rcu_perf_writer_started = t;
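The pattern the patch adds is small enough to show stand-alone. The following is a minimal sketch, not the rcuperf code itself: the holdoff_demo_* names are hypothetical, while module_param(), schedule_timeout_uninterruptible(), HZ, kthread_run(), and ktime_get_mono_fast_ns() are the real kernel interfaces the patch relies on.

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/module.h>
	#include <linux/sched.h>
	#include <linux/timekeeping.h>

	static int holdoff = 10;
	module_param(holdoff, int, 0444);
	MODULE_PARM_DESC(holdoff, "Holdoff time before test start (s)");

	static struct task_struct *demo_task;

	static int holdoff_demo_thread(void *arg)
	{
		u64 t_start;

		/* Let boot-time activity quiesce before timing anything. */
		if (holdoff)
			schedule_timeout_uninterruptible(holdoff * HZ);

		/* Only now take the starting timestamp for the measurement. */
		t_start = ktime_get_mono_fast_ns();
		pr_info("holdoff_demo: measurement started at %llu ns\n", t_start);
		/* ... timed workload would run here ... */
		return 0;
	}

	static int __init holdoff_demo_init(void)
	{
		demo_task = kthread_run(holdoff_demo_thread, NULL, "holdoff_demo");
		return PTR_ERR_OR_ZERO(demo_task);
	}
	module_init(holdoff_demo_init);

	static void __exit holdoff_demo_exit(void) { }
	module_exit(holdoff_demo_exit);

	MODULE_LICENSE("GPL");

The design point is that the sleep happens inside each measurement thread, after scheduler setup but before the first timestamp is taken, so boot-time CPU contention delays the test rather than polluting its results.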