author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-01-10 19:21:07 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-01-29 01:19:54 -0500
commit    0e11c8e8a60f8591556d142c2e1e53eaf86ab528 (patch)
tree      98a824aba939ebd38178e60a3ba4eb97a29fb97c /kernel/rcutorture.c
parent    7e8b1e78ea028cbd32337e2aea574a8466c796bb (diff)
rcu: Make rcutorture's shuffler task shuffle recently added tasks
A number of kthreads have been added to rcutorture, but the shuffler task was not informed of them, and thus did not shuffle them. This commit therefore adds the requisite shuffling and, while in the area, fixes up some whitespace issues.

However, the shuffling is intended to keep randomly selected CPUs idle, which means that the RCU priority boosting kthreads need to avoid waking up every jiffy. This commit also makes that fix.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
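The boost-kthread fix described above replaces a wait loop that woke up once per jiffy with a single sleep covering the whole remaining interval. Below is a minimal userspace C sketch of that pattern; the now_ticks()/sleep_ticks() helpers and the 10 ms "jiffy" are illustrative stand-ins, not kernel code.

#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for "jiffies": a 10 ms monotonic tick counter. */
static long now_ticks(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 100L + ts.tv_nsec / 10000000L;
}

/* Sleep for n 10 ms ticks. */
static void sleep_ticks(long n)
{
	struct timespec ts = {
		.tv_sec  = n / 100,
		.tv_nsec = (n % 100) * 10000000L,
	};

	nanosleep(&ts, NULL);
}

int main(void)
{
	long start = now_ticks();
	long deadline = start + 50;	/* wait roughly half a second */

	/*
	 * Old pattern: wake every tick and recheck the deadline, which
	 * keeps pulling the CPU out of idle:
	 *
	 *	while (now_ticks() < deadline)
	 *		sleep_ticks(1);
	 *
	 * New pattern: sleep the whole remaining interval in one shot,
	 * looping only if the sleep returns early.
	 */
	while (now_ticks() < deadline)
		sleep_ticks(deadline - now_ticks());

	printf("woke after %ld ticks\n", now_ticks() - start);
	return 0;
}

In the patch below, the same idea is expressed by passing oldstarttime - jiffies to schedule_timeout_interruptible() instead of sleeping one jiffy at a time.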
Diffstat (limited to 'kernel/rcutorture.c')
-rw-r--r--  kernel/rcutorture.c  24
1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index a583f1ce713d..3ebc8bfb5525 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -846,7 +846,7 @@ static int rcu_torture_boost(void *arg)
 		/* Wait for the next test interval. */
 		oldstarttime = boost_starttime;
 		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-			schedule_timeout_uninterruptible(1);
+			schedule_timeout_interruptible(oldstarttime - jiffies);
 			rcu_stutter_wait("rcu_torture_boost");
 			if (kthread_should_stop() ||
 			    fullstop != FULLSTOP_DONTSTOP)
@@ -1318,19 +1318,35 @@ static void rcu_torture_shuffle_tasks(void)
 				set_cpus_allowed_ptr(reader_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
 						     shuffle_tmp_mask);
 	}
-
 	if (writer_task)
 		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
-
 	if (stats_task)
 		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
+	if (stutter_task)
+		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
+	if (fqs_task)
+		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
+	if (shutdown_task)
+		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
+#ifdef CONFIG_HOTPLUG_CPU
+	if (onoff_task)
+		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+	if (stall_task)
+		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
+	if (barrier_cbs_tasks)
+		for (i = 0; i < n_barrier_cbs; i++)
+			if (barrier_cbs_tasks[i])
+				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
+						     shuffle_tmp_mask);
+	if (barrier_task)
+		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
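For context on why registering these kthreads with the shuffler matters: rcu_torture_shuffle_tasks() excludes one chosen CPU (rcu_idle_cpu) from the affinity mask and binds every torture kthread to the remaining CPUs, so the excluded CPU can actually go idle. The following rough userspace sketch illustrates that idea with the standard Linux affinity API; shuffle_away_from() and the single-process binding are hypothetical simplifications, not the rcutorture code itself.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Bind the calling process to every CPU except idle_cpu, mimicking how
 * the shuffler herds its kthreads off one CPU so that CPU can go idle.
 */
static int shuffle_away_from(int idle_cpu, int ncpus)
{
	cpu_set_t mask;
	int cpu;

	if (ncpus < 2)		/* nothing to shuffle on a single CPU */
		return 0;

	CPU_ZERO(&mask);
	for (cpu = 0; cpu < ncpus; cpu++)
		if (cpu != idle_cpu)
			CPU_SET(cpu, &mask);

	return sched_setaffinity(0, sizeof(mask), &mask);
}

int main(void)
{
	int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
	int idle_cpu = rand() % ncpus;

	if (shuffle_away_from(idle_cpu, ncpus))
		perror("sched_setaffinity");
	else
		printf("left CPU %d idle for this process\n", idle_cpu);
	return 0;
}

The patch extends this binding to the newer kthreads (stutter, fqs, shutdown, onoff, stall, and the barrier tasks), which previously stayed wherever the scheduler placed them and so could keep the supposedly idle CPU busy.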