about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorPaul E. McKenney <paul.mckenney@linaro.org>2011-04-18 18:31:26 -0400
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2011-05-06 02:16:55 -0400
commite3995a25fa361ce987a7d0ade00b17e3151519d7 (patch)
treeccf8a6ae71bc5cb289080b04f00170a758e33c59 /kernel
parent0f962a5e7277c34987b77dc82fc9aefcedc95e27 (diff)
rcu: put per-CPU kthread at non-RT priority during CPU hotplug operations
If you are doing CPU hotplug operations, it is best not to have realtime tasks running CPU-bound on the outgoing CPU. So this commit makes the per-CPU kthreads run at non-realtime priority during that time. Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/rcutree.c26
1 file changed, 26 insertions, 0 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 51eef4193e7c..198e4df7d83e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1464,6 +1464,30 @@ static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1464} 1464}
1465 1465
1466/* 1466/*
1467 * Set the specified CPU's kthread to run RT or not, as specified by
1468 * the to_rt argument. The CPU-hotplug locks are held, so the task
1469 * is not going away.
1470 */
1471static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1472{
1473 int policy;
1474 struct sched_param sp;
1475 struct task_struct *t;
1476
1477 t = per_cpu(rcu_cpu_kthread_task, cpu);
1478 if (t == NULL)
1479 return;
1480 if (to_rt) {
1481 policy = SCHED_FIFO;
1482 sp.sched_priority = RCU_KTHREAD_PRIO;
1483 } else {
1484 policy = SCHED_NORMAL;
1485 sp.sched_priority = 0;
1486 }
1487 sched_setscheduler_nocheck(t, policy, &sp);
1488}
1489
1490/*
1467 * Timer handler to initiate the waking up of per-CPU kthreads that 1491 * Timer handler to initiate the waking up of per-CPU kthreads that
1468 * have yielded the CPU due to excess numbers of RCU callbacks. 1492 * have yielded the CPU due to excess numbers of RCU callbacks.
1469 * We wake up the per-rcu_node kthread, which in turn will wake up 1493 * We wake up the per-rcu_node kthread, which in turn will wake up
@@ -2166,9 +2190,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2166 case CPU_ONLINE: 2190 case CPU_ONLINE:
2167 case CPU_DOWN_FAILED: 2191 case CPU_DOWN_FAILED:
2168 rcu_node_kthread_setaffinity(rnp, -1); 2192 rcu_node_kthread_setaffinity(rnp, -1);
2193 rcu_cpu_kthread_setrt(cpu, 1);
2169 break; 2194 break;
2170 case CPU_DOWN_PREPARE: 2195 case CPU_DOWN_PREPARE:
2171 rcu_node_kthread_setaffinity(rnp, cpu); 2196 rcu_node_kthread_setaffinity(rnp, cpu);
2197 rcu_cpu_kthread_setrt(cpu, 0);
2172 break; 2198 break;
2173 case CPU_DYING: 2199 case CPU_DYING:
2174 case CPU_DYING_FROZEN: 2200 case CPU_DYING_FROZEN: