aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/rcutree.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2012-07-16 06:42:35 -0400
committerThomas Gleixner <tglx@linutronix.de>2012-08-13 11:01:06 -0400
commit5d01bbd111d6ff9ea9d9847774f66dff39633776 (patch)
tree77cbce5ff479b3e70f95a29cc88ce50e025f9078 /kernel/rcutree.c
parent3bf671af14d591ede9251acb0085e8017f3705e7 (diff)
rcu: Yield simpler
The rcu_yield() code is amazing. It's there to avoid starvation of the system when lots of (boosting) work is to be done. Now looking at the code, its functionality is: Make the thread SCHED_OTHER and very nice, i.e. get it out of the way Arm a timer with 2 ticks schedule() Now if the system goes idle the rcu task returns, regains SCHED_FIFO and plugs on. If the system stays busy the timer fires and wakes a per node kthread which in turn makes the per cpu thread SCHED_FIFO and brings it back on the cpu. For the boosting thread the "make it FIFO" bit is missing and it just runs some magic boost checks. Now this is a lot of code with extra threads and complexity. It's way simpler to let the tasks, when they detect overload, schedule away for 2 ticks and defer the normal wakeup as long as they are in yielded state and the cpu is not idle. That solves the same problem and the only difference is that when the cpu goes idle it's not guaranteed that the thread returns right away, but it won't be out longer than two ticks, so no harm is done. If that's an issue then it is way simpler just to wake the task from idle, as RCU has callbacks there anyway. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Namhyung Kim <namhyung@kernel.org> Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Link: http://lkml.kernel.org/r/20120716103948.131256723@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--kernel/rcutree.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f280e542e3e9..f08ee3bc5741 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -139,7 +139,7 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
139 139
140#endif /* #ifdef CONFIG_RCU_BOOST */ 140#endif /* #ifdef CONFIG_RCU_BOOST */
141 141
142static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 142static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
143static void invoke_rcu_core(void); 143static void invoke_rcu_core(void);
144static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); 144static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
145 145
@@ -1469,7 +1469,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1469 1469
1470 /* Adjust any no-longer-needed kthreads. */ 1470 /* Adjust any no-longer-needed kthreads. */
1471 rcu_stop_cpu_kthread(cpu); 1471 rcu_stop_cpu_kthread(cpu);
1472 rcu_node_kthread_setaffinity(rnp, -1); 1472 rcu_boost_kthread_setaffinity(rnp, -1);
1473 1473
1474 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ 1474 /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
1475 1475
@@ -2594,11 +2594,11 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2594 break; 2594 break;
2595 case CPU_ONLINE: 2595 case CPU_ONLINE:
2596 case CPU_DOWN_FAILED: 2596 case CPU_DOWN_FAILED:
2597 rcu_node_kthread_setaffinity(rnp, -1); 2597 rcu_boost_kthread_setaffinity(rnp, -1);
2598 rcu_cpu_kthread_setrt(cpu, 1); 2598 rcu_cpu_kthread_setrt(cpu, 1);
2599 break; 2599 break;
2600 case CPU_DOWN_PREPARE: 2600 case CPU_DOWN_PREPARE:
2601 rcu_node_kthread_setaffinity(rnp, cpu); 2601 rcu_boost_kthread_setaffinity(rnp, cpu);
2602 rcu_cpu_kthread_setrt(cpu, 0); 2602 rcu_cpu_kthread_setrt(cpu, 0);
2603 break; 2603 break;
2604 case CPU_DYING: 2604 case CPU_DYING: