path: root/kernel/rcutree_plugin.h
author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-06-22 20:06:26 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-23 10:41:54 -0400
commit	4cdfc175c25c89eedc08460b5e6239c2ec67fcb6 (patch)
tree	716acd208cb0633cdd19fc0cd5ad601906cceb24 /kernel/rcutree_plugin.h
parent	b402b73b3afe3614bc0e921ebe18013ea103115a (diff)
rcu: Move quiescent-state forcing into kthread
As the first step towards allowing quiescent-state forcing to be preemptible, this commit moves RCU quiescent-state forcing into the same kthread that is now used to initialize and clean up after grace periods. This is yet another step towards keeping scheduling latency down to a dull roar.

Updated to change from raw_spin_lock_irqsave() to raw_spin_lock_irq() and to remove the now-unused rcu_state structure fields as suggested by Peter Zijlstra.

Reported-by: Mike Galbraith <mgalbraith@suse.de>
Reported-by: Dimitri Sivanich <sivanich@sgi.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
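To make the caller-visible effects concrete, here is a minimal sketch of the two changes the message describes. It is illustrative only, not the rcutree.c implementation: the helper fqs_scan_example() and its body are assumptions, while the first function and the one-argument force_quiescent_state() call are taken directly from the diff below.

/* Callers drop the old second argument (passed as 0 in the removed lines): */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_preempt_state);	/* formerly (..., 0) */
}

/*
 * Hypothetical helper illustrating the locking change mentioned above:
 * with forcing now running in the grace-period kthread, where interrupts
 * are known to be enabled, raw_spin_lock_irq() can replace
 * raw_spin_lock_irqsave().
 */
static void fqs_scan_example(struct rcu_node *rnp)
{
	raw_spin_lock_irq(&rnp->lock);		/* was raw_spin_lock_irqsave() */
	/* ... check CPUs covered by this rcu_node for quiescent states ... */
	raw_spin_unlock_irq(&rnp->lock);
}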
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 587963689328..eb8dcd1bc4b5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed);
  */
 void rcu_force_quiescent_state(void)
 {
-	force_quiescent_state(&rcu_preempt_state, 0);
+	force_quiescent_state(&rcu_preempt_state);
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
@@ -2076,16 +2076,16 @@ static void rcu_prepare_for_idle(int cpu)
 #ifdef CONFIG_TREE_PREEMPT_RCU
 	if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
 		rcu_preempt_qs(cpu);
-		force_quiescent_state(&rcu_preempt_state, 0);
+		force_quiescent_state(&rcu_preempt_state);
 	}
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 	if (per_cpu(rcu_sched_data, cpu).nxtlist) {
 		rcu_sched_qs(cpu);
-		force_quiescent_state(&rcu_sched_state, 0);
+		force_quiescent_state(&rcu_sched_state);
 	}
 	if (per_cpu(rcu_bh_data, cpu).nxtlist) {
 		rcu_bh_qs(cpu);
-		force_quiescent_state(&rcu_bh_state, 0);
+		force_quiescent_state(&rcu_bh_state);
 	}
 
 	/*