path: root/kernel/rcutiny.c
author	Paul E. McKenney <paul.mckenney@linaro.org>	2011-06-18 12:55:39 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 00:38:20 -0400
commit	965a002b4f1a458c5dcb334ec29f48a0046faa25 (patch)
tree	9aa3847fd44b322a73631758e7337632e5e3a32d /kernel/rcutiny.c
parent	385680a9487d2f85382ad6d74e2a15837e47bfd9 (diff)
rcu: Make TINY_RCU also use softirq for RCU_BOOST=n
This patch #ifdefs TINY_RCU kthreads out of the kernel unless RCU_BOOST=y, thus eliminating context-switch overhead if RCU priority boosting has not been configured.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
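For orientation: the diff below swaps the forward declaration of invoke_rcu_kthread() for invoke_rcu_callbacks(), whose definition lives in rcutiny_plugin.h and is therefore outside this file-limited view. A minimal sketch of the split the commit message describes, assuming the usual CONFIG_RCU_BOOST #ifdef structure; the RCU_BOOST=y wakeup shown here is hypothetical, since that side of rcutiny_plugin.h is not part of this diff:

	/* Sketch of the rcutiny_plugin.h split -- assumed, not shown in this diff. */
	#ifdef CONFIG_RCU_BOOST
	/* Priority boosting configured: keep the kthread and wake it. */
	static void invoke_rcu_callbacks(void)
	{
		wake_up(&rcu_kthread_wq);	/* hypothetical wait-queue name */
	}
	#else /* #ifdef CONFIG_RCU_BOOST */
	/* No boosting: hand callback invocation back to RCU_SOFTIRQ. */
	static void invoke_rcu_callbacks(void)
	{
		raise_softirq(RCU_SOFTIRQ);
	}
	#endif /* #else #ifdef CONFIG_RCU_BOOST */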
Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--	kernel/rcutiny.c	74
1 file changed, 10 insertions(+), 64 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 1c37bdd464f1..c9321d86999b 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -43,16 +43,11 @@
 
 #include "rcu.h"
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
-
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
@@ -102,16 +97,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-}
-
-/*
  * Record an rcu quiescent state. And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state. Use "+" instead of "||" to defeat short circuiting.
@@ -123,7 +108,7 @@ void rcu_sched_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
@@ -136,7 +121,7 @@ void rcu_bh_qs(int cpu)
 
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
@@ -160,7 +145,7 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
 	struct rcu_head *next, *list;
 	unsigned long flags;
@@ -200,36 +185,11 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed. It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-	unsigned long work;
-	unsigned long morework;
-	unsigned long flags;
-
-	for (;;) {
-		wait_event_interruptible(rcu_kthread_wq,
-					 have_rcu_kthread_work != 0);
-		morework = rcu_boost();
-		local_irq_save(flags);
-		work = have_rcu_kthread_work;
-		have_rcu_kthread_work = morework;
-		local_irq_restore(flags);
-		if (work) {
-			rcu_process_callbacks(&rcu_sched_ctrlblk);
-			rcu_process_callbacks(&rcu_bh_ctrlblk);
-			rcu_preempt_process_callbacks();
-		}
-		schedule_timeout_interruptible(1); /* Leave CPU for others. */
-	}
-
-	return 0; /* Not reached, but needed to shut gcc up. */
+	__rcu_process_callbacks(&rcu_sched_ctrlblk);
+	__rcu_process_callbacks(&rcu_bh_ctrlblk);
+	rcu_preempt_process_callbacks();
 }
 
 /*
@@ -291,17 +251,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-	struct sched_param sp;
-
-	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-	sp.sched_priority = RCU_BOOST_PRIO;
-	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-	return 0;
-}
-early_initcall(rcu_spawn_kthreads);
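With rcu_spawn_kthreads() gone for RCU_BOOST=n, the new rcu_process_callbacks() has to be registered as the RCU_SOFTIRQ action somewhere outside this diffstat-limited view. Under the assumption that the registration sits in an init path such as rcu_init(), it would take the standard open_softirq() form:

	void __init rcu_init(void)
	{
		/* Run rcu_process_callbacks() whenever RCU_SOFTIRQ is raised. */
		open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	}

The softirq handler runs on the CPU that raised it without a context switch, which is what eliminates the kthread overhead the commit message cites.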