author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-06-18 12:55:39 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-09-29 00:38:20 -0400
commit     965a002b4f1a458c5dcb334ec29f48a0046faa25 (patch)
tree       9aa3847fd44b322a73631758e7337632e5e3a32d /kernel
parent     385680a9487d2f85382ad6d74e2a15837e47bfd9 (diff)
rcu: Make TINY_RCU also use softirq for RCU_BOOST=n
This patch #ifdefs TINY_RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has not
been configured.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
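At a glance, the shape of the change is roughly the following condensed sketch of the two invoke_rcu_callbacks() variants, pieced together from the rcutiny_plugin.h hunks below (a summary, not a verbatim excerpt):

#ifdef CONFIG_RCU_BOOST

/* Boosting configured: wake rcu_kthread(), which boosts readers and runs callbacks. */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	wake_up(&rcu_kthread_wq);
}

#else /* #ifdef CONFIG_RCU_BOOST */

/* No boosting: no kthread at all, callbacks run from RCU_SOFTIRQ instead. */
void invoke_rcu_callbacks(void)
{
	raise_softirq(RCU_SOFTIRQ);
}

void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

With RCU_BOOST=n, rcu_kthread(), its wait queue, and the early_initcall() that spawned it are compiled out entirely, so quiescent-state processing raises a softirq instead of paying for a kthread wakeup and context switch.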
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcutiny.c         |  74
-rw-r--r--  kernel/rcutiny_plugin.h  | 110
2 files changed, 93 insertions(+), 91 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 1c37bdd464f1..c9321d86999b 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -43,16 +43,11 @@
 
 #include "rcu.h"
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
-
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
@@ -102,16 +97,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-}
-
-/*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
@@ -123,7 +108,7 @@ void rcu_sched_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
@@ -136,7 +121,7 @@ void rcu_bh_qs(int cpu)
 
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
@@ -160,7 +145,7 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
 	struct rcu_head *next, *list;
 	unsigned long flags;
@@ -200,36 +185,11 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed.  It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-	unsigned long work;
-	unsigned long morework;
-	unsigned long flags;
-
-	for (;;) {
-		wait_event_interruptible(rcu_kthread_wq,
-					 have_rcu_kthread_work != 0);
-		morework = rcu_boost();
-		local_irq_save(flags);
-		work = have_rcu_kthread_work;
-		have_rcu_kthread_work = morework;
-		local_irq_restore(flags);
-		if (work) {
-			rcu_process_callbacks(&rcu_sched_ctrlblk);
-			rcu_process_callbacks(&rcu_bh_ctrlblk);
-			rcu_preempt_process_callbacks();
-		}
-		schedule_timeout_interruptible(1); /* Leave CPU for others. */
-	}
-
-	return 0;  /* Not reached, but needed to shut gcc up. */
+	__rcu_process_callbacks(&rcu_sched_ctrlblk);
+	__rcu_process_callbacks(&rcu_bh_ctrlblk);
+	rcu_preempt_process_callbacks();
 }
 
 /*
@@ -291,17 +251,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-	struct sched_param sp;
-
-	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-	sp.sched_priority = RCU_BOOST_PRIO;
-	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-	return 0;
-}
-early_initcall(rcu_spawn_kthreads);
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 791ddf7c99ab..02aa7139861c 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -245,6 +245,13 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 
 #include "rtmutex_common.h"
 
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+
+/* Controls for rcu_kthread() kthread. */
+static struct task_struct *rcu_kthread_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static unsigned long have_rcu_kthread_work;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
  * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
@@ -332,7 +339,7 @@ static int rcu_initiate_boost(void)
 		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
 			rcu_preempt_ctrlblk.boost_tasks =
 				rcu_preempt_ctrlblk.gp_tasks;
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	} else
 		RCU_TRACE(rcu_initiate_boost_trace());
 	return 1;
@@ -351,14 +358,6 @@ static void rcu_preempt_boost_start_gp(void)
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 /*
- * If there is no RCU priority boosting, we don't boost.
- */
-static int rcu_boost(void)
-{
-	return 0;
-}
-
-/*
  * If there is no RCU priority boosting, we don't initiate boosting,
  * but we do indicate whether there are blocked readers blocking the
  * current grace period.
@@ -425,7 +424,7 @@ static void rcu_preempt_cpu_qs(void)
 
 	/* If there are done callbacks, cause them to be invoked. */
 	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 }
 
 /*
@@ -646,7 +645,7 @@ static void rcu_preempt_check_callbacks(void)
 		rcu_preempt_cpu_qs();
 	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
 	    rcu_preempt_ctrlblk.rcb.donetail)
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
 	    rcu_preempt_running_reader())
@@ -672,7 +671,7 @@ static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
  */
 static void rcu_preempt_process_callbacks(void)
 {
-	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
 }
 
 /*
@@ -848,15 +847,6 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
 /*
- * Because preemptible RCU does not exist, it is never necessary to
- * boost preempted RCU readers.
- */
-static int rcu_boost(void)
-{
-	return 0;
-}
-
-/*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
@@ -882,6 +872,78 @@ static void rcu_preempt_process_callbacks(void)
 
 #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
 
+#ifdef CONFIG_RCU_BOOST
+
+/*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_callbacks(void)
+{
+	have_rcu_kthread_work = 1;
+	wake_up(&rcu_kthread_wq);
+}
+
+/*
+ * This kthread invokes RCU callbacks whose grace periods have
+ * elapsed.  It is awakened as needed, and takes the place of the
+ * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
+ * This is a kthread, but it is never stopped, at least not until
+ * the system goes down.
+ */
+static int rcu_kthread(void *arg)
+{
+	unsigned long work;
+	unsigned long morework;
+	unsigned long flags;
+
+	for (;;) {
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
+		morework = rcu_boost();
+		local_irq_save(flags);
+		work = have_rcu_kthread_work;
+		have_rcu_kthread_work = morework;
+		local_irq_restore(flags);
+		if (work)
+			rcu_process_callbacks(NULL);
+		schedule_timeout_interruptible(1); /* Leave CPU for others. */
+	}
+
+	return 0;  /* Not reached, but needed to shut gcc up. */
+}
+
+/*
+ * Spawn the kthread that invokes RCU callbacks.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	struct sched_param sp;
+
+	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
+	sp.sched_priority = RCU_BOOST_PRIO;
+	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Start up softirq processing of callbacks.
+ */
+void invoke_rcu_callbacks(void)
+{
+	raise_softirq(RCU_SOFTIRQ);
+}
+
+void rcu_init(void)
+{
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #include <linux/kernel_stat.h>
 
@@ -897,12 +959,6 @@ void __init rcu_scheduler_starting(void)
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-#ifdef CONFIG_RCU_BOOST
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else /* #ifdef CONFIG_RCU_BOOST */
-#define RCU_BOOST_PRIO 1
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
 #ifdef CONFIG_RCU_TRACE
 
 #ifdef CONFIG_RCU_BOOST