author     Paul E. McKenney <paul.mckenney@linaro.org>    2010-09-27 20:25:23 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-11-30 01:01:54 -0500
commit     24278d148316d2180be6df40e06db013d8b232b8
tree       6a579d483f8f799b352e39b972a7e03cc6204fc1  /kernel/rcutiny.c
parent     b2c0710c464ede15e1fc52fb1e7ee9ba54cea186
rcu: priority boosting for TINY_PREEMPT_RCU
Add priority boosting, but only for TINY_PREEMPT_RCU. This is enabled
by the default-off RCU_BOOST kernel parameter. The priority to which to
boost preempted RCU readers is controlled by the RCU_BOOST_PRIO kernel
parameter (defaulting to real-time priority 1) and the time to wait
before boosting the readers blocking a given grace period is controlled
by the RCU_BOOST_DELAY kernel parameter (defaulting to 500 milliseconds).
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
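
For illustration, here is a minimal, self-contained sketch of the boot-time pattern used in the rcu_spawn_kthreads() hunk of the diff below: spawn a kthread from an early_initcall() and promote it to SCHED_FIFO at the configured priority. The names boost_example_task, boost_example_fn, and boost_example_spawn, the placeholder thread body, and the fallback #define are stand-ins invented for this sketch; only kthread_run(), sched_setscheduler_nocheck(), SCHED_FIFO, and the RCU_BOOST_PRIO value correspond to the patch itself.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Stand-in for the Kconfig-derived value; the patch defaults it to 1. */
#ifndef RCU_BOOST_PRIO
#define RCU_BOOST_PRIO 1
#endif

static struct task_struct *boost_example_task;

/* Placeholder thread body; the real patch runs rcu_kthread() here. */
static int boost_example_fn(void *arg)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static int __init boost_example_spawn(void)
{
        struct sched_param sp;

        boost_example_task = kthread_run(boost_example_fn, NULL, "boost_example");
        if (IS_ERR(boost_example_task))
                return PTR_ERR(boost_example_task);

        /* Give the kthread real-time priority so it can outrank preempted readers. */
        sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(boost_example_task, SCHED_FIFO, &sp);
        return 0;
}
early_initcall(boost_example_spawn);

As in the patch, sched_setscheduler_nocheck() is the in-kernel variant that skips the capability check, which is appropriate here because the priority comes from a trusted Kconfig setting rather than from user space.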
Diffstat (limited to 'kernel/rcutiny.c')
 -rw-r--r--   kernel/rcutiny.c | 66
 1 file changed, 26 insertions(+), 40 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 86eef29cdfb2..93d166582cbb 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -36,38 +36,16 @@
 #include <linux/time.h>
 #include <linux/cpu.h>
 
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
-        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
-        struct rcu_head **curtail;      /* ->next pointer of last CB. */
-};
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
-        .donetail       = &rcu_sched_ctrlblk.rcucblist,
-        .curtail        = &rcu_sched_ctrlblk.rcucblist,
-};
-
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-        .donetail       = &rcu_bh_ctrlblk.rcucblist,
-        .curtail        = &rcu_bh_ctrlblk.rcucblist,
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int rcu_scheduler_active __read_mostly;
-EXPORT_SYMBOL_GPL(rcu_scheduler_active);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
-/* Controls for rcu_cbs() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_cbs_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_cbs_wq);
-static unsigned long have_rcu_cbs;
-static void invoke_rcu_cbs(void);
+/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
+static struct task_struct *rcu_kthread_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static unsigned long have_rcu_kthread_work;
+static void invoke_rcu_kthread(void);
 
 /* Forward declarations for rcutiny_plugin.h. */
+struct rcu_ctrlblk;
 static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_cbs(void *arg);
+static int rcu_kthread(void *arg);
 static void __call_rcu(struct rcu_head *head,
                        void (*func)(struct rcu_head *rcu),
                        struct rcu_ctrlblk *rcp);
@@ -130,7 +108,7 @@ void rcu_sched_qs(int cpu)
 {
         if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
             rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_cbs();
+                invoke_rcu_kthread();
 }
 
 /*
@@ -139,7 +117,7 @@ void rcu_sched_qs(int cpu)
 void rcu_bh_qs(int cpu)
 {
         if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_cbs();
+                invoke_rcu_kthread();
 }
 
 /*
@@ -201,37 +179,41 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
  * This is a kthread, but it is never stopped, at least not until
  * the system goes down.
  */
-static int rcu_cbs(void *arg)
+static int rcu_kthread(void *arg)
 {
         unsigned long work;
+        unsigned long morework;
         unsigned long flags;
 
         for (;;) {
-                wait_event(rcu_cbs_wq, have_rcu_cbs != 0);
+                wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+                morework = rcu_boost();
                 local_irq_save(flags);
-                work = have_rcu_cbs;
-                have_rcu_cbs = 0;
+                work = have_rcu_kthread_work;
+                have_rcu_kthread_work = morework;
                 local_irq_restore(flags);
                 if (work) {
                         rcu_process_callbacks(&rcu_sched_ctrlblk);
                         rcu_process_callbacks(&rcu_bh_ctrlblk);
                         rcu_preempt_process_callbacks();
                 }
+                schedule_timeout_interruptible(1); /* Leave CPU for others. */
         }
 
         return 0;  /* Not reached, but needed to shut gcc up. */
 }
 
 /*
- * Wake up rcu_cbs() to process callbacks now eligible for invocation.
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
  */
-static void invoke_rcu_cbs(void)
+static void invoke_rcu_kthread(void)
 {
         unsigned long flags;
 
         local_irq_save(flags);
-        have_rcu_cbs = 1;
-        wake_up(&rcu_cbs_wq);
+        have_rcu_kthread_work = 1;
+        wake_up(&rcu_kthread_wq);
         local_irq_restore(flags);
 }
 
@@ -327,7 +309,11 @@ EXPORT_SYMBOL_GPL(rcu_barrier_sched);
  */
 static int __init rcu_spawn_kthreads(void)
 {
-        rcu_cbs_task = kthread_run(rcu_cbs, NULL, "rcu_cbs");
+        struct sched_param sp;
+
+        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
+        sp.sched_priority = RCU_BOOST_PRIO;
+        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
         return 0;
 }
 early_initcall(rcu_spawn_kthreads);
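
For readers skimming the middle hunk, here is the new rcu_kthread() loop again with explanatory comments added. It is an annotated excerpt of the code above, not a standalone program: rcu_kthread_wq, have_rcu_kthread_work, the ctrlblk structures, rcu_boost(), and rcu_preempt_process_callbacks() are defined elsewhere in rcutiny.c and rcutiny_plugin.h, and the comments are editorial, not part of the patch.

static int rcu_kthread(void *arg)
{
        unsigned long work;
        unsigned long morework;
        unsigned long flags;

        for (;;) {
                /* Sleep until invoke_rcu_kthread() posts work. */
                wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);

                /* rcu_boost() reports whether readers still need boosting. */
                morework = rcu_boost();

                /*
                 * Consume the work flag with interrupts disabled, re-arming
                 * it if boosting is not yet finished so the loop runs again.
                 */
                local_irq_save(flags);
                work = have_rcu_kthread_work;
                have_rcu_kthread_work = morework;
                local_irq_restore(flags);

                /* Invoke callbacks whose grace period has ended. */
                if (work) {
                        rcu_process_callbacks(&rcu_sched_ctrlblk);
                        rcu_process_callbacks(&rcu_bh_ctrlblk);
                        rcu_preempt_process_callbacks();
                }

                schedule_timeout_interruptible(1); /* Leave CPU for others. */
        }

        return 0;  /* Not reached, but needed to shut gcc up. */
}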