Diffstat (limited to 'kernel/rcutiny.c')
 kernel/rcutiny.c | 151 ++++++++++++++++++++++++++++-------------------
 1 file changed, 91 insertions(+), 60 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 196ec02f8be0..7bbac7d0f5ab 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -35,29 +35,23 @@
 #include <linux/init.h>
 #include <linux/time.h>
 #include <linux/cpu.h>
+#include <linux/prefetch.h>
+
+/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
+static struct task_struct *rcu_kthread_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static unsigned long have_rcu_kthread_work;
+
+/* Forward declarations for rcutiny_plugin.h. */
+struct rcu_ctrlblk;
+static void invoke_rcu_kthread(void);
+static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static int rcu_kthread(void *arg);
+static void __call_rcu(struct rcu_head *head,
+                       void (*func)(struct rcu_head *rcu),
+                       struct rcu_ctrlblk *rcp);
 
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
-        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
-        struct rcu_head **curtail;      /* ->next pointer of last CB. */
-};
-
-/* Definition for rcupdate control block. */
-static struct rcu_ctrlblk rcu_sched_ctrlblk = {
-        .donetail       = &rcu_sched_ctrlblk.rcucblist,
-        .curtail        = &rcu_sched_ctrlblk.rcucblist,
-};
-
-static struct rcu_ctrlblk rcu_bh_ctrlblk = {
-        .donetail       = &rcu_bh_ctrlblk.rcucblist,
-        .curtail        = &rcu_bh_ctrlblk.rcucblist,
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int rcu_scheduler_active __read_mostly;
-EXPORT_SYMBOL_GPL(rcu_scheduler_active);
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#include "rcutiny_plugin.h"
 
 #ifdef CONFIG_NO_HZ
 
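The rcu_ctrlblk definitions removed above move into rcutiny_plugin.h, but the data structure is worth pausing on: a singly linked callback list with two tail pointers, where everything up to ->donetail has waited out a grace period and everything after it has not. Both enqueue and "grace period ended" are O(1) pointer updates. A userspace model of the idea (an illustrative sketch; type and function names here are invented, not the kernel's):

    #include <stdio.h>

    struct cb {
        struct cb *next;
        const char *name;
    };

    struct ctrlblk {
        struct cb *cblist;    /* Head of the callback list. */
        struct cb **donetail; /* ->next of the last "done" callback. */
        struct cb **curtail;  /* ->next of the last callback of all. */
    };

    /* O(1) append: the *rcp->curtail = head; dance from __call_rcu(). */
    static void enqueue(struct ctrlblk *rcp, struct cb *head)
    {
        head->next = NULL;
        *rcp->curtail = head;
        rcp->curtail = &head->next;
    }

    /* O(1) "grace period ended": all callbacks queued so far become invocable. */
    static void grace_period_ended(struct ctrlblk *rcp)
    {
        rcp->donetail = rcp->curtail;
    }

    int main(void)
    {
        struct ctrlblk blk = { NULL, &blk.cblist, &blk.cblist };
        struct cb a = { NULL, "a" }, b = { NULL, "b" };

        enqueue(&blk, &a);
        grace_period_ended(&blk); /* "a" may now be invoked. */
        enqueue(&blk, &b);        /* "b" waits for the next grace period. */
        printf("ready to invoke: %s\n", blk.cblist->name);
        printf("done list nonempty: %d\n", blk.donetail != &blk.cblist);
        return 0;
    }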
@@ -86,36 +80,45 @@ void rcu_exit_nohz(void)
 #endif /* #ifdef CONFIG_NO_HZ */
 
 /*
- * Helper function for rcu_qsctr_inc() and rcu_bh_qsctr_inc().
- * Also disable irqs to avoid confusion due to interrupt handlers
+ * Helper function for rcu_sched_qs() and rcu_bh_qs().
+ * Also irqs are disabled to avoid confusion due to interrupt handlers
  * invoking call_rcu().
  */
 static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 {
-        unsigned long flags;
-
-        local_irq_save(flags);
         if (rcp->rcucblist != NULL &&
             rcp->donetail != rcp->curtail) {
                 rcp->donetail = rcp->curtail;
-                local_irq_restore(flags);
                 return 1;
         }
-        local_irq_restore(flags);
 
         return 0;
 }
 
 /*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_kthread(void)
+{
+        have_rcu_kthread_work = 1;
+        wake_up(&rcu_kthread_wq);
+}
+
+/*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
  */
 void rcu_sched_qs(int cpu)
 {
+        unsigned long flags;
+
+        local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
             rcu_qsctr_help(&rcu_bh_ctrlblk))
-                raise_softirq(RCU_SOFTIRQ);
+                invoke_rcu_kthread();
+        local_irq_restore(flags);
 }
 
 /*
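The "+" instead of "||" comment above is load-bearing: with "||", a quiescent state recorded for the rcu_sched control block would short-circuit past rcu_qsctr_help(&rcu_bh_ctrlblk), and the rcu_bh grace period would never be advanced on this path. A standalone illustration of the difference (hypothetical helper names):

    #include <stdio.h>

    static int side_effect_a(void) { puts("a ran"); return 1; }
    static int side_effect_b(void) { puts("b ran"); return 0; }

    int main(void)
    {
        if (side_effect_a() || side_effect_b()) /* "b ran" never prints. */
            puts("short-circuited");
        if (side_effect_a() + side_effect_b()) /* Both always run. */
            puts("both evaluated");
        return 0;
    }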
@@ -123,8 +126,12 @@ void rcu_sched_qs(int cpu)
  */
 void rcu_bh_qs(int cpu)
 {
+        unsigned long flags;
+
+        local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-                raise_softirq(RCU_SOFTIRQ);
+                invoke_rcu_kthread();
+        local_irq_restore(flags);
 }
 
 /*
@@ -140,16 +147,18 @@ void rcu_check_callbacks(int cpu, int user)
                 rcu_sched_qs(cpu);
         else if (!in_softirq())
                 rcu_bh_qs(cpu);
+        rcu_preempt_check_callbacks();
 }
 
 /*
- * Helper function for rcu_process_callbacks() that operates on the
- * specified rcu_ctrlkblk structure.
+ * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
+ * whose grace period has elapsed.
  */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
         struct rcu_head *next, *list;
         unsigned long flags;
+        RCU_TRACE(int cb_count = 0);
 
         /* If no RCU callbacks ready to invoke, just return. */
         if (&rcp->rcucblist == rcp->donetail)
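RCU_TRACE() is statement-level conditional compilation: when the tracing Kconfig option is off, the argument (including the cb_count declaration above) vanishes entirely, costing nothing at runtime. A sketch of the idiom (the kernel's own definition lives in another header; treat this reconstruction as an assumption):

    #include <stdio.h>

    #ifdef CONFIG_RCU_TRACE
    #define RCU_TRACE(stmt) stmt
    #else
    #define RCU_TRACE(stmt)
    #endif

    int main(void)
    {
        RCU_TRACE(int cb_count = 0); /* Declaration disappears when off. */
        RCU_TRACE(cb_count++);
        RCU_TRACE(printf("cb_count = %d\n", cb_count));
        return 0;
    }

Compile with -DCONFIG_RCU_TRACE to see the counter; without it, main() compiles down to a bare return.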
@@ -162,6 +171,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
         *rcp->donetail = NULL;
         if (rcp->curtail == rcp->donetail)
                 rcp->curtail = &rcp->rcucblist;
+        rcu_preempt_remove_callbacks(rcp);
         rcp->donetail = &rcp->rcucblist;
         local_irq_restore(flags);
 
@@ -170,18 +180,45 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
-                list->func(list);
+                local_bh_disable();
+                __rcu_reclaim(list);
+                local_bh_enable();
                 list = next;
+                RCU_TRACE(cb_count++);
         }
+        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
 }
 
 /*
- * Invoke any callbacks whose grace period has completed.
+ * This kthread invokes RCU callbacks whose grace periods have
+ * elapsed.  It is awakened as needed, and takes the place of the
+ * RCU_SOFTIRQ that was used previously for this purpose.
+ * This is a kthread, but it is never stopped, at least not until
+ * the system goes down.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static int rcu_kthread(void *arg)
 {
-        __rcu_process_callbacks(&rcu_sched_ctrlblk);
-        __rcu_process_callbacks(&rcu_bh_ctrlblk);
+        unsigned long work;
+        unsigned long morework;
+        unsigned long flags;
+
+        for (;;) {
+                wait_event_interruptible(rcu_kthread_wq,
+                                         have_rcu_kthread_work != 0);
+                morework = rcu_boost();
+                local_irq_save(flags);
+                work = have_rcu_kthread_work;
+                have_rcu_kthread_work = morework;
+                local_irq_restore(flags);
+                if (work) {
+                        rcu_process_callbacks(&rcu_sched_ctrlblk);
+                        rcu_process_callbacks(&rcu_bh_ctrlblk);
+                        rcu_preempt_process_callbacks();
+                }
+                schedule_timeout_interruptible(1); /* Leave CPU for others. */
+        }
+
+        return 0;  /* Not reached, but needed to shut gcc up. */
 }
 
 /*
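The flag handoff in rcu_kthread() is the whole synchronization story: the producer sets have_rcu_kthread_work and wakes the queue; the consumer snapshots the flag and replaces it with rcu_boost()'s verdict with irqs off, so a wakeup from an interrupt handler cannot slip between the test and the clear. A userspace pthread model of the same shape (a sketch with invented names; the mutex stands in for irq-off, and this model shuts down where the kernel thread never does):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int have_work;     /* Models have_rcu_kthread_work. */
    static int shutting_down;

    /* Producer side: models invoke_rcu_kthread(). */
    static void invoke_worker(void)
    {
        pthread_mutex_lock(&lock);
        have_work = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
    }

    /* Consumer side: models rcu_kthread()'s wait/snapshot/clear loop. */
    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) {
            pthread_mutex_lock(&lock);
            /* Re-test the predicate, as wait_event_interruptible() does,
             * so a wakeup that raced ahead of the wait is not lost. */
            while (!have_work && !shutting_down)
                pthread_cond_wait(&cond, &lock);
            if (shutting_down) {
                pthread_mutex_unlock(&lock);
                return NULL;
            }
            have_work = 0; /* Snapshot-and-clear, done under irq-off in the kernel. */
            pthread_mutex_unlock(&lock);
            puts("processing callbacks");
        }
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        invoke_worker();
        for (;;) { /* Crude wait until the work item was consumed. */
            pthread_mutex_lock(&lock);
            int consumed = !have_work;
            pthread_mutex_unlock(&lock);
            if (consumed)
                break;
        }
        pthread_mutex_lock(&lock);
        shutting_down = 1;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
    }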
@@ -219,19 +256,20 @@ static void __call_rcu(struct rcu_head *head,
         local_irq_save(flags);
         *rcp->curtail = head;
         rcp->curtail = &head->next;
+        RCU_TRACE(rcp->qlen++);
         local_irq_restore(flags);
 }
 
 /*
- * Post an RCU callback to be invoked after the end of an RCU grace
+ * Post an RCU callback to be invoked after the end of an RCU-sched grace
  * period.  But since we have but one CPU, that would be after any
  * quiescent state.
  */
-void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
         __call_rcu(head, func, &rcu_sched_ctrlblk);
 }
-EXPORT_SYMBOL_GPL(call_rcu);
+EXPORT_SYMBOL_GPL(call_rcu_sched);
 
 /*
  * Post an RCU bottom-half callback to be invoked after any subsequent
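Callers of call_rcu_sched() embed the rcu_head in their own structure and recover the enclosing object inside the callback via container_of(). A userspace model of that pattern (the struct names are invented for illustration; in the kernel the marked line would be the real call_rcu_sched()):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rcu_head_model {
        struct rcu_head_model *next;
        void (*func)(struct rcu_head_model *rcu);
    };

    struct foo {
        int data;
        struct rcu_head_model rcu; /* Embedded callback head. */
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void foo_reclaim(struct rcu_head_model *rcu)
    {
        struct foo *fp = container_of(rcu, struct foo, rcu);

        printf("freeing foo with data=%d\n", fp->data);
        free(fp);
    }

    int main(void)
    {
        struct foo *fp = malloc(sizeof(*fp));

        fp->data = 42;
        /* Kernel: call_rcu_sched(&fp->rcu, foo_reclaim); here we invoke the
         * callback directly to show recovery of the enclosing structure. */
        foo_reclaim(&fp->rcu);
        return 0;
    }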
@@ -243,20 +281,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
 
-void rcu_barrier(void)
-{
-        struct rcu_synchronize rcu;
-
-        init_rcu_head_on_stack(&rcu.head);
-        init_completion(&rcu.completion);
-        /* Will wake me after RCU finished. */
-        call_rcu(&rcu.head, wakeme_after_rcu);
-        /* Wait for it. */
-        wait_for_completion(&rcu.completion);
-        destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
 void rcu_barrier_bh(void)
 {
         struct rcu_synchronize rcu;
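The rcu_barrier() removed here and the surviving rcu_barrier_*() variants all use one trick: queue a callback whose only job is to signal a completion (wakeme_after_rcu), then sleep on it; since this single CPU invokes callbacks in queue order, its invocation means every earlier callback has already run. A pthread model of that completion handshake (invented names; a mutex/condvar pair stands in for struct completion):

    #include <pthread.h>
    #include <stdio.h>

    struct completion_model {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static struct completion_model barrier_done = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
    };

    static void complete_model(struct completion_model *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion_model(struct completion_model *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* Stands in for the callback processor running queued callbacks in order. */
    static void *callback_thread(void *arg)
    {
        (void)arg;
        puts("earlier callbacks run first");
        complete_model(&barrier_done); /* Models wakeme_after_rcu(). */
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, callback_thread, NULL);
        wait_for_completion_model(&barrier_done); /* Models rcu_barrier(). */
        puts("barrier complete: all prior callbacks have run");
        pthread_join(tid, NULL);
        return 0;
    }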
@@ -285,9 +309,16 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-void __init rcu_init(void)
+/*
+ * Spawn the kthread that invokes RCU callbacks.
+ */
+static int __init rcu_spawn_kthreads(void)
 {
-        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
-}
+        struct sched_param sp;
 
-#include "rcutiny_plugin.h"
+        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
+        sp.sched_priority = RCU_BOOST_PRIO;
+        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
+        return 0;
+}
+early_initcall(rcu_spawn_kthreads);
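The freshly spawned kthread is promoted to SCHED_FIFO at RCU_BOOST_PRIO so that callback invocation, and priority boosting, cannot be starved by ordinary tasks. The userspace analogue of that promotion (a sketch; the setschedparam call needs real-time privileges to succeed):

    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>

    static void *worker(void *arg)
    {
        (void)arg;
        return NULL; /* Real work would go here. */
    }

    int main(void)
    {
        pthread_t tid;
        struct sched_param sp = { .sched_priority = 1 };
        int err;

        pthread_create(&tid, NULL, worker, NULL);
        /* Mirrors sched_setscheduler_nocheck(task, SCHED_FIFO, &sp). */
        err = pthread_setschedparam(tid, SCHED_FIFO, &sp);
        if (err != 0)
            fprintf(stderr, "pthread_setschedparam: %s\n", strerror(err));
        pthread_join(tid, NULL);
        return 0;
    }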