diff options
author | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2009-10-07 00:48:16 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-10-07 02:11:20 -0400 |
commit | d0ec774cb2599c858be9d923bb873cf6697520d8 (patch) | |
tree | 0897fd843622033a6db6ab43167e47c3236aa22d /kernel/rcupdate.c | |
parent | 322a2c100a8998158445599ea437fb556aa95b11 (diff) |
rcu: Move rcu_barrier() to rcutree
Move the existing rcu_barrier() implementation to rcutree.c,
consistent with the fact that the rcu_barrier() implementation is
tied quite tightly to the RCU implementation.
This opens the way to simplify and fix rcutree.c's rcu_barrier()
implementation in a later patch.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12548908982563-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/rcupdate.c')
-rw-r--r-- | kernel/rcupdate.c | 120 |
1 file changed, 1 insertion(+), 119 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index e43242274466..400183346ad2 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -53,16 +53,8 @@ struct lockdep_map rcu_lock_map = | |||
53 | EXPORT_SYMBOL_GPL(rcu_lock_map); | 53 | EXPORT_SYMBOL_GPL(rcu_lock_map); |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL}; | ||
57 | static atomic_t rcu_barrier_cpu_count; | ||
58 | static DEFINE_MUTEX(rcu_barrier_mutex); | ||
59 | static struct completion rcu_barrier_completion; | ||
60 | int rcu_scheduler_active __read_mostly; | 56 | int rcu_scheduler_active __read_mostly; |
61 | 57 | ||
62 | static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0); | ||
63 | static struct rcu_head rcu_migrate_head[3]; | ||
64 | static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq); | ||
65 | |||
66 | /* | 58 | /* |
67 | * Awaken the corresponding synchronize_rcu() instance now that a | 59 | * Awaken the corresponding synchronize_rcu() instance now that a |
68 | * grace period has elapsed. | 60 | * grace period has elapsed. |
@@ -165,120 +157,10 @@ void synchronize_rcu_bh(void) | |||
165 | } | 157 | } |
166 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); | 158 | EXPORT_SYMBOL_GPL(synchronize_rcu_bh); |
167 | 159 | ||
168 | static void rcu_barrier_callback(struct rcu_head *notused) | ||
169 | { | ||
170 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
171 | complete(&rcu_barrier_completion); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * Called with preemption disabled, and from cross-cpu IRQ context. | ||
176 | */ | ||
177 | static void rcu_barrier_func(void *type) | ||
178 | { | ||
179 | int cpu = smp_processor_id(); | ||
180 | struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu); | ||
181 | void (*call_rcu_func)(struct rcu_head *head, | ||
182 | void (*func)(struct rcu_head *head)); | ||
183 | |||
184 | atomic_inc(&rcu_barrier_cpu_count); | ||
185 | call_rcu_func = type; | ||
186 | call_rcu_func(head, rcu_barrier_callback); | ||
187 | } | ||
188 | |||
189 | static inline void wait_migrated_callbacks(void) | ||
190 | { | ||
191 | wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count)); | ||
192 | smp_mb(); /* In case we didn't sleep. */ | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Orchestrate the specified type of RCU barrier, waiting for all | ||
197 | * RCU callbacks of the specified type to complete. | ||
198 | */ | ||
199 | static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head, | ||
200 | void (*func)(struct rcu_head *head))) | ||
201 | { | ||
202 | BUG_ON(in_interrupt()); | ||
203 | /* Take cpucontrol mutex to protect against CPU hotplug */ | ||
204 | mutex_lock(&rcu_barrier_mutex); | ||
205 | init_completion(&rcu_barrier_completion); | ||
206 | /* | ||
207 | * Initialize rcu_barrier_cpu_count to 1, then invoke | ||
208 | * rcu_barrier_func() on each CPU, so that each CPU also has | ||
209 | * incremented rcu_barrier_cpu_count. Only then is it safe to | ||
210 | * decrement rcu_barrier_cpu_count -- otherwise the first CPU | ||
211 | * might complete its grace period before all of the other CPUs | ||
212 | * did their increment, causing this function to return too | ||
213 | * early. | ||
214 | */ | ||
215 | atomic_set(&rcu_barrier_cpu_count, 1); | ||
216 | on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1); | ||
217 | if (atomic_dec_and_test(&rcu_barrier_cpu_count)) | ||
218 | complete(&rcu_barrier_completion); | ||
219 | wait_for_completion(&rcu_barrier_completion); | ||
220 | mutex_unlock(&rcu_barrier_mutex); | ||
221 | wait_migrated_callbacks(); | ||
222 | } | ||
223 | |||
224 | /** | ||
225 | * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. | ||
226 | */ | ||
227 | void rcu_barrier(void) | ||
228 | { | ||
229 | _rcu_barrier(call_rcu); | ||
230 | } | ||
231 | EXPORT_SYMBOL_GPL(rcu_barrier); | ||
232 | |||
233 | /** | ||
234 | * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. | ||
235 | */ | ||
236 | void rcu_barrier_bh(void) | ||
237 | { | ||
238 | _rcu_barrier(call_rcu_bh); | ||
239 | } | ||
240 | EXPORT_SYMBOL_GPL(rcu_barrier_bh); | ||
241 | |||
242 | /** | ||
243 | * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. | ||
244 | */ | ||
245 | void rcu_barrier_sched(void) | ||
246 | { | ||
247 | _rcu_barrier(call_rcu_sched); | ||
248 | } | ||
249 | EXPORT_SYMBOL_GPL(rcu_barrier_sched); | ||
250 | |||
251 | static void rcu_migrate_callback(struct rcu_head *notused) | ||
252 | { | ||
253 | if (atomic_dec_and_test(&rcu_migrate_type_count)) | ||
254 | wake_up(&rcu_migrate_wq); | ||
255 | } | ||
256 | |||
257 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, | 160 | static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self, |
258 | unsigned long action, void *hcpu) | 161 | unsigned long action, void *hcpu) |
259 | { | 162 | { |
260 | rcu_cpu_notify(self, action, hcpu); | 163 | return rcu_cpu_notify(self, action, hcpu); |
261 | if (action == CPU_DYING) { | ||
262 | /* | ||
263 | * preempt_disable() in on_each_cpu() prevents stop_machine(), | ||
264 | * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);" | ||
265 | * returns, all online cpus have queued rcu_barrier_func(), | ||
266 | * and the dead cpu(if it exist) queues rcu_migrate_callback()s. | ||
267 | * | ||
268 | * These callbacks ensure _rcu_barrier() waits for all | ||
269 | * RCU callbacks of the specified type to complete. | ||
270 | */ | ||
271 | atomic_set(&rcu_migrate_type_count, 3); | ||
272 | call_rcu_bh(rcu_migrate_head, rcu_migrate_callback); | ||
273 | call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback); | ||
274 | call_rcu(rcu_migrate_head + 2, rcu_migrate_callback); | ||
275 | } else if (action == CPU_DOWN_PREPARE) { | ||
276 | /* Don't need to wait until next removal operation. */ | ||
277 | /* rcu_migrate_head is protected by cpu_add_remove_lock */ | ||
278 | wait_migrated_callbacks(); | ||
279 | } | ||
280 | |||
281 | return NOTIFY_OK; | ||
282 | } | 164 | } |
283 | 165 | ||
284 | void __init rcu_init(void) | 166 | void __init rcu_init(void) |