author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>   2009-10-07 00:48:16 -0400
committer  Ingo Molnar <mingo@elte.hu>                     2009-10-07 02:11:20 -0400
commit     d0ec774cb2599c858be9d923bb873cf6697520d8
tree       0897fd843622033a6db6ab43167e47c3236aa22d /kernel
parent     322a2c100a8998158445599ea437fb556aa95b11
rcu: Move rcu_barrier() to rcutree
Move the existing rcu_barrier() implementation to rcutree.c,
consistent with the fact that the rcu_barrier() implementation is
tied quite tightly to the RCU implementation.
This opens the way to simplify and fix rcutree.c's rcu_barrier()
implementation in a later patch.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12548908982563-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcupdate.c  120
-rw-r--r--  kernel/rcutree.c   119
2 files changed, 120 insertions, 119 deletions
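
The hunks below move this machinery verbatim, so nothing in the diff itself explains how the barrier works. As background, the completion-counting scheme that _rcu_barrier() relies on can be sketched in ordinary userspace C. The following is only a minimal, illustrative analogue (plain pthreads, not kernel code, and every identifier in it is hypothetical): the count starts at 1 so the barrier cannot complete before every "CPU" has added its own reference, and that initial reference is dropped only after the equivalent of on_each_cpu(..., 1) has run the registration step everywhere, modelled here by an explicit registration wait.

/*
 * Userspace sketch of the counting pattern used by _rcu_barrier().
 * barrier_count plays the role of rcu_barrier_cpu_count; the condition
 * variable stands in for the rcu_barrier_completion completion.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int barrier_count;	/* cf. rcu_barrier_cpu_count */
static int registered;		/* how many workers have run their "IPI" step */

static void barrier_callback(void)	/* cf. rcu_barrier_callback() */
{
	pthread_mutex_lock(&lock);
	if (--barrier_count == 0)		/* cf. atomic_dec_and_test() */
		pthread_cond_broadcast(&cond);	/* cf. complete() */
	pthread_mutex_unlock(&lock);
}

static void *cpu_thread(void *arg)
{
	(void)arg;

	/* cf. rcu_barrier_func(): take a reference before doing the work. */
	pthread_mutex_lock(&lock);
	barrier_count++;			/* cf. atomic_inc() */
	registered++;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	/* ...queueing and invocation of the RCU callback would go here... */
	barrier_callback();
	return NULL;
}

int main(void)
{
	pthread_t tid[NCPUS];
	int i;

	barrier_count = 1;			/* the extra initial reference */
	for (i = 0; i < NCPUS; i++)
		pthread_create(&tid[i], NULL, cpu_thread, NULL);

	/* Stand-in for on_each_cpu(..., 1): wait until every CPU registered. */
	pthread_mutex_lock(&lock);
	while (registered < NCPUS)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	barrier_callback();			/* drop the initial reference */

	pthread_mutex_lock(&lock);
	while (barrier_count != 0)		/* cf. wait_for_completion() */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NCPUS; i++)
		pthread_join(tid[i], NULL);
	printf("all callbacks have completed\n");
	return 0;
}

The same reasoning is spelled out in the block comment inside _rcu_barrier() in the hunks below: only after every CPU has incremented the count is it safe to drop the initial reference, otherwise the barrier could appear to complete before the slowest CPU had even registered its callback.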
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index e43242274466..400183346ad2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -53,16 +53,8 @@ struct lockdep_map rcu_lock_map =
 EXPORT_SYMBOL_GPL(rcu_lock_map);
 #endif
 
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -165,120 +157,10 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static void rcu_barrier_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-}
-
-/*
- * Called with preemption disabled, and from cross-cpu IRQ context.
- */
-static void rcu_barrier_func(void *type)
-{
-	int cpu = smp_processor_id();
-	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
-	void (*call_rcu_func)(struct rcu_head *head,
-			      void (*func)(struct rcu_head *head));
-
-	atomic_inc(&rcu_barrier_cpu_count);
-	call_rcu_func = type;
-	call_rcu_func(head, rcu_barrier_callback);
-}
-
-static inline void wait_migrated_callbacks(void)
-{
-	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-	smp_mb(); /* In case we didn't sleep. */
-}
-
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
-static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
-					       void (*func)(struct rcu_head *head)))
-{
-	BUG_ON(in_interrupt());
-	/* Take cpucontrol mutex to protect against CPU hotplug */
-	mutex_lock(&rcu_barrier_mutex);
-	init_completion(&rcu_barrier_completion);
-	/*
-	 * Initialize rcu_barrier_cpu_count to 1, then invoke
-	 * rcu_barrier_func() on each CPU, so that each CPU also has
-	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
-	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
-	 * might complete its grace period before all of the other CPUs
-	 * did their increment, causing this function to return too
-	 * early.
-	 */
-	atomic_set(&rcu_barrier_cpu_count, 1);
-	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
-	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
-		complete(&rcu_barrier_completion);
-	wait_for_completion(&rcu_barrier_completion);
-	mutex_unlock(&rcu_barrier_mutex);
-	wait_migrated_callbacks();
-}
-
-/**
- * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
- */
-void rcu_barrier(void)
-{
-	_rcu_barrier(call_rcu);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier);
-
-/**
- * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
- */
-void rcu_barrier_bh(void)
-{
-	_rcu_barrier(call_rcu_bh);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-/**
- * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
- */
-void rcu_barrier_sched(void)
-{
-	_rcu_barrier(call_rcu_sched);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-static void rcu_migrate_callback(struct rcu_head *notused)
-{
-	if (atomic_dec_and_test(&rcu_migrate_type_count))
-		wake_up(&rcu_migrate_wq);
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
-	rcu_cpu_notify(self, action, hcpu);
-	if (action == CPU_DYING) {
-		/*
-		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
-		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
-		 * returns, all online cpus have queued rcu_barrier_func(),
-		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
-		 *
-		 * These callbacks ensure _rcu_barrier() waits for all
-		 * RCU callbacks of the specified type to complete.
-		 */
-		atomic_set(&rcu_migrate_type_count, 3);
-		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
-		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
-		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
-	} else if (action == CPU_DOWN_PREPARE) {
-		/* Don't need to wait until next removal operation. */
-		/* rcu_migrate_head is protected by cpu_add_remove_lock */
-		wait_migrated_callbacks();
-	}
-
-	return NOTIFY_OK;
+	return rcu_cpu_notify(self, action, hcpu);
 }
 
 void __init rcu_init(void)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e2e272b5c277..0108570a192c 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1363,6 +1363,103 @@ int rcu_needs_cpu(int cpu)
 	rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+static inline void wait_migrated_callbacks(void)
+{
+	wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+	smp_mb(); /* In case we didn't sleep. */
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take cpucontrol mutex to protect against CPU hotplug */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+	wait_migrated_callbacks();
+}
+
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(call_rcu);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
+static void rcu_migrate_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_migrate_type_count))
+		wake_up(&rcu_migrate_wq);
+}
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
@@ -1459,6 +1556,28 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		/* Don't need to wait until next removal operation. */
+		/* rcu_migrate_head is protected by cpu_add_remove_lock */
+		wait_migrated_callbacks();
+		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func(),
+		 * and the dead cpu(if it exist) queues rcu_migrate_callback()s.
+		 *
+		 * These callbacks ensure _rcu_barrier() waits for all
+		 * RCU callbacks of the specified type to complete.
+		 */
+		atomic_set(&rcu_migrate_type_count, 3);
+		call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
+		call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
+		call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED: