author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2009-08-15 12:53:48 -0400
committer Ingo Molnar <mingo@elte.hu>                      2009-08-15 13:02:08 -0400
commit    2e597558086dec36d5c33521a36e0f6b1bc3f3a7 (patch)
tree      ce8c6ced1c8a3b6bf6170a5894e1d7102e6ee9d4 /kernel
parent    799e64f05f4bfaad2bb3165cab95c8c992a1c296 (diff)
rcu: Simplify RCU CPU-hotplug notification
Use the new cpu_notifier() API to simplify RCU's CPU-hotplug notifiers, collapsing down to a single such notifier.

This makes it trivial to provide the notifier-ordering guarantee that rcu_barrier() depends on.

Also remove redundant open_softirq() calls from Hierarchical RCU notifier.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: josht@linux.vnet.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: hugh.dickins@tiscali.co.uk
Cc: benh@kernel.crashing.org
LKML-Reference: <12503552312510-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
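For context, a minimal userspace sketch (not kernel code) of the pattern this patch adopts: one combined hotplug callback that invokes the per-flavor RCU notifier before doing its rcu_barrier()-related work, so the ordering guarantee no longer depends on the priorities or registration order of two separately registered notifiers. The sim_* names below are invented stand-ins for rcu_cpu_notify() and rcu_barrier_cpu_hotplug() in the diff that follows.

/*
 * Minimal userspace sketch of the single-notifier pattern; the sim_*
 * names are illustrative only and do not exist in the kernel.
 */
#include <stdio.h>

#define SIM_CPU_UP_PREPARE	1
#define SIM_CPU_DYING		2

/* Per-flavor RCU setup; must run before any barrier-related bookkeeping. */
static void sim_rcu_cpu_notify(unsigned long action, long cpu)
{
	if (action == SIM_CPU_UP_PREPARE)
		printf("cpu %ld: per-CPU RCU data initialized\n", cpu);
}

/*
 * Single combined notifier: it chains to the per-flavor notifier first
 * and only then does its own work, so the ordering is fixed by the code
 * itself rather than by notifier registration.
 */
static void sim_rcu_barrier_cpu_hotplug(unsigned long action, long cpu)
{
	sim_rcu_cpu_notify(action, cpu);	/* always runs first */
	if (action == SIM_CPU_DYING)
		printf("cpu %ld: rcu_barrier() callbacks handled\n", cpu);
}

int main(void)
{
	long cpu;

	/* Boot-time pass over already-online CPUs, as rcu_init() now does. */
	for (cpu = 0; cpu < 4; cpu++)
		sim_rcu_barrier_cpu_hotplug(SIM_CPU_UP_PREPARE, cpu);

	/* A later hotplug event takes the same single path. */
	sim_rcu_barrier_cpu_hotplug(SIM_CPU_DYING, 2);
	return 0;
}

The same chaining appears in the kernel/rcupdate.c hunk below, where rcu_barrier_cpu_hotplug() calls rcu_cpu_notify() before its own CPU_DYING handling.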
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/rcupdate.c    16
-rw-r--r--  kernel/rcupreempt.c  25
-rw-r--r--  kernel/rcutree.c     17
3 files changed, 22 insertions(+), 36 deletions(-)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index eae29c25fb14..8df115600c2d 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -217,9 +217,13 @@ static void rcu_migrate_callback(struct rcu_head *notused)
 		wake_up(&rcu_migrate_wq);
 }
 
+extern int rcu_cpu_notify(struct notifier_block *self,
+			  unsigned long action, void *hcpu);
+
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 		unsigned long action, void *hcpu)
 {
+	rcu_cpu_notify(self, action, hcpu);
 	if (action == CPU_DYING) {
 		/*
 		 * preempt_disable() in on_each_cpu() prevents stop_machine(),
@@ -244,8 +248,18 @@ static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
 
 void __init rcu_init(void)
 {
+	int i;
+
 	__rcu_init();
-	hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
+	cpu_notifier(rcu_barrier_cpu_hotplug, 0);
+
+	/*
+	 * We don't need protection against CPU-hotplug here because
+	 * this is called early in boot, before either interrupts
+	 * or the scheduler are operational.
+	 */
+	for_each_online_cpu(i)
+		rcu_barrier_cpu_hotplug(NULL, CPU_UP_PREPARE, (void *)(long)i);
 }
 
 void rcu_scheduler_starting(void)
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index beb0e659adcc..9b87f5134ed7 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -1417,8 +1417,8 @@ int rcu_pending(int cpu)
 	return 0;
 }
 
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1439,10 +1439,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call = rcu_cpu_notify,
-};
-
 void __init __rcu_init(void)
 {
 	int cpu;
@@ -1471,23 +1467,6 @@ void __init __rcu_init(void)
 		rdp->waitschedtail = &rdp->waitschedlist;
 		rdp->rcu_sched_sleeping = 0;
 	}
-	register_cpu_notifier(&rcu_nb);
-
-	/*
-	 * We don't need protection against CPU-Hotplug here
-	 * since
-	 * a) If a CPU comes online while we are iterating over the
-	 * cpu_online_mask below, we would only end up making a
-	 * duplicate call to rcu_online_cpu() which sets the corresponding
-	 * CPU's mask in the rcu_cpu_online_map.
-	 *
-	 * b) A CPU cannot go offline at this point in time since the user
-	 * does not have access to the sysfs interface, nor do we
-	 * suspend the system.
-	 */
-	for_each_online_cpu(cpu)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long) cpu);
-
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f3e43274ed53..75762cddbe03 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1132,6 +1132,8 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 
+	WARN_ON_ONCE(rdp->beenonline == 0);
+
 	/*
 	 * If an RCU GP has gone long enough, go check for dyntick
 	 * idle CPUs and, if needed, send resched IPIs.
@@ -1416,14 +1418,13 @@ static void __cpuinit rcu_online_cpu(int cpu)
 {
 	rcu_init_percpu_data(cpu, &rcu_state);
 	rcu_init_percpu_data(cpu, &rcu_bh_state);
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 /*
  * Handle CPU online/offline notifcation events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -1532,10 +1533,6 @@ do { \
 	} \
 } while (0)
 
-static struct notifier_block __cpuinitdata rcu_nb = {
-	.notifier_call = rcu_cpu_notify,
-};
-
 void __init __rcu_init(void)
 {
 	int i;			/* All used by RCU_DATA_PTR_INIT(). */
@@ -1554,11 +1551,7 @@ void __init __rcu_init(void)
 	RCU_DATA_PTR_INIT(&rcu_bh_state, rcu_bh_data);
 	for_each_possible_cpu(i)
 		rcu_boot_init_percpu_data(i, &rcu_bh_state);
-
-	for_each_online_cpu(i)
-		rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, (void *)(long)i);
-	/* Register notifier for non-boot CPUs */
-	register_cpu_notifier(&rcu_nb);
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
 module_param(blimit, int, 0);