aboutsummaryrefslogtreecommitdiffstats
path: root/net/core
diff options
context:
space:
mode:
authorSrivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>2014-03-10 16:42:51 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-03-20 08:43:48 -0400
commite30a293e8ad7e6048d6d88bcc114094f964bd67b (patch)
tree143269e0c4f0d4df9a3c9bef40964afab32fe3db /net/core
parent576378249c8e0a020aafeaa702c834dff81dd596 (diff)
net/core/flow.c: Fix CPU hotplug callback registration
Subsystems that want to register CPU hotplug callbacks, as well as perform
initialization for the CPUs that are already online, often do it as shown
below:

	get_online_cpus();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	register_cpu_notifier(&foobar_cpu_notifier);

	put_online_cpus();

This is wrong, since it is prone to ABBA deadlocks involving the
cpu_add_remove_lock and the cpu_hotplug.lock (when running concurrently
with CPU hotplug operations).

Instead, the correct and race-free way of performing the callback
registration is:

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		init_cpu(cpu);

	/* Note the use of the double underscored version of the API */
	__register_cpu_notifier(&foobar_cpu_notifier);

	cpu_notifier_register_done();

Fix the code in net/core/flow.c by using this latter form of callback
registration.

Cc: Li RongQing <roy.qing.li@gmail.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Ingo Molnar <mingo@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'net/core')
-rw-r--r-- net/core/flow.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index dfa602ceb8cd..9a2151f5d593 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -456,6 +456,8 @@ static int __init flow_cache_init(struct flow_cache *fc)
 	if (!fc->percpu)
 		return -ENOMEM;
 
+	cpu_notifier_register_begin();
+
 	for_each_online_cpu(i) {
 		if (flow_cache_cpu_prepare(fc, i))
 			goto err;
@@ -463,7 +465,9 @@ static int __init flow_cache_init(struct flow_cache *fc)
 	fc->hotcpu_notifier = (struct notifier_block){
 		.notifier_call = flow_cache_cpu,
 	};
-	register_hotcpu_notifier(&fc->hotcpu_notifier);
+	__register_hotcpu_notifier(&fc->hotcpu_notifier);
+
+	cpu_notifier_register_done();
 
 	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
 		    (unsigned long) fc);
@@ -479,6 +483,8 @@ err:
 		fcp->hash_table = NULL;
 	}
 
+	cpu_notifier_register_done();
+
 	free_percpu(fc->percpu);
 	fc->percpu = NULL;
 