aboutsummaryrefslogtreecommitdiffstats
path: root/net/core/flow.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-12 22:25:04 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-12 22:25:04 -0500
commite71c3978d6f97659f6c3ee942c3e581299e4adf2 (patch)
tree0b58c76a20a79f5f5d9ada5731aa1dbb149fbcdc /net/core/flow.c
parentf797484c26300fec842fb669c69a3a60eb66e240 (diff)
parentb18cc3de00ec3442cf40ac60787dbe0703b99e24 (diff)
Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull smp hotplug updates from Thomas Gleixner: "This is the final round of converting the notifier mess to the state machine. The removal of the notifiers and the related infrastructure will happen around rc1, as there are conversions outstanding in other trees. The whole exercise removed about 2000 lines of code in total and in course of the conversion several dozen bugs got fixed. The new mechanism allows to test almost every hotplug step standalone, so usage sites can exercise all transitions extensively. There is more room for improvement, like integrating all the pointlessly different architecture mechanisms of synchronizing, setting cpus online etc into the core code" * 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (60 commits) tracing/rb: Init the CPU mask on allocation soc/fsl/qbman: Convert to hotplug state machine soc/fsl/qbman: Convert to hotplug state machine zram: Convert to hotplug state machine KVM/PPC/Book3S HV: Convert to hotplug state machine arm64/cpuinfo: Convert to hotplug state machine arm64/cpuinfo: Make hotplug notifier symmetric mm/compaction: Convert to hotplug state machine iommu/vt-d: Convert to hotplug state machine mm/zswap: Convert pool to hotplug state machine mm/zswap: Convert dst-mem to hotplug state machine mm/zsmalloc: Convert to hotplug state machine mm/vmstat: Convert to hotplug state machine mm/vmstat: Avoid on each online CPU loops mm/vmstat: Drop get_online_cpus() from init_cpu_node_state/vmstat_cpu_dead() tracing/rb: Convert to hotplug state machine oprofile/nmi timer: Convert to hotplug state machine net/iucv: Use explicit clean up labels in iucv_init() x86/pci/amd-bus: Convert to hotplug state machine x86/oprofile/nmi: Convert to hotplug state machine ...
Diffstat (limited to 'net/core/flow.c')
-rw-r--r--net/core/flow.c60
1 file changed, 26 insertions, 34 deletions
diff --git a/net/core/flow.c b/net/core/flow.c
index 18e8893d4be5..f765c11d8df5 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -417,28 +417,20 @@ static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 	return 0;
 }
 
-static int flow_cache_cpu(struct notifier_block *nfb,
-			  unsigned long action,
-			  void *hcpu)
+static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
+{
+	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
+
+	return flow_cache_cpu_prepare(fc, cpu);
+}
+
+static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
 {
-	struct flow_cache *fc = container_of(nfb, struct flow_cache,
-						hotcpu_notifier);
-	int res, cpu = (unsigned long) hcpu;
+	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		res = flow_cache_cpu_prepare(fc, cpu);
-		if (res)
-			return notifier_from_errno(res);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		__flow_cache_shrink(fc, fcp, 0);
-		break;
-	}
-	return NOTIFY_OK;
+	__flow_cache_shrink(fc, fcp, 0);
+	return 0;
 }
 
 int flow_cache_init(struct net *net)
@@ -465,18 +457,8 @@ int flow_cache_init(struct net *net)
 	if (!fc->percpu)
 		return -ENOMEM;
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(i) {
-		if (flow_cache_cpu_prepare(fc, i))
-			goto err;
-	}
-	fc->hotcpu_notifier = (struct notifier_block){
-		.notifier_call = flow_cache_cpu,
-	};
-	__register_hotcpu_notifier(&fc->hotcpu_notifier);
-
-	cpu_notifier_register_done();
+	if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
+		goto err;
 
 	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
 		    (unsigned long) fc);
@@ -492,8 +474,6 @@ err:
 		fcp->hash_table = NULL;
 	}
 
-	cpu_notifier_register_done();
-
 	free_percpu(fc->percpu);
 	fc->percpu = NULL;
 
@@ -507,7 +487,8 @@ void flow_cache_fini(struct net *net)
 	struct flow_cache *fc = &net->xfrm.flow_cache_global;
 
 	del_timer_sync(&fc->rnd_timer);
-	unregister_hotcpu_notifier(&fc->hotcpu_notifier);
+
+	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);
 
 	for_each_possible_cpu(i) {
 		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
@@ -519,3 +500,14 @@ void flow_cache_fini(struct net *net)
 	fc->percpu = NULL;
 }
 EXPORT_SYMBOL(flow_cache_fini);
+
+void __init flow_cache_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
+				      "net/flow:prepare",
+				      flow_cache_cpu_up_prep,
+				      flow_cache_cpu_dead);
+	WARN_ON(ret < 0);
+}