path: root/net/core/flow.c
author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2016-11-03 10:50:05 -0400
committer  Thomas Gleixner <tglx@linutronix.de>               2016-11-09 17:45:28 -0500
commit     a4fc1bfc42062e8bc7b2271a90d17403b096ce5d (patch)
tree       e2108987a5e90ab5ad65629a0695a04aad3f4f83 /net/core/flow.c
parent     f0bf90def3528cebed45ebd81d9b5d0fa17d7422 (diff)
net/flowcache: Convert to hotplug state machine
Install the callbacks via the state machine. Use multi state support to
avoid custom list handling for the multiple instances.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Steffen Klassert <steffen.klassert@secunet.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: netdev@vger.kernel.org
Cc: rt@linutronix.de
Cc: "David S. Miller" <davem@davemloft.net>
Link: http://lkml.kernel.org/r/20161103145021.28528-10-bigeasy@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
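For context, the multi-instance hotplug API used in this patch follows a common shape: the state's callbacks are installed once with cpuhp_setup_state_multi(), and every object that needs per-CPU setup links itself in through an embedded hlist_node. The sketch below is illustrative only; struct demo_cache, demo_cpu_prepare(), demo_cpu_dead() and the register/unregister helpers are hypothetical names, while the cpuhp_*() calls, CPUHP_NET_FLOW_PREPARE and the "net/flow:prepare" string are the ones this patch actually uses.

/*
 * Illustrative sketch of the multi-instance hotplug pattern; the demo_*
 * names are hypothetical. Only the cpuhp_*() API, CPUHP_NET_FLOW_PREPARE
 * and the state name are taken from the patch.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct demo_cache {
	struct hlist_node node;		/* links this instance into the cpuhp state */
	int __percpu *counters;		/* some per-CPU data owned by the instance */
};

/*
 * Startup callback: runs for @cpu when it comes up, and for the CPUs that
 * are already up at the time the instance is added.
 */
static int demo_cpu_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct demo_cache *dc = hlist_entry_safe(node, struct demo_cache, node);

	*per_cpu_ptr(dc->counters, cpu) = 0;	/* (re)initialise this CPU's slot */
	return 0;
}

/* Teardown callback: runs after @cpu has gone down. */
static int demo_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct demo_cache *dc = hlist_entry_safe(node, struct demo_cache, node);

	*per_cpu_ptr(dc->counters, cpu) = 0;	/* drop whatever @cpu held */
	return 0;
}

/* Once at boot: install the callbacks for the multi-instance state. */
static int __init demo_hp_init(void)
{
	return cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE, "net/flow:prepare",
				       demo_cpu_prepare, demo_cpu_dead);
}

/*
 * Per instance: add/remove its node; the hotplug core walks all registered
 * instances on every CPU up/down event.
 */
static int demo_instance_register(struct demo_cache *dc)
{
	return cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &dc->node);
}

static void demo_instance_unregister(struct demo_cache *dc)
{
	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &dc->node);
}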
Diffstat (limited to 'net/core/flow.c')
-rw-r--r--  net/core/flow.c | 60
1 file changed, 26 insertions(+), 34 deletions(-)
diff --git a/net/core/flow.c b/net/core/flow.c
index 3937b1b68d5b..841fd7f87b30 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -419,28 +419,20 @@ static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 	return 0;
 }
 
-static int flow_cache_cpu(struct notifier_block *nfb,
-			  unsigned long action,
-			  void *hcpu)
+static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
 {
-	struct flow_cache *fc = container_of(nfb, struct flow_cache,
-						hotcpu_notifier);
-	int res, cpu = (unsigned long) hcpu;
+	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
+
+	return flow_cache_cpu_prepare(fc, cpu);
+}
+
+static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
 	struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		res = flow_cache_cpu_prepare(fc, cpu);
-		if (res)
-			return notifier_from_errno(res);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		__flow_cache_shrink(fc, fcp, 0);
-		break;
-	}
-	return NOTIFY_OK;
+	__flow_cache_shrink(fc, fcp, 0);
+	return 0;
 }
 
 int flow_cache_init(struct net *net)
@@ -467,18 +459,8 @@ int flow_cache_init(struct net *net)
 	if (!fc->percpu)
 		return -ENOMEM;
 
-	cpu_notifier_register_begin();
-
-	for_each_online_cpu(i) {
-		if (flow_cache_cpu_prepare(fc, i))
-			goto err;
-	}
-	fc->hotcpu_notifier = (struct notifier_block){
-		.notifier_call = flow_cache_cpu,
-	};
-	__register_hotcpu_notifier(&fc->hotcpu_notifier);
-
-	cpu_notifier_register_done();
+	if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
+		goto err;
 
 	setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
 		    (unsigned long) fc);
@@ -494,8 +476,6 @@ err:
 		fcp->hash_table = NULL;
 	}
 
-	cpu_notifier_register_done();
-
 	free_percpu(fc->percpu);
 	fc->percpu = NULL;
 
@@ -509,7 +489,8 @@ void flow_cache_fini(struct net *net)
 	struct flow_cache *fc = &net->xfrm.flow_cache_global;
 
 	del_timer_sync(&fc->rnd_timer);
-	unregister_hotcpu_notifier(&fc->hotcpu_notifier);
+
+	cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);
 
 	for_each_possible_cpu(i) {
 		struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);
@@ -521,3 +502,14 @@ void flow_cache_fini(struct net *net)
 	fc->percpu = NULL;
 }
 EXPORT_SYMBOL(flow_cache_fini);
+
+void __init flow_cache_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
+				      "net/flow:prepare",
+				      flow_cache_cpu_up_prep,
+				      flow_cache_cpu_dead);
+	WARN_ON(ret < 0);
+}
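Note on the conversion: because CPUHP_NET_FLOW_PREPARE is a multi-instance state, flow_cache_hp_init() installs the callbacks exactly once at boot, and each flow cache only has to add its own &fc->node. cpuhp_state_add_instance() invokes the prepare callback for the CPUs that are already up, and the hotplug core walks all registered instances on later CPU up/down events, which is what makes the old for_each_online_cpu() priming loop, the per-cache notifier_block and the cpu_notifier_register_begin()/done() bracketing unnecessary.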