aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2016-08-18 08:57:23 -0400
committerThomas Gleixner <tglx@linutronix.de>2016-09-06 12:30:22 -0400
commit84a3f4db039e7c4bfe8ae9bebdebdf2a4e09bf86 (patch)
tree3f25e23407794e51536e07ac0dbd241133f5021a
parentc4544dbc7a9bce3da6fa2361cd68cadb34e9221f (diff)
net/mvneta: Convert to hotplug state machine
Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: netdev@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160818125731.27256-9-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c232
-rw-r--r--include/linux/cpuhotplug.h1
2 files changed, 144 insertions, 89 deletions
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d41c28d00b57..b74548728fb5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -382,7 +382,8 @@ struct mvneta_port {
382 struct mvneta_rx_queue *rxqs; 382 struct mvneta_rx_queue *rxqs;
383 struct mvneta_tx_queue *txqs; 383 struct mvneta_tx_queue *txqs;
384 struct net_device *dev; 384 struct net_device *dev;
385 struct notifier_block cpu_notifier; 385 struct hlist_node node_online;
386 struct hlist_node node_dead;
386 int rxq_def; 387 int rxq_def;
387 /* Protect the access to the percpu interrupt registers, 388 /* Protect the access to the percpu interrupt registers,
388 * ensuring that the configuration remains coherent. 389 * ensuring that the configuration remains coherent.
@@ -574,6 +575,7 @@ struct mvneta_rx_queue {
574 int next_desc_to_proc; 575 int next_desc_to_proc;
575}; 576};
576 577
578static enum cpuhp_state online_hpstate;
577/* The hardware supports eight (8) rx queues, but we are only allowing 579/* The hardware supports eight (8) rx queues, but we are only allowing
578 * the first one to be used. Therefore, let's just allocate one queue. 580 * the first one to be used. Therefore, let's just allocate one queue.
579 */ 581 */
@@ -3311,101 +3313,104 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
3311 } 3313 }
3312}; 3314};
3313 3315
3314static int mvneta_percpu_notifier(struct notifier_block *nfb, 3316static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
3315 unsigned long action, void *hcpu)
3316{ 3317{
3317 struct mvneta_port *pp = container_of(nfb, struct mvneta_port, 3318 int other_cpu;
3318 cpu_notifier); 3319 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3319 int cpu = (unsigned long)hcpu, other_cpu; 3320 node_online);
3320 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 3321 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3321 3322
3322 switch (action) {
3323 case CPU_ONLINE:
3324 case CPU_ONLINE_FROZEN:
3325 case CPU_DOWN_FAILED:
3326 case CPU_DOWN_FAILED_FROZEN:
3327 spin_lock(&pp->lock);
3328 /* Configuring the driver for a new CPU while the
3329 * driver is stopping is racy, so just avoid it.
3330 */
3331 if (pp->is_stopped) {
3332 spin_unlock(&pp->lock);
3333 break;
3334 }
3335 netif_tx_stop_all_queues(pp->dev);
3336 3323
3337 /* We have to synchronise on tha napi of each CPU 3324 spin_lock(&pp->lock);
3338 * except the one just being waked up 3325 /*
3339 */ 3326 * Configuring the driver for a new CPU while the driver is
3340 for_each_online_cpu(other_cpu) { 3327 * stopping is racy, so just avoid it.
3341 if (other_cpu != cpu) { 3328 */
3342 struct mvneta_pcpu_port *other_port = 3329 if (pp->is_stopped) {
3343 per_cpu_ptr(pp->ports, other_cpu); 3330 spin_unlock(&pp->lock);
3331 return 0;
3332 }
3333 netif_tx_stop_all_queues(pp->dev);
3344 3334
3345 napi_synchronize(&other_port->napi); 3335 /*
3346 } 3336 * We have to synchronise on the napi of each CPU except the one
3337 * just being woken up
3338 */
3339 for_each_online_cpu(other_cpu) {
3340 if (other_cpu != cpu) {
3341 struct mvneta_pcpu_port *other_port =
3342 per_cpu_ptr(pp->ports, other_cpu);
3343
3344 napi_synchronize(&other_port->napi);
3347 } 3345 }
3346 }
3348 3347
3349 /* Mask all ethernet port interrupts */ 3348 /* Mask all ethernet port interrupts */
3350 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); 3349 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3351 napi_enable(&port->napi); 3350 napi_enable(&port->napi);
3352 3351
3352 /*
3353 * Enable per-CPU interrupts on the CPU that is
3354 * brought up.
3355 */
3356 mvneta_percpu_enable(pp);
3353 3357
3354 /* Enable per-CPU interrupts on the CPU that is 3358 /*
3355 * brought up. 3359 * Enable per-CPU interrupt on the one CPU we care
3356 */ 3360 * about.
3357 mvneta_percpu_enable(pp); 3361 */
3362 mvneta_percpu_elect(pp);
3358 3363
3359 /* Enable per-CPU interrupt on the one CPU we care 3364 /* Unmask all ethernet port interrupts */
3360 * about. 3365 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3361 */ 3366 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3362 mvneta_percpu_elect(pp); 3367 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3363 3368 MVNETA_CAUSE_LINK_CHANGE |
3364 /* Unmask all ethernet port interrupts */ 3369 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3365 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); 3370 netif_tx_start_all_queues(pp->dev);
3366 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 3371 spin_unlock(&pp->lock);
3367 MVNETA_CAUSE_PHY_STATUS_CHANGE | 3372 return 0;
3368 MVNETA_CAUSE_LINK_CHANGE | 3373}
3369 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3370 netif_tx_start_all_queues(pp->dev);
3371 spin_unlock(&pp->lock);
3372 break;
3373 case CPU_DOWN_PREPARE:
3374 case CPU_DOWN_PREPARE_FROZEN:
3375 netif_tx_stop_all_queues(pp->dev);
3376 /* Thanks to this lock we are sure that any pending
3377 * cpu election is done
3378 */
3379 spin_lock(&pp->lock);
3380 /* Mask all ethernet port interrupts */
3381 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3382 spin_unlock(&pp->lock);
3383 3374
3384 napi_synchronize(&port->napi); 3375static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
3385 napi_disable(&port->napi); 3376{
3386 /* Disable per-CPU interrupts on the CPU that is 3377 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3387 * brought down. 3378 node_online);
3388 */ 3379 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
3389 mvneta_percpu_disable(pp);
3390 3380
3391 break; 3381 /*
3392 case CPU_DEAD: 3382 * Thanks to this lock we are sure that any pending cpu election is
3393 case CPU_DEAD_FROZEN: 3383 * done.
3394 /* Check if a new CPU must be elected now this on is down */ 3384 */
3395 spin_lock(&pp->lock); 3385 spin_lock(&pp->lock);
3396 mvneta_percpu_elect(pp); 3386 /* Mask all ethernet port interrupts */
3397 spin_unlock(&pp->lock); 3387 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3398 /* Unmask all ethernet port interrupts */ 3388 spin_unlock(&pp->lock);
3399 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3400 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3401 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3402 MVNETA_CAUSE_LINK_CHANGE |
3403 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3404 netif_tx_start_all_queues(pp->dev);
3405 break;
3406 }
3407 3389
3408 return NOTIFY_OK; 3390 napi_synchronize(&port->napi);
3391 napi_disable(&port->napi);
3392 /* Disable per-CPU interrupts on the CPU that is brought down. */
3393 mvneta_percpu_disable(pp);
3394 return 0;
3395}
3396
3397static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
3398{
3399 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
3400 node_dead);
3401
3402 /* Check if a new CPU must be elected now this one is down */
3403 spin_lock(&pp->lock);
3404 mvneta_percpu_elect(pp);
3405 spin_unlock(&pp->lock);
3406 /* Unmask all ethernet port interrupts */
3407 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3408 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3409 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3410 MVNETA_CAUSE_LINK_CHANGE |
3411 MVNETA_CAUSE_PSC_SYNC_CHANGE);
3412 netif_tx_start_all_queues(pp->dev);
3413 return 0;
3409} 3414}
3410 3415
3411static int mvneta_open(struct net_device *dev) 3416static int mvneta_open(struct net_device *dev)
@@ -3442,7 +3447,15 @@ static int mvneta_open(struct net_device *dev)
3442 /* Register a CPU notifier to handle the case where our CPU 3447 /* Register a CPU notifier to handle the case where our CPU
3443 * might be taken offline. 3448 * might be taken offline.
3444 */ 3449 */
3445 register_cpu_notifier(&pp->cpu_notifier); 3450 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
3451 &pp->node_online);
3452 if (ret)
3453 goto err_free_irq;
3454
3455 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3456 &pp->node_dead);
3457 if (ret)
3458 goto err_free_online_hp;
3446 3459
3447 /* In default link is down */ 3460 /* In default link is down */
3448 netif_carrier_off(pp->dev); 3461 netif_carrier_off(pp->dev);
@@ -3450,15 +3463,19 @@ static int mvneta_open(struct net_device *dev)
3450 ret = mvneta_mdio_probe(pp); 3463 ret = mvneta_mdio_probe(pp);
3451 if (ret < 0) { 3464 if (ret < 0) {
3452 netdev_err(dev, "cannot probe MDIO bus\n"); 3465 netdev_err(dev, "cannot probe MDIO bus\n");
3453 goto err_free_irq; 3466 goto err_free_dead_hp;
3454 } 3467 }
3455 3468
3456 mvneta_start_dev(pp); 3469 mvneta_start_dev(pp);
3457 3470
3458 return 0; 3471 return 0;
3459 3472
3473err_free_dead_hp:
3474 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3475 &pp->node_dead);
3476err_free_online_hp:
3477 cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
3460err_free_irq: 3478err_free_irq:
3461 unregister_cpu_notifier(&pp->cpu_notifier);
3462 on_each_cpu(mvneta_percpu_disable, pp, true); 3479 on_each_cpu(mvneta_percpu_disable, pp, true);
3463 free_percpu_irq(pp->dev->irq, pp->ports); 3480 free_percpu_irq(pp->dev->irq, pp->ports);
3464err_cleanup_txqs: 3481err_cleanup_txqs:
@@ -3484,7 +3501,10 @@ static int mvneta_stop(struct net_device *dev)
3484 3501
3485 mvneta_stop_dev(pp); 3502 mvneta_stop_dev(pp);
3486 mvneta_mdio_remove(pp); 3503 mvneta_mdio_remove(pp);
3487 unregister_cpu_notifier(&pp->cpu_notifier); 3504
3505 cpuhp_state_remove_instance_nocalls(online_hpstate, &pp->node_online);
3506 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
3507 &pp->node_dead);
3488 on_each_cpu(mvneta_percpu_disable, pp, true); 3508 on_each_cpu(mvneta_percpu_disable, pp, true);
3489 free_percpu_irq(dev->irq, pp->ports); 3509 free_percpu_irq(dev->irq, pp->ports);
3490 mvneta_cleanup_rxqs(pp); 3510 mvneta_cleanup_rxqs(pp);
@@ -4024,7 +4044,6 @@ static int mvneta_probe(struct platform_device *pdev)
4024 err = of_property_read_string(dn, "managed", &managed); 4044 err = of_property_read_string(dn, "managed", &managed);
4025 pp->use_inband_status = (err == 0 && 4045 pp->use_inband_status = (err == 0 &&
4026 strcmp(managed, "in-band-status") == 0); 4046 strcmp(managed, "in-band-status") == 0);
4027 pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;
4028 4047
4029 pp->rxq_def = rxq_def; 4048 pp->rxq_def = rxq_def;
4030 4049
@@ -4227,7 +4246,42 @@ static struct platform_driver mvneta_driver = {
4227 }, 4246 },
4228}; 4247};
4229 4248
4230module_platform_driver(mvneta_driver); 4249static int __init mvneta_driver_init(void)
4250{
4251 int ret;
4252
4253 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
4254 mvneta_cpu_online,
4255 mvneta_cpu_down_prepare);
4256 if (ret < 0)
4257 goto out;
4258 online_hpstate = ret;
4259 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
4260 NULL, mvneta_cpu_dead);
4261 if (ret)
4262 goto err_dead;
4263
4264 ret = platform_driver_register(&mvneta_driver);
4265 if (ret)
4266 goto err;
4267 return 0;
4268
4269err:
4270 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4271err_dead:
4272 cpuhp_remove_multi_state(online_hpstate);
4273out:
4274 return ret;
4275}
4276module_init(mvneta_driver_init);
4277
4278static void __exit mvneta_driver_exit(void)
4279{
4280 platform_driver_unregister(&mvneta_driver);
4281 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
4282 cpuhp_remove_multi_state(online_hpstate);
4283}
4284module_exit(mvneta_driver_exit);
4231 4285
4232MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com"); 4286MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
4233MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); 4287MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index a421407a317f..332b39c21d2e 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -18,6 +18,7 @@ enum cpuhp_state {
18 CPUHP_SLUB_DEAD, 18 CPUHP_SLUB_DEAD,
19 CPUHP_MM_WRITEBACK_DEAD, 19 CPUHP_MM_WRITEBACK_DEAD,
20 CPUHP_SOFTIRQ_DEAD, 20 CPUHP_SOFTIRQ_DEAD,
21 CPUHP_NET_MVNETA_DEAD,
21 CPUHP_WORKQUEUE_PREP, 22 CPUHP_WORKQUEUE_PREP,
22 CPUHP_POWER_NUMA_PREPARE, 23 CPUHP_POWER_NUMA_PREPARE,
23 CPUHP_HRTIMERS_PREPARE, 24 CPUHP_HRTIMERS_PREPARE,