author      Jisheng Zhang <Jisheng.Zhang@synaptics.com>    2018-04-01 23:24:59 -0400
committer   David S. Miller <davem@davemloft.net>          2018-04-02 11:14:03 -0400
commit      1799cdd287ff09c6d34347409b0d4f464e197aea
tree        444f30d07de54c17548d100a745e969aae8e1953
parent      4a188a63afdffb5f62cccc508589c19e5297ed05
net: mvneta: improve suspend/resume
The current suspend/resume implementation reuses mvneta_open() and
mvneta_close(), but it could be optimized to take only the necessary
actions during suspend/resume.

One obvious problem with the current implementation is that after
hundreds of system suspend/resume cycles, the resume of mvneta can fail
due to fragmented DMA coherent memory. After this patch, the
unnecessary memory alloc/free is optimized out.
Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
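
The failure mode the message describes is worth spelling out: dma_alloc_coherent() typically draws from a limited coherent region (for example CMA), and freeing plus re-allocating the descriptor rings on every suspend/resume cycle can fragment that region until a later ring allocation fails. Below is a minimal sketch of the allocate-once pattern the patch moves to, with hypothetical helper and register names; it is not the actual mvneta code.

    /* Minimal sketch: allocate the coherent descriptor ring once and
     * keep it across suspend/resume; resume only re-programs registers.
     * Names (ring_alloc, ring_hw_init, ring_base_reg) are hypothetical.
     */
    #include <linux/dma-mapping.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    struct ring {
            void            *descs;         /* CPU view of the ring */
            dma_addr_t      descs_phys;     /* bus address given to the NIC */
            int             next_desc_to_proc;
    };

    /* Called once from the driver's ndo_open(): the only place that
     * touches the DMA coherent pool.
     */
    static int ring_alloc(struct device *dev, struct ring *r, size_t size)
    {
            r->descs = dma_alloc_coherent(dev, size, &r->descs_phys,
                                          GFP_KERNEL);
            return r->descs ? 0 : -ENOMEM;
    }

    /* Called from both ndo_open() and the resume hook: pure software
     * reset plus register writes, so a suspend/resume cycle allocates
     * nothing and the coherent pool cannot fragment however often it
     * runs.
     */
    static void ring_hw_init(struct ring *r, void __iomem *ring_base_reg)
    {
            r->next_desc_to_proc = 0;
            writel(lower_32_bits(r->descs_phys), ring_base_reg);
    }

In the diff below, mvneta_rxq_hw_init()/mvneta_txq_hw_init() play the role of ring_hw_init(): they reset next_desc_to_proc and re-program the controller against rings that mvneta_open() already allocated.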
-rw-r--r--    drivers/net/ethernet/marvell/mvneta.c    69
1 file changed, 62 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d4fbad235a79..17a904cc6a5e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4571,16 +4571,45 @@ static int mvneta_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int mvneta_suspend(struct device *device)
 {
+	int queue;
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
 
+	if (!netif_running(dev))
+		goto clean_exit;
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = true;
+		spin_unlock(&pp->lock);
+
+		cpuhp_state_remove_instance_nocalls(online_hpstate,
+						    &pp->node_online);
+		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						    &pp->node_dead);
+	}
+
 	rtnl_lock();
-	if (netif_running(dev))
-		mvneta_stop(dev);
+	mvneta_stop_dev(pp);
 	rtnl_unlock();
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		mvneta_rxq_drop_pkts(pp, rxq);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		mvneta_txq_hw_deinit(pp, txq);
+	}
+
+clean_exit:
 	netif_device_detach(dev);
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
+
 	return 0;
 }
 
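Read linearly, the suspend path after this patch looks as follows. This is reconstructed from the hunk above; the comments are editorial, not from the driver source.

    static int mvneta_suspend(struct device *device)
    {
            int queue;
            struct net_device *dev = dev_get_drvdata(device);
            struct mvneta_port *pp = netdev_priv(dev);

            if (!netif_running(dev))
                    goto clean_exit;

            /* Quiesce per-CPU processing and drop the CPU hotplug
             * hooks before touching the hardware (skipped on Armada
             * 3700, which does not use the per-CPU port logic).
             */
            if (!pp->neta_armada3700) {
                    spin_lock(&pp->lock);
                    pp->is_stopped = true;
                    spin_unlock(&pp->lock);

                    cpuhp_state_remove_instance_nocalls(online_hpstate,
                                                        &pp->node_online);
                    cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
                                                        &pp->node_dead);
            }

            /* Stop the datapath without tearing down queue memory,
             * unlike the mvneta_stop() call this replaces.
             */
            rtnl_lock();
            mvneta_stop_dev(pp);
            rtnl_unlock();

            /* Return in-flight buffers and quiesce the queue hardware;
             * the descriptor rings themselves stay allocated.
             */
            for (queue = 0; queue < rxq_number; queue++)
                    mvneta_rxq_drop_pkts(pp, &pp->rxqs[queue]);

            for (queue = 0; queue < txq_number; queue++)
                    mvneta_txq_hw_deinit(pp, &pp->txqs[queue]);

    clean_exit:
            netif_device_detach(dev);
            clk_disable_unprepare(pp->clk_bus);
            clk_disable_unprepare(pp->clk);

            return 0;
    }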
@@ -4589,7 +4618,7 @@ static int mvneta_resume(struct device *device)
 	struct platform_device *pdev = to_platform_device(device);
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
-	int err;
+	int err, queue;
 
 	clk_prepare_enable(pp->clk);
 	if (!IS_ERR(pp->clk_bus))
@@ -4611,12 +4640,38 @@ static int mvneta_resume(struct device *device)
 	}
 
 	netif_device_attach(dev);
-	rtnl_lock();
-	if (netif_running(dev)) {
-		mvneta_open(dev);
-		mvneta_set_rx_mode(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		rxq->next_desc_to_proc = 0;
+		mvneta_rxq_hw_init(pp, rxq);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		txq->next_desc_to_proc = 0;
+		mvneta_txq_hw_init(pp, txq);
 	}
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = false;
+		spin_unlock(&pp->lock);
+		cpuhp_state_add_instance_nocalls(online_hpstate,
+						 &pp->node_online);
+		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						 &pp->node_dead);
+	}
+
+	rtnl_lock();
+	mvneta_start_dev(pp);
+	rtnl_unlock();
+	mvneta_set_rx_mode(dev);
 
 	return 0;
 }
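
The resume side mirrors the suspend path. The tail below is again reconstructed from the hunk, with editorial comments; the unchanged beginning of mvneta_resume() is elided.

            /* ... beginning of mvneta_resume() unchanged ... */

            netif_device_attach(dev);

            if (!netif_running(dev))
                    return 0;

            /* Re-program the queue hardware. The descriptor rings were
             * kept across suspend, so this is register work only: no
             * dma_alloc_coherent() and therefore no fragmentation.
             */
            for (queue = 0; queue < rxq_number; queue++) {
                    struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                    rxq->next_desc_to_proc = 0;
                    mvneta_rxq_hw_init(pp, rxq);
            }

            for (queue = 0; queue < txq_number; queue++) {
                    struct mvneta_tx_queue *txq = &pp->txqs[queue];

                    txq->next_desc_to_proc = 0;
                    mvneta_txq_hw_init(pp, txq);
            }

            /* Inverse of the suspend path: re-register the hotplug
             * hooks before restarting the datapath.
             */
            if (!pp->neta_armada3700) {
                    spin_lock(&pp->lock);
                    pp->is_stopped = false;
                    spin_unlock(&pp->lock);
                    cpuhp_state_add_instance_nocalls(online_hpstate,
                                                     &pp->node_online);
                    cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
                                                     &pp->node_dead);
            }

            rtnl_lock();
            mvneta_start_dev(pp);
            rtnl_unlock();
            mvneta_set_rx_mode(dev);

            return 0;
    }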