Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 7
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 47
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 31
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 175
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 122
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 8
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 56
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 17
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 131
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 40
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 23
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 246
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 119
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 143
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 9
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 32
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 18
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 7
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 30
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 65
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 5
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 2
62 files changed, 1060 insertions, 700 deletions
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7769c05543f1..ec6eac1f8c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
 	link->open++;
 
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ax_open(dev);
 } /* axnet_open */
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9fb7b9d4fd6c..2777289a26c0 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
 	info->phy_id = info->eth_phy;
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ei_open(dev);
 } /* pcnet_open */
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 760c72c6e2ac..6725dc00750b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 	u16 pktlength;
 	u16 pktstatus;
 
-	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+	while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+	       (count < limit)) {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
 	struct altera_tse_private *priv =
 		container_of(napi, struct altera_tse_private, napi);
 	int rxcomplete = 0;
-	int txcomplete = 0;
 	unsigned long int flags;
 
-	txcomplete = tse_tx_complete(priv);
+	tse_tx_complete(priv);
 
 	rxcomplete = tse_rx(priv, budget);
 
-	if (rxcomplete >= budget || txcomplete > 0)
-		return rxcomplete;
+	if (rxcomplete < budget) {
 
-	napi_gro_flush(napi, false);
-	__napi_complete(napi);
+		napi_gro_flush(napi, false);
+		__napi_complete(napi);
 
-	netdev_dbg(priv->dev,
-		   "NAPI Complete, did %d packets with budget %d\n",
-		   txcomplete+rxcomplete, budget);
+		netdev_dbg(priv->dev,
+			   "NAPI Complete, did %d packets with budget %d\n",
+			   rxcomplete, budget);
 
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-	priv->dmaops->enable_rxirq(priv);
-	priv->dmaops->enable_txirq(priv);
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-	return rxcomplete + txcomplete;
+		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+		priv->dmaops->enable_rxirq(priv);
+		priv->dmaops->enable_txirq(priv);
+		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+	}
+	return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct altera_tse_private *priv;
-	unsigned long int flags;
 
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	}
 	priv = netdev_priv(dev);
 
-	/* turn off desc irqs and enable napi rx */
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+	spin_lock(&priv->rxdma_irq_lock);
+	/* reset IRQs */
+	priv->dmaops->clear_rxirq(priv);
+	priv->dmaops->clear_txirq(priv);
+	spin_unlock(&priv->rxdma_irq_lock);
 
 	if (likely(napi_schedule_prep(&priv->napi))) {
+		spin_lock(&priv->rxdma_irq_lock);
 		priv->dmaops->disable_rxirq(priv);
 		priv->dmaops->disable_txirq(priv);
+		spin_unlock(&priv->rxdma_irq_lock);
 		__napi_schedule(&priv->napi);
 	}
 
-	/* reset IRQs */
-	priv->dmaops->clear_rxirq(priv);
-	priv->dmaops->clear_txirq(priv);
-
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
 
 	return IRQ_HANDLED;
 }
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-				 &priv->rx_fifo_depth)) {
+				 &priv->tx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
 		goto err_free_netdev;
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 11d6e6561df1..15a8190a6f75 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1543,7 +1543,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 {
 	struct pcnet32_private *lp;
 	int i, media;
-	int fdx, mii, fset, dxsuflo;
+	int fdx, mii, fset, dxsuflo, sram;
 	int chip_version;
 	char *chipname;
 	struct net_device *dev;
@@ -1580,7 +1580,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 	}
 
 	/* initialize variables */
-	fdx = mii = fset = dxsuflo = 0;
+	fdx = mii = fset = dxsuflo = sram = 0;
 	chip_version = (chip_version >> 12) & 0xffff;
 
 	switch (chip_version) {
@@ -1613,6 +1613,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		chipname = "PCnet/FAST III 79C973";	/* PCI */
 		fdx = 1;
 		mii = 1;
+		sram = 1;
 		break;
 	case 0x2626:
 		chipname = "PCnet/Home 79C978";	/* PCI */
@@ -1636,6 +1637,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		chipname = "PCnet/FAST III 79C975";	/* PCI */
 		fdx = 1;
 		mii = 1;
+		sram = 1;
 		break;
 	case 0x2628:
 		chipname = "PCnet/PRO 79C976";
@@ -1664,6 +1666,31 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 		dxsuflo = 1;
 	}
 
+	/*
+	 * The Am79C973/Am79C975 controllers come with 12K of SRAM
+	 * which we can use for the Tx/Rx buffers but most importantly,
+	 * the use of SRAM allow us to use the BCR18:NOUFLO bit to avoid
+	 * Tx fifo underflows.
+	 */
+	if (sram) {
+		/*
+		 * The SRAM is being configured in two steps. First we
+		 * set the SRAM size in the BCR25:SRAM_SIZE bits. According
+		 * to the datasheet, each bit corresponds to a 512-byte
+		 * page so we can have at most 24 pages. The SRAM_SIZE
+		 * holds the value of the upper 8 bits of the 16-bit SRAM size.
+		 * The low 8-bits start at 0x00 and end at 0xff. So the
+		 * address range is from 0x0000 up to 0x17ff. Therefore,
+		 * the SRAM_SIZE is set to 0x17. The next step is to set
+		 * the BCR26:SRAM_BND midway through so the Tx and Rx
+		 * buffers can share the SRAM equally.
+		 */
+		a->write_bcr(ioaddr, 25, 0x17);
+		a->write_bcr(ioaddr, 26, 0xc);
+		/* And finally enable the NOUFLO bit */
+		a->write_bcr(ioaddr, 18, a->read_bcr(ioaddr, 18) | (1 << 11));
+	}
+
 	dev = alloc_etherdev(sizeof(*lp));
 	if (!dev) {
 		ret = -ENOMEM;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index b93d4404d975..885b02b5be07 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 	}
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int i;
+	int ret;
+
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+			       netdev->name, pdata);
+	if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     pdata->dev_irq);
+		return ret;
+	}
+
+	if (!pdata->per_channel_irq)
+		return 0;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		snprintf(channel->dma_irq_name,
+			 sizeof(channel->dma_irq_name) - 1,
+			 "%s-TxRx-%u", netdev_name(netdev),
+			 channel->queue_index);
+
+		ret = devm_request_irq(pdata->dev, channel->dma_irq,
+				       xgbe_dma_isr, 0,
+				       channel->dma_irq_name, channel);
+		if (ret) {
+			netdev_alert(netdev, "error requesting irq %d\n",
+				     channel->dma_irq);
+			goto err_irq;
+		}
+	}
+
+	return 0;
+
+err_irq:
+	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+	for (i--, channel--; i < pdata->channel_count; i--, channel--)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	if (!pdata->per_channel_irq)
+		return;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 		return -EINVAL;
 	}
 
-	phy_stop(pdata->phydev);
-
 	spin_lock_irqsave(&pdata->lock, flags);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_detach(netdev);
 
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 0);
 
-	/* Powerdown Tx/Rx */
 	hw_if->powerdown_tx(pdata);
 	hw_if->powerdown_rx(pdata);
 
+	xgbe_napi_disable(pdata, 0);
+
+	phy_stop(pdata->phydev);
+
 	pdata->power_down = 1;
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
 	phy_start(pdata->phydev);
 
-	/* Enable Tx/Rx */
+	xgbe_napi_enable(pdata, 0);
+
 	hw_if->powerup_tx(pdata);
 	hw_if->powerup_rx(pdata);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_attach(netdev);
 
-	xgbe_napi_enable(pdata, 0);
 	netif_tx_start_all_queues(netdev);
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct net_device *netdev = pdata->netdev;
+	int ret;
 
 	DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	phy_start(pdata->phydev);
 
+	xgbe_napi_enable(pdata, 1);
+
+	ret = xgbe_request_irqs(pdata);
+	if (ret)
+		goto err_napi;
+
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
 	xgbe_init_tx_timers(pdata);
 
-	xgbe_napi_enable(pdata, 1);
 	netif_tx_start_all_queues(netdev);
 
 	DBGPR("<--xgbe_start\n");
 
 	return 0;
+
+err_napi:
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
+	return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_stop\n");
 
-	phy_stop(pdata->phydev);
-
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 1);
 
 	xgbe_stop_tx_timers(pdata);
 
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	xgbe_free_irqs(pdata);
+
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_channel *channel;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int i;
-
 	DBGPR("-->xgbe_restart_dev\n");
 
 	/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->dev_irq);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			synchronize_irq(channel->dma_irq);
-	}
 
 	xgbe_free_tx_data(pdata);
 	xgbe_free_rx_data(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	xgbe_start(pdata);
 
 	DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel = NULL;
-	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-			       netdev->name, pdata);
-	if (ret) {
-		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->dev_irq);
-		goto err_rings;
-	}
-
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++) {
-			snprintf(channel->dma_irq_name,
-				 sizeof(channel->dma_irq_name) - 1,
-				 "%s-TxRx-%u", netdev_name(netdev),
-				 channel->queue_index);
-
-			ret = devm_request_irq(pdata->dev, channel->dma_irq,
-					       xgbe_dma_isr, 0,
-					       channel->dma_irq_name, channel);
-			if (ret) {
-				netdev_alert(netdev,
-					     "error requesting irq %d\n",
-					     channel->dma_irq);
-				goto err_irq;
-			}
-		}
-	}
-
 	ret = xgbe_start(pdata);
 	if (ret)
-		goto err_start;
+		goto err_rings;
 
 	DBGPR("<--xgbe_open\n");
 
 	return 0;
 
-err_start:
-	hw_if->exit(pdata);
-
-err_irq:
-	if (pdata->per_channel_irq) {
-		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-		for (i--, channel--; i < pdata->channel_count; i--, channel--)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
 	desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel;
-	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
 	/* Stop the device */
 	xgbe_stop(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupts */
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
 	/* Free the channel and ring structures */
 	xgbe_free_channels(pdata);
 
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 869d97fcf781..b927021c6c40 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
 	if (!xgene_ring_mgr_init(pdata))
 		return -ENODEV;
 
-	if (!efi_enabled(EFI_BOOT)) {
+	if (pdata->clk) {
 		clk_prepare_enable(pdata->clk);
 		clk_disable_unprepare(pdata->clk);
 		clk_prepare_enable(pdata->clk);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 4de62b210c85..635a83be7e5e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev)
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id xgene_enet_acpi_match[] = {
 	{ "APMC0D05", },
+	{ "APMC0D30", },
+	{ "APMC0D31", },
 	{ }
 };
 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
@@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
 #ifdef CONFIG_OF
 static struct of_device_id xgene_enet_of_match[] = {
 	{.compatible = "apm,xgene-enet",},
+	{.compatible = "apm,xgene1-sgenet",},
+	{.compatible = "apm,xgene1-xgenet",},
 	{},
 };
 
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 21206d33b638..a7f2cc3e485e 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
 {
 	struct bcm_enet_priv *priv;
 	struct net_device *dev;
-	int tx_work_done, rx_work_done;
+	int rx_work_done;
 
 	priv = container_of(napi, struct bcm_enet_priv, napi);
 	dev = priv->net_dev;
@@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
 			ENETDMAC_IR, priv->tx_chan);
 
 	/* reclaim sent skb */
-	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+	bcm_enet_tx_reclaim(dev, 0);
 
 	spin_lock(&priv->rx_lock);
 	rx_work_done = bcm_enet_receive_queue(dev, budget);
 	spin_unlock(&priv->rx_lock);
 
-	if (rx_work_done >= budget || tx_work_done > 0) {
-		/* rx/tx queue is not yet empty/clean */
+	if (rx_work_done >= budget) {
+		/* rx queue is not yet empty/clean */
 		return rx_work_done;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 5b308a4a4d0e..783543ad1fcf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 	/* RBUF misc statistics */
 	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
 	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-	STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 		s = &bcm_sysport_gstrings_stats[i];
 		switch (s->type) {
 		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_SOFT:
 			continue;
 		case BCM_SYSPORT_STAT_MIB_RX:
 		case BCM_SYSPORT_STAT_MIB_TX:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index fc19417d82a5..7e3d87a88c76 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
 	BCM_SYSPORT_STAT_RUNT,
 	BCM_SYSPORT_STAT_RXCHK,
 	BCM_SYSPORT_STAT_RBUF,
+	BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
 	.stat_string = str, \
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 676ffe093180..0469f72c6e7e 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
 	slot->skb = skb;
 	slot->dma_addr = dma_addr;
 
-	if (slot->dma_addr & 0xC0000000)
-		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
 	return 0;
 }
 
@@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
 			  ring->mmio_base);
 		goto err_dma_free;
 	}
-	if (ring->dma_base & 0xC0000000)
-		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 	ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
 					      BGMAC_DMA_RING_TX);
@@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
 		err = -ENOMEM;
 		goto err_dma_free;
 	}
-	if (ring->dma_base & 0xC0000000)
-		bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
 
 	ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
 					      BGMAC_DMA_RING_RX);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7155e1d2c208..996e215fc324 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
 			       PCICFG_VENDOR_ID_OFFSET);
 
+	/* Set PCIe reset type to fundamental for EEH recovery */
+	pdev->needs_freset = 1;
+
 	/* AER (Advanced Error reporting) configuration */
 	rc = pci_enable_pcie_error_reporting(pdev);
 	if (!rc)
@@ -12766,7 +12769,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
 		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
 		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
 		NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
-	if (!CHIP_IS_E1x(bp)) {
+	if (!chip_is_e1x) {
 		dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
 			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
 		dev->hw_enc_features =
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff83c46bc389..6befde61c203 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
 	BCMGENET_STAT_MIB_TX,
 	BCMGENET_STAT_RUNT,
 	BCMGENET_STAT_MISC,
+	BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
 	.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 			   UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		s = &bcmgenet_gstrings_stats[i];
 		switch (s->type) {
 		case BCMGENET_STAT_NETDEV:
+		case BCMGENET_STAT_SOFT:
 			continue;
 		case BCMGENET_STAT_MIB_RX:
 		case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				  struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int pkts_compl = 0;
 	unsigned int bds_compl;
 	unsigned int c_index;
 
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		tx_cb_ptr = ring->cbs + last_c_index;
 		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			pkts_compl++;
 			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		last_c_index &= (num_tx_bds - 1);
 	}
 
-	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-		ring->int_disable(priv, ring);
-
-	if (netif_tx_queue_stopped(txq))
-		netif_tx_wake_queue(txq);
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
 
 	ring->c_index = c_index;
+
+	return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
-				struct bcmgenet_tx_ring *ring)
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+					struct bcmgenet_tx_ring *ring)
 {
+	unsigned int released;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ring->lock, flags);
-	__bcmgenet_tx_reclaim(dev, ring);
+	released = __bcmgenet_tx_reclaim(dev, ring);
 	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_tx_ring *ring =
+		container_of(napi, struct bcmgenet_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		ring->int_enable(ring->priv, ring);
+
+		return 0;
+	}
+
+	return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1302 bcmgenet_tdma_ring_writel(priv, ring->index, 1329 bcmgenet_tdma_ring_writel(priv, ring->index,
1303 ring->prod_index, TDMA_PROD_INDEX); 1330 ring->prod_index, TDMA_PROD_INDEX);
1304 1331
1305 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { 1332 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
1306 netif_tx_stop_queue(txq); 1333 netif_tx_stop_queue(txq);
1307 ring->int_enable(priv, ring);
1308 }
1309 1334
1310out: 1335out:
1311 spin_unlock_irqrestore(&ring->lock, flags); 1336 spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1621 struct device *kdev = &priv->pdev->dev; 1646 struct device *kdev = &priv->pdev->dev;
1622 int ret; 1647 int ret;
1623 u32 reg, cpu_mask_clear; 1648 u32 reg, cpu_mask_clear;
1649 int index;
1624 1650
1625 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); 1651 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1626 1652
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
1647 1673
1648 bcmgenet_intr_disable(priv); 1674 bcmgenet_intr_disable(priv);
1649 1675
1650 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; 1676 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
1651 1677
1652 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); 1678 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1653 1679
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
1674 1700
1675 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); 1701 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
1676 1702
1703 for (index = 0; index < priv->hw_params->tx_queues; index++)
1704 bcmgenet_intrl2_1_writel(priv, (1 << index),
1705 INTRL2_CPU_MASK_CLEAR);
1706
1677 /* Enable rx/tx engine.*/ 1707 /* Enable rx/tx engine.*/
1678 dev_dbg(kdev, "done init umac\n"); 1708 dev_dbg(kdev, "done init umac\n");
1679 1709
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1693 unsigned int first_bd; 1723 unsigned int first_bd;
1694 1724
1695 spin_lock_init(&ring->lock); 1725 spin_lock_init(&ring->lock);
1726 ring->priv = priv;
1727 netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
1696 ring->index = index; 1728 ring->index = index;
1697 if (index == DESC_INDEX) { 1729 if (index == DESC_INDEX) {
1698 ring->queue = 0; 1730 ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1738 TDMA_WRITE_PTR); 1770 TDMA_WRITE_PTR);
1739 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, 1771 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1740 DMA_END_ADDR); 1772 DMA_END_ADDR);
1773
1774 napi_enable(&ring->napi);
1775}
1776
1777static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
1778 unsigned int index)
1779{
1780 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1781
1782 napi_disable(&ring->napi);
1783 netif_napi_del(&ring->napi);
1741} 1784}
1742 1785
1743/* Initialize a RDMA ring */ 1786/* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1907 return ret; 1950 return ret;
1908} 1951}
1909 1952
1910static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1953static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1911{ 1954{
1912 int i; 1955 int i;
1913 1956
@@ -1926,6 +1969,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1926 kfree(priv->tx_cbs); 1969 kfree(priv->tx_cbs);
1927} 1970}
1928 1971
1972static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1973{
1974 int i;
1975
1976 bcmgenet_fini_tx_ring(priv, DESC_INDEX);
1977
1978 for (i = 0; i < priv->hw_params->tx_queues; i++)
1979 bcmgenet_fini_tx_ring(priv, i);
1980
1981 __bcmgenet_fini_dma(priv);
1982}
1983
1929/* init_edma: Initialize DMA control register */ 1984/* init_edma: Initialize DMA control register */
1930static int bcmgenet_init_dma(struct bcmgenet_priv *priv) 1985static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1931{ 1986{
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
 			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
-		bcmgenet_fini_dma(priv);
+		__bcmgenet_fini_dma(priv);
 		return -ENOMEM;
 	}
 
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
 				 struct bcmgenet_priv, napi);
 	unsigned int work_done;
 
-	/* tx reclaim */
-	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
 	work_done = bcmgenet_desc_rx(priv, budget);
 
 	/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_tx_ring *ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-		~priv->int1_mask;
+		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
-	if (priv->irq1_stat & 0x0000ffff) {
-		index = 0;
-		for (index = 0; index < 16; index++) {
-			if (priv->irq1_stat & (1 << index))
-				bcmgenet_tx_reclaim(priv->dev,
-						    &priv->tx_rings[index]);
+	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(index)))
+			continue;
+
+		ring = &priv->tx_rings[index];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
 		}
 	}
+
 	return IRQ_HANDLED;
 }
 
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	}
 	if (priv->irq0_stat &
 	    (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-		/* Tx reclaim */
-		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
+		}
 	}
 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 			       UMAC_IRQ_PHY_DET_F |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index b36ddec0cc0a..0d370d168aee 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
 
 struct bcmgenet_tx_ring {
 	spinlock_t	lock;		/* ring lock */
+	struct napi_struct napi;	/* NAPI per tx queue */
 	unsigned int	index;		/* ring index */
 	unsigned int	queue;		/* queue index */
 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
 			    struct bcmgenet_tx_ring *);
 	void (*int_disable)(struct bcmgenet_priv *priv,
 			    struct bcmgenet_tx_ring *);
+	struct bcmgenet_priv *priv;
 };
 
 /* device context */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 149a0d70c108..b97122926d3a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
 		return -EINVAL;
 
+	reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
 	if (wol->wolopts & WAKE_MAGICSECURE) {
 		bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
 				     UMAC_MPD_PW_MS);
 		bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
 				     UMAC_MPD_PW_LS);
-		reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
 		reg |= MPD_PW_EN;
-		bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+	} else {
+		reg &= ~MPD_PW_EN;
 	}
+	bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
 
 	/* Flag the device and relevant IRQ as wakeup capable */
 	if (wol->wolopts) {
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ad76b8e35a00..81d41539fcba 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = {
 };
 
 #if defined(CONFIG_OF)
-static struct macb_config pc302gem_config = {
+static const struct macb_config pc302gem_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
 };
 
-static struct macb_config sama5d3_config = {
+static const struct macb_config sama5d3_config = {
 	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
 	.dma_burst_length = 16,
 };
 
-static struct macb_config sama5d4_config = {
+static const struct macb_config sama5d4_config = {
 	.caps = 0,
 	.dma_burst_length = 4,
 };
@@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp)
 	if (bp->pdev->dev.of_node) {
 		match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
 		if (match && match->data) {
-			config = (const struct macb_config *)match->data;
+			config = match->data;
 
 			bp->caps = config->caps;
 			/*
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 31dc080f2437..ff85619a9732 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -351,7 +351,7 @@
 
 /* Bitfields in MID */
 #define MACB_IDNUM_OFFSET	16
-#define MACB_IDNUM_SIZE		16
+#define MACB_IDNUM_SIZE		12
 #define MACB_REV_OFFSET		0
 #define MACB_REV_SIZE		16
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index 9062a8434246..c308429dd9c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-				   int addr_len)
+				   u8 v6)
 {
-	return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-				ipv6_clip_hash(ctbl, addr);
+	return v6 ? ipv6_clip_hash(ctbl, addr) :
+			ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
-	int ret = 0;
+	int ret = -1;
 
 	if (!ctbl)
 		return 0;
 
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
-
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
 		spin_lock_init(&ce->lock);
 		atomic_set(&ce->refcnt, 0);
 		atomic_dec(&ctbl->nfree);
-		ce->addr_len = addr_len;
-		memcpy(ce->addr, lip, addr_len);
 		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
 		if (v6) {
+			ce->addr6.sin6_family = AF_INET6;
+			memcpy(ce->addr6.sin6_addr.s6_addr,
+			       lip, sizeof(struct in6_addr));
 			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
 			if (ret) {
 				write_unlock_bh(&ctbl->lock);
 				return ret;
 			}
+		} else {
+			ce->addr.sin_family = AF_INET;
+			memcpy((char *)(&ce->addr.sin_addr), lip,
+			       sizeof(struct in_addr));
 		}
 	} else {
 		write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
-
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
+	int ret = -1;
 
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
 	for (i = 0 ; i < ctbl->clipt_size; ++i) {
 		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
 			ip[0] = '\0';
-			if (ce->addr_len == 16)
-				sprintf(ip, "%pI6c", ce->addr);
-			else
-				sprintf(ip, "%pI4c", ce->addr);
+			sprintf(ip, "%pISc", &ce->addr);
 			seq_printf(seq, "%-25s %u\n", ip,
 				   atomic_read(&ce->refcnt));
 		}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 2eaba0161cf8..35eb43c6bcbb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -14,8 +14,10 @@ struct clip_entry {
14 spinlock_t lock; /* Hold while modifying clip reference */ 14 spinlock_t lock; /* Hold while modifying clip reference */
15 atomic_t refcnt; 15 atomic_t refcnt;
16 struct list_head list; 16 struct list_head list;
17 u32 addr[4]; 17 union {
18 int addr_len; 18 struct sockaddr_in addr;
19 struct sockaddr_in6 addr6;
20 };
19}; 21};
20 22
21struct clip_tbl { 23struct clip_tbl {
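
Storing the address as a tagged sockaddr union is what lets clip_tbl_show() above print both families through a single %pISc specifier instead of branching on addr_len. A minimal sketch of the same idiom, using hypothetical names rather than the driver's types:

	#include <linux/in.h>
	#include <linux/in6.h>
	#include <linux/printk.h>

	/* Mirrors the clip_entry layout: sa_family tags which member is live. */
	union demo_clip_addr {
		struct sockaddr_in  v4;
		struct sockaddr_in6 v6;
	};

	static void demo_clip_print(const union demo_clip_addr *a)
	{
		/* %pISc formats either family based on the embedded sa_family */
		pr_info("clip addr: %pISc\n", a);
	}
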
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d6cda17efe6e..97842d03675b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1103#define T4_MEMORY_WRITE 0 1103#define T4_MEMORY_WRITE 0
1104#define T4_MEMORY_READ 1 1104#define T4_MEMORY_READ 1
1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 1105int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1106 __be32 *buf, int dir); 1106 void *buf, int dir);
1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 1107static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1108 u32 len, __be32 *buf) 1108 u32 len, __be32 *buf)
1109{ 1109{
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4d643b65265e..1abdfa123c6c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 450 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 451 * @len: amount of memory to transfer
452 * @buf: host memory buffer 452 * @hbuf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 454 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 455 * Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
460 * caller's responsibility to perform appropriate byte order conversions. 460 * caller's responsibility to perform appropriate byte order conversions.
461 */ 461 */
462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 462int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 463 u32 len, void *hbuf, int dir)
464{ 464{
465 u32 pos, offset, resid, memoffset; 465 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 u32 *buf;
467 468
468 /* Argument sanity checks ... 469 /* Argument sanity checks ...
469 */ 470 */
470 if (addr & 0x3) 471 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
471 return -EINVAL; 472 return -EINVAL;
473 buf = (u32 *)hbuf;
472 474
473 /* It's convenient to be able to handle lengths which aren't a 475 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 476 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
532 534
533 /* Transfer data to/from the adapter as long as there's an integral 535 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 536 * number of 32-bit transfers to complete.
537 *
538 * A note on Endianness issues:
539 *
540 * The "register" reads and writes below from/to the PCI-E Memory
541 * Window invoke the standard adapter Big-Endian to PCI-E Link
 542 * Little-Endian "swizzle." As a result, if we have the following
543 * data in adapter memory:
544 *
545 * Memory: ... | b0 | b1 | b2 | b3 | ...
546 * Address: i+0 i+1 i+2 i+3
547 *
548 * Then a read of the adapter memory via the PCI-E Memory Window
549 * will yield:
550 *
551 * x = readl(i)
552 * 31 0
553 * [ b3 | b2 | b1 | b0 ]
554 *
555 * If this value is stored into local memory on a Little-Endian system
556 * it will show up correctly in local memory as:
557 *
558 * ( ..., b0, b1, b2, b3, ... )
559 *
560 * But on a Big-Endian system, the store will show up in memory
561 * incorrectly swizzled as:
562 *
563 * ( ..., b3, b2, b1, b0, ... )
564 *
565 * So we need to account for this in the reads and writes to the
566 * PCI-E Memory Window below by undoing the register read/write
 567 * swizzles.
535 */ 568 */
536 while (len > 0) { 569 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 570 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 571 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
539 mem_base + offset); 572 mem_base + offset));
540 else 573 else
541 t4_write_reg(adap, mem_base + offset, 574 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 575 (__force u32)cpu_to_le32(*buf++));
543 offset += sizeof(__be32); 576 offset += sizeof(__be32);
544 len -= sizeof(__be32); 577 len -= sizeof(__be32);
545 578
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
568 */ 601 */
569 if (resid) { 602 if (resid) {
570 union { 603 union {
571 __be32 word; 604 u32 word;
572 char byte[4]; 605 char byte[4];
573 } last; 606 } last;
574 unsigned char *bp; 607 unsigned char *bp;
575 int i; 608 int i;
576 609
577 if (dir == T4_MEMORY_READ) { 610 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 611 last.word = le32_to_cpu(
579 mem_base + offset); 612 (__force __le32)t4_read_reg(adap,
613 mem_base + offset));
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 614 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 615 bp[i] = last.byte[i];
582 } else { 616 } else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
584 for (i = resid; i < 4; i++) 618 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 619 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 620 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 621 (__force u32)cpu_to_le32(last.word));
588 } 622 }
589 } 623 }
590 624
@@ -1086,7 +1120,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1086 } 1120 }
1087 1121
1088 /* Installed successfully, update the cached header too. */ 1122 /* Installed successfully, update the cached header too. */
1089 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1123 *card_fw = *fs_fw;
1090 card_fw_usable = 1; 1124 card_fw_usable = 1;
1091 *reset = 0; /* already reset as part of load_fw */ 1125 *reset = 0; /* already reset as part of load_fw */
1092 } 1126 }
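
The long swizzle comment above is easiest to check with a concrete round trip. A hedged sketch of what the corrected read path computes, assuming t4_read_reg() hands back the register value in CPU order with byte b0 of adapter memory in the low-order byte:

	#include <linux/kernel.h>

	/* Adapter memory holds b0 b1 b2 b3 at ascending addresses; the
	 * window read yields a register value with b0 in the low byte.
	 * Treating that as a little-endian quantity and converting to
	 * CPU order stores b0..b3 in ascending host memory on both
	 * little- and big-endian hosts.
	 */
	static u32 demo_window_read_fixup(u32 raw)
	{
		return le32_to_cpu((__force __le32)raw);
	}

On a little-endian host the conversion is a no-op; on big-endian it byte-swaps, which is exactly the "undo the swizzle" the comment calls for.
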
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 9cbe038a388e..a5179bfcdc2c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
272 } 272 }
273 273
274 if (ENIC_TEST_INTR(pba, notify_intr)) { 274 if (ENIC_TEST_INTR(pba, notify_intr)) {
275 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
276 enic_notify_check(enic); 275 enic_notify_check(enic);
276 vnic_intr_return_all_credits(&enic->intr[notify_intr]);
277 } 277 }
278 278
279 if (ENIC_TEST_INTR(pba, err_intr)) { 279 if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
346 struct enic *enic = data; 346 struct enic *enic = data;
347 unsigned int intr = enic_msix_notify_intr(enic); 347 unsigned int intr = enic_msix_notify_intr(enic);
348 348
349 vnic_intr_return_all_credits(&enic->intr[intr]);
350 enic_notify_check(enic); 349 enic_notify_check(enic);
350 vnic_intr_return_all_credits(&enic->intr[intr]);
351 351
352 return IRQ_HANDLED; 352 return IRQ_HANDLED;
353} 353}
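
Both enic hunks apply the same rule: consume the notify data first, then return the interrupt credits, since returning credits is what re-arms the source and lets the hardware post a fresh event over the area being read. The pattern in isolation, with hypothetical helpers standing in for the enic calls:

	#include <linux/interrupt.h>

	struct demo_dev;
	void demo_consume_notify(struct demo_dev *dev);
	void demo_return_all_credits(struct demo_dev *dev);

	static irqreturn_t demo_notify_isr(int irq, void *data)
	{
		struct demo_dev *dev = data;

		demo_consume_notify(dev);       /* 1: read the event data */
		demo_return_all_credits(dev);   /* 2: only then re-arm    */
		return IRQ_HANDLED;
	}
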
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3b42556f7f8d..ed41559bae77 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -589,7 +589,7 @@ static void tulip_tx_timeout(struct net_device *dev)
589 (unsigned int)tp->rx_ring[i].buffer1, 589 (unsigned int)tp->rx_ring[i].buffer1,
590 (unsigned int)tp->rx_ring[i].buffer2, 590 (unsigned int)tp->rx_ring[i].buffer2,
591 buf[0], buf[1], buf[2]); 591 buf[0], buf[1], buf[2]);
592 for (j = 0; buf[j] != 0xee && j < 1600; j++) 592 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
593 if (j < 100) 593 if (j < 100)
594 pr_cont(" %02x", buf[j]); 594 pr_cont(" %02x", buf[j]);
595 pr_cont(" j=%d\n", j); 595 pr_cont(" j=%d\n", j);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 27de37aa90af..27b9fe99a9bd 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -354,6 +354,7 @@ struct be_vf_cfg {
354 u16 vlan_tag; 354 u16 vlan_tag;
355 u32 tx_rate; 355 u32 tx_rate;
356 u32 plink_tracking; 356 u32 plink_tracking;
357 u32 privileges;
357}; 358};
358 359
359enum vf_state { 360enum vf_state {
@@ -423,6 +424,7 @@ struct be_adapter {
423 424
424 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */ 425 u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
425 u8 __iomem *db; /* Door Bell */ 426 u8 __iomem *db; /* Door Bell */
427 u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
426 428
427 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ 429 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
428 struct be_dma_mem mbox_mem; 430 struct be_dma_mem mbox_mem;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 36916cfa70f9..7f05f309e935 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1902,15 +1902,11 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1902{ 1902{
1903 int num_eqs, i = 0; 1903 int num_eqs, i = 0;
1904 1904
1905 if (lancer_chip(adapter) && num > 8) { 1905 while (num) {
1906 while (num) { 1906 num_eqs = min(num, 8);
1907 num_eqs = min(num, 8); 1907 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1908 __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs); 1908 i += num_eqs;
1909 i += num_eqs; 1909 num -= num_eqs;
1910 num -= num_eqs;
1911 }
1912 } else {
1913 __be_cmd_modify_eqd(adapter, set_eqd, num);
1914 } 1910 }
1915 1911
1916 return 0; 1912 return 0;
@@ -1918,7 +1914,7 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1918 1914
1919/* Uses synchronous mcc */ 1915/* Uses synchronous mcc */
1920int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 1916int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 u32 num) 1917 u32 num, u32 domain)
1922{ 1918{
1923 struct be_mcc_wrb *wrb; 1919 struct be_mcc_wrb *wrb;
1924 struct be_cmd_req_vlan_config *req; 1920 struct be_cmd_req_vlan_config *req;
@@ -1936,6 +1932,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1936 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), 1933 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 wrb, NULL); 1934 wrb, NULL);
1935 req->hdr.domain = domain;
1939 1936
1940 req->interface_id = if_id; 1937 req->interface_id = if_id;
1941 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; 1938 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
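
The modify_eqd hunk above drops the Lancer-only special case and always batches the command into chunks of at most 8 EQs, which also covers the small case in a single iteration. The loop shape on its own, with a hypothetical demo_send_chunk() in place of __be_cmd_modify_eqd():

	#include <linux/kernel.h>

	#define DEMO_CHUNK 8

	struct demo_entry;
	void demo_send_chunk(struct demo_entry *set, int n);

	static void demo_send_in_chunks(struct demo_entry *set, int num)
	{
		int i = 0;

		while (num) {
			int n = min(num, DEMO_CHUNK);  /* at most 8 per command */

			demo_send_chunk(&set[i], n);
			i += n;
			num -= n;
		}
	}
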
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index db761e8e42a3..a7634a3f052a 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2256,7 +2256,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
2256int be_cmd_get_fw_ver(struct be_adapter *adapter); 2256int be_cmd_get_fw_ver(struct be_adapter *adapter);
2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num); 2257int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, 2258int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
2259 u32 num); 2259 u32 num, u32 domain);
2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status); 2260int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc); 2261int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc); 2262int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 0a816859aca5..e6b790f0d9dc 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1171,7 +1171,7 @@ static int be_vid_config(struct be_adapter *adapter)
1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID) 1171 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1172 vids[num++] = cpu_to_le16(i); 1172 vids[num++] = cpu_to_le16(i);
1173 1173
1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num); 1174 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1175 if (status) { 1175 if (status) {
1176 dev_err(dev, "Setting HW VLAN filtering failed\n"); 1176 dev_err(dev, "Setting HW VLAN filtering failed\n");
1177 /* Set to VLAN promisc mode as setting VLAN filter failed */ 1177 /* Set to VLAN promisc mode as setting VLAN filter failed */
@@ -1380,11 +1380,67 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1380 return 0; 1380 return 0;
1381} 1381}
1382 1382
1383static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1384{
1385 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1386 u16 vids[BE_NUM_VLANS_SUPPORTED];
1387 int vf_if_id = vf_cfg->if_handle;
1388 int status;
1389
1390 /* Enable Transparent VLAN Tagging */
1391 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0);
1392 if (status)
1393 return status;
1394
 1395 /* With TVT enabled, clear any pre-programmed VLAN filters on the VF */
1396 vids[0] = 0;
1397 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1398 if (!status)
1399 dev_info(&adapter->pdev->dev,
1400 "Cleared guest VLANs on VF%d", vf);
1401
1402 /* After TVT is enabled, disallow VFs to program VLAN filters */
1403 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1404 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1405 ~BE_PRIV_FILTMGMT, vf + 1);
1406 if (!status)
1407 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1408 }
1409 return 0;
1410}
1411
1412static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1413{
1414 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1415 struct device *dev = &adapter->pdev->dev;
1416 int status;
1417
1418 /* Reset Transparent VLAN Tagging. */
1419 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1420 vf_cfg->if_handle, 0);
1421 if (status)
1422 return status;
1423
1424 /* Allow VFs to program VLAN filtering */
1425 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1426 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1427 BE_PRIV_FILTMGMT, vf + 1);
1428 if (!status) {
1429 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1430 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1431 }
1432 }
1433
1434 dev_info(dev,
1435 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1436 return 0;
1437}
1438
1383static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) 1439static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1384{ 1440{
1385 struct be_adapter *adapter = netdev_priv(netdev); 1441 struct be_adapter *adapter = netdev_priv(netdev);
1386 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf]; 1442 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1387 int status = 0; 1443 int status;
1388 1444
1389 if (!sriov_enabled(adapter)) 1445 if (!sriov_enabled(adapter))
1390 return -EPERM; 1446 return -EPERM;
@@ -1394,24 +1450,19 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1394 1450
1395 if (vlan || qos) { 1451 if (vlan || qos) {
1396 vlan |= qos << VLAN_PRIO_SHIFT; 1452 vlan |= qos << VLAN_PRIO_SHIFT;
1397 if (vf_cfg->vlan_tag != vlan) 1453 status = be_set_vf_tvt(adapter, vf, vlan);
1398 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1399 vf_cfg->if_handle, 0);
1400 } else { 1454 } else {
1401 /* Reset Transparent Vlan Tagging. */ 1455 status = be_clear_vf_tvt(adapter, vf);
1402 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1403 vf + 1, vf_cfg->if_handle, 0);
1404 } 1456 }
1405 1457
1406 if (status) { 1458 if (status) {
1407 dev_err(&adapter->pdev->dev, 1459 dev_err(&adapter->pdev->dev,
1408 "VLAN %d config on VF %d failed : %#x\n", vlan, 1460 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1409 vf, status); 1461 status);
1410 return be_cmd_status(status); 1462 return be_cmd_status(status);
1411 } 1463 }
1412 1464
1413 vf_cfg->vlan_tag = vlan; 1465 vf_cfg->vlan_tag = vlan;
1414
1415 return 0; 1466 return 0;
1416} 1467}
1417 1468
@@ -2772,14 +2823,12 @@ void be_detect_error(struct be_adapter *adapter)
2772 } 2823 }
2773 } 2824 }
2774 } else { 2825 } else {
2775 pci_read_config_dword(adapter->pdev, 2826 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
2776 PCICFG_UE_STATUS_LOW, &ue_lo); 2827 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
2777 pci_read_config_dword(adapter->pdev, 2828 ue_lo_mask = ioread32(adapter->pcicfg +
2778 PCICFG_UE_STATUS_HIGH, &ue_hi); 2829 PCICFG_UE_STATUS_LOW_MASK);
2779 pci_read_config_dword(adapter->pdev, 2830 ue_hi_mask = ioread32(adapter->pcicfg +
2780 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask); 2831 PCICFG_UE_STATUS_HI_MASK);
2781 pci_read_config_dword(adapter->pdev,
2782 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2783 2832
2784 ue_lo = (ue_lo & ~ue_lo_mask); 2833 ue_lo = (ue_lo & ~ue_lo_mask);
2785 ue_hi = (ue_hi & ~ue_hi_mask); 2834 ue_hi = (ue_hi & ~ue_hi_mask);
@@ -3339,7 +3388,6 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3339 u32 cap_flags, u32 vf) 3388 u32 cap_flags, u32 vf)
3340{ 3389{
3341 u32 en_flags; 3390 u32 en_flags;
3342 int status;
3343 3391
3344 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3392 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3345 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS | 3393 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
@@ -3347,10 +3395,7 @@ static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3347 3395
3348 en_flags &= cap_flags; 3396 en_flags &= cap_flags;
3349 3397
3350 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3398 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3351 if_handle, vf);
3352
3353 return status;
3354} 3399}
3355 3400
3356static int be_vfs_if_create(struct be_adapter *adapter) 3401static int be_vfs_if_create(struct be_adapter *adapter)
@@ -3368,8 +3413,13 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3368 if (!BE3_chip(adapter)) { 3413 if (!BE3_chip(adapter)) {
3369 status = be_cmd_get_profile_config(adapter, &res, 3414 status = be_cmd_get_profile_config(adapter, &res,
3370 vf + 1); 3415 vf + 1);
3371 if (!status) 3416 if (!status) {
3372 cap_flags = res.if_cap_flags; 3417 cap_flags = res.if_cap_flags;
3418 /* Prevent VFs from enabling VLAN promiscuous
3419 * mode
3420 */
3421 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3422 }
3373 } 3423 }
3374 3424
3375 status = be_if_create(adapter, &vf_cfg->if_handle, 3425 status = be_if_create(adapter, &vf_cfg->if_handle,
@@ -3403,7 +3453,6 @@ static int be_vf_setup(struct be_adapter *adapter)
3403 struct device *dev = &adapter->pdev->dev; 3453 struct device *dev = &adapter->pdev->dev;
3404 struct be_vf_cfg *vf_cfg; 3454 struct be_vf_cfg *vf_cfg;
3405 int status, old_vfs, vf; 3455 int status, old_vfs, vf;
3406 u32 privileges;
3407 3456
3408 old_vfs = pci_num_vf(adapter->pdev); 3457 old_vfs = pci_num_vf(adapter->pdev);
3409 3458
@@ -3433,15 +3482,18 @@ static int be_vf_setup(struct be_adapter *adapter)
3433 3482
3434 for_all_vfs(adapter, vf_cfg, vf) { 3483 for_all_vfs(adapter, vf_cfg, vf) {
 3435 /* Allow VFs to program MAC/VLAN filters */ 3484 /* Allow VFs to program MAC/VLAN filters */
3436 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1); 3485 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3437 if (!status && !(privileges & BE_PRIV_FILTMGMT)) { 3486 vf + 1);
3487 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
3438 status = be_cmd_set_fn_privileges(adapter, 3488 status = be_cmd_set_fn_privileges(adapter,
3439 privileges | 3489 vf_cfg->privileges |
3440 BE_PRIV_FILTMGMT, 3490 BE_PRIV_FILTMGMT,
3441 vf + 1); 3491 vf + 1);
3442 if (!status) 3492 if (!status) {
3493 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
3443 dev_info(dev, "VF%d has FILTMGMT privilege\n", 3494 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3444 vf); 3495 vf);
3496 }
3445 } 3497 }
3446 3498
3447 /* Allow full available bandwidth */ 3499 /* Allow full available bandwidth */
@@ -4820,24 +4872,37 @@ static int be_roce_map_pci_bars(struct be_adapter *adapter)
4820 4872
4821static int be_map_pci_bars(struct be_adapter *adapter) 4873static int be_map_pci_bars(struct be_adapter *adapter)
4822{ 4874{
4875 struct pci_dev *pdev = adapter->pdev;
4823 u8 __iomem *addr; 4876 u8 __iomem *addr;
4824 4877
4825 if (BEx_chip(adapter) && be_physfn(adapter)) { 4878 if (BEx_chip(adapter) && be_physfn(adapter)) {
4826 adapter->csr = pci_iomap(adapter->pdev, 2, 0); 4879 adapter->csr = pci_iomap(pdev, 2, 0);
4827 if (!adapter->csr) 4880 if (!adapter->csr)
4828 return -ENOMEM; 4881 return -ENOMEM;
4829 } 4882 }
4830 4883
4831 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0); 4884 addr = pci_iomap(pdev, db_bar(adapter), 0);
4832 if (!addr) 4885 if (!addr)
4833 goto pci_map_err; 4886 goto pci_map_err;
4834 adapter->db = addr; 4887 adapter->db = addr;
4835 4888
4889 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
4890 if (be_physfn(adapter)) {
4891 /* PCICFG is the 2nd BAR in BE2 */
4892 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
4893 if (!addr)
4894 goto pci_map_err;
4895 adapter->pcicfg = addr;
4896 } else {
4897 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
4898 }
4899 }
4900
4836 be_roce_map_pci_bars(adapter); 4901 be_roce_map_pci_bars(adapter);
4837 return 0; 4902 return 0;
4838 4903
4839pci_map_err: 4904pci_map_err:
4840 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n"); 4905 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
4841 be_unmap_pci_bars(adapter); 4906 be_unmap_pci_bars(adapter);
4842 return -ENOMEM; 4907 return -ENOMEM;
4843} 4908}
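
be_detect_error() above now reads the UE status words through the newly mapped pcicfg BAR with ioread32() rather than issuing PCI config-space reads. The map-then-read pairing, reduced to a sketch with a placeholder offset:

	#include <linux/pci.h>
	#include <linux/io.h>

	#define DEMO_UE_STATUS_LOW 0xA0   /* placeholder, not the real offset */

	static int demo_read_ue_status(struct pci_dev *pdev, u32 *ue_lo)
	{
		void __iomem *pcicfg = pci_iomap(pdev, 0, 0); /* map whole BAR 0 */

		if (!pcicfg)
			return -ENOMEM;
		*ue_lo = ioread32(pcicfg + DEMO_UE_STATUS_LOW);
		pci_iounmap(pdev, pcicfg);
		return 0;
	}
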
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9bb6220663b2..78e1ce09b1ab 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1189,13 +1189,12 @@ static void
1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1189fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1190{ 1190{
1191 struct fec_enet_private *fep; 1191 struct fec_enet_private *fep;
1192 struct bufdesc *bdp, *bdp_t; 1192 struct bufdesc *bdp;
1193 unsigned short status; 1193 unsigned short status;
1194 struct sk_buff *skb; 1194 struct sk_buff *skb;
1195 struct fec_enet_priv_tx_q *txq; 1195 struct fec_enet_priv_tx_q *txq;
1196 struct netdev_queue *nq; 1196 struct netdev_queue *nq;
1197 int index = 0; 1197 int index = 0;
1198 int i, bdnum;
1199 int entries_free; 1198 int entries_free;
1200 1199
1201 fep = netdev_priv(ndev); 1200 fep = netdev_priv(ndev);
@@ -1216,29 +1215,18 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
1216 if (bdp == txq->cur_tx) 1215 if (bdp == txq->cur_tx)
1217 break; 1216 break;
1218 1217
1219 bdp_t = bdp; 1218 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep);
1220 bdnum = 1;
1221 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1222 skb = txq->tx_skbuff[index];
1223 while (!skb) {
1224 bdp_t = fec_enet_get_nextdesc(bdp_t, fep, queue_id);
1225 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp_t, fep);
1226 skb = txq->tx_skbuff[index];
1227 bdnum++;
1228 }
1229 if (skb_shinfo(skb)->nr_frags &&
1230 (status = bdp_t->cbd_sc) & BD_ENET_TX_READY)
1231 break;
1232 1219
1233 for (i = 0; i < bdnum; i++) { 1220 skb = txq->tx_skbuff[index];
1234 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1235 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1236 bdp->cbd_datlen, DMA_TO_DEVICE);
1237 bdp->cbd_bufaddr = 0;
1238 if (i < bdnum - 1)
1239 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1240 }
1241 txq->tx_skbuff[index] = NULL; 1221 txq->tx_skbuff[index] = NULL;
1222 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
1223 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
1224 bdp->cbd_datlen, DMA_TO_DEVICE);
1225 bdp->cbd_bufaddr = 0;
1226 if (!skb) {
1227 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
1228 continue;
1229 }
1242 1230
1243 /* Check for errors. */ 1231 /* Check for errors. */
1244 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1232 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1479,8 +1467,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1479 1467
1480 vlan_packet_rcvd = true; 1468 vlan_packet_rcvd = true;
1481 1469
1482 skb_copy_to_linear_data_offset(skb, VLAN_HLEN, 1470 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1483 data, (2 * ETH_ALEN));
1484 skb_pull(skb, VLAN_HLEN); 1471 skb_pull(skb, VLAN_HLEN);
1485 } 1472 }
1486 1473
@@ -1597,7 +1584,7 @@ fec_enet_interrupt(int irq, void *dev_id)
1597 writel(int_events, fep->hwp + FEC_IEVENT); 1584 writel(int_events, fep->hwp + FEC_IEVENT);
1598 fec_enet_collect_events(fep, int_events); 1585 fec_enet_collect_events(fep, int_events);
1599 1586
1600 if (fep->work_tx || fep->work_rx) { 1587 if ((fep->work_tx || fep->work_rx) && fep->link) {
1601 ret = IRQ_HANDLED; 1588 ret = IRQ_HANDLED;
1602 1589
1603 if (napi_schedule_prep(&fep->napi)) { 1590 if (napi_schedule_prep(&fep->napi)) {
@@ -3383,7 +3370,6 @@ fec_drv_remove(struct platform_device *pdev)
3383 regulator_disable(fep->reg_phy); 3370 regulator_disable(fep->reg_phy);
3384 if (fep->ptp_clock) 3371 if (fep->ptp_clock)
3385 ptp_clock_unregister(fep->ptp_clock); 3372 ptp_clock_unregister(fep->ptp_clock);
3386 fec_enet_clk_enable(ndev, false);
3387 of_node_put(fep->phy_node); 3373 of_node_put(fep->phy_node);
3388 free_netdev(ndev); 3374 free_netdev(ndev);
3389 3375
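
The VLAN-strip hunk above swaps the skb_copy_to_linear_data_offset() wrapper (plain memcpy) for memmove(), presumably because source and destination overlap here: the two MAC addresses are shifted four bytes up within the same linear buffer, over the tag being removed. A standalone illustration of the overlap:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* 12 bytes of MACs, 4-byte VLAN tag, then payload */
		char frame[] = "DDDDDDSSSSSSTAGGpayload";

		/* dst [4,16) overlaps src [0,12) by 8 bytes, so memmove
		 * (not memcpy) is required for a defined result.
		 */
		memmove(frame + 4, frame, 12);
		puts(frame + 4);   /* prints DDDDDDSSSSSSpayload */
		return 0;
	}
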
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 43df78882e48..7bf3682cdf47 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np,
747 return 0; 747 return 0;
748} 748}
749 749
750static int gfar_of_group_count(struct device_node *np)
751{
752 struct device_node *child;
753 int num = 0;
754
755 for_each_available_child_of_node(np, child)
756 if (!of_node_cmp(child->name, "queue-group"))
757 num++;
758
759 return num;
760}
761
750static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) 762static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
751{ 763{
752 const char *model; 764 const char *model;
@@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
784 num_rx_qs = 1; 796 num_rx_qs = 1;
785 } else { /* MQ_MG_MODE */ 797 } else { /* MQ_MG_MODE */
786 /* get the actual number of supported groups */ 798 /* get the actual number of supported groups */
787 unsigned int num_grps = of_get_available_child_count(np); 799 unsigned int num_grps = gfar_of_group_count(np);
788 800
789 if (num_grps == 0 || num_grps > MAXGROUPS) { 801 if (num_grps == 0 || num_grps > MAXGROUPS) {
790 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", 802 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
@@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
851 863
852 /* Parse and initialize group specific information */ 864 /* Parse and initialize group specific information */
853 if (priv->mode == MQ_MG_MODE) { 865 if (priv->mode == MQ_MG_MODE) {
854 for_each_child_of_node(np, child) { 866 for_each_available_child_of_node(np, child) {
867 if (of_node_cmp(child->name, "queue-group"))
868 continue;
869
855 err = gfar_parse_group(child, priv, model); 870 err = gfar_parse_group(child, priv, model);
856 if (err) 871 if (err)
857 goto err_grp_init; 872 goto err_grp_init;
@@ -3162,8 +3177,8 @@ static void adjust_link(struct net_device *dev)
3162 struct phy_device *phydev = priv->phydev; 3177 struct phy_device *phydev = priv->phydev;
3163 3178
3164 if (unlikely(phydev->link != priv->oldlink || 3179 if (unlikely(phydev->link != priv->oldlink ||
3165 phydev->duplex != priv->oldduplex || 3180 (phydev->link && (phydev->duplex != priv->oldduplex ||
3166 phydev->speed != priv->oldspeed)) 3181 phydev->speed != priv->oldspeed))))
3167 gfar_update_link_state(priv); 3182 gfar_update_link_state(priv);
3168} 3183}
3169 3184
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index e8a1adb7a962..c05e50759621 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
3262 device_remove_file(&dev->dev, &dev_attr_remove_port); 3262 device_remove_file(&dev->dev, &dev_attr_remove_port);
3263} 3263}
3264 3264
3265static int ehea_reboot_notifier(struct notifier_block *nb,
3266 unsigned long action, void *unused)
3267{
3268 if (action == SYS_RESTART) {
3269 pr_info("Reboot: freeing all eHEA resources\n");
3270 ibmebus_unregister_driver(&ehea_driver);
3271 }
3272 return NOTIFY_DONE;
3273}
3274
3275static struct notifier_block ehea_reboot_nb = {
3276 .notifier_call = ehea_reboot_notifier,
3277};
3278
3279static int ehea_mem_notifier(struct notifier_block *nb,
3280 unsigned long action, void *data)
3281{
3282 int ret = NOTIFY_BAD;
3283 struct memory_notify *arg = data;
3284
3285 mutex_lock(&dlpar_mem_lock);
3286
3287 switch (action) {
3288 case MEM_CANCEL_OFFLINE:
3289 pr_info("memory offlining canceled");
3290 /* Fall through: re-add canceled memory block */
3291
3292 case MEM_ONLINE:
3293 pr_info("memory is going online");
3294 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3295 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3296 goto out_unlock;
3297 ehea_rereg_mrs();
3298 break;
3299
3300 case MEM_GOING_OFFLINE:
3301 pr_info("memory is going offline");
3302 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3303 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3304 goto out_unlock;
3305 ehea_rereg_mrs();
3306 break;
3307
3308 default:
3309 break;
3310 }
3311
3312 ehea_update_firmware_handles();
3313 ret = NOTIFY_OK;
3314
3315out_unlock:
3316 mutex_unlock(&dlpar_mem_lock);
3317 return ret;
3318}
3319
3320static struct notifier_block ehea_mem_nb = {
3321 .notifier_call = ehea_mem_notifier,
3322};
3323
3324static void ehea_crash_handler(void)
3325{
3326 int i;
3327
3328 if (ehea_fw_handles.arr)
3329 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3330 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3331 ehea_fw_handles.arr[i].fwh,
3332 FORCE_FREE);
3333
3334 if (ehea_bcmc_regs.arr)
3335 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3336 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3337 ehea_bcmc_regs.arr[i].port_id,
3338 ehea_bcmc_regs.arr[i].reg_type,
3339 ehea_bcmc_regs.arr[i].macaddr,
3340 0, H_DEREG_BCMC);
3341}
3342
3343static atomic_t ehea_memory_hooks_registered;
3344
3345/* Register memory hooks on probe of first adapter */
3346static int ehea_register_memory_hooks(void)
3347{
3348 int ret = 0;
3349
3350 if (atomic_inc_and_test(&ehea_memory_hooks_registered))
3351 return 0;
3352
3353 ret = ehea_create_busmap();
3354 if (ret) {
3355 pr_info("ehea_create_busmap failed\n");
3356 goto out;
3357 }
3358
3359 ret = register_reboot_notifier(&ehea_reboot_nb);
3360 if (ret) {
3361 pr_info("register_reboot_notifier failed\n");
3362 goto out;
3363 }
3364
3365 ret = register_memory_notifier(&ehea_mem_nb);
3366 if (ret) {
3367 pr_info("register_memory_notifier failed\n");
3368 goto out2;
3369 }
3370
3371 ret = crash_shutdown_register(ehea_crash_handler);
3372 if (ret) {
3373 pr_info("crash_shutdown_register failed\n");
3374 goto out3;
3375 }
3376
3377 return 0;
3378
3379out3:
3380 unregister_memory_notifier(&ehea_mem_nb);
3381out2:
3382 unregister_reboot_notifier(&ehea_reboot_nb);
3383out:
3384 return ret;
3385}
3386
3387static void ehea_unregister_memory_hooks(void)
3388{
3389 if (atomic_read(&ehea_memory_hooks_registered))
3390 return;
3391
3392 unregister_reboot_notifier(&ehea_reboot_nb);
3393 if (crash_shutdown_unregister(ehea_crash_handler))
3394 pr_info("failed unregistering crash handler\n");
3395 unregister_memory_notifier(&ehea_mem_nb);
3396}
3397
3265static int ehea_probe_adapter(struct platform_device *dev) 3398static int ehea_probe_adapter(struct platform_device *dev)
3266{ 3399{
3267 struct ehea_adapter *adapter; 3400 struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
3269 int ret; 3402 int ret;
3270 int i; 3403 int i;
3271 3404
3405 ret = ehea_register_memory_hooks();
3406 if (ret)
3407 return ret;
3408
3272 if (!dev || !dev->dev.of_node) { 3409 if (!dev || !dev->dev.of_node) {
3273 pr_err("Invalid ibmebus device probed\n"); 3410 pr_err("Invalid ibmebus device probed\n");
3274 return -EINVAL; 3411 return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
3392 return 0; 3529 return 0;
3393} 3530}
3394 3531
3395static void ehea_crash_handler(void)
3396{
3397 int i;
3398
3399 if (ehea_fw_handles.arr)
3400 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3401 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3402 ehea_fw_handles.arr[i].fwh,
3403 FORCE_FREE);
3404
3405 if (ehea_bcmc_regs.arr)
3406 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3407 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3408 ehea_bcmc_regs.arr[i].port_id,
3409 ehea_bcmc_regs.arr[i].reg_type,
3410 ehea_bcmc_regs.arr[i].macaddr,
3411 0, H_DEREG_BCMC);
3412}
3413
3414static int ehea_mem_notifier(struct notifier_block *nb,
3415 unsigned long action, void *data)
3416{
3417 int ret = NOTIFY_BAD;
3418 struct memory_notify *arg = data;
3419
3420 mutex_lock(&dlpar_mem_lock);
3421
3422 switch (action) {
3423 case MEM_CANCEL_OFFLINE:
3424 pr_info("memory offlining canceled");
3425 /* Readd canceled memory block */
3426 case MEM_ONLINE:
3427 pr_info("memory is going online");
3428 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3429 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3430 goto out_unlock;
3431 ehea_rereg_mrs();
3432 break;
3433 case MEM_GOING_OFFLINE:
3434 pr_info("memory is going offline");
3435 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3436 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3437 goto out_unlock;
3438 ehea_rereg_mrs();
3439 break;
3440 default:
3441 break;
3442 }
3443
3444 ehea_update_firmware_handles();
3445 ret = NOTIFY_OK;
3446
3447out_unlock:
3448 mutex_unlock(&dlpar_mem_lock);
3449 return ret;
3450}
3451
3452static struct notifier_block ehea_mem_nb = {
3453 .notifier_call = ehea_mem_notifier,
3454};
3455
3456static int ehea_reboot_notifier(struct notifier_block *nb,
3457 unsigned long action, void *unused)
3458{
3459 if (action == SYS_RESTART) {
3460 pr_info("Reboot: freeing all eHEA resources\n");
3461 ibmebus_unregister_driver(&ehea_driver);
3462 }
3463 return NOTIFY_DONE;
3464}
3465
3466static struct notifier_block ehea_reboot_nb = {
3467 .notifier_call = ehea_reboot_notifier,
3468};
3469
3470static int check_module_parm(void) 3532static int check_module_parm(void)
3471{ 3533{
3472 int ret = 0; 3534 int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
3520 if (ret) 3582 if (ret)
3521 goto out; 3583 goto out;
3522 3584
3523 ret = ehea_create_busmap();
3524 if (ret)
3525 goto out;
3526
3527 ret = register_reboot_notifier(&ehea_reboot_nb);
3528 if (ret)
3529 pr_info("failed registering reboot notifier\n");
3530
3531 ret = register_memory_notifier(&ehea_mem_nb);
3532 if (ret)
3533 pr_info("failed registering memory remove notifier\n");
3534
3535 ret = crash_shutdown_register(ehea_crash_handler);
3536 if (ret)
3537 pr_info("failed registering crash handler\n");
3538
3539 ret = ibmebus_register_driver(&ehea_driver); 3585 ret = ibmebus_register_driver(&ehea_driver);
3540 if (ret) { 3586 if (ret) {
3541 pr_err("failed registering eHEA device driver on ebus\n"); 3587 pr_err("failed registering eHEA device driver on ebus\n");
3542 goto out2; 3588 goto out;
3543 } 3589 }
3544 3590
3545 ret = driver_create_file(&ehea_driver.driver, 3591 ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
3547 if (ret) { 3593 if (ret) {
3548 pr_err("failed to register capabilities attribute, ret=%d\n", 3594 pr_err("failed to register capabilities attribute, ret=%d\n",
3549 ret); 3595 ret);
3550 goto out3; 3596 goto out2;
3551 } 3597 }
3552 3598
3553 return ret; 3599 return ret;
3554 3600
3555out3:
3556 ibmebus_unregister_driver(&ehea_driver);
3557out2: 3601out2:
3558 unregister_memory_notifier(&ehea_mem_nb); 3602 ibmebus_unregister_driver(&ehea_driver);
3559 unregister_reboot_notifier(&ehea_reboot_nb);
3560 crash_shutdown_unregister(ehea_crash_handler);
3561out: 3603out:
3562 return ret; 3604 return ret;
3563} 3605}
3564 3606
3565static void __exit ehea_module_exit(void) 3607static void __exit ehea_module_exit(void)
3566{ 3608{
3567 int ret;
3568
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3609 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver); 3610 ibmebus_unregister_driver(&ehea_driver);
3571 unregister_reboot_notifier(&ehea_reboot_nb); 3611 ehea_unregister_memory_hooks();
3572 ret = crash_shutdown_unregister(ehea_crash_handler);
3573 if (ret)
3574 pr_info("failed unregistering crash handler\n");
3575 unregister_memory_notifier(&ehea_mem_nb);
3576 kfree(ehea_fw_handles.arr); 3612 kfree(ehea_fw_handles.arr);
3577 kfree(ehea_bcmc_regs.arr); 3613 kfree(ehea_bcmc_regs.arr);
3578 ehea_destroy_busmap(); 3614 ehea_destroy_busmap();
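
One API subtlety worth spelling out for the registration guard added earlier in this file's diff: atomic_inc_and_test() returns true only when the incremented value is zero, which an up-counter starting at 0 never produces, so that early return is never taken. A once-only guard is more conventionally written with atomic_inc_return(); a sketch, not the driver's code:

	#include <linux/atomic.h>

	static atomic_t demo_hooks_registered = ATOMIC_INIT(0);

	static int demo_register_hooks_once(void)
	{
		/* first caller sees 1 and registers; later callers bail out */
		if (atomic_inc_return(&demo_hooks_registered) > 1)
			return 0;

		/* ... one-time registration work goes here ... */
		return 0;
	}
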
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 21978cc019e7..cd7675ac5bf9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1136,6 +1136,8 @@ restart_poll:
1136 ibmveth_replenish_task(adapter); 1136 ibmveth_replenish_task(adapter);
1137 1137
1138 if (frames_processed < budget) { 1138 if (frames_processed < budget) {
1139 napi_complete(napi);
1140
1139 /* We think we are done - reenable interrupts, 1141 /* We think we are done - reenable interrupts,
1140 * then check once more to make sure we are done. 1142 * then check once more to make sure we are done.
1141 */ 1143 */
@@ -1144,8 +1146,6 @@ restart_poll:
1144 1146
1145 BUG_ON(lpar_rc != H_SUCCESS); 1147 BUG_ON(lpar_rc != H_SUCCESS);
1146 1148
1147 napi_complete(napi);
1148
1149 if (ibmveth_rxq_pending_buffer(adapter) && 1149 if (ibmveth_rxq_pending_buffer(adapter) &&
1150 napi_reschedule(napi)) { 1150 napi_reschedule(napi)) {
1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address, 1151 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1327 return ret; 1327 return ret;
1328} 1328}
1329 1329
1330static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1331{
1332 struct ibmveth_adapter *adapter = netdev_priv(dev);
1333 struct sockaddr *addr = p;
1334 u64 mac_address;
1335 int rc;
1336
1337 if (!is_valid_ether_addr(addr->sa_data))
1338 return -EADDRNOTAVAIL;
1339
1340 mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1341 rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1342 if (rc) {
1343 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1344 return rc;
1345 }
1346
1347 ether_addr_copy(dev->dev_addr, addr->sa_data);
1348
1349 return 0;
1350}
1351
1330static const struct net_device_ops ibmveth_netdev_ops = { 1352static const struct net_device_ops ibmveth_netdev_ops = {
1331 .ndo_open = ibmveth_open, 1353 .ndo_open = ibmveth_open,
1332 .ndo_stop = ibmveth_close, 1354 .ndo_stop = ibmveth_close,
@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
1337 .ndo_fix_features = ibmveth_fix_features, 1359 .ndo_fix_features = ibmveth_fix_features,
1338 .ndo_set_features = ibmveth_set_features, 1360 .ndo_set_features = ibmveth_set_features,
1339 .ndo_validate_addr = eth_validate_addr, 1361 .ndo_validate_addr = eth_validate_addr,
1340 .ndo_set_mac_address = eth_mac_addr, 1362 .ndo_set_mac_address = ibmveth_set_mac_addr,
1341#ifdef CONFIG_NET_POLL_CONTROLLER 1363#ifdef CONFIG_NET_POLL_CONTROLLER
1342 .ndo_poll_controller = ibmveth_poll_controller, 1364 .ndo_poll_controller = ibmveth_poll_controller,
1343#endif 1365#endif
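
The poll-path reorder above calls napi_complete() before the h_vio_signal() that re-enables the device interrupt, so an interrupt firing right after re-enable can schedule a fresh poll instead of finding NAPI still marked as running. The canonical end-of-poll ordering, sketched with hypothetical helpers:

	#include <linux/netdevice.h>

	int demo_process_rx(struct napi_struct *napi, int budget);
	void demo_enable_irq(struct napi_struct *napi);
	void demo_disable_irq(struct napi_struct *napi);
	bool demo_rx_pending(struct napi_struct *napi);

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int done = demo_process_rx(napi, budget);

		if (done < budget) {
			napi_complete(napi);          /* 1: leave poll state */
			demo_enable_irq(napi);        /* 2: then re-arm irq  */
			if (demo_rx_pending(napi) && napi_reschedule(napi))
				demo_disable_irq(napi);  /* raced: poll again */
		}
		return done;
	}
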
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 11a9ffebf8d8..6aea65dae5ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
868 * The grst delay value is in 100ms units, and we'll wait a 868 * The grst delay value is in 100ms units, and we'll wait a
869 * couple counts longer to be sure we don't just miss the end. 869 * couple counts longer to be sure we don't just miss the end.
870 */ 870 */
871 grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK 871 grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
872 >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; 872 I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
873 I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
873 for (cnt = 0; cnt < grst_del + 2; cnt++) { 874 for (cnt = 0; cnt < grst_del + 2; cnt++) {
874 reg = rd32(hw, I40E_GLGEN_RSTAT); 875 reg = rd32(hw, I40E_GLGEN_RSTAT);
875 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) 876 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
2846 2847
2847 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); 2848 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
2848 2849
2849 if (!status) 2850 if (!status && filter_index)
2850 *filter_index = resp->index; 2851 *filter_index = resp->index;
2851 2852
2852 return status; 2853 return status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 183dcb63ce98..a11c70ca5a28 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
40 u32 val; 40 u32 val;
41 41
42 val = rd32(hw, I40E_PRTDCB_GENC); 42 val = rd32(hw, I40E_PRTDCB_GENC);
43 *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> 43 *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
44 I40E_PRTDCB_GENC_PFCLDA_SHIFT); 44 I40E_PRTDCB_GENC_PFCLDA_SHIFT);
45} 45}
46 46
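
This hunk and the grst_del fix in i40e_common.c above correct the same C precedence trap: >> binds tighter than &, so val & MASK >> SHIFT parses as val & (MASK >> SHIFT). A two-line demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0xABCD, mask = 0xFF00;

		printf("%#x\n", val & mask >> 8);   /* val & (mask >> 8) == 0xcd */
		printf("%#x\n", (val & mask) >> 8); /* intended result   == 0xab */
		return 0;
	}
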
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 61236f983971..c17ee77100d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
989 if (!cmd_buf) 989 if (!cmd_buf)
990 return count; 990 return count;
991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count); 991 bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
992 if (bytes_not_copied < 0) 992 if (bytes_not_copied < 0) {
993 kfree(cmd_buf);
993 return bytes_not_copied; 994 return bytes_not_copied;
995 }
994 if (bytes_not_copied > 0) 996 if (bytes_not_copied > 0)
995 count -= bytes_not_copied; 997 count -= bytes_not_copied;
996 cmd_buf[count] = '\0'; 998 cmd_buf[count] = '\0';
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index cbe281be1c9f..dadda3c5d658 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1512 vsi->tc_config.numtc = numtc; 1512 vsi->tc_config.numtc = numtc;
1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; 1513 vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1514 /* Number of queues per enabled TC */ 1514 /* Number of queues per enabled TC */
 1515 num_tc_qps = vsi->alloc_queue_pairs/numtc; 1515 /* In the MFP case far fewer MSI-X
 1516 * vectors may be available, so lower the used
 1517 * queue count.
1518 */
1519 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1520 num_tc_qps = qcount / numtc;
1516 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); 1521 num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
1517 1522
1518 /* Setup queue offset/count for all TCs for given VSI */ 1523 /* Setup queue offset/count for all TCs for given VSI */
@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
2684 u16 qoffset, qcount; 2689 u16 qoffset, qcount;
2685 int i, n; 2690 int i, n;
2686 2691
2687 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) 2692 if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
2688 return; 2693 /* Reset the TC information */
2694 for (i = 0; i < vsi->num_queue_pairs; i++) {
2695 rx_ring = vsi->rx_rings[i];
2696 tx_ring = vsi->tx_rings[i];
2697 rx_ring->dcb_tc = 0;
2698 tx_ring->dcb_tc = 0;
2699 }
2700 }
2689 2701
2690 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { 2702 for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
2691 if (!(vsi->tc_config.enabled_tc & (1 << n))) 2703 if (!(vsi->tc_config.enabled_tc & (1 << n)))
@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
3830{ 3842{
3831 int i; 3843 int i;
3832 3844
3845 i40e_stop_misc_vector(pf);
3846 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3847 synchronize_irq(pf->msix_entries[0].vector);
3848 free_irq(pf->msix_entries[0].vector, pf);
3849 }
3850
3833 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); 3851 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
3834 for (i = 0; i < pf->num_alloc_vsi; i++) 3852 for (i = 0; i < pf->num_alloc_vsi; i++)
3835 if (pf->vsi[i]) 3853 if (pf->vsi[i])
@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
5254 5272
5255 /* Wait for the PF's Tx queues to be disabled */ 5273 /* Wait for the PF's Tx queues to be disabled */
5256 ret = i40e_pf_wait_txq_disabled(pf); 5274 ret = i40e_pf_wait_txq_disabled(pf);
5257 if (!ret) 5275 if (ret) {
5276 /* Schedule PF reset to recover */
5277 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5278 i40e_service_event_schedule(pf);
5279 } else {
5258 i40e_pf_unquiesce_all_vsi(pf); 5280 i40e_pf_unquiesce_all_vsi(pf);
5281 }
5282
5259exit: 5283exit:
5260 return ret; 5284 return ret;
5261} 5285}
@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
5587 int i, v; 5611 int i, v;
5588 5612
5589 /* If we're down or resetting, just bail */ 5613 /* If we're down or resetting, just bail */
5590 if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) 5614 if (test_bit(__I40E_DOWN, &pf->state) ||
5615 test_bit(__I40E_CONFIG_BUSY, &pf->state))
5591 return; 5616 return;
5592 5617
5593 /* for each VSI/netdev 5618 /* for each VSI/netdev
@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
9533 set_bit(__I40E_DOWN, &pf->state); 9558 set_bit(__I40E_DOWN, &pf->state);
9534 del_timer_sync(&pf->service_timer); 9559 del_timer_sync(&pf->service_timer);
9535 cancel_work_sync(&pf->service_task); 9560 cancel_work_sync(&pf->service_task);
9561 i40e_fdir_teardown(pf);
9536 9562
9537 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { 9563 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
9538 i40e_free_vfs(pf); 9564 i40e_free_vfs(pf);
@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
9559 if (pf->vsi[pf->lan_vsi]) 9585 if (pf->vsi[pf->lan_vsi])
9560 i40e_vsi_release(pf->vsi[pf->lan_vsi]); 9586 i40e_vsi_release(pf->vsi[pf->lan_vsi]);
9561 9587
9562 i40e_stop_misc_vector(pf);
9563 if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
9564 synchronize_irq(pf->msix_entries[0].vector);
9565 free_irq(pf->msix_entries[0].vector, pf);
9566 }
9567
9568 /* shutdown and destroy the HMC */ 9588 /* shutdown and destroy the HMC */
9569 if (pf->hw.hmc.hmc_obj) { 9589 if (pf->hw.hmc.hmc_obj) {
9570 ret_code = i40e_shutdown_lan_hmc(&pf->hw); 9590 ret_code = i40e_shutdown_lan_hmc(&pf->hw);
@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
9718 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); 9738 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
9719 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); 9739 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
9720 9740
9741 i40e_clear_interrupt_scheme(pf);
9742
9721 if (system_state == SYSTEM_POWER_OFF) { 9743 if (system_state == SYSTEM_POWER_OFF) {
9722 pci_wake_from_d3(pdev, pf->wol_en); 9744 pci_wake_from_d3(pdev, pf->wol_en);
9723 pci_set_power_state(pdev, PCI_D3hot); 9745 pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3e70f2e45a47..5defe0d63514 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
679{ 679{
680 i40e_status status; 680 i40e_status status;
681 enum i40e_nvmupd_cmd upd_cmd; 681 enum i40e_nvmupd_cmd upd_cmd;
682 bool retry_attempt = false;
682 683
683 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); 684 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
684 685
686retry:
685 switch (upd_cmd) { 687 switch (upd_cmd) {
686 case I40E_NVMUPD_WRITE_CON: 688 case I40E_NVMUPD_WRITE_CON:
687 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); 689 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
725 *errno = -ESRCH; 727 *errno = -ESRCH;
726 break; 728 break;
727 } 729 }
730
731 /* In some circumstances, a multi-write transaction takes longer
732 * than the default 3 minute timeout on the write semaphore. If
733 * the write failed with an EBUSY status, this is likely the problem,
734 * so here we try to reacquire the semaphore then retry the write.
735 * We only do one retry, then give up.
736 */
737 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
738 !retry_attempt) {
739 i40e_status old_status = status;
740 u32 old_asq_status = hw->aq.asq_last_status;
741 u32 gtime;
742
743 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
744 if (gtime >= hw->nvm.hw_semaphore_timeout) {
745 i40e_debug(hw, I40E_DEBUG_ALL,
746 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
747 gtime, hw->nvm.hw_semaphore_timeout);
748 i40e_release_nvm(hw);
749 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
750 if (status) {
751 i40e_debug(hw, I40E_DEBUG_ALL,
752 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
753 hw->aq.asq_last_status);
754 status = old_status;
755 hw->aq.asq_last_status = old_asq_status;
756 } else {
757 retry_attempt = true;
758 goto retry;
759 }
760 }
761 }
762
728 return status; 763 return status;
729} 764}
730 765
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 2206d2d36f0f..bbf1b1247ac4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
586} 586}
587 587
588/** 588/**
589 * i40e_get_head - Retrieve head from head writeback
590 * @tx_ring: tx ring to fetch head of
591 *
592 * Returns value of Tx ring head based on value stored
593 * in head write-back location
594 **/
595static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
596{
597 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
598
599 return le32_to_cpu(*(volatile __le32 *)head);
600}
601
602/**
589 * i40e_get_tx_pending - how many tx descriptors not processed 603 * i40e_get_tx_pending - how many tx descriptors not processed
590 * @tx_ring: the ring of descriptors 604 * @tx_ring: the ring of descriptors
591 * 605 *
@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
594 **/ 608 **/
595static u32 i40e_get_tx_pending(struct i40e_ring *ring) 609static u32 i40e_get_tx_pending(struct i40e_ring *ring)
596{ 610{
597 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 611 u32 head, tail;
598 ? ring->next_to_use 612
599 : ring->next_to_use + ring->count); 613 head = i40e_get_head(ring);
600 return ntu - ring->next_to_clean; 614 tail = readl(ring->tail);
615
616 if (head != tail)
617 return (head < tail) ?
618 tail - head : (tail + ring->count - head);
619
620 return 0;
601} 621}
602 622
603/** 623/**
@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
606 **/ 626 **/
607static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 627static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
608{ 628{
629 u32 tx_done = tx_ring->stats.packets;
630 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
609 u32 tx_pending = i40e_get_tx_pending(tx_ring); 631 u32 tx_pending = i40e_get_tx_pending(tx_ring);
610 struct i40e_pf *pf = tx_ring->vsi->back; 632 struct i40e_pf *pf = tx_ring->vsi->back;
611 bool ret = false; 633 bool ret = false;
@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
623 * run the check_tx_hang logic with a transmit completion 645 * run the check_tx_hang logic with a transmit completion
624 * pending but without time to complete it yet. 646 * pending but without time to complete it yet.
625 */ 647 */
626 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 648 if ((tx_done_old == tx_done) && tx_pending) {
627 (tx_pending >= I40E_MIN_DESC_PENDING)) {
628 /* make sure it is true for two checks in a row */ 649 /* make sure it is true for two checks in a row */
629 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 650 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
630 &tx_ring->state); 651 &tx_ring->state);
631 } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 652 } else if (tx_done_old == tx_done &&
632 (tx_pending < I40E_MIN_DESC_PENDING) && 653 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
633 (tx_pending > 0)) {
634 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) 654 if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
635 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d", 655 dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
636 tx_pending, tx_ring->queue_index); 656 tx_pending, tx_ring->queue_index);
637 pf->tx_sluggish_count++; 657 pf->tx_sluggish_count++;
638 } else { 658 } else {
639 /* update completed stats and disarm the hang check */ 659 /* update completed stats and disarm the hang check */
640 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 660 tx_ring->tx_stats.tx_done_old = tx_done;
641 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 661 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
642 } 662 }
643 663
644 return ret; 664 return ret;
645} 665}
646 666
647/**
648 * i40e_get_head - Retrieve head from head writeback
649 * @tx_ring: tx ring to fetch head of
650 *
651 * Returns value of Tx ring head based on value stored
652 * in head write-back location
653 **/
654static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
655{
656 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
657
658 return le32_to_cpu(*(volatile __le32 *)head);
659}
660
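
The write-back slot that i40e_get_head() reads lives just past the end
of the ring: the descriptor buffer is allocated with extra room after
the last descriptor, and the hardware DMA-writes the index of the last
completed descriptor into that slot. Annotated (same pointer math as
the function being moved above):

	/* desc[0] .. desc[count-1] are real Tx descriptors; the
	 * slot at desc[count] holds the DMA'd head index */
	void *wb = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
	u32 head = le32_to_cpu(*(volatile __le32 *)wb);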
661#define WB_STRIDE 0x3 667#define WB_STRIDE 0x3
662 668
663/** 669/**
@@ -2140,6 +2146,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
2140} 2146}
2141 2147
2142/** 2148/**
2149 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
2150 * @skb: send buffer
2151 * @tx_flags: collected send information
2152 * @hdr_len: size of the packet header
2153 *
2154 * Note: Our HW can't scatter-gather more than 8 fragments to build
2155 * a packet on the wire and so we need to figure out the cases where we
2156 * need to linearize the skb.
2157 **/
2158static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
2159 const u8 hdr_len)
2160{
2161 struct skb_frag_struct *frag;
2162 bool linearize = false;
2163 unsigned int size = 0;
2164 u16 num_frags;
2165 u16 gso_segs;
2166
2167 num_frags = skb_shinfo(skb)->nr_frags;
2168 gso_segs = skb_shinfo(skb)->gso_segs;
2169
2170 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
2171 u16 j = 1;
2172
2173 if (num_frags < (I40E_MAX_BUFFER_TXD))
2174 goto linearize_chk_done;
2175 /* try the simple math, if we have too many frags per segment */
2176 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
2177 I40E_MAX_BUFFER_TXD) {
2178 linearize = true;
2179 goto linearize_chk_done;
2180 }
2181 frag = &skb_shinfo(skb)->frags[0];
2182 size = hdr_len;
2183 /* we might still have more fragments per segment */
2184 do {
2185 size += skb_frag_size(frag);
2186 frag++; j++;
2187 if (j == I40E_MAX_BUFFER_TXD) {
2188 if (size < skb_shinfo(skb)->gso_size) {
2189 linearize = true;
2190 break;
2191 }
2192 j = 1;
2193 size -= skb_shinfo(skb)->gso_size;
2194 if (size)
2195 j++;
2196 size += hdr_len;
2197 }
2198 num_frags--;
2199 } while (num_frags);
2200 } else {
2201 if (num_frags >= I40E_MAX_BUFFER_TXD)
2202 linearize = true;
2203 }
2204
2205linearize_chk_done:
2206 return linearize;
2207}
2208
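
A worked example of the quick ratio test above (with I40E_MAX_BUFFER_TXD
== 8, per the new define): 17 fragments spread over 2 GSO segments gives
DIV_ROUND_UP(17 + 2, 2) = 10 > 8, so the skb is linearized without
walking the fragments at all; 12 fragments over 2 segments gives
DIV_ROUND_UP(14, 2) = 7 <= 8, so the per-segment walk still runs to
catch an uneven fragment distribution.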
2209/**
2143 * i40e_tx_map - Build the Tx descriptor 2210 * i40e_tx_map - Build the Tx descriptor
2144 * @tx_ring: ring to send buffer on 2211 * @tx_ring: ring to send buffer on
2145 * @skb: send buffer 2212 * @skb: send buffer
@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2396 if (tsyn) 2463 if (tsyn)
2397 tx_flags |= I40E_TX_FLAGS_TSYN; 2464 tx_flags |= I40E_TX_FLAGS_TSYN;
2398 2465
2466 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
2467 if (skb_linearize(skb))
2468 goto out_drop;
2469
2399 skb_tx_timestamp(skb); 2470 skb_tx_timestamp(skb);
2400 2471
2401 /* always enable CRC insertion offload */ 2472 /* always enable CRC insertion offload */
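
skb_linearize() pulls every paged fragment into the skb's head buffer so
the frame ends up in one contiguous region; it returns 0 on success and
-ENOMEM if the head cannot be grown, in which case the frame is dropped
here rather than handed to the hardware in a layout it cannot DMA.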
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 18b00231d2f1..dff0baeb1ecc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 29004382f462..708891571dae 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
126} 126}
127 127
128/** 128/**
129 * i40e_get_head - Retrieve head from head writeback
130 * @tx_ring: tx ring to fetch head of
131 *
132 * Returns value of Tx ring head based on value stored
133 * in head write-back location
134 **/
135static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
136{
137 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
138
139 return le32_to_cpu(*(volatile __le32 *)head);
140}
141
142/**
129 * i40e_get_tx_pending - how many tx descriptors not processed 143 * i40e_get_tx_pending - how many tx descriptors not processed
130 * @tx_ring: the ring of descriptors 144 * @tx_ring: the ring of descriptors
131 * 145 *
@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
134 **/ 148 **/
135static u32 i40e_get_tx_pending(struct i40e_ring *ring) 149static u32 i40e_get_tx_pending(struct i40e_ring *ring)
136{ 150{
137 u32 ntu = ((ring->next_to_clean <= ring->next_to_use) 151 u32 head, tail;
138 ? ring->next_to_use 152
139 : ring->next_to_use + ring->count); 153 head = i40e_get_head(ring);
140 return ntu - ring->next_to_clean; 154 tail = readl(ring->tail);
155
156 if (head != tail)
157 return (head < tail) ?
158 tail - head : (tail + ring->count - head);
159
160 return 0;
141} 161}
142 162
143/** 163/**
@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
146 **/ 166 **/
147static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) 167static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
148{ 168{
169 u32 tx_done = tx_ring->stats.packets;
170 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
149 u32 tx_pending = i40e_get_tx_pending(tx_ring); 171 u32 tx_pending = i40e_get_tx_pending(tx_ring);
150 bool ret = false; 172 bool ret = false;
151 173
@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
162 * run the check_tx_hang logic with a transmit completion 184 * run the check_tx_hang logic with a transmit completion
163 * pending but without time to complete it yet. 185 * pending but without time to complete it yet.
164 */ 186 */
165 if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && 187 if ((tx_done_old == tx_done) && tx_pending) {
166 (tx_pending >= I40E_MIN_DESC_PENDING)) {
167 /* make sure it is true for two checks in a row */ 188 /* make sure it is true for two checks in a row */
168 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, 189 ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
169 &tx_ring->state); 190 &tx_ring->state);
170 } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || 191 } else if (tx_done_old == tx_done &&
171 !(tx_pending < I40E_MIN_DESC_PENDING) || 192 (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
172 !(tx_pending > 0)) {
173 /* update completed stats and disarm the hang check */ 193 /* update completed stats and disarm the hang check */
174 tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; 194 tx_ring->tx_stats.tx_done_old = tx_done;
175 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); 195 clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
176 } 196 }
177 197
178 return ret; 198 return ret;
179} 199}
180 200
181/**
182 * i40e_get_head - Retrieve head from head writeback
183 * @tx_ring: tx ring to fetch head of
184 *
185 * Returns value of Tx ring head based on value stored
186 * in head write-back location
187 **/
188static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
189{
190 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
191
192 return le32_to_cpu(*(volatile __le32 *)head);
193}
194
195#define WB_STRIDE 0x3 201#define WB_STRIDE 0x3
196 202
197/** 203/**
@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
1206 if (err < 0) 1212 if (err < 0)
1207 return err; 1213 return err;
1208 1214
1209 if (protocol == htons(ETH_P_IP)) { 1215 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
1210 iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); 1216 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
1217
1218 if (iph->version == 4) {
1211 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1219 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1212 iph->tot_len = 0; 1220 iph->tot_len = 0;
1213 iph->check = 0; 1221 iph->check = 0;
1214 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 1222 tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
1215 0, IPPROTO_TCP, 0); 1223 0, IPPROTO_TCP, 0);
1216 } else if (skb_is_gso_v6(skb)) { 1224 } else if (ipv6h->version == 6) {
1217
1218 ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
1219 : ipv6_hdr(skb);
1220 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); 1225 tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
1221 ipv6h->payload_len = 0; 1226 ipv6h->payload_len = 0;
1222 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 1227 tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
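
Dispatching on iph->version rather than the Ethernet protocol (and the
old skb_is_gso_v6() test) works because both header pointers are taken
up front and the version nibble sits at the same offset in IPv4 and IPv6
headers; for encapsulated frames it is the inner header, the one TSO
actually segments, whose version is examined.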
@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1274 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; 1279 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1275 } 1280 }
1276 } else if (tx_flags & I40E_TX_FLAGS_IPV6) { 1281 } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
1277 if (tx_flags & I40E_TX_FLAGS_TSO) { 1282 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
1278 *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; 1283 if (tx_flags & I40E_TX_FLAGS_TSO)
1279 ip_hdr(skb)->check = 0; 1284 ip_hdr(skb)->check = 0;
1280 } else {
1281 *cd_tunneling |=
1282 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
1283 }
1284 } 1285 }
1285 1286
1286 /* Now set the ctx descriptor fields */ 1287 /* Now set the ctx descriptor fields */
@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
1290 ((skb_inner_network_offset(skb) - 1291 ((skb_inner_network_offset(skb) -
1291 skb_transport_offset(skb)) >> 1) << 1292 skb_transport_offset(skb)) >> 1) <<
1292 I40E_TXD_CTX_QW0_NATLEN_SHIFT; 1293 I40E_TXD_CTX_QW0_NATLEN_SHIFT;
1294 if (this_ip_hdr->version == 6) {
1295 tx_flags &= ~I40E_TX_FLAGS_IPV4;
1296 tx_flags |= I40E_TX_FLAGS_IPV6;
1297 }
1298
1293 1299
1294 } else { 1300 } else {
1295 network_hdr_len = skb_network_header_len(skb); 1301 network_hdr_len = skb_network_header_len(skb);
@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1380 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); 1386 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
1381} 1387}
1382 1388
 1389/**
1390 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
1391 * @skb: send buffer
1392 * @tx_flags: collected send information
1393 * @hdr_len: size of the packet header
1394 *
1395 * Note: Our HW can't scatter-gather more than 8 fragments to build
1396 * a packet on the wire and so we need to figure out the cases where we
1397 * need to linearize the skb.
1398 **/
1399static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
1400 const u8 hdr_len)
1401{
1402 struct skb_frag_struct *frag;
1403 bool linearize = false;
1404 unsigned int size = 0;
1405 u16 num_frags;
1406 u16 gso_segs;
1407
1408 num_frags = skb_shinfo(skb)->nr_frags;
1409 gso_segs = skb_shinfo(skb)->gso_segs;
1410
1411 if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
1412 u16 j = 1;
1413
1414 if (num_frags < (I40E_MAX_BUFFER_TXD))
1415 goto linearize_chk_done;
1416 /* try the simple math, if we have too many frags per segment */
1417 if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
1418 I40E_MAX_BUFFER_TXD) {
1419 linearize = true;
1420 goto linearize_chk_done;
1421 }
1422 frag = &skb_shinfo(skb)->frags[0];
1423 size = hdr_len;
1424 /* we might still have more fragments per segment */
1425 do {
1426 size += skb_frag_size(frag);
1427 frag++; j++;
1428 if (j == I40E_MAX_BUFFER_TXD) {
1429 if (size < skb_shinfo(skb)->gso_size) {
1430 linearize = true;
1431 break;
1432 }
1433 j = 1;
1434 size -= skb_shinfo(skb)->gso_size;
1435 if (size)
1436 j++;
1437 size += hdr_len;
1438 }
1439 num_frags--;
1440 } while (num_frags);
1441 } else {
1442 if (num_frags >= I40E_MAX_BUFFER_TXD)
1443 linearize = true;
1444 }
1445
1446linearize_chk_done:
1447 return linearize;
1448}
1449
1383/** 1450/**
1384 * i40e_tx_map - Build the Tx descriptor 1451 * i40e_tx_map - Build the Tx descriptor
1385 * @tx_ring: ring to send buffer on 1452 * @tx_ring: ring to send buffer on
@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
1654 else if (tso) 1721 else if (tso)
1655 tx_flags |= I40E_TX_FLAGS_TSO; 1722 tx_flags |= I40E_TX_FLAGS_TSO;
1656 1723
1724 if (i40e_chk_linearize(skb, tx_flags, hdr_len))
1725 if (skb_linearize(skb))
1726 goto out_drop;
1727
1657 skb_tx_timestamp(skb); 1728 skb_tx_timestamp(skb);
1658 1729
1659 /* always enable CRC insertion offload */ 1730 /* always enable CRC insertion offload */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 4e15903b2b6d..c950a038237c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {
112 112
113#define i40e_rx_desc i40e_32byte_rx_desc 113#define i40e_rx_desc i40e_32byte_rx_desc
114 114
115#define I40E_MAX_BUFFER_TXD 8
115#define I40E_MIN_TX_LEN 17 116#define I40E_MIN_TX_LEN 17
116#define I40E_MAX_DATA_PER_TXD 8192 117#define I40E_MAX_DATA_PER_TXD 8192
117 118
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 2a210c4efb89..ebce5bb24df9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1698,8 +1698,6 @@ int mlx4_en_start_port(struct net_device *dev)
1698 /* Schedule multicast task to populate multicast list */ 1698 /* Schedule multicast task to populate multicast list */
1699 queue_work(mdev->workqueue, &priv->rx_mode_task); 1699 queue_work(mdev->workqueue, &priv->rx_mode_task);
1700 1700
1701 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1702
1703#ifdef CONFIG_MLX4_EN_VXLAN 1701#ifdef CONFIG_MLX4_EN_VXLAN
1704 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) 1702 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1705 vxlan_get_rx_port(dev); 1703 vxlan_get_rx_port(dev);
@@ -2853,6 +2851,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2853 queue_delayed_work(mdev->workqueue, &priv->service_task, 2851 queue_delayed_work(mdev->workqueue, &priv->service_task,
2854 SERVICE_TASK_DELAY); 2852 SERVICE_TASK_DELAY);
2855 2853
2854 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
2855
2856 return 0; 2856 return 0;
2857 2857
2858out: 2858out:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 2d8ee66138e8..a61009f4b2df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
81{ 81{
82 u32 loopback_ok = 0; 82 u32 loopback_ok = 0;
83 int i; 83 int i;
84 84 bool gro_enabled;
85 85
86 priv->loopback_ok = 0; 86 priv->loopback_ok = 0;
87 priv->validate_loopback = 1; 87 priv->validate_loopback = 1;
88 gro_enabled = priv->dev->features & NETIF_F_GRO;
88 89
89 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 90 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
91 priv->dev->features &= ~NETIF_F_GRO;
90 92
91 /* xmit */ 93 /* xmit */
92 if (mlx4_en_test_loopback_xmit(priv)) { 94 if (mlx4_en_test_loopback_xmit(priv)) {
@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
108mlx4_en_test_loopback_exit: 110mlx4_en_test_loopback_exit:
109 111
110 priv->validate_loopback = 0; 112 priv->validate_loopback = 0;
113
114 if (gro_enabled)
115 priv->dev->features |= NETIF_F_GRO;
116
111 mlx4_en_update_loopback_state(priv->dev, priv->dev->features); 117 mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
112 return !loopback_ok; 118 return !loopback_ok;
113} 119}
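
The test now runs with GRO forced off, so the receive side cannot
coalesce the looped-back frames before they are validated, and the saved
flag restores the user's setting on exit. The same save-mask-restore
idiom in isolation (a sketch, not driver code):

	bool gro_enabled = dev->features & NETIF_F_GRO;

	dev->features &= ~NETIF_F_GRO;	/* run the test with GRO off */
	/* ... loopback xmit and validation ... */
	if (gro_enabled)
		dev->features |= NETIF_F_GRO;	/* restore user setting */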
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2a8268e6be15..ebbe244e80dd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -453,7 +453,7 @@ struct mlx4_en_port_stats {
453 unsigned long rx_chksum_none; 453 unsigned long rx_chksum_none;
454 unsigned long rx_chksum_complete; 454 unsigned long rx_chksum_complete;
455 unsigned long tx_chksum_offload; 455 unsigned long tx_chksum_offload;
456#define NUM_PORT_STATS 9 456#define NUM_PORT_STATS 10
457}; 457};
458 458
459struct mlx4_en_perf_stats { 459struct mlx4_en_perf_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 2bb8553bd905..eda29dbbfcd2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -412,7 +412,6 @@ err_icm:
412 412
413EXPORT_SYMBOL_GPL(mlx4_qp_alloc); 413EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
414 414
415#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
416int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, 415int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
417 enum mlx4_update_qp_attr attr, 416 enum mlx4_update_qp_attr attr,
418 struct mlx4_update_qp_params *params) 417 struct mlx4_update_qp_params *params)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 486e3d26cd4a..d97ca88c55b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
713 struct mlx4_vport_oper_state *vp_oper; 713 struct mlx4_vport_oper_state *vp_oper;
714 struct mlx4_priv *priv; 714 struct mlx4_priv *priv;
715 u32 qp_type; 715 u32 qp_type;
716 int port; 716 int port, err = 0;
717 717
718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 718 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
719 priv = mlx4_priv(dev); 719 priv = mlx4_priv(dev);
@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
738 } else { 738 } else {
739 struct mlx4_update_qp_params params = {.flags = 0}; 739 struct mlx4_update_qp_params params = {.flags = 0};
740 740
741 mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); 741 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
742 if (err)
743 goto out;
742 } 744 }
743 } 745 }
744 746
@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
773 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; 775 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
774 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 776 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
775 } 777 }
776 return 0; 778out:
779 return err;
777} 780}
778 781
779static int mpt_mask(struct mlx4_dev *dev) 782static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 44e8d7d25547..57a6e6cd74fc 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
1239 if (mac->phydev) 1239 if (mac->phydev)
1240 phy_start(mac->phydev); 1240 phy_start(mac->phydev);
1241 1241
1242 init_timer(&mac->tx->clean_timer); 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
1243 mac->tx->clean_timer.function = pasemi_mac_tx_timer; 1243 (unsigned long)mac->tx);
1244 mac->tx->clean_timer.data = (unsigned long)mac->tx; 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ);
1245 mac->tx->clean_timer.expires = jiffies+HZ;
1246 add_timer(&mac->tx->clean_timer);
1247 1245
1248 return 0; 1246 return 0;
1249 1247
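
This conversion (and the identical ones in smc91c92_cs and stmmac_main
below) is mechanical. For kernels of this vintage, roughly:

	setup_timer(&t, fn, (unsigned long)arg);
	/* is equivalent to: */
	init_timer(&t);
	t.function = fn;
	t.data = (unsigned long)arg;

	mod_timer(&t, jiffies + HZ);
	/* is, for a timer that is not yet pending, equivalent to: */
	t.expires = jiffies + HZ;
	add_timer(&t);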
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 6e426ae94692..0a5e204a0179 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {
354 354
355} __attribute__ ((aligned(64))); 355} __attribute__ ((aligned(64)));
356 356
357/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 357/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
358struct rcv_desc { 358struct rcv_desc {
359 __le16 reference_handle; 359 __le16 reference_handle;
360 __le16 reserved; 360 __le16 reserved;
@@ -499,7 +499,7 @@ struct uni_data_desc{
499#define NETXEN_IMAGE_START 0x43000 /* compressed image */ 499#define NETXEN_IMAGE_START 0x43000 /* compressed image */
500#define NETXEN_SECONDARY_START 0x200000 /* backup images */ 500#define NETXEN_SECONDARY_START 0x200000 /* backup images */
501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ 501#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
502#define NETXEN_USER_START 0x3E8000 /* Firmare info */ 502#define NETXEN_USER_START 0x3E8000 /* Firmware info */
503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ 503#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ 504#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
505 505
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fa4317611fd6..f221126a5c4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -314,7 +314,7 @@ struct qlcnic_fdt {
314#define QLCNIC_BRDCFG_START 0x4000 /* board config */ 314#define QLCNIC_BRDCFG_START 0x4000 /* board config */
315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */ 315#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */ 316#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
317#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ 317#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
318 318
319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) 319#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) 320#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ad0020af2193..c70ab40d8698 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2561 int rc = -EINVAL; 2561 int rc = -EINVAL;
2562 2562
2563 if (!rtl_fw_format_ok(tp, rtl_fw)) { 2563 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2564 netif_err(tp, ifup, dev, "invalid firwmare\n"); 2564 netif_err(tp, ifup, dev, "invalid firmware\n");
2565 goto out; 2565 goto out;
2566 } 2566 }
2567 2567
@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
5067 RTL_W8(ChipCmd, CmdReset); 5067 RTL_W8(ChipCmd, CmdReset);
5068 5068
5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); 5069 rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
5070
5071 netdev_reset_queue(tp->dev);
5072} 5070}
5073 5071
5074static void rtl_request_uncached_firmware(struct rtl8169_private *tp) 5072static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7049 u32 status, len; 7047 u32 status, len;
7050 u32 opts[2]; 7048 u32 opts[2];
7051 int frags; 7049 int frags;
7052 bool stop_queue;
7053 7050
7054 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { 7051 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
7055 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); 7052 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7090 7087
7091 txd->opts2 = cpu_to_le32(opts[1]); 7088 txd->opts2 = cpu_to_le32(opts[1]);
7092 7089
7093 netdev_sent_queue(dev, skb->len);
7094
7095 skb_tx_timestamp(skb); 7090 skb_tx_timestamp(skb);
7096 7091
7097 /* Force memory writes to complete before releasing descriptor */ 7092 /* Force memory writes to complete before releasing descriptor */
@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
7106 7101
7107 tp->cur_tx += frags + 1; 7102 tp->cur_tx += frags + 1;
7108 7103
7109 stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); 7104 RTL_W8(TxPoll, NPQ);
7110 7105
7111 if (!skb->xmit_more || stop_queue || 7106 mmiowb();
7112 netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
7113 RTL_W8(TxPoll, NPQ);
7114
7115 mmiowb();
7116 }
7117 7107
7118 if (stop_queue) { 7108 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
7119 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must 7109 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
7120 * not miss a ring update when it notices a stopped queue. 7110 * not miss a ring update when it notices a stopped queue.
7121 */ 7111 */
@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
7198static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) 7188static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7199{ 7189{
7200 unsigned int dirty_tx, tx_left; 7190 unsigned int dirty_tx, tx_left;
7201 unsigned int bytes_compl = 0, pkts_compl = 0;
7202 7191
7203 dirty_tx = tp->dirty_tx; 7192 dirty_tx = tp->dirty_tx;
7204 smp_rmb(); 7193 smp_rmb();
@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7222 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, 7211 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
7223 tp->TxDescArray + entry); 7212 tp->TxDescArray + entry);
7224 if (status & LastFrag) { 7213 if (status & LastFrag) {
7225 pkts_compl++; 7214 u64_stats_update_begin(&tp->tx_stats.syncp);
7226 bytes_compl += tx_skb->skb->len; 7215 tp->tx_stats.packets++;
7216 tp->tx_stats.bytes += tx_skb->skb->len;
7217 u64_stats_update_end(&tp->tx_stats.syncp);
7227 dev_kfree_skb_any(tx_skb->skb); 7218 dev_kfree_skb_any(tx_skb->skb);
7228 tx_skb->skb = NULL; 7219 tx_skb->skb = NULL;
7229 } 7220 }
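
With the byte-queue-limit accounting gone, the counters are now bumped
per completed packet inside the loop. The u64_stats_update_begin()/end()
pair marks the writer section so 32-bit readers of these 64-bit counters
see a consistent snapshot; the sync is a seqcount on 32-bit SMP and
compiles away entirely on 64-bit, where the stores are atomic anyway.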
@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
7232 } 7223 }
7233 7224
7234 if (tp->dirty_tx != dirty_tx) { 7225 if (tp->dirty_tx != dirty_tx) {
7235 netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
7236
7237 u64_stats_update_begin(&tp->tx_stats.syncp);
7238 tp->tx_stats.packets += pkts_compl;
7239 tp->tx_stats.bytes += bytes_compl;
7240 u64_stats_update_end(&tp->tx_stats.syncp);
7241
7242 tp->dirty_tx = dirty_tx; 7226 tp->dirty_tx = dirty_tx;
7243 /* Sync with rtl8169_start_xmit: 7227 /* Sync with rtl8169_start_xmit:
7244 * - publish dirty_tx ring index (write barrier) 7228 * - publish dirty_tx ring index (write barrier)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 4da8bd263997..736d5d1624a1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
508 .tpauser = 1, 508 .tpauser = 1,
509 .hw_swap = 1, 509 .hw_swap = 1,
510 .rmiimode = 1, 510 .rmiimode = 1,
511 .shift_rd0 = 1,
512}; 511};
513 512
514static void sh_eth_set_rate_sh7724(struct net_device *ndev) 513static void sh_eth_set_rate_sh7724(struct net_device *ndev)
@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
1392 msleep(2); /* max frame time at 10 Mbps < 1250 us */ 1391 msleep(2); /* max frame time at 10 Mbps < 1250 us */
1393 sh_eth_get_stats(ndev); 1392 sh_eth_get_stats(ndev);
1394 sh_eth_reset(ndev); 1393 sh_eth_reset(ndev);
1394
1395 /* Set MAC address again */
1396 update_mac_address(ndev);
1395} 1397}
1396 1398
1397/* free Tx skb function */ 1399/* free Tx skb function */
@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
1407 txdesc = &mdp->tx_ring[entry]; 1409 txdesc = &mdp->tx_ring[entry];
1408 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) 1410 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
1409 break; 1411 break;
1412 /* TACT bit must be checked before all the following reads */
1413 rmb();
1410 /* Free the original skb. */ 1414 /* Free the original skb. */
1411 if (mdp->tx_skbuff[entry]) { 1415 if (mdp->tx_skbuff[entry]) {
1412 dma_unmap_single(&ndev->dev, txdesc->addr, 1416 dma_unmap_single(&ndev->dev, txdesc->addr,
@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1444 limit = boguscnt; 1448 limit = boguscnt;
1445 rxdesc = &mdp->rx_ring[entry]; 1449 rxdesc = &mdp->rx_ring[entry];
1446 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1450 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1451 /* RACT bit must be checked before all the following reads */
1452 rmb();
1447 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1453 desc_status = edmac_to_cpu(mdp, rxdesc->status);
1448 pkt_len = rxdesc->frame_length; 1454 pkt_len = rxdesc->frame_length;
1449 1455
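
The new barriers follow the usual descriptor-ownership discipline: the
consumer reads the ownership bit first and must rmb() before trusting
any other descriptor field, while the producer fills in every field,
wmb()s, and only then flips ownership to the hardware. In outline (a
generic sketch, not sh_eth code):

	/* consumer: CPU reclaiming a descriptor */
	if (desc->status & OWN)		/* still owned by the NIC */
		break;
	rmb();				/* ownership read before payload reads */
	len = desc->frame_length;

	/* producer: CPU handing a descriptor to the NIC */
	desc->addr = dma_addr;
	desc->buffer_length = len;
	wmb();				/* publish fields before ownership */
	desc->status |= OWN;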
@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1455 1461
1456 /* In case of almost all GETHER/ETHERs, the Receive Frame State 1462 /* In case of almost all GETHER/ETHERs, the Receive Frame State
1457 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to 1463 * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
1458 * bit 0. However, in case of the R8A7740, R8A779x, and 1464 * bit 0. However, in case of the R8A7740 and R7S72100
1459 * R7S72100 the RFS bits are from bit 25 to bit 16. So, the 1465 * the RFS bits are from bit 25 to bit 16. So, the
1460 * driver needs right shifting by 16. 1466 * driver needs right shifting by 16.
1461 */ 1467 */
1462 if (mdp->cd->shift_rd0) 1468 if (mdp->cd->shift_rd0)
@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1523 skb_checksum_none_assert(skb); 1529 skb_checksum_none_assert(skb);
1524 rxdesc->addr = dma_addr; 1530 rxdesc->addr = dma_addr;
1525 } 1531 }
1532 wmb(); /* RACT bit must be set after all the above writes */
1526 if (entry >= mdp->num_rx_ring - 1) 1533 if (entry >= mdp->num_rx_ring - 1)
1527 rxdesc->status |= 1534 rxdesc->status |=
1528 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); 1535 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1535 /* If we don't need to check status, don't. -KDU */ 1542 /* If we don't need to check status, don't. -KDU */
1536 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { 1543 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1537 /* fix the values for the next receiving if RDE is set */ 1544 /* fix the values for the next receiving if RDE is set */
1538 if (intr_status & EESR_RDE) { 1545 if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
1539 u32 count = (sh_eth_read(ndev, RDFAR) - 1546 u32 count = (sh_eth_read(ndev, RDFAR) -
1540 sh_eth_read(ndev, RDLAR)) >> 4; 1547 sh_eth_read(ndev, RDLAR)) >> 4;
1541 1548
@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2174 } 2181 }
2175 spin_unlock_irqrestore(&mdp->lock, flags); 2182 spin_unlock_irqrestore(&mdp->lock, flags);
2176 2183
2177 if (skb_padto(skb, ETH_ZLEN)) 2184 if (skb_put_padto(skb, ETH_ZLEN))
2178 return NETDEV_TX_OK; 2185 return NETDEV_TX_OK;
2179 2186
2180 entry = mdp->cur_tx % mdp->num_tx_ring; 2187 entry = mdp->cur_tx % mdp->num_tx_ring;
@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2192 } 2199 }
2193 txdesc->buffer_length = skb->len; 2200 txdesc->buffer_length = skb->len;
2194 2201
2202 wmb(); /* TACT bit must be set after all the above writes */
2195 if (entry >= mdp->num_tx_ring - 1) 2203 if (entry >= mdp->num_tx_ring - 1)
2196 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); 2204 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
2197 else 2205 else
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 34389b6aa67c..9fb6948e14c6 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); 1257 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1258 1258
1259 if (enable) 1259 if (enable)
1260 val |= 1 << rocker_port->lport; 1260 val |= 1ULL << rocker_port->lport;
1261 else 1261 else
1262 val &= ~(1 << rocker_port->lport); 1262 val &= ~(1ULL << rocker_port->lport);
1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); 1263 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1264} 1264}
1265 1265
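
The 1ULL suffix matters because PORT_PHYS_ENABLE is a 64-bit register:
with an int constant, the shift in "1 << lport" happens in 32-bit
arithmetic before any widening, which is undefined for lport >= 32. For
example:

	u64 bad  = 1 << 33;	/* undefined behaviour: 32-bit shift */
	u64 good = 1ULL << 33;	/* 0x200000000 as intended */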
@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)
4201 4201
4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count; 4202 alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL); 4203 rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4204 if (!rocker->ports)
4205 return -ENOMEM;
4204 for (i = 0; i < rocker->port_count; i++) { 4206 for (i = 0; i < rocker->port_count; i++) {
4205 err = rocker_probe_port(rocker, i); 4207 err = rocker_probe_port(rocker, i);
4206 if (err) 4208 if (err)
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6b33127ab352..3449893aea8d 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
1070 smc->packets_waiting = 0; 1070 smc->packets_waiting = 0;
1071 1071
1072 smc_reset(dev); 1072 smc_reset(dev);
1073 init_timer(&smc->media); 1073 setup_timer(&smc->media, media_check, (u_long)dev);
1074 smc->media.function = media_check; 1074 mod_timer(&smc->media, jiffies + HZ);
1075 smc->media.data = (u_long) dev;
1076 smc->media.expires = jiffies + HZ;
1077 add_timer(&smc->media);
1078 1075
1079 return 0; 1076 return 0;
1080} /* smc_open */ 1077} /* smc_open */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 88a55f95fe09..8678e39aba08 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -91,6 +91,11 @@ static const char version[] =
91 91
92#include "smc91x.h" 92#include "smc91x.h"
93 93
94#if defined(CONFIG_ASSABET_NEPONSET)
95#include <mach/assabet.h>
96#include <mach/neponset.h>
97#endif
98
94#ifndef SMC_NOWAIT 99#ifndef SMC_NOWAIT
95# define SMC_NOWAIT 0 100# define SMC_NOWAIT 0
96#endif 101#endif
@@ -2243,10 +2248,9 @@ static int smc_drv_probe(struct platform_device *pdev)
2243 const struct of_device_id *match = NULL; 2248 const struct of_device_id *match = NULL;
2244 struct smc_local *lp; 2249 struct smc_local *lp;
2245 struct net_device *ndev; 2250 struct net_device *ndev;
2246 struct resource *res; 2251 struct resource *res, *ires;
2247 unsigned int __iomem *addr; 2252 unsigned int __iomem *addr;
2248 unsigned long irq_flags = SMC_IRQ_FLAGS; 2253 unsigned long irq_flags = SMC_IRQ_FLAGS;
2249 unsigned long irq_resflags;
2250 int ret; 2254 int ret;
2251 2255
2252 ndev = alloc_etherdev(sizeof(struct smc_local)); 2256 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2338,25 +2342,23 @@ static int smc_drv_probe(struct platform_device *pdev)
2338 goto out_free_netdev; 2342 goto out_free_netdev;
2339 } 2343 }
2340 2344
2341 ndev->irq = platform_get_irq(pdev, 0); 2345 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2342 if (ndev->irq <= 0) { 2346 if (!ires) {
2343 ret = -ENODEV; 2347 ret = -ENODEV;
2344 goto out_release_io; 2348 goto out_release_io;
2345 } 2349 }
2346 /* 2350
2347 * If this platform does not specify any special irqflags, or if 2351 ndev->irq = ires->start;
2348 * the resource supplies a trigger, override the irqflags with 2352
2349 * the trigger flags from the resource. 2353 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
2350 */ 2354 irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2351 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2352 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2353 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2354 2355
2355 ret = smc_request_attrib(pdev, ndev); 2356 ret = smc_request_attrib(pdev, ndev);
2356 if (ret) 2357 if (ret)
2357 goto out_release_io; 2358 goto out_release_io;
2358#if defined(CONFIG_SA1100_ASSABET) 2359#if defined(CONFIG_ASSABET_NEPONSET)
2359 neponset_ncr_set(NCR_ENET_OSC_EN); 2360 if (machine_is_assabet() && machine_has_neponset())
2361 neponset_ncr_set(NCR_ENET_OSC_EN);
2360#endif 2362#endif
2361 platform_set_drvdata(pdev, ndev); 2363 platform_set_drvdata(pdev, ndev);
2362 ret = smc_enable_device(pdev); 2364 ret = smc_enable_device(pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index be67baf5f677..3a18501d1068 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -39,14 +39,7 @@
39 * Define your architecture specific bus configuration parameters here. 39 * Define your architecture specific bus configuration parameters here.
40 */ 40 */
41 41
42#if defined(CONFIG_ARCH_LUBBOCK) ||\ 42#if defined(CONFIG_ARM)
43 defined(CONFIG_MACH_MAINSTONE) ||\
44 defined(CONFIG_MACH_ZYLONITE) ||\
45 defined(CONFIG_MACH_LITTLETON) ||\
46 defined(CONFIG_MACH_ZYLONITE2) ||\
47 defined(CONFIG_ARCH_VIPER) ||\
48 defined(CONFIG_MACH_STARGATE2) ||\
49 defined(CONFIG_ARCH_VERSATILE)
50 43
51#include <asm/mach-types.h> 44#include <asm/mach-types.h>
52 45
@@ -74,95 +67,8 @@
74/* We actually can't write halfwords properly if not word aligned */ 67/* We actually can't write halfwords properly if not word aligned */
75static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) 68static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
76{ 69{
77 if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { 70 if ((machine_is_mainstone() || machine_is_stargate2() ||
78 unsigned int v = val << 16; 71 machine_is_pxa_idp()) && reg & 2) {
79 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
80 writel(v, ioaddr + (reg & ~2));
81 } else {
82 writew(val, ioaddr + reg);
83 }
84}
85
86#elif defined(CONFIG_SA1100_PLEB)
87/* We can only do 16-bit reads and writes in the static memory space. */
88#define SMC_CAN_USE_8BIT 1
89#define SMC_CAN_USE_16BIT 1
90#define SMC_CAN_USE_32BIT 0
91#define SMC_IO_SHIFT 0
92#define SMC_NOWAIT 1
93
94#define SMC_inb(a, r) readb((a) + (r))
95#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
96#define SMC_inw(a, r) readw((a) + (r))
97#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
98#define SMC_outb(v, a, r) writeb(v, (a) + (r))
99#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
100#define SMC_outw(v, a, r) writew(v, (a) + (r))
101#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
102
103#define SMC_IRQ_FLAGS (-1)
104
105#elif defined(CONFIG_SA1100_ASSABET)
106
107#include <mach/neponset.h>
108
109/* We can only do 8-bit reads and writes in the static memory space. */
110#define SMC_CAN_USE_8BIT 1
111#define SMC_CAN_USE_16BIT 0
112#define SMC_CAN_USE_32BIT 0
113#define SMC_NOWAIT 1
114
115/* The first two address lines aren't connected... */
116#define SMC_IO_SHIFT 2
117
118#define SMC_inb(a, r) readb((a) + (r))
119#define SMC_outb(v, a, r) writeb(v, (a) + (r))
120#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
121#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
122#define SMC_IRQ_FLAGS (-1) /* from resource */
123
124#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
125 defined(CONFIG_MACH_NOMADIK_8815NHK)
126
127#define SMC_CAN_USE_8BIT 0
128#define SMC_CAN_USE_16BIT 1
129#define SMC_CAN_USE_32BIT 0
130#define SMC_IO_SHIFT 0
131#define SMC_NOWAIT 1
132
133#define SMC_inw(a, r) readw((a) + (r))
134#define SMC_outw(v, a, r) writew(v, (a) + (r))
135#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
136#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
137
138#elif defined(CONFIG_ARCH_INNOKOM) || \
139 defined(CONFIG_ARCH_PXA_IDP) || \
140 defined(CONFIG_ARCH_RAMSES) || \
141 defined(CONFIG_ARCH_PCM027)
142
143#define SMC_CAN_USE_8BIT 1
144#define SMC_CAN_USE_16BIT 1
145#define SMC_CAN_USE_32BIT 1
146#define SMC_IO_SHIFT 0
147#define SMC_NOWAIT 1
148#define SMC_USE_PXA_DMA 1
149
150#define SMC_inb(a, r) readb((a) + (r))
151#define SMC_inw(a, r) readw((a) + (r))
152#define SMC_inl(a, r) readl((a) + (r))
153#define SMC_outb(v, a, r) writeb(v, (a) + (r))
154#define SMC_outl(v, a, r) writel(v, (a) + (r))
155#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
156#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
157#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
158#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
159#define SMC_IRQ_FLAGS (-1) /* from resource */
160
161/* We actually can't write halfwords properly if not word aligned */
162static inline void
163SMC_outw(u16 val, void __iomem *ioaddr, int reg)
164{
165 if (reg & 2) {
166 unsigned int v = val << 16; 72 unsigned int v = val << 16;
167 v |= readl(ioaddr + (reg & ~2)) & 0xffff; 73 v |= readl(ioaddr + (reg & ~2)) & 0xffff;
168 writel(v, ioaddr + (reg & ~2)); 74 writel(v, ioaddr + (reg & ~2));
@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
237#define RPC_LSA_DEFAULT RPC_LED_100_10 143#define RPC_LSA_DEFAULT RPC_LED_100_10
238#define RPC_LSB_DEFAULT RPC_LED_TX_RX 144#define RPC_LSB_DEFAULT RPC_LED_TX_RX
239 145
240#elif defined(CONFIG_ARCH_MSM)
241
242#define SMC_CAN_USE_8BIT 0
243#define SMC_CAN_USE_16BIT 1
244#define SMC_CAN_USE_32BIT 0
245#define SMC_NOWAIT 1
246
247#define SMC_inw(a, r) readw((a) + (r))
248#define SMC_outw(v, a, r) writew(v, (a) + (r))
249#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
250#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
251
252#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH
253
254#elif defined(CONFIG_COLDFIRE) 146#elif defined(CONFIG_COLDFIRE)
255 147
256#define SMC_CAN_USE_8BIT 0 148#define SMC_CAN_USE_8BIT 0
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 55e89b3838f1..a0ea84fe6519 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
310 spin_lock_irqsave(&priv->lock, flags); 310 spin_lock_irqsave(&priv->lock, flags);
311 if (!priv->eee_active) { 311 if (!priv->eee_active) {
312 priv->eee_active = 1; 312 priv->eee_active = 1;
313 init_timer(&priv->eee_ctrl_timer); 313 setup_timer(&priv->eee_ctrl_timer,
314 priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; 314 stmmac_eee_ctrl_timer,
315 priv->eee_ctrl_timer.data = (unsigned long)priv; 315 (unsigned long)priv);
316 priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); 316 mod_timer(&priv->eee_ctrl_timer,
317 add_timer(&priv->eee_ctrl_timer); 317 STMMAC_LPI_T(eee_timer));
318 318
319 priv->hw->mac->set_eee_timer(priv->hw, 319 priv->hw->mac->set_eee_timer(priv->hw,
320 STMMAC_DEFAULT_LIT_LS, 320 STMMAC_DEFAULT_LIT_LS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index fb846ebba1d9..f9b42f11950f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
272 struct stmmac_priv *priv = NULL; 272 struct stmmac_priv *priv = NULL;
273 struct plat_stmmacenet_data *plat_dat = NULL; 273 struct plat_stmmacenet_data *plat_dat = NULL;
274 const char *mac = NULL; 274 const char *mac = NULL;
275 int irq, wol_irq, lpi_irq;
276
 277 /* Get IRQ information early so we can ask for a deferred
 278 * probe, if needed, before going too far with resource allocation.
 279 */
280 irq = platform_get_irq_byname(pdev, "macirq");
281 if (irq < 0) {
282 if (irq != -EPROBE_DEFER) {
283 dev_err(dev,
284 "MAC IRQ configuration information not found\n");
285 }
286 return irq;
287 }
288
289 /* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
290 * The external wake up irq can be passed through the platform code
291 * named as "eth_wake_irq"
292 *
293 * In case the wake up interrupt is not passed from the platform
294 * so the driver will continue to use the mac irq (ndev->irq)
295 */
296 wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
297 if (wol_irq < 0) {
298 if (wol_irq == -EPROBE_DEFER)
299 return -EPROBE_DEFER;
300 wol_irq = irq;
301 }
302
303 lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
304 if (lpi_irq == -EPROBE_DEFER)
305 return -EPROBE_DEFER;
275 306
276 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 307 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
277 addr = devm_ioremap_resource(dev, res); 308 addr = devm_ioremap_resource(dev, res);
@@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
323 return PTR_ERR(priv); 354 return PTR_ERR(priv);
324 } 355 }
325 356
 357 /* Copy IRQ values to priv structure which is now available */
358 priv->dev->irq = irq;
359 priv->wol_irq = wol_irq;
360 priv->lpi_irq = lpi_irq;
361
326 /* Get MAC address if available (DT) */ 362 /* Get MAC address if available (DT) */
327 if (mac) 363 if (mac)
328 memcpy(priv->dev->dev_addr, mac, ETH_ALEN); 364 memcpy(priv->dev->dev_addr, mac, ETH_ALEN);
329 365
330 /* Get the MAC information */
331 priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
332 if (priv->dev->irq < 0) {
333 if (priv->dev->irq != -EPROBE_DEFER) {
334 netdev_err(priv->dev,
335 "MAC IRQ configuration information not found\n");
336 }
337 return priv->dev->irq;
338 }
339
340 /*
341 * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
342 * The external wake up irq can be passed through the platform code
343 * named as "eth_wake_irq"
344 *
345 * In case the wake up interrupt is not passed from the platform
346 * so the driver will continue to use the mac irq (ndev->irq)
347 */
348 priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
349 if (priv->wol_irq < 0) {
350 if (priv->wol_irq == -EPROBE_DEFER)
351 return -EPROBE_DEFER;
352 priv->wol_irq = priv->dev->irq;
353 }
354
355 priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
356 if (priv->lpi_irq == -EPROBE_DEFER)
357 return -EPROBE_DEFER;
358
359 platform_set_drvdata(pdev, priv->dev); 366 platform_set_drvdata(pdev, priv->dev);
360 367
361 pr_debug("STMMAC platform driver registration completed"); 368 pr_debug("STMMAC platform driver registration completed");
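
Fetching all three IRQs at the top of probe means a -EPROBE_DEFER from
the interrupt provider is seen before anything has been ioremapped or
allocated, so the probe can bail out cheaply and be retried by the
driver core later. The shape of the pattern (a sketch; example_probe is
hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq_byname(pdev, "macirq");

		if (irq < 0)		/* includes -EPROBE_DEFER */
			return irq;	/* nothing to unwind yet */

		/* only now start mapping and allocating resources */
		return 0;
	}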
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 4b51f903fb73..0c5842aeb807 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
6989 *flow_type = IP_USER_FLOW; 6989 *flow_type = IP_USER_FLOW;
6990 break; 6990 break;
6991 default: 6991 default:
6992 return 0; 6992 return -EINVAL;
6993 } 6993 }
6994 6994
6995 return 1; 6995 return 0;
6996} 6996}
6997 6997
6998static int niu_ethflow_to_class(int flow_type, u64 *class) 6998static int niu_ethflow_to_class(int flow_type, u64 *class)
@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> 7198 class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
7199 TCAM_V4KEY0_CLASS_CODE_SHIFT; 7199 TCAM_V4KEY0_CLASS_CODE_SHIFT;
7200 ret = niu_class_to_ethflow(class, &fsp->flow_type); 7200 ret = niu_class_to_ethflow(class, &fsp->flow_type);
7201
7202 if (ret < 0) { 7201 if (ret < 0) {
7203 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", 7202 netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
7204 parent->index); 7203 parent->index);
7205 ret = -EINVAL;
7206 goto out; 7204 goto out;
7207 } 7205 }
7208 7206
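
The old convention returned 1 on success and 0 on failure, so the
caller's "if (ret < 0)" test could never fire; switching to 0/-EINVAL
makes the error path actually reachable and lets the manual remapping to
-EINVAL above be deleted.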
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d8dd0d2182e..a1bbaf6352ba 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, 1103 cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1104 port_mask, ALE_VLAN, slave->port_vlan, 0); 1104 port_mask, ALE_VLAN, slave->port_vlan, 0);
1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr, 1105 cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
1106 priv->host_port, ALE_VLAN, slave->port_vlan); 1106 priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
1107} 1107}
1108 1108
1109static void soft_reset_slave(struct cpsw_slave *slave) 1109static void soft_reset_slave(struct cpsw_slave *slave)
@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
2466 return 0; 2466 return 0;
2467} 2467}
2468 2468
2469#ifdef CONFIG_PM_SLEEP
2469static int cpsw_suspend(struct device *dev) 2470static int cpsw_suspend(struct device *dev)
2470{ 2471{
2471 struct platform_device *pdev = to_platform_device(dev); 2472 struct platform_device *pdev = to_platform_device(dev);
@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
2518 } 2519 }
2519 return 0; 2520 return 0;
2520} 2521}
2522#endif
2521 2523
2522static const struct dev_pm_ops cpsw_pm_ops = { 2524static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
2523 .suspend = cpsw_suspend,
2524 .resume = cpsw_resume,
2525};
2526 2525
2527static const struct of_device_id cpsw_of_mtable[] = { 2526static const struct of_device_id cpsw_of_mtable[] = {
2528 { .compatible = "ti,cpsw", }, 2527 { .compatible = "ti,cpsw", },
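
SIMPLE_DEV_PM_OPS() wires the two callbacks into all of the system-sleep
slots (suspend/resume, freeze/thaw, poweroff/restore), and the new
CONFIG_PM_SLEEP guard avoids "defined but not used" warnings when sleep
support is compiled out, since the macro then leaves the slots empty.
Roughly:

	static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
	/* expands to approximately: */
	static const struct dev_pm_ops cpsw_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(cpsw_suspend, cpsw_resume)
	};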
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 98655b44b97e..c00084d689f3 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
423 return 0; 423 return 0;
424} 424}
425 425
426#ifdef CONFIG_PM_SLEEP
426static int davinci_mdio_suspend(struct device *dev) 427static int davinci_mdio_suspend(struct device *dev)
427{ 428{
428 struct davinci_mdio_data *data = dev_get_drvdata(dev); 429 struct davinci_mdio_data *data = dev_get_drvdata(dev);
@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)
464 465
465 return 0; 466 return 0;
466} 467}
468#endif
467 469
468static const struct dev_pm_ops davinci_mdio_pm_ops = { 470static const struct dev_pm_ops davinci_mdio_pm_ops = {
469 .suspend_late = davinci_mdio_suspend, 471 SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
470 .resume_early = davinci_mdio_resume,
471}; 472};
472 473
473#if IS_ENABLED(CONFIG_OF) 474#if IS_ENABLED(CONFIG_OF)
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a495931a66a1..0e0fbb5842b3 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -498,9 +498,9 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
498 } 498 }
499 499
500 if (rx_count < budget) { 500 if (rx_count < budget) {
501 napi_complete(napi);
501 w5100_write(priv, W5100_IMR, IR_S0); 502 w5100_write(priv, W5100_IMR, IR_S0);
502 mmiowb(); 503 mmiowb();
503 napi_complete(napi);
504 } 504 }
505 505
506 return rx_count; 506 return rx_count;
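
Moving napi_complete() ahead of the IMR write closes a lost-event race:
with the old order, an interrupt arriving after the unmask but before
napi_complete() would run the ISR (which masks the IRQ before scheduling
NAPI) while NAPI was still marked scheduled, so the napi_schedule() was
silently dropped and the device could be left masked with nobody
polling. Completing NAPI first guarantees any interrupt taken after the
unmask can re-schedule the poll:

	if (rx_count < budget) {
		napi_complete(napi);	/* clear NAPI_STATE_SCHED first */
		w5100_write(priv, W5100_IMR, IR_S0);	/* then unmask */
		mmiowb();
	}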
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 09322d9db578..4b310002258d 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -418,9 +418,9 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
418 } 418 }
419 419
420 if (rx_count < budget) { 420 if (rx_count < budget) {
421 napi_complete(napi);
421 w5300_write(priv, W5300_IMR, IR_S0); 422 w5300_write(priv, W5300_IMR, IR_S0);
422 mmiowb(); 423 mmiowb();
423 napi_complete(napi);
424 } 424 }
425 425
426 return rx_count; 426 return rx_count;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f7e0f0f7c2e2..9e16a2819d48 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
938 int i; 938 int i;
939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; 939 static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
940 940
941 if (dev->flags & IFF_ALLMULTI) { 941 if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
942 for (i = 0; i < ETH_ALEN; i++) { 942 for (i = 0; i < ETH_ALEN; i++) {
943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); 943 __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); 944 __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);