Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/3com/3c59x.c                     |  50
 drivers/net/ethernet/aeroflex/greth.c                 |  86
 drivers/net/ethernet/aeroflex/greth.h                 |   2
 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c          |   8
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c              | 209
 drivers/net/ethernet/amd/xgbe/xgbe-drv.c              |   2
 drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c          |   7
 drivers/net/ethernet/amd/xgbe/xgbe-main.c             |   2
 drivers/net/ethernet/amd/xgbe/xgbe.h                  |   4
 drivers/net/ethernet/apm/xgene/Kconfig                |   1
 drivers/net/ethernet/arc/emac_main.c                  |  53
 drivers/net/ethernet/broadcom/Kconfig                 |   2
 drivers/net/ethernet/broadcom/b44.c                   |   2
 drivers/net/ethernet/broadcom/bcmsysport.c            |  31
 drivers/net/ethernet/broadcom/bnx2.c                  |   5
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h       |   7
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c      |  74
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h      |   5
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c      |  57
 drivers/net/ethernet/broadcom/cnic.c                  |   4
 drivers/net/ethernet/broadcom/genet/bcmgenet.c        | 143
 drivers/net/ethernet/broadcom/tg3.c                   |  39
 drivers/net/ethernet/broadcom/tg3.h                   |   1
 drivers/net/ethernet/brocade/bna/bnad.c               |   7
 drivers/net/ethernet/cadence/macb.c                   |  11
 drivers/net/ethernet/calxeda/Kconfig                  |   1
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       |  65
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c            |  79
 drivers/net/ethernet/chelsio/cxgb4/t4_regs.h          |   2
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h         |   4
 drivers/net/ethernet/davicom/dm9000.c                 |   2
 drivers/net/ethernet/ibm/ehea/ehea_main.c             |   2
 drivers/net/ethernet/intel/e1000/e1000_main.c         |  19
 drivers/net/ethernet/intel/e1000e/netdev.c            |  21
 drivers/net/ethernet/intel/i40e/i40e_txrx.c           |   2
 drivers/net/ethernet/intel/i40evf/i40e_txrx.c         |   2
 drivers/net/ethernet/marvell/mvneta.c                 |   8
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |  25
 drivers/net/ethernet/mellanox/mlx4/en_ethtool.c       |   3
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c        |  31
 drivers/net/ethernet/mellanox/mlx4/main.c             |   4
 drivers/net/ethernet/mellanox/mlx4/mcg.c              |  38
 drivers/net/ethernet/mellanox/mlx4/mr.c               |  33
 drivers/net/ethernet/mellanox/mlx4/port.c             |  11
 drivers/net/ethernet/mellanox/mlx4/qp.c               |  12
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |  38
 drivers/net/ethernet/moxa/moxart_ether.c              |  34
 drivers/net/ethernet/nxp/lpc_eth.c                    |   3
 drivers/net/ethernet/octeon/octeon_mgmt.c             |   4
 drivers/net/ethernet/oki-semi/pch_gbe/Kconfig         |   1
 drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c    |   2
 drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c  |   6
 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  |   2
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c |   5
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c   |  10
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c        |   2
 drivers/net/ethernet/qlogic/qlge/qlge_main.c          |   5
 drivers/net/ethernet/realtek/r8169.c                  |  67
 drivers/net/ethernet/renesas/Kconfig                  |   1
 drivers/net/ethernet/sfc/farch.c                      |   3
 drivers/net/ethernet/stmicro/stmmac/chain_mode.c      |  14
 drivers/net/ethernet/stmicro/stmmac/common.h          |  13
 drivers/net/ethernet/stmicro/stmmac/dwmac1000.h       |   2
 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c  |   6
 drivers/net/ethernet/stmicro/stmmac/mmc.h             |   2
 drivers/net/ethernet/stmicro/stmmac/mmc_core.c        |   2
 drivers/net/ethernet/stmicro/stmmac/ring_mode.c       |  15
 drivers/net/ethernet/stmicro/stmmac/stmmac.h          |   9
 drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c  |   2
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c     | 153
 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c      |   1
 drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h      |   2
 drivers/net/ethernet/sun/sunvnet.c                    |   7
 drivers/net/ethernet/ti/cpsw.c                        |  52
 drivers/net/ethernet/via/via-velocity.c               |   2
 75 files changed, 1064 insertions(+), 577 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 059c7414e303..8ca49f04acec 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
+	dma_addr_t dma_addr;
 
 	if (vortex_debug > 6) {
 		pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-									skb->len, PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+					  PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
 	} else {
 		int i;
 
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-									skb_headlen(skb), PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+					  skb_headlen(skb), PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+						    0,
+						    frag->size,
+						    DMA_TO_DEVICE);
+			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+				for(i = i-1; i >= 0; i--)
+					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+						       DMA_TO_DEVICE);
+
+				pci_unmap_single(VORTEX_PCI(vp),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+						 PCI_DMA_TODEVICE);
+
+				goto out_dma_err;
+			}
+
 			vp->tx_ring[entry].frag[i+1].addr =
-					cpu_to_le32(pci_map_single(
-						VORTEX_PCI(vp),
-						(void *)skb_frag_address(frag),
-						skb_frag_size(frag), PCI_DMA_TODEVICE));
+					cpu_to_le32(dma_addr);
 
 			if (i == skb_shinfo(skb)->nr_frags-1)
 				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		goto out_dma_err;
+	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
+out:
 	return NETDEV_TX_OK;
+out_dma_err:
+	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
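The shape of this fix recurs across drivers: check every pci_map_single()/skb_frag_dma_map() result with dma_mapping_error() before writing it into a descriptor, and unwind any mappings already made when a later one fails. A minimal sketch of the single-buffer case follows; my_priv, my_fill_desc() and the ndev field are hypothetical stand-ins, not 3c59x names:

	/* Sketch only: drop on mapping failure instead of handing an
	 * unchecked address to the NIC. */
	static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct my_priv *priv)
	{
		dma_addr_t mapping;

		mapping = pci_map_single(priv->pdev, skb->data, skb->len,
					 PCI_DMA_TODEVICE);
		if (dma_mapping_error(&priv->pdev->dev, mapping)) {
			dev_kfree_skb_any(skb);		/* packet is consumed... */
			priv->ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;		/* ...so never TX_BUSY here */
		}

		my_fill_desc(priv, mapping, skb->len);	/* hypothetical helper */
		return NETDEV_TX_OK;
	}

Returning NETDEV_TX_BUSY on a mapping error would make the core requeue a packet that will likely fail the same way; dropping it is the conventional choice.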
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 23578dfee249..3005155e412b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
 	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }
 
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+	wmb(); /* BDs must been written to memory before enabling TX */
+	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
 static inline void greth_disable_tx(struct greth_private *greth)
 {
 	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
 	return err;
 }
 
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+	if (tx_next < tx_last)
+		return (tx_last - tx_next) - 1;
+	else
+		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
 
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr, ctrl;
+	u32 status, dma_addr;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
 	unsigned long flags;
+	u16 tx_last;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
+	tx_last = greth->tx_last;
+	rmb(); /* tx_last is updated by the poll task */
 
-	/* Clean TX Ring */
-	greth_clean_tx_gbit(dev);
-
-	if (greth->tx_free < nr_frags + 1) {
-		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-		ctrl = GRETH_REGLOAD(greth->regs->control);
-		/* Enable TX IRQ only if not already in poll() routine */
-		if (ctrl & GRETH_RXI)
-			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 	/* Linear buf */
 	if (nr_frags != 0)
 		status = GRETH_TXBD_MORE;
+	else
+		status = GRETH_BD_IE;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Enable the descriptor chain by enabling the first descriptor */
 	bdp = greth->tx_bd_base + greth->tx_next;
-	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-	greth->tx_next = curr_tx;
-	greth->tx_free -= nr_frags + 1;
-
-	wmb();
+	greth_write_bd(&bdp->stat,
+		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 
 	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-	greth_enable_tx(greth);
+	greth->tx_next = curr_tx;
+	greth_enable_tx_and_irq(greth);
 	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
 	if (greth->tx_free > 0) {
 		netif_wake_queue(dev);
 	}
-
 }
 
 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 {
 	struct greth_private *greth;
 	struct greth_bd *bdp, *bdp_last_frag;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	u32 stat;
 	int nr_frags, i;
+	u16 tx_last;
 
 	greth = netdev_priv(dev);
+	tx_last = greth->tx_last;
 
-	while (greth->tx_free < GRETH_TXBD_NUM) {
+	while (tx_last != greth->tx_next) {
 
-		skb = greth->tx_skbuff[greth->tx_last];
+		skb = greth->tx_skbuff[tx_last];
 
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
 		/* We only clean fully completed SKBs */
-		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
 
 		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 		mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 		if (stat & GRETH_BD_EN)
 			break;
 
-		greth->tx_skbuff[greth->tx_last] = NULL;
+		greth->tx_skbuff[tx_last] = NULL;
 
 		greth_update_tx_stats(dev, stat);
 		dev->stats.tx_bytes += skb->len;
 
-		bdp = greth->tx_bd_base + greth->tx_last;
+		bdp = greth->tx_bd_base + tx_last;
 
-		greth->tx_last = NEXT_TX(greth->tx_last);
+		tx_last = NEXT_TX(tx_last);
 
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
 		for (i = 0; i < nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			bdp = greth->tx_bd_base + greth->tx_last;
+			bdp = greth->tx_bd_base + tx_last;
 
 			dma_unmap_page(greth->dev,
 				       greth_read_bd(&bdp->addr),
 				       skb_frag_size(frag),
 				       DMA_TO_DEVICE);
 
-			greth->tx_last = NEXT_TX(greth->tx_last);
+			tx_last = NEXT_TX(tx_last);
 		}
-		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
+	if (skb) { /* skb is set only if the above while loop was entered */
+		wmb();
+		greth->tx_last = tx_last;
 
-	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-		netif_wake_queue(dev);
+		if (netif_queue_stopped(dev) &&
+		    (greth_num_free_bds(tx_last, greth->tx_next) >
+		    (MAX_SKB_FRAGS+1)))
+			netif_wake_queue(dev);
+	}
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
 	greth = container_of(napi, struct greth_private, napi);
 
 restart_txrx_poll:
-	if (netif_queue_stopped(greth->netdev)) {
-		if (greth->gbit_mac)
-			greth_clean_tx_gbit(greth->netdev);
-		else
-			greth_clean_tx(greth->netdev);
-	}
-
 	if (greth->gbit_mac) {
+		greth_clean_tx_gbit(greth->netdev);
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
+		if (netif_queue_stopped(greth->netdev))
+			greth_clean_tx(greth->netdev);
 		work_done += greth_rx(greth->netdev, budget - work_done);
 	}
 
@@ -983,7 +992,8 @@ restart_txrx_poll:
 		spin_lock_irqsave(&greth->devlock, flags);
 
 		ctrl = GRETH_REGLOAD(greth->regs->control);
-		if (netif_queue_stopped(greth->netdev)) {
+		if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+		    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
 			GRETH_REGSAVE(greth->regs->control,
 				      ctrl | GRETH_TXI | GRETH_RXI);
 			mask = GRETH_INT_RX | GRETH_INT_RE |
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 232a622a85b7..ae16ac94daf8 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -107,7 +107,7 @@ struct greth_private {
 
 	u16 tx_next;
 	u16 tx_last;
-	u16 tx_free;
+	u16 tx_free;	/* only used on 10/100Mbit */
 	u16 rx_cur;
 
 	struct greth_regs *regs;	/* Address of controller registers. */
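The greth conversion replaces the shared tx_free counter with an occupancy derived from the two ring indices alone: the xmit path owns tx_next, the cleanup path owns tx_last, and one descriptor is deliberately kept unused so that tx_last == tx_next can only mean an empty ring. A self-contained sketch of that arithmetic (BD_NUM stands in for GRETH_TXBD_NUM):

	#include <assert.h>
	#include <stdint.h>

	#define BD_NUM 128			/* ring size, one slot kept empty */

	static uint16_t num_free_bds(uint16_t tx_last, uint16_t tx_next)
	{
		if (tx_next < tx_last)
			return (tx_last - tx_next) - 1;
		return BD_NUM - (tx_next - tx_last) - 1;
	}

	int main(void)
	{
		assert(num_free_bds(0, 0) == BD_NUM - 1);	/* empty ring */
		assert(num_free_bds(5, 4) == 0);		/* full ring */
		assert(num_free_bds(10, 20) == BD_NUM - 11);	/* producer ahead */
		return 0;
	}

Because each index has a single writer, the xmit path only needs the rmb() after sampling tx_last, and the cleaner a wmb() before publishing it; no lock protects the computation itself.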
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 346592dca33c..a3c11355a34d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
 	struct xgbe_prv_data *pdata = filp->private_data;
 	unsigned int value;
 
-	value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-					   pdata->debugfs_xpcs_reg);
+	value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+			   pdata->debugfs_xpcs_reg);
 
 	return xgbe_common_read(buffer, count, ppos, value);
 }
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
 	if (len < 0)
 		return len;
 
-	pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-				    pdata->debugfs_xpcs_reg, value);
+	XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+		    value);
 
 	return len;
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index edaca4496264..ea273836d999 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 
 	/* Clear MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 
 	/* Set MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
 	/* Enable all counter interrupts */
-	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
-	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
 }
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
 	unsigned int i, count;
 
+	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+		return 0;
+
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
 }
 
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
-						  unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+						  unsigned int queue_count)
 {
 	unsigned int q_fifo_size = 0;
 	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
 		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
 		break;
 	}
+
+	/* The configured value is not the actual amount of fifo RAM */
+	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
 	q_fifo_size = q_fifo_size / queue_count;
 
 	/* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
 		xgbe_disable_rx_vlan_stripping(pdata);
 }
 
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+	bool read_hi;
+	u64 val;
+
+	switch (reg_lo) {
+	/* These registers are always 64 bit */
+	case MMC_TXOCTETCOUNT_GB_LO:
+	case MMC_TXOCTETCOUNT_G_LO:
+	case MMC_RXOCTETCOUNT_GB_LO:
+	case MMC_RXOCTETCOUNT_G_LO:
+		read_hi = true;
+		break;
+
+	default:
+		read_hi = false;
+	};
+
+	val = XGMAC_IOREAD(pdata, reg_lo);
+
+	if (read_hi)
+		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+	return val;
+}
+
 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
 		stats->txoctetcount_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
 		stats->txframecount_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
 		stats->txbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
 		stats->txmulticastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
 		stats->tx64octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
 		stats->tx65to127octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
 		stats->tx128to255octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
 		stats->tx256to511octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
 		stats->tx512to1023octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
 		stats->tx1024tomaxoctets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
 		stats->txunicastframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
 		stats->txmulticastframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
 		stats->txbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
 		stats->txunderflowerror +=
-			XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
 		stats->txoctetcount_g +=
-			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
 		stats->txframecount_g +=
-			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
 		stats->txpauseframes +=
-			XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
 		stats->txvlanframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 }
 
 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
 		stats->rxframecount_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
 		stats->rxoctetcount_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
 		stats->rxoctetcount_g +=
-			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
 		stats->rxbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
 		stats->rxmulticastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
 		stats->rxcrcerror +=
-			XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
 		stats->rxrunterror +=
-			XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
 		stats->rxjabbererror +=
-			XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
 		stats->rxundersize_g +=
-			XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
 		stats->rxoversize_g +=
-			XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
 		stats->rx64octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
 		stats->rx65to127octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
 		stats->rx128to255octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
 		stats->rx256to511octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
 		stats->rx512to1023octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
 		stats->rx1024tomaxoctets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
 		stats->rxunicastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
 		stats->rxlengtherror +=
-			XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
 		stats->rxoutofrangetype +=
-			XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
 		stats->rxpauseframes +=
-			XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
 		stats->rxfifooverflow +=
-			XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
 		stats->rxvlanframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
 		stats->rxwatchdogerror +=
-			XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+			xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 }
 
 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
 
 	stats->txoctetcount_gb +=
-		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
 	stats->txframecount_gb +=
-		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
 	stats->txbroadcastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
 	stats->txmulticastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
 	stats->tx64octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
 	stats->tx65to127octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
 	stats->tx128to255octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
 	stats->tx256to511octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
 	stats->tx512to1023octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
 	stats->tx1024tomaxoctets_gb +=
-		XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
 	stats->txunicastframes_gb +=
-		XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
 	stats->txmulticastframes_gb +=
-		XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
 	stats->txbroadcastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+		xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
 	stats->txunderflowerror +=
-		XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+		xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
 	stats->txoctetcount_g +=
-		XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+		xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
 	stats->txframecount_g +=
-		XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+		xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
 	stats->txpauseframes +=
-		XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+		xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
 	stats->txvlanframes_g +=
-		XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 
 	stats->rxframecount_gb +=
-		XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
 	stats->rxoctetcount_gb +=
-		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
 	stats->rxoctetcount_g +=
-		XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+		xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
 	stats->rxbroadcastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
 	stats->rxmulticastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
 	stats->rxcrcerror +=
-		XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+		xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
 	stats->rxrunterror +=
-		XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+		xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
 	stats->rxjabbererror +=
-		XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+		xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
 	stats->rxundersize_g +=
-		XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+		xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
 	stats->rxoversize_g +=
-		XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+		xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
 	stats->rx64octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
 	stats->rx65to127octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
 	stats->rx128to255octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
 	stats->rx256to511octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
 	stats->rx512to1023octets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
 	stats->rx1024tomaxoctets_gb +=
-		XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
 	stats->rxunicastframes_g +=
-		XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+		xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
 	stats->rxlengtherror +=
-		XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+		xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
 	stats->rxoutofrangetype +=
-		XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+		xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
 	stats->rxpauseframes +=
-		XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+		xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
 	stats->rxfifooverflow +=
-		XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+		xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
 	stats->rxvlanframes_gb +=
-		XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+		xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
 
 	stats->rxwatchdogerror +=
-		XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+		xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
 
 	/* Un-freeze counters */
 	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
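xgbe_mmc_read() exists because four of the MMC counters (the octet counts) are 64 bits wide, exposed as a _LO register with its _HI half 4 bytes above. The sketch below shows the access shape; it assumes, as the driver appears to, that the hardware snapshots the high half when the low half is read, so no rollover re-read loop is needed. my_ioread32() is a hypothetical MMIO accessor, not a kernel API:

	#include <stdint.h>

	uint32_t my_ioread32(void *base, unsigned int reg);	/* hypothetical */

	static uint64_t read_counter64(void *base, unsigned int reg_lo)
	{
		uint64_t val = my_ioread32(base, reg_lo);	/* low half first */

		return val | ((uint64_t)my_ioread32(base, reg_lo + 4) << 32);
	}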
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index dc84f7193c2d..b26d75856553 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
 	memset(hw_feat, 0, sizeof(*hw_feat));
 
+	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
 	/* Hardware feature register 0 */
 	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
 	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a076aca138a1..46f613028e9c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
 			       struct ethtool_drvinfo *drvinfo)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
 
 	strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
 		sizeof(drvinfo->bus_info));
 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
 	drvinfo->n_stats = XGBE_STATS_COUNT;
 }
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 8aa6a9353f7b..bdf9cfa70e88 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
 	}
 
 	if (i < pdata->rx_ring_count) {
-		spin_lock_init(&tx_ring->lock);
+		spin_lock_init(&rx_ring->lock);
 		channel->rx_ring = rx_ring++;
 	}
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 07bf70a82908..e9fe6e6ddcc3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -183,6 +183,7 @@
 #define XGMAC_DRIVER_CONTEXT	1
 #define XGMAC_IOCTL_CONTEXT	2
 
+#define XGBE_FIFO_MAX		81920
 #define XGBE_FIFO_SIZE_B(x)	(x)
 #define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
  *	or configurations are present in the device.
  */
 struct xgbe_hw_features {
+	/* HW Version */
+	unsigned int version;
+
 	/* HW Feature Register0 */
 	unsigned int gmii;		/* 1000 Mbps support */
 	unsigned int vlhash;		/* VLAN Hash Filter */
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 616dff6d3f5f..f4054d242f3c 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,5 +1,6 @@
 config NET_XGENE
 	tristate "APM X-Gene SoC Ethernet Driver"
+	depends on HAS_DMA
 	select PHYLIB
 	help
 	  This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
 #define DRV_VERSION	"1.0"
 
 /**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in tx the ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
  *
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
+		/* Refresh tx_dirty */
+		smp_mb();
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
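Taken together, the arc_emac hunks implement the standard lock-free stop/wake protocol between the xmit and completion paths: each side publishes its ring index with smp_mb() before inspecting state the other side writes, and the producer re-checks availability after stopping in case the cleaner ran in the window. Condensed from the patch, with the kernel helpers assumed:

	/* producer, in ndo_start_xmit() */
	priv->txbd_curr = (priv->txbd_curr + 1) % TX_BD_NUM;
	smp_mb();				/* publish txbd_curr before testing */
	if (!arc_emac_tx_avail(priv)) {
		netif_stop_queue(ndev);
		smp_mb();			/* pairs with barrier in tx_clean() */
		if (arc_emac_tx_avail(priv))	/* cleaner freed slots meanwhile */
			netif_start_queue(ndev);
	}

	/* consumer, in arc_emac_tx_clean() */
	priv->txbd_dirty = (priv->txbd_dirty + 1) % TX_BD_NUM;
	smp_mb();				/* publish txbd_dirty before testing */
	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
		netif_wake_queue(ndev);

Without the re-check after netif_stop_queue(), a completion landing between the availability test and the stop could leave the queue stopped forever over a ring that is actually empty.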
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7dcfb19a31c8..d8d07a818b89 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -84,7 +84,7 @@ config BNX2
 
 config CNIC
 	tristate "QLogic CNIC support"
-	depends on PCI
+	depends on PCI && (IPV6 || IPV6=n)
 	select BNX2
 	select UIO
 	---help---
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4a7028d65912..d588136b23b9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 			   hwstat->tx_underruns +
 			   hwstat->tx_excessive_cols +
 			   hwstat->tx_late_cols);
-	nstat->multicast = hwstat->tx_multicast_pkts;
+	nstat->multicast = hwstat->rx_multicast_pkts;
 	nstat->collisions = hwstat->tx_total_cols;
 
 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6f4e18644bd4..d9b9170ed2fc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
534 while ((processed < to_process) && (processed < budget)) { 534 while ((processed < to_process) && (processed < budget)) {
535 cb = &priv->rx_cbs[priv->rx_read_ptr]; 535 cb = &priv->rx_cbs[priv->rx_read_ptr];
536 skb = cb->skb; 536 skb = cb->skb;
537
538 processed++;
539 priv->rx_read_ptr++;
540
541 if (priv->rx_read_ptr == priv->num_rx_bds)
542 priv->rx_read_ptr = 0;
543
544 /* We do not have a backing SKB, so we do not have a
545 * corresponding DMA mapping for this incoming packet since
546 * bcm_sysport_rx_refill always either has both skb and mapping
547 * or none.
548 */
549 if (unlikely(!skb)) {
550 netif_err(priv, rx_err, ndev, "out of memory!\n");
551 ndev->stats.rx_dropped++;
552 ndev->stats.rx_errors++;
553 goto refill;
554 }
555
537 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), 556 dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
538 RX_BUF_LENGTH, DMA_FROM_DEVICE); 557 RX_BUF_LENGTH, DMA_FROM_DEVICE);
539 558
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
543 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) & 562 status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
544 DESC_STATUS_MASK; 563 DESC_STATUS_MASK;
545 564
546 processed++;
547 priv->rx_read_ptr++;
548 if (priv->rx_read_ptr == priv->num_rx_bds)
549 priv->rx_read_ptr = 0;
550
551 netif_dbg(priv, rx_status, ndev, 565 netif_dbg(priv, rx_status, ndev,
552 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n", 566 "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
553 p_index, priv->rx_c_index, priv->rx_read_ptr, 567 p_index, priv->rx_c_index, priv->rx_read_ptr,
554 len, status); 568 len, status);
555 569
556 if (unlikely(!skb)) {
557 netif_err(priv, rx_err, ndev, "out of memory!\n");
558 ndev->stats.rx_dropped++;
559 ndev->stats.rx_errors++;
560 goto refill;
561 }
562
563 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) { 570 if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
564 netif_err(priv, rx_status, ndev, "fragmented packet!\n"); 571 netif_err(priv, rx_status, ndev, "fragmented packet!\n");
565 ndev->stats.rx_dropped++; 572 ndev->stats.rx_dropped++;
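
The reorder above makes the ring bookkeeping unconditional: the read pointer and the processed count now advance before any validation, so the out-of-memory path can jump straight to refill without leaving stale ring state behind. The resulting loop shape, condensed (only the structure is meant to match the driver):

    while (processed < budget) {
            cb = &priv->rx_cbs[priv->rx_read_ptr];
            skb = cb->skb;

            /* Consume the slot first, unconditionally. */
            processed++;
            if (++priv->rx_read_ptr == priv->num_rx_bds)
                    priv->rx_read_ptr = 0;

            /* Only then validate; every error path refills. */
            if (unlikely(!skb)) {
                    ndev->stats.rx_dropped++;
                    ndev->stats.rx_errors++;
                    goto refill;
            }
            /* ... unmap, SOP/EOP checks, hand skb to the stack ... */
    refill:
            bcm_sysport_rx_refill(priv, cb);
    }

bcm_sysport_rx_refill() is the refill helper named in the comment above; whether the real loop calls it with exactly this signature at the label is taken from surrounding context, not shown in this hunk.
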
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2fee73b878c2..823d01c5684c 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3236,8 +3236,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3236 3236
3237 skb->protocol = eth_type_trans(skb, bp->dev); 3237 skb->protocol = eth_type_trans(skb, bp->dev);
3238 3238
3239 if ((len > (bp->dev->mtu + ETH_HLEN)) && 3239 if (len > (bp->dev->mtu + ETH_HLEN) &&
3240 (ntohs(skb->protocol) != 0x8100)) { 3240 skb->protocol != htons(0x8100) &&
3241 skb->protocol != htons(ETH_P_8021AD)) {
3241 3242
3242 dev_kfree_skb(skb); 3243 dev_kfree_skb(skb);
3243 goto next_rx; 3244 goto next_rx;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5ba8af50c84f..c4daa068f1db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2233,7 +2233,12 @@ struct shmem2_region {
2233 u32 reserved3; /* Offset 0x14C */ 2233 u32 reserved3; /* Offset 0x14C */
2234 u32 reserved4; /* Offset 0x150 */ 2234 u32 reserved4; /* Offset 0x150 */
2235 u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ 2235 u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */
2236 #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) 2236 #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001
2237 #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00
2238 #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8
2239 #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000
2240 #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000
2241 #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000
2237 2242
2238 u32 reserved5[2]; 2243 u32 reserved5[2];
2239 u32 reserved6[PORT_MAX]; 2244 u32 reserved6[PORT_MAX];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 53fb4fa61b40..549549eaf580 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
154 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) 154 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
155 155
156#define SFP_EEPROM_CON_TYPE_ADDR 0x2 156#define SFP_EEPROM_CON_TYPE_ADDR 0x2
157 #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0
157 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 158 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
158 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 159 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
159 #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 160 #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22
160 161
161 162
162#define SFP_EEPROM_COMP_CODE_ADDR 0x3 163#define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3
163 #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) 164 #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4)
164 #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) 165 #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5)
165 #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) 166 #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6)
167
168#define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6
169 #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0)
170 #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1)
171 #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2)
172 #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3)
166 173
167#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 174#define SFP_EEPROM_FC_TX_TECH_ADDR 0x8
168 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 175 #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
3633 reg_set[i].val); 3640 reg_set[i].val);
3634 3641
3635 /* Start KR2 work-around timer which handles BCM8073 link-partner */ 3642 /* Start KR2 work-around timer which handles BCM8073 link-partner */
3636 vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; 3643 params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
3637 bnx2x_update_link_attr(params, vars->link_attr_sync); 3644 bnx2x_update_link_attr(params, params->link_attr_sync);
3638} 3645}
3639 3646
3640static void bnx2x_disable_kr2(struct link_params *params, 3647static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
3666 for (i = 0; i < ARRAY_SIZE(reg_set); i++) 3673 for (i = 0; i < ARRAY_SIZE(reg_set); i++)
3667 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, 3674 bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
3668 reg_set[i].val); 3675 reg_set[i].val);
3669 vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; 3676 params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
3670 bnx2x_update_link_attr(params, vars->link_attr_sync); 3677 bnx2x_update_link_attr(params, params->link_attr_sync);
3671 3678
3672 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; 3679 vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
3673} 3680}
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
4810 ~FEATURE_CONFIG_PFC_ENABLED; 4817 ~FEATURE_CONFIG_PFC_ENABLED;
4811 4818
4812 if (SHMEM2_HAS(bp, link_attr_sync)) 4819 if (SHMEM2_HAS(bp, link_attr_sync))
4813 vars->link_attr_sync = SHMEM2_RD(bp, 4820 params->link_attr_sync = SHMEM2_RD(bp,
4814 link_attr_sync[params->port]); 4821 link_attr_sync[params->port]);
4815 4822
4816 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", 4823 DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8057{ 8064{
8058 struct bnx2x *bp = params->bp; 8065 struct bnx2x *bp = params->bp;
8059 u32 sync_offset = 0, phy_idx, media_types; 8066 u32 sync_offset = 0, phy_idx, media_types;
8060 u8 gport, val[2], check_limiting_mode = 0; 8067 u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
8061 *edc_mode = EDC_MODE_LIMITING; 8068 *edc_mode = EDC_MODE_LIMITING;
8062 phy->media_type = ETH_PHY_UNSPECIFIED; 8069 phy->media_type = ETH_PHY_UNSPECIFIED;
8063 /* First check for copper cable */ 8070 /* First check for copper cable */
8064 if (bnx2x_read_sfp_module_eeprom(phy, 8071 if (bnx2x_read_sfp_module_eeprom(phy,
8065 params, 8072 params,
8066 I2C_DEV_ADDR_A0, 8073 I2C_DEV_ADDR_A0,
8067 SFP_EEPROM_CON_TYPE_ADDR, 8074 0,
8068 2, 8075 SFP_EEPROM_FC_TX_TECH_ADDR + 1,
8069 (u8 *)val) != 0) { 8076 (u8 *)val) != 0) {
8070 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); 8077 DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
8071 return -EINVAL; 8078 return -EINVAL;
8072 } 8079 }
8073 8080 params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
8074 switch (val[0]) { 8081 params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
8082 LINK_SFP_EEPROM_COMP_CODE_SHIFT;
8083 bnx2x_update_link_attr(params, params->link_attr_sync);
8084 switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
8075 case SFP_EEPROM_CON_TYPE_VAL_COPPER: 8085 case SFP_EEPROM_CON_TYPE_VAL_COPPER:
8076 { 8086 {
8077 u8 copper_module_type; 8087 u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8079 /* Check if it's an active cable (includes SFP+ module) 8089 /* Check if it's an active cable (includes SFP+ module)
8080 * or a passive cable 8090 * or a passive cable
8081 */ 8091 */
8082 if (bnx2x_read_sfp_module_eeprom(phy, 8092 copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
8083 params,
8084 I2C_DEV_ADDR_A0,
8085 SFP_EEPROM_FC_TX_TECH_ADDR,
8086 1,
8087 &copper_module_type) != 0) {
8088 DP(NETIF_MSG_LINK,
8089 "Failed to read copper-cable-type"
8090 " from SFP+ EEPROM\n");
8091 return -EINVAL;
8092 }
8093 8093
8094 if (copper_module_type & 8094 if (copper_module_type &
8095 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { 8095 SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8115 } 8115 }
8116 break; 8116 break;
8117 } 8117 }
8118 case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
8118 case SFP_EEPROM_CON_TYPE_VAL_LC: 8119 case SFP_EEPROM_CON_TYPE_VAL_LC:
8119 case SFP_EEPROM_CON_TYPE_VAL_RJ45: 8120 case SFP_EEPROM_CON_TYPE_VAL_RJ45:
8120 check_limiting_mode = 1; 8121 check_limiting_mode = 1;
8121 if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | 8122 if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
8122 SFP_EEPROM_COMP_CODE_LR_MASK | 8123 (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
8123 SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { 8124 SFP_EEPROM_10G_COMP_CODE_LR_MASK |
8125 SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
8124 DP(NETIF_MSG_LINK, "1G SFP module detected\n"); 8126 DP(NETIF_MSG_LINK, "1G SFP module detected\n");
8125 gport = params->port;
8126 phy->media_type = ETH_PHY_SFP_1G_FIBER; 8127 phy->media_type = ETH_PHY_SFP_1G_FIBER;
8127 if (phy->req_line_speed != SPEED_1000) { 8128 if (phy->req_line_speed != SPEED_1000) {
8129 u8 gport = params->port;
8128 phy->req_line_speed = SPEED_1000; 8130 phy->req_line_speed = SPEED_1000;
8129 if (!CHIP_IS_E1x(bp)) { 8131 if (!CHIP_IS_E1x(bp)) {
8130 gport = BP_PATH(bp) + 8132 gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8134 "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", 8136 "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
8135 gport); 8137 gport);
8136 } 8138 }
8139 if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
8140 SFP_EEPROM_1G_COMP_CODE_BASE_T) {
8141 bnx2x_sfp_set_transmitter(params, phy, 0);
8142 msleep(40);
8143 bnx2x_sfp_set_transmitter(params, phy, 1);
8144 }
8137 } else { 8145 } else {
8138 int idx, cfg_idx = 0; 8146 int idx, cfg_idx = 0;
8139 DP(NETIF_MSG_LINK, "10G Optic module detected\n"); 8147 DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8149 break; 8157 break;
8150 default: 8158 default:
8151 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", 8159 DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
8152 val[0]); 8160 val[SFP_EEPROM_CON_TYPE_ADDR]);
8153 return -EINVAL; 8161 return -EINVAL;
8154 } 8162 }
8155 sync_offset = params->shmem_base + 8163 sync_offset = params->shmem_base +
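
The rework above replaces two or three separate EEPROM transactions with one read of bytes 0x0..0x8 from the A0 page, after which every field of interest is just an index into the buffer. The access pattern, assuming the read succeeds (the scratch variable names are illustrative):

    u8 con_type, comp_10g, comp_1g, copper_tech;
    u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1];       /* bytes 0x0..0x8 */

    /* One I2C transaction instead of one per field. */
    if (bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
                                     0, sizeof(val), val) != 0)
            return -EINVAL;

    con_type    = val[SFP_EEPROM_CON_TYPE_ADDR];      /* 0x2 */
    comp_10g    = val[SFP_EEPROM_10G_COMP_CODE_ADDR]; /* 0x3 */
    comp_1g     = val[SFP_EEPROM_1G_COMP_CODE_ADDR];  /* 0x6 */
    copper_tech = val[SFP_EEPROM_FC_TX_TECH_ADDR];    /* 0x8 */

A single transaction also avoids re-addressing the module for each field, which is why the copper-cable branch above no longer needs its own EEPROM read.
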
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13507 13515
13508 sigdet = bnx2x_warpcore_get_sigdet(phy, params); 13516 sigdet = bnx2x_warpcore_get_sigdet(phy, params);
13509 if (!sigdet) { 13517 if (!sigdet) {
13510 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13518 if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13511 bnx2x_kr2_recovery(params, vars, phy); 13519 bnx2x_kr2_recovery(params, vars, phy);
13512 DP(NETIF_MSG_LINK, "No sigdet\n"); 13520 DP(NETIF_MSG_LINK, "No sigdet\n");
13513 } 13521 }
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13525 13533
13526 /* CL73 has not begun yet */ 13534 /* CL73 has not begun yet */
13527 if (base_page == 0) { 13535 if (base_page == 0) {
13528 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13536 if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13529 bnx2x_kr2_recovery(params, vars, phy); 13537 bnx2x_kr2_recovery(params, vars, phy);
13530 DP(NETIF_MSG_LINK, "No BP\n"); 13538 DP(NETIF_MSG_LINK, "No BP\n");
13531 } 13539 }
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
13541 ((next_page & 0xe0) == 0x20)))); 13549 ((next_page & 0xe0) == 0x20))));
13542 13550
13543 /* In case KR2 is already disabled, check if we need to re-enable it */ 13551 /* In case KR2 is already disabled, check if we need to re-enable it */
13544 if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { 13552 if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
13545 if (!not_kr2_device) { 13553 if (!not_kr2_device) {
13546 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, 13554 DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
13547 next_page); 13555 next_page);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 389f5f8cb0a3..d9cce4c3899b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -323,6 +323,9 @@ struct link_params {
323#define LINK_FLAGS_INT_DISABLED (1<<0) 323#define LINK_FLAGS_INT_DISABLED (1<<0)
324#define PHY_INITIALIZED (1<<1) 324#define PHY_INITIALIZED (1<<1)
325 u32 lfa_base; 325 u32 lfa_base;
326
327 /* The same definitions as the shmem2 parameter */
328 u32 link_attr_sync;
326}; 329};
327 330
328/* Output parameters */ 331/* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
364 u8 rx_tx_asic_rst; 367 u8 rx_tx_asic_rst;
365 u8 turn_to_run_wc_rt; 368 u8 turn_to_run_wc_rt;
366 u16 rsrv2; 369 u16 rsrv2;
367 /* The same definitions as the shmem2 parameter */
368 u32 link_attr_sync;
369}; 370};
370 371
371/***********************************************************/ 372/***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 900cab420810..d1c093dcb054 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
6849 bnx2x_release_phy_lock(bp); 6849 bnx2x_release_phy_lock(bp);
6850} 6850}
6851 6851
6852static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6853{
6854 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6855 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
6856 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
6857 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
6858 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
6859
6860 /* make sure this value is 0 */
6861 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6862
6863 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
6864 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
6865 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
6866 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
6867}
6868
6869static void bnx2x_set_endianity(struct bnx2x *bp)
6870{
6871#ifdef __BIG_ENDIAN
6872 bnx2x_config_endianity(bp, 1);
6873#else
6874 bnx2x_config_endianity(bp, 0);
6875#endif
6876}
6877
6878static void bnx2x_reset_endianity(struct bnx2x *bp)
6879{
6880 bnx2x_config_endianity(bp, 0);
6881}
6882
6852/** 6883/**
6853 * bnx2x_init_hw_common - initialize the HW at the COMMON phase. 6884 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6854 * 6885 *
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6915 6946
6916 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); 6947 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6917 bnx2x_init_pxp(bp); 6948 bnx2x_init_pxp(bp);
6918 6949 bnx2x_set_endianity(bp);
6919#ifdef __BIG_ENDIAN
6920 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6921 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6922 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6923 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6924 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6925 /* make sure this value is 0 */
6926 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6927
6928/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6929 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6930 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6931 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6932 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6933#endif
6934
6935 bnx2x_ilt_init_page_size(bp, INITOP_SET); 6950 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6936 6951
6937 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) 6952 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
13169 bnx2x_iov_remove_one(bp); 13184 bnx2x_iov_remove_one(bp);
13170 13185
13171 /* Power on: we can't let PCI layer write to us while we are in D3 */ 13186 /* Power on: we can't let PCI layer write to us while we are in D3 */
13172 if (IS_PF(bp)) 13187 if (IS_PF(bp)) {
13173 bnx2x_set_power_state(bp, PCI_D0); 13188 bnx2x_set_power_state(bp, PCI_D0);
13174 13189
13190 /* Set endianness registers to reset values in case the next
13191 * driver boots in a different-endianness environment.
13192 */
13193 bnx2x_reset_endianity(bp);
13194 }
13195
13175 /* Disable MSI/MSI-X */ 13196 /* Disable MSI/MSI-X */
13176 bnx2x_disable_msi(bp); 13197 bnx2x_disable_msi(bp);
13177 13198
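
Folding the duplicated REG_WR sequences into bnx2x_config_endianity() gives both directions one writer: setup programs 1 on big-endian hosts and 0 otherwise, while remove always programs 0 so that a later driver instance, possibly started via kexec into a kernel of the opposite endianness, finds the swap registers at their reset values. The call sites reduce to roughly:

    /* probe / common HW init */
    bnx2x_set_endianity(bp);        /* 1 under __BIG_ENDIAN, else 0 */

    /* remove, PF only, after waking the device to D0 */
    bnx2x_reset_endianity(bp);      /* back to little-endian defaults */
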
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 27861a6c7ca5..a6a9f284c8dd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,7 +31,7 @@
31#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/prefetch.h> 32#include <linux/prefetch.h>
33#include <linux/random.h> 33#include <linux/random.h>
34#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 34#if IS_ENABLED(CONFIG_VLAN_8021Q)
35#define BCM_VLAN 1 35#define BCM_VLAN 1
36#endif 36#endif
37#include <net/ip.h> 37#include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3685static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3685static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3686 struct dst_entry **dst) 3686 struct dst_entry **dst)
3687{ 3687{
3688#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) 3688#if IS_ENABLED(CONFIG_IPV6)
3689 struct flowi6 fl6; 3689 struct flowi6 fl6;
3690 3690
3691 memset(&fl6, 0, sizeof(fl6)); 3691 memset(&fl6, 0, sizeof(fl6));
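
IS_ENABLED(CONFIG_FOO) is true for both =y and =m, which is what the two open-coded tests were approximating; the built-in-CNIC-with-modular-IPV6 combination that the old defined(MODULE) test guarded against is now ruled out by the Kconfig change earlier in this series (depends on PCI && (IPV6 || IPV6=n)). A minimal sketch of the macro, usable in both preprocessor and C context:

    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_IPV6)     /* CONFIG_IPV6=y or =m */
    /* declarations that need the IPv6 core */
    #endif

    static bool have_ipv6(void)
    {
            /* constant-folded; the branch disappears at compile time */
            return IS_ENABLED(CONFIG_IPV6);
    }
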
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3f9d4de8173c..5cc9cae21ed5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
875 int last_tx_cn, last_c_index, num_tx_bds; 875 int last_tx_cn, last_c_index, num_tx_bds;
876 struct enet_cb *tx_cb_ptr; 876 struct enet_cb *tx_cb_ptr;
877 struct netdev_queue *txq; 877 struct netdev_queue *txq;
878 unsigned int bds_compl;
878 unsigned int c_index; 879 unsigned int c_index;
879 880
880 /* Compute how many buffers are transmitted since last xmit call */ 881 /* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
899 /* Reclaim transmitted buffers */ 900 /* Reclaim transmitted buffers */
900 while (last_tx_cn-- > 0) { 901 while (last_tx_cn-- > 0) {
901 tx_cb_ptr = ring->cbs + last_c_index; 902 tx_cb_ptr = ring->cbs + last_c_index;
903 bds_compl = 0;
902 if (tx_cb_ptr->skb) { 904 if (tx_cb_ptr->skb) {
905 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
903 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 906 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
904 dma_unmap_single(&dev->dev, 907 dma_unmap_single(&dev->dev,
905 dma_unmap_addr(tx_cb_ptr, dma_addr), 908 dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
916 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); 919 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
917 } 920 }
918 dev->stats.tx_packets++; 921 dev->stats.tx_packets++;
919 ring->free_bds += 1; 922 ring->free_bds += bds_compl;
920 923
921 last_c_index++; 924 last_c_index++;
922 last_c_index &= (num_tx_bds - 1); 925 last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1274 1277
1275 while ((rxpktprocessed < rxpkttoprocess) && 1278 while ((rxpktprocessed < rxpkttoprocess) &&
1276 (rxpktprocessed < budget)) { 1279 (rxpktprocessed < budget)) {
1280 cb = &priv->rx_cbs[priv->rx_read_ptr];
1281 skb = cb->skb;
1282
1283 rxpktprocessed++;
1284
1285 priv->rx_read_ptr++;
1286 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
1287
1288 /* We do not have a backing SKB, so we do not have a
1289 * corresponding DMA mapping for this incoming packet since
1290 * bcmgenet_rx_refill always either has both skb and mapping or
1291 * none.
1292 */
1293 if (unlikely(!skb)) {
1294 dev->stats.rx_dropped++;
1295 dev->stats.rx_errors++;
1296 goto refill;
1297 }
1298
1277 /* Unmap the packet contents such that we can use the 1299 /* Unmap the packet contents such that we can use the
1278 * RSV from the 64 bytes descriptor when enabled and save 1300 * RSV from the 64 bytes descriptor when enabled and save
1279 * a 32-bits register read 1301 * a 32-bits register read
1280 */ 1302 */
1281 cb = &priv->rx_cbs[priv->rx_read_ptr];
1282 skb = cb->skb;
1283 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), 1303 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
1284 priv->rx_buf_len, DMA_FROM_DEVICE); 1304 priv->rx_buf_len, DMA_FROM_DEVICE);
1285 1305
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1307 __func__, p_index, priv->rx_c_index, 1327 __func__, p_index, priv->rx_c_index,
1308 priv->rx_read_ptr, dma_length_status); 1328 priv->rx_read_ptr, dma_length_status);
1309 1329
1310 rxpktprocessed++;
1311
1312 priv->rx_read_ptr++;
1313 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
1314
1315 /* out of memory, just drop packets at the hardware level */
1316 if (unlikely(!skb)) {
1317 dev->stats.rx_dropped++;
1318 dev->stats.rx_errors++;
1319 goto refill;
1320 }
1321
1322 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1330 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1323 netif_err(priv, rx_status, dev, 1331 netif_err(priv, rx_status, dev,
1324 "dropping fragmented packet!\n"); 1332 "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
1736 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1744 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1737} 1745}
1738 1746
1747static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1748{
1749 int ret = 0;
1750 int timeout = 0;
1751 u32 reg;
1752
1753 /* Disable TDMA to stop adding more frames to TX DMA */
1754 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1755 reg &= ~DMA_EN;
1756 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1757
1758 /* Check TDMA status register to confirm TDMA is disabled */
1759 while (timeout++ < DMA_TIMEOUT_VAL) {
1760 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
1761 if (reg & DMA_DISABLED)
1762 break;
1763
1764 udelay(1);
1765 }
1766
1767 if (timeout == DMA_TIMEOUT_VAL) {
1768 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
1769 ret = -ETIMEDOUT;
1770 }
1771
1772 /* Wait 10ms for packet drain in both tx and rx dma */
1773 usleep_range(10000, 20000);
1774
1775 /* Disable RDMA */
1776 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1777 reg &= ~DMA_EN;
1778 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1779
1780 timeout = 0;
1781 /* Check RDMA status register to confirm RDMA is disabled */
1782 while (timeout++ < DMA_TIMEOUT_VAL) {
1783 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
1784 if (reg & DMA_DISABLED)
1785 break;
1786
1787 udelay(1);
1788 }
1789
1790 if (timeout == DMA_TIMEOUT_VAL) {
1791 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
1792 ret = -ETIMEDOUT;
1793 }
1794
1795 return ret;
1796}
1797
1739static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1798static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1740{ 1799{
1741 int i; 1800 int i;
1742 1801
1743 /* disable DMA */ 1802 /* disable DMA */
1744 bcmgenet_rdma_writel(priv, 0, DMA_CTRL); 1803 bcmgenet_dma_teardown(priv);
1745 bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
1746 1804
1747 for (i = 0; i < priv->num_tx_bds; i++) { 1805 for (i = 0; i < priv->num_tx_bds; i++) {
1748 if (priv->tx_cbs[i].skb != NULL) { 1806 if (priv->tx_cbs[i].skb != NULL) {
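
Moving bcmgenet_dma_teardown() above bcmgenet_fini_dma() (the identical copy is deleted further down) lets fini quiesce the engines gracefully instead of writing 0 to DMA_CTRL while descriptors may still be in flight. The teardown itself is the stock disable-then-poll pattern, condensed:

    u32 reg;
    int timeout = 0, ret = 0;

    reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
    bcmgenet_tdma_writel(priv, reg & ~DMA_EN, DMA_CTRL);

    while (timeout++ < DMA_TIMEOUT_VAL) {
            if (bcmgenet_tdma_readl(priv, DMA_STATUS) & DMA_DISABLED)
                    break;
            udelay(1);
    }
    if (timeout == DMA_TIMEOUT_VAL)
            ret = -ETIMEDOUT;       /* warn, but keep tearing down */

The same sequence then repeats for the RDMA side after the 10-20 ms drain window.
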
@@ -2101,57 +2159,6 @@ err_clk_disable:
2101 return ret; 2159 return ret;
2102} 2160}
2103 2161
2104static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2105{
2106 int ret = 0;
2107 int timeout = 0;
2108 u32 reg;
2109
2110 /* Disable TDMA to stop adding more frames to TX DMA */
2111 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2112 reg &= ~DMA_EN;
2113 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2114
2115 /* Check TDMA status register to confirm TDMA is disabled */
2116 while (timeout++ < DMA_TIMEOUT_VAL) {
2117 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2118 if (reg & DMA_DISABLED)
2119 break;
2120
2121 udelay(1);
2122 }
2123
2124 if (timeout == DMA_TIMEOUT_VAL) {
2125 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2126 ret = -ETIMEDOUT;
2127 }
2128
2129 /* Wait 10ms for packet drain in both tx and rx dma */
2130 usleep_range(10000, 20000);
2131
2132 /* Disable RDMA */
2133 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2134 reg &= ~DMA_EN;
2135 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2136
2137 timeout = 0;
2138 /* Check RDMA status register to confirm RDMA is disabled */
2139 while (timeout++ < DMA_TIMEOUT_VAL) {
2140 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2141 if (reg & DMA_DISABLED)
2142 break;
2143
2144 udelay(1);
2145 }
2146
2147 if (timeout == DMA_TIMEOUT_VAL) {
2148 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2149 ret = -ETIMEDOUT;
2150 }
2151
2152 return ret;
2153}
2154
2155static void bcmgenet_netif_stop(struct net_device *dev) 2162static void bcmgenet_netif_stop(struct net_device *dev)
2156{ 2163{
2157 struct bcmgenet_priv *priv = netdev_priv(dev); 2164 struct bcmgenet_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3ac5d23454a8..ba499489969a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6918,7 +6918,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
6918 skb->protocol = eth_type_trans(skb, tp->dev); 6918 skb->protocol = eth_type_trans(skb, tp->dev);
6919 6919
6920 if (len > (tp->dev->mtu + ETH_HLEN) && 6920 if (len > (tp->dev->mtu + ETH_HLEN) &&
6921 skb->protocol != htons(ETH_P_8021Q)) { 6921 skb->protocol != htons(ETH_P_8021Q) &&
6922 skb->protocol != htons(ETH_P_8021AD)) {
6922 dev_kfree_skb_any(skb); 6923 dev_kfree_skb_any(skb);
6923 goto drop_it_no_recycle; 6924 goto drop_it_no_recycle;
6924 } 6925 }
@@ -7914,8 +7915,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7914 7915
7915 entry = tnapi->tx_prod; 7916 entry = tnapi->tx_prod;
7916 base_flags = 0; 7917 base_flags = 0;
7917 if (skb->ip_summed == CHECKSUM_PARTIAL)
7918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7919 7918
7920 mss = skb_shinfo(skb)->gso_size; 7919 mss = skb_shinfo(skb)->gso_size;
7921 if (mss) { 7920 if (mss) {
@@ -7929,6 +7928,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7929 7928
7930 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN; 7929 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7931 7930
7931 /* HW/FW cannot correctly segment packets that have been
7932 * VLAN encapsulated.
7933 */
7934 if (skb->protocol == htons(ETH_P_8021Q) ||
7935 skb->protocol == htons(ETH_P_8021AD))
7936 return tg3_tso_bug(tp, tnapi, txq, skb);
7937
7932 if (!skb_is_gso_v6(skb)) { 7938 if (!skb_is_gso_v6(skb)) {
7933 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7939 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7934 tg3_flag(tp, TSO_BUG)) 7940 tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7985,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7979 base_flags |= tsflags << 12; 7985 base_flags |= tsflags << 12;
7980 } 7986 }
7981 } 7987 }
7988 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7989 /* HW/FW cannot correctly checksum packets that have been
7990 * VLAN encapsulated.
7991 */
7992 if (skb->protocol == htons(ETH_P_8021Q) ||
7993 skb->protocol == htons(ETH_P_8021AD)) {
7994 if (skb_checksum_help(skb))
7995 goto drop;
7996 } else {
7997 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7998 }
7982 } 7999 }
7983 8000
7984 if (tg3_flag(tp, USE_JUMBO_BDFLAG) && 8001 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
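
Both new tests key off skb->protocol, which for frames tagged via the TX acceleration path still carries ETH_P_8021Q/ETH_P_8021AD rather than the inner ethertype. Roughly, the decision the two hunks implement (a condensed sketch; mss and base_flags as in the function above):

    if (skb->protocol == htons(ETH_P_8021Q) ||
        skb->protocol == htons(ETH_P_8021AD)) {
            if (mss)                         /* TSO: segment in software */
                    return tg3_tso_bug(tp, tnapi, txq, skb);
            if (skb->ip_summed == CHECKSUM_PARTIAL &&
                skb_checksum_help(skb))      /* csum: compute in software */
                    goto drop;
    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
            base_flags |= TXD_FLAG_TCPUDP_CSUM;   /* offload is safe */
    }

skb_checksum_help() returns 0 on success, so the goto only fires when even the software fallback fails.
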
@@ -11617,6 +11634,12 @@ static int tg3_open(struct net_device *dev)
11617 struct tg3 *tp = netdev_priv(dev); 11634 struct tg3 *tp = netdev_priv(dev);
11618 int err; 11635 int err;
11619 11636
11637 if (tp->pcierr_recovery) {
11638 netdev_err(dev, "Failed to open device. PCI error recovery "
11639 "in progress\n");
11640 return -EAGAIN;
11641 }
11642
11620 if (tp->fw_needed) { 11643 if (tp->fw_needed) {
11621 err = tg3_request_firmware(tp); 11644 err = tg3_request_firmware(tp);
11622 if (tg3_asic_rev(tp) == ASIC_REV_57766) { 11645 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11697,12 @@ static int tg3_close(struct net_device *dev)
11674{ 11697{
11675 struct tg3 *tp = netdev_priv(dev); 11698 struct tg3 *tp = netdev_priv(dev);
11676 11699
11700 if (tp->pcierr_recovery) {
11701 netdev_err(dev, "Failed to close device. PCI error recovery "
11702 "in progress\n");
11703 return -EAGAIN;
11704 }
11705
11677 tg3_ptp_fini(tp); 11706 tg3_ptp_fini(tp);
11678 11707
11679 tg3_stop(tp); 11708 tg3_stop(tp);
@@ -17561,6 +17590,7 @@ static int tg3_init_one(struct pci_dev *pdev,
17561 tp->rx_mode = TG3_DEF_RX_MODE; 17590 tp->rx_mode = TG3_DEF_RX_MODE;
17562 tp->tx_mode = TG3_DEF_TX_MODE; 17591 tp->tx_mode = TG3_DEF_TX_MODE;
17563 tp->irq_sync = 1; 17592 tp->irq_sync = 1;
17593 tp->pcierr_recovery = false;
17564 17594
17565 if (tg3_debug > 0) 17595 if (tg3_debug > 0)
17566 tp->msg_enable = tg3_debug; 17596 tp->msg_enable = tg3_debug;
@@ -18071,6 +18101,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18071 18101
18072 rtnl_lock(); 18102 rtnl_lock();
18073 18103
18104 tp->pcierr_recovery = true;
18105
18074 /* We probably don't have netdev yet */ 18106 /* We probably don't have netdev yet */
18075 if (!netdev || !netif_running(netdev)) 18107 if (!netdev || !netif_running(netdev))
18076 goto done; 18108 goto done;
@@ -18195,6 +18227,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
18195 tg3_phy_start(tp); 18227 tg3_phy_start(tp);
18196 18228
18197done: 18229done:
18230 tp->pcierr_recovery = false;
18198 rtnl_unlock(); 18231 rtnl_unlock();
18199} 18232}
18200 18233
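
pcierr_recovery is a plain bool rather than an atomic because every access shown here runs under the RTNL: open/close are ndo callbacks, and the AER handlers take rtnl_lock() themselves. Its lifecycle, condensed:

    tp->pcierr_recovery = false;    /* tg3_init_one(): probe          */
    tp->pcierr_recovery = true;     /* tg3_io_error_detected()        */

    /* meanwhile, tg3_open()/tg3_close() fail fast: */
    if (tp->pcierr_recovery)
            return -EAGAIN;         /* caller retries after recovery  */

    tp->pcierr_recovery = false;    /* tg3_io_resume(): 'done' label  */
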
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 461accaf0aa4..31c9f8295953 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3407,6 +3407,7 @@ struct tg3 {
3407 3407
3408 struct device *hwmon_dev; 3408 struct device *hwmon_dev;
3409 bool link_up; 3409 bool link_up;
3410 bool pcierr_recovery;
3410}; 3411};
3411 3412
3412/* Accessor macros for chip and asic attributes 3413/* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff8cae5e2535..ffc92a41d75b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2506 * For TSO, the TCP checksum field is seeded with pseudo-header sum 2506 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2507 * excluding the length field. 2507 * excluding the length field.
2508 */ 2508 */
2509 if (skb->protocol == htons(ETH_P_IP)) { 2509 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2510 struct iphdr *iph = ip_hdr(skb); 2510 struct iphdr *iph = ip_hdr(skb);
2511 2511
2512 /* Do we really need these? */ 2512 /* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2870 } 2870 }
2871 2871
2872 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2872 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2873 __be16 net_proto = vlan_get_protocol(skb);
2873 u8 proto = 0; 2874 u8 proto = 0;
2874 2875
2875 if (skb->protocol == htons(ETH_P_IP)) 2876 if (net_proto == htons(ETH_P_IP))
2876 proto = ip_hdr(skb)->protocol; 2877 proto = ip_hdr(skb)->protocol;
2877#ifdef NETIF_F_IPV6_CSUM 2878#ifdef NETIF_F_IPV6_CSUM
2878 else if (skb->protocol == htons(ETH_P_IPV6)) { 2879 else if (net_proto == htons(ETH_P_IPV6)) {
2879 /* nexthdr may not be TCP immediately. */ 2880 /* nexthdr may not be TCP immediately. */
2880 proto = ipv6_hdr(skb)->nexthdr; 2881 proto = ipv6_hdr(skb)->nexthdr;
2881 } 2882 }
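
For a VLAN-tagged skb, skb->protocol carries the 802.1Q/802.1ad ethertype, not the encapsulated one, so testing it directly misclassifies tagged IPv4/IPv6 traffic and the checksum/TSO setup ends up describing the wrong headers. vlan_get_protocol() from <linux/if_vlan.h> returns the inner protocol whether the tag sits in the accel field or in the packet; the same substitution recurs below in ehea and e1000. Minimal usage sketch:

    #include <linux/if_vlan.h>
    #include <linux/ip.h>
    #include <linux/ipv6.h>

    __be16 proto = vlan_get_protocol(skb);  /* inner ethertype */
    u8 l4_proto = 0;

    if (proto == htons(ETH_P_IP))
            l4_proto = ip_hdr(skb)->protocol;
    else if (proto == htons(ETH_P_IPV6))
            l4_proto = ipv6_hdr(skb)->nexthdr;  /* may be an ext header */
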
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ca5d7798b265..e1e02fba4fcc 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -30,7 +30,6 @@
30#include <linux/of_device.h> 30#include <linux/of_device.h>
31#include <linux/of_mdio.h> 31#include <linux/of_mdio.h>
32#include <linux/of_net.h> 32#include <linux/of_net.h>
33#include <linux/pinctrl/consumer.h>
34 33
35#include "macb.h" 34#include "macb.h"
36 35
@@ -2071,7 +2070,6 @@ static int __init macb_probe(struct platform_device *pdev)
2071 struct phy_device *phydev; 2070 struct phy_device *phydev;
2072 u32 config; 2071 u32 config;
2073 int err = -ENXIO; 2072 int err = -ENXIO;
2074 struct pinctrl *pinctrl;
2075 const char *mac; 2073 const char *mac;
2076 2074
2077 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2075 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2080,15 +2078,6 @@ static int __init macb_probe(struct platform_device *pdev)
2080 goto err_out; 2078 goto err_out;
2081 } 2079 }
2082 2080
2083 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
2084 if (IS_ERR(pinctrl)) {
2085 err = PTR_ERR(pinctrl);
2086 if (err == -EPROBE_DEFER)
2087 goto err_out;
2088
2089 dev_warn(&pdev->dev, "No pinctrl provided\n");
2090 }
2091
2092 err = -ENOMEM; 2081 err = -ENOMEM;
2093 dev = alloc_etherdev(sizeof(*bp)); 2082 dev = alloc_etherdev(sizeof(*bp));
2094 if (!dev) 2083 if (!dev)
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 184a063bed5f..07d2201530d2 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,7 @@
1config NET_CALXEDA_XGMAC 1config NET_CALXEDA_XGMAC
2 tristate "Calxeda 1G/10G XGMAC Ethernet driver" 2 tristate "Calxeda 1G/10G XGMAC Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA 3 depends on HAS_IOMEM && HAS_DMA
4 depends on ARCH_HIGHBANK || COMPILE_TEST
4 select CRC32 5 select CRC32
5 help 6 help
6 This is the driver for the XGMAC Ethernet IP block found on Calxeda 7 This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 18fb9c61d7ba..e5be511a3c38 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap);
1253 goto freeout; 1253 goto freeout;
1254 } 1254 }
1255 1255
1256 t4_write_reg(adap, MPS_TRC_RSS_CONTROL, 1256 t4_write_reg(adap, is_t4(adap->params.chip) ?
1257 MPS_TRC_RSS_CONTROL :
1258 MPS_T5_TRC_RSS_CONTROL,
1257 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | 1259 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1258 QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); 1260 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1259 return 0; 1261 return 0;
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1761 0xd004, 0xd03c, 1763 0xd004, 0xd03c,
1762 0xdfc0, 0xdfe0, 1764 0xdfc0, 0xdfe0,
1763 0xe000, 0xea7c, 1765 0xe000, 0xea7c,
1764 0xf000, 0x11190, 1766 0xf000, 0x11110,
1767 0x11118, 0x11190,
1765 0x19040, 0x1906c, 1768 0x19040, 0x1906c,
1766 0x19078, 0x19080, 1769 0x19078, 0x19080,
1767 0x1908c, 0x19124, 1770 0x1908c, 0x19124,
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1968 0xd004, 0xd03c, 1971 0xd004, 0xd03c,
1969 0xdfc0, 0xdfe0, 1972 0xdfc0, 0xdfe0,
1970 0xe000, 0x11088, 1973 0xe000, 0x11088,
1971 0x1109c, 0x1117c, 1974 0x1109c, 0x11110,
1975 0x11118, 0x1117c,
1972 0x11190, 0x11204, 1976 0x11190, 0x11204,
1973 0x19040, 0x1906c, 1977 0x19040, 0x1906c,
1974 0x19078, 0x19080, 1978 0x19078, 0x19080,
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
5955 params[3] = FW_PARAM_PFVF(CQ_END); 5959 params[3] = FW_PARAM_PFVF(CQ_END);
5956 params[4] = FW_PARAM_PFVF(OCQ_START); 5960 params[4] = FW_PARAM_PFVF(OCQ_START);
5957 params[5] = FW_PARAM_PFVF(OCQ_END); 5961 params[5] = FW_PARAM_PFVF(OCQ_END);
5958 ret = t4_query_params(adap, 0, 0, 0, 6, params, val); 5962 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5963 val);
5959 if (ret < 0) 5964 if (ret < 0)
5960 goto bye; 5965 goto bye;
5961 adap->vres.qp.start = val[0]; 5966 adap->vres.qp.start = val[0];
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
5967 5972
5968 params[0] = FW_PARAM_DEV(MAXORDIRD_QP); 5973 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5969 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); 5974 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5970 ret = t4_query_params(adap, 0, 0, 0, 2, params, val); 5975 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5976 val);
5971 if (ret < 0) { 5977 if (ret < 0) {
5972 adap->params.max_ordird_qp = 8; 5978 adap->params.max_ordird_qp = 8;
5973 adap->params.max_ird_adapter = 32 * adap->tids.ntids; 5979 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
@@ -6472,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6472 struct port_info *pi; 6478 struct port_info *pi;
6473 bool highdma = false; 6479 bool highdma = false;
6474 struct adapter *adapter = NULL; 6480 struct adapter *adapter = NULL;
6481 void __iomem *regs;
6475 6482
6476 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); 6483 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6477 6484
@@ -6488,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6488 goto out_release_regions; 6495 goto out_release_regions;
6489 } 6496 }
6490 6497
6498 regs = pci_ioremap_bar(pdev, 0);
6499 if (!regs) {
6500 dev_err(&pdev->dev, "cannot map device registers\n");
6501 err = -ENOMEM;
6502 goto out_disable_device;
6503 }
6504
6505 /* We control everything through one PF */
6506 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6507 if (func != ent->driver_data) {
6508 iounmap(regs);
6509 pci_disable_device(pdev);
6510 pci_save_state(pdev); /* to restore SR-IOV later */
6511 goto sriov;
6512 }
6513
6491 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 6514 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6492 highdma = true; 6515 highdma = true;
6493 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 6516 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6494 if (err) { 6517 if (err) {
6495 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 6518 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6496 "coherent allocations\n"); 6519 "coherent allocations\n");
6497 goto out_disable_device; 6520 goto out_unmap_bar0;
6498 } 6521 }
6499 } else { 6522 } else {
6500 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6523 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6501 if (err) { 6524 if (err) {
6502 dev_err(&pdev->dev, "no usable DMA configuration\n"); 6525 dev_err(&pdev->dev, "no usable DMA configuration\n");
6503 goto out_disable_device; 6526 goto out_unmap_bar0;
6504 } 6527 }
6505 } 6528 }
6506 6529
@@ -6512,7 +6535,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6512 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 6535 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6513 if (!adapter) { 6536 if (!adapter) {
6514 err = -ENOMEM; 6537 err = -ENOMEM;
6515 goto out_disable_device; 6538 goto out_unmap_bar0;
6516 } 6539 }
6517 6540
6518 adapter->workq = create_singlethread_workqueue("cxgb4"); 6541 adapter->workq = create_singlethread_workqueue("cxgb4");
@@ -6524,20 +6547,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6524 /* PCI device has been enabled */ 6547 /* PCI device has been enabled */
6525 adapter->flags |= DEV_ENABLED; 6548 adapter->flags |= DEV_ENABLED;
6526 6549
6527 adapter->regs = pci_ioremap_bar(pdev, 0); 6550 adapter->regs = regs;
6528 if (!adapter->regs) {
6529 dev_err(&pdev->dev, "cannot map device registers\n");
6530 err = -ENOMEM;
6531 goto out_free_adapter;
6532 }
6533
6534 /* We control everything through one PF */
6535 func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
6536 if (func != ent->driver_data) {
6537 pci_save_state(pdev); /* to restore SR-IOV later */
6538 goto sriov;
6539 }
6540
6541 adapter->pdev = pdev; 6551 adapter->pdev = pdev;
6542 adapter->pdev_dev = &pdev->dev; 6552 adapter->pdev_dev = &pdev->dev;
6543 adapter->mbox = func; 6553 adapter->mbox = func;
@@ -6554,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6554 6564
6555 err = t4_prep_adapter(adapter); 6565 err = t4_prep_adapter(adapter);
6556 if (err) 6566 if (err)
6557 goto out_unmap_bar0; 6567 goto out_free_adapter;
6568
6558 6569
6559 if (!is_t4(adapter->params.chip)) { 6570 if (!is_t4(adapter->params.chip)) {
6560 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 6571 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6571,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6571 dev_err(&pdev->dev, 6582 dev_err(&pdev->dev,
6572 "Incorrect number of egress queues per page\n"); 6583 "Incorrect number of egress queues per page\n");
6573 err = -EINVAL; 6584 err = -EINVAL;
6574 goto out_unmap_bar0; 6585 goto out_free_adapter;
6575 } 6586 }
6576 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), 6587 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6577 pci_resource_len(pdev, 2)); 6588 pci_resource_len(pdev, 2));
6578 if (!adapter->bar2) { 6589 if (!adapter->bar2) {
6579 dev_err(&pdev->dev, "cannot map device bar2 region\n"); 6590 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6580 err = -ENOMEM; 6591 err = -ENOMEM;
6581 goto out_unmap_bar0; 6592 goto out_free_adapter;
6582 } 6593 }
6583 } 6594 }
6584 6595
@@ -6716,13 +6727,13 @@ sriov:
6716 out_unmap_bar: 6727 out_unmap_bar:
6717 if (!is_t4(adapter->params.chip)) 6728 if (!is_t4(adapter->params.chip))
6718 iounmap(adapter->bar2); 6729 iounmap(adapter->bar2);
6719 out_unmap_bar0:
6720 iounmap(adapter->regs);
6721 out_free_adapter: 6730 out_free_adapter:
6722 if (adapter->workq) 6731 if (adapter->workq)
6723 destroy_workqueue(adapter->workq); 6732 destroy_workqueue(adapter->workq);
6724 6733
6725 kfree(adapter); 6734 kfree(adapter);
6735 out_unmap_bar0:
6736 iounmap(regs);
6726 out_disable_device: 6737 out_disable_device:
6727 pci_disable_pcie_error_reporting(pdev); 6738 pci_disable_pcie_error_reporting(pdev);
6728 pci_disable_device(pdev); 6739 pci_disable_device(pdev);
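
The probe now maps BAR0 and reads PL_WHOAMI before any DMA-mask setup or adapter allocation, so functions other than the one this driver owns bail out with only the mapping and the PCI enable to undo. The error labels are re-sorted to mirror the new acquisition order, last acquired unwound first; a skeleton of the shape (setup_rest() is a placeholder for everything after the allocation):

    void __iomem *regs;
    struct adapter *adapter;
    int err = 0;

    regs = pci_ioremap_bar(pdev, 0);          /* acquired first */
    if (!regs)
            goto out_disable_device;

    adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
    if (!adapter) {
            err = -ENOMEM;
            goto out_unmap_bar0;              /* undo only the map */
    }

    err = setup_rest(adapter);
    if (err)
            goto out_free_adapter;            /* undo alloc, then map */
    return 0;

    out_free_adapter:
            kfree(adapter);
    out_unmap_bar0:
            iounmap(regs);
    out_disable_device:
            pci_disable_device(pdev);
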
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a853133d8db8..41d04462b72e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
168} 168}
169 169
170/* 170/*
171 * t4_report_fw_error - report firmware error
172 * @adap: the adapter
173 *
174 * The adapter firmware can indicate error conditions to the host.
175 * If the firmware has indicated an error, print out the reason for
176 * the firmware error.
177 */
178static void t4_report_fw_error(struct adapter *adap)
179{
180 static const char *const reason[] = {
181 "Crash", /* PCIE_FW_EVAL_CRASH */
182 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
183 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
184 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
185 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
186 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
187 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
188 "Reserved", /* reserved */
189 };
190 u32 pcie_fw;
191
192 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
193 if (pcie_fw & FW_PCIE_FW_ERR)
194 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
195 reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
196}
197
198/*
171 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 199 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
172 */ 200 */
173static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 201static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
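
t4_report_fw_error() is a straight table lookup: three "eval" bits in the PCIE_FW register select one of the eight reason[] strings, so the mask alone bounds the index. The decode macros land in t4fw_api.h further down in this series; a worked example of the arithmetic:

    #define FW_PCIE_FW_EVAL_SHIFT 24
    #define FW_PCIE_FW_EVAL_MASK  0x7
    #define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
                                    FW_PCIE_FW_EVAL_MASK)

    /* Suppose the register reads back with the error bit set and
     * evaluation code 2:
     */
    u32 pcie_fw = 0x80000000 | (2 << 24);

    /* FW_PCIE_FW_EVAL_GET(pcie_fw) == 2
     *   -> reason[2] == "During Device Configuration"
     */

Whether FW_PCIE_FW_ERR is bit 31 is not shown in this hunk; the value above is illustrative only.
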
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
300 dump_mbox(adap, mbox, data_reg); 328 dump_mbox(adap, mbox, data_reg);
301 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", 329 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
302 *(const u8 *)cmd, mbox); 330 *(const u8 *)cmd, mbox);
331 t4_report_fw_error(adap);
303 return -ETIMEDOUT; 332 return -ETIMEDOUT;
304} 333}
305 334
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
566#define VPD_BASE 0x400 595#define VPD_BASE 0x400
567#define VPD_BASE_OLD 0 596#define VPD_BASE_OLD 0
568#define VPD_LEN 1024 597#define VPD_LEN 1024
598#define CHELSIO_VPD_UNIQUE_ID 0x82
569 599
570/** 600/**
571 * t4_seeprom_wp - enable/disable EEPROM write protection 601 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
603 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); 633 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
604 if (ret < 0) 634 if (ret < 0)
605 goto out; 635 goto out;
606 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; 636
637 /* The VPD shall have a unique identifier specified by the PCI SIG.
638 * For Chelsio adapters, the first byte of the VPD shall be
639 * CHELSIO_VPD_UNIQUE_ID (0x82); the VPD programming software is
640 * expected to automatically put this entry at the beginning of
641 * the VPD.
642 */
643 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
607 644
608 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); 645 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
609 if (ret < 0) 646 if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
667 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 704 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
668 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 705 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
669 strim(p->sn); 706 strim(p->sn);
707 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
670 memcpy(p->pn, vpd + pn, min(i, PN_LEN)); 708 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
671 strim(p->pn); 709 strim(p->pn);
672 710
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
1394 1432
1395 int fat; 1433 int fat;
1396 1434
1397 fat = t4_handle_intr_status(adapter, 1435 if (is_t4(adapter->params.chip))
1398 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, 1436 fat = t4_handle_intr_status(adapter,
1399 sysbus_intr_info) + 1437 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1400 t4_handle_intr_status(adapter, 1438 sysbus_intr_info) +
1401 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1439 t4_handle_intr_status(adapter,
1402 pcie_port_intr_info) + 1440 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1403 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, 1441 pcie_port_intr_info) +
1404 is_t4(adapter->params.chip) ? 1442 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1405 pcie_intr_info : t5_pcie_intr_info); 1443 pcie_intr_info);
1444 else
1445 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1446 t5_pcie_intr_info);
1406 1447
1407 if (fat) 1448 if (fat)
1408 t4_fatal_err(adapter); 1449 t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
1521 1562
1522 int fat; 1563 int fat;
1523 1564
1565 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1566 t4_report_fw_error(adapter);
1567
1524 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, 1568 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1525 cim_intr_info) + 1569 cim_intr_info) +
1526 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, 1570 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
1768{ 1812{
1769 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); 1813 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1770 1814
1771 if (status & MEM_PERR_INT_CAUSE) 1815 if (status & MEM_PERR_INT_CAUSE) {
1772 dev_alert(adap->pdev_dev, 1816 dev_alert(adap->pdev_dev,
1773 "MA parity error, parity status %#x\n", 1817 "MA parity error, parity status %#x\n",
1774 t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); 1818 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1819 if (is_t5(adap->params.chip))
1820 dev_alert(adap->pdev_dev,
1821 "MA parity error, parity status %#x\n",
1822 t4_read_reg(adap,
1823 MA_PARITY_ERROR_STATUS2));
1824 }
1775 if (status & MEM_WRAP_INT_CAUSE) { 1825 if (status & MEM_WRAP_INT_CAUSE) {
1776 v = t4_read_reg(adap, MA_INT_WRAP_STATUS); 1826 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1777 dev_alert(adap->pdev_dev, "MA address wrap-around error by " 1827 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
2733 /* 2783 /*
2734 * Issue the HELLO command to the firmware. If it's not successful 2784 * Issue the HELLO command to the firmware. If it's not successful
2735 * but indicates that we got a "busy" or "timeout" condition, retry 2785 * but indicates that we got a "busy" or "timeout" condition, retry
2736 * the HELLO until we exhaust our retry limit. 2786 * the HELLO until we exhaust our retry limit. If we do exceed our
2787 * retry limit, check to see if the firmware left us any error
2788 * information and report that if so.
2737 */ 2789 */
2738 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2790 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2739 if (ret < 0) { 2791 if (ret < 0) {
2740 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 2792 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2741 goto retry; 2793 goto retry;
2794 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2795 t4_report_fw_error(adap);
2742 return ret; 2796 return ret;
2743 } 2797 }
2744 2798
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3742 lc->link_ok = link_ok; 3796 lc->link_ok = link_ok;
3743 lc->speed = speed; 3797 lc->speed = speed;
3744 lc->fc = fc; 3798 lc->fc = fc;
3799 lc->supported = be16_to_cpu(p->u.info.pcap);
3745 t4_os_link_changed(adap, port, link_ok); 3800 t4_os_link_changed(adap, port, link_ok);
3746 } 3801 }
3747 if (mod != pi->mod_type) { 3802 if (mod != pi->mod_type) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index e3146e83df20..39fb325474f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -511,6 +511,7 @@
511#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) 511#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
512#define MA_PCIE_FW 0x30b8 512#define MA_PCIE_FW 0x30b8
513#define MA_PARITY_ERROR_STATUS 0x77f4 513#define MA_PARITY_ERROR_STATUS 0x77f4
514#define MA_PARITY_ERROR_STATUS2 0x7804
514 515
515#define MA_EXT_MEMORY1_BAR 0x7808 516#define MA_EXT_MEMORY1_BAR 0x7808
516#define EDC_0_BASE_ADDR 0x7900 517#define EDC_0_BASE_ADDR 0x7900
@@ -959,6 +960,7 @@
959#define TRCMULTIFILTER 0x00000001U 960#define TRCMULTIFILTER 0x00000001U
960 961
961#define MPS_TRC_RSS_CONTROL 0x9808 962#define MPS_TRC_RSS_CONTROL 0x9808
963#define MPS_T5_TRC_RSS_CONTROL 0xa00c
962#define RSSCONTROL_MASK 0x00ff0000U 964#define RSSCONTROL_MASK 0x00ff0000U
963#define RSSCONTROL_SHIFT 16 965#define RSSCONTROL_SHIFT 16
964#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) 966#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 5f2729ebadbe..3409756a85b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd {
2228#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) 2228#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
2229#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ 2229#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
2230 FW_PCIE_FW_MASTER_MASK) 2230 FW_PCIE_FW_MASTER_MASK)
2231#define FW_PCIE_FW_EVAL_MASK 0x7
2232#define FW_PCIE_FW_EVAL_SHIFT 24
2233#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
2234 FW_PCIE_FW_EVAL_MASK)
2231 2235
2232struct fw_hdr { 2236struct fw_hdr {
2233 u8 ver; 2237 u8 ver;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9b33057a9477..70089c29d307 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1399 const void *mac_addr; 1399 const void *mac_addr;
1400 1400
1401 if (!IS_ENABLED(CONFIG_OF) || !np) 1401 if (!IS_ENABLED(CONFIG_OF) || !np)
1402 return NULL; 1402 return ERR_PTR(-ENXIO);
1403 1403
1404 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); 1404 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1405 if (!pdata) 1405 if (!pdata)
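
Returning ERR_PTR(-ENXIO) instead of NULL lets the caller tell "no usable DT node, fall back to board platform data" apart from a hard allocation failure, via the usual <linux/err.h> trio. Expected caller shape, sketched (the fallback policy is an assumption, not shown in this hunk):

    #include <linux/err.h>

    pdata = dm9000_parse_dt(&pdev->dev);
    if (IS_ERR(pdata)) {
            ret = PTR_ERR(pdata);          /* e.g. -ENXIO: no DT data */
            pdata = dev_get_platdata(&pdev->dev);  /* legacy fallback */
            if (!pdata)
                    return ret;
    }
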
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index a0b418e007a0..566b17db135a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1994{ 1994{
1995 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; 1995 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1996 1996
1997 if (skb->protocol != htons(ETH_P_IP)) 1997 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1998 return; 1998 return;
1999 1999
2000 if (skb->ip_summed == CHECKSUM_PARTIAL) 2000 if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cbc330b301cd..ad3d5d12173f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2674,7 +2674,8 @@ set_itr_now:
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
 static int e1000_tso(struct e1000_adapter *adapter,
-		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+		     __be16 protocol)
 {
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 
 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	mss = skb_shinfo(skb)->gso_size;
-	if (skb->protocol == htons(ETH_P_IP)) {
+	if (protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
 						 0);
 		cmd_length = E1000_TXD_CMD_IP;
 		ipcse = skb_transport_offset(skb) - 1;
-	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
 			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
 }
 
 static bool e1000_tx_csum(struct e1000_adapter *adapter,
-			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
+			  __be16 protocol)
 {
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return false;
 
-	switch (skb->protocol) {
+	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):
 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
 			cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	int count = 0;
 	int tso;
 	unsigned int f;
+	__be16 protocol = vlan_get_protocol(skb);
 
 	/* This goes back to the question of how to logically map a Tx queue
 	 * to a flow. Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
 	first = tx_ring->next_to_use;
 
-	tso = e1000_tso(adapter, tx_ring, skb);
+	tso = e1000_tso(adapter, tx_ring, skb, protocol);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		if (likely(hw->mac_type != e1000_82544))
 			tx_ring->last_tx_tso = true;
 		tx_flags |= E1000_TX_FLAGS_TSO;
-	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
-	if (likely(skb->protocol == htons(ETH_P_IP)))
+	if (protocol == htons(ETH_P_IP))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
 	if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 65c3aef2bd36..247335d2c7ec 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5164,7 +5164,8 @@ link_up:
 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
-static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
+		     __be16 protocol)
 {
 	struct e1000_context_desc *context_desc;
 	struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 
 	hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	mss = skb_shinfo(skb)->gso_size;
-	if (skb->protocol == htons(ETH_P_IP)) {
+	if (protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	return 1;
 }
 
-static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
+static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
+			  __be16 protocol)
 {
 	struct e1000_adapter *adapter = tx_ring->adapter;
 	struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
 	unsigned int i;
 	u8 css;
 	u32 cmd_len = E1000_TXD_CMD_DEXT;
-	__be16 protocol;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
 		return false;
 
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
-	else
-		protocol = skb->protocol;
-
 	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):
 		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	int count = 0;
 	int tso;
 	unsigned int f;
+	__be16 protocol = vlan_get_protocol(skb);
 
 	if (test_bit(__E1000_DOWN, &adapter->state)) {
 		dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
 	first = tx_ring->next_to_use;
 
-	tso = e1000_tso(tx_ring, skb);
+	tso = e1000_tso(tx_ring, skb, protocol);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 
 	if (tso)
 		tx_flags |= E1000_TX_FLAGS_TSO;
-	else if (e1000_tx_csum(tx_ring, skb))
+	else if (e1000_tx_csum(tx_ring, skb, protocol))
 		tx_flags |= E1000_TX_FLAGS_CSUM;
 
 	/* Old method was to assume IPv4 packet by default if TSO was enabled.
 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
 	 * no longer assume, we must.
 	 */
-	if (skb->protocol == htons(ETH_P_IP))
+	if (protocol == htons(ETH_P_IP))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
 	if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a51aa37b7b5a..369848e107f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 		goto out_drop;
 
 	/* obtain protocol of skb */
-	protocol = skb->protocol;
+	protocol = vlan_get_protocol(skb);
 
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 79bf96ca6489..95a3ec236b49 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 		goto out_drop;
 
 	/* obtain protocol of skb */
-	protocol = skb->protocol;
+	protocol = vlan_get_protocol(skb);
 
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c9f1d1b7ef37..ade067de1689 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -20,6 +20,7 @@
 #include <linux/mbus.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
+		__be16 l3_proto = vlan_get_protocol(skb);
 		u8 l4_proto;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);
 
 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 			return MVNETA_TX_L4_CSUM_NOT;
 
 		return mvneta_txq_desc_csum(skb_network_offset(skb),
-					    skb->protocol, ip_hdr_len, l4_proto);
+					    l3_proto, ip_hdr_len, l4_proto);
 	}
 
 	return MVNETA_TX_L4_CSUM_NOT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65a4a0f88ea0..923c4878461e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1695,7 +1695,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 	if (err) {
 		vp_oper->vlan_idx = NO_INDX;
 		mlx4_warn(&priv->dev,
-			  "No vlan resorces slave %d, port %d\n",
+			  "No vlan resources slave %d, port %d\n",
 			  slave, port);
 		return err;
 	}
@@ -1711,7 +1711,7 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
 		err = vp_oper->mac_idx;
 		vp_oper->mac_idx = NO_INDX;
 		mlx4_warn(&priv->dev,
-			  "No mac resorces slave %d, port %d\n",
+			  "No mac resources slave %d, port %d\n",
 			  slave, port);
 		return err;
 	}
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+			+ 1;
+	int max_port = min_port +
+		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+	if (port < min_port)
+		port = min_port;
+	else if (port >= max_port)
+		port = max_port - 1;
+
+	return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->mac = mac;
 	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
 	struct mlx4_priv *priv;
 
 	priv = mlx4_priv(dev);
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	switch (link_state) {
 	case IFLA_VF_LINK_STATE_AUTO:
 		/* get current link state */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e22f24f784fc..35ff2925110a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
+	if (pause->autoneg)
+		return -EINVAL;
+
 	priv->prof->tx_pause = pause->tx_pause != 0;
 	priv->prof->rx_pause = pause->rx_pause != 0;
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb536aa613f4..abddcf8c40aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
 			    int qpn, u64 *reg_id)
 {
 	int err;
-	struct mlx4_spec_list spec_eth_outer = { {NULL} };
-	struct mlx4_spec_list spec_vxlan = { {NULL} };
-	struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-	struct mlx4_net_trans_rule rule = {
-		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
-		.exclusive = 0,
-		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_REGULAR,
-		.priority = MLX4_DOMAIN_NIC,
-	};
-
-	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return 0; /* do nothing */
 
-	rule.port = priv->port;
-	rule.qpn = qpn;
-	INIT_LIST_HEAD(&rule.list);
-
-	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
-	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
-
-	list_add_tail(&spec_eth_outer.list, &rule.list);
-	list_add_tail(&spec_vxlan.list, &rule.list);
-	list_add_tail(&spec_eth_inner.list, &rule.list);
-
-	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+				    MLX4_DOMAIN_NIC, reg_id);
 	if (err) {
 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7e2d5d57c598..871e3a5bda38 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -78,13 +78,13 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
 #endif /* CONFIG_PCI_MSI */
 
 static uint8_t num_vfs[3] = {0, 0, 0};
-static int num_vfs_argc = 3;
+static int num_vfs_argc;
 module_param_array(num_vfs, byte , &num_vfs_argc, 0444);
 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
 			  "num_vfs=port1,port2,port1+2");
 
 static uint8_t probe_vf[3] = {0, 0, 0};
-static int probe_vfs_argc = 3;
+static int probe_vfs_argc;
 module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
 			   "probe_vf=port1,port2,port1+2");
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index d80e7a6fac74..ca0f98c95105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	rule.port = port;
+	rule.qpn = qpn;
+	rule.priority = prio;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(dev, &rule, reg_id);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
 				      u32 max_range_qpn)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7d717eccb7b0..193a6adb5d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
 	struct mlx4_cmd_mailbox *mailbox = NULL;
 
-	/* Make sure that at this point we have single-threaded access only */
-
 	if (mmr->enabled != MLX4_MPT_EN_HW)
 		return -EINVAL;
 
 	err = mlx4_HW2SW_MPT(dev, NULL, key);
-
 	if (err) {
 		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
 		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 				   0, MLX4_CMD_QUERY_MPT,
 				   MLX4_CMD_TIME_CLASS_B,
 				   MLX4_CMD_WRAPPED);
-
 	if (err)
 		goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
-	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-	if (!err)
+	if (!err) {
+		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
 		mmr->enabled = MLX4_MPT_EN_HW;
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
 			 u32 pdn)
 {
-	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
 	/* The wrapper function will put the slave's id here */
 	if (mlx4_is_mfunc(dev))
 		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+
+	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
 					  (pdn & MLX4_MPT_PD_MASK)
 					  | MLX4_MPT_PD_FLAG_EN_INV);
 	return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
 	int err;
 
-	mpt_entry->start       = cpu_to_be64(mr->iova);
-	mpt_entry->length      = cpu_to_be64(mr->size);
-	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+	mpt_entry->start       = cpu_to_be64(iova);
+	mpt_entry->length      = cpu_to_be64(size);
+	mpt_entry->entity_size = cpu_to_be32(page_shift);
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
 		return err;
 
+	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+					   MLX4_MPT_PD_FLAG_EN_INV);
+	mpt_entry->flags    &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+					   MLX4_MPT_FLAG_SW_OWNS);
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 		if (mr->mtt.page_shift == 0)
 			mpt_entry->mtt_sz    = cpu_to_be32(1 << mr->mtt.order);
 	}
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+						   MLX4_MPT_PD_FLAG_RAE);
+	} else {
+		mpt_entry->flags    |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 	mr->enabled = MLX4_MPT_EN_SW;
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 9ba0c1ca10d5..94eeb2c7d7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
 	int i;
 
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if ((mac & MLX4_MAC_MASK) ==
+		if (table->refs[i] &&
+		    (MLX4_MAC_MASK & mac) ==
 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
 			return i;
 	}
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
 	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if (free < 0 && !table->entries[i]) {
-			free = i;
+		if (!table->refs[i]) {
+			if (free < 0)
+				free = i;
 			continue;
 		}
 
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+		if ((MLX4_MAC_MASK & mac) ==
+		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 			/* MAC already registered, increment ref count */
 			err = i;
 			++table->refs[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3b..2301365c79c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_update_qp_context *cmd;
 	u64 pri_addr_path_mask = 0;
+	u64 qp_mask = 0;
 	int err = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
 	}
 
+	if (attr & MLX4_UPDATE_QP_VSD) {
+		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+	}
+
 	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+	cmd->qp_mask = cpu_to_be64(qp_mask);
 
-	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 		       MLX4_CMD_NATIVE);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed22..5d2498dcf536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 	struct mlx4_qp_context *qpc = inbox->buf + 8;
 	struct mlx4_vport_oper_state *vp_oper;
 	struct mlx4_priv *priv;
+	u32 qp_type;
 	int port;
 
 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 	priv = mlx4_priv(dev);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
 		/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (mlx4_is_qp_reserved(dev, qpn))
 			return 0;
 
-		/* force strip vlan by clear vsd */
-		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+		if (qp_type == MLX4_QP_ST_UD ||
+		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+				*(__be32 *)inbox->buf =
+					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+					MLX4_QP_OPTPAR_VLAN_STRIPPING);
+				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+			} else {
+				struct mlx4_update_qp_params params = {.flags = 0};
+
+				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+			}
+		}
 
 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 	}
 
 	port = (rqp->sched_queue >> 6 & 1) + 1;
-	smac_index = cmd->qp_context.pri_path.grh_mylmc;
-	err = mac_find_smac_ix_in_slave(dev, slave, port,
-					smac_index, &mac);
-	if (err) {
-		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-			 qpn, smac_index);
-		goto err_mac;
+
+	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+		smac_index = cmd->qp_context.pri_path.grh_mylmc;
+		err = mac_find_smac_ix_in_slave(dev, slave, port,
+						smac_index, &mac);
+
+		if (err) {
+			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+				 qpn, smac_index);
+			goto err_mac;
+		}
 	}
 
 	err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5020fd47825d..2f12c88c66ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 	int rx_head = priv->rx_head;
 	int rx = 0;
 
-	while (1) {
+	while (rx < budget) {
 		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
 		desc0 = readl(desc + RX_REG_OFFSET_DESC0);
 
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 			net_dbg_ratelimited("packet error\n");
 			priv->stats.rx_dropped++;
 			priv->stats.rx_errors++;
-			continue;
+			goto rx_next;
 		}
 
 		len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (len > RX_BUF_SIZE)
 			len = RX_BUF_SIZE;
 
-		skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+		dma_sync_single_for_cpu(&ndev->dev,
+					priv->rx_mapping[rx_head],
+					priv->rx_buf_size, DMA_FROM_DEVICE);
+		skb = netdev_alloc_skb_ip_align(ndev, len);
+
 		if (unlikely(!skb)) {
-			net_dbg_ratelimited("build_skb failed\n");
+			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
 			priv->stats.rx_dropped++;
 			priv->stats.rx_errors++;
+			goto rx_next;
 		}
 
+		memcpy(skb->data, priv->rx_buf[rx_head], len);
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, ndev);
 		napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (desc0 & RX_DESC0_MULTICAST)
 			priv->stats.multicast++;
 
+rx_next:
 		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
 		rx_head = RX_NEXT(rx_head);
 		priv->rx_head = rx_head;
-
-		if (rx >= budget)
-			break;
 	}
 
 	if (rx < budget) {
-		napi_gro_flush(napi, false);
-		__napi_complete(napi);
+		napi_complete(napi);
 	}
 
 	priv->reg_imr |= RPKT_FINISH_M;
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		len = ETH_ZLEN;
 	}
 
-	txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
-	txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
-	txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
-	txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+	dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+				   priv->tx_buf_size, DMA_TO_DEVICE);
+
+	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+	if (tx_head == TX_DESC_NUM_MASK)
+		txdes1 |= TX_DESC1_END;
 	writel(txdes1, desc + TX_REG_OFFSET_DESC1);
 	writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->txlock);
 
 	priv->tx_buf_size = TX_BUF_SIZE;
-	priv->rx_buf_size = RX_BUF_SIZE +
-			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	priv->rx_buf_size = RX_BUF_SIZE;
 
 	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
 						TX_DESC_NUM, &priv->tx_base,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8706c0dbd0c3..a44a03c45014 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
 
 	__lpc_eth_clock_enable(pldat, true);
 
+	/* Suspended PHY makes LPC ethernet core block, so resume now */
+	phy_resume(pldat->phy_dev);
+
 	/* Reset and initialize */
 	__lpc_eth_reset(pldat);
 	__lpc_eth_init(pldat);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 979c6980639f..a42293092ea4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 		/* Read the hardware TX timestamp if one was recorded */
 		if (unlikely(re.s.tstamp)) {
 			struct skb_shared_hwtstamps ts;
+			u64 ns;
+
 			memset(&ts, 0, sizeof(ts));
 			/* Read the timestamp */
-			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
 			/* Remove the timestamp from the FIFO */
 			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
 			/* Tell the kernel about the timestamp */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 44c8be1c6805..5f7a35212796 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
 	depends on PCI && (X86_32 || COMPILE_TEST)
 	select MII
 	select PTP_1588_CLOCK_PCH
+	select NET_PTP_CLASSIFY
 	---help---
 	  This is a gigabit ethernet driver for EG20T PCH.
 	  EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index db4280ce9c09..716fc37ada5a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -922,7 +922,7 @@ int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
 
 	rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 	if (rv != 0) {
-		printk(KERN_ERR "%s: could not notify %s IP 0x%x reuqest\n",
+		printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
 		       adapter->netdev->name,
 		       (cmd == NX_IP_UP) ? "Add" : "Remove", ip);
 	}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 32058614151a..5c4068353f66 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,6 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 	int i, j;
 	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
 
+	spin_lock(&adapter->tx_clean_lock);
 	cmd_buf = tx_ring->cmd_buf_arr;
 	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
@@ -158,6 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 		}
 		cmd_buf++;
 	}
+	spin_unlock(&adapter->tx_clean_lock);
 }
 
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
@@ -1792,9 +1794,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 			break;
 	}
 
-	if (count && netif_running(netdev)) {
-		tx_ring->sw_consumer = sw_consumer;
+	tx_ring->sw_consumer = sw_consumer;
 
+	if (count && netif_running(netdev)) {
 		smp_mb();
 
 		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 1159031f885b..5ec5a2b0e989 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1186,7 +1186,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 		return;
 
 	smp_mb();
-	spin_lock(&adapter->tx_clean_lock);
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
 
@@ -1204,7 +1203,6 @@ __netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
 	netxen_napi_disable(adapter);
 
 	netxen_release_tx_buffers(adapter);
-	spin_unlock(&adapter->tx_clean_lock);
 }
 
 /* Usage: During suspend and firmware recovery module */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 86783e1afcf7..3172cdf591fe 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1177,9 +1177,8 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
 {
 	u32 idc_params, val;
 
-	if (qlcnic_83xx_lockless_flash_read32(adapter,
-					      QLC_83XX_IDC_FLASH_PARAM_ADDR,
-					      (u8 *)&idc_params, 1)) {
+	if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+				     (u8 *)&idc_params, 1)) {
 		dev_info(&adapter->pdev->dev,
 			 "%s:failed to get IDC params from flash\n", __func__);
 		adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 141f116eb868..494e8105adee 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1333,21 +1333,21 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
 	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_esw_statistics port_stats;
 	struct qlcnic_mac_statistics mac_stats;
-	int index, ret, length, size, tx_size, ring;
+	int index, ret, length, size, ring;
 	char *p;
 
-	tx_size = adapter->drv_tx_rings * QLCNIC_TX_STATS_LEN;
+	memset(data, 0, stats->n_stats * sizeof(u64));
 
-	memset(data, 0, tx_size * sizeof(u64));
 	for (ring = 0, index = 0; ring < adapter->drv_tx_rings; ring++) {
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+		if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
 			tx_ring = &adapter->tx_ring[ring];
 			data = qlcnic_fill_tx_queue_stats(data, tx_ring);
 			qlcnic_update_stats(adapter);
+		} else {
+			data += QLCNIC_TX_STATS_LEN;
 		}
 	}
 
-	memset(data, 0, stats->n_stats * sizeof(u64));
 	length = QLCNIC_STATS_LEN;
 	for (index = 0; index < length; index++) {
 		p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 851cb4a80d50..03cd4c3d7835 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -941,7 +941,7 @@ void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
 	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
 	if (rv != 0)
 		dev_err(&adapter->netdev->dev,
-			"could not notify %s IP 0x%x reuqest\n",
+			"could not notify %s IP 0x%x request\n",
 			(cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
 }
 
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 188626e2a861..3e96f269150d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 
 	if (skb_is_gso(skb)) {
 		int err;
+		__be16 l3_proto = vlan_get_protocol(skb);
 
 		err = skb_cow_head(skb, 0);
 		if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
-		if (likely(skb->protocol == htons(ETH_P_IP))) {
+		if (likely(l3_proto == htons(ETH_P_IP))) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->check = 0;
 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 							 iph->daddr, 0,
 							 IPPROTO_TCP,
 							 0);
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
 			tcp_hdr(skb)->check =
 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7235e4..0921302553c6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
 				    netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	netdev_features_t changed = features ^ dev->features;
 	void __iomem *ioaddr = tp->mmio_addr;
+	u32 rx_config;
 
-	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-			 NETIF_F_HW_VLAN_CTAG_RX)))
-		return;
+	rx_config = RTL_R32(RxConfig);
+	if (features & NETIF_F_RXALL)
+		rx_config |= (AcceptErr | AcceptRunt);
+	else
+		rx_config &= ~(AcceptErr | AcceptRunt);
 
-	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-		if (features & NETIF_F_RXCSUM)
-			tp->cp_cmd |= RxChkSum;
-		else
-			tp->cp_cmd &= ~RxChkSum;
+	RTL_W32(RxConfig, rx_config);
 
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-			tp->cp_cmd |= RxVlan;
-		else
-			tp->cp_cmd &= ~RxVlan;
+	if (features & NETIF_F_RXCSUM)
+		tp->cp_cmd |= RxChkSum;
+	else
+		tp->cp_cmd &= ~RxChkSum;
 
-		RTL_W16(CPlusCmd, tp->cp_cmd);
-		RTL_R16(CPlusCmd);
-	}
-	if (changed & NETIF_F_RXALL) {
-		int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-		if (features & NETIF_F_RXALL)
-			tmp |= (AcceptErr | AcceptRunt);
-		RTL_W32(RxConfig, tmp);
-	}
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		tp->cp_cmd |= RxVlan;
+	else
+		tp->cp_cmd &= ~RxVlan;
+
+	tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+	RTL_W16(CPlusCmd, tp->cp_cmd);
+	RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
+	features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
 	rtl_lock_work(tp);
-	__rtl8169_set_features(dev, features);
+	if (features ^ dev->features)
+		__rtl8169_set_features(dev, features);
 	rtl_unlock_work(tp);
 
 	return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
 	}
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 	const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = RxChkSum;
+	tp->cp_cmd = 0;
 
 	if ((sizeof(dma_addr_t) > 4) &&
 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		tp->cp_cmd |= RxVlan;
-
 	rtl_init_mdio_ops(tp);
 	rtl_init_pll_power_ops(tp);
 	rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
+	tp->cp_cmd |= RxChkSum | RxVlan;
+
+	/*
+	 * Pretend we are using VLANs; This bypasses a nasty bug where
+	 * Interrupts stop flowing on high load on 8110SCd controllers.
+	 */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
+		/* Disallow toggling */
 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (tp->txd_version == RTL_TD_0)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9e757c792d84..196e98a2d93b 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -5,6 +5,7 @@
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
 	depends on HAS_DMA
+	depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
 	select CRC32
 	select MII
 	select MDIO_BITBANG
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 0537381cd2f6..6859437b59fb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
2933 u32 crc; 2933 u32 crc;
2934 int bit; 2934 int bit;
2935 2935
2936 if (!efx_dev_registered(efx))
2937 return;
2938
2936 netif_addr_lock_bh(net_dev); 2939 netif_addr_lock_bh(net_dev);
2937 2940
2938 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC); 2941 efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index c553f6b5a913..cf28daba4346 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
47 47
48 desc->des2 = dma_map_single(priv->device, skb->data, 48 desc->des2 = dma_map_single(priv->device, skb->data,
49 bmax, DMA_TO_DEVICE); 49 bmax, DMA_TO_DEVICE);
50 priv->tx_skbuff_dma[entry] = desc->des2; 50 if (dma_mapping_error(priv->device, desc->des2))
51 return -1;
52 priv->tx_skbuff_dma[entry].buf = desc->des2;
51 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); 53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
52 54
53 while (len != 0) { 55 while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 desc->des2 = dma_map_single(priv->device, 61 desc->des2 = dma_map_single(priv->device,
60 (skb->data + bmax * i), 62 (skb->data + bmax * i),
61 bmax, DMA_TO_DEVICE); 63 bmax, DMA_TO_DEVICE);
62 priv->tx_skbuff_dma[entry] = desc->des2; 64 if (dma_mapping_error(priv->device, desc->des2))
65 return -1;
66 priv->tx_skbuff_dma[entry].buf = desc->des2;
63 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, 67 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
64 STMMAC_CHAIN_MODE); 68 STMMAC_CHAIN_MODE);
65 priv->hw->desc->set_tx_owner(desc); 69 priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
69 desc->des2 = dma_map_single(priv->device, 73 desc->des2 = dma_map_single(priv->device,
70 (skb->data + bmax * i), len, 74 (skb->data + bmax * i), len,
71 DMA_TO_DEVICE); 75 DMA_TO_DEVICE);
72 priv->tx_skbuff_dma[entry] = desc->des2; 76 if (dma_mapping_error(priv->device, desc->des2))
77 return -1;
78 priv->tx_skbuff_dma[entry].buf = desc->des2;
73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 79 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_CHAIN_MODE); 80 STMMAC_CHAIN_MODE);
75 priv->hw->desc->set_tx_owner(desc); 81 priv->hw->desc->set_tx_owner(desc);
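Every dma_map_single() in the chain-mode jumbo path is now followed by a dma_mapping_error() check, and jumbo_frm() returns int so that -1 can report the failure to the caller. The recurring pattern, condensed:

	desc->des2 = dma_map_single(priv->device, skb->data + bmax * i,
				    bmax, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;			/* caller drops the skb */
	priv->tx_skbuff_dma[entry].buf = desc->des2;
	priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE);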
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index de507c32036c..593e6c4144a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -220,10 +220,10 @@ enum dma_irq_status {
220 handle_tx = 0x8, 220 handle_tx = 0x8,
221}; 221};
222 222
223#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) 223#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
224#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) 224#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
225#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) 225#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
226#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) 226#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
227 227
228#define CORE_PCS_ANE_COMPLETE (1 << 5) 228#define CORE_PCS_ANE_COMPLETE (1 << 5)
229#define CORE_PCS_LINK_STATUS (1 << 6) 229#define CORE_PCS_LINK_STATUS (1 << 6)
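The LPI interrupt flags are shifted down by one, packing the software status word from bit 0. A hypothetical decode helper (not in the patch) shows the corrected layout:

static void decode_core_irq(u32 status)		/* hypothetical helper */
{
	if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)	/* now bit 0 */
		pr_debug("TX path entered LPI\n");
	if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)	/* now bit 1 */
		pr_debug("TX path left LPI\n");
	if (status & CORE_IRQ_RX_PATH_IN_LPI_MODE)	/* now bit 2 */
		pr_debug("RX path entered LPI\n");
	if (status & CORE_IRQ_RX_PATH_EXIT_LPI_MODE)	/* now bit 3 */
		pr_debug("RX path left LPI\n");
}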
@@ -287,7 +287,7 @@ struct dma_features {
287 287
288/* Default LPI timers */ 288/* Default LPI timers */
289#define STMMAC_DEFAULT_LIT_LS 0x3E8 289#define STMMAC_DEFAULT_LIT_LS 0x3E8
290#define STMMAC_DEFAULT_TWT_LS 0x0 290#define STMMAC_DEFAULT_TWT_LS 0x1E
291 291
292#define STMMAC_CHAIN_MODE 0x1 292#define STMMAC_CHAIN_MODE 0x1
293#define STMMAC_RING_MODE 0x2 293#define STMMAC_RING_MODE 0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
425 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, 425 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
426 unsigned int extend_desc); 426 unsigned int extend_desc);
427 unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 427 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
428 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); 428 int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
429 int (*set_16kib_bfsize)(int mtu); 429 int (*set_16kib_bfsize)(int mtu);
430 void (*init_desc3)(struct dma_desc *p); 430 void (*init_desc3)(struct dma_desc *p);
431 void (*refill_desc3) (void *priv, struct dma_desc *p); 431 void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
445 int multicast_filter_bins; 445 int multicast_filter_bins;
446 int unicast_filter_entries; 446 int unicast_filter_entries;
447 int mcast_bits_log2; 447 int mcast_bits_log2;
448 unsigned int rx_csum;
448}; 449};
449 450
450struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 451struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 71b5419256c1..64d8f56a9c17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -153,7 +153,7 @@ enum inter_frame_gap {
153#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 153#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
154 154
155#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ 155#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
156 GMAC_CONTROL_BE) 156 GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
157 157
158/* GMAC Frame Filter defines */ 158/* GMAC Frame Filter defines */
159#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ 159#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d8ef18786a1c..5efe60ea6526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
58 void __iomem *ioaddr = hw->pcsr; 58 void __iomem *ioaddr = hw->pcsr;
59 u32 value = readl(ioaddr + GMAC_CONTROL); 59 u32 value = readl(ioaddr + GMAC_CONTROL);
60 60
61 value |= GMAC_CONTROL_IPC; 61 if (hw->rx_csum)
62 value |= GMAC_CONTROL_IPC;
63 else
64 value &= ~GMAC_CONTROL_IPC;
65
62 writel(value, ioaddr + GMAC_CONTROL); 66 writel(value, ioaddr + GMAC_CONTROL);
63 67
64 value = readl(ioaddr + GMAC_CONTROL); 68 value = readl(ioaddr + GMAC_CONTROL);
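dwmac1000_rx_ipc_enable() now honours the cached hw->rx_csum state instead of unconditionally setting GMAC_CONTROL_IPC; that is what makes the offload toggleable at runtime. The read-modify-write, condensed:

	u32 value = readl(ioaddr + GMAC_CONTROL);

	if (hw->rx_csum)
		value |= GMAC_CONTROL_IPC;	/* hw verifies IP checksums */
	else
		value &= ~GMAC_CONTROL_IPC;	/* leave checksums to the stack */

	writel(value, ioaddr + GMAC_CONTROL);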
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 8607488cbcfc..192c2491330b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -68,7 +68,7 @@ struct stmmac_counters {
68 unsigned int mmc_rx_octetcount_g; 68 unsigned int mmc_rx_octetcount_g;
69 unsigned int mmc_rx_broadcastframe_g; 69 unsigned int mmc_rx_broadcastframe_g;
70 unsigned int mmc_rx_multicastframe_g; 70 unsigned int mmc_rx_multicastframe_g;
71 unsigned int mmc_rx_crc_errror; 71 unsigned int mmc_rx_crc_error;
72 unsigned int mmc_rx_align_error; 72 unsigned int mmc_rx_align_error;
73 unsigned int mmc_rx_run_error; 73 unsigned int mmc_rx_run_error;
74 unsigned int mmc_rx_jabber_error; 74 unsigned int mmc_rx_jabber_error;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..08c483bd2ec7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); 196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); 197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); 198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
199 mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); 199 mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); 200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); 201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); 202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 650a4be6bce5..5dd50c6cda5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -28,7 +28,7 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
53 53
54 desc->des2 = dma_map_single(priv->device, skb->data, 54 desc->des2 = dma_map_single(priv->device, skb->data,
55 bmax, DMA_TO_DEVICE); 55 bmax, DMA_TO_DEVICE);
56 priv->tx_skbuff_dma[entry] = desc->des2; 56 if (dma_mapping_error(priv->device, desc->des2))
57 return -1;
58
59 priv->tx_skbuff_dma[entry].buf = desc->des2;
57 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 60 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
58 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, 61 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
59 STMMAC_RING_MODE); 62 STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
68 71
69 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 72 desc->des2 = dma_map_single(priv->device, skb->data + bmax,
70 len, DMA_TO_DEVICE); 73 len, DMA_TO_DEVICE);
71 priv->tx_skbuff_dma[entry] = desc->des2; 74 if (dma_mapping_error(priv->device, desc->des2))
75 return -1;
76 priv->tx_skbuff_dma[entry].buf = desc->des2;
72 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 77 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 78 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_RING_MODE); 79 STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
77 } else { 82 } else {
78 desc->des2 = dma_map_single(priv->device, skb->data, 83 desc->des2 = dma_map_single(priv->device, skb->data,
79 nopaged_len, DMA_TO_DEVICE); 84 nopaged_len, DMA_TO_DEVICE);
80 priv->tx_skbuff_dma[entry] = desc->des2; 85 if (dma_mapping_error(priv->device, desc->des2))
86 return -1;
87 priv->tx_skbuff_dma[entry].buf = desc->des2;
81 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 88 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
82 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, 89 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
83 STMMAC_RING_MODE); 90 STMMAC_RING_MODE);
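In ring mode a jumbo frame is carried by the two buffer pointers of a single descriptor: des3 is simply des2 advanced by BUF_SIZE_4KiB into the same mapping, so one dma_map_single() call covers both halves of the split. Condensed, with the new error check:

	desc->des2 = dma_map_single(priv->device, skb->data,
				    bmax, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, desc->des2))
		return -1;

	priv->tx_skbuff_dma[entry].buf = desc->des2;
	desc->des3 = desc->des2 + BUF_SIZE_4KiB;	/* second buffer, same map */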
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ca01035634a7..58097c0e2ad5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,11 @@
34#include <linux/ptp_clock_kernel.h> 34#include <linux/ptp_clock_kernel.h>
35#include <linux/reset.h> 35#include <linux/reset.h>
36 36
37struct stmmac_tx_info {
38 dma_addr_t buf;
39 bool map_as_page;
40};
41
37struct stmmac_priv { 42struct stmmac_priv {
38 /* Frequently used values are kept adjacent for cache effect */ 43 /* Frequently used values are kept adjacent for cache effect */
39 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; 44 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
45 u32 tx_count_frames; 50 u32 tx_count_frames;
46 u32 tx_coal_frames; 51 u32 tx_coal_frames;
47 u32 tx_coal_timer; 52 u32 tx_coal_timer;
48 dma_addr_t *tx_skbuff_dma; 53 struct stmmac_tx_info *tx_skbuff_dma;
49 dma_addr_t dma_tx_phy; 54 dma_addr_t dma_tx_phy;
50 int tx_coalesce; 55 int tx_coalesce;
51 int hwts_tx_en; 56 int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
105 struct ptp_clock *ptp_clock; 110 struct ptp_clock *ptp_clock;
106 struct ptp_clock_info ptp_clock_ops; 111 struct ptp_clock_info ptp_clock_ops;
107 unsigned int default_addend; 112 unsigned int default_addend;
113 struct clk *clk_ptp_ref;
114 unsigned int clk_ptp_rate;
108 u32 adv_ts; 115 u32 adv_ts;
109 int use_riwt; 116 int use_riwt;
110 int irq_wake; 117 int irq_wake;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9af50bae4dde..cf4f38db1c0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
175 STMMAC_MMC_STAT(mmc_rx_octetcount_g), 175 STMMAC_MMC_STAT(mmc_rx_octetcount_g),
176 STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), 176 STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
177 STMMAC_MMC_STAT(mmc_rx_multicastframe_g), 177 STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
178 STMMAC_MMC_STAT(mmc_rx_crc_errror), 178 STMMAC_MMC_STAT(mmc_rx_crc_error),
179 STMMAC_MMC_STAT(mmc_rx_align_error), 179 STMMAC_MMC_STAT(mmc_rx_align_error),
180 STMMAC_MMC_STAT(mmc_rx_run_error), 180 STMMAC_MMC_STAT(mmc_rx_run_error),
181 STMMAC_MMC_STAT(mmc_rx_jabber_error), 181 STMMAC_MMC_STAT(mmc_rx_jabber_error),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 08addd653728..b0c1521e08a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
275 */ 275 */
276bool stmmac_eee_init(struct stmmac_priv *priv) 276bool stmmac_eee_init(struct stmmac_priv *priv)
277{ 277{
278 char *phy_bus_name = priv->plat->phy_bus_name;
278 bool ret = false; 279 bool ret = false;
279 280
280	 /* Using PCS we cannot deal with the phy registers at this stage 281	 /* Using PCS we cannot deal with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
284 (priv->pcs == STMMAC_PCS_RTBI)) 285 (priv->pcs == STMMAC_PCS_RTBI))
285 goto out; 286 goto out;
286 287
288	 /* Never init EEE in case a switch is attached */
289 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
290 goto out;
291
287 /* MAC core supports the EEE feature. */ 292 /* MAC core supports the EEE feature. */
288 if (priv->dma_cap.eee) { 293 if (priv->dma_cap.eee) {
289 int tx_lpi_timer = priv->tx_lpi_timer; 294 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
316 priv->hw->mac->set_eee_timer(priv->hw, 321 priv->hw->mac->set_eee_timer(priv->hw,
317 STMMAC_DEFAULT_LIT_LS, 322 STMMAC_DEFAULT_LIT_LS,
318 tx_lpi_timer); 323 tx_lpi_timer);
319 } else 324 }
320 /* Set HW EEE according to the speed */ 325 /* Set HW EEE according to the speed */
321 priv->hw->mac->set_eee_pls(priv->hw, 326 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
322 priv->phydev->link);
323 327
324 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); 328 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
325 329
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
603 /* calculate default added value: 607 /* calculate default added value:
604 * formula is : 608 * formula is :
605 * addend = (2^32)/freq_div_ratio; 609 * addend = (2^32)/freq_div_ratio;
606 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz 610 * where, freq_div_ratio = clk_ptp_ref_i/50MHz
607 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; 611 * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
608 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to 612 * NOTE: clk_ptp_ref_i should be >= 50MHz to
609	 * achieve 20ns accuracy. 613	 * achieve 20ns accuracy.
610 * 614 *
611 * 2^x * y == (y << x), hence 615 * 2^x * y == (y << x), hence
612 * 2^32 * 50000000 ==> (50000000 << 32) 616 * 2^32 * 50000000 ==> (50000000 << 32)
613 */ 617 */
614 temp = (u64) (50000000ULL << 32); 618 temp = (u64) (50000000ULL << 32);
615 priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); 619 priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
616 priv->hw->ptp->config_addend(priv->ioaddr, 620 priv->hw->ptp->config_addend(priv->ioaddr,
617 priv->default_addend); 621 priv->default_addend);
618 622
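With the fixed STMMAC_SYSCLOCK constant gone, the addend is derived from the real reference clock. A worked example, assuming a 62.5 MHz reference (the old hard-coded default):

	/* addend = (2^32 * 50 MHz) / clk_ptp_rate */
	u64 temp = 50000000ULL << 32;
	u32 addend = div_u64(temp, 62500000);	/* 2^32 * 0.8 = 0xCCCCCCCC */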
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
638 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 642 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
639 return -EOPNOTSUPP; 643 return -EOPNOTSUPP;
640 644
645	 /* Fall back to the main clock if no PTP ref clock is passed */
646 priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
647 if (IS_ERR(priv->clk_ptp_ref)) {
648 priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
649 priv->clk_ptp_ref = NULL;
650 } else {
651 clk_prepare_enable(priv->clk_ptp_ref);
652 priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
653 }
654
641 priv->adv_ts = 0; 655 priv->adv_ts = 0;
642 if (priv->dma_cap.atime_stamp && priv->extend_desc) 656 if (priv->dma_cap.atime_stamp && priv->extend_desc)
643 priv->adv_ts = 1; 657 priv->adv_ts = 1;
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
657 671
658static void stmmac_release_ptp(struct stmmac_priv *priv) 672static void stmmac_release_ptp(struct stmmac_priv *priv)
659{ 673{
674 if (priv->clk_ptp_ref)
675 clk_disable_unprepare(priv->clk_ptp_ref);
660 stmmac_ptp_unregister(priv); 676 stmmac_ptp_unregister(priv);
661} 677}
662 678
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
1061 else 1077 else
1062 p = priv->dma_tx + i; 1078 p = priv->dma_tx + i;
1063 p->des2 = 0; 1079 p->des2 = 0;
1064 priv->tx_skbuff_dma[i] = 0; 1080 priv->tx_skbuff_dma[i].buf = 0;
1081 priv->tx_skbuff_dma[i].map_as_page = false;
1065 priv->tx_skbuff[i] = NULL; 1082 priv->tx_skbuff[i] = NULL;
1066 } 1083 }
1067 1084
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1100 else 1117 else
1101 p = priv->dma_tx + i; 1118 p = priv->dma_tx + i;
1102 1119
1103 if (priv->tx_skbuff_dma[i]) { 1120 if (priv->tx_skbuff_dma[i].buf) {
1104 dma_unmap_single(priv->device, 1121 if (priv->tx_skbuff_dma[i].map_as_page)
1105 priv->tx_skbuff_dma[i], 1122 dma_unmap_page(priv->device,
1106 priv->hw->desc->get_tx_len(p), 1123 priv->tx_skbuff_dma[i].buf,
1107 DMA_TO_DEVICE); 1124 priv->hw->desc->get_tx_len(p),
1108 priv->tx_skbuff_dma[i] = 0; 1125 DMA_TO_DEVICE);
1126 else
1127 dma_unmap_single(priv->device,
1128 priv->tx_skbuff_dma[i].buf,
1129 priv->hw->desc->get_tx_len(p),
1130 DMA_TO_DEVICE);
1109 } 1131 }
1110 1132
1111 if (priv->tx_skbuff[i] != NULL) { 1133 if (priv->tx_skbuff[i] != NULL) {
1112 dev_kfree_skb_any(priv->tx_skbuff[i]); 1134 dev_kfree_skb_any(priv->tx_skbuff[i]);
1113 priv->tx_skbuff[i] = NULL; 1135 priv->tx_skbuff[i] = NULL;
1136 priv->tx_skbuff_dma[i].buf = 0;
1137 priv->tx_skbuff_dma[i].map_as_page = false;
1114 } 1138 }
1115 } 1139 }
1116} 1140}
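dma_free_tx_skbufs() and stmmac_tx_clean() now branch on map_as_page to pick the unmap call that matches how the buffer was mapped. A hypothetical helper (the patch open-codes this in both call sites) makes the shared pattern explicit:

/* Hypothetical helper; the patch open-codes this logic twice. */
static void stmmac_unmap_tx_buf(struct stmmac_priv *priv, int entry,
				struct dma_desc *p)
{
	struct stmmac_tx_info *ti = &priv->tx_skbuff_dma[entry];

	if (!ti->buf)
		return;
	if (ti->map_as_page)
		dma_unmap_page(priv->device, ti->buf,
			       priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE);
	else
		dma_unmap_single(priv->device, ti->buf,
				 priv->hw->desc->get_tx_len(p), DMA_TO_DEVICE);
	ti->buf = 0;
	ti->map_as_page = false;
}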
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1131 if (!priv->rx_skbuff) 1155 if (!priv->rx_skbuff)
1132 goto err_rx_skbuff; 1156 goto err_rx_skbuff;
1133 1157
1134 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), 1158 priv->tx_skbuff_dma = kmalloc_array(txsize,
1159 sizeof(*priv->tx_skbuff_dma),
1135 GFP_KERNEL); 1160 GFP_KERNEL);
1136 if (!priv->tx_skbuff_dma) 1161 if (!priv->tx_skbuff_dma)
1137 goto err_tx_skbuff_dma; 1162 goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1293 pr_debug("%s: curr %d, dirty %d\n", __func__, 1318 pr_debug("%s: curr %d, dirty %d\n", __func__,
1294 priv->cur_tx, priv->dirty_tx); 1319 priv->cur_tx, priv->dirty_tx);
1295 1320
1296 if (likely(priv->tx_skbuff_dma[entry])) { 1321 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1297 dma_unmap_single(priv->device, 1322 if (priv->tx_skbuff_dma[entry].map_as_page)
1298 priv->tx_skbuff_dma[entry], 1323 dma_unmap_page(priv->device,
1299 priv->hw->desc->get_tx_len(p), 1324 priv->tx_skbuff_dma[entry].buf,
1300 DMA_TO_DEVICE); 1325 priv->hw->desc->get_tx_len(p),
1301 priv->tx_skbuff_dma[entry] = 0; 1326 DMA_TO_DEVICE);
1327 else
1328 dma_unmap_single(priv->device,
1329 priv->tx_skbuff_dma[entry].buf,
1330 priv->hw->desc->get_tx_len(p),
1331 DMA_TO_DEVICE);
1332 priv->tx_skbuff_dma[entry].buf = 0;
1333 priv->tx_skbuff_dma[entry].map_as_page = false;
1302 } 1334 }
1303 priv->hw->mode->clean_desc3(priv, p); 1335 priv->hw->mode->clean_desc3(priv, p);
1304 1336
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
1637 /* Initialize the MAC Core */ 1669 /* Initialize the MAC Core */
1638 priv->hw->mac->core_init(priv->hw, dev->mtu); 1670 priv->hw->mac->core_init(priv->hw, dev->mtu);
1639 1671
1672 ret = priv->hw->mac->rx_ipc(priv->hw);
1673 if (!ret) {
1674 pr_warn(" RX IPC Checksum Offload disabled\n");
1675 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1676 priv->hw->rx_csum = 0;
1677 }
1678
1640 /* Enable the MAC Rx/Tx */ 1679 /* Enable the MAC Rx/Tx */
1641 stmmac_set_mac(priv->ioaddr, true); 1680 stmmac_set_mac(priv->ioaddr, true);
1642 1681
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1887 if (likely(!is_jumbo)) { 1926 if (likely(!is_jumbo)) {
1888 desc->des2 = dma_map_single(priv->device, skb->data, 1927 desc->des2 = dma_map_single(priv->device, skb->data,
1889 nopaged_len, DMA_TO_DEVICE); 1928 nopaged_len, DMA_TO_DEVICE);
1890 priv->tx_skbuff_dma[entry] = desc->des2; 1929 if (dma_mapping_error(priv->device, desc->des2))
1930 goto dma_map_err;
1931 priv->tx_skbuff_dma[entry].buf = desc->des2;
1891 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, 1932 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1892 csum_insertion, priv->mode); 1933 csum_insertion, priv->mode);
1893 } else { 1934 } else {
1894 desc = first; 1935 desc = first;
1895 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); 1936 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1937 if (unlikely(entry < 0))
1938 goto dma_map_err;
1896 } 1939 }
1897 1940
1898 for (i = 0; i < nfrags; i++) { 1941 for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1908 1951
1909 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 1952 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1910 DMA_TO_DEVICE); 1953 DMA_TO_DEVICE);
1911 priv->tx_skbuff_dma[entry] = desc->des2; 1954 if (dma_mapping_error(priv->device, desc->des2))
1955 goto dma_map_err; /* should reuse desc w/o issues */
1956
1957 priv->tx_skbuff_dma[entry].buf = desc->des2;
1958 priv->tx_skbuff_dma[entry].map_as_page = true;
1912 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, 1959 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1913 priv->mode); 1960 priv->mode);
1914 wmb(); 1961 wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1975 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2022 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1976 2023
1977 spin_unlock(&priv->tx_lock); 2024 spin_unlock(&priv->tx_lock);
2025 return NETDEV_TX_OK;
1978 2026
2027dma_map_err:
2028 dev_err(priv->device, "Tx dma map failed\n");
2029 dev_kfree_skb(skb);
2030 priv->dev->stats.tx_dropped++;
1979 return NETDEV_TX_OK; 2031 return NETDEV_TX_OK;
1980} 2032}
1981 2033
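On a mapping failure the transmit path now consumes the packet itself: free the skb, count it as dropped, and still return NETDEV_TX_OK, since returning NETDEV_TX_BUSY would make the stack requeue an skb the driver can never send. The error-path convention:

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);		/* driver owns the skb at this point */
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;		/* drop, do not ask for a requeue */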
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2028 priv->rx_skbuff_dma[entry] = 2080 priv->rx_skbuff_dma[entry] =
2029 dma_map_single(priv->device, skb->data, bfsize, 2081 dma_map_single(priv->device, skb->data, bfsize,
2030 DMA_FROM_DEVICE); 2082 DMA_FROM_DEVICE);
2031 2083 if (dma_mapping_error(priv->device,
2084 priv->rx_skbuff_dma[entry])) {
2085 dev_err(priv->device, "Rx dma map failed\n");
2086 dev_kfree_skb(skb);
2087 break;
2088 }
2032 p->des2 = priv->rx_skbuff_dma[entry]; 2089 p->des2 = priv->rx_skbuff_dma[entry];
2033 2090
2034 priv->hw->mode->refill_desc3(priv, p); 2091 priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2055 unsigned int entry = priv->cur_rx % rxsize; 2112 unsigned int entry = priv->cur_rx % rxsize;
2056 unsigned int next_entry; 2113 unsigned int next_entry;
2057 unsigned int count = 0; 2114 unsigned int count = 0;
2058 int coe = priv->plat->rx_coe; 2115 int coe = priv->hw->rx_csum;
2059 2116
2060 if (netif_msg_rx_status(priv)) { 2117 if (netif_msg_rx_status(priv)) {
2061 pr_debug("%s: descriptor ring:\n", __func__); 2118 pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2276 2333
2277 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 2334 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2278 features &= ~NETIF_F_RXCSUM; 2335 features &= ~NETIF_F_RXCSUM;
2279 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) 2336
2280 features &= ~NETIF_F_IPV6_CSUM;
2281 if (!priv->plat->tx_coe) 2337 if (!priv->plat->tx_coe)
2282 features &= ~NETIF_F_ALL_CSUM; 2338 features &= ~NETIF_F_ALL_CSUM;
2283 2339
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2292 return features; 2348 return features;
2293} 2349}
2294 2350
2351static int stmmac_set_features(struct net_device *netdev,
2352 netdev_features_t features)
2353{
2354 struct stmmac_priv *priv = netdev_priv(netdev);
2355
2356	 /* Keep the COE type when checksum offload is requested */
2357 if (features & NETIF_F_RXCSUM)
2358 priv->hw->rx_csum = priv->plat->rx_coe;
2359 else
2360 priv->hw->rx_csum = 0;
2361	 /* No need to check the return value: rx_coe was validated earlier
2362	 * and any issue has already been corrected.
2363 */
2364 priv->hw->mac->rx_ipc(priv->hw);
2365
2366 return 0;
2367}
2368
2295/** 2369/**
2296 * stmmac_interrupt - main ISR 2370 * stmmac_interrupt - main ISR
2297 * @irq: interrupt number. 2371 * @irq: interrupt number.
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
2572 .ndo_stop = stmmac_release, 2646 .ndo_stop = stmmac_release,
2573 .ndo_change_mtu = stmmac_change_mtu, 2647 .ndo_change_mtu = stmmac_change_mtu,
2574 .ndo_fix_features = stmmac_fix_features, 2648 .ndo_fix_features = stmmac_fix_features,
2649 .ndo_set_features = stmmac_set_features,
2575 .ndo_set_rx_mode = stmmac_set_rx_mode, 2650 .ndo_set_rx_mode = stmmac_set_rx_mode,
2576 .ndo_tx_timeout = stmmac_tx_timeout, 2651 .ndo_tx_timeout = stmmac_tx_timeout,
2577 .ndo_do_ioctl = stmmac_ioctl, 2652 .ndo_do_ioctl = stmmac_ioctl,
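Registering .ndo_set_features is what connects the new hw->rx_csum plumbing to ethtool: when the user toggles RX checksumming, the core calls ndo_fix_features() and then ndo_set_features(), which caches the requested state and reprograms the MAC. A condensed sketch of the handler:

static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* e.g. "ethtool -K eth0 rx off" lands here via netdev_update_features() */
	priv->hw->rx_csum = (features & NETIF_F_RXCSUM) ? priv->plat->rx_coe : 0;
	priv->hw->mac->rx_ipc(priv->hw);	/* rewrites GMAC_CONTROL_IPC */

	return 0;
}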
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
2592 */ 2667 */
2593static int stmmac_hw_init(struct stmmac_priv *priv) 2668static int stmmac_hw_init(struct stmmac_priv *priv)
2594{ 2669{
2595 int ret;
2596 struct mac_device_info *mac; 2670 struct mac_device_info *mac;
2597 2671
2598 /* Identify the MAC HW device */ 2672 /* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2649 /* To use alternate (extended) or normal descriptor structures */ 2723 /* To use alternate (extended) or normal descriptor structures */
2650 stmmac_selec_desc_mode(priv); 2724 stmmac_selec_desc_mode(priv);
2651 2725
2652 ret = priv->hw->mac->rx_ipc(priv->hw); 2726 if (priv->plat->rx_coe) {
2653 if (!ret) { 2727 priv->hw->rx_csum = priv->plat->rx_coe;
2654 pr_warn(" RX IPC Checksum Offload not configured.\n");
2655 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2656 }
2657
2658 if (priv->plat->rx_coe)
2659 pr_info(" RX Checksum Offload Engine supported (type %d)\n", 2728 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
2660 priv->plat->rx_coe); 2729 priv->plat->rx_coe);
2730 }
2661 if (priv->plat->tx_coe) 2731 if (priv->plat->tx_coe)
2662 pr_info(" TX Checksum insertion supported\n"); 2732 pr_info(" TX Checksum insertion supported\n");
2663 2733
@@ -2716,8 +2786,15 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2716 if (IS_ERR(priv->stmmac_clk)) { 2786 if (IS_ERR(priv->stmmac_clk)) {
2717 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n", 2787 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2718 __func__); 2788 __func__);
2719	 ret = PTR_ERR(priv->stmmac_clk); 2789	 /* If we fail to obtain stmmac_clk and no specific clk_csr
2720	 goto error_clk_get; 2790	 * value is passed from the platform, fail the probe.
2791 */
2792 if (!priv->plat->clk_csr) {
2793 ret = PTR_ERR(priv->stmmac_clk);
2794 goto error_clk_get;
2795 } else {
2796 priv->stmmac_clk = NULL;
2797 }
2721 } 2798 }
2722 clk_prepare_enable(priv->stmmac_clk); 2799 clk_prepare_enable(priv->stmmac_clk);
2723 2800
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b7ad3565566c..c5ee79d8a8c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
206{ 206{
207 if (priv->ptp_clock) { 207 if (priv->ptp_clock) {
208 ptp_clock_unregister(priv->ptp_clock); 208 ptp_clock_unregister(priv->ptp_clock);
209 priv->ptp_clock = NULL;
209 pr_debug("Removed PTP HW clock successfully on %s\n", 210 pr_debug("Removed PTP HW clock successfully on %s\n",
210 priv->dev->name); 211 priv->dev->name);
211 } 212 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 3dbc047622fa..4535df37c227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -25,8 +25,6 @@
25#ifndef __STMMAC_PTP_H__ 25#ifndef __STMMAC_PTP_H__
26#define __STMMAC_PTP_H__ 26#define __STMMAC_PTP_H__
27 27
28#define STMMAC_SYSCLOCK 62500000
29
30/* IEEE 1588 PTP register offsets */ 28/* IEEE 1588 PTP register offsets */
31#define PTP_TCR 0x0700 /* Timestamp Control Reg */ 29#define PTP_TCR 0x0700 /* Timestamp Control Reg */
32#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ 30#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23c89ab5a6ad..f67539650c38 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
350 if (IS_ERR(desc)) 350 if (IS_ERR(desc))
351 return PTR_ERR(desc); 351 return PTR_ERR(desc);
352 352
353 if (desc->hdr.state != VIO_DESC_READY)
354 return 1;
355
356 rmb();
357
353 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 358 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
354 desc->hdr.state, desc->hdr.ack, 359 desc->hdr.state, desc->hdr.ack,
355 desc->size, desc->ncookies, 360 desc->size, desc->ncookies,
356 desc->cookies[0].cookie_addr, 361 desc->cookies[0].cookie_addr,
357 desc->cookies[0].cookie_size); 362 desc->cookies[0].cookie_size);
358 363
359 if (desc->hdr.state != VIO_DESC_READY)
360 return 1;
361 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); 364 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
362 if (err == -ECONNRESET) 365 if (err == -ECONNRESET)
363 return err; 366 return err;
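The sunvnet fix checks the descriptor state before any payload field is used and inserts an rmb() so the state load is ordered before the subsequent reads; previously the viodbg() call could dereference a descriptor that was not yet VIO_DESC_READY. The canonical check-then-read pattern:

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	rmb();		/* order the state check before reading the payload */

	/* only now is it safe to use desc->size, desc->cookies[], ... */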
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 999fb72688d2..e2a00287f8eb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
699 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 699 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
700 700
701 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 701 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
702 bool ndev_status = false;
703 struct cpsw_slave *slave = priv->slaves;
704 int n;
705
706 if (priv->data.dual_emac) {
707 /* In dual emac mode check for all interfaces */
708 for (n = priv->data.slaves; n; n--, slave++)
709 if (netif_running(slave->ndev))
710 ndev_status = true;
711 }
712
713 if (ndev_status && (status >= 0)) {
714	 /* The packet received is for an interface which
715	 * is already down while the other interface is up
716	 * and running; instead of freeing the skb, which
717	 * would reduce the number of rx descriptors in the
718	 * DMA engine, requeue it back to cpdma.
719 */
720 new_skb = skb;
721 goto requeue;
722 }
723
702 /* the interface is going down, skbs are purged */ 724 /* the interface is going down, skbs are purged */
703 dev_kfree_skb_any(skb); 725 dev_kfree_skb_any(skb);
704 return; 726 return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
717 new_skb = skb; 739 new_skb = skb;
718 } 740 }
719 741
742requeue:
720 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, 743 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
721 skb_tailroom(new_skb), 0); 744 skb_tailroom(new_skb), 0);
722 if (WARN_ON(ret < 0)) 745 if (WARN_ON(ret < 0))
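In dual-EMAC mode both ports share one cpdma RX descriptor pool, so an skb that arrives for a downed interface while the other port is still running is requeued rather than freed: freeing it would permanently shrink the shared ring. The decision, condensed around a hypothetical any_slave_running() helper:

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		if (priv->data.dual_emac && status >= 0 &&
		    any_slave_running(priv)) {	/* hypothetical helper */
			new_skb = skb;
			goto requeue;		/* hand the buffer back to cpdma */
		}
		dev_kfree_skb_any(skb);		/* interface really going down */
		return;
	}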
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
2311 struct net_device *ndev = platform_get_drvdata(pdev); 2334 struct net_device *ndev = platform_get_drvdata(pdev);
2312 struct cpsw_priv *priv = netdev_priv(ndev); 2335 struct cpsw_priv *priv = netdev_priv(ndev);
2313 2336
2314 if (netif_running(ndev)) 2337 if (priv->data.dual_emac) {
2315 cpsw_ndo_stop(ndev); 2338 int i;
2316 2339
2317 for_each_slave(priv, soft_reset_slave); 2340 for (i = 0; i < priv->data.slaves; i++) {
2341 if (netif_running(priv->slaves[i].ndev))
2342 cpsw_ndo_stop(priv->slaves[i].ndev);
2343 soft_reset_slave(priv->slaves + i);
2344 }
2345 } else {
2346 if (netif_running(ndev))
2347 cpsw_ndo_stop(ndev);
2348 for_each_slave(priv, soft_reset_slave);
2349 }
2318 2350
2319 pm_runtime_put_sync(&pdev->dev); 2351 pm_runtime_put_sync(&pdev->dev);
2320 2352
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
2328{ 2360{
2329 struct platform_device *pdev = to_platform_device(dev); 2361 struct platform_device *pdev = to_platform_device(dev);
2330 struct net_device *ndev = platform_get_drvdata(pdev); 2362 struct net_device *ndev = platform_get_drvdata(pdev);
2363 struct cpsw_priv *priv = netdev_priv(ndev);
2331 2364
2332 pm_runtime_get_sync(&pdev->dev); 2365 pm_runtime_get_sync(&pdev->dev);
2333 2366
2334 /* Select default pin state */ 2367 /* Select default pin state */
2335 pinctrl_pm_select_default_state(&pdev->dev); 2368 pinctrl_pm_select_default_state(&pdev->dev);
2336 2369
2337 if (netif_running(ndev)) 2370 if (priv->data.dual_emac) {
2338 cpsw_ndo_open(ndev); 2371 int i;
2372
2373 for (i = 0; i < priv->data.slaves; i++) {
2374 if (netif_running(priv->slaves[i].ndev))
2375 cpsw_ndo_open(priv->slaves[i].ndev);
2376 }
2377 } else {
2378 if (netif_running(ndev))
2379 cpsw_ndo_open(ndev);
2380 }
2339 return 0; 2381 return 0;
2340} 2382}
2341 2383
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index f5fbc12d3e10..a43e8492b1ce 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2056,7 +2056,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2056 struct sk_buff *skb; 2056 struct sk_buff *skb;
2057 2057
2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2058 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name); 2059 VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
2060 stats->rx_length_errors++; 2060 stats->rx_length_errors++;
2061 return -EINVAL; 2061 return -EINVAL;
2062 } 2062 }