Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 1127
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 2
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 3
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 2
-rw-r--r--  drivers/net/ethernet/altera/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/altera/Makefile | 7
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.c | 202
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdma.h | 34
-rw-r--r--  drivers/net/ethernet/altera/altera_msgdmahw.h | 167
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.c | 509
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdma.h | 35
-rw-r--r--  drivers/net/ethernet/altera/altera_sgdmahw.h | 124
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 486
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 235
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 1543
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.c | 44
-rw-r--r--  drivers/net/ethernet/altera/altera_utils.h | 27
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 2
-rw-r--r--  drivers/net/ethernet/amd/am79c961a.c | 2
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 3
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 124
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 20
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 10
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 155
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 48
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 235
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 155
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 289
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 127
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 22
-rw-r--r--  drivers/net/ethernet/broadcom/genet/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2575
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.h | 628
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 464
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 21
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 55
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 2
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 26
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 16
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 210
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 172
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 152
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 6
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 24
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 20
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 78
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 48
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h | 6
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 325
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/Makefile | 3
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 1327
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 114
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 165
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_sysfs.c | 340
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 55
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 50
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 63
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 55
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 427
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 72
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 343
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 53
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 53
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 48
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 366
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_dcb.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 465
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 481
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 117
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 424
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 135
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 366
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 90
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 15
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 47
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 292
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 15
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 12
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 75
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_hw.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 25
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h | 14
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c | 76
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_regs.h | 36
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 15
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 76
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 159
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 61
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 33
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 360
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 200
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 181
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 45
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 141
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 139
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/regs.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 33
-rw-r--r--  drivers/net/ethernet/jme.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 153
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_selftest.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 174
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 262
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 279
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 17
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 40
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 14
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 35
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 57
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 118
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 11
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 87
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 179
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 77
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 233
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 102
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 16
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 12
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 263
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 20
-rw-r--r--  drivers/net/ethernet/sfc/ef10_regs.h | 61
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 30
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/filter.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 14
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 93
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/siena_sriov.c | 13
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 21
-rw-r--r--  drivers/net/ethernet/silan/sc92031.c | 2
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 11
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 34
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 1
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 4
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 11
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 3
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 16
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 9
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 9
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
276 files changed, 16903 insertions(+), 5632 deletions(-)
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index c53384d41c96..35df0b9e6848 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -749,7 +749,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
         spin_unlock_irqrestore(&lp->lock, flags);
 
-        dev_kfree_skb (skb);
+        dev_consume_skb_any (skb);
 
         /* Clear the Tx status stack. */
         {
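The hunk above swaps dev_kfree_skb() for dev_consume_skb_any(). dev_kfree_skb() must not be called from hard-IRQ context, while the _any variants pick a context-safe free path at run time (relevant, for example, under netpoll); the "consume" naming additionally frees the skb without firing the kfree_skb drop tracepoint, so drop-monitoring tools do not count a completed transmit as a packet drop. A minimal sketch of the idiom, assuming a hypothetical helper name (my_tx_complete is not part of this patch):

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        /* Hypothetical TX-completion helper, for illustration only. */
        static void my_tx_complete(struct sk_buff *skb, bool sent_ok)
        {
                if (sent_ok)
                        dev_consume_skb_any(skb); /* freed as consumed, not a drop */
                else
                        dev_kfree_skb_any(skb);   /* freed as a drop, hits tracepoint */
        }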
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860a39c9..063557e037f2 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
-/*======================================================================
-
-   A PCMCIA ethernet driver for the 3com 3c589 card.
-
-   Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
-   3c589_cs.c 1.162 2001/10/13 00:08:50
-
-   The network driver code is based on Donald Becker's 3c589 code:
-
-   Written 1994 by Donald Becker.
-   Copyright 1993 United States Government as represented by the
-   Director, National Security Agency. This software may be used and
-   distributed according to the terms of the GNU General Public License,
-   incorporated herein by reference.
-   Donald Becker may be reached at becker@scyld.com
-
-   Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
@@ -41,18 +42,20 @@
 #include <linux/ioport.h>
 #include <linux/bitops.h>
 #include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
 #include <pcmcia/cistpl.h>
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ciscode.h>
 #include <pcmcia/ds.h>
 
-#include <asm/uaccess.h>
-#include <asm/io.h>
 
 /* To minimize the size of the driver source I only define operating
-   constants if they are used several times. You'll need the manual
-   if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
 /* Offsets from base I/O address. */
 #define EL3_DATA  0x00
 #define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
 #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
 
 /* The top five bits written to EL3_CMD are a command, the lower
-   11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
 enum c509cmd {
         TotalReset = 0<<11,
         SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
 
 static int tc589_probe(struct pcmcia_device *link)
 {
         struct el3_private *lp;
         struct net_device *dev;
 
         dev_dbg(&link->dev, "3c589_attach()\n");
 
         /* Create new ethernet device */
         dev = alloc_etherdev(sizeof(struct el3_private));
         if (!dev)
                 return -ENOMEM;
         lp = netdev_priv(dev);
         link->priv = dev;
         lp->p_dev = link;
 
         spin_lock_init(&lp->lock);
         link->resource[0]->end = 16;
         link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
 
         link->config_flags |= CONF_ENABLE_IRQ;
         link->config_index = 1;
 
         dev->netdev_ops = &el3_netdev_ops;
         dev->watchdog_timeo = TX_TIMEOUT;
 
         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
 
         return tc589_config(link);
 }
 
 static void tc589_detach(struct pcmcia_device *link)
 {
         struct net_device *dev = link->priv;
 
         dev_dbg(&link->dev, "3c589_detach\n");
 
         unregister_netdev(dev);
 
         tc589_release(link);
 
         free_netdev(dev);
 } /* tc589_detach */
 
 static int tc589_config(struct pcmcia_device *link)
 {
         struct net_device *dev = link->priv;
         __be16 *phys_addr;
         int ret, i, j, multi = 0, fifo;
         unsigned int ioaddr;
         static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
         u8 *buf;
         size_t len;
 
         dev_dbg(&link->dev, "3c589_config\n");
 
         phys_addr = (__be16 *)dev->dev_addr;
         /* Is this a 3c562? */
         if (link->manf_id != MANFID_3COM)
                 dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
         multi = (link->card_id == PRODID_3COM_3C562);
 
         link->io_lines = 16;
 
         /* For the 3c562, the base address must be xx00-xx7f */
         for (i = j = 0; j < 0x400; j += 0x10) {
-                if (multi && (j & 0x80)) continue;
+                if (multi && (j & 0x80))
+                        continue;
                 link->resource[0]->start = j ^ 0x300;
                 i = pcmcia_request_io(link);
                 if (i == 0)
                         break;
         }
         if (i != 0)
                 goto failed;
 
         ret = pcmcia_request_irq(link, el3_interrupt);
         if (ret)
                 goto failed;
 
         ret = pcmcia_enable_device(link);
         if (ret)
                 goto failed;
 
         dev->irq = link->irq;
         dev->base_addr = link->resource[0]->start;
         ioaddr = dev->base_addr;
         EL3WINDOW(0);
 
         /* The 3c589 has an extra EEPROM for configuration info, including
-            the hardware address. The 3c562 puts the address in the CIS. */
+          * the hardware address. The 3c562 puts the address in the CIS.
+          */
         len = pcmcia_get_tuple(link, 0x88, &buf);
         if (buf && len >= 6) {
                 for (i = 0; i < 3; i++)
                         phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
                 kfree(buf);
         } else {
                 kfree(buf); /* 0 < len < 6 */
                 for (i = 0; i < 3; i++)
                         phys_addr[i] = htons(read_eeprom(ioaddr, i));
                 if (phys_addr[0] == htons(0x6060)) {
                         dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
                                 dev->base_addr, dev->base_addr+15);
                         goto failed;
                 }
         }
 
         /* The address and resource configuration register aren't loaded from
-            the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
+          * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+          */
+
         outw(0x3f00, ioaddr + 8);
         fifo = inl(ioaddr);
 
         /* The if_port symbol can be set when the module is loaded */
         if ((if_port >= 0) && (if_port <= 3))
                 dev->if_port = if_port;
         else
                 dev_err(&link->dev, "invalid if_port requested\n");
 
         SET_NETDEV_DEV(dev, &link->dev);
 
         if (register_netdev(dev) != 0) {
                 dev_err(&link->dev, "register_netdev() failed\n");
                 goto failed;
         }
 
         netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
                     (multi ? "562" : "589"), dev->base_addr, dev->irq,
                     dev->dev_addr);
         netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
                     (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
                     if_names[dev->if_port]);
         return 0;
 
 failed:
         tc589_release(link);
         return -ENODEV;
 } /* tc589_config */
 
 static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
 
 /*====================================================================*/
 
-/*
-  Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
 static void tc589_wait_for_completion(struct net_device *dev, int cmd)
 {
         int i = 100;
         outw(cmd, dev->base_addr + EL3_CMD);
         while (--i > 0)
-                if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
+                if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+                        break;
         if (i == 0)
                 netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
 }
 
-/*
-  Read a word from the EEPROM using the regular EEPROM access register.
-  Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
 static u16 read_eeprom(unsigned int ioaddr, int index)
 {
         int i;
         outw(EEPROM_READ + index, ioaddr + 10);
         /* Reading the eeprom takes 162 us */
         for (i = 1620; i >= 0; i--)
                 if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
                         break;
         return inw(ioaddr + 12);
 }
 
-/*
-  Set transceiver type, perhaps to something other than what the user
-  specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
 static void tc589_set_xcvr(struct net_device *dev, int if_port)
 {
         struct el3_private *lp = netdev_priv(dev);
         unsigned int ioaddr = dev->base_addr;
 
         EL3WINDOW(0);
         switch (if_port) {
-        case 0: case 1: outw(0, ioaddr + 6); break;
-        case 2: outw(3<<14, ioaddr + 6); break;
-        case 3: outw(1<<14, ioaddr + 6); break;
+        case 0:
+        case 1:
+                outw(0, ioaddr + 6);
+                break;
+        case 2:
+                outw(3<<14, ioaddr + 6);
+                break;
+        case 3:
+                outw(1<<14, ioaddr + 6);
+                break;
         }
         /* On PCMCIA, this just turns on the LED */
         outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
         /* 10baseT interface, enable link beat and jabber check. */
         EL3WINDOW(4);
         outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
         EL3WINDOW(1);
         if (if_port == 2)
                 lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
         else
                 lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
 }
 
 static void dump_status(struct net_device *dev)
 {
         unsigned int ioaddr = dev->base_addr;
         EL3WINDOW(1);
         netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
                     inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
                     inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
         EL3WINDOW(4);
         netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
                     inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
                     inw(ioaddr+0x0a));
         EL3WINDOW(1);
 }
 
 /* Reset and restore all of the 3c589 registers. */
 static void tc589_reset(struct net_device *dev)
 {
         unsigned int ioaddr = dev->base_addr;
         int i;
 
         EL3WINDOW(0);
         outw(0x0001, ioaddr + 4); /* Activate board. */
         outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
 
         /* Set the station address in window 2. */
         EL3WINDOW(2);
         for (i = 0; i < 6; i++)
                 outb(dev->dev_addr[i], ioaddr + i);
 
         tc589_set_xcvr(dev, dev->if_port);
 
         /* Switch to the stats window, and clear all stats by reading. */
         outw(StatsDisable, ioaddr + EL3_CMD);
         EL3WINDOW(6);
         for (i = 0; i < 9; i++)
                 inb(ioaddr+i);
         inw(ioaddr + 10);
         inw(ioaddr + 12);
 
         /* Switch to register set 1 for normal use. */
         EL3WINDOW(1);
 
         set_rx_mode(dev);
         outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
         outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
         outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
         /* Allow status bits to be seen. */
         outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
         /* Ack all pending events, and set active indicator mask. */
         outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
              ioaddr + EL3_CMD);
         outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
              | AdapterFailure, ioaddr + EL3_CMD);
 }
 
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
478 494
479static int el3_config(struct net_device *dev, struct ifmap *map) 495static int el3_config(struct net_device *dev, struct ifmap *map)
480{ 496{
481 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { 497 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
482 if (map->port <= 3) { 498 if (map->port <= 3) {
483 dev->if_port = map->port; 499 dev->if_port = map->port;
484 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); 500 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
485 tc589_set_xcvr(dev, dev->if_port); 501 tc589_set_xcvr(dev, dev->if_port);
486 } else 502 } else {
487 return -EINVAL; 503 return -EINVAL;
488 } 504 }
489 return 0; 505 }
506 return 0;
490} 507}
491 508
492static int el3_open(struct net_device *dev) 509static int el3_open(struct net_device *dev)
493{ 510{
494 struct el3_private *lp = netdev_priv(dev); 511 struct el3_private *lp = netdev_priv(dev);
495 struct pcmcia_device *link = lp->p_dev; 512 struct pcmcia_device *link = lp->p_dev;
496 513
497 if (!pcmcia_dev_present(link)) 514 if (!pcmcia_dev_present(link))
498 return -ENODEV; 515 return -ENODEV;
499 516
500 link->open++; 517 link->open++;
501 netif_start_queue(dev); 518 netif_start_queue(dev);
502 519
503 tc589_reset(dev); 520 tc589_reset(dev);
504 init_timer(&lp->media); 521 init_timer(&lp->media);
505 lp->media.function = media_check; 522 lp->media.function = media_check;
506 lp->media.data = (unsigned long) dev; 523 lp->media.data = (unsigned long) dev;
507 lp->media.expires = jiffies + HZ; 524 lp->media.expires = jiffies + HZ;
508 add_timer(&lp->media); 525 add_timer(&lp->media);
509 526
510 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", 527 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
511 dev->name, inw(dev->base_addr + EL3_STATUS)); 528 dev->name, inw(dev->base_addr + EL3_STATUS));
512 529
513 return 0; 530 return 0;
514} 531}
515 532
516static void el3_tx_timeout(struct net_device *dev) 533static void el3_tx_timeout(struct net_device *dev)
517{ 534{
518 unsigned int ioaddr = dev->base_addr; 535 unsigned int ioaddr = dev->base_addr;
519 536
520 netdev_warn(dev, "Transmit timed out!\n"); 537 netdev_warn(dev, "Transmit timed out!\n");
521 dump_status(dev); 538 dump_status(dev);
522 dev->stats.tx_errors++; 539 dev->stats.tx_errors++;
523 dev->trans_start = jiffies; /* prevent tx timeout */ 540 dev->trans_start = jiffies; /* prevent tx timeout */
524 /* Issue TX_RESET and TX_START commands. */ 541 /* Issue TX_RESET and TX_START commands. */
525 tc589_wait_for_completion(dev, TxReset); 542 tc589_wait_for_completion(dev, TxReset);
526 outw(TxEnable, ioaddr + EL3_CMD); 543 outw(TxEnable, ioaddr + EL3_CMD);
527 netif_wake_queue(dev); 544 netif_wake_queue(dev);
528} 545}
529 546
530static void pop_tx_status(struct net_device *dev) 547static void pop_tx_status(struct net_device *dev)
531{ 548{
532 unsigned int ioaddr = dev->base_addr; 549 unsigned int ioaddr = dev->base_addr;
533 int i; 550 int i;
534 551
535 /* Clear the Tx status stack. */ 552 /* Clear the Tx status stack. */
536 for (i = 32; i > 0; i--) { 553 for (i = 32; i > 0; i--) {
537 u_char tx_status = inb(ioaddr + TX_STATUS); 554 u_char tx_status = inb(ioaddr + TX_STATUS);
538 if (!(tx_status & 0x84)) break; 555 if (!(tx_status & 0x84))
539 /* reset transmitter on jabber error or underrun */ 556 break;
540 if (tx_status & 0x30) 557 /* reset transmitter on jabber error or underrun */
541 tc589_wait_for_completion(dev, TxReset); 558 if (tx_status & 0x30)
542 if (tx_status & 0x38) { 559 tc589_wait_for_completion(dev, TxReset);
543 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); 560 if (tx_status & 0x38) {
544 outw(TxEnable, ioaddr + EL3_CMD); 561 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
545 dev->stats.tx_aborted_errors++; 562 outw(TxEnable, ioaddr + EL3_CMD);
563 dev->stats.tx_aborted_errors++;
564 }
565 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
546 } 566 }
547 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
548 }
549} 567}
550 568
551static netdev_tx_t el3_start_xmit(struct sk_buff *skb, 569static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
552 struct net_device *dev) 570 struct net_device *dev)
553{ 571{
554 unsigned int ioaddr = dev->base_addr; 572 unsigned int ioaddr = dev->base_addr;
555 struct el3_private *priv = netdev_priv(dev); 573 struct el3_private *priv = netdev_priv(dev);
556 unsigned long flags; 574 unsigned long flags;
557 575
558 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n", 576 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
559 (long)skb->len, inw(ioaddr + EL3_STATUS)); 577 (long)skb->len, inw(ioaddr + EL3_STATUS));
560 578
561 spin_lock_irqsave(&priv->lock, flags); 579 spin_lock_irqsave(&priv->lock, flags);
562 580
563 dev->stats.tx_bytes += skb->len; 581 dev->stats.tx_bytes += skb->len;
564 582
565 /* Put out the doubleword header... */ 583 /* Put out the doubleword header... */
566 outw(skb->len, ioaddr + TX_FIFO); 584 outw(skb->len, ioaddr + TX_FIFO);
567 outw(0x00, ioaddr + TX_FIFO); 585 outw(0x00, ioaddr + TX_FIFO);
568 /* ... and the packet rounded to a doubleword. */ 586 /* ... and the packet rounded to a doubleword. */
569 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); 587 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
570 588
571 if (inw(ioaddr + TX_FREE) <= 1536) { 589 if (inw(ioaddr + TX_FREE) <= 1536) {
572 netif_stop_queue(dev); 590 netif_stop_queue(dev);
573 /* Interrupt us when the FIFO has room for max-sized packet. */ 591 /* Interrupt us when the FIFO has room for max-sized packet. */
574 outw(SetTxThreshold + 1536, ioaddr + EL3_CMD); 592 outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
575 } 593 }
576 594
577 pop_tx_status(dev); 595 pop_tx_status(dev);
578 spin_unlock_irqrestore(&priv->lock, flags); 596 spin_unlock_irqrestore(&priv->lock, flags);
579 dev_kfree_skb(skb); 597 dev_kfree_skb(skb);
580 598
581 return NETDEV_TX_OK; 599 return NETDEV_TX_OK;
582} 600}
583 601
584/* The EL3 interrupt handler. */ 602/* The EL3 interrupt handler. */
585static irqreturn_t el3_interrupt(int irq, void *dev_id) 603static irqreturn_t el3_interrupt(int irq, void *dev_id)
586{ 604{
587 struct net_device *dev = (struct net_device *) dev_id; 605 struct net_device *dev = (struct net_device *) dev_id;
588 struct el3_private *lp = netdev_priv(dev); 606 struct el3_private *lp = netdev_priv(dev);
589 unsigned int ioaddr; 607 unsigned int ioaddr;
590 __u16 status; 608 __u16 status;
591 int i = 0, handled = 1; 609 int i = 0, handled = 1;
592 610
593 if (!netif_device_present(dev)) 611 if (!netif_device_present(dev))
594 return IRQ_NONE; 612 return IRQ_NONE;
595 613
596 ioaddr = dev->base_addr; 614 ioaddr = dev->base_addr;
597 615
598 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS)); 616 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
599 617
600 spin_lock(&lp->lock); 618 spin_lock(&lp->lock);
601 while ((status = inw(ioaddr + EL3_STATUS)) & 619 while ((status = inw(ioaddr + EL3_STATUS)) &
602 (IntLatch | RxComplete | StatsFull)) { 620 (IntLatch | RxComplete | StatsFull)) {
603 if ((status & 0xe000) != 0x2000) { 621 if ((status & 0xe000) != 0x2000) {
604 netdev_dbg(dev, "interrupt from dead card\n"); 622 netdev_dbg(dev, "interrupt from dead card\n");
605 handled = 0; 623 handled = 0;
606 break; 624 break;
607 } 625 }
608 if (status & RxComplete) 626 if (status & RxComplete)
609 el3_rx(dev); 627 el3_rx(dev);
610 if (status & TxAvailable) { 628 if (status & TxAvailable) {
611 netdev_dbg(dev, " TX room bit was handled.\n"); 629 netdev_dbg(dev, " TX room bit was handled.\n");
612 /* There's room in the FIFO for a full-sized packet. */ 630 /* There's room in the FIFO for a full-sized packet. */
613 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 631 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
614 netif_wake_queue(dev); 632 netif_wake_queue(dev);
615 } 633 }
616 if (status & TxComplete) 634 if (status & TxComplete)
617 pop_tx_status(dev); 635 pop_tx_status(dev);
618 if (status & (AdapterFailure | RxEarly | StatsFull)) { 636 if (status & (AdapterFailure | RxEarly | StatsFull)) {
619 /* Handle all uncommon interrupts. */ 637 /* Handle all uncommon interrupts. */
620 if (status & StatsFull) /* Empty statistics. */ 638 if (status & StatsFull) /* Empty statistics. */
621 update_stats(dev); 639 update_stats(dev);
622 if (status & RxEarly) { /* Rx early is unused. */ 640 if (status & RxEarly) {
623 el3_rx(dev); 641 /* Rx early is unused. */
624 outw(AckIntr | RxEarly, ioaddr + EL3_CMD); 642 el3_rx(dev);
625 } 643 outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
626 if (status & AdapterFailure) { 644 }
627 u16 fifo_diag; 645 if (status & AdapterFailure) {
628 EL3WINDOW(4); 646 u16 fifo_diag;
629 fifo_diag = inw(ioaddr + 4); 647 EL3WINDOW(4);
630 EL3WINDOW(1); 648 fifo_diag = inw(ioaddr + 4);
631 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n", 649 EL3WINDOW(1);
650 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
632 fifo_diag); 651 fifo_diag);
633 if (fifo_diag & 0x0400) { 652 if (fifo_diag & 0x0400) {
634 /* Tx overrun */ 653 /* Tx overrun */
635 tc589_wait_for_completion(dev, TxReset); 654 tc589_wait_for_completion(dev, TxReset);
636 outw(TxEnable, ioaddr + EL3_CMD); 655 outw(TxEnable, ioaddr + EL3_CMD);
656 }
657 if (fifo_diag & 0x2000) {
658 /* Rx underrun */
659 tc589_wait_for_completion(dev, RxReset);
660 set_rx_mode(dev);
661 outw(RxEnable, ioaddr + EL3_CMD);
662 }
663 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
664 }
637 } 665 }
638 if (fifo_diag & 0x2000) { 666 if (++i > 10) {
639 /* Rx underrun */ 667 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
640 tc589_wait_for_completion(dev, RxReset); 668 status);
641 set_rx_mode(dev); 669 /* Clear all interrupts */
642 outw(RxEnable, ioaddr + EL3_CMD); 670 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
671 break;
643 } 672 }
644 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); 673 /* Acknowledge the IRQ. */
645 } 674 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
646 } 675 }
647 if (++i > 10) { 676 lp->last_irq = jiffies;
648 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n", 677 spin_unlock(&lp->lock);
649 status); 678 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
650 /* Clear all interrupts */ 679 inw(ioaddr + EL3_STATUS));
651 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 680 return IRQ_RETVAL(handled);
652 break;
653 }
654 /* Acknowledge the IRQ. */
655 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
656 }
657 lp->last_irq = jiffies;
658 spin_unlock(&lp->lock);
659 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
660 inw(ioaddr + EL3_STATUS));
661 return IRQ_RETVAL(handled);
662} 681}
663 682
664static void media_check(unsigned long arg) 683static void media_check(unsigned long arg)
665{ 684{
666 struct net_device *dev = (struct net_device *)(arg); 685 struct net_device *dev = (struct net_device *)(arg);
667 struct el3_private *lp = netdev_priv(dev); 686 struct el3_private *lp = netdev_priv(dev);
668 unsigned int ioaddr = dev->base_addr; 687 unsigned int ioaddr = dev->base_addr;
669 u16 media, errs; 688 u16 media, errs;
670 unsigned long flags; 689 unsigned long flags;
671 690
672 if (!netif_device_present(dev)) goto reschedule; 691 if (!netif_device_present(dev))
692 goto reschedule;
673 693
674 /* Check for pending interrupt with expired latency timer: with 694 /* Check for pending interrupt with expired latency timer: with
675 this, we can limp along even if the interrupt is blocked */ 695 * this, we can limp along even if the interrupt is blocked
676 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && 696 */
697 if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
677 (inb(ioaddr + EL3_TIMER) == 0xff)) { 698 (inb(ioaddr + EL3_TIMER) == 0xff)) {
678 if (!lp->fast_poll) 699 if (!lp->fast_poll)
679 netdev_warn(dev, "interrupt(s) dropped!\n"); 700 netdev_warn(dev, "interrupt(s) dropped!\n");
680 701
681 local_irq_save(flags); 702 local_irq_save(flags);
682 el3_interrupt(dev->irq, dev); 703 el3_interrupt(dev->irq, dev);
683 local_irq_restore(flags); 704 local_irq_restore(flags);
684 705
685 lp->fast_poll = HZ; 706 lp->fast_poll = HZ;
686 } 707 }
687 if (lp->fast_poll) { 708 if (lp->fast_poll) {
688 lp->fast_poll--; 709 lp->fast_poll--;
689 lp->media.expires = jiffies + HZ/100; 710 lp->media.expires = jiffies + HZ/100;
690 add_timer(&lp->media); 711 add_timer(&lp->media);
691 return; 712 return;
692 } 713 }
693 714
694 /* lp->lock guards the EL3 window. Window should always be 1 except 715 /* lp->lock guards the EL3 window. Window should always be 1 except
695 when the lock is held */ 716 * when the lock is held
696 spin_lock_irqsave(&lp->lock, flags); 717 */
697 EL3WINDOW(4); 718
698 media = inw(ioaddr+WN4_MEDIA) & 0xc810; 719 spin_lock_irqsave(&lp->lock, flags);
699 720 EL3WINDOW(4);
700 /* Ignore collisions unless we've had no irq's recently */ 721 media = inw(ioaddr+WN4_MEDIA) & 0xc810;
701 if (time_before(jiffies, lp->last_irq + HZ)) { 722
702 media &= ~0x0010; 723 /* Ignore collisions unless we've had no irq's recently */
703 } else { 724 if (time_before(jiffies, lp->last_irq + HZ)) {
704 /* Try harder to detect carrier errors */ 725 media &= ~0x0010;
705 EL3WINDOW(6); 726 } else {
706 outw(StatsDisable, ioaddr + EL3_CMD); 727 /* Try harder to detect carrier errors */
707 errs = inb(ioaddr + 0); 728 EL3WINDOW(6);
708 outw(StatsEnable, ioaddr + EL3_CMD); 729 outw(StatsDisable, ioaddr + EL3_CMD);
709 dev->stats.tx_carrier_errors += errs; 730 errs = inb(ioaddr + 0);
710 if (errs || (lp->media_status & 0x0010)) media |= 0x0010; 731 outw(StatsEnable, ioaddr + EL3_CMD);
711 } 732 dev->stats.tx_carrier_errors += errs;
733 if (errs || (lp->media_status & 0x0010))
734 media |= 0x0010;
735 }
712 736
713 if (media != lp->media_status) { 737 if (media != lp->media_status) {
714 if ((media & lp->media_status & 0x8000) && 738 if ((media & lp->media_status & 0x8000) &&
715 ((lp->media_status ^ media) & 0x0800)) 739 ((lp->media_status ^ media) & 0x0800))
716 netdev_info(dev, "%s link beat\n", 740 netdev_info(dev, "%s link beat\n",
717 (lp->media_status & 0x0800 ? "lost" : "found")); 741 (lp->media_status & 0x0800 ? "lost" : "found"));
718 else if ((media & lp->media_status & 0x4000) && 742 else if ((media & lp->media_status & 0x4000) &&
719 ((lp->media_status ^ media) & 0x0010)) 743 ((lp->media_status ^ media) & 0x0010))
720 netdev_info(dev, "coax cable %s\n", 744 netdev_info(dev, "coax cable %s\n",
721 (lp->media_status & 0x0010 ? "ok" : "problem")); 745 (lp->media_status & 0x0010 ? "ok" : "problem"));
722 if (dev->if_port == 0) { 746 if (dev->if_port == 0) {
723 if (media & 0x8000) { 747 if (media & 0x8000) {
724 if (media & 0x0800) 748 if (media & 0x0800)
725 netdev_info(dev, "flipped to 10baseT\n"); 749 netdev_info(dev, "flipped to 10baseT\n");
726 else 750 else
727 tc589_set_xcvr(dev, 2); 751 tc589_set_xcvr(dev, 2);
728 } else if (media & 0x4000) { 752 } else if (media & 0x4000) {
729 if (media & 0x0010) 753 if (media & 0x0010)
730 tc589_set_xcvr(dev, 1); 754 tc589_set_xcvr(dev, 1);
731 else 755 else
732 netdev_info(dev, "flipped to 10base2\n"); 756 netdev_info(dev, "flipped to 10base2\n");
733 } 757 }
758 }
759 lp->media_status = media;
734 } 760 }
735 lp->media_status = media;
736 }
737 761
738 EL3WINDOW(1); 762 EL3WINDOW(1);
739 spin_unlock_irqrestore(&lp->lock, flags); 763 spin_unlock_irqrestore(&lp->lock, flags);
740 764
741reschedule: 765reschedule:
742 lp->media.expires = jiffies + HZ; 766 lp->media.expires = jiffies + HZ;
743 add_timer(&lp->media); 767 add_timer(&lp->media);
744} 768}
745 769
746static struct net_device_stats *el3_get_stats(struct net_device *dev) 770static struct net_device_stats *el3_get_stats(struct net_device *dev)
747{ 771{
748 struct el3_private *lp = netdev_priv(dev); 772 struct el3_private *lp = netdev_priv(dev);
749 unsigned long flags; 773 unsigned long flags;
750 struct pcmcia_device *link = lp->p_dev; 774 struct pcmcia_device *link = lp->p_dev;
751 775
752 if (pcmcia_dev_present(link)) { 776 if (pcmcia_dev_present(link)) {
753 spin_lock_irqsave(&lp->lock, flags); 777 spin_lock_irqsave(&lp->lock, flags);
754 update_stats(dev); 778 update_stats(dev);
755 spin_unlock_irqrestore(&lp->lock, flags); 779 spin_unlock_irqrestore(&lp->lock, flags);
756 } 780 }
757 return &dev->stats; 781 return &dev->stats;
758} 782}
759 783
760/* 784/* Update statistics. We change to register window 6, so this should be run
761 Update statistics. We change to register window 6, so this should be run 785* single-threaded if the device is active. This is expected to be a rare
762 single-threaded if the device is active. This is expected to be a rare 786* operation, and it's simpler for the rest of the driver to assume that
763 operation, and it's simpler for the rest of the driver to assume that 787* window 1 is always valid rather than use a special window-state variable.
764 window 1 is always valid rather than use a special window-state variable. 788*
765 789* Caller must hold the lock for this
766 Caller must hold the lock for this
767*/ 790*/
791
768static void update_stats(struct net_device *dev) 792static void update_stats(struct net_device *dev)
769{ 793{
770 unsigned int ioaddr = dev->base_addr; 794 unsigned int ioaddr = dev->base_addr;
771 795
772 netdev_dbg(dev, "updating the statistics.\n"); 796 netdev_dbg(dev, "updating the statistics.\n");
773 /* Turn off statistics updates while reading. */ 797 /* Turn off statistics updates while reading. */
774 outw(StatsDisable, ioaddr + EL3_CMD); 798 outw(StatsDisable, ioaddr + EL3_CMD);
775 /* Switch to the stats window, and read everything. */ 799 /* Switch to the stats window, and read everything. */
776 EL3WINDOW(6); 800 EL3WINDOW(6);
777 dev->stats.tx_carrier_errors += inb(ioaddr + 0); 801 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
778 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); 802 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
779 /* Multiple collisions. */ inb(ioaddr + 2); 803 /* Multiple collisions. */
780 dev->stats.collisions += inb(ioaddr + 3); 804 inb(ioaddr + 2);
781 dev->stats.tx_window_errors += inb(ioaddr + 4); 805 dev->stats.collisions += inb(ioaddr + 3);
782 dev->stats.rx_fifo_errors += inb(ioaddr + 5); 806 dev->stats.tx_window_errors += inb(ioaddr + 4);
783 dev->stats.tx_packets += inb(ioaddr + 6); 807 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
784 /* Rx packets */ inb(ioaddr + 7); 808 dev->stats.tx_packets += inb(ioaddr + 6);
785 /* Tx deferrals */ inb(ioaddr + 8); 809 /* Rx packets */
786 /* Rx octets */ inw(ioaddr + 10); 810 inb(ioaddr + 7);
787 /* Tx octets */ inw(ioaddr + 12); 811 /* Tx deferrals */
 	inb(ioaddr + 8);
+	/* Rx octets */
+	inw(ioaddr + 10);
+	/* Tx octets */
+	inw(ioaddr + 12);
+
 	/* Back to window 1, and turn statistics back on. */
 	EL3WINDOW(1);
 	outw(StatsEnable, ioaddr + EL3_CMD);
 }
 
 static int el3_rx(struct net_device *dev)
 {
 	unsigned int ioaddr = dev->base_addr;
 	int worklimit = 32;
 	short rx_status;
 
 	netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
 		   inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
 	while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
 	       worklimit > 0) {
 		worklimit--;
 		if (rx_status & 0x4000) { /* Error, update stats. */
 			short error = rx_status & 0x3800;
 			dev->stats.rx_errors++;
 			switch (error) {
-			case 0x0000: dev->stats.rx_over_errors++; break;
-			case 0x0800: dev->stats.rx_length_errors++; break;
-			case 0x1000: dev->stats.rx_frame_errors++; break;
-			case 0x1800: dev->stats.rx_length_errors++; break;
-			case 0x2000: dev->stats.rx_frame_errors++; break;
-			case 0x2800: dev->stats.rx_crc_errors++; break;
+			case 0x0000:
+				dev->stats.rx_over_errors++;
+				break;
+			case 0x0800:
+				dev->stats.rx_length_errors++;
+				break;
+			case 0x1000:
+				dev->stats.rx_frame_errors++;
+				break;
+			case 0x1800:
+				dev->stats.rx_length_errors++;
+				break;
+			case 0x2000:
+				dev->stats.rx_frame_errors++;
+				break;
+			case 0x2800:
+				dev->stats.rx_crc_errors++;
+				break;
 			}
 		} else {
 			short pkt_len = rx_status & 0x7ff;
 			struct sk_buff *skb;
 
 			skb = netdev_alloc_skb(dev, pkt_len + 5);
 
 			netdev_dbg(dev, "  Receiving packet size %d status %4.4x.\n",
 				   pkt_len, rx_status);
 			if (skb != NULL) {
 				skb_reserve(skb, 2);
 				insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
 				     (pkt_len+3)>>2);
 				skb->protocol = eth_type_trans(skb, dev);
 				netif_rx(skb);
 				dev->stats.rx_packets++;
 				dev->stats.rx_bytes += pkt_len;
 			} else {
 				netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
 					   pkt_len);
 				dev->stats.rx_dropped++;
 			}
 		}
 		/* Pop the top of the Rx FIFO */
 		tc589_wait_for_completion(dev, RxDiscard);
 	}
 	if (worklimit == 0)
 		netdev_warn(dev, "too much work in el3_rx!\n");
 	return 0;
 }
 
 static void set_rx_mode(struct net_device *dev)
 {
 	unsigned int ioaddr = dev->base_addr;
 	u16 opts = SetRxFilter | RxStation | RxBroadcast;
 
 	if (dev->flags & IFF_PROMISC)
 		opts |= RxMulticast | RxProm;
 	else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
 		opts |= RxMulticast;
 	outw(opts, ioaddr + EL3_CMD);
 }
 
 static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
 
 static int el3_close(struct net_device *dev)
 {
 	struct el3_private *lp = netdev_priv(dev);
 	struct pcmcia_device *link = lp->p_dev;
 	unsigned int ioaddr = dev->base_addr;
 
 	dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
 
 	if (pcmcia_dev_present(link)) {
 		/* Turn off statistics ASAP. We update dev->stats below. */
 		outw(StatsDisable, ioaddr + EL3_CMD);
 
 		/* Disable the receiver and transmitter. */
 		outw(RxDisable, ioaddr + EL3_CMD);
 		outw(TxDisable, ioaddr + EL3_CMD);
 
 		if (dev->if_port == 2)
 			/* Turn off thinnet power. Green! */
 			outw(StopCoax, ioaddr + EL3_CMD);
 		else if (dev->if_port == 1) {
 			/* Disable link beat and jabber */
 			EL3WINDOW(4);
 			outw(0, ioaddr + WN4_MEDIA);
 		}
 
 		/* Switching back to window 0 disables the IRQ. */
 		EL3WINDOW(0);
 		/* But we explicitly zero the IRQ line select anyway. */
 		outw(0x0f00, ioaddr + WN0_IRQ);
 
 		/* Check if the card still exists */
 		if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
 			update_stats(dev);
 	}
 
 	link->open--;
 	netif_stop_queue(dev);
 	del_timer_sync(&lp->media);
 
 	return 0;
 }
 
 static const struct pcmcia_device_id tc589_ids[] = {
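For readers decoding the hunk above: RX_STATUS packs everything el3_rx() needs into one word, with bit 15 meaning no packet is fully received yet, bit 14 the error summary, bits 13:11 the error code switched on above, and bits 10:0 the packet length. A minimal decoding sketch (the struct and helper are illustrative, not part of the driver):

/* Illustrative decoding of the 3c589 RX_STATUS word; the masks mirror
 * the tests in el3_rx() above.
 */
struct el3_rx_fields {
	int incomplete;		/* bit 15: no packet fully received yet */
	int error;		/* bit 14: error summary */
	unsigned short code;	/* bits 13:11: error code for the switch */
	unsigned short len;	/* bits 10:0: packet length in bytes */
};

static struct el3_rx_fields el3_decode_rx_status(unsigned short rx_status)
{
	struct el3_rx_fields f = {
		.incomplete = !!(rx_status & 0x8000),
		.error = !!(rx_status & 0x4000),
		.code = rx_status & 0x3800,
		.len = rx_status & 0x7ff,
	};
	return f;
}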
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 238ccea965c8..61477b8e8d24 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2086,7 +2086,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* ... and the packet rounded to a doubleword. */
 	skb_tx_timestamp(skb);
 	iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
-	dev_kfree_skb (skb);
+	dev_consume_skb_any (skb);
 	if (ioread16(ioaddr + TxFree) > 1536) {
 		netif_start_queue (dev);	/* AKPM: redundant? */
 	} else {
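The dev_kfree_skb() to dev_consume_skb_any() conversions here and in the hunks below (lib8390.c, bfin_mac.c, sun4i-emac.c) matter for drop accounting: the consume variant frees a buffer that was transmitted successfully, so packet-drop tracepoints stay quiet, and the _any suffix makes it safe in both process and interrupt context. A minimal sketch of the convention, with tx_ok as a hypothetical completion flag:

#include <linux/netdevice.h>

/* Sketch: free a finished tx skb; tx_ok is a hypothetical flag. */
static void example_free_tx_skb(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* delivered: not a drop */
	else
		dev_kfree_skb_any(skb);		/* error path: counts as a drop */
}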
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index d2cd80444ade..599311f0e05c 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -404,7 +404,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
 	spin_unlock(&ei_local->page_lock);
 	enable_irq_lockdep_irqrestore(dev->irq, &flags);
 	skb_tx_timestamp(skb);
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 	dev->stats.tx_bytes += send_length;
 
 	return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 506b0248c400..39484b534f5e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -22,6 +22,7 @@ source "drivers/net/ethernet/adaptec/Kconfig"
 source "drivers/net/ethernet/aeroflex/Kconfig"
 source "drivers/net/ethernet/allwinner/Kconfig"
 source "drivers/net/ethernet/alteon/Kconfig"
+source "drivers/net/ethernet/altera/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
 source "drivers/net/ethernet/apple/Kconfig"
 source "drivers/net/ethernet/arc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c0b8789952e7..adf61af507f7 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
 obj-$(CONFIG_GRETH) += aeroflex/
 obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
 obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
+obj-$(CONFIG_ALTERA_TSE) += altera/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
 obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
 obj-$(CONFIG_NET_VENDOR_ARC) += arc/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index c0f68dcd1dc1..95779b6b7394 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1040,6 +1040,7 @@ static struct ptp_clock_info bfin_ptp_caps = {
 	.n_alarm	= 0,
 	.n_ext_ts	= 0,
 	.n_per_out	= 0,
+	.n_pins		= 0,
 	.pps		= 0,
 	.adjfreq	= bfin_ptp_adjfreq,
 	.adjtime	= bfin_ptp_adjtime,
@@ -1086,7 +1087,7 @@ static inline void _tx_reclaim_skb(void)
 		tx_list_head->desc_a.config &= ~DMAEN;
 		tx_list_head->status.status_word = 0;
 		if (tx_list_head->skb) {
-			dev_kfree_skb(tx_list_head->skb);
+			dev_consume_skb_any(tx_list_head->skb);
 			tx_list_head->skb = NULL;
 		}
 		tx_list_head = tx_list_head->next;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 511f6eecd58b..fcaeeb8a4929 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -476,7 +476,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_unlock_irqrestore(&db->lock, flags);
 
 	/* free this SKB */
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
new file mode 100644
index 000000000000..80c1ab74a4b8
--- /dev/null
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -0,0 +1,8 @@
1config ALTERA_TSE
2 tristate "Altera Triple-Speed Ethernet MAC support"
3 select PHYLIB
4 ---help---
5 This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
6
7 To compile this driver as a module, choose M here. The module
8 will be called altera_tse.
diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile
new file mode 100644
index 000000000000..d4a187e45369
--- /dev/null
+++ b/drivers/net/ethernet/altera/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Altera device drivers.
3#
4
5obj-$(CONFIG_ALTERA_TSE) += altera_tse.o
6altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \
7altera_msgdma.o altera_sgdma.o altera_utils.o
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
new file mode 100644
index 000000000000..3df18669ea30
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -0,0 +1,202 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/netdevice.h>
18#include "altera_utils.h"
19#include "altera_tse.h"
20#include "altera_msgdmahw.h"
21
22/* No initialization work to do for MSGDMA */
23int msgdma_initialize(struct altera_tse_private *priv)
24{
25 return 0;
26}
27
28void msgdma_uninitialize(struct altera_tse_private *priv)
29{
30}
31
32void msgdma_reset(struct altera_tse_private *priv)
33{
34 int counter;
35 struct msgdma_csr *txcsr =
36 (struct msgdma_csr *)priv->tx_dma_csr;
37 struct msgdma_csr *rxcsr =
38 (struct msgdma_csr *)priv->rx_dma_csr;
39
40 /* Reset Rx mSGDMA */
41 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
42 iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control);
43
44 counter = 0;
45 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
46 if (tse_bit_is_clear(&rxcsr->status,
47 MSGDMA_CSR_STAT_RESETTING))
48 break;
49 udelay(1);
50 }
51
52 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
53 netif_warn(priv, drv, priv->dev,
54 "TSE Rx mSGDMA resetting bit never cleared!\n");
55
56 /* clear all status bits */
57 iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status);
58
59 /* Reset Tx mSGDMA */
60 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
61 iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control);
62
63 counter = 0;
64 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
65 if (tse_bit_is_clear(&txcsr->status,
66 MSGDMA_CSR_STAT_RESETTING))
67 break;
68 udelay(1);
69 }
70
71 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR)
72 netif_warn(priv, drv, priv->dev,
73 "TSE Tx mSGDMA resetting bit never cleared!\n");
74
75 /* clear all status bits */
76 iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status);
77}
78
79void msgdma_disable_rxirq(struct altera_tse_private *priv)
80{
81 struct msgdma_csr *csr = priv->rx_dma_csr;
82 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
83}
84
85void msgdma_enable_rxirq(struct altera_tse_private *priv)
86{
87 struct msgdma_csr *csr = priv->rx_dma_csr;
88 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
89}
90
91void msgdma_disable_txirq(struct altera_tse_private *priv)
92{
93 struct msgdma_csr *csr = priv->tx_dma_csr;
94 tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
95}
96
97void msgdma_enable_txirq(struct altera_tse_private *priv)
98{
99 struct msgdma_csr *csr = priv->tx_dma_csr;
100 tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR);
101}
102
103void msgdma_clear_rxirq(struct altera_tse_private *priv)
104{
105 struct msgdma_csr *csr = priv->rx_dma_csr;
106 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
107}
108
109void msgdma_clear_txirq(struct altera_tse_private *priv)
110{
111 struct msgdma_csr *csr = priv->tx_dma_csr;
112 iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status);
113}
114
115/* return 0 to indicate transmit is pending */
116int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
117{
118 struct msgdma_extended_desc *desc = priv->tx_dma_desc;
119
120 iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo);
121 iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi);
122 iowrite32(0, &desc->write_addr_lo);
123 iowrite32(0, &desc->write_addr_hi);
124 iowrite32(buffer->len, &desc->len);
125 iowrite32(0, &desc->burst_seq_num);
126 iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride);
127 iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control);
128 return 0;
129}
130
131u32 msgdma_tx_completions(struct altera_tse_private *priv)
132{
133 u32 ready = 0;
134 u32 inuse;
135 u32 status;
136 struct msgdma_csr *txcsr =
137 (struct msgdma_csr *)priv->tx_dma_csr;
138
139 /* Get number of sent descriptors */
140 inuse = ioread32(&txcsr->rw_fill_level) & 0xffff;
141
142 if (inuse) { /* Tx FIFO is not empty */
143 ready = priv->tx_prod - priv->tx_cons - inuse - 1;
144 } else {
145 /* Check for buffered last packet */
146 status = ioread32(&txcsr->status);
147 if (status & MSGDMA_CSR_STAT_BUSY)
148 ready = priv->tx_prod - priv->tx_cons - 1;
149 else
150 ready = priv->tx_prod - priv->tx_cons;
151 }
152 return ready;
153}
154
155/* Put a buffer into the mSGDMA RX FIFO
156 */
157int msgdma_add_rx_desc(struct altera_tse_private *priv,
158 struct tse_buffer *rxbuffer)
159{
160 struct msgdma_extended_desc *desc = priv->rx_dma_desc;
161 u32 len = priv->rx_dma_buf_sz;
162 dma_addr_t dma_addr = rxbuffer->dma_addr;
163 u32 control = (MSGDMA_DESC_CTL_END_ON_EOP
164 | MSGDMA_DESC_CTL_END_ON_LEN
165 | MSGDMA_DESC_CTL_TR_COMP_IRQ
166 | MSGDMA_DESC_CTL_EARLY_IRQ
167 | MSGDMA_DESC_CTL_TR_ERR_IRQ
168 | MSGDMA_DESC_CTL_GO);
169
170 iowrite32(0, &desc->read_addr_lo);
171 iowrite32(0, &desc->read_addr_hi);
172 iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo);
173 iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi);
174 iowrite32(len, &desc->len);
175 iowrite32(0, &desc->burst_seq_num);
176 iowrite32(MSGDMA_DESC_RX_STRIDE, &desc->stride);
177 iowrite32(control, &desc->control);
178 return 1;
179}
180
181/* status is returned in the upper 16 bits,
182 * length is returned in the lower 16 bits
183 */
184u32 msgdma_rx_status(struct altera_tse_private *priv)
185{
186 u32 rxstatus = 0;
187 u32 pktlength;
188 u32 pktstatus;
189 struct msgdma_csr *rxcsr =
190 (struct msgdma_csr *)priv->rx_dma_csr;
191 struct msgdma_response *rxresp =
192 (struct msgdma_response *)priv->rx_dma_resp;
193
194 if (ioread32(&rxcsr->resp_fill_level) & 0xffff) {
195 pktlength = ioread32(&rxresp->bytes_transferred);
196 pktstatus = ioread32(&rxresp->status);
197 rxstatus = pktstatus;
198 rxstatus = rxstatus << 16;
199 rxstatus |= (pktlength & 0xffff);
200 }
201 return rxstatus;
202}
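msgdma_rx_status() packs the dispatcher response as status in the upper 16 bits and byte count in the lower 16, per its comment. A sketch of how a caller might unpack it (the helper is hypothetical; the real consumer is the rx path in altera_tse_main.c later in this patch):

/* Sketch: how a NAPI poll loop might unpack the word built by
 * msgdma_rx_status(); the real consumer is altera_tse_main.c.
 */
static void example_handle_rx_response(struct altera_tse_private *priv)
{
	u32 rxstatus = msgdma_rx_status(priv);
	u16 pktstatus = rxstatus >> 16;		/* response status bits */
	u16 pktlength = rxstatus & 0xffff;	/* bytes transferred */

	if (!rxstatus)
		return;		/* no response waiting in the FIFO */

	if (pktstatus & MSGDMA_RESP_ERR_MASK)
		priv->dev->stats.rx_errors++;	/* dispatcher reported an error */
	else
		priv->dev->stats.rx_bytes += pktlength;
}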
diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h
new file mode 100644
index 000000000000..7f0f5bf2bba2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdma.h
@@ -0,0 +1,34 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_MSGDMA_H__
18#define __ALTERA_MSGDMA_H__
19
20void msgdma_reset(struct altera_tse_private *);
21void msgdma_enable_txirq(struct altera_tse_private *);
22void msgdma_enable_rxirq(struct altera_tse_private *);
23void msgdma_disable_rxirq(struct altera_tse_private *);
24void msgdma_disable_txirq(struct altera_tse_private *);
25void msgdma_clear_rxirq(struct altera_tse_private *);
26void msgdma_clear_txirq(struct altera_tse_private *);
27u32 msgdma_tx_completions(struct altera_tse_private *);
28int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
29int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
30u32 msgdma_rx_status(struct altera_tse_private *);
31int msgdma_initialize(struct altera_tse_private *);
32void msgdma_uninitialize(struct altera_tse_private *);
33
34#endif /* __ALTERA_MSGDMA_H__ */
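These prototypes exist so the main driver can route all DMA work through a method table. A sketch of the expected wiring into struct altera_dmaops, which altera_tse.h defines later in this patch (the variable name is hypothetical, and the dmamask field is omitted):

/* Sketch: plumbing the mSGDMA handlers into the shared DMA method
 * table; altera_tse_main.c selects this table for ALTERA_DTYPE_MSGDMA.
 */
static const struct altera_dmaops example_msgdma_ops = {
	.altera_dtype = ALTERA_DTYPE_MSGDMA,
	.reset_dma = msgdma_reset,
	.enable_txirq = msgdma_enable_txirq,
	.enable_rxirq = msgdma_enable_rxirq,
	.disable_txirq = msgdma_disable_txirq,
	.disable_rxirq = msgdma_disable_rxirq,
	.clear_txirq = msgdma_clear_txirq,
	.clear_rxirq = msgdma_clear_rxirq,
	.tx_buffer = msgdma_tx_buffer,
	.tx_completions = msgdma_tx_completions,
	.add_rx_desc = msgdma_add_rx_desc,
	.get_rx_status = msgdma_rx_status,
	.init_dma = msgdma_initialize,
	.uninit_dma = msgdma_uninitialize,
};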
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
new file mode 100644
index 000000000000..d7b59ba4019c
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -0,0 +1,167 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_MSGDMAHW_H__
18#define __ALTERA_MSGDMAHW_H__
19
20/* mSGDMA standard descriptor format
21 */
22struct msgdma_desc {
23 u32 read_addr; /* data buffer source address */
24 u32 write_addr; /* data buffer destination address */
25 u32 len; /* the number of bytes to transfer per descriptor */
26 u32 control; /* characteristics of the transfer */
27};
28
29/* mSGDMA extended descriptor format
30 */
31struct msgdma_extended_desc {
32 u32 read_addr_lo; /* data buffer source address low bits */
33 u32 write_addr_lo; /* data buffer destination address low bits */
34 u32 len; /* the number of bytes to transfer
35 * per descriptor
36 */
37 u32 burst_seq_num; /* bit 31:24 write burst
38 * bit 23:16 read burst
39 * bit 15:0 sequence number
40 */
41 u32 stride; /* bit 31:16 write stride
42 * bit 15:0 read stride
43 */
44 u32 read_addr_hi; /* data buffer source address high bits */
45 u32 write_addr_hi; /* data buffer destination address high bits */
46 u32 control; /* characteristics of the transfer */
47};
48
49/* mSGDMA descriptor control field bit definitions
50 */
51#define MSGDMA_DESC_CTL_SET_CH(x) ((x) & 0xff)
52#define MSGDMA_DESC_CTL_GEN_SOP BIT(8)
53#define MSGDMA_DESC_CTL_GEN_EOP BIT(9)
54#define MSGDMA_DESC_CTL_PARK_READS BIT(10)
55#define MSGDMA_DESC_CTL_PARK_WRITES BIT(11)
56#define MSGDMA_DESC_CTL_END_ON_EOP BIT(12)
57#define MSGDMA_DESC_CTL_END_ON_LEN BIT(13)
58#define MSGDMA_DESC_CTL_TR_COMP_IRQ BIT(14)
59#define MSGDMA_DESC_CTL_EARLY_IRQ BIT(15)
60#define MSGDMA_DESC_CTL_TR_ERR_IRQ (0xff << 16)
61#define MSGDMA_DESC_CTL_EARLY_DONE BIT(24)
62/* Writing '1' to the 'go' bit commits the entire descriptor into the
63 * descriptor FIFO(s)
64 */
65#define MSGDMA_DESC_CTL_GO BIT(31)
66
67/* Tx buffer control flags
68 */
69#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
70 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
71 MSGDMA_DESC_CTL_GO)
72
73#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
74 MSGDMA_DESC_CTL_GO)
75
76#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
77 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
78 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
79 MSGDMA_DESC_CTL_GO)
80
81#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
82 MSGDMA_DESC_CTL_GEN_EOP | \
83 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
84 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
85 MSGDMA_DESC_CTL_GO)
86
87#define MSGDMA_DESC_CTL_RX_SINGLE (MSGDMA_DESC_CTL_END_ON_EOP | \
88 MSGDMA_DESC_CTL_END_ON_LEN | \
89 MSGDMA_DESC_CTL_TR_COMP_IRQ | \
90 MSGDMA_DESC_CTL_EARLY_IRQ | \
91 MSGDMA_DESC_CTL_TR_ERR_IRQ | \
92 MSGDMA_DESC_CTL_GO)
93
94/* mSGDMA extended descriptor stride definitions
95 */
96#define MSGDMA_DESC_TX_STRIDE (0x00010001)
97#define MSGDMA_DESC_RX_STRIDE (0x00010001)
98
99/* mSGDMA dispatcher control and status register map
100 */
101struct msgdma_csr {
102 u32 status; /* Read/Clear */
103 u32 control; /* Read/Write */
104 u32 rw_fill_level; /* bit 31:16 - write fill level
105 * bit 15:0 - read fill level
106 */
107 u32 resp_fill_level; /* bit 15:0 */
108 u32 rw_seq_num; /* bit 31:16 - write sequence number
109 * bit 15:0 - read sequence number
110 */
111 u32 pad[3]; /* reserved */
112};
113
114/* mSGDMA CSR status register bit definitions
115 */
116#define MSGDMA_CSR_STAT_BUSY BIT(0)
117#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY BIT(1)
118#define MSGDMA_CSR_STAT_DESC_BUF_FULL BIT(2)
119#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY BIT(3)
120#define MSGDMA_CSR_STAT_RESP_BUF_FULL BIT(4)
121#define MSGDMA_CSR_STAT_STOPPED BIT(5)
122#define MSGDMA_CSR_STAT_RESETTING BIT(6)
123#define MSGDMA_CSR_STAT_STOPPED_ON_ERR BIT(7)
124#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY BIT(8)
125#define MSGDMA_CSR_STAT_IRQ BIT(9)
126#define MSGDMA_CSR_STAT_MASK 0x3FF
127#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ 0x1FF
128
129#define MSGDMA_CSR_STAT_BUSY_GET(v) GET_BIT_VALUE(v, 0)
130#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 1)
131#define MSGDMA_CSR_STAT_DESC_BUF_FULL_GET(v) GET_BIT_VALUE(v, 2)
132#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY_GET(v) GET_BIT_VALUE(v, 3)
133#define MSGDMA_CSR_STAT_RESP_BUF_FULL_GET(v) GET_BIT_VALUE(v, 4)
134#define MSGDMA_CSR_STAT_STOPPED_GET(v) GET_BIT_VALUE(v, 5)
135#define MSGDMA_CSR_STAT_RESETTING_GET(v) GET_BIT_VALUE(v, 6)
136#define MSGDMA_CSR_STAT_STOPPED_ON_ERR_GET(v) GET_BIT_VALUE(v, 7)
137#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY_GET(v) GET_BIT_VALUE(v, 8)
138#define MSGDMA_CSR_STAT_IRQ_GET(v) GET_BIT_VALUE(v, 9)
139
140/* mSGDMA CSR control register bit definitions
141 */
142#define MSGDMA_CSR_CTL_STOP BIT(0)
143#define MSGDMA_CSR_CTL_RESET BIT(1)
144#define MSGDMA_CSR_CTL_STOP_ON_ERR BIT(2)
145#define MSGDMA_CSR_CTL_STOP_ON_EARLY BIT(3)
146#define MSGDMA_CSR_CTL_GLOBAL_INTR BIT(4)
147#define MSGDMA_CSR_CTL_STOP_DESCS BIT(5)
148
149/* mSGDMA CSR fill level bits
150 */
151#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v) (((v) & 0xffff0000) >> 16)
152#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
153#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v) ((v) & 0x0000ffff)
154
155/* mSGDMA response register map
156 */
157struct msgdma_response {
158 u32 bytes_transferred;
159 u32 status;
160};
161
162/* mSGDMA response register bit definitions
163 */
164#define MSGDMA_RESP_EARLY_TERM BIT(8)
165#define MSGDMA_RESP_ERR_MASK 0xFF
166
167#endif /* __ALTERA_MSGDMAHW_H__ */
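The *_GET accessors above are thin wrappers around GET_BIT_VALUE() from altera_tse.h. As an illustration, an idle test for the dispatcher could be phrased through them like this (a sketch only; msgdma_reset() above polls with tse_bit_is_clear() instead):

/* Sketch: reading dispatcher state through the CSR accessors */
static int example_msgdma_idle(struct msgdma_csr __iomem *csr)
{
	u32 status = ioread32(&csr->status);

	return !MSGDMA_CSR_STAT_BUSY_GET(status) &&
	       !MSGDMA_CSR_STAT_RESETTING_GET(status);
}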
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
new file mode 100644
index 000000000000..0ee96639ae44
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -0,0 +1,509 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/list.h>
18#include "altera_utils.h"
19#include "altera_tse.h"
20#include "altera_sgdmahw.h"
21#include "altera_sgdma.h"
22
23static void sgdma_descrip(struct sgdma_descrip *desc,
24 struct sgdma_descrip *ndesc,
25 dma_addr_t ndesc_phys,
26 dma_addr_t raddr,
27 dma_addr_t waddr,
28 u16 length,
29 int generate_eop,
30 int rfixed,
31 int wfixed);
32
33static int sgdma_async_write(struct altera_tse_private *priv,
34 struct sgdma_descrip *desc);
35
36static int sgdma_async_read(struct altera_tse_private *priv);
37
38static dma_addr_t
39sgdma_txphysaddr(struct altera_tse_private *priv,
40 struct sgdma_descrip *desc);
41
42static dma_addr_t
43sgdma_rxphysaddr(struct altera_tse_private *priv,
44 struct sgdma_descrip *desc);
45
46static int sgdma_txbusy(struct altera_tse_private *priv);
47
48static int sgdma_rxbusy(struct altera_tse_private *priv);
49
50static void
51queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);
52
53static void
54queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);
55
56static struct tse_buffer *
57dequeue_tx(struct altera_tse_private *priv);
58
59static struct tse_buffer *
60dequeue_rx(struct altera_tse_private *priv);
61
62static struct tse_buffer *
63queue_rx_peekhead(struct altera_tse_private *priv);
64
65int sgdma_initialize(struct altera_tse_private *priv)
66{
67 priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
68
69 priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
70 SGDMA_CTRLREG_ILASTD;
71
72 INIT_LIST_HEAD(&priv->txlisthd);
73 INIT_LIST_HEAD(&priv->rxlisthd);
74
75 priv->rxdescphys = (dma_addr_t) 0;
76 priv->txdescphys = (dma_addr_t) 0;
77
78 priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
79 priv->rxdescmem, DMA_BIDIRECTIONAL);
80
81 if (dma_mapping_error(priv->device, priv->rxdescphys)) {
82 sgdma_uninitialize(priv);
83 netdev_err(priv->dev, "error mapping rx descriptor memory\n");
84 return -EINVAL;
85 }
86
87 priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
88 priv->txdescmem, DMA_TO_DEVICE);
89
90 if (dma_mapping_error(priv->device, priv->txdescphys)) {
91 sgdma_uninitialize(priv);
92 netdev_err(priv->dev, "error mapping tx descriptor memory\n");
93 return -EINVAL;
94 }
95
96 return 0;
97}
98
99void sgdma_uninitialize(struct altera_tse_private *priv)
100{
101 if (priv->rxdescphys)
102 dma_unmap_single(priv->device, priv->rxdescphys,
103 priv->rxdescmem, DMA_BIDIRECTIONAL);
104
105 if (priv->txdescphys)
106 dma_unmap_single(priv->device, priv->txdescphys,
107 priv->txdescmem, DMA_TO_DEVICE);
108}
109
110/* This function resets the SGDMA controller and clears the
111 * descriptor memory used for transmits and receives.
112 */
113void sgdma_reset(struct altera_tse_private *priv)
114{
115 u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
116 u32 txdescriplen = priv->txdescmem;
117 u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
118 u32 rxdescriplen = priv->rxdescmem;
119 struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
120 struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
121
122 /* Initialize descriptor memory to 0 */
123 memset(ptxdescripmem, 0, txdescriplen);
124 memset(prxdescripmem, 0, rxdescriplen);
125
126 iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
127 iowrite32(0, &ptxsgdma->control);
128
129 iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
130 iowrite32(0, &prxsgdma->control);
131}
132
133void sgdma_enable_rxirq(struct altera_tse_private *priv)
134{
135 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
136 priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
137 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
138}
139
140void sgdma_enable_txirq(struct altera_tse_private *priv)
141{
142 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
143 priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
144 tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
145}
146
147/* for SGDMA, RX interrupts remain enabled once enabled, so disabling is a no-op */
148void sgdma_disable_rxirq(struct altera_tse_private *priv)
149{
150}
151
152/* for SGDMA, TX interrupts remain enabled once enabled, so disabling is a no-op */
153void sgdma_disable_txirq(struct altera_tse_private *priv)
154{
155}
156
157void sgdma_clear_rxirq(struct altera_tse_private *priv)
158{
159 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
160 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
161}
162
163void sgdma_clear_txirq(struct altera_tse_private *priv)
164{
165 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
166 tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
167}
168
169/* transmits buffer through SGDMA. Returns number of buffers
170 * transmitted, 0 if not possible.
171 *
172 * tx_lock is held by the caller
173 */
174int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
175{
176 int pktstx = 0;
177 struct sgdma_descrip *descbase =
178 (struct sgdma_descrip *)priv->tx_dma_desc;
179
180 struct sgdma_descrip *cdesc = &descbase[0];
181 struct sgdma_descrip *ndesc = &descbase[1];
182
183 /* wait 'til the tx sgdma is ready for the next transmit request */
184 if (sgdma_txbusy(priv))
185 return 0;
186
187 sgdma_descrip(cdesc, /* current descriptor */
188 ndesc, /* next descriptor */
189 sgdma_txphysaddr(priv, ndesc),
190 buffer->dma_addr, /* address of packet to xmit */
191 0, /* write addr 0 for tx dma */
192 buffer->len, /* length of packet */
193 SGDMA_CONTROL_EOP, /* Generate EOP */
194 0, /* read fixed */
195 SGDMA_CONTROL_WR_FIXED); /* write fixed */
196
197 pktstx = sgdma_async_write(priv, cdesc);
198
199 /* enqueue the request to the pending transmit queue */
200 queue_tx(priv, buffer);
201
202 return 1;
203}
204
205
206/* tx_lock held to protect access to queued tx list
207 */
208u32 sgdma_tx_completions(struct altera_tse_private *priv)
209{
210 u32 ready = 0;
211 struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;
212
213 if (!sgdma_txbusy(priv) &&
214 ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
215 (dequeue_tx(priv))) {
216 ready = 1;
217 }
218
219 return ready;
220}
221
222int sgdma_add_rx_desc(struct altera_tse_private *priv,
223 struct tse_buffer *rxbuffer)
224{
225 queue_rx(priv, rxbuffer);
226 return sgdma_async_read(priv);
227}
228
229/* status is returned in the upper 16 bits,
230 * length is returned in the lower 16 bits
231 */
232u32 sgdma_rx_status(struct altera_tse_private *priv)
233{
234 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
235 struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
236 struct sgdma_descrip *desc = NULL;
237 int pktsrx;
238 unsigned int rxstatus = 0;
239 unsigned int pktlength = 0;
240 unsigned int pktstatus = 0;
241 struct tse_buffer *rxbuffer = NULL;
242
243 dma_sync_single_for_cpu(priv->device,
244 priv->rxdescphys,
245 priv->rxdescmem,
246 DMA_BIDIRECTIONAL);
247
248 desc = &base[0];
249 if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
250 (desc->status & SGDMA_STATUS_EOP)) {
251 pktlength = desc->bytes_xferred;
252 pktstatus = desc->status & 0x3f;
253 rxstatus = pktstatus;
254 rxstatus = rxstatus << 16;
255 rxstatus |= (pktlength & 0xffff);
256
257 desc->status = 0;
258
259 rxbuffer = dequeue_rx(priv);
260 if (rxbuffer == NULL)
261 netdev_err(priv->dev,
262 "sgdma rx and rx queue empty!\n");
263
264 /* kick the rx sgdma after reaping this descriptor */
265 pktsrx = sgdma_async_read(priv);
266 }
267
268 return rxstatus;
269}
270
271
272/* Private functions */
273static void sgdma_descrip(struct sgdma_descrip *desc,
274 struct sgdma_descrip *ndesc,
275 dma_addr_t ndesc_phys,
276 dma_addr_t raddr,
277 dma_addr_t waddr,
278 u16 length,
279 int generate_eop,
280 int rfixed,
281 int wfixed)
282{
283 /* Clear the next descriptor as not owned by hardware */
284 u32 ctrl = ndesc->control;
285 ctrl &= ~SGDMA_CONTROL_HW_OWNED;
286 ndesc->control = ctrl;
287
288 ctrl = 0;
289 ctrl = SGDMA_CONTROL_HW_OWNED;
290 ctrl |= generate_eop;
291 ctrl |= rfixed;
292 ctrl |= wfixed;
293
294 /* Channel is implicitly zero, initialized to 0 by default */
295
296 desc->raddr = raddr;
297 desc->waddr = waddr;
298 desc->next = lower_32_bits(ndesc_phys);
299 desc->control = ctrl;
300 desc->status = 0;
301 desc->rburst = 0;
302 desc->wburst = 0;
303 desc->bytes = length;
304 desc->bytes_xferred = 0;
305}
306
307/* If the hardware is busy, don't restart the async read.
308 * If the status register is 0 (the initial state), restart the async
309 * read, most likely when populating a receive buffer for the first time.
310 * If the status shows the hardware is no longer busy and has posted
311 * status bits, restart the async DMA read.
312 */
313static int sgdma_async_read(struct altera_tse_private *priv)
314{
315 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
316 struct sgdma_descrip *descbase =
317 (struct sgdma_descrip *)priv->rx_dma_desc;
318
319 struct sgdma_descrip *cdesc = &descbase[0];
320 struct sgdma_descrip *ndesc = &descbase[1];
321
322 unsigned int sts = ioread32(&csr->status);
323 struct tse_buffer *rxbuffer = NULL;
324
325 if (!sgdma_rxbusy(priv)) {
326 rxbuffer = queue_rx_peekhead(priv);
327 if (rxbuffer == NULL)
328 return 0;
329
330 sgdma_descrip(cdesc, /* current descriptor */
331 ndesc, /* next descriptor */
332 sgdma_rxphysaddr(priv, ndesc),
333 0, /* read addr 0 for rx dma */
334 rxbuffer->dma_addr, /* write addr for rx dma */
335 0, /* read 'til EOP */
336 0, /* EOP: NA for rx dma */
337 0, /* read fixed: NA for rx dma */
338 0); /* SOP: NA for rx DMA */
339
340 /* clear control and status */
341 iowrite32(0, &csr->control);
342
343 /* If status available, clear those bits */
344 if (sts & 0xf)
345 iowrite32(0xf, &csr->status);
346
347 dma_sync_single_for_device(priv->device,
348 priv->rxdescphys,
349 priv->rxdescmem,
350 DMA_BIDIRECTIONAL);
351
352 iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
353 &csr->next_descrip);
354
355 iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
356 &csr->control);
357
358 return 1;
359 }
360
361 return 0;
362}
363
364static int sgdma_async_write(struct altera_tse_private *priv,
365 struct sgdma_descrip *desc)
366{
367 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
368
369 if (sgdma_txbusy(priv))
370 return 0;
371
372 /* clear control and status */
373 iowrite32(0, &csr->control);
374 iowrite32(0x1f, &csr->status);
375
376 dma_sync_single_for_device(priv->device, priv->txdescphys,
377 priv->txdescmem, DMA_TO_DEVICE);
378
379 iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
380 &csr->next_descrip);
381
382 iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
383 &csr->control);
384
385 return 1;
386}
387
388static dma_addr_t
389sgdma_txphysaddr(struct altera_tse_private *priv,
390 struct sgdma_descrip *desc)
391{
392 dma_addr_t paddr = priv->txdescmem_busaddr;
393 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
394 return (dma_addr_t)((uintptr_t)paddr + offs);
395}
396
397static dma_addr_t
398sgdma_rxphysaddr(struct altera_tse_private *priv,
399 struct sgdma_descrip *desc)
400{
401 dma_addr_t paddr = priv->rxdescmem_busaddr;
402 uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
403 return (dma_addr_t)((uintptr_t)paddr + offs);
404}
405
406#define list_remove_head(list, entry, type, member) \
407 do { \
408 entry = NULL; \
409 if (!list_empty(list)) { \
410 entry = list_entry((list)->next, type, member); \
411 list_del_init(&entry->member); \
412 } \
413 } while (0)
414
415#define list_peek_head(list, entry, type, member) \
416 do { \
417 entry = NULL; \
418 if (!list_empty(list)) { \
419 entry = list_entry((list)->next, type, member); \
420 } \
421 } while (0)
422
423/* adds a tse_buffer to the tail of a tx buffer list.
424 * assumes the caller is managing and holding a mutual exclusion
425 * primitive to avoid simultaneous pushes/pops to the list.
426 */
427static void
428queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
429{
430 list_add_tail(&buffer->lh, &priv->txlisthd);
431}
432
433
434/* adds a tse_buffer to the tail of a rx buffer list
435 * assumes the caller is managing and holding a mutual exclusion
436 * primitive to avoid simultaneous pushes/pops to the list.
437 */
438static void
439queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
440{
441 list_add_tail(&buffer->lh, &priv->rxlisthd);
442}
443
444/* dequeues a tse_buffer from the transmit buffer list, returning
445 * NULL if the list is empty.
446 * assumes the caller is managing and holding a mutual exclusion
447 * primitive to avoid simultaneous pushes/pops to the list.
448 */
449static struct tse_buffer *
450dequeue_tx(struct altera_tse_private *priv)
451{
452 struct tse_buffer *buffer = NULL;
453 list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
454 return buffer;
455}
456
457/* dequeues a tse_buffer from the receive buffer list, returning
458 * NULL if the list is empty.
459 * assumes the caller is managing and holding a mutual exclusion
460 * primitive to avoid simultaneous pushes/pops to the list.
461 */
462static struct tse_buffer *
463dequeue_rx(struct altera_tse_private *priv)
464{
465 struct tse_buffer *buffer = NULL;
466 list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
467 return buffer;
468}
469
470/* returns the head tse_buffer of the receive buffer list without
471 * removing it, or NULL if the list is empty.
472 * assumes the caller is managing and holding a mutual exclusion
473 * primitive to avoid simultaneous pushes/pops to the list while the
474 * head is being examined.
475 */
476static struct tse_buffer *
477queue_rx_peekhead(struct altera_tse_private *priv)
478{
479 struct tse_buffer *buffer = NULL;
480 list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
481 return buffer;
482}
483
484/* check and return rx sgdma status without polling
485 */
486static int sgdma_rxbusy(struct altera_tse_private *priv)
487{
488 struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
489 return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
490}
491
492/* waits for the tx sgdma to finish its current operation, returns 0
493 * when it transitions to nonbusy, returns 1 if the operation times out
494 */
495static int sgdma_txbusy(struct altera_tse_private *priv)
496{
497 int delay = 0;
498 struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
499
500 /* if DMA is busy, wait for the current transaction to finish */
501 while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
502 udelay(1);
503
504 if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
505 netdev_err(priv->dev, "timeout waiting for tx dma\n");
506 return 1;
507 }
508 return 0;
509}
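Taken together, the transmit half above forms a one-deep pipeline: sgdma_tx_buffer() programs descriptor 0, chains descriptor 1, starts the controller through sgdma_async_write(), and parks the tse_buffer on txlisthd; sgdma_tx_completions() later reaps it once the HW_OWNED bit clears. A condensed sketch of that call order, busy-waiting here only for illustration (the real driver completes from the tx interrupt path, with tx_lock held as the comments above require):

/* Sketch of the one-deep tx pipeline implemented above; ring
 * bookkeeping and error handling in altera_tse_main.c are omitted.
 */
static void example_sgdma_tx_roundtrip(struct altera_tse_private *priv,
				       struct tse_buffer *buffer)
{
	if (!sgdma_tx_buffer(priv, buffer))
		return;		/* tx sgdma busy; caller retries later */

	/* normally reached later, from the tx interrupt path: */
	while (!sgdma_tx_completions(priv))
		udelay(1);	/* descriptor still owned by hardware */
}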
diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h
new file mode 100644
index 000000000000..07d471729dc4
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdma.h
@@ -0,0 +1,35 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_SGDMA_H__
18#define __ALTERA_SGDMA_H__
19
20void sgdma_reset(struct altera_tse_private *);
21void sgdma_enable_txirq(struct altera_tse_private *);
22void sgdma_enable_rxirq(struct altera_tse_private *);
23void sgdma_disable_rxirq(struct altera_tse_private *);
24void sgdma_disable_txirq(struct altera_tse_private *);
25void sgdma_clear_rxirq(struct altera_tse_private *);
26void sgdma_clear_txirq(struct altera_tse_private *);
27int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
28u32 sgdma_tx_completions(struct altera_tse_private *);
29int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
30void sgdma_status(struct altera_tse_private *);
31u32 sgdma_rx_status(struct altera_tse_private *);
32int sgdma_initialize(struct altera_tse_private *);
33void sgdma_uninitialize(struct altera_tse_private *);
34
35#endif /* __ALTERA_SGDMA_H__ */
diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h
new file mode 100644
index 000000000000..ba3334f35383
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_sgdmahw.h
@@ -0,0 +1,124 @@
1/* Altera TSE SGDMA and MSGDMA Linux driver
2 * Copyright (C) 2014 Altera Corporation. All rights reserved
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#ifndef __ALTERA_SGDMAHW_H__
18#define __ALTERA_SGDMAHW_H__
19
20/* SGDMA descriptor structure */
21struct sgdma_descrip {
22 unsigned int raddr; /* address of data to be read */
23 unsigned int pad1;
24 unsigned int waddr;
25 unsigned int pad2;
26 unsigned int next;
27 unsigned int pad3;
28 unsigned short bytes;
29 unsigned char rburst;
30 unsigned char wburst;
31 unsigned short bytes_xferred; /* 16 bits, bytes xferred */
32
33 /* bit 0: error
34 * bit 1: length error
35 * bit 2: crc error
36 * bit 3: truncated error
37 * bit 4: phy error
38 * bit 5: collision error
39 * bit 6: reserved
40 * bit 7: status eop for recv case
41 */
42 unsigned char status;
43
44 /* bit 0: eop
45 * bit 1: read_fixed
46 * bit 2: write fixed
47 * bits 3,4,5,6: Channel (always 0)
48 * bit 7: hardware owned
49 */
50 unsigned char control;
51} __packed;
52
53
54#define SGDMA_STATUS_ERR BIT(0)
55#define SGDMA_STATUS_LENGTH_ERR BIT(1)
56#define SGDMA_STATUS_CRC_ERR BIT(2)
57#define SGDMA_STATUS_TRUNC_ERR BIT(3)
58#define SGDMA_STATUS_PHY_ERR BIT(4)
59#define SGDMA_STATUS_COLL_ERR BIT(5)
60#define SGDMA_STATUS_EOP BIT(7)
61
62#define SGDMA_CONTROL_EOP BIT(0)
63#define SGDMA_CONTROL_RD_FIXED BIT(1)
64#define SGDMA_CONTROL_WR_FIXED BIT(2)
65
66/* Channel is always 0, so just zero initialize it */
67
68#define SGDMA_CONTROL_HW_OWNED BIT(7)
69
70/* SGDMA register space */
71struct sgdma_csr {
72 /* bit 0: error
73 * bit 1: eop
74 * bit 2: descriptor completed
75 * bit 3: chain completed
76 * bit 4: busy
77 * remainder reserved
78 */
79 u32 status;
80 u32 pad1[3];
81
82 /* bit 0: interrupt on error
83 * bit 1: interrupt on eop
84 * bit 2: interrupt after every descriptor
85 * bit 3: interrupt after last descrip in a chain
86 * bit 4: global interrupt enable
87 * bit 5: starts descriptor processing
88 * bit 6: stop core on dma error
89 * bit 7: interrupt on max descriptors
90 * bits 8-15: max descriptors to generate interrupt
91 * bit 16: Software reset
92 * bit 17: clears owned by hardware if 0, does not clear otherwise
93 * bit 18: enables descriptor polling mode
94 * bit 19-26: clocks before polling again
95 * bit 27-30: reserved
96 * bit 31: clear interrupt
97 */
98 u32 control;
99 u32 pad2[3];
100 u32 next_descrip;
101 u32 pad3[3];
102};
103
104
105#define SGDMA_STSREG_ERR BIT(0) /* Error */
106#define SGDMA_STSREG_EOP BIT(1) /* EOP */
107#define SGDMA_STSREG_DESCRIP BIT(2) /* Descriptor completed */
108#define SGDMA_STSREG_CHAIN BIT(3) /* Chain completed */
109#define SGDMA_STSREG_BUSY BIT(4) /* Controller busy */
110
111#define SGDMA_CTRLREG_IOE BIT(0) /* Interrupt on error */
112#define SGDMA_CTRLREG_IOEOP BIT(1) /* Interrupt on EOP */
113#define SGDMA_CTRLREG_IDESCRIP BIT(2) /* Interrupt after every descriptor */
114#define SGDMA_CTRLREG_ILASTD BIT(3) /* Interrupt after last descriptor */
115#define SGDMA_CTRLREG_INTEN BIT(4) /* Global Interrupt enable */
116#define SGDMA_CTRLREG_START BIT(5) /* starts descriptor processing */
117#define SGDMA_CTRLREG_STOPERR BIT(6) /* stop on dma error */
118#define SGDMA_CTRLREG_INTMAX BIT(7) /* Interrupt on max descriptors */
119#define SGDMA_CTRLREG_RESET BIT(16)/* Software reset */
120#define SGDMA_CTRLREG_COBHW BIT(17)/* Clears owned by hardware */
121#define SGDMA_CTRLREG_POLL BIT(18)/* enables descriptor polling mode */
122#define SGDMA_CTRLREG_CLRINT BIT(31)/* Clears interrupt */
123
124#endif /* __ALTERA_SGDMAHW_H__ */
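The status byte in struct sgdma_descrip is what sgdma_rx_status() masks with 0x3f before packing it into the upper half of its return value; the bits correspond to the SGDMA_STATUS_* definitions above. A small decoding sketch (the helper is hypothetical):

/* Sketch: translate a receive descriptor status byte into the error
 * categories defined above.
 */
static const char *example_sgdma_rx_error(unsigned char status)
{
	if (status & SGDMA_STATUS_CRC_ERR)
		return "crc";
	if (status & SGDMA_STATUS_LENGTH_ERR)
		return "length";
	if (status & SGDMA_STATUS_TRUNC_ERR)
		return "truncated";
	if (status & SGDMA_STATUS_PHY_ERR)
		return "phy";
	if (status & SGDMA_STATUS_COLL_ERR)
		return "collision";
	return (status & SGDMA_STATUS_ERR) ? "unspecified" : "ok";
}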
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
new file mode 100644
index 000000000000..8feeed05de0e
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -0,0 +1,486 @@
1/* Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 * Matthew Gerlach
14 *
15 * Original driver contributed by SLS.
16 * Major updates contributed by GlobalLogic
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms and conditions of the GNU General Public License,
20 * version 2, as published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope it will be useful, but WITHOUT
23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25 * more details.
26 *
27 * You should have received a copy of the GNU General Public License along with
28 * this program. If not, see <http://www.gnu.org/licenses/>.
29 */
30
31#ifndef __ALTERA_TSE_H__
32#define __ALTERA_TSE_H__
33
34#define ALTERA_TSE_RESOURCE_NAME "altera_tse"
35
36#include <linux/bitops.h>
37#include <linux/if_vlan.h>
38#include <linux/list.h>
39#include <linux/netdevice.h>
40#include <linux/phy.h>
41
42#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
43#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
44 * bytes
45 */
46/* Rx FIFO default settings */
47#define ALTERA_TSE_RX_SECTION_EMPTY 16
48#define ALTERA_TSE_RX_SECTION_FULL 0
49#define ALTERA_TSE_RX_ALMOST_EMPTY 8
50#define ALTERA_TSE_RX_ALMOST_FULL 8
51
52/* Tx FIFO default settings */
53#define ALTERA_TSE_TX_SECTION_EMPTY 16
54#define ALTERA_TSE_TX_SECTION_FULL 0
55#define ALTERA_TSE_TX_ALMOST_EMPTY 8
56#define ALTERA_TSE_TX_ALMOST_FULL 3
57
58/* MAC function configuration default settings */
59#define ALTERA_TSE_TX_IPG_LENGTH 12
60
61#define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1)
62
63/* MAC Command_Config Register Bit Definitions
64 */
65#define MAC_CMDCFG_TX_ENA BIT(0)
66#define MAC_CMDCFG_RX_ENA BIT(1)
67#define MAC_CMDCFG_XON_GEN BIT(2)
68#define MAC_CMDCFG_ETH_SPEED BIT(3)
69#define MAC_CMDCFG_PROMIS_EN BIT(4)
70#define MAC_CMDCFG_PAD_EN BIT(5)
71#define MAC_CMDCFG_CRC_FWD BIT(6)
72#define MAC_CMDCFG_PAUSE_FWD BIT(7)
73#define MAC_CMDCFG_PAUSE_IGNORE BIT(8)
74#define MAC_CMDCFG_TX_ADDR_INS BIT(9)
75#define MAC_CMDCFG_HD_ENA BIT(10)
76#define MAC_CMDCFG_EXCESS_COL BIT(11)
77#define MAC_CMDCFG_LATE_COL BIT(12)
78#define MAC_CMDCFG_SW_RESET BIT(13)
79#define MAC_CMDCFG_MHASH_SEL BIT(14)
80#define MAC_CMDCFG_LOOP_ENA BIT(15)
81#define MAC_CMDCFG_TX_ADDR_SEL(v) (((v) & 0x7) << 16)
82#define MAC_CMDCFG_MAGIC_ENA BIT(19)
83#define MAC_CMDCFG_SLEEP BIT(20)
84#define MAC_CMDCFG_WAKEUP BIT(21)
85#define MAC_CMDCFG_XOFF_GEN BIT(22)
86#define MAC_CMDCFG_CNTL_FRM_ENA BIT(23)
87#define MAC_CMDCFG_NO_LGTH_CHECK BIT(24)
88#define MAC_CMDCFG_ENA_10 BIT(25)
89#define MAC_CMDCFG_RX_ERR_DISC BIT(26)
90#define MAC_CMDCFG_DISABLE_READ_TIMEOUT BIT(27)
91#define MAC_CMDCFG_CNT_RESET BIT(31)
92
93#define MAC_CMDCFG_TX_ENA_GET(v) GET_BIT_VALUE(v, 0)
94#define MAC_CMDCFG_RX_ENA_GET(v) GET_BIT_VALUE(v, 1)
95#define MAC_CMDCFG_XON_GEN_GET(v) GET_BIT_VALUE(v, 2)
96#define MAC_CMDCFG_ETH_SPEED_GET(v) GET_BIT_VALUE(v, 3)
97#define MAC_CMDCFG_PROMIS_EN_GET(v) GET_BIT_VALUE(v, 4)
98#define MAC_CMDCFG_PAD_EN_GET(v) GET_BIT_VALUE(v, 5)
99#define MAC_CMDCFG_CRC_FWD_GET(v) GET_BIT_VALUE(v, 6)
100#define MAC_CMDCFG_PAUSE_FWD_GET(v) GET_BIT_VALUE(v, 7)
101#define MAC_CMDCFG_PAUSE_IGNORE_GET(v) GET_BIT_VALUE(v, 8)
102#define MAC_CMDCFG_TX_ADDR_INS_GET(v) GET_BIT_VALUE(v, 9)
103#define MAC_CMDCFG_HD_ENA_GET(v) GET_BIT_VALUE(v, 10)
104#define MAC_CMDCFG_EXCESS_COL_GET(v) GET_BIT_VALUE(v, 11)
105#define MAC_CMDCFG_LATE_COL_GET(v) GET_BIT_VALUE(v, 12)
106#define MAC_CMDCFG_SW_RESET_GET(v) GET_BIT_VALUE(v, 13)
107#define MAC_CMDCFG_MHASH_SEL_GET(v) GET_BIT_VALUE(v, 14)
108#define MAC_CMDCFG_LOOP_ENA_GET(v) GET_BIT_VALUE(v, 15)
109#define MAC_CMDCFG_TX_ADDR_SEL_GET(v) (((v) >> 16) & 0x7)
110#define MAC_CMDCFG_MAGIC_ENA_GET(v) GET_BIT_VALUE(v, 19)
111#define MAC_CMDCFG_SLEEP_GET(v) GET_BIT_VALUE(v, 20)
112#define MAC_CMDCFG_WAKEUP_GET(v) GET_BIT_VALUE(v, 21)
113#define MAC_CMDCFG_XOFF_GEN_GET(v) GET_BIT_VALUE(v, 22)
114#define MAC_CMDCFG_CNTL_FRM_ENA_GET(v) GET_BIT_VALUE(v, 23)
115#define MAC_CMDCFG_NO_LGTH_CHECK_GET(v) GET_BIT_VALUE(v, 24)
116#define MAC_CMDCFG_ENA_10_GET(v) GET_BIT_VALUE(v, 25)
117#define MAC_CMDCFG_RX_ERR_DISC_GET(v) GET_BIT_VALUE(v, 26)
118#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
119#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
120
121/* MDIO registers within MAC register Space
122 */
123struct altera_tse_mdio {
124 u32 control; /* PHY device operation control register */
125 u32 status; /* PHY device operation status register */
126 u32 phy_id1; /* Bits 31:16 of PHY identifier */
127 u32 phy_id2; /* Bits 15:0 of PHY identifier */
128 u32 auto_negotiation_advertisement; /* Auto-negotiation
129 * advertisement
130 * register
131 */
132 u32 remote_partner_base_page_ability;
133
134 u32 reg6;
135 u32 reg7;
136 u32 reg8;
137 u32 reg9;
138 u32 rega;
139 u32 regb;
140 u32 regc;
141 u32 regd;
142 u32 rege;
143 u32 regf;
144 u32 reg10;
145 u32 reg11;
146 u32 reg12;
147 u32 reg13;
148 u32 reg14;
149 u32 reg15;
150 u32 reg16;
151 u32 reg17;
152 u32 reg18;
153 u32 reg19;
154 u32 reg1a;
155 u32 reg1b;
156 u32 reg1c;
157 u32 reg1d;
158 u32 reg1e;
159 u32 reg1f;
160};
161
162/* MAC register Space. Note that some of these registers may or may not be
163 * present depending upon options chosen by the user when the core was
164 * configured and built. Please consult the Altera Triple Speed Ethernet User
165 * Guide for details.
166 */
167struct altera_tse_mac {
168 /* Bits 15:0: MegaCore function revision (0x0800). Bit 31:16: Customer
169 * specific revision
170 */
171 u32 megacore_revision;
172 /* Provides a memory location for user applications to test the device
173 * memory operation.
174 */
175 u32 scratch_pad;
176 /* The host processor uses this register to control and configure the
177 * MAC block
178 */
179 u32 command_config;
180 /* 32-bit primary MAC address word 0 bits 0 to 31 of the primary
181 * MAC address
182 */
183 u32 mac_addr_0;
184 /* 32-bit primary MAC address word 1 bits 32 to 47 of the primary
185 * MAC address
186 */
187 u32 mac_addr_1;
188 /* 14-bit maximum frame length; the MAC receive logic counts longer frames as oversized */
189 u32 frm_length;
190 /* The pause quanta is used in each pause frame sent to a remote
191 * Ethernet device, in increments of 512 Ethernet bit times
192 */
193 u32 pause_quanta;
194 /* 12-bit receive FIFO section-empty threshold */
195 u32 rx_section_empty;
196 /* 12-bit receive FIFO section-full threshold */
197 u32 rx_section_full;
198 /* 12-bit transmit FIFO section-empty threshold */
199 u32 tx_section_empty;
200 /* 12-bit transmit FIFO section-full threshold */
201 u32 tx_section_full;
202 /* 12-bit receive FIFO almost-empty threshold */
203 u32 rx_almost_empty;
204 /* 12-bit receive FIFO almost-full threshold */
205 u32 rx_almost_full;
206 /* 12-bit transmit FIFO almost-empty threshold */
207 u32 tx_almost_empty;
208 /* 12-bit transmit FIFO almost-full threshold */
209 u32 tx_almost_full;
210 /* MDIO address of PHY Device 0. Bits 0 to 4 hold a 5-bit PHY address */
211 u32 mdio_phy0_addr;
212 /* MDIO address of PHY Device 1. Bits 0 to 4 hold a 5-bit PHY address */
213 u32 mdio_phy1_addr;
214
215 /* Bit[15:0]: 16-bit holdoff quanta */
216 u32 holdoff_quant;
217
218 /* only if 100/1000 BaseX PCS, reserved otherwise */
219 u32 reserved1[5];
220
221 /* Minimum IPG between consecutive transmit frame in terms of bytes */
222 u32 tx_ipg_length;
223
224 /* IEEE 802.3 oEntity Managed Object Support */
225
226 /* The MAC addresses */
227 u32 mac_id_1;
228 u32 mac_id_2;
229
230 /* Number of frames transmitted without error including pause frames */
231 u32 frames_transmitted_ok;
232 /* Number of frames received without error including pause frames */
233 u32 frames_received_ok;
234 /* Number of frames received with a CRC error */
235 u32 frames_check_sequence_errors;
236 /* Frame received with an alignment error */
237 u32 alignment_errors;
238 /* Sum of payload and padding octets of frames transmitted without
239 * error
240 */
241 u32 octets_transmitted_ok;
242 /* Sum of payload and padding octets of frames received without error */
243 u32 octets_received_ok;
244
245 /* IEEE 802.3 oPausedEntity Managed Object Support */
246
247 /* Number of transmitted pause frames */
248 u32 tx_pause_mac_ctrl_frames;
249 /* Number of Received pause frames */
250 u32 rx_pause_mac_ctrl_frames;
251
252 /* IETF MIB (MIB-II) Object Support */
253
254 /* Number of frames received with error */
255 u32 if_in_errors;
256 /* Number of frames transmitted with error */
257 u32 if_out_errors;
258 /* Number of valid received unicast frames */
259 u32 if_in_ucast_pkts;
260 /* Number of valid received multicast frames (without pause) */
261 u32 if_in_multicast_pkts;
262 /* Number of valid received broadcast frames */
263 u32 if_in_broadcast_pkts;
264 u32 if_out_discards;
265 /* The number of valid unicast frames transmitted */
266 u32 if_out_ucast_pkts;
267 /* The number of valid multicast frames transmitted,
268 * excluding pause frames
269 */
270 u32 if_out_multicast_pkts;
271 u32 if_out_broadcast_pkts;
272
273 /* IETF RMON MIB Object Support */
274
275 /* Counts the number of dropped packets due to internal errors
276 * of the MAC client.
277 */
278 u32 ether_stats_drop_events;
279 /* Total number of bytes received. Good and bad frames. */
280 u32 ether_stats_octets;
281 /* Total number of packets received. Counts good and bad packets. */
282 u32 ether_stats_pkts;
283 /* Number of packets received with less than 64 bytes. */
284 u32 ether_stats_undersize_pkts;
285 /* The number of frames received that are longer than the
286 * value configured in the frm_length register
287 */
288 u32 ether_stats_oversize_pkts;
289 /* Number of received packet with 64 bytes */
290 u32 ether_stats_pkts_64_octets;
291 /* Frames (good and bad) with 65 to 127 bytes */
292 u32 ether_stats_pkts_65to127_octets;
293 /* Frames (good and bad) with 128 to 255 bytes */
294 u32 ether_stats_pkts_128to255_octets;
295 /* Frames (good and bad) with 256 to 511 bytes */
296 u32 ether_stats_pkts_256to511_octets;
297 /* Frames (good and bad) with 512 to 1023 bytes */
298 u32 ether_stats_pkts_512to1023_octets;
299 /* Frames (good and bad) with 1024 to 1518 bytes */
300 u32 ether_stats_pkts_1024to1518_octets;
301
302 /* Any frame length from 1519 to the maximum length configured in the
303 * frm_length register, if it is greater than 1518
304 */
305 u32 ether_stats_pkts_1519tox_octets;
306 /* Too long frames with CRC error */
307 u32 ether_stats_jabbers;
308 /* Too short frames with CRC error */
309 u32 ether_stats_fragments;
310
311 u32 reserved2;
312
313 /* FIFO control register */
314 u32 tx_cmd_stat;
315 u32 rx_cmd_stat;
316
317 /* Extended Statistics Counters */
318 u32 msb_octets_transmitted_ok;
319 u32 msb_octets_received_ok;
320 u32 msb_ether_stats_octets;
321
322 u32 reserved3;
323
324 /* Multicast address resolution table, mapped in the controller address
325 * space
326 */
327 u32 hash_table[64];
328
329 /* Registers 0 to 31 within PHY device 0/1 connected to the MDIO PHY
330 * management interface
331 */
332 struct altera_tse_mdio mdio_phy0;
333 struct altera_tse_mdio mdio_phy1;
334
335 /* 4 Supplemental MAC Addresses */
336 u32 supp_mac_addr_0_0;
337 u32 supp_mac_addr_0_1;
338 u32 supp_mac_addr_1_0;
339 u32 supp_mac_addr_1_1;
340 u32 supp_mac_addr_2_0;
341 u32 supp_mac_addr_2_1;
342 u32 supp_mac_addr_3_0;
343 u32 supp_mac_addr_3_1;
344
345 u32 reserved4[8];
346
347 /* IEEE 1588v2 Feature */
348 u32 tx_period;
349 u32 tx_adjust_fns;
350 u32 tx_adjust_ns;
351 u32 rx_period;
352 u32 rx_adjust_fns;
353 u32 rx_adjust_ns;
354
355 u32 reserved5[42];
356};
357
358/* Transmit and Receive Command Registers Bit Definitions
359 */
360#define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17)
361#define ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 BIT(18)
362#define ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16 BIT(25)
363
364/* Wrapper around a pointer to a socket buffer,
365 * so a DMA handle can be stored along with the buffer
366 */
367struct tse_buffer {
368 struct list_head lh;
369 struct sk_buff *skb;
370 dma_addr_t dma_addr;
371 u32 len;
372 int mapped_as_page;
373};
374
375struct altera_tse_private;
376
377#define ALTERA_DTYPE_SGDMA 1
378#define ALTERA_DTYPE_MSGDMA 2
379
380/* standard DMA interface for SGDMA and MSGDMA */
381struct altera_dmaops {
382 int altera_dtype;
383 int dmamask;
384 void (*reset_dma)(struct altera_tse_private *);
385 void (*enable_txirq)(struct altera_tse_private *);
386 void (*enable_rxirq)(struct altera_tse_private *);
387 void (*disable_txirq)(struct altera_tse_private *);
388 void (*disable_rxirq)(struct altera_tse_private *);
389 void (*clear_txirq)(struct altera_tse_private *);
390 void (*clear_rxirq)(struct altera_tse_private *);
391 int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
392 u32 (*tx_completions)(struct altera_tse_private *);
393 int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
394 u32 (*get_rx_status)(struct altera_tse_private *);
395 int (*init_dma)(struct altera_tse_private *);
396 void (*uninit_dma)(struct altera_tse_private *);
397};
398
399/* This structure is private to each device.
400 */
401struct altera_tse_private {
402 struct net_device *dev;
403 struct device *device;
404 struct napi_struct napi;
405
406 /* MAC address space */
407 struct altera_tse_mac __iomem *mac_dev;
408
409 /* TSE Revision */
410 u32 revision;
411
412 /* mSGDMA Rx Dispatcher address space */
413 void __iomem *rx_dma_csr;
414 void __iomem *rx_dma_desc;
415 void __iomem *rx_dma_resp;
416
417 /* mSGDMA Tx Dispatcher address space */
418 void __iomem *tx_dma_csr;
419 void __iomem *tx_dma_desc;
420
421 /* Rx buffers queue */
422 struct tse_buffer *rx_ring;
423 u32 rx_cons;
424 u32 rx_prod;
425 u32 rx_ring_size;
426 u32 rx_dma_buf_sz;
427
428 /* Tx ring buffer */
429 struct tse_buffer *tx_ring;
430 u32 tx_prod;
431 u32 tx_cons;
432 u32 tx_ring_size;
433
434 /* Interrupts */
435 u32 tx_irq;
436 u32 rx_irq;
437
438 /* RX/TX MAC FIFO configs */
439 u32 tx_fifo_depth;
440 u32 rx_fifo_depth;
441 u32 max_mtu;
442
443 /* Hash filter settings */
444 u32 hash_filter;
445 u32 added_unicast;
446
447 /* Descriptor memory info for managing SGDMA */
448 u32 txdescmem;
449 u32 rxdescmem;
450 dma_addr_t rxdescmem_busaddr;
451 dma_addr_t txdescmem_busaddr;
452 u32 txctrlreg;
453 u32 rxctrlreg;
454 dma_addr_t rxdescphys;
455 dma_addr_t txdescphys;
456
457 struct list_head txlisthd;
458 struct list_head rxlisthd;
459
460 /* MAC command_config register protection */
461 spinlock_t mac_cfg_lock;
462 /* Tx path protection */
463 spinlock_t tx_lock;
464 /* Rx DMA & interrupt control protection */
465 spinlock_t rxdma_irq_lock;
466
467 /* PHY */
468 int phy_addr; /* PHY's MDIO address, -1 for autodetection */
469 phy_interface_t phy_iface;
470 struct mii_bus *mdio;
471 struct phy_device *phydev;
472 int oldspeed;
473 int oldduplex;
474 int oldlink;
475
476 /* ethtool msglvl option */
477 u32 msg_enable;
478
479 struct altera_dmaops *dmaops;
480};
481
482/* Function prototypes
483 */
484void altera_tse_set_ethtool_ops(struct net_device *);
485
486#endif /* __ALTERA_TSE_H__ */
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
new file mode 100644
index 000000000000..319ca74f5e74
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -0,0 +1,235 @@
1/* Ethtool support for Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 *
14 * Original driver contributed by SLS.
15 * Major updates contributed by GlobalLogic
16 *
17 * This program is free software; you can redistribute it and/or modify it
18 * under the terms and conditions of the GNU General Public License,
19 * version 2, as published by the Free Software Foundation.
20 *
21 * This program is distributed in the hope it will be useful, but WITHOUT
22 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
23 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24 * more details.
25 *
26 * You should have received a copy of the GNU General Public License along with
27 * this program. If not, see <http://www.gnu.org/licenses/>.
28 */
29
30#include <linux/ethtool.h>
31#include <linux/kernel.h>
32#include <linux/netdevice.h>
33#include <linux/phy.h>
34
35#include "altera_tse.h"
36
37#define TSE_STATS_LEN 31
38#define TSE_NUM_REGS 128
39
40static char const stat_gstrings[][ETH_GSTRING_LEN] = {
41 "tx_packets",
42 "rx_packets",
43 "rx_crc_errors",
44 "rx_align_errors",
45 "tx_bytes",
46 "rx_bytes",
47 "tx_pause",
48 "rx_pause",
49 "rx_errors",
50 "tx_errors",
51 "rx_unicast",
52 "rx_multicast",
53 "rx_broadcast",
54 "tx_discards",
55 "tx_unicast",
56 "tx_multicast",
57 "tx_broadcast",
58 "ether_drops",
59 "rx_total_bytes",
60 "rx_total_packets",
61 "rx_undersize",
62 "rx_oversize",
63 "rx_64_bytes",
64 "rx_65_127_bytes",
65 "rx_128_255_bytes",
66 "rx_256_511_bytes",
67 "rx_512_1023_bytes",
68 "rx_1024_1518_bytes",
69 "rx_gte_1519_bytes",
70 "rx_jabbers",
71 "rx_runts",
72};
73
74static void tse_get_drvinfo(struct net_device *dev,
75 struct ethtool_drvinfo *info)
76{
77 struct altera_tse_private *priv = netdev_priv(dev);
78 u32 rev = ioread32(&priv->mac_dev->megacore_revision);
79
80 strcpy(info->driver, "Altera TSE MAC IP Driver");
81 strcpy(info->version, "v8.0");
82 snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
83 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);
84 sprintf(info->bus_info, "platform");
85}
86
87/* Fill in a buffer with the strings which correspond to the
88 * stats
89 */
90static void tse_gstrings(struct net_device *dev, u32 stringset, u8 *buf)
91{
92 memcpy(buf, stat_gstrings, TSE_STATS_LEN * ETH_GSTRING_LEN);
93}
94
95static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
96 u64 *buf)
97{
98 struct altera_tse_private *priv = netdev_priv(dev);
99 struct altera_tse_mac *mac = priv->mac_dev;
100 u64 ext;
101
102 buf[0] = ioread32(&mac->frames_transmitted_ok);
103 buf[1] = ioread32(&mac->frames_received_ok);
104 buf[2] = ioread32(&mac->frames_check_sequence_errors);
105 buf[3] = ioread32(&mac->alignment_errors);
106
107 /* Extended aOctetsTransmittedOK counter */
108 ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32;
109 ext |= ioread32(&mac->octets_transmitted_ok);
110 buf[4] = ext;
111
112 /* Extended aOctetsReceivedOK counter */
113 ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32;
114 ext |= ioread32(&mac->octets_received_ok);
115 buf[5] = ext;
116
117 buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames);
118 buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames);
119 buf[8] = ioread32(&mac->if_in_errors);
120 buf[9] = ioread32(&mac->if_out_errors);
121 buf[10] = ioread32(&mac->if_in_ucast_pkts);
122 buf[11] = ioread32(&mac->if_in_multicast_pkts);
123 buf[12] = ioread32(&mac->if_in_broadcast_pkts);
124 buf[13] = ioread32(&mac->if_out_discards);
125 buf[14] = ioread32(&mac->if_out_ucast_pkts);
126 buf[15] = ioread32(&mac->if_out_multicast_pkts);
127 buf[16] = ioread32(&mac->if_out_broadcast_pkts);
128 buf[17] = ioread32(&mac->ether_stats_drop_events);
129
130 /* Extended etherStatsOctets counter */
131 ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32;
132 ext |= ioread32(&mac->ether_stats_octets);
133 buf[18] = ext;
134
135 buf[19] = ioread32(&mac->ether_stats_pkts);
136 buf[20] = ioread32(&mac->ether_stats_undersize_pkts);
137 buf[21] = ioread32(&mac->ether_stats_oversize_pkts);
138 buf[22] = ioread32(&mac->ether_stats_pkts_64_octets);
139 buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets);
140 buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets);
141 buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets);
142 buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets);
143 buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets);
144 buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets);
145 buf[29] = ioread32(&mac->ether_stats_jabbers);
146 buf[30] = ioread32(&mac->ether_stats_fragments);
147}
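
/* The split-counter pattern used above, as a sketch (hypothetical
 * helper, not part of the driver): each extended statistic is a 64-bit
 * value exposed as an MSB/LSB register pair and assembled as
 * (msb << 32) | lsb. Note the two reads are not atomic, so a counter
 * rolling over between the ioread32() calls can be observed torn.
 */
static inline u64 tse_read_split_counter(void __iomem *msb, void __iomem *lsb)
{
	return ((u64)ioread32(msb) << 32) | ioread32(lsb);
}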
148
149static int tse_sset_count(struct net_device *dev, int sset)
150{
151 switch (sset) {
152 case ETH_SS_STATS:
153 return TSE_STATS_LEN;
154 default:
155 return -EOPNOTSUPP;
156 }
157}
158
159static u32 tse_get_msglevel(struct net_device *dev)
160{
161 struct altera_tse_private *priv = netdev_priv(dev);
162 return priv->msg_enable;
163}
164
165static void tse_set_msglevel(struct net_device *dev, uint32_t data)
166{
167 struct altera_tse_private *priv = netdev_priv(dev);
168 priv->msg_enable = data;
169}
170
171static int tse_reglen(struct net_device *dev)
172{
173 return TSE_NUM_REGS * sizeof(u32);
174}
175
176static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
177 void *regbuf)
178{
179 int i;
180 struct altera_tse_private *priv = netdev_priv(dev);
181 u32 *tse_mac_regs = (u32 *)priv->mac_dev;
182 u32 *buf = regbuf;
183
184 /* Set version to a known value, so ethtool knows
185 * how to do any special formatting of this data.
186 * This version number will need to change if and
187 * when this register table is changed.
188 */
189
190 regs->version = 1;
191
192 for (i = 0; i < TSE_NUM_REGS; i++)
193 buf[i] = ioread32(&tse_mac_regs[i]);
194}
195
196static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
197{
198 struct altera_tse_private *priv = netdev_priv(dev);
199 struct phy_device *phydev = priv->phydev;
200
201 if (phydev == NULL)
202 return -ENODEV;
203
204 return phy_ethtool_gset(phydev, cmd);
205}
206
207static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
208{
209 struct altera_tse_private *priv = netdev_priv(dev);
210 struct phy_device *phydev = priv->phydev;
211
212 if (phydev == NULL)
213 return -ENODEV;
214
215 return phy_ethtool_sset(phydev, cmd);
216}
217
218static const struct ethtool_ops tse_ethtool_ops = {
219 .get_drvinfo = tse_get_drvinfo,
220 .get_regs_len = tse_reglen,
221 .get_regs = tse_get_regs,
222 .get_link = ethtool_op_get_link,
223 .get_settings = tse_get_settings,
224 .set_settings = tse_set_settings,
225 .get_strings = tse_gstrings,
226 .get_sset_count = tse_sset_count,
227 .get_ethtool_stats = tse_fill_stats,
228 .get_msglevel = tse_get_msglevel,
229 .set_msglevel = tse_set_msglevel,
230};
231
232void altera_tse_set_ethtool_ops(struct net_device *netdev)
233{
234 SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
235}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
new file mode 100644
index 000000000000..6006ef275107
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -0,0 +1,1543 @@
1/* Altera Triple-Speed Ethernet MAC driver
2 * Copyright (C) 2008-2014 Altera Corporation. All rights reserved
3 *
4 * Contributors:
5 * Dalon Westergreen
6 * Thomas Chou
7 * Ian Abbott
8 * Yuriy Kozlov
9 * Tobias Klauser
10 * Andriy Smolskyy
11 * Roman Bulgakov
12 * Dmytro Mytarchuk
13 * Matthew Gerlach
14 *
15 * Original driver contributed by SLS.
16 * Major updates contributed by GlobalLogic
17 *
18 * This program is free software; you can redistribute it and/or modify it
19 * under the terms and conditions of the GNU General Public License,
20 * version 2, as published by the Free Software Foundation.
21 *
22 * This program is distributed in the hope it will be useful, but WITHOUT
23 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
24 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
25 * more details.
26 *
27 * You should have received a copy of the GNU General Public License along with
28 * this program. If not, see <http://www.gnu.org/licenses/>.
29 */
30
31#include <linux/atomic.h>
32#include <linux/delay.h>
33#include <linux/etherdevice.h>
34#include <linux/if_vlan.h>
35#include <linux/init.h>
36#include <linux/interrupt.h>
37#include <linux/io.h>
38#include <linux/kernel.h>
39#include <linux/module.h>
40#include <linux/netdevice.h>
41#include <linux/of_device.h>
42#include <linux/of_mdio.h>
43#include <linux/of_net.h>
44#include <linux/of_platform.h>
45#include <linux/phy.h>
46#include <linux/platform_device.h>
47#include <linux/skbuff.h>
48#include <asm/cacheflush.h>
49
50#include "altera_utils.h"
51#include "altera_tse.h"
52#include "altera_sgdma.h"
53#include "altera_msgdma.h"
54
55static atomic_t instance_count = ATOMIC_INIT(~0);
56/* Module parameters */
57static int debug = -1;
58module_param(debug, int, S_IRUGO | S_IWUSR);
59MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
60
61static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
62 NETIF_MSG_LINK | NETIF_MSG_IFUP |
63 NETIF_MSG_IFDOWN);
64
65#define RX_DESCRIPTORS 64
66static int dma_rx_num = RX_DESCRIPTORS;
67module_param(dma_rx_num, int, S_IRUGO | S_IWUSR);
68MODULE_PARM_DESC(dma_rx_num, "Number of descriptors in the RX list");
69
70#define TX_DESCRIPTORS 64
71static int dma_tx_num = TX_DESCRIPTORS;
72module_param(dma_tx_num, int, S_IRUGO | S_IWUSR);
73MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
74
75
76#define POLL_PHY (-1)
77
78/* Make sure the DMA buffer size is larger than the max frame size
79 * plus some alignment offset and a VLAN header. With a max frame
80 * size of 1518, a VLAN header adds another 4 bytes and alignment
81 * headroom adds 2 more, so 2048 leaves plenty of room.
82 */
83#define ALTERA_RXDMABUFFER_SIZE 2048
84
85/* Allow network stack to resume queueing packets after we've
86 * finished transmitting at least 1/4 of the packets in the queue.
87 */
88#define TSE_TX_THRESH(x) (x->tx_ring_size / 4)
89
90#define TXQUEUESTOP_THRESHHOLD 2
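
/* Example with the default 64-entry Tx ring: tse_start_xmit() stops
 * the queue once tse_tx_avail() drops to TXQUEUESTOP_THRESHHOLD (2)
 * slots or fewer, and tse_tx_complete() restarts it only once more
 * than TSE_TX_THRESH (64 / 4 = 16) slots are free again, providing
 * hysteresis so the queue does not thrash on and off.
 */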
91
92static struct of_device_id altera_tse_ids[];
93
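/* Number of free Tx descriptors. tx_prod and tx_cons are free-running
 * unsigned counters (reduced modulo the ring size only when indexing),
 * so tx_cons + tx_ring_size - tx_prod - 1 is the free count even after
 * wraparound. Worked example with hypothetical values: for a 64-entry
 * ring with tx_prod = 70 and tx_cons = 10, 10 + 64 - 70 - 1 = 3 slots
 * remain; one slot is always held back so a full ring is
 * distinguishable from an empty one.
 */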
94static inline u32 tse_tx_avail(struct altera_tse_private *priv)
95{
96 return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
97}
98
99/* MDIO specific functions
100 */
101static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
102{
103 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
104 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
105 u32 data;
106
107 /* set MDIO address */
108 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
109
110 /* get the data */
111 data = ioread32(&mdio_regs[regnum]) & 0xffff;
112 return data;
113}
114
115static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
116 u16 value)
117{
118 struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv;
119 unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0;
120
121 /* set MDIO address */
122 iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr);
123
124 /* write the data */
125 iowrite32((u32) value, &mdio_regs[regnum]);
126 return 0;
127}
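
/* Usage sketch with hypothetical values: the MDIO space is a register
 * window, so after iowrite32(3, &mac->mdio_phy0_addr) selects the PHY
 * at address 3, its BMSR (MII register 1) is simply mdio_regs[1]:
 *
 *	int bmsr = altera_tse_mdio_read(bus, 3, MII_BMSR);
 */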
128
129static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
130{
131 struct altera_tse_private *priv = netdev_priv(dev);
132 int ret;
133 int i;
134 struct device_node *mdio_node = NULL;
135 struct mii_bus *mdio = NULL;
136 struct device_node *child_node = NULL;
137
138 for_each_child_of_node(priv->device->of_node, child_node) {
139 if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
140 mdio_node = child_node;
141 break;
142 }
143 }
144
145 if (mdio_node) {
146 netdev_dbg(dev, "FOUND MDIO subnode\n");
147 } else {
148 netdev_dbg(dev, "NO MDIO subnode\n");
149 return 0;
150 }
151
152 mdio = mdiobus_alloc();
153 if (mdio == NULL) {
154 netdev_err(dev, "Error allocating MDIO bus\n");
155 return -ENOMEM;
156 }
157
158 mdio->name = ALTERA_TSE_RESOURCE_NAME;
159 mdio->read = &altera_tse_mdio_read;
160 mdio->write = &altera_tse_mdio_write;
161 snprintf(mdio->id, MII_BUS_ID_SIZE, "%s-%u", mdio->name, id);
162
163 mdio->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
164 if (mdio->irq == NULL) {
165 ret = -ENOMEM;
166 goto out_free_mdio;
167 }
168 for (i = 0; i < PHY_MAX_ADDR; i++)
169 mdio->irq[i] = PHY_POLL;
170
171 mdio->priv = priv->mac_dev;
172 mdio->parent = priv->device;
173
174 ret = of_mdiobus_register(mdio, mdio_node);
175 if (ret != 0) {
176 netdev_err(dev, "Cannot register MDIO bus %s\n",
177 mdio->id);
178 goto out_free_mdio_irq;
179 }
180
181 if (netif_msg_drv(priv))
182 netdev_info(dev, "MDIO bus %s: created\n", mdio->id);
183
184 priv->mdio = mdio;
185 return 0;
186out_free_mdio_irq:
187 kfree(mdio->irq);
188out_free_mdio:
189 mdiobus_free(mdio);
190 mdio = NULL;
191 return ret;
192}
193
194static void altera_tse_mdio_destroy(struct net_device *dev)
195{
196 struct altera_tse_private *priv = netdev_priv(dev);
197
198 if (priv->mdio == NULL)
199 return;
200
201 if (netif_msg_drv(priv))
202 netdev_info(dev, "MDIO bus %s: removed\n",
203 priv->mdio->id);
204
205 mdiobus_unregister(priv->mdio);
206 kfree(priv->mdio->irq);
207 mdiobus_free(priv->mdio);
208 priv->mdio = NULL;
209}
210
211static int tse_init_rx_buffer(struct altera_tse_private *priv,
212 struct tse_buffer *rxbuffer, int len)
213{
214 rxbuffer->skb = netdev_alloc_skb_ip_align(priv->dev, len);
215 if (!rxbuffer->skb)
216 return -ENOMEM;
217
218 rxbuffer->dma_addr = dma_map_single(priv->device, rxbuffer->skb->data,
219 len,
220 DMA_FROM_DEVICE);
221
222 if (dma_mapping_error(priv->device, rxbuffer->dma_addr)) {
223 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
224 dev_kfree_skb_any(rxbuffer->skb);
225 return -EINVAL;
226 }
227 rxbuffer->len = len;
228 return 0;
229}
230
231static void tse_free_rx_buffer(struct altera_tse_private *priv,
232 struct tse_buffer *rxbuffer)
233{
234 struct sk_buff *skb = rxbuffer->skb;
235 dma_addr_t dma_addr = rxbuffer->dma_addr;
236
237 if (skb != NULL) {
238 if (dma_addr)
239 dma_unmap_single(priv->device, dma_addr,
240 rxbuffer->len,
241 DMA_FROM_DEVICE);
242 dev_kfree_skb_any(skb);
243 rxbuffer->skb = NULL;
244 rxbuffer->dma_addr = 0;
245 }
246}
247
248/* Unmap and free Tx buffer resources
249 */
250static void tse_free_tx_buffer(struct altera_tse_private *priv,
251 struct tse_buffer *buffer)
252{
253 if (buffer->dma_addr) {
254 if (buffer->mapped_as_page)
255 dma_unmap_page(priv->device, buffer->dma_addr,
256 buffer->len, DMA_TO_DEVICE);
257 else
258 dma_unmap_single(priv->device, buffer->dma_addr,
259 buffer->len, DMA_TO_DEVICE);
260 buffer->dma_addr = 0;
261 }
262 if (buffer->skb) {
263 dev_kfree_skb_any(buffer->skb);
264 buffer->skb = NULL;
265 }
266}
267
268static int alloc_init_skbufs(struct altera_tse_private *priv)
269{
270 unsigned int rx_descs = priv->rx_ring_size;
271 unsigned int tx_descs = priv->tx_ring_size;
272 int ret = -ENOMEM;
273 int i;
274
275 /* Create Rx ring buffer */
276 priv->rx_ring = kcalloc(rx_descs, sizeof(struct tse_buffer),
277 GFP_KERNEL);
278 if (!priv->rx_ring)
279 goto err_rx_ring;
280
281 /* Create Tx ring buffer */
282 priv->tx_ring = kcalloc(tx_descs, sizeof(struct tse_buffer),
283 GFP_KERNEL);
284 if (!priv->tx_ring)
285 goto err_tx_ring;
286
287 priv->tx_cons = 0;
288 priv->tx_prod = 0;
289
290 /* Init Rx ring */
291 for (i = 0; i < rx_descs; i++) {
292 ret = tse_init_rx_buffer(priv, &priv->rx_ring[i],
293 priv->rx_dma_buf_sz);
294 if (ret)
295 goto err_init_rx_buffers;
296 }
297
298 priv->rx_cons = 0;
299 priv->rx_prod = 0;
300
301 return 0;
302err_init_rx_buffers:
303 while (--i >= 0)
304 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
305 kfree(priv->tx_ring);
306err_tx_ring:
307 kfree(priv->rx_ring);
308err_rx_ring:
309 return ret;
310}
311
312static void free_skbufs(struct net_device *dev)
313{
314 struct altera_tse_private *priv = netdev_priv(dev);
315 unsigned int rx_descs = priv->rx_ring_size;
316 unsigned int tx_descs = priv->tx_ring_size;
317 int i;
318
319 /* Release the DMA TX/RX socket buffers */
320 for (i = 0; i < rx_descs; i++)
321 tse_free_rx_buffer(priv, &priv->rx_ring[i]);
322 for (i = 0; i < tx_descs; i++)
323 tse_free_tx_buffer(priv, &priv->tx_ring[i]);
324
325
326	kfree(priv->tx_ring);
	kfree(priv->rx_ring);	/* free both rings allocated in alloc_init_skbufs() */
327}
328
329/* Reallocate the skb for the reception process
330 */
331static inline void tse_rx_refill(struct altera_tse_private *priv)
332{
333 unsigned int rxsize = priv->rx_ring_size;
334 unsigned int entry;
335 int ret;
336
337 for (; priv->rx_cons - priv->rx_prod > 0;
338 priv->rx_prod++) {
339 entry = priv->rx_prod % rxsize;
340 if (likely(priv->rx_ring[entry].skb == NULL)) {
341 ret = tse_init_rx_buffer(priv, &priv->rx_ring[entry],
342 priv->rx_dma_buf_sz);
343 if (unlikely(ret != 0))
344 break;
345 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[entry]);
346 }
347 }
348}
349
350/* Pull out the VLAN tag and fix up the packet
351 */
352static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
353{
354 struct ethhdr *eth_hdr;
355 u16 vid;
356 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
357 !__vlan_get_tag(skb, &vid)) {
358 eth_hdr = (struct ethhdr *)skb->data;
359 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
360 skb_pull(skb, VLAN_HLEN);
361 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
362 }
363}
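
/* Layout sketch for the un-tagging above:
 *
 *	before: | dst (6) | src (6) | 0x8100 | TCI | type | payload |
 *	after:  | dst (6) | src (6) | type | payload |
 *
 * memmove() shifts the twelve address bytes up by VLAN_HLEN (4) so
 * that skb_pull() leaves them directly in front of the encapsulated
 * type field, while the extracted TCI travels in skb metadata via
 * __vlan_hwaccel_put_tag().
 */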
364
365/* Receive a packet: retrieve and pass over to upper levels
366 */
367static int tse_rx(struct altera_tse_private *priv, int limit)
368{
369 unsigned int count = 0;
370 unsigned int next_entry;
371 struct sk_buff *skb;
372 unsigned int entry = priv->rx_cons % priv->rx_ring_size;
373 u32 rxstatus;
374 u16 pktlength;
375 u16 pktstatus;
376
377 while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
378 pktstatus = rxstatus >> 16;
379 pktlength = rxstatus & 0xffff;
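		/* Example decode, given the shift/mask above: a
		 * hypothetical rxstatus of 0x00000042 is a clean 66-byte
		 * frame; any bit set in the low byte of pktstatus is
		 * treated as a receive error just below.
		 */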
380
381 if ((pktstatus & 0xFF) || (pktlength == 0))
382 netdev_err(priv->dev,
383 "RCV pktstatus %08X pktlength %08X\n",
384 pktstatus, pktlength);
385
386 count++;
387 next_entry = (++priv->rx_cons) % priv->rx_ring_size;
388
389 skb = priv->rx_ring[entry].skb;
390 if (unlikely(!skb)) {
391 netdev_err(priv->dev,
392 "%s: Inconsistent Rx descriptor chain\n",
393 __func__);
394 priv->dev->stats.rx_dropped++;
395 break;
396 }
397 priv->rx_ring[entry].skb = NULL;
398
399 skb_put(skb, pktlength);
400
401 /* make cache consistent with receive packet buffer */
402 dma_sync_single_for_cpu(priv->device,
403 priv->rx_ring[entry].dma_addr,
404 priv->rx_ring[entry].len,
405 DMA_FROM_DEVICE);
406
407 dma_unmap_single(priv->device, priv->rx_ring[entry].dma_addr,
408 priv->rx_ring[entry].len, DMA_FROM_DEVICE);
409
410 if (netif_msg_pktdata(priv)) {
411 netdev_info(priv->dev, "frame received %d bytes\n",
412 pktlength);
413 print_hex_dump(KERN_ERR, "data: ", DUMP_PREFIX_OFFSET,
414 16, 1, skb->data, pktlength, true);
415 }
416
417 tse_rx_vlan(priv->dev, skb);
418
419 skb->protocol = eth_type_trans(skb, priv->dev);
420 skb_checksum_none_assert(skb);
421
422 napi_gro_receive(&priv->napi, skb);
423
424 priv->dev->stats.rx_packets++;
425 priv->dev->stats.rx_bytes += pktlength;
426
427 entry = next_entry;
428 }
429
430 tse_rx_refill(priv);
431 return count;
432}
433
434/* Reclaim resources after transmission completes
435 */
436static int tse_tx_complete(struct altera_tse_private *priv)
437{
438 unsigned int txsize = priv->tx_ring_size;
439 u32 ready;
440 unsigned int entry;
441 struct tse_buffer *tx_buff;
442 int txcomplete = 0;
443
444 spin_lock(&priv->tx_lock);
445
446 ready = priv->dmaops->tx_completions(priv);
447
448 /* Free sent buffers */
449 while (ready && (priv->tx_cons != priv->tx_prod)) {
450 entry = priv->tx_cons % txsize;
451 tx_buff = &priv->tx_ring[entry];
452
453 if (netif_msg_tx_done(priv))
454 netdev_dbg(priv->dev, "%s: curr %d, dirty %d\n",
455 __func__, priv->tx_prod, priv->tx_cons);
456
457 if (likely(tx_buff->skb))
458 priv->dev->stats.tx_packets++;
459
460 tse_free_tx_buffer(priv, tx_buff);
461 priv->tx_cons++;
462
463 txcomplete++;
464 ready--;
465 }
466
467 if (unlikely(netif_queue_stopped(priv->dev) &&
468 tse_tx_avail(priv) > TSE_TX_THRESH(priv))) {
469 netif_tx_lock(priv->dev);
470 if (netif_queue_stopped(priv->dev) &&
471 tse_tx_avail(priv) > TSE_TX_THRESH(priv)) {
472 if (netif_msg_tx_done(priv))
473 netdev_dbg(priv->dev, "%s: restart transmit\n",
474 __func__);
475 netif_wake_queue(priv->dev);
476 }
477 netif_tx_unlock(priv->dev);
478 }
479
480 spin_unlock(&priv->tx_lock);
481 return txcomplete;
482}
483
484/* NAPI polling function
485 */
486static int tse_poll(struct napi_struct *napi, int budget)
487{
488 struct altera_tse_private *priv =
489 container_of(napi, struct altera_tse_private, napi);
490 int rxcomplete = 0;
491 int txcomplete = 0;
492 unsigned long int flags;
493
494 txcomplete = tse_tx_complete(priv);
495
496 rxcomplete = tse_rx(priv, budget);
497
498 if (rxcomplete >= budget || txcomplete > 0)
499 return rxcomplete;
500
501 napi_gro_flush(napi, false);
502 __napi_complete(napi);
503
504 netdev_dbg(priv->dev,
505 "NAPI Complete, did %d packets with budget %d\n",
506 txcomplete+rxcomplete, budget);
507
508 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
509 priv->dmaops->enable_rxirq(priv);
510 priv->dmaops->enable_txirq(priv);
511 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
512 return rxcomplete + txcomplete;
513}
514
515/* DMA TX & RX FIFO interrupt routing
516 */
517static irqreturn_t altera_isr(int irq, void *dev_id)
518{
519 struct net_device *dev = dev_id;
520 struct altera_tse_private *priv;
521 unsigned long int flags;
522
523
524 if (unlikely(!dev)) {
525 pr_err("%s: invalid dev pointer\n", __func__);
526 return IRQ_NONE;
527 }
528 priv = netdev_priv(dev);
529
530 /* turn off desc irqs and enable napi rx */
531 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
532
533 if (likely(napi_schedule_prep(&priv->napi))) {
534 priv->dmaops->disable_rxirq(priv);
535 priv->dmaops->disable_txirq(priv);
536 __napi_schedule(&priv->napi);
537 }
538
539 /* reset IRQs */
540 priv->dmaops->clear_rxirq(priv);
541 priv->dmaops->clear_txirq(priv);
542
543 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
544
545 return IRQ_HANDLED;
546}
547
548/* Transmit a packet (called by the kernel). Dispatches to
549 * either the SGDMA transmit method or the
550 * MSGDMA method. Neither supports scatter/gather,
551 * so the driver assumes there is only one
552 * physically contiguous fragment starting at
553 * skb->data, of length skb_headlen(skb).
554 */
555static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
556{
557 struct altera_tse_private *priv = netdev_priv(dev);
558 unsigned int txsize = priv->tx_ring_size;
559 unsigned int entry;
560 struct tse_buffer *buffer = NULL;
561 int nfrags = skb_shinfo(skb)->nr_frags;
562 unsigned int nopaged_len = skb_headlen(skb);
563 enum netdev_tx ret = NETDEV_TX_OK;
564 dma_addr_t dma_addr;
565 int txcomplete = 0;
566
567 spin_lock_bh(&priv->tx_lock);
568
569 if (unlikely(tse_tx_avail(priv) < nfrags + 1)) {
570 if (!netif_queue_stopped(dev)) {
571 netif_stop_queue(dev);
572 /* This is a hard error, log it. */
573 netdev_err(priv->dev,
574 "%s: Tx list full when queue awake\n",
575 __func__);
576 }
577 ret = NETDEV_TX_BUSY;
578 goto out;
579 }
580
581 /* Map the first skb fragment */
582 entry = priv->tx_prod % txsize;
583 buffer = &priv->tx_ring[entry];
584
585 dma_addr = dma_map_single(priv->device, skb->data, nopaged_len,
586 DMA_TO_DEVICE);
587	if (dma_mapping_error(priv->device, dma_addr)) {
588		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		/* Drop the packet: returning NETDEV_TX_OK without
		 * freeing the skb would leak it
		 */
		dev_kfree_skb_any(skb);
		dev->stats.tx_dropped++;
589		ret = NETDEV_TX_OK;
590		goto out;
591	}
592
593 buffer->skb = skb;
594 buffer->dma_addr = dma_addr;
595 buffer->len = nopaged_len;
596
597 /* Push data out of the cache hierarchy into main memory */
598 dma_sync_single_for_device(priv->device, buffer->dma_addr,
599 buffer->len, DMA_TO_DEVICE);
600
601 txcomplete = priv->dmaops->tx_buffer(priv, buffer);
602
603 skb_tx_timestamp(skb);
604
605 priv->tx_prod++;
606 dev->stats.tx_bytes += skb->len;
607
608 if (unlikely(tse_tx_avail(priv) <= TXQUEUESTOP_THRESHHOLD)) {
609 if (netif_msg_hw(priv))
610 netdev_dbg(priv->dev, "%s: stop transmitted packets\n",
611 __func__);
612 netif_stop_queue(dev);
613 }
614
615out:
616 spin_unlock_bh(&priv->tx_lock);
617
618 return ret;
619}
620
621/* Called every time the controller might need to be made
622 * aware of new link state. The PHY code conveys this
623 * information through variables in the phydev structure, and this
624 * function converts those variables into the appropriate
625 * register values, and can bring down the device if needed.
626 */
627static void altera_tse_adjust_link(struct net_device *dev)
628{
629 struct altera_tse_private *priv = netdev_priv(dev);
630 struct phy_device *phydev = priv->phydev;
631 int new_state = 0;
632
633 /* only change config if there is a link */
634 spin_lock(&priv->mac_cfg_lock);
635 if (phydev->link) {
636 /* Read old config */
637 u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
638
639 /* Check duplex */
640 if (phydev->duplex != priv->oldduplex) {
641 new_state = 1;
642 if (!(phydev->duplex))
643 cfg_reg |= MAC_CMDCFG_HD_ENA;
644 else
645 cfg_reg &= ~MAC_CMDCFG_HD_ENA;
646
647 netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
648 dev->name, phydev->duplex);
649
650 priv->oldduplex = phydev->duplex;
651 }
652
653 /* Check speed */
654 if (phydev->speed != priv->oldspeed) {
655 new_state = 1;
656 switch (phydev->speed) {
657 case 1000:
658 cfg_reg |= MAC_CMDCFG_ETH_SPEED;
659 cfg_reg &= ~MAC_CMDCFG_ENA_10;
660 break;
661 case 100:
662 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
663 cfg_reg &= ~MAC_CMDCFG_ENA_10;
664 break;
665 case 10:
666 cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
667 cfg_reg |= MAC_CMDCFG_ENA_10;
668 break;
669 default:
670 if (netif_msg_link(priv))
671 netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
672 phydev->speed);
673 break;
674 }
675 priv->oldspeed = phydev->speed;
676 }
677 iowrite32(cfg_reg, &priv->mac_dev->command_config);
678
679 if (!priv->oldlink) {
680 new_state = 1;
681 priv->oldlink = 1;
682 }
683 } else if (priv->oldlink) {
684 new_state = 1;
685 priv->oldlink = 0;
686 priv->oldspeed = 0;
687 priv->oldduplex = -1;
688 }
689
690 if (new_state && netif_msg_link(priv))
691 phy_print_status(phydev);
692
693 spin_unlock(&priv->mac_cfg_lock);
694}
695static struct phy_device *connect_local_phy(struct net_device *dev)
696{
697 struct altera_tse_private *priv = netdev_priv(dev);
698 struct phy_device *phydev = NULL;
699 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
700 int ret;
701
702 if (priv->phy_addr != POLL_PHY) {
703 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
704 priv->mdio->id, priv->phy_addr);
705
706 netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
707
708 phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
709 priv->phy_iface);
710 if (IS_ERR(phydev))
711 netdev_err(dev, "Could not attach to PHY\n");
712
713 } else {
714 phydev = phy_find_first(priv->mdio);
715 if (phydev == NULL) {
716 netdev_err(dev, "No PHY found\n");
717 return phydev;
718 }
719
720 ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
721 priv->phy_iface);
722 if (ret != 0) {
723 netdev_err(dev, "Could not attach to PHY\n");
724 phydev = NULL;
725 }
726 }
727 return phydev;
728}
729
730/* Initialize driver's PHY state, and attach to the PHY
731 */
732static int init_phy(struct net_device *dev)
733{
734 struct altera_tse_private *priv = netdev_priv(dev);
735 struct phy_device *phydev;
736 struct device_node *phynode;
737
738 priv->oldlink = 0;
739 priv->oldspeed = 0;
740 priv->oldduplex = -1;
741
742 phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
743
744 if (!phynode) {
745 netdev_dbg(dev, "no phy-handle found\n");
746 if (!priv->mdio) {
747 netdev_err(dev,
748 "No phy-handle nor local mdio specified\n");
749 return -ENODEV;
750 }
751 phydev = connect_local_phy(dev);
752 } else {
753 netdev_dbg(dev, "phy-handle found\n");
754 phydev = of_phy_connect(dev, phynode,
755 &altera_tse_adjust_link, 0, priv->phy_iface);
756 }
757
758 if (!phydev) {
759 netdev_err(dev, "Could not find the PHY\n");
760 return -ENODEV;
761 }
762
763 /* Stop Advertising 1000BASE Capability if interface is not GMII
764 * Note: Checkpatch throws CHECKs for the camel case defines below,
765 * it's ok to ignore.
766 */
767 if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
768 (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
769 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
770 SUPPORTED_1000baseT_Full);
771
772 /* Broken HW is sometimes missing the pull-up resistor on the
773 * MDIO line, which results in reads to non-existent devices returning
774 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
775 * device as well.
776 * Note: phydev->phy_id is the result of reading the UID PHY registers.
777 */
778 if (phydev->phy_id == 0) {
779 netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
780 phy_disconnect(phydev);
781 return -ENODEV;
782 }
783
784 netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
785 phydev->addr, phydev->phy_id, phydev->link);
786
787 priv->phydev = phydev;
788 return 0;
789}
790
791static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
792{
793 struct altera_tse_mac *mac = priv->mac_dev;
794 u32 msb;
795 u32 lsb;
796
797 msb = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
798 lsb = ((addr[5] << 8) | addr[4]) & 0xffff;
799
800 /* Set primary MAC address */
801 iowrite32(msb, &mac->mac_addr_0);
802 iowrite32(lsb, &mac->mac_addr_1);
803}
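
/* Worked example: for dev_addr 00:11:22:33:44:55 the writes above are
 * msb = 0x33221100 and lsb = 0x00005544, i.e. the core keeps the
 * address byte-reversed with the low-order octets in mac_addr_0.
 */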
804
805/* MAC software reset.
806 * When reset is triggered, the MAC function completes the current
807 * transmission or reception, and subsequently disables the transmit and
808 * receive logic, flushes the receive FIFO buffer, and resets the statistics
809 * counters.
810 */
811static int reset_mac(struct altera_tse_private *priv)
812{
813 void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config;
814 int counter;
815 u32 dat;
816
817 dat = ioread32(cmd_cfg_reg);
818 dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
819 dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET;
820 iowrite32(dat, cmd_cfg_reg);
821
822 counter = 0;
823 while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
824 if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET))
825 break;
826 udelay(1);
827 }
828
829 if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) {
830 dat = ioread32(cmd_cfg_reg);
831 dat &= ~MAC_CMDCFG_SW_RESET;
832 iowrite32(dat, cmd_cfg_reg);
833 return -1;
834 }
835 return 0;
836}
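
/* The poll above bounds the wait to roughly
 * ALTERA_TSE_SW_RESET_WATCHDOG_CNTR microseconds (one udelay(1) per
 * iteration). On timeout the SW_RESET bit is cleared by hand and -1
 * returned so callers such as tse_open() can log the failure.
 */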
837
838/* Initialize MAC core registers
839 */
840static int init_mac(struct altera_tse_private *priv)
841{
842 struct altera_tse_mac *mac = priv->mac_dev;
843 unsigned int cmd = 0;
844 u32 frm_length;
845
846 /* Setup Rx FIFO */
847 iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY,
848 &mac->rx_section_empty);
849 iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full);
850 iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty);
851 iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full);
852
853 /* Setup Tx FIFO */
854 iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY,
855 &mac->tx_section_empty);
856 iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full);
857 iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty);
858 iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full);
859
860 /* MAC Address Configuration */
861 tse_update_mac_addr(priv, priv->dev->dev_addr);
862
863 /* MAC Function Configuration */
864 frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN;
865 iowrite32(frm_length, &mac->frm_length);
866 iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length);
867
868 /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
869 * start address
870 */
871 tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
872 tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
873 ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);
874
875 /* Set the MAC options */
876 cmd = ioread32(&mac->command_config);
877 cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */
878 cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */
879 cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames
880 * with CRC errors
881 */
882 cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
883 cmd &= ~MAC_CMDCFG_TX_ENA;
884 cmd &= ~MAC_CMDCFG_RX_ENA;
885 iowrite32(cmd, &mac->command_config);
886
887 if (netif_msg_hw(priv))
888 dev_dbg(priv->device,
889 "MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);
890
891 return 0;
892}
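
/* With the default MTU of 1500, frm_length above works out to
 * ETH_HLEN (14) + 1500 + ETH_FCS_LEN (4) = 1518 bytes, the maximum
 * untagged Ethernet frame size.
 */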
893
894/* Start/stop MAC transmission logic
895 */
896static void tse_set_mac(struct altera_tse_private *priv, bool enable)
897{
898 struct altera_tse_mac *mac = priv->mac_dev;
899 u32 value = ioread32(&mac->command_config);
900
901 if (enable)
902 value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA;
903 else
904 value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA);
905
906 iowrite32(value, &mac->command_config);
907}
908
909/* Change the MTU
910 */
911static int tse_change_mtu(struct net_device *dev, int new_mtu)
912{
913 struct altera_tse_private *priv = netdev_priv(dev);
914 unsigned int max_mtu = priv->max_mtu;
915 unsigned int min_mtu = ETH_ZLEN + ETH_FCS_LEN;
916
917 if (netif_running(dev)) {
918 netdev_err(dev, "must be stopped to change its MTU\n");
919 return -EBUSY;
920 }
921
922 if ((new_mtu < min_mtu) || (new_mtu > max_mtu)) {
923 netdev_err(dev, "invalid MTU, max MTU is: %u\n", max_mtu);
924 return -EINVAL;
925 }
926
927 dev->mtu = new_mtu;
928 netdev_update_features(dev);
929
930 return 0;
931}
932
933static void altera_tse_set_mcfilter(struct net_device *dev)
934{
935 struct altera_tse_private *priv = netdev_priv(dev);
936 struct altera_tse_mac *mac = (struct altera_tse_mac *)priv->mac_dev;
937 int i;
938 struct netdev_hw_addr *ha;
939
940 /* clear the hash filter */
941 for (i = 0; i < 64; i++)
942 iowrite32(0, &(mac->hash_table[i]));
943
944 netdev_for_each_mc_addr(ha, dev) {
945 unsigned int hash = 0;
946 int mac_octet;
947
948 for (mac_octet = 5; mac_octet >= 0; mac_octet--) {
949 unsigned char xor_bit = 0;
950 unsigned char octet = ha->addr[mac_octet];
951 unsigned int bitshift;
952
953 for (bitshift = 0; bitshift < 8; bitshift++)
954 xor_bit ^= ((octet >> bitshift) & 0x01);
955
956 hash = (hash << 1) | xor_bit;
957 }
958 iowrite32(1, &(mac->hash_table[hash]));
959 }
960}
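/* Worked example: each octet is reduced to its parity bit, taken from
 * addr[5] down to addr[0]. For the IPv4 multicast MAC 01:00:5e:00:00:01
 * the parities are 1,0,0,1,0,1 (0x5e has five bits set), so the frame
 * selects hash_table[0b100101], i.e. bin 37.
 */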
961
962
963static void altera_tse_set_mcfilterall(struct net_device *dev)
964{
965 struct altera_tse_private *priv = netdev_priv(dev);
966 struct altera_tse_mac *mac = (struct altera_tse_mac *)priv->mac_dev;
967 int i;
968
969 /* set the hash filter */
970 for (i = 0; i < 64; i++)
971 iowrite32(1, &(mac->hash_table[i]));
972}
973
974/* Set or clear the multicast filter for this adaptor
975 */
976static void tse_set_rx_mode_hashfilter(struct net_device *dev)
977{
978 struct altera_tse_private *priv = netdev_priv(dev);
979 struct altera_tse_mac *mac = priv->mac_dev;
980
981 spin_lock(&priv->mac_cfg_lock);
982
983 if (dev->flags & IFF_PROMISC)
984 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
985
986 if (dev->flags & IFF_ALLMULTI)
987 altera_tse_set_mcfilterall(dev);
988 else
989 altera_tse_set_mcfilter(dev);
990
991 spin_unlock(&priv->mac_cfg_lock);
992}
993
994/* Set or clear the multicast filter for this adaptor
995 */
996static void tse_set_rx_mode(struct net_device *dev)
997{
998 struct altera_tse_private *priv = netdev_priv(dev);
999 struct altera_tse_mac *mac = priv->mac_dev;
1000
1001 spin_lock(&priv->mac_cfg_lock);
1002
1003 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
1004 !netdev_mc_empty(dev) || !netdev_uc_empty(dev))
1005 tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
1006 else
1007 tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN);
1008
1009 spin_unlock(&priv->mac_cfg_lock);
1010}
1011
1012/* Open and initialize the interface
1013 */
1014static int tse_open(struct net_device *dev)
1015{
1016 struct altera_tse_private *priv = netdev_priv(dev);
1017 int ret = 0;
1018 int i;
1019 unsigned long int flags;
1020
1021 /* Reset and configure TSE MAC and probe associated PHY */
1022 ret = priv->dmaops->init_dma(priv);
1023 if (ret != 0) {
1024 netdev_err(dev, "Cannot initialize DMA\n");
1025 goto phy_error;
1026 }
1027
1028 if (netif_msg_ifup(priv))
1029 netdev_warn(dev, "device MAC address %pM\n",
1030 dev->dev_addr);
1031
1032 if ((priv->revision < 0xd00) || (priv->revision > 0xe00))
1033 netdev_warn(dev, "TSE revision %x\n", priv->revision);
1034
1035 spin_lock(&priv->mac_cfg_lock);
1036 ret = reset_mac(priv);
1037 if (ret)
1038 netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
1039
1040 ret = init_mac(priv);
1041 spin_unlock(&priv->mac_cfg_lock);
1042 if (ret) {
1043 netdev_err(dev, "Cannot init MAC core (error: %d)\n", ret);
1044 goto alloc_skbuf_error;
1045 }
1046
1047 priv->dmaops->reset_dma(priv);
1048
1049 /* Create and initialize the TX/RX descriptors chains. */
1050 priv->rx_ring_size = dma_rx_num;
1051 priv->tx_ring_size = dma_tx_num;
1052 ret = alloc_init_skbufs(priv);
1053 if (ret) {
1054 netdev_err(dev, "DMA descriptors initialization failed\n");
1055 goto alloc_skbuf_error;
1056 }
1057
1058
1059 /* Register RX interrupt */
1060 ret = request_irq(priv->rx_irq, altera_isr, IRQF_SHARED,
1061 dev->name, dev);
1062 if (ret) {
1063 netdev_err(dev, "Unable to register RX interrupt %d\n",
1064 priv->rx_irq);
1065 goto init_error;
1066 }
1067
1068 /* Register TX interrupt */
1069 ret = request_irq(priv->tx_irq, altera_isr, IRQF_SHARED,
1070 dev->name, dev);
1071 if (ret) {
1072 netdev_err(dev, "Unable to register TX interrupt %d\n",
1073 priv->tx_irq);
1074 goto tx_request_irq_error;
1075 }
1076
1077 /* Enable DMA interrupts */
1078 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1079 priv->dmaops->enable_rxirq(priv);
1080 priv->dmaops->enable_txirq(priv);
1081
1082 /* Setup RX descriptor chain */
1083 for (i = 0; i < priv->rx_ring_size; i++)
1084 priv->dmaops->add_rx_desc(priv, &priv->rx_ring[i]);
1085
1086 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1087
1088 /* Start MAC Rx/Tx */
1089 spin_lock(&priv->mac_cfg_lock);
1090 tse_set_mac(priv, true);
1091 spin_unlock(&priv->mac_cfg_lock);
1092
1093 if (priv->phydev)
1094 phy_start(priv->phydev);
1095
1096 napi_enable(&priv->napi);
1097 netif_start_queue(dev);
1098
1099 return 0;
1100
1101tx_request_irq_error:
1102 free_irq(priv->rx_irq, dev);
1103init_error:
1104 free_skbufs(dev);
1105alloc_skbuf_error:
1106 if (priv->phydev) {
1107 phy_disconnect(priv->phydev);
1108 priv->phydev = NULL;
1109 }
1110phy_error:
1111 return ret;
1112}
1113
1114/* Stop TSE MAC interface and put the device in an inactive state
1115 */
1116static int tse_shutdown(struct net_device *dev)
1117{
1118 struct altera_tse_private *priv = netdev_priv(dev);
1119 int ret;
1120 unsigned long int flags;
1121
1122 /* Stop and disconnect the PHY */
1123 if (priv->phydev) {
1124 phy_stop(priv->phydev);
1125 phy_disconnect(priv->phydev);
1126 priv->phydev = NULL;
1127 }
1128
1129 netif_stop_queue(dev);
1130 napi_disable(&priv->napi);
1131
1132 /* Disable DMA interrupts */
1133 spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
1134 priv->dmaops->disable_rxirq(priv);
1135 priv->dmaops->disable_txirq(priv);
1136 spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
1137
1138 /* Free the IRQ lines */
1139 free_irq(priv->rx_irq, dev);
1140 free_irq(priv->tx_irq, dev);
1141
1142 /* disable and reset the MAC, empties fifo */
1143 spin_lock(&priv->mac_cfg_lock);
1144 spin_lock(&priv->tx_lock);
1145
1146 ret = reset_mac(priv);
1147 if (ret)
1148 netdev_err(dev, "Cannot reset MAC core (error: %d)\n", ret);
1149 priv->dmaops->reset_dma(priv);
1150 free_skbufs(dev);
1151
1152 spin_unlock(&priv->tx_lock);
1153 spin_unlock(&priv->mac_cfg_lock);
1154
1155 priv->dmaops->uninit_dma(priv);
1156
1157 return 0;
1158}
1159
1160static struct net_device_ops altera_tse_netdev_ops = {
1161 .ndo_open = tse_open,
1162 .ndo_stop = tse_shutdown,
1163 .ndo_start_xmit = tse_start_xmit,
1164 .ndo_set_mac_address = eth_mac_addr,
1165 .ndo_set_rx_mode = tse_set_rx_mode,
1166 .ndo_change_mtu = tse_change_mtu,
1167 .ndo_validate_addr = eth_validate_addr,
1168};
1169
1170
1171static int request_and_map(struct platform_device *pdev, const char *name,
1172 struct resource **res, void __iomem **ptr)
1173{
1174 struct resource *region;
1175 struct device *device = &pdev->dev;
1176
1177 *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
1178 if (*res == NULL) {
1179 dev_err(device, "resource %s not defined\n", name);
1180 return -ENODEV;
1181 }
1182
1183 region = devm_request_mem_region(device, (*res)->start,
1184 resource_size(*res), dev_name(device));
1185 if (region == NULL) {
1186 dev_err(device, "unable to request %s\n", name);
1187 return -EBUSY;
1188 }
1189
1190 *ptr = devm_ioremap_nocache(device, region->start,
1191 resource_size(region));
1192 if (*ptr == NULL) {
1193 dev_err(device, "ioremap_nocache of %s failed!", name);
1194 return -ENOMEM;
1195 }
1196
1197 return 0;
1198}
1199
1200/* Probe Altera TSE MAC device
1201 */
1202static int altera_tse_probe(struct platform_device *pdev)
1203{
1204 struct net_device *ndev;
1205 int ret = -ENODEV;
1206 struct resource *control_port;
1207 struct resource *dma_res;
1208 struct altera_tse_private *priv;
1209 const unsigned char *macaddr;
1210 struct device_node *np = pdev->dev.of_node;
1211 void __iomem *descmap;
1212 const struct of_device_id *of_id = NULL;
1213
1214 ndev = alloc_etherdev(sizeof(struct altera_tse_private));
1215 if (!ndev) {
1216 dev_err(&pdev->dev, "Could not allocate network device\n");
1217 return -ENODEV;
1218 }
1219
1220 SET_NETDEV_DEV(ndev, &pdev->dev);
1221
1222 priv = netdev_priv(ndev);
1223 priv->device = &pdev->dev;
1224 priv->dev = ndev;
1225 priv->msg_enable = netif_msg_init(debug, default_msg_level);
1226
1227 of_id = of_match_device(altera_tse_ids, &pdev->dev);
1228
1229 if (of_id)
1230 priv->dmaops = (struct altera_dmaops *)of_id->data;
1231
1232
1233 if (priv->dmaops &&
1234 priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
1235 /* Get the mapped address to the SGDMA descriptor memory */
1236 ret = request_and_map(pdev, "s1", &dma_res, &descmap);
1237 if (ret)
1238 goto out_free;
1239
1240 /* Start of that memory is for transmit descriptors */
1241 priv->tx_dma_desc = descmap;
1242
1243		/* First half is for tx descriptors, other half for rx */
1244 priv->txdescmem = resource_size(dma_res)/2;
1245
1246 priv->txdescmem_busaddr = (dma_addr_t)dma_res->start;
1247
1248 priv->rx_dma_desc = (void __iomem *)((uintptr_t)(descmap +
1249 priv->txdescmem));
1250 priv->rxdescmem = resource_size(dma_res)/2;
1251 priv->rxdescmem_busaddr = dma_res->start;
1252 priv->rxdescmem_busaddr += priv->txdescmem;
1253
1254 if (upper_32_bits(priv->rxdescmem_busaddr)) {
1255 dev_dbg(priv->device,
1256 "SGDMA bus addresses greater than 32-bits\n");
1257 goto out_free;
1258 }
1259 if (upper_32_bits(priv->txdescmem_busaddr)) {
1260 dev_dbg(priv->device,
1261 "SGDMA bus addresses greater than 32-bits\n");
1262 goto out_free;
1263 }
1264 } else if (priv->dmaops &&
1265 priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
1266 ret = request_and_map(pdev, "rx_resp", &dma_res,
1267 &priv->rx_dma_resp);
1268 if (ret)
1269 goto out_free;
1270
1271 ret = request_and_map(pdev, "tx_desc", &dma_res,
1272 &priv->tx_dma_desc);
1273 if (ret)
1274 goto out_free;
1275
1276 priv->txdescmem = resource_size(dma_res);
1277 priv->txdescmem_busaddr = dma_res->start;
1278
1279 ret = request_and_map(pdev, "rx_desc", &dma_res,
1280 &priv->rx_dma_desc);
1281 if (ret)
1282 goto out_free;
1283
1284 priv->rxdescmem = resource_size(dma_res);
1285 priv->rxdescmem_busaddr = dma_res->start;
1286
1287 } else {
1288 goto out_free;
1289 }
1290
1291 if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))
1292 dma_set_coherent_mask(priv->device,
1293 DMA_BIT_MASK(priv->dmaops->dmamask));
1294 else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
1295 dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
1296 else
1297 goto out_free;
1298
1299 /* MAC address space */
1300 ret = request_and_map(pdev, "control_port", &control_port,
1301 (void __iomem **)&priv->mac_dev);
1302 if (ret)
1303 goto out_free;
1304
1305 /* xSGDMA Rx Dispatcher address space */
1306 ret = request_and_map(pdev, "rx_csr", &dma_res,
1307 &priv->rx_dma_csr);
1308 if (ret)
1309 goto out_free;
1310
1311
1312 /* xSGDMA Tx Dispatcher address space */
1313 ret = request_and_map(pdev, "tx_csr", &dma_res,
1314 &priv->tx_dma_csr);
1315 if (ret)
1316 goto out_free;
1317
1318
1319 /* Rx IRQ */
1320 priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
1321 if (priv->rx_irq == -ENXIO) {
1322 dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
1323 ret = -ENXIO;
1324 goto out_free;
1325 }
1326
1327 /* Tx IRQ */
1328 priv->tx_irq = platform_get_irq_byname(pdev, "tx_irq");
1329 if (priv->tx_irq == -ENXIO) {
1330 dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
1331 ret = -ENXIO;
1332 goto out_free;
1333 }
1334
1335 /* get FIFO depths from device tree */
1336 if (of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1337 &priv->rx_fifo_depth)) {
1338 dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
1339 ret = -ENXIO;
1340 goto out_free;
1341 }
1342
1343	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1344				 &priv->tx_fifo_depth)) {
1345 dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
1346 ret = -ENXIO;
1347 goto out_free;
1348 }
1349
1350 /* get hash filter settings for this instance */
1351 priv->hash_filter =
1352 of_property_read_bool(pdev->dev.of_node,
1353 "altr,has-hash-multicast-filter");
1354
1355 /* get supplemental address settings for this instance */
1356 priv->added_unicast =
1357 of_property_read_bool(pdev->dev.of_node,
1358 "altr,has-supplementary-unicast");
1359
1360 /* Max MTU is 1500, ETH_DATA_LEN */
1361 priv->max_mtu = ETH_DATA_LEN;
1362
1363 /* Get the max mtu from the device tree. Note that the
1364 * "max-frame-size" parameter is actually max mtu. Definition
1365 * in the ePAPR v1.1 spec and usage differ, so go with usage.
1366 */
1367 of_property_read_u32(pdev->dev.of_node, "max-frame-size",
1368 &priv->max_mtu);
1369
1370 /* The DMA buffer size already accounts for an alignment bias
1371	 * to avoid unaligned access exceptions for the Nios II processor.
1372 */
1373 priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
1374
1375 /* get default MAC address from device tree */
1376 macaddr = of_get_mac_address(pdev->dev.of_node);
1377 if (macaddr)
1378 ether_addr_copy(ndev->dev_addr, macaddr);
1379 else
1380 eth_hw_addr_random(ndev);
1381
1382 priv->phy_iface = of_get_phy_mode(np);
1383
1384 /* try to get PHY address from device tree, use PHY autodetection if
1385 * no valid address is given
1386 */
1387 if (of_property_read_u32(pdev->dev.of_node, "phy-addr",
1388 &priv->phy_addr)) {
1389 priv->phy_addr = POLL_PHY;
1390 }
1391
1392 if (!((priv->phy_addr == POLL_PHY) ||
1393 ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
1394 dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
1395 priv->phy_addr);
		ret = -ENODEV;	/* make sure the probe actually fails */
1396		goto out_free;
1397 }
1398
1399 /* Create/attach to MDIO bus */
1400 ret = altera_tse_mdio_create(ndev,
1401 atomic_add_return(1, &instance_count));
1402
1403 if (ret)
1404 goto out_free;
1405
1406 /* initialize netdev */
1407 ether_setup(ndev);
1408 ndev->mem_start = control_port->start;
1409 ndev->mem_end = control_port->end;
1410 ndev->netdev_ops = &altera_tse_netdev_ops;
1411 altera_tse_set_ethtool_ops(ndev);
1412
1413 altera_tse_netdev_ops.ndo_set_rx_mode = tse_set_rx_mode;
1414
1415 if (priv->hash_filter)
1416 altera_tse_netdev_ops.ndo_set_rx_mode =
1417 tse_set_rx_mode_hashfilter;
1418
1419 /* Scatter/gather IO is not supported,
1420 * so it is turned off
1421 */
1422 ndev->hw_features &= ~NETIF_F_SG;
1423 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
1424
1425 /* VLAN offloading of tagging, stripping and filtering is not
1426 * supported by hardware, but driver will accommodate the
1427 * extra 4-byte VLAN tag for processing by upper layers
1428 */
1429 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1430
1431 /* setup NAPI interface */
1432 netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
1433
1434 spin_lock_init(&priv->mac_cfg_lock);
1435 spin_lock_init(&priv->tx_lock);
1436 spin_lock_init(&priv->rxdma_irq_lock);
1437
1438 ret = register_netdev(ndev);
1439 if (ret) {
1440 dev_err(&pdev->dev, "failed to register TSE net device\n");
1441 goto out_free_mdio;
1442 }
1443
1444 platform_set_drvdata(pdev, ndev);
1445
1446 priv->revision = ioread32(&priv->mac_dev->megacore_revision);
1447
1448 if (netif_msg_probe(priv))
1449 dev_info(&pdev->dev, "Altera TSE MAC version %d.%d at 0x%08lx irq %d/%d\n",
1450 (priv->revision >> 8) & 0xff,
1451 priv->revision & 0xff,
1452 (unsigned long) control_port->start, priv->rx_irq,
1453 priv->tx_irq);
1454
1455 ret = init_phy(ndev);
1456 if (ret != 0) {
1457 netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
1458 goto out_free_mdio;
1459 }
1460 return 0;
1461
1462out_free_mdio:
1463 altera_tse_mdio_destroy(ndev);
1464out_free:
1465 free_netdev(ndev);
1466 return ret;
1467}
1468
1469/* Remove Altera TSE MAC device
1470 */
1471static int altera_tse_remove(struct platform_device *pdev)
1472{
1473 struct net_device *ndev = platform_get_drvdata(pdev);
1474
1475 platform_set_drvdata(pdev, NULL);
1476 altera_tse_mdio_destroy(ndev);
1477 unregister_netdev(ndev);
1478 free_netdev(ndev);
1479
1480 return 0;
1481}
1482
1483struct altera_dmaops altera_dtype_sgdma = {
1484 .altera_dtype = ALTERA_DTYPE_SGDMA,
1485 .dmamask = 32,
1486 .reset_dma = sgdma_reset,
1487 .enable_txirq = sgdma_enable_txirq,
1488 .enable_rxirq = sgdma_enable_rxirq,
1489 .disable_txirq = sgdma_disable_txirq,
1490 .disable_rxirq = sgdma_disable_rxirq,
1491 .clear_txirq = sgdma_clear_txirq,
1492 .clear_rxirq = sgdma_clear_rxirq,
1493 .tx_buffer = sgdma_tx_buffer,
1494 .tx_completions = sgdma_tx_completions,
1495 .add_rx_desc = sgdma_add_rx_desc,
1496 .get_rx_status = sgdma_rx_status,
1497 .init_dma = sgdma_initialize,
1498 .uninit_dma = sgdma_uninitialize,
1499};
1500
1501struct altera_dmaops altera_dtype_msgdma = {
1502 .altera_dtype = ALTERA_DTYPE_MSGDMA,
1503 .dmamask = 64,
1504 .reset_dma = msgdma_reset,
1505 .enable_txirq = msgdma_enable_txirq,
1506 .enable_rxirq = msgdma_enable_rxirq,
1507 .disable_txirq = msgdma_disable_txirq,
1508 .disable_rxirq = msgdma_disable_rxirq,
1509 .clear_txirq = msgdma_clear_txirq,
1510 .clear_rxirq = msgdma_clear_rxirq,
1511 .tx_buffer = msgdma_tx_buffer,
1512 .tx_completions = msgdma_tx_completions,
1513 .add_rx_desc = msgdma_add_rx_desc,
1514 .get_rx_status = msgdma_rx_status,
1515 .init_dma = msgdma_initialize,
1516 .uninit_dma = msgdma_uninitialize,
1517};
1518
1519static struct of_device_id altera_tse_ids[] = {
1520 { .compatible = "altr,tse-msgdma-1.0", .data = &altera_dtype_msgdma, },
1521 { .compatible = "altr,tse-1.0", .data = &altera_dtype_sgdma, },
1522 { .compatible = "ALTR,tse-1.0", .data = &altera_dtype_sgdma, },
1523 {},
1524};
1525MODULE_DEVICE_TABLE(of, altera_tse_ids);
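
/* Hypothetical device tree fragment for the mSGDMA variant; the
 * reg/irq names and properties mirror what altera_tse_probe() looks
 * up above, while the addresses and interrupt numbers are
 * placeholders:
 *
 *	tse_sub_0: ethernet@100000 {
 *		compatible = "altr,tse-msgdma-1.0";
 *		reg = <0x100000 0x400>,		// control_port
 *		      <0x100400 0x20>,		// rx_csr
 *		      <0x100420 0x20>,		// rx_desc
 *		      <0x100440 0x20>,		// rx_resp
 *		      <0x100460 0x20>,		// tx_csr
 *		      <0x100480 0x20>;		// tx_desc
 *		reg-names = "control_port", "rx_csr", "rx_desc",
 *			    "rx_resp", "tx_csr", "tx_desc";
 *		interrupts = <0 41 4>, <0 42 4>;
 *		interrupt-names = "rx_irq", "tx_irq";
 *		rx-fifo-depth = <2048>;
 *		tx-fifo-depth = <2048>;
 *		max-frame-size = <1500>;
 *		phy-addr = <1>;
 *		altr,has-hash-multicast-filter;
 *	};
 */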
1526
1527static struct platform_driver altera_tse_driver = {
1528 .probe = altera_tse_probe,
1529 .remove = altera_tse_remove,
1530 .suspend = NULL,
1531 .resume = NULL,
1532 .driver = {
1533 .name = ALTERA_TSE_RESOURCE_NAME,
1534 .owner = THIS_MODULE,
1535 .of_match_table = altera_tse_ids,
1536 },
1537};
1538
1539module_platform_driver(altera_tse_driver);
1540
1541MODULE_AUTHOR("Altera Corporation");
1542MODULE_DESCRIPTION("Altera Triple Speed Ethernet MAC driver");
1543MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c
new file mode 100644
index 000000000000..70fa13f486b2
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.c
@@ -0,0 +1,44 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "altera_tse.h"
+#include "altera_utils.h"
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+	u32 value = ioread32(ioaddr);
+	value |= bit_mask;
+	iowrite32(value, ioaddr);
+}
+
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask)
+{
+	u32 value = ioread32(ioaddr);
+	value &= ~bit_mask;
+	iowrite32(value, ioaddr);
+}
+
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask)
+{
+	u32 value = ioread32(ioaddr);
+	return (value & bit_mask) ? 1 : 0;
+}
+
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask)
+{
+	u32 value = ioread32(ioaddr);
+	return (value & bit_mask) ? 0 : 1;
+}
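Editor's note: these four helpers are plain read-modify-write accessors for MMIO registers. A usage sketch follows; the register offset and bit value are illustrative assumptions, not taken from the TSE register map:

	#include <linux/bitops.h>
	#include <linux/io.h>

	#include "altera_utils.h"

	#define EXAMPLE_TX_ENA	BIT(0)	/* hypothetical enable bit */

	static void example_tx_enable(void __iomem *csr_base)
	{
		/* assumed offset of a command/config register */
		void __iomem *cmd_config = csr_base + 0x08;

		if (tse_bit_is_clear(cmd_config, EXAMPLE_TX_ENA))
			tse_set_bit(cmd_config, EXAMPLE_TX_ENA);
	}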
diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h
new file mode 100644
index 000000000000..ce1db36d3583
--- /dev/null
+++ b/drivers/net/ethernet/altera/altera_utils.h
@@ -0,0 +1,27 @@
+/* Altera TSE SGDMA and MSGDMA Linux driver
+ * Copyright (C) 2014 Altera Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+
+#ifndef __ALTERA_UTILS_H__
+#define __ALTERA_UTILS_H__
+
+void tse_set_bit(void __iomem *ioaddr, u32 bit_mask);
+void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask);
+int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask);
+
+#endif /* __ALTERA_UTILS_H__*/
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 18e542f7853d..98a10d555b79 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -578,7 +578,7 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	outs++;
 	/* Kick the lance: transmit now */
 	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 
 	spin_lock_irqsave(&lp->devlock, flags);
 	if (TX_BUFFS_AVAIL)
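Editor's note: this hunk and several below swap dev_kfree_skb() for the *_any() helpers. The convention, sketched under assumed names: dev_consume_skb_any() marks a successfully transmitted skb (so drop-monitoring tools stay quiet), and like dev_kfree_skb_any() it is safe from both hard-IRQ and process context, which plain dev_kfree_skb() is not:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* Hedged sketch, not driver code: pick the right free helper on a
	 * TX completion path that may run in hard-IRQ context.
	 */
	static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
	{
		if (tx_ok)
			dev_consume_skb_any(skb); /* normal completion, not a drop */
		else
			dev_kfree_skb_any(skb);   /* error path, counts as a drop */
	}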
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 9793767996a2..87e727b921dc 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -472,7 +472,7 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
 	if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
 		netif_stop_queue(dev);
 
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 2061b471fd16..26efaaa5e73f 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	int rx_pkt_limit = budget;
 	unsigned long flags;
 
+	if (rx_pkt_limit <= 0)
+		goto rx_not_empty;
+
 	do{
 		/* process receive packets until we use the quota*/
 		/* If we own the next entry, it's a new packet. Send it up. */
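Editor's note: the zero-budget guard matters because a NAPI poll callback can be invoked with budget == 0 (netpoll does this to reap TX completions only), and processing even one RX packet would then overrun the quota. A hedged sketch of the contract, with an illustrative callback name:

	#include <linux/netdevice.h>

	/* Illustrative poll callback; the RX ring processing is elided. */
	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		if (budget <= 0)	/* e.g. netpoll: TX-only invocation */
			return 0;

		/* work_done = <clean up to "budget" RX packets here>; */
		if (work_done < budget)
			napi_complete(napi);
		return work_done;
	}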
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339cccfe05a..e7cc9174e364 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	struct pcnet32_rx_head *new_rx_ring;
 	struct sk_buff **new_skb_list;
 	int new, overlap;
+	unsigned int entries = 1 << size;
 
 	new_rx_ring = pci_alloc_consistent(lp->pci_dev,
 					   sizeof(struct pcnet32_rx_head) *
-					   (1 << size),
+					   entries,
 					   &new_ring_dma_addr);
 	if (new_rx_ring == NULL) {
 		netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
 		return;
 	}
-	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+	memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
 
-	new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+	new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
 	if (!new_dma_addr_list)
 		goto free_new_rx_ring;
 
-	new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
-			       GFP_ATOMIC);
+	new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
 	if (!new_skb_list)
 		goto free_new_lists;
 
 	/* first copy the current receive buffers */
-	overlap = min(size, lp->rx_ring_size);
+	overlap = min(entries, lp->rx_ring_size);
 	for (new = 0; new < overlap; new++) {
 		new_rx_ring[new] = lp->rx_ring[new];
 		new_dma_addr_list[new] = lp->rx_dma_addr[new];
 		new_skb_list[new] = lp->rx_skbuff[new];
 	}
 	/* now allocate any new buffers needed */
-	for (; new < size; new++) {
+	for (; new < entries; new++) {
 		struct sk_buff *rx_skbuff;
 		new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
 		rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 		new_dma_addr_list[new] =
 		    pci_map_single(lp->pci_dev, rx_skbuff->data,
 				   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev,
+					  new_dma_addr_list[new])) {
+			netif_err(lp, drv, dev, "%s dma mapping failed\n",
+				  __func__);
+			dev_kfree_skb(new_skb_list[new]);
+			goto free_all_new;
+		}
 		new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
 		new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 	/* and free any unneeded buffers */
 	for (; new < lp->rx_ring_size; new++) {
 		if (lp->rx_skbuff[new]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[new]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(lp->rx_skbuff[new]);
 		}
 	}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 			    lp->rx_ring_size, lp->rx_ring,
 			    lp->rx_ring_dma_addr);
 
-	lp->rx_ring_size = (1 << size);
+	lp->rx_ring_size = entries;
 	lp->rx_mod_mask = lp->rx_ring_size - 1;
 	lp->rx_len_bits = (size << 4);
 	lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
 free_all_new:
 	while (--new >= lp->rx_ring_size) {
 		if (new_skb_list[new]) {
-			pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   new_dma_addr_list[new]))
+				pci_unmap_single(lp->pci_dev,
+						 new_dma_addr_list[new],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(new_skb_list[new]);
 		}
 	}
@@ -634,8 +649,7 @@ free_new_lists:
 	kfree(new_dma_addr_list);
 free_new_rx_ring:
 	pci_free_consistent(lp->pci_dev,
-			    sizeof(struct pcnet32_rx_head) *
-			    (1 << size),
+			    sizeof(struct pcnet32_rx_head) * entries,
 			    new_rx_ring,
 			    new_ring_dma_addr);
 }
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
 		lp->rx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->rx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
-					 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->rx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[i],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb_any(lp->rx_skbuff[i]);
 		}
 		lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
 		lp->tx_dma_addr[x] =
 		    pci_map_single(lp->pci_dev, skb->data, skb->len,
 				   PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+			netif_printk(lp, hw, KERN_DEBUG, dev,
+				     "DMA mapping error at line: %d!\n",
+				     __LINE__);
+			goto clean_up;
+		}
 		lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
 		wmb();	/* Make sure owner changes after all others are visible */
 		lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
 
 	if (pkt_len > rx_copybreak) {
 		struct sk_buff *newskb;
+		dma_addr_t new_dma_addr;
 
 		newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+		/*
+		 * map the new buffer, if mapping fails, drop the packet and
+		 * reuse the old buffer
+		 */
 		if (newskb) {
 			skb_reserve(newskb, NET_IP_ALIGN);
-			skb = lp->rx_skbuff[entry];
-			pci_unmap_single(lp->pci_dev,
-					 lp->rx_dma_addr[entry],
-					 PKT_BUF_SIZE,
-					 PCI_DMA_FROMDEVICE);
-			skb_put(skb, pkt_len);
-			lp->rx_skbuff[entry] = newskb;
-			lp->rx_dma_addr[entry] =
-			    pci_map_single(lp->pci_dev,
-					   newskb->data,
-					   PKT_BUF_SIZE,
-					   PCI_DMA_FROMDEVICE);
-			rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
-			rx_in_place = 1;
+			new_dma_addr = pci_map_single(lp->pci_dev,
+						      newskb->data,
+						      PKT_BUF_SIZE,
+						      PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+				netif_err(lp, rx_err, dev,
+					  "DMA mapping error.\n");
+				dev_kfree_skb(newskb);
+				skb = NULL;
+			} else {
+				skb = lp->rx_skbuff[entry];
+				pci_unmap_single(lp->pci_dev,
+						 lp->rx_dma_addr[entry],
+						 PKT_BUF_SIZE,
+						 PCI_DMA_FROMDEVICE);
+				skb_put(skb, pkt_len);
+				lp->rx_skbuff[entry] = newskb;
+				lp->rx_dma_addr[entry] = new_dma_addr;
+				rxp->base = cpu_to_le32(new_dma_addr);
+				rx_in_place = 1;
+			}
 		} else
 			skb = NULL;
 	} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
 		lp->tx_ring[i].status = 0;	/* CPU owns buffer */
 		wmb();		/* Make sure adapter sees owner change */
 		if (lp->tx_skbuff[i]) {
-			pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
-					 lp->tx_skbuff[i]->len,
-					 PCI_DMA_TODEVICE);
+			if (!pci_dma_mapping_error(lp->pci_dev,
+						   lp->tx_dma_addr[i]))
+				pci_unmap_single(lp->pci_dev,
+						 lp->tx_dma_addr[i],
+						 lp->tx_skbuff[i]->len,
+						 PCI_DMA_TODEVICE);
 			dev_kfree_skb_any(lp->tx_skbuff[i]);
 		}
 		lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
 		}
 
 		rmb();
-		if (lp->rx_dma_addr[i] == 0)
+		if (lp->rx_dma_addr[i] == 0) {
 			lp->rx_dma_addr[i] =
 			    pci_map_single(lp->pci_dev, rx_skbuff->data,
 					   PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(lp->pci_dev,
+						  lp->rx_dma_addr[i])) {
+				/* there is not much we can do at this point */
+				netif_err(lp, drv, dev,
+					  "%s pci dma mapping error\n",
+					  __func__);
+				return -1;
+			}
+		}
 		lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
 		lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
 		wmb();	/* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 
 	lp->tx_ring[entry].misc = 0x00000000;
 
-	lp->tx_skbuff[entry] = skb;
 	lp->tx_dma_addr[entry] =
 	    pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		goto drop_packet;
+	}
+	lp->tx_skbuff[entry] = skb;
 	lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
 	wmb();	/* Make sure owner changes after all others are visible */
 	lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
 		lp->tx_full = 1;
 		netif_stop_queue(dev);
 	}
+drop_packet:
 	spin_unlock_irqrestore(&lp->lock, flags);
 	return NETDEV_TX_OK;
 }
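Editor's note: the pcnet32 hunks all instance one pattern: a streaming DMA mapping can fail, so its result must be checked before the address reaches a descriptor, and only addresses that mapped successfully may be unmapped. A compilable sketch of the pattern under assumed names (modern code would use the dma_map_single()/dma_mapping_error() equivalents; the pci_* wrappers match this driver's vintage):

	#include <linux/netdevice.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static netdev_tx_t example_xmit(struct pci_dev *pdev,
					struct net_device *dev,
					struct sk_buff *skb)
	{
		dma_addr_t addr;

		addr = pci_map_single(pdev, skb->data, skb->len,
				      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, addr)) {
			/* never hand a bogus address to the hardware ring */
			dev_kfree_skb_any(skb);
			dev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}

		/* ... post addr/len to the TX descriptor ring here ... */

		/* teardown paths must mirror the check before unmapping */
		if (!pci_dma_mapping_error(pdev, addr))
			pci_unmap_single(pdev, addr, skb->len,
					 PCI_DMA_TODEVICE);
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}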
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 380d24922049..96f3edb6c738 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1097,7 +1097,7 @@ static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 drop:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 4d3258dd0a88..31f262302128 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -832,7 +832,7 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
 }
 
 static inline void atl1c_clean_buffer(struct pci_dev *pdev,
-				struct atl1c_buffer *buffer_info, int in_irq)
+				struct atl1c_buffer *buffer_info)
 {
 	u16 pci_driection;
 	if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -850,12 +850,8 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
 			pci_unmap_page(pdev, buffer_info->dma,
 				       buffer_info->length, pci_driection);
 	}
-	if (buffer_info->skb) {
-		if (in_irq)
-			dev_kfree_skb_irq(buffer_info->skb);
-		else
-			dev_kfree_skb(buffer_info->skb);
-	}
+	if (buffer_info->skb)
+		dev_consume_skb_any(buffer_info->skb);
 	buffer_info->dma = 0;
 	buffer_info->skb = NULL;
 	ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -875,7 +871,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
 	ring_count = tpd_ring->count;
 	for (index = 0; index < ring_count; index++) {
 		buffer_info = &tpd_ring->buffer_info[index];
-		atl1c_clean_buffer(pdev, buffer_info, 0);
+		atl1c_clean_buffer(pdev, buffer_info);
 	}
 
 	/* Zero out Tx-buffers */
@@ -899,7 +895,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
 
 	for (j = 0; j < rfd_ring->count; j++) {
 		buffer_info = &rfd_ring->buffer_info[j];
-		atl1c_clean_buffer(pdev, buffer_info, 0);
+		atl1c_clean_buffer(pdev, buffer_info);
 	}
 	/* zero out the descriptor ring */
 	memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -1562,7 +1558,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
 
 	while (next_to_clean != hw_next_to_clean) {
 		buffer_info = &tpd_ring->buffer_info[next_to_clean];
-		atl1c_clean_buffer(pdev, buffer_info, 1);
+		atl1c_clean_buffer(pdev, buffer_info);
 		if (++next_to_clean == tpd_ring->count)
 			next_to_clean = 0;
 		atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -2085,7 +2081,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
 	while (index != tpd_ring->next_to_use) {
 		tpd = ATL1C_TPD_DESC(tpd_ring, index);
 		buffer_info = &tpd_ring->buffer_info[index];
-		atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
+		atl1c_clean_buffer(adpt->pdev, buffer_info);
 		memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
 		if (++index == tpd_ring->count)
 			index = 0;
@@ -2258,7 +2254,7 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
 		/* roll back tpd/buffer */
 		atl1c_tx_rollback(adapter, tpd, type);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	} else {
 		atl1c_tx_queue(adapter, skb, tpd, type);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 265ce1b752ed..78befb522a52 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -55,6 +55,7 @@ static const char atl2_driver_name[] = "atl2";
 static const char atl2_driver_string[] = "Atheros(R) L2 Ethernet Driver";
 static const char atl2_copyright[] = "Copyright (c) 2007 Atheros Corporation.";
 static const char atl2_driver_version[] = ATL2_DRV_VERSION;
+static const struct ethtool_ops atl2_ethtool_ops;
 
 MODULE_AUTHOR("Atheros Corporation <xiong.huang@atheros.com>, Chris Snook <csnook@redhat.com>");
 MODULE_DESCRIPTION("Atheros Fast Ethernet Network Driver");
@@ -71,8 +72,6 @@ static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
 };
 MODULE_DEVICE_TABLE(pci, atl2_pci_tbl);
 
-static void atl2_set_ethtool_ops(struct net_device *netdev);
-
 static void atl2_check_options(struct atl2_adapter *adapter);
 
 /**
@@ -1397,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	atl2_setup_pcicmd(pdev);
 
 	netdev->netdev_ops = &atl2_netdev_ops;
-	atl2_set_ethtool_ops(netdev);
+	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
 	netdev->watchdog_timeo = 5 * HZ;
 	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
@@ -2105,11 +2104,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
 	.set_eeprom		= atl2_set_eeprom,
 };
 
-static void atl2_set_ethtool_ops(struct net_device *netdev)
-{
-	SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
-}
-
 #define LBYTESWAP(a)  ((((a) & 0x00ff00ff) << 8) | \
 	(((a) & 0xff00ff00) >> 8))
 #define LONGSWAP(a)   ((LBYTESWAP(a) << 16) | (LBYTESWAP(a) >> 16))
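Editor's note: the atl2 hunks drop a one-line wrapper. The forward declaration of the const ethtool_ops object added at the top of the file is what lets probe call SET_ETHTOOL_OPS() directly even though the ops table is defined near the end. The idiom, sketched with hypothetical names:

	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	static const struct ethtool_ops example_ethtool_ops; /* defined below */

	static void example_probe_setup(struct net_device *netdev)
	{
		SET_ETHTOOL_OPS(netdev, &example_ethtool_ops);
	}

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link	= ethtool_op_get_link,
	};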
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9fd0a71..85dbddd03722 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
 	  This driver supports the ethernet MACs in the Broadcom 63xx
 	  MIPS chipset family (BCM63XX).
 
+config BCMGENET
+	tristate "Broadcom GENET internal MAC support"
+	depends on OF
+	select MII
+	select PHYLIB
+	select FIXED_PHY if BCMGENET=y
+	select BCM7XXX_PHY
+	help
+	  This driver supports the built-in Ethernet MACs found in the
+	  Broadcom BCM7xxx Set Top Box family chipset.
+
 config BNX2
 	tristate "Broadcom NetXtremeII support"
 	depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a3fb88..fd639a0d4c7d 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 8a7bf7dad898..05ba62589017 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1685,7 +1685,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 	unsigned int start;
 
 	do {
-		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
 		/* Convert HW stats into rtnl_link_stats64 stats. */
 		nstat->rx_packets = hwstat->rx_pkts;
@@ -1719,7 +1719,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 	/* Carrier lost counter seems to be broken for some devices */
 	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
 #endif
-	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 
 	return nstat;
 }
@@ -2073,12 +2073,12 @@ static void b44_get_ethtool_stats(struct net_device *dev,
 	do {
 		data_src = &hwstat->tx_good_octets;
 		data_dst = data;
-		start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+		start = u64_stats_fetch_begin_irq(&hwstat->syncp);
 
 		for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
 			*data_dst++ = *data_src++;
 
-	} while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
+	} while (u64_stats_fetch_retry_irq(&hwstat->syncp, start));
 }
 
 static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
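Editor's note: the _bh to _irq rename matters because b44 updates these counters from hard-IRQ context; a reader that only disables bottom halves could still race the writer. The retry-loop idiom itself is unchanged, sketched here with an illustrative stats struct:

	#include <linux/u64_stats_sync.h>

	struct example_stats {
		u64 rx_packets;
		struct u64_stats_sync syncp;
	};

	static u64 example_read_rx(struct example_stats *s)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			val = s->rx_packets;	/* snapshot inside the window */
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		return val;
	}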
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b9a5fb6400d3..a7d11f5565d6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1722,9 +1722,6 @@ static const struct net_device_ops bcm_enet_ops = {
 	.ndo_set_rx_mode	= bcm_enet_set_multicast_list,
 	.ndo_do_ioctl		= bcm_enet_ioctl,
 	.ndo_change_mtu		= bcm_enet_change_mtu,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = bcm_enet_netpoll,
-#endif
 };
 
 /*
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6c9e1c9bdeb8..a8efb18e42fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2886,7 +2886,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 		sw_cons = BNX2_NEXT_TX_BD(sw_cons);
 
 		tx_bytes += skb->len;
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		tx_pkt++;
 		if (tx_pkt == budget)
 			break;
@@ -3133,6 +3133,9 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
 	struct l2_fhdr *rx_hdr;
 	int rx_pkt = 0, pg_ring_used = 0;
 
+	if (budget <= 0)
+		return rx_pkt;
+
 	hw_cons = bnx2_get_hw_rx_cons(bnapi);
 	sw_cons = rxr->rx_cons;
 	sw_prod = rxr->rx_prod;
@@ -6235,7 +6238,7 @@ bnx2_free_irq(struct bnx2 *bp)
 static void
 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 {
-	int i, total_vecs, rc;
+	int i, total_vecs;
 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
 	struct net_device *dev = bp->dev;
 	const int len = sizeof(bp->irq_tbl[0].name);
@@ -6258,16 +6261,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 #ifdef BCM_CNIC
 	total_vecs++;
 #endif
-	rc = -ENOSPC;
-	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
-		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
-		if (rc <= 0)
-			break;
-		if (rc > 0)
-			total_vecs = rc;
-	}
-
-	if (rc != 0)
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+					   BNX2_MIN_MSIX_VEC, total_vecs);
+	if (total_vecs < 0)
 		return;
 
 	msix_vecs = total_vecs;
@@ -6640,7 +6636,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
 	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 
@@ -6733,7 +6729,7 @@ dma_error:
 			       PCI_DMA_TODEVICE);
 	}
 
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }
 
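Editor's note: pci_enable_msix_range() folds the old pci_enable_msix() retry loop into one call. It returns the number of vectors actually allocated, guaranteed to lie within [minvec, maxvec], or a negative errno; it never returns the old "try this smaller count" positive hint the caller had to loop on. A hedged sketch with illustrative bounds:

	#include <linux/pci.h>

	#define EXAMPLE_MIN_VECS	2
	#define EXAMPLE_MAX_VECS	9

	static int example_enable_msix(struct pci_dev *pdev,
				       struct msix_entry *ent)
	{
		int nvecs;

		nvecs = pci_enable_msix_range(pdev, ent, EXAMPLE_MIN_VECS,
					      EXAMPLE_MAX_VECS);
		if (nvecs < 0)
			return nvecs;	/* couldn't even get the minimum */

		/* nvecs is now in [MIN, MAX]; size queues accordingly */
		return nvecs;
	}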
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29ef6d2e..722160940ab9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.78.17-0"
-#define DRV_MODULE_RELDATE	"2013/04/11"
+#define DRV_MODULE_VERSION	"1.78.19-0"
+#define DRV_MODULE_RELDATE	"2014/02/10"
 #define BNX2X_BC_VER		0x040200
 
 #if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
 #define BNX2X_MSG_DCB			0x8000000
 
 /* regular debug print */
+#define DP_INNER(fmt, ...)					\
+	pr_notice("[%s:%d(%s)]" fmt,				\
+		  __func__, __LINE__,				\
+		  bp->dev ? (bp->dev->name) : "?",		\
+		  ##__VA_ARGS__);
+
 #define DP(__mask, fmt, ...)					\
 do {								\
 	if (unlikely(bp->msg_enable & (__mask)))		\
-		pr_notice("[%s:%d(%s)]" fmt,			\
-			  __func__, __LINE__,			\
-			  bp->dev ? (bp->dev->name) : "?",	\
-			  ##__VA_ARGS__);			\
+		DP_INNER(fmt, ##__VA_ARGS__);			\
+} while (0)
+
+#define DP_AND(__mask, fmt, ...)				\
+do {								\
+	if (unlikely((bp->msg_enable & (__mask)) == __mask))	\
+		DP_INNER(fmt, ##__VA_ARGS__);			\
 } while (0)
 
 #define DP_CONT(__mask, fmt, ...)				\
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
 	union {
 		struct client_init_ramrod_data	init_data;
 		struct client_update_ramrod_data update_data;
+		struct tpa_update_ramrod_data	tpa_data;
 	} q_rdata;
 
 	union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
 };
 
 /* Public slow path states */
-enum {
+enum sp_rtnl_flag {
 	BNX2X_SP_RTNL_SETUP_TC,
 	BNX2X_SP_RTNL_TX_TIMEOUT,
 	BNX2X_SP_RTNL_FAN_FAILURE,
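Editor's note: alongside the new enum name, this header also gains DP_AND() above. DP() fires when any bit of __mask is enabled in bp->msg_enable, while DP_AND() requires every bit; that is what the EVENT_RING_OPCODE_STAT_QUERY hunk in bnx2x_main.c below relies on, so its print only appears when both SP and STATS debugging are on. Illustrative values only:

	u32 msg_enable = BNX2X_MSG_SP;		/* STATS not enabled */
	u32 mask = BNX2X_MSG_SP | BNX2X_MSG_STATS;

	bool dp_fires = (msg_enable & mask) != 0;	/* true: DP() prints */
	bool dp_and_fires = (msg_enable & mask) == mask; /* false: DP_AND() is quiet */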
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dbcff509dc3f..acd494647f25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
 
 static int bnx2x_calc_num_queues(struct bnx2x *bp)
 {
-	return bnx2x_num_queues ?
-	       min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
-	       min_t(int, netif_get_num_default_rss_queues(),
-		     BNX2X_MAX_QUEUES(bp));
+	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+	/* Reduce memory usage in kdump environment by using only one queue */
+	if (reset_devices)
+		nq = 1;
+
+	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+	return nq;
 }
 
 /**
@@ -868,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	if (unlikely(bp->panic))
 		return 0;
 #endif
+	if (budget <= 0)
+		return rx_pkt;
 
 	bd_cons = fp->rx_bd_cons;
 	bd_prod = fp->rx_bd_prod;
@@ -1638,36 +1644,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
 	   msix_vec);
 
-	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
 	/*
 	 * reconfigure number of tx/rx queues according to available
 	 * MSI-X vectors
 	 */
-	if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
-		/* how less vectors we will have? */
-		int diff = msix_vec - rc;
-
-		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
-		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
-		if (rc) {
-			BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
-			goto no_msix;
-		}
-		/*
-		 * decrease number of queues by number of unallocated entries
-		 */
-		bp->num_ethernet_queues -= diff;
-		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
-		BNX2X_DEV_INFO("New queue configuration set: %d\n",
-			       bp->num_queues);
-	} else if (rc > 0) {
+	if (rc == -ENOSPC) {
 		/* Get by with single vector */
-		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
-		if (rc) {
+		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+		if (rc < 0) {
 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
 				       rc);
 			goto no_msix;
@@ -1680,8 +1666,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
 		bp->num_ethernet_queues = 1;
 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
 	} else if (rc < 0) {
 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
 		goto no_msix;
+	} else if (rc < msix_vec) {
+		/* how less vectors we will have? */
+		int diff = msix_vec - rc;
+
+		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+		/*
+		 * decrease number of queues by number of unallocated entries
+		 */
+		bp->num_ethernet_queues -= diff;
+		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+		BNX2X_DEV_INFO("New queue configuration set: %d\n",
+			       bp->num_queues);
 	}
 
 	bp->flags |= USING_MSIX_FLAG;
@@ -2234,8 +2234,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 		sizeof(struct per_queue_stats) * num_queue_stats +
 		sizeof(struct stats_counter);
 
-	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
-			bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	if (!bp->fw_stats)
+		goto alloc_mem_err;
 
 	/* Set shortcuts */
 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -4370,14 +4372,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 
 	if (!IS_FCOE_IDX(index)) {
 		/* status blocks */
-		if (!CHIP_IS_E1x(bp))
-			BNX2X_PCI_ALLOC(sb->e2_sb,
-				&bnx2x_fp(bp, index, status_blk_mapping),
-				sizeof(struct host_hc_status_block_e2));
-		else
-			BNX2X_PCI_ALLOC(sb->e1x_sb,
-				&bnx2x_fp(bp, index, status_blk_mapping),
-				sizeof(struct host_hc_status_block_e1x));
+		if (!CHIP_IS_E1x(bp)) {
+			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+						    sizeof(struct host_hc_status_block_e2));
+			if (!sb->e2_sb)
+				goto alloc_mem_err;
+		} else {
+			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+						     sizeof(struct host_hc_status_block_e1x));
+			if (!sb->e1x_sb)
+				goto alloc_mem_err;
+		}
 	}
 
 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4401,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4396 "allocating tx memory of fp %d cos %d\n", 4401 "allocating tx memory of fp %d cos %d\n",
4397 index, cos); 4402 index, cos);
4398 4403
4399 BNX2X_ALLOC(txdata->tx_buf_ring, 4404 txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4400 sizeof(struct sw_tx_bd) * NUM_TX_BD); 4405 sizeof(struct sw_tx_bd),
4401 BNX2X_PCI_ALLOC(txdata->tx_desc_ring, 4406 GFP_KERNEL);
4402 &txdata->tx_desc_mapping, 4407 if (!txdata->tx_buf_ring)
4403 sizeof(union eth_tx_bd_types) * NUM_TX_BD); 4408 goto alloc_mem_err;
4409 txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4410 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4411 if (!txdata->tx_desc_ring)
4412 goto alloc_mem_err;
4404 } 4413 }
4405 } 4414 }
4406 4415
4407 /* Rx */ 4416 /* Rx */
4408 if (!skip_rx_queue(bp, index)) { 4417 if (!skip_rx_queue(bp, index)) {
4409 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 4418 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4410 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring), 4419 bnx2x_fp(bp, index, rx_buf_ring) =
4411 sizeof(struct sw_rx_bd) * NUM_RX_BD); 4420 kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4412 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring), 4421 if (!bnx2x_fp(bp, index, rx_buf_ring))
4413 &bnx2x_fp(bp, index, rx_desc_mapping), 4422 goto alloc_mem_err;
4414 sizeof(struct eth_rx_bd) * NUM_RX_BD); 4423 bnx2x_fp(bp, index, rx_desc_ring) =
4424 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4425 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4426 if (!bnx2x_fp(bp, index, rx_desc_ring))
4427 goto alloc_mem_err;
4415 4428
4416 /* Seed all CQEs by 1s */ 4429 /* Seed all CQEs by 1s */
4417 BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring), 4430 bnx2x_fp(bp, index, rx_comp_ring) =
4418 &bnx2x_fp(bp, index, rx_comp_mapping), 4431 BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4419 sizeof(struct eth_fast_path_rx_cqe) * 4432 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4420 NUM_RCQ_BD); 4433 if (!bnx2x_fp(bp, index, rx_comp_ring))
4434 goto alloc_mem_err;
4421 4435
4422 /* SGE ring */ 4436 /* SGE ring */
4423 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring), 4437 bnx2x_fp(bp, index, rx_page_ring) =
4424 sizeof(struct sw_rx_page) * NUM_RX_SGE); 4438 kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4425 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring), 4439 GFP_KERNEL);
4426 &bnx2x_fp(bp, index, rx_sge_mapping), 4440 if (!bnx2x_fp(bp, index, rx_page_ring))
4427 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 4441 goto alloc_mem_err;
4442 bnx2x_fp(bp, index, rx_sge_ring) =
4443 BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4444 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4445 if (!bnx2x_fp(bp, index, rx_sge_ring))
4446 goto alloc_mem_err;
4428 /* RX BD ring */ 4447 /* RX BD ring */
4429 bnx2x_set_next_page_rx_bd(fp); 4448 bnx2x_set_next_page_rx_bd(fp);
4430 4449
@@ -4780,12 +4799,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
 	bnx2x_panic();
 #endif
 
-	smp_mb__before_clear_bit();
-	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
-	smp_mb__after_clear_bit();
-
 	/* This allows the netif to be shutdown gracefully before resetting */
-	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 }
 
 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4928,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
 	disable = disable ? 1 : (usec ? 0 : 1);
 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
 }
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+			    u32 verbose)
+{
+	smp_mb__before_clear_bit();
+	set_bit(flag, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+	   flag);
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
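Editor's note: the new helper centralizes a sequence that was open-coded at several call sites converted elsewhere in this patch (tx_timeout, fan failure, the DCB and AFEX paths). The memory barriers around set_bit() are what make the flag visible to an already-running work item. Before/after, taken from the hunks themselves:

	/* before (pattern removed throughout this patch) */
	smp_mb__before_clear_bit();
	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
	smp_mb__after_clear_bit();
	schedule_delayed_work(&bp->sp_rtnl_task, 0);

	/* after */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);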
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f88c25..05f4f5f52635 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -47,31 +47,26 @@ extern int bnx2x_num_queues;
 		} \
 	} while (0)
 
-#define BNX2X_PCI_ALLOC(x, y, size) \
-	do { \
-		x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
-		   (unsigned long long)(*y), x); \
-	} while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
-	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		memset((void *)x, 0xFFFFFFFF, size); \
-		DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
-		   (unsigned long long)(*y), x); \
-	} while (0)
-
-#define BNX2X_ALLOC(x, size) \
-	do { \
-		x = kzalloc(size, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-	} while (0)
+#define BNX2X_PCI_ALLOC(y, size)					\
+({									\
+	void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+	if (x)								\
+		DP(NETIF_MSG_HW,					\
+		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",	\
+		   (unsigned long long)(*y), x);			\
+	x;								\
+})
+#define BNX2X_PCI_FALLOC(y, size)					\
+({									\
+	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+	if (x) {							\
+		memset(x, 0xff, size);					\
+		DP(NETIF_MSG_HW,					\
+		   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",	\
+		   (unsigned long long)(*y), x);			\
+	}								\
+	x;								\
+})
 
 /*********************** Interfaces ****************************
  * Functions that need to be implemented by each driver version
@@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
 int bnx2x_drain_tx_queues(struct bnx2x *bp);
 void bnx2x_squeeze_objects(struct bnx2x *bp);
 
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+			    u32 verbose);
+
 #endif /* BNX2X_CMN_H */
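Editor's note: the rewritten macros use GCC statement expressions and return the pointer instead of hiding a goto alloc_mem_err inside; every caller gains an explicit NULL check, which is exactly what the bnx2x_cmn.c allocation hunks above convert to. The calling convention, taken from the patch:

	/* before: control flow hidden inside the macro */
	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping, size);

	/* after: allocation and error handling visible at the call site */
	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping, size);
	if (!bp->fw_stats)
		goto alloc_mem_err;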
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace204b054..97ea5421dd96 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
 	 * as we are handling an attention on a work queue which must be
 	 * flushed at some rtnl-locked contexts (e.g. if down)
 	 */
-	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
 }
 
 void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
 		if (IS_MF(bp))
 			bnx2x_link_sync_notify(bp);
 
-		set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
-		schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
 		return;
 	}
 	case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf06f7a..95dc36543548 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
 	(IRO[156].base + ((vfId) * IRO[156].m1))
 #define CSTORM_VF_TO_PF_OFFSET(funcId) \
 	(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
 #define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
 	(IRO[203].base + ((pfId) * IRO[203].m1))
 #define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b62e2c..46e2f18df2cb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2848,7 +2848,7 @@ struct afex_stats {
 
 #define BCM_5710_FW_MAJOR_VERSION	7
 #define BCM_5710_FW_MINOR_VERSION	8
-#define BCM_5710_FW_REVISION_VERSION	17
+#define BCM_5710_FW_REVISION_VERSION	19
 #define BCM_5710_FW_ENGINEERING_VERSION	0
 #define BCM_5710_FW_COMPILE_FLAGS	1
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d4382286457..5e74599b05c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	u16 start = 0, end = 0;
 	u8 cos;
 #endif
-	if (disable_int)
+	if (IS_PF(bp) && disable_int)
 		bnx2x_int_disable(bp);
 
 	bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +929,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
-		  bp->def_idx, bp->def_att_idx, bp->attn_state,
-		  bp->spq_prod_idx, bp->stats_counter);
-	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
-		  bp->def_status_blk->atten_status_block.attn_bits,
-		  bp->def_status_blk->atten_status_block.attn_bits_ack,
-		  bp->def_status_blk->atten_status_block.status_block_id,
-		  bp->def_status_blk->atten_status_block.attn_bits_index);
-	BNX2X_ERR("     def (");
-	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
-		pr_cont("0x%x%s",
-			bp->def_status_blk->sp_sb.index_values[i],
-			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
-	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
-		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
-			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
-			i*sizeof(u32));
-
-	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
-		sp_sb_data.igu_sb_id,
-		sp_sb_data.igu_seg_id,
-		sp_sb_data.p_func.pf_id,
-		sp_sb_data.p_func.vnic_id,
-		sp_sb_data.p_func.vf_id,
-		sp_sb_data.p_func.vf_valid,
-		sp_sb_data.state);
+	if (IS_PF(bp)) {
+		struct host_sp_status_block *def_sb = bp->def_status_blk;
+		int data_size, cstorm_offset;
+
+		BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+			  bp->def_idx, bp->def_att_idx, bp->attn_state,
+			  bp->spq_prod_idx, bp->stats_counter);
+		BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+			  def_sb->atten_status_block.attn_bits,
+			  def_sb->atten_status_block.attn_bits_ack,
+			  def_sb->atten_status_block.status_block_id,
+			  def_sb->atten_status_block.attn_bits_index);
+		BNX2X_ERR("     def (");
+		for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+			pr_cont("0x%x%s",
+				def_sb->sp_sb.index_values[i],
+				(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+		data_size = sizeof(struct hc_sp_status_block_data) /
+			    sizeof(u32);
+		cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+		for (i = 0; i < data_size; i++)
+			*((u32 *)&sp_sb_data + i) =
+				REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+				       i * sizeof(u32));
+
+		pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+			sp_sb_data.igu_sb_id,
+			sp_sb_data.igu_seg_id,
+			sp_sb_data.p_func.pf_id,
+			sp_sb_data.p_func.vnic_id,
+			sp_sb_data.p_func.vf_id,
+			sp_sb_data.p_func.vf_valid,
+			sp_sb_data.state);
+	}
 
 	for_each_eth_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 			pr_cont("0x%x%s",
 				fp->sb_index_values[j],
 				(j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access FW reflection for status block */
+		if (IS_VF(bp))
+			continue;
+
 		/* fw sb data */
 		data_size = CHIP_IS_E1x(bp) ?
 			sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1077,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 	}
 
 #ifdef BNX2X_STOP_ON_ERROR
-
-	/* event queue */
-	BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
-	for (i = 0; i < NUM_EQ_DESC; i++) {
-		u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
-		BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
-			  i, bp->eq_ring[i].message.opcode,
-			  bp->eq_ring[i].message.error);
-		BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+	if (IS_PF(bp)) {
+		/* event queue */
+		BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+		for (i = 0; i < NUM_EQ_DESC; i++) {
+			u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+			BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+				  i, bp->eq_ring[i].message.opcode,
+				  bp->eq_ring[i].message.error);
+			BNX2X_ERR("data: %x %x %x\n",
+				  data[0], data[1], data[2]);
+		}
 	}
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
 		}
 	}
 #endif
-	bnx2x_fw_dump(bp);
-	bnx2x_mc_assert(bp);
+	if (IS_PF(bp)) {
+		bnx2x_fw_dump(bp);
+		bnx2x_mc_assert(bp);
+	}
 	BNX2X_ERR("end crash dump -----------------\n");
 }
 
@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 		drv_cmd = BNX2X_Q_CMD_EMPTY;
 		break;
 
+	case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+		DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+		break;
+
 	default:
 		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
 			  command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 		cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
 			    HW_CID(bp, cid));
 
-	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-		 SPE_HDR_FUNCTION_ID);
+	/* In some cases, type may already contain the func-id
+	 * mainly in SRIOV related use cases, so we add it here only
+	 * if it's not already set.
+	 */
+	if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+		type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+			SPE_HDR_CONN_TYPE;
+		type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+			 SPE_HDR_FUNCTION_ID);
+	} else {
+		type = cmd_type;
+	}
 
 	spe->hdr.type = cpu_to_le16(type);
 
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
3878 * This is due to some boards consuming sufficient power when driver is 3908 * This is due to some boards consuming sufficient power when driver is
3879 * up to overheat if fan fails. 3909 * up to overheat if fan fails.
3880 */ 3910 */
3881 smp_mb__before_clear_bit(); 3911 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
3882 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3883 smp_mb__after_clear_bit();
3884 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3885} 3912}
3886 3913
3887static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3914static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
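The removed smp_mb__before_clear_bit()/set_bit()/schedule_delayed_work() sequence here, and in the rx-mode, AFEX, VFPF-mcast, hypervisor-VLAN and channel-down hunks below, is folded into a new bnx2x_schedule_sp_rtnl() helper whose definition is outside this diff. A plausible sketch, reconstructed from the open-coded pattern it replaces (the flag type and the use of the second argument as extra DP() verbosity bits are assumptions):

    void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
                                u32 verbose)
    {
            smp_mb__before_clear_bit();
            set_bit(flag, &bp->sp_rtnl_state);
            smp_mb__after_clear_bit();
            DP((BNX2X_MSG_SP | verbose),
               "Scheduling sp_rtnl task [Flag: %d]\n", flag);
            schedule_delayed_work(&bp->sp_rtnl_task, 0);
    }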
@@ -5221,9 +5248,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5221 continue; 5248 continue;
5222 5249
5223 case EVENT_RING_OPCODE_STAT_QUERY: 5250 case EVENT_RING_OPCODE_STAT_QUERY:
5224 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS, 5251 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5225 "got statistics comp event %d\n", 5252 "got statistics comp event %d\n",
5226 bp->stats_comp++); 5253 bp->stats_comp++);
5227 /* nothing to do with stats comp */ 5254 /* nothing to do with stats comp */
5228 goto next_spqe; 5255 goto next_spqe;
5229 5256
@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5273 break; 5300 break;
5274 5301
5275 } else { 5302 } else {
5303 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5304
5276 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP, 5305 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5277 "AFEX: ramrod completed FUNCTION_UPDATE\n"); 5306 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5278 f_obj->complete_cmd(bp, f_obj, 5307 f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5282 * sp_rtnl task as all Queue SP operations 5311 * sp_rtnl task as all Queue SP operations
5283 * should run under rtnl_lock. 5312 * should run under rtnl_lock.
5284 */ 5313 */
5285 smp_mb__before_clear_bit(); 5314 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5286 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5287 &bp->sp_rtnl_state);
5288 smp_mb__after_clear_bit();
5289
5290 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5291 } 5315 }
5292 5316
5293 goto next_spqe; 5317 goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
6005{ 6029{
6006 int i; 6030 int i;
6007 6031
6008 if (IS_MF_SI(bp))
6009 /*
6010 * In switch independent mode, the TSTORM needs to accept
6011 * packets that failed classification, since approximate match
6012 * mac addresses aren't written to NIG LLH
6013 */
6014 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6015 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
6016 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
6017 REG_WR8(bp, BAR_TSTRORM_INTMEM +
6018 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
6019
6020 /* Zero this manually as its initialization is 6032 /* Zero this manually as its initialization is
6021 currently missing in the initTool */ 6033 currently missing in the initTool */
6022 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 6034 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8001,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
7989 8001
7990int bnx2x_alloc_mem_cnic(struct bnx2x *bp) 8002int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7991{ 8003{
7992 if (!CHIP_IS_E1x(bp)) 8004 if (!CHIP_IS_E1x(bp)) {
7993 /* size = the status block + ramrod buffers */ 8005 /* size = the status block + ramrod buffers */
7994 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping, 8006 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
7995 sizeof(struct host_hc_status_block_e2)); 8007 sizeof(struct host_hc_status_block_e2));
7996 else 8008 if (!bp->cnic_sb.e2_sb)
7997 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, 8009 goto alloc_mem_err;
7998 &bp->cnic_sb_mapping, 8010 } else {
7999 sizeof(struct 8011 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8000 host_hc_status_block_e1x)); 8012 sizeof(struct host_hc_status_block_e1x));
8013 if (!bp->cnic_sb.e1x_sb)
8014 goto alloc_mem_err;
8015 }
8001 8016
8002 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) 8017 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8003 /* allocate searcher T2 table, as it wasn't allocated before */ 8018 /* allocate searcher T2 table, as it wasn't allocated before */
8004 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 8019 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8020 if (!bp->t2)
8021 goto alloc_mem_err;
8022 }
8005 8023
8006 /* write address to which L5 should insert its values */ 8024 /* write address to which L5 should insert its values */
8007 bp->cnic_eth_dev.addr_drv_info_to_mcp = 8025 bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8040,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8022{ 8040{
8023 int i, allocated, context_size; 8041 int i, allocated, context_size;
8024 8042
8025 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) 8043 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8026 /* allocate searcher T2 table */ 8044 /* allocate searcher T2 table */
8027 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ); 8045 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8046 if (!bp->t2)
8047 goto alloc_mem_err;
8048 }
8028 8049
8029 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping, 8050 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8030 sizeof(struct host_sp_status_block)); 8051 sizeof(struct host_sp_status_block));
8052 if (!bp->def_status_blk)
8053 goto alloc_mem_err;
8031 8054
8032 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 8055 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8033 sizeof(struct bnx2x_slowpath)); 8056 sizeof(struct bnx2x_slowpath));
8057 if (!bp->slowpath)
8058 goto alloc_mem_err;
8034 8059
8035 /* Allocate memory for CDU context: 8060 /* Allocate memory for CDU context:
8036 * This memory is allocated separately and not in the generic ILT 8061 * This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8075,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8050 for (i = 0, allocated = 0; allocated < context_size; i++) { 8075 for (i = 0, allocated = 0; allocated < context_size; i++) {
8051 bp->context[i].size = min(CDU_ILT_PAGE_SZ, 8076 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8052 (context_size - allocated)); 8077 (context_size - allocated));
8053 BNX2X_PCI_ALLOC(bp->context[i].vcxt, 8078 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8054 &bp->context[i].cxt_mapping, 8079 bp->context[i].size);
8055 bp->context[i].size); 8080 if (!bp->context[i].vcxt)
8081 goto alloc_mem_err;
8056 allocated += bp->context[i].size; 8082 allocated += bp->context[i].size;
8057 } 8083 }
8058 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); 8084 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8085 GFP_KERNEL);
8086 if (!bp->ilt->lines)
8087 goto alloc_mem_err;
8059 8088
8060 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) 8089 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8061 goto alloc_mem_err; 8090 goto alloc_mem_err;
@@ -8064,11 +8093,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
8064 goto alloc_mem_err; 8093 goto alloc_mem_err;
8065 8094
8066 /* Slow path ring */ 8095 /* Slow path ring */
8067 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE); 8096 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8097 if (!bp->spq)
8098 goto alloc_mem_err;
8068 8099
8069 /* EQ */ 8100 /* EQ */
8070 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping, 8101 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8071 BCM_PAGE_SIZE * NUM_EQ_PAGES); 8102 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8103 if (!bp->eq_ring)
8104 goto alloc_mem_err;
8072 8105
8073 return 0; 8106 return 0;
8074 8107
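These hunks also change BNX2X_PCI_ALLOC from a statement macro that assigned its destination and jumped to alloc_mem_err on failure into an expression that returns the allocation, so every caller now checks the result explicitly. The new definition is not part of this section; a sketch under that assumption, keeping the dma_zalloc_coherent() call and DP() trace the old macro is assumed to have used:

    #define BNX2X_PCI_ALLOC(y, size)                                        \
    ({                                                                      \
            void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y,          \
                                          GFP_KERNEL);                      \
            if (x)                                                          \
                    DP(NETIF_MSG_HW,                                        \
                       "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",        \
                       (unsigned long long)(*(y)), x);                      \
            x;                                                              \
    })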
@@ -11771,6 +11804,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11771 11804
11772 bp->disable_tpa = disable_tpa; 11805 bp->disable_tpa = disable_tpa;
11773 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp); 11806 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
 11807	/* Reduce memory usage in a kdump environment by disabling TPA */
11808 bp->disable_tpa |= reset_devices;
11774 11809
11775 /* Set TPA flags */ 11810 /* Set TPA flags */
11776 if (bp->disable_tpa) { 11811 if (bp->disable_tpa) {
@@ -11942,7 +11977,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11942{ 11977{
11943 int mc_count = netdev_mc_count(bp->dev); 11978 int mc_count = netdev_mc_count(bp->dev);
11944 struct bnx2x_mcast_list_elem *mc_mac = 11979 struct bnx2x_mcast_list_elem *mc_mac =
11945 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC); 11980 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
11946 struct netdev_hw_addr *ha; 11981 struct netdev_hw_addr *ha;
11947 11982
11948 if (!mc_mac) 11983 if (!mc_mac)
@@ -12064,11 +12099,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
12064 return; 12099 return;
12065 } else { 12100 } else {
12066 /* Schedule an SP task to handle rest of change */ 12101 /* Schedule an SP task to handle rest of change */
12067 DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n"); 12102 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12068 smp_mb__before_clear_bit(); 12103 NETIF_MSG_IFUP);
12069 set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
12070 smp_mb__after_clear_bit();
12071 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12072 } 12104 }
12073} 12105}
12074 12106
@@ -12101,11 +12133,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12101 /* configuring mcast to a vf involves sleeping (when we 12133 /* configuring mcast to a vf involves sleeping (when we
12102 * wait for the pf's response). 12134 * wait for the pf's response).
12103 */ 12135 */
12104 smp_mb__before_clear_bit(); 12136 bnx2x_schedule_sp_rtnl(bp,
12105 set_bit(BNX2X_SP_RTNL_VFPF_MCAST, 12137 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12106 &bp->sp_rtnl_state);
12107 smp_mb__after_clear_bit();
12108 schedule_delayed_work(&bp->sp_rtnl_task, 0);
12109 } 12138 }
12110 } 12139 }
12111 12140
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3..31297266b743 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2277 data->header.rule_cnt, p->rx_accept_flags, 2277 data->header.rule_cnt, p->rx_accept_flags,
2278 p->tx_accept_flags); 2278 p->tx_accept_flags);
2279 2279
2280 /* No need for an explicit memory barrier here as long we would 2280 /* No need for an explicit memory barrier here as long as we
2281 * need to ensure the ordering of writing to the SPQ element 2281 * ensure the ordering of writing to the SPQ element
2282 * and updating of the SPQ producer which involves a memory 2282 * and updating of the SPQ producer which involves a memory
2283 * read and we will have to put a full memory barrier there 2283 * read. If the memory read is removed we will have to put a
2284 * (inside bnx2x_sp_post()). 2284 * full memory barrier there (inside bnx2x_sp_post()).
2285 */ 2285 */
2286 2286
2287 /* Send a ramrod */ 2287 /* Send a ramrod */
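The reworded comment above, repeated at every bnx2x_sp_post() call site in this patch, describes ordering that lives inside bnx2x_sp_post() itself. Roughly, assuming the simplified shape below (bnx2x_sp_get_next() and bnx2x_sp_prod_update() as in bnx2x_main.c; details elided):

    spin_lock_bh(&bp->spq_lock);
    spe = bnx2x_sp_get_next(bp);      /* reserve and write the SPQ element */
    /* ... fill spe->hdr and spe->data ... */
    bnx2x_sp_prod_update(bp);         /* producer update; it begins with a
                                       * read of the producer index, and that
                                       * read orders the element writes above.
                                       * Only if the read were removed would a
                                       * full memory barrier be needed here.
                                       */
    spin_unlock_bh(&bp->spq_lock);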
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2982 raw->clear_pending(raw); 2982 raw->clear_pending(raw);
2983 return 0; 2983 return 0;
2984 } else { 2984 } else {
2985 /* No need for an explicit memory barrier here as long we would 2985 /* No need for an explicit memory barrier here as long as we
2986 * need to ensure the ordering of writing to the SPQ element 2986 * ensure the ordering of writing to the SPQ element
2987 * and updating of the SPQ producer which involves a memory 2987 * and updating of the SPQ producer which involves a memory
2988 * read and we will have to put a full memory barrier there 2988 * read. If the memory read is removed we will have to put a
2989 * (inside bnx2x_sp_post()). 2989 * full memory barrier there (inside bnx2x_sp_post()).
2990 */ 2990 */
2991 2991
2992 /* Send a ramrod */ 2992 /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3466 raw->clear_pending(raw); 3466 raw->clear_pending(raw);
3467 return 0; 3467 return 0;
3468 } else { 3468 } else {
3469 /* No need for an explicit memory barrier here as long we would 3469 /* No need for an explicit memory barrier here as long as we
3470 * need to ensure the ordering of writing to the SPQ element 3470 * ensure the ordering of writing to the SPQ element
3471 * and updating of the SPQ producer which involves a memory 3471 * and updating of the SPQ producer which involves a memory
3472 * read and we will have to put a full memory barrier there 3472 * read. If the memory read is removed we will have to put a
3473 * (inside bnx2x_sp_post()). 3473 * full memory barrier there (inside bnx2x_sp_post()).
3474 */ 3474 */
3475 3475
3476 /* Send a ramrod */ 3476 /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY; 4091 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4092 } 4092 }
4093 4093
4094 /* No need for an explicit memory barrier here as long we would 4094 /* No need for an explicit memory barrier here as long as we
4095 * need to ensure the ordering of writing to the SPQ element 4095 * ensure the ordering of writing to the SPQ element
4096 * and updating of the SPQ producer which involves a memory 4096 * and updating of the SPQ producer which involves a memory
4097 * read and we will have to put a full memory barrier there 4097 * read. If the memory read is removed we will have to put a
4098 * (inside bnx2x_sp_post()). 4098 * full memory barrier there (inside bnx2x_sp_post()).
4099 */ 4099 */
4100 4100
4101 /* Send a ramrod */ 4101 /* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4158 rss_obj->config_rss = bnx2x_setup_rss; 4158 rss_obj->config_rss = bnx2x_setup_rss;
4159} 4159}
4160 4160
4161int validate_vlan_mac(struct bnx2x *bp,
4162 struct bnx2x_vlan_mac_obj *vlan_mac)
4163{
4164 if (!vlan_mac->get_n_elements) {
4165 BNX2X_ERR("vlan mac object was not intialized\n");
4166 return -EINVAL;
4167 }
4168 return 0;
4169}
4170
4171/********************** Queue state object ***********************************/ 4161/********************** Queue state object ***********************************/
4172 4162
4173/** 4163/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4587 /* Fill the ramrod data */ 4577 /* Fill the ramrod data */
4588 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4578 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4589 4579
4590 /* No need for an explicit memory barrier here as long we would 4580 /* No need for an explicit memory barrier here as long as we
4591 * need to ensure the ordering of writing to the SPQ element 4581 * ensure the ordering of writing to the SPQ element
4592 * and updating of the SPQ producer which involves a memory 4582 * and updating of the SPQ producer which involves a memory
4593 * read and we will have to put a full memory barrier there 4583 * read. If the memory read is removed we will have to put a
4594 * (inside bnx2x_sp_post()). 4584 * full memory barrier there (inside bnx2x_sp_post()).
4595 */ 4585 */
4596
4597 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4586 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4598 U64_HI(data_mapping), 4587 U64_HI(data_mapping),
4599 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4588 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4615 bnx2x_q_fill_setup_data_cmn(bp, params, rdata); 4604 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4616 bnx2x_q_fill_setup_data_e2(bp, params, rdata); 4605 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4617 4606
4618 /* No need for an explicit memory barrier here as long we would 4607 /* No need for an explicit memory barrier here as long as we
4619 * need to ensure the ordering of writing to the SPQ element 4608 * ensure the ordering of writing to the SPQ element
4620 * and updating of the SPQ producer which involves a memory 4609 * and updating of the SPQ producer which involves a memory
4621 * read and we will have to put a full memory barrier there 4610 * read. If the memory read is removed we will have to put a
4622 * (inside bnx2x_sp_post()). 4611 * full memory barrier there (inside bnx2x_sp_post()).
4623 */ 4612 */
4624
4625 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX], 4613 return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4626 U64_HI(data_mapping), 4614 U64_HI(data_mapping),
4627 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4615 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4659 o->cids[cid_index], rdata->general.client_id, 4647 o->cids[cid_index], rdata->general.client_id,
4660 rdata->general.sp_client_id, rdata->general.cos); 4648 rdata->general.sp_client_id, rdata->general.cos);
4661 4649
4662 /* No need for an explicit memory barrier here as long we would 4650 /* No need for an explicit memory barrier here as long as we
4663 * need to ensure the ordering of writing to the SPQ element 4651 * ensure the ordering of writing to the SPQ element
4664 * and updating of the SPQ producer which involves a memory 4652 * and updating of the SPQ producer which involves a memory
4665 * read and we will have to put a full memory barrier there 4653 * read. If the memory read is removed we will have to put a
4666 * (inside bnx2x_sp_post()). 4654 * full memory barrier there (inside bnx2x_sp_post()).
4667 */ 4655 */
4668
4669 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index], 4656 return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4670 U64_HI(data_mapping), 4657 U64_HI(data_mapping),
4671 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4658 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
4760 /* Fill the ramrod data */ 4747 /* Fill the ramrod data */
4761 bnx2x_q_fill_update_data(bp, o, update_params, rdata); 4748 bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4762 4749
4763 /* No need for an explicit memory barrier here as long we would 4750 /* No need for an explicit memory barrier here as long as we
4764 * need to ensure the ordering of writing to the SPQ element 4751 * ensure the ordering of writing to the SPQ element
4765 * and updating of the SPQ producer which involves a memory 4752 * and updating of the SPQ producer which involves a memory
4766 * read and we will have to put a full memory barrier there 4753 * read. If the memory read is removed we will have to put a
4767 * (inside bnx2x_sp_post()). 4754 * full memory barrier there (inside bnx2x_sp_post()).
4768 */ 4755 */
4769
4770 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, 4756 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4771 o->cids[cid_index], U64_HI(data_mapping), 4757 o->cids[cid_index], U64_HI(data_mapping),
4772 U64_LO(data_mapping), ETH_CONNECTION_TYPE); 4758 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4813 return bnx2x_q_send_update(bp, params); 4799 return bnx2x_q_send_update(bp, params);
4814} 4800}
4815 4801
4802static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
4803 struct bnx2x_queue_sp_obj *obj,
4804 struct bnx2x_queue_update_tpa_params *params,
4805 struct tpa_update_ramrod_data *data)
4806{
4807 data->client_id = obj->cl_id;
4808 data->complete_on_both_clients = params->complete_on_both_clients;
4809 data->dont_verify_rings_pause_thr_flg =
4810 params->dont_verify_thr;
4811 data->max_agg_size = cpu_to_le16(params->max_agg_sz);
4812 data->max_sges_for_packet = params->max_sges_pkt;
4813 data->max_tpa_queues = params->max_tpa_queues;
4814 data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
4815 data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
4816 data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
4817 data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
4818 data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
4819 data->tpa_mode = params->tpa_mode;
4820 data->update_ipv4 = params->update_ipv4;
4821 data->update_ipv6 = params->update_ipv6;
4822}
4823
4816static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp, 4824static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4817 struct bnx2x_queue_state_params *params) 4825 struct bnx2x_queue_state_params *params)
4818{ 4826{
4819 /* TODO: Not implemented yet. */ 4827 struct bnx2x_queue_sp_obj *o = params->q_obj;
4820 return -1; 4828 struct tpa_update_ramrod_data *rdata =
4829 (struct tpa_update_ramrod_data *)o->rdata;
4830 dma_addr_t data_mapping = o->rdata_mapping;
4831 struct bnx2x_queue_update_tpa_params *update_tpa_params =
4832 &params->params.update_tpa;
4833 u16 type;
4834
4835 /* Clear the ramrod data */
4836 memset(rdata, 0, sizeof(*rdata));
4837
4838 /* Fill the ramrod data */
4839 bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
4840
 4841	/* Add the function id inside the type, so that the sp-post function
 4842	 * doesn't automatically add the PF func-id; this is required
 4843	 * for operations done by PFs on behalf of their VFs
4844 */
4845 type = ETH_CONNECTION_TYPE |
4846 ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
4847
4848 /* No need for an explicit memory barrier here as long as we
4849 * ensure the ordering of writing to the SPQ element
4850 * and updating of the SPQ producer which involves a memory
4851 * read. If the memory read is removed we will have to put a
4852 * full memory barrier there (inside bnx2x_sp_post()).
4853 */
4854 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
4855 o->cids[BNX2X_PRIMARY_CID_INDEX],
4856 U64_HI(data_mapping),
4857 U64_LO(data_mapping), type);
4821} 4858}
4822 4859
4823static inline int bnx2x_q_send_halt(struct bnx2x *bp, 4860static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
5647 rdata->tx_switch_suspend = switch_update_params->suspend; 5684 rdata->tx_switch_suspend = switch_update_params->suspend;
5648 rdata->echo = SWITCH_UPDATE; 5685 rdata->echo = SWITCH_UPDATE;
5649 5686
5687 /* No need for an explicit memory barrier here as long as we
5688 * ensure the ordering of writing to the SPQ element
5689 * and updating of the SPQ producer which involves a memory
5690 * read. If the memory read is removed we will have to put a
5691 * full memory barrier there (inside bnx2x_sp_post()).
5692 */
5650 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0, 5693 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5651 U64_HI(data_mapping), 5694 U64_HI(data_mapping),
5652 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5695 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5674 rdata->allowed_priorities = afex_update_params->allowed_priorities; 5717 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5675 rdata->echo = AFEX_UPDATE; 5718 rdata->echo = AFEX_UPDATE;
5676 5719
5677 /* No need for an explicit memory barrier here as long we would 5720 /* No need for an explicit memory barrier here as long as we
5678 * need to ensure the ordering of writing to the SPQ element 5721 * ensure the ordering of writing to the SPQ element
5679 * and updating of the SPQ producer which involves a memory 5722 * and updating of the SPQ producer which involves a memory
5680 * read and we will have to put a full memory barrier there 5723 * read. If the memory read is removed we will have to put a
5681 * (inside bnx2x_sp_post()). 5724 * full memory barrier there (inside bnx2x_sp_post()).
5682 */ 5725 */
5683 DP(BNX2X_MSG_SP, 5726 DP(BNX2X_MSG_SP,
5684 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n", 5727 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
5763 rdata->traffic_type_to_priority_cos[i] = 5806 rdata->traffic_type_to_priority_cos[i] =
5764 tx_start_params->traffic_type_to_priority_cos[i]; 5807 tx_start_params->traffic_type_to_priority_cos[i];
5765 5808
5809 /* No need for an explicit memory barrier here as long as we
5810 * ensure the ordering of writing to the SPQ element
5811 * and updating of the SPQ producer which involves a memory
5812 * read. If the memory read is removed we will have to put a
5813 * full memory barrier there (inside bnx2x_sp_post()).
5814 */
5766 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0, 5815 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
5767 U64_HI(data_mapping), 5816 U64_HI(data_mapping),
5768 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5817 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a..80f6c790ed88 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
893 u8 cid_index; 893 u8 cid_index;
894}; 894};
895 895
896struct bnx2x_queue_update_tpa_params {
897 dma_addr_t sge_map;
898 u8 update_ipv4;
899 u8 update_ipv6;
900 u8 max_tpa_queues;
901 u8 max_sges_pkt;
902 u8 complete_on_both_clients;
903 u8 dont_verify_thr;
904 u8 tpa_mode;
905 u8 _pad;
906
907 u16 sge_buff_sz;
908 u16 max_agg_sz;
909
910 u16 sge_pause_thr_low;
911 u16 sge_pause_thr_high;
912};
913
896struct rxq_pause_params { 914struct rxq_pause_params {
897 u16 bd_th_lo; 915 u16 bd_th_lo;
898 u16 bd_th_hi; 916 u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
987 /* Params according to the current command */ 1005 /* Params according to the current command */
988 union { 1006 union {
989 struct bnx2x_queue_update_params update; 1007 struct bnx2x_queue_update_params update;
1008 struct bnx2x_queue_update_tpa_params update_tpa;
990 struct bnx2x_queue_setup_params setup; 1009 struct bnx2x_queue_setup_params setup;
991 struct bnx2x_queue_init_params init; 1010 struct bnx2x_queue_init_params init;
992 struct bnx2x_queue_setup_tx_only_params tx_only; 1011 struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
1403void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj, 1422void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
1404 u8 *ind_table); 1423 u8 *ind_table);
1405 1424
1406int validate_vlan_mac(struct bnx2x *bp,
1407 struct bnx2x_vlan_mac_obj *vlan_mac);
1408#endif /* BNX2X_SP_VERBS */ 1425#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48df6e94..61e6f606d8a4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,6 +102,21 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
102 mmiowb(); 102 mmiowb();
103 barrier(); 103 barrier();
104} 104}
105
106static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
107 struct bnx2x_virtf *vf,
108 bool print_err)
109{
110 if (!bnx2x_leading_vfq(vf, sp_initialized)) {
111 if (print_err)
112 BNX2X_ERR("Slowpath objects not yet initialized!\n");
113 else
114 DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
115 return false;
116 }
117 return true;
118}
119
105/* VFOP - VF slow-path operation support */ 120/* VFOP - VF slow-path operation support */
106 121
107#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000 122#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
@@ -176,6 +191,11 @@ enum bnx2x_vfop_rss_state {
176 BNX2X_VFOP_RSS_DONE 191 BNX2X_VFOP_RSS_DONE
177}; 192};
178 193
194enum bnx2x_vfop_tpa_state {
195 BNX2X_VFOP_TPA_CONFIG,
196 BNX2X_VFOP_TPA_DONE
197};
198
179#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0) 199#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
180 200
181void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf, 201void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -716,7 +736,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
716 int qid, bool drv_only) 736 int qid, bool drv_only)
717{ 737{
718 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 738 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
719 int rc;
720 739
721 if (vfop) { 740 if (vfop) {
722 struct bnx2x_vfop_args_filters filters = { 741 struct bnx2x_vfop_args_filters filters = {
@@ -736,9 +755,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
736 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 755 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
737 756
738 /* set object */ 757 /* set object */
739 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
740 if (rc)
741 return rc;
742 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 758 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
743 759
744 /* set extra args */ 760 /* set extra args */
@@ -758,9 +774,12 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
758 struct bnx2x_vfop_filters *macs, 774 struct bnx2x_vfop_filters *macs,
759 int qid, bool drv_only) 775 int qid, bool drv_only)
760{ 776{
761 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 777 struct bnx2x_vfop *vfop;
762 int rc; 778
779 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
780 return -EINVAL;
763 781
782 vfop = bnx2x_vfop_add(bp, vf);
764 if (vfop) { 783 if (vfop) {
765 struct bnx2x_vfop_args_filters filters = { 784 struct bnx2x_vfop_args_filters filters = {
766 .multi_filter = macs, 785 .multi_filter = macs,
@@ -782,9 +801,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
782 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags); 801 bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
783 802
784 /* set object */ 803 /* set object */
785 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
786 if (rc)
787 return rc;
788 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj); 804 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
789 805
790 /* set extra args */ 806 /* set extra args */
@@ -804,9 +820,12 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
804 struct bnx2x_vfop_cmd *cmd, 820 struct bnx2x_vfop_cmd *cmd,
805 int qid, u16 vid, bool add) 821 int qid, u16 vid, bool add)
806{ 822{
807 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 823 struct bnx2x_vfop *vfop;
808 int rc;
809 824
825 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
826 return -EINVAL;
827
828 vfop = bnx2x_vfop_add(bp, vf);
810 if (vfop) { 829 if (vfop) {
811 struct bnx2x_vfop_args_filters filters = { 830 struct bnx2x_vfop_args_filters filters = {
812 .multi_filter = NULL, /* single command */ 831 .multi_filter = NULL, /* single command */
@@ -826,9 +845,6 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
826 ramrod->user_req.u.vlan.vlan = vid; 845 ramrod->user_req.u.vlan.vlan = vid;
827 846
828 /* set object */ 847 /* set object */
829 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
830 if (rc)
831 return rc;
832 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 848 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
833 849
834 /* set extra args */ 850 /* set extra args */
@@ -848,7 +864,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
848 int qid, bool drv_only) 864 int qid, bool drv_only)
849{ 865{
850 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 866 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
851 int rc;
852 867
853 if (vfop) { 868 if (vfop) {
854 struct bnx2x_vfop_args_filters filters = { 869 struct bnx2x_vfop_args_filters filters = {
@@ -868,9 +883,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
868 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 883 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
869 884
870 /* set object */ 885 /* set object */
871 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
872 if (rc)
873 return rc;
874 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 886 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
875 887
876 /* set extra args */ 888 /* set extra args */
@@ -890,9 +902,12 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
890 struct bnx2x_vfop_filters *vlans, 902 struct bnx2x_vfop_filters *vlans,
891 int qid, bool drv_only) 903 int qid, bool drv_only)
892{ 904{
893 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf); 905 struct bnx2x_vfop *vfop;
894 int rc;
895 906
907 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
908 return -EINVAL;
909
910 vfop = bnx2x_vfop_add(bp, vf);
896 if (vfop) { 911 if (vfop) {
897 struct bnx2x_vfop_args_filters filters = { 912 struct bnx2x_vfop_args_filters filters = {
898 .multi_filter = vlans, 913 .multi_filter = vlans,
@@ -911,9 +926,6 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
911 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags); 926 bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
912 927
913 /* set object */ 928 /* set object */
914 rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
915 if (rc)
916 return rc;
917 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj); 929 ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
918 930
919 /* set extra args */ 931 /* set extra args */
@@ -971,11 +983,8 @@ op_err:
971op_done: 983op_done:
972 case BNX2X_VFOP_QSETUP_DONE: 984 case BNX2X_VFOP_QSETUP_DONE:
973 vf->cfg_flags |= VF_CFG_VLAN; 985 vf->cfg_flags |= VF_CFG_VLAN;
974 smp_mb__before_clear_bit(); 986 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
975 set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN, 987 BNX2X_MSG_IOV);
976 &bp->sp_rtnl_state);
977 smp_mb__after_clear_bit();
978 schedule_delayed_work(&bp->sp_rtnl_task, 0);
979 bnx2x_vfop_end(bp, vf, vfop); 988 bnx2x_vfop_end(bp, vf, vfop);
980 return; 989 return;
981 default: 990 default:
@@ -1025,34 +1034,20 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
1025 /* vlan-clear-all: driver-only, don't consume credit */ 1034 /* vlan-clear-all: driver-only, don't consume credit */
1026 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC; 1035 vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
1027 1036
1028 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) { 1037 /* the vlan_mac vfop will re-schedule us */
1029 /* the vlan_mac vfop will re-schedule us */ 1038 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
1030 vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, 1039 if (vfop->rc)
1031 qid, true); 1040 goto op_err;
1032 if (vfop->rc) 1041 return;
1033 goto op_err;
1034 return;
1035
1036 } else {
1037 /* need to reschedule ourselves */
1038 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1039 }
1040 1042
1041 case BNX2X_VFOP_QFLR_CLR_MAC: 1043 case BNX2X_VFOP_QFLR_CLR_MAC:
1042 /* mac-clear-all: driver only consume credit */ 1044 /* mac-clear-all: driver only consume credit */
1043 vfop->state = BNX2X_VFOP_QFLR_TERMINATE; 1045 vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
1044 if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) { 1046 /* the vlan_mac vfop will re-schedule us */
1045 /* the vlan_mac vfop will re-schedule us */ 1047 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
1046 vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, 1048 if (vfop->rc)
1047 qid, true); 1049 goto op_err;
1048 if (vfop->rc) 1050 return;
1049 goto op_err;
1050 return;
1051
1052 } else {
1053 /* need to reschedule ourselves */
1054 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
1055 }
1056 1051
1057 case BNX2X_VFOP_QFLR_TERMINATE: 1052 case BNX2X_VFOP_QFLR_TERMINATE:
1058 qstate = &vfop->op_p->qctor.qstate; 1053 qstate = &vfop->op_p->qctor.qstate;
@@ -1095,8 +1090,13 @@ static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
1095 1090
1096 if (vfop) { 1091 if (vfop) {
1097 vfop->args.qx.qid = qid; 1092 vfop->args.qx.qid = qid;
1098 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN, 1093 if ((qid == LEADING_IDX) &&
1099 bnx2x_vfop_qflr, cmd->done); 1094 bnx2x_validate_vf_sp_objs(bp, vf, false))
1095 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
1096 bnx2x_vfop_qflr, cmd->done);
1097 else
1098 bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
1099 bnx2x_vfop_qflr, cmd->done);
1100 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr, 1100 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
1101 cmd->block); 1101 cmd->block);
1102 } 1102 }
@@ -1310,7 +1310,10 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
1310 switch (state) { 1310 switch (state) {
1311 case BNX2X_VFOP_QTEARDOWN_RXMODE: 1311 case BNX2X_VFOP_QTEARDOWN_RXMODE:
1312 /* Drop all */ 1312 /* Drop all */
1313 vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN; 1313 if (bnx2x_validate_vf_sp_objs(bp, vf, true))
1314 vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
1315 else
1316 vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
1314 vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0); 1317 vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
1315 if (vfop->rc) 1318 if (vfop->rc)
1316 goto op_err; 1319 goto op_err;
@@ -2117,7 +2120,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2117 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ); 2120 cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
2118 2121
2119 if (cxt->size) { 2122 if (cxt->size) {
2120 BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size); 2123 cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
2124 if (!cxt->addr)
2125 goto alloc_mem_err;
2121 } else { 2126 } else {
2122 cxt->addr = NULL; 2127 cxt->addr = NULL;
2123 cxt->mapping = 0; 2128 cxt->mapping = 0;
@@ -2127,20 +2132,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
2127 2132
2128 /* allocate vfs ramrods dma memory - client_init and set_mac */ 2133 /* allocate vfs ramrods dma memory - client_init and set_mac */
2129 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp); 2134 tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
2130 BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping, 2135 BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
2131 tot_size); 2136 tot_size);
2137 if (!BP_VFDB(bp)->sp_dma.addr)
2138 goto alloc_mem_err;
2132 BP_VFDB(bp)->sp_dma.size = tot_size; 2139 BP_VFDB(bp)->sp_dma.size = tot_size;
2133 2140
2134 /* allocate mailboxes */ 2141 /* allocate mailboxes */
2135 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE; 2142 tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
2136 BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping, 2143 BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
2137 tot_size); 2144 tot_size);
2145 if (!BP_VF_MBX_DMA(bp)->addr)
2146 goto alloc_mem_err;
2147
2138 BP_VF_MBX_DMA(bp)->size = tot_size; 2148 BP_VF_MBX_DMA(bp)->size = tot_size;
2139 2149
2140 /* allocate local bulletin boards */ 2150 /* allocate local bulletin boards */
2141 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE; 2151 tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
2142 BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr, 2152 BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
2143 &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size); 2153 tot_size);
2154 if (!BP_VF_BULLETIN_DMA(bp)->addr)
2155 goto alloc_mem_err;
2156
2144 BP_VF_BULLETIN_DMA(bp)->size = tot_size; 2157 BP_VF_BULLETIN_DMA(bp)->size = tot_size;
2145 2158
2146 return 0; 2159 return 0;
@@ -2166,6 +2179,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
2166 bnx2x_vf_sp_map(bp, vf, q_data), 2179 bnx2x_vf_sp_map(bp, vf, q_data),
2167 q_type); 2180 q_type);
2168 2181
2182 /* sp indication is set only when vlan/mac/etc. are initialized */
2183 q->sp_initialized = false;
2184
2169 DP(BNX2X_MSG_IOV, 2185 DP(BNX2X_MSG_IOV,
2170 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n", 2186 "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
2171 vf->abs_vfid, q->sp_obj.func_id, q->cid); 2187 vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2527,10 +2543,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2527 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 2543 first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
2528 (is_fcoe ? 0 : 1); 2544 (is_fcoe ? 0 : 1);
2529 2545
2530 DP(BNX2X_MSG_IOV, 2546 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2531 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n", 2547 "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
2532 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, 2548 BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
2533 first_queue_query_index + num_queues_req); 2549 first_queue_query_index + num_queues_req);
2534 2550
2535 cur_data_offset = bp->fw_stats_data_mapping + 2551 cur_data_offset = bp->fw_stats_data_mapping +
2536 offsetof(struct bnx2x_fw_stats_data, queue_stats) + 2552 offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +2560,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
2544 struct bnx2x_virtf *vf = BP_VF(bp, i); 2560 struct bnx2x_virtf *vf = BP_VF(bp, i);
2545 2561
2546 if (vf->state != VF_ENABLED) { 2562 if (vf->state != VF_ENABLED) {
2547 DP(BNX2X_MSG_IOV, 2563 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
2548 "vf %d not enabled so no stats for it\n", 2564 "vf %d not enabled so no stats for it\n",
2549 vf->abs_vfid); 2565 vf->abs_vfid);
2550 continue; 2566 continue;
2551 } 2567 }
2552 2568
@@ -2597,7 +2613,8 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
2597 /* Iterate over all VFs and invoke state transition for VFs with 2613 /* Iterate over all VFs and invoke state transition for VFs with
2598 * 'in-progress' slow-path operations 2614 * 'in-progress' slow-path operations
2599 */ 2615 */
2600 DP(BNX2X_MSG_IOV, "searching for pending vf operations\n"); 2616 DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
2617 "searching for pending vf operations\n");
2601 for_each_vf(bp, i) { 2618 for_each_vf(bp, i) {
2602 struct bnx2x_virtf *vf = BP_VF(bp, i); 2619 struct bnx2x_virtf *vf = BP_VF(bp, i);
2603 2620
@@ -3046,6 +3063,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
3046 return -ENOMEM; 3063 return -ENOMEM;
3047} 3064}
3048 3065
3066/* VFOP tpa update, send update on all queues */
3067static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
3068{
3069 struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
3070 struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
3071 enum bnx2x_vfop_tpa_state state = vfop->state;
3072
3073 bnx2x_vfop_reset_wq(vf);
3074
3075 if (vfop->rc < 0)
3076 goto op_err;
3077
3078 DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
3079 vf->abs_vfid, tpa_args->qid,
3080 state);
3081
3082 switch (state) {
3083 case BNX2X_VFOP_TPA_CONFIG:
3084
3085 if (tpa_args->qid < vf_rxq_count(vf)) {
3086 struct bnx2x_queue_state_params *qstate =
3087 &vf->op_params.qstate;
3088
3089 qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
3090
3091 /* The only thing that changes for the ramrod params
3092 * between calls is the sge_map
3093 */
3094 qstate->params.update_tpa.sge_map =
3095 tpa_args->sge_map[tpa_args->qid];
3096
3097 DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
3098 tpa_args->qid,
3099 U64_HI(qstate->params.update_tpa.sge_map),
3100 U64_LO(qstate->params.update_tpa.sge_map));
3101 qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
3102 vfop->rc = bnx2x_queue_state_change(bp, qstate);
3103
3104 tpa_args->qid++;
3105 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
3106 }
3107 vfop->state = BNX2X_VFOP_TPA_DONE;
3108 vfop->rc = 0;
3109 bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
3110op_err:
3111 BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
3112op_done:
3113 case BNX2X_VFOP_TPA_DONE:
3114 bnx2x_vfop_end(bp, vf, vfop);
3115 return;
3116 default:
3117 bnx2x_vfop_default(state);
3118 }
3119op_pending:
3120 return;
3121}
3122
3123int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
3124 struct bnx2x_virtf *vf,
3125 struct bnx2x_vfop_cmd *cmd,
3126 struct vfpf_tpa_tlv *tpa_tlv)
3127{
3128 struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
3129
3130 if (vfop) {
3131 vfop->args.qx.qid = 0; /* loop */
3132 memcpy(&vfop->args.tpa.sge_map,
3133 tpa_tlv->tpa_client_info.sge_addr,
3134 sizeof(vfop->args.tpa.sge_map));
3135 bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
3136 bnx2x_vfop_tpa, cmd->done);
3137 return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
3138 cmd->block);
3139 }
3140 return -ENOMEM;
3141}
3142
3049/* VF release ~ VF close + VF release-resources 3143/* VF release ~ VF close + VF release-resources
3050 * Release is the ultimate SW shutdown and is called whenever an 3144 * Release is the ultimate SW shutdown and is called whenever an
3051 * irrecoverable error is encountered. 3145 * irrecoverable error is encountered.
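bnx2x_vfop_tpa() above loops through BNX2X_VFOP_TPA_CONFIG once per VF rx queue: each pass points the shared qstate at the next queue object, swaps in that queue's SGE mapping, issues BNX2X_Q_CMD_UPDATE_TPA, and bnx2x_vfop_finalize() re-enters the handler until qid reaches vf_rxq_count(vf). A hedged sketch of how the PF mailbox side would kick this off (abbreviated; the cmd and TLV field names follow the vfpf hunks below):

    struct bnx2x_vfop_cmd cmd = {
            .done  = bnx2x_vf_mbx_resp,   /* reply to the VF when the op ends */
            .block = false,               /* run asynchronously */
    };

    rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, &mbx->msg->req.update_tpa);
    if (rc)
            BNX2X_ERR("TPA update command failed: %d\n", rc);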
@@ -3074,16 +3168,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
3074 *sbdf = vf->devfn | (vf->bus << 8); 3168 *sbdf = vf->devfn | (vf->bus << 8);
3075} 3169}
3076 3170
3077static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
3078 struct bnx2x_vf_bar_info *bar_info)
3079{
3080 int n;
3081
3082 bar_info->nr_bars = bp->vfdb->sriov.nres;
3083 for (n = 0; n < bar_info->nr_bars; n++)
3084 bar_info->bars[n] = vf->bars[n];
3085}
3086
3087void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf, 3171void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3088 enum channel_tlvs tlv) 3172 enum channel_tlvs tlv)
3089{ 3173{
@@ -3405,13 +3489,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3405 ivi->spoofchk = 1; /*always enabled */ 3489 ivi->spoofchk = 1; /*always enabled */
3406 if (vf->state == VF_ENABLED) { 3490 if (vf->state == VF_ENABLED) {
3407 /* mac and vlan are in vlan_mac objects */ 3491 /* mac and vlan are in vlan_mac objects */
3408 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj))) 3492 if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
3409 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac, 3493 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3410 0, ETH_ALEN); 3494 0, ETH_ALEN);
3411 if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
3412 vlan_obj->get_n_elements(bp, vlan_obj, 1, 3495 vlan_obj->get_n_elements(bp, vlan_obj, 1,
3413 (u8 *)&ivi->vlan, 0, 3496 (u8 *)&ivi->vlan, 0,
3414 VLAN_HLEN); 3497 VLAN_HLEN);
3498 }
3415 } else { 3499 } else {
3416 /* mac */ 3500 /* mac */
3417 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID)) 3501 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +3569,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3485 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3569 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3486 /* configure the mac in device on this vf's queue */ 3570 /* configure the mac in device on this vf's queue */
3487 unsigned long ramrod_flags = 0; 3571 unsigned long ramrod_flags = 0;
3488 struct bnx2x_vlan_mac_obj *mac_obj = 3572 struct bnx2x_vlan_mac_obj *mac_obj;
3489 &bnx2x_leading_vfq(vf, mac_obj);
3490 3573
3491 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3574 /* User should be able to see failure reason in system logs */
3492 if (rc) 3575 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3493 return rc; 3576 return -EINVAL;
3494 3577
3495 /* must lock vfpf channel to protect against vf flows */ 3578 /* must lock vfpf channel to protect against vf flows */
3496 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3579 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3497 3580
3498 /* remove existing eth macs */ 3581 /* remove existing eth macs */
3582 mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
3499 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true); 3583 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
3500 if (rc) { 3584 if (rc) {
3501 BNX2X_ERR("failed to delete eth macs\n"); 3585 BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +3653,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3569 BNX2X_Q_LOGICAL_STATE_ACTIVE) 3653 BNX2X_Q_LOGICAL_STATE_ACTIVE)
3570 return rc; 3654 return rc;
3571 3655
3572 /* configure the vlan in device on this vf's queue */ 3656 /* User should be able to see error in system logs */
3573 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj); 3657 if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
3574 rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)); 3658 return -EINVAL;
3575 if (rc)
3576 return rc;
3577 3659
3578 /* must lock vfpf channel to protect against vf flows */ 3660 /* must lock vfpf channel to protect against vf flows */
3579 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN); 3661 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3580 3662
3581 /* remove existing vlans */ 3663 /* remove existing vlans */
3582 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); 3664 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3665 vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
3583 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags, 3666 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3584 &ramrod_flags); 3667 &ramrod_flags);
3585 if (rc) { 3668 if (rc) {
@@ -3736,13 +3819,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
3736 bnx2x_sample_bulletin(bp); 3819 bnx2x_sample_bulletin(bp);
3737 3820
3738 /* if channel is down we need to self destruct */ 3821 /* if channel is down we need to self destruct */
3739 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) { 3822 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
3740 smp_mb__before_clear_bit(); 3823 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
3741 set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN, 3824 BNX2X_MSG_IOV);
3742 &bp->sp_rtnl_state);
3743 smp_mb__after_clear_bit();
3744 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3745 }
3746} 3825}
3747 3826
3748void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) 3827void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +3835,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3756 mutex_init(&bp->vf2pf_mutex); 3835 mutex_init(&bp->vf2pf_mutex);
3757 3836
3758 /* allocate vf2pf mailbox for vf to pf channel */ 3837 /* allocate vf2pf mailbox for vf to pf channel */
3759 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3838 bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
3760 sizeof(struct bnx2x_vf_mbx_msg)); 3839 sizeof(struct bnx2x_vf_mbx_msg));
3840 if (!bp->vf2pf_mbox)
3841 goto alloc_mem_err;
3761 3842
3762 /* allocate pf 2 vf bulletin board */ 3843 /* allocate pf 2 vf bulletin board */
3763 BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping, 3844 bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
3764 sizeof(union pf_vf_bulletin)); 3845 sizeof(union pf_vf_bulletin));
3846 if (!bp->pf2vf_bulletin)
3847 goto alloc_mem_err;
3765 3848
3766 return 0; 3849 return 0;
3767 3850
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9d..b1dc751c6175 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
83 u16 index; 83 u16 index;
84 u16 sb_idx; 84 u16 sb_idx;
85 bool is_leading; 85 bool is_leading;
86 bool sp_initialized;
86}; 87};
87 88
88/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters: 89/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
100 struct bnx2x_mcast_ramrod_params mcast; 101 struct bnx2x_mcast_ramrod_params mcast;
101 struct bnx2x_config_rss_params rss; 102 struct bnx2x_config_rss_params rss;
102 struct bnx2x_vfop_qctor_params qctor; 103 struct bnx2x_vfop_qctor_params qctor;
104 struct bnx2x_queue_state_params qstate;
103}; 105};
104 106
105/* forward */ 107/* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
166 atomic_t *credit; /* non NULL means 'don't consume credit' */ 168 atomic_t *credit; /* non NULL means 'don't consume credit' */
167}; 169};
168 170
171struct bnx2x_vfop_args_tpa {
172 int qid;
173 dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
174};
175
169union bnx2x_vfop_args { 176union bnx2x_vfop_args {
170 struct bnx2x_vfop_args_mcast mc_list; 177 struct bnx2x_vfop_args_mcast mc_list;
171 struct bnx2x_vfop_args_qctor qctor; 178 struct bnx2x_vfop_args_qctor qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
173 struct bnx2x_vfop_args_defvlan defvlan; 180 struct bnx2x_vfop_args_defvlan defvlan;
174 struct bnx2x_vfop_args_qx qx; 181 struct bnx2x_vfop_args_qx qx;
175 struct bnx2x_vfop_args_filters filters; 182 struct bnx2x_vfop_args_filters filters;
183 struct bnx2x_vfop_args_tpa tpa;
176}; 184};
177 185
178struct bnx2x_vfop { 186struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
704 struct bnx2x_virtf *vf, 712 struct bnx2x_virtf *vf,
705 struct bnx2x_vfop_cmd *cmd); 713 struct bnx2x_vfop_cmd *cmd);
706 714
715int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
716 struct bnx2x_virtf *vf,
717 struct bnx2x_vfop_cmd *cmd,
718 struct vfpf_tpa_tlv *tpa_tlv);
719
707/* VF release ~ VF close + VF release-resources 720/* VF release ~ VF close + VF release-resources
708 * 721 *
709 * Release is the ultimate SW shutdown and is called whenever an 722 * Release is the ultimate SW shutdown and is called whenever an
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a2a5a9..1117ed7776b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
548 548
549 vf->leading_rss = cl_id; 549 vf->leading_rss = cl_id;
550 q->is_leading = true; 550 q->is_leading = true;
551 q->sp_initialized = true;
551} 552}
552 553
553/* ask the pf to open a queue for the vf */ 554/* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
1159 resp->pfdev_info.db_size = bp->db_size; 1160 resp->pfdev_info.db_size = bp->db_size;
1160 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2; 1161 resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
1161 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS | 1162 resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
1162 /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA); 1163 PFVF_CAP_TPA |
1164 PFVF_CAP_TPA_UPDATE);
1163 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver, 1165 bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
1164 sizeof(resp->pfdev_info.fw_ver)); 1166 sizeof(resp->pfdev_info.fw_ver));
1165 1167
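With PFVF_CAP_TPA_UPDATE now advertised in the acquire response, a VF can gate its TPA reconfiguration on the PF's capability word. A hypothetical VF-side check (acquire_resp and pf_cap exist in the VF path; the surrounding function is illustrative, not part of this patch):

    if (!(bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_TPA_UPDATE)) {
            DP(BNX2X_MSG_IOV,
               "PF does not support TPA update; keeping old SGE mapping\n");
            return -EOPNOTSUPP;
    }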
@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
1694 return -ENOMEM; 1696 return -ENOMEM;
1695} 1697}
1696 1698
1697static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp, 1699static int bnx2x_filters_validate_mac(struct bnx2x *bp,
1698 struct bnx2x_virtf *vf, 1700 struct bnx2x_virtf *vf,
1699 struct bnx2x_vf_mbx *mbx) 1701 struct vfpf_set_q_filters_tlv *filters)
1700{ 1702{
1701 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1702 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index); 1703 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1703 struct bnx2x_vfop_cmd cmd = { 1704 int rc = 0;
1704 .done = bnx2x_vf_mbx_resp,
1705 .block = false,
1706 };
1707 1705
1708 /* if a mac was already set for this VF via the set vf mac ndo, we only 1706 /* if a mac was already set for this VF via the set vf mac ndo, we only
1709 * accept mac configurations of that mac. Why accept them at all? 1707 * accept mac configurations of that mac. Why accept them at all?
@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1716 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n", 1714 BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
1717 vf->abs_vfid); 1715 vf->abs_vfid);
1718 vf->op_rc = -EPERM; 1716 vf->op_rc = -EPERM;
1717 rc = -EPERM;
1719 goto response; 1718 goto response;
1720 } 1719 }
1721 1720
@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1726 vf->abs_vfid); 1725 vf->abs_vfid);
1727 1726
1728 vf->op_rc = -EPERM; 1727 vf->op_rc = -EPERM;
1728 rc = -EPERM;
1729 goto response; 1729 goto response;
1730 } 1730 }
1731 } 1731 }
1732
1733response:
1734 return rc;
1735}
1736
1737static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
1738 struct bnx2x_virtf *vf,
1739 struct vfpf_set_q_filters_tlv *filters)
1740{
1741 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
1742 int rc = 0;
1743
1732 /* if vlan was set by hypervisor we don't allow guest to config vlan */ 1744 /* if vlan was set by hypervisor we don't allow guest to config vlan */
1733 if (bulletin->valid_bitmap & 1 << VLAN_VALID) { 1745 if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
1734 int i; 1746 int i;
@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1740 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n", 1752 BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
1741 vf->abs_vfid); 1753 vf->abs_vfid);
1742 vf->op_rc = -EPERM; 1754 vf->op_rc = -EPERM;
1755 rc = -EPERM;
1743 goto response; 1756 goto response;
1744 } 1757 }
1745 } 1758 }
1746 } 1759 }
1747 1760
1748 /* verify vf_qid */ 1761 /* verify vf_qid */
1749 if (filters->vf_qid > vf_rxq_count(vf)) 1762 if (filters->vf_qid > vf_rxq_count(vf)) {
1763 rc = -EPERM;
1764 goto response;
1765 }
1766
1767response:
1768 return rc;
1769}
1770
1771static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
1772 struct bnx2x_virtf *vf,
1773 struct bnx2x_vf_mbx *mbx)
1774{
1775 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
1776 struct bnx2x_vfop_cmd cmd = {
1777 .done = bnx2x_vf_mbx_resp,
1778 .block = false,
1779 };
1780
1781 if (bnx2x_filters_validate_mac(bp, vf, filters))
1782 goto response;
1783
1784 if (bnx2x_filters_validate_vlan(bp, vf, filters))
1750 goto response; 1785 goto response;
1751 1786
1752 DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n", 1787 DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1877,6 +1912,75 @@ mbx_resp:
1877 bnx2x_vf_mbx_resp(bp, vf); 1912 bnx2x_vf_mbx_resp(bp, vf);
1878} 1913}
1879 1914
1915static int bnx2x_validate_tpa_params(struct bnx2x *bp,
1916 struct vfpf_tpa_tlv *tpa_tlv)
1917{
1918 int rc = 0;
1919
1920 if (tpa_tlv->tpa_client_info.max_sges_for_packet >
1921 U_ETH_MAX_SGES_FOR_PACKET) {
1922 rc = -EINVAL;
1923 BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
1924 tpa_tlv->tpa_client_info.max_sges_for_packet,
1925 U_ETH_MAX_SGES_FOR_PACKET);
1926 }
1927
1928 if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
1929 rc = -EINVAL;
1930 BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
1931 tpa_tlv->tpa_client_info.max_tpa_queues,
1932 MAX_AGG_QS(bp));
1933 }
1934
1935 return rc;
1936}
1937
1938static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
1939 struct bnx2x_vf_mbx *mbx)
1940{
1941 struct bnx2x_vfop_cmd cmd = {
1942 .done = bnx2x_vf_mbx_resp,
1943 .block = false,
1944 };
1945 struct bnx2x_queue_update_tpa_params *vf_op_params =
1946 &vf->op_params.qstate.params.update_tpa;
1947 struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
1948
1949 memset(vf_op_params, 0, sizeof(*vf_op_params));
1950
1951 if (bnx2x_validate_tpa_params(bp, tpa_tlv))
1952 goto mbx_resp;
1953
1954 vf_op_params->complete_on_both_clients =
1955 tpa_tlv->tpa_client_info.complete_on_both_clients;
1956 vf_op_params->dont_verify_thr =
1957 tpa_tlv->tpa_client_info.dont_verify_thr;
1958 vf_op_params->max_agg_sz =
1959 tpa_tlv->tpa_client_info.max_agg_size;
1960 vf_op_params->max_sges_pkt =
1961 tpa_tlv->tpa_client_info.max_sges_for_packet;
1962 vf_op_params->max_tpa_queues =
1963 tpa_tlv->tpa_client_info.max_tpa_queues;
1964 vf_op_params->sge_buff_sz =
1965 tpa_tlv->tpa_client_info.sge_buff_size;
1966 vf_op_params->sge_pause_thr_high =
1967 tpa_tlv->tpa_client_info.sge_pause_thr_high;
1968 vf_op_params->sge_pause_thr_low =
1969 tpa_tlv->tpa_client_info.sge_pause_thr_low;
1970 vf_op_params->tpa_mode =
1971 tpa_tlv->tpa_client_info.tpa_mode;
1972 vf_op_params->update_ipv4 =
1973 tpa_tlv->tpa_client_info.update_ipv4;
1974 vf_op_params->update_ipv6 =
1975 tpa_tlv->tpa_client_info.update_ipv6;
1976
1977 vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
1978
1979mbx_resp:
1980 if (vf->op_rc)
1981 bnx2x_vf_mbx_resp(bp, vf);
1982}
1983
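For reference, a minimal sketch of the VF-side counterpart that would populate the TLV consumed by bnx2x_vf_mbx_update_tpa() above; the field names follow the vfpf_tpa_tlv definition added to bnx2x_vfpf.h further down, while the helper itself and its parameters are hypothetical:

	/* Illustrative only: fill the TPA client info that the PF then
	 * validates against U_ETH_MAX_SGES_FOR_PACKET and MAX_AGG_QS().
	 */
	static void vf_fill_tpa_request(struct vfpf_tpa_tlv *req,
					u16 sge_buff_size, u16 max_agg_size,
					u8 max_tpa_queues, u8 max_sges)
	{
		req->tpa_client_info.sge_buff_size = sge_buff_size;
		req->tpa_client_info.max_agg_size = max_agg_size;
		req->tpa_client_info.max_tpa_queues = max_tpa_queues;
		req->tpa_client_info.max_sges_for_packet = max_sges;
		req->tpa_client_info.update_ipv4 = 1;
		req->tpa_client_info.update_ipv6 = 1;
	}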
1880/* dispatch request */ 1984/* dispatch request */
1881static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf, 1985static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1882 struct bnx2x_vf_mbx *mbx) 1986 struct bnx2x_vf_mbx *mbx)
@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
1916 case CHANNEL_TLV_UPDATE_RSS: 2020 case CHANNEL_TLV_UPDATE_RSS:
1917 bnx2x_vf_mbx_update_rss(bp, vf, mbx); 2021 bnx2x_vf_mbx_update_rss(bp, vf, mbx);
1918 return; 2022 return;
2023 case CHANNEL_TLV_UPDATE_TPA:
2024 bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
2025 return;
1919 } 2026 }
1920 2027
1921 } else { 2028 } else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71..c922b81170e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
162#define PFVF_CAP_RSS 0x00000001 162#define PFVF_CAP_RSS 0x00000001
163#define PFVF_CAP_DHC 0x00000002 163#define PFVF_CAP_DHC 0x00000002
164#define PFVF_CAP_TPA 0x00000004 164#define PFVF_CAP_TPA 0x00000004
165#define PFVF_CAP_TPA_UPDATE 0x00000008
165 char fw_ver[32]; 166 char fw_ver[32];
166 u16 db_size; 167 u16 db_size;
167 u8 indices_per_sb; 168 u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
303 u32 rx_mask; /* see mask constants at the top of the file */ 304 u32 rx_mask; /* see mask constants at the top of the file */
304}; 305};
305 306
307struct vfpf_tpa_tlv {
308 struct vfpf_first_tlv first_tlv;
309
310 struct vf_pf_tpa_client_info {
311 aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
312 u8 update_ipv4;
313 u8 update_ipv6;
314 u8 max_tpa_queues;
315 u8 max_sges_for_packet;
316 u8 complete_on_both_clients;
317 u8 dont_verify_thr;
318 u8 tpa_mode;
319 u16 sge_buff_size;
320 u16 max_agg_size;
321 u16 sge_pause_thr_low;
322 u16 sge_pause_thr_high;
323 } tpa_client_info;
324};
325
306/* close VF (disable VF) */ 326/* close VF (disable VF) */
307struct vfpf_close_tlv { 327struct vfpf_close_tlv {
308 struct vfpf_first_tlv first_tlv; 328 struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
331 struct vfpf_set_q_filters_tlv set_q_filters; 351 struct vfpf_set_q_filters_tlv set_q_filters;
332 struct vfpf_release_tlv release; 352 struct vfpf_release_tlv release;
333 struct vfpf_rss_tlv update_rss; 353 struct vfpf_rss_tlv update_rss;
354 struct vfpf_tpa_tlv update_tpa;
334 struct channel_list_end_tlv list_end; 355 struct channel_list_end_tlv list_end;
335 struct tlv_buffer_size tlv_buf_size; 356 struct tlv_buffer_size tlv_buf_size;
336}; 357};
@@ -405,6 +426,7 @@ enum channel_tlvs {
405 CHANNEL_TLV_PF_SET_VLAN, 426 CHANNEL_TLV_PF_SET_VLAN,
406 CHANNEL_TLV_UPDATE_RSS, 427 CHANNEL_TLV_UPDATE_RSS,
407 CHANNEL_TLV_PHYS_PORT_ID, 428 CHANNEL_TLV_PHYS_PORT_ID,
429 CHANNEL_TLV_UPDATE_TPA,
408 CHANNEL_TLV_MAX 430 CHANNEL_TLV_MAX
409}; 431};
410 432
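Since a PF that predates this change will not advertise the new capability bit, a VF should gate the update request on the acquire response; a one-line sketch (the response pointer and the surrounding send path are assumed):

	/* Illustrative only: fall back to acquire-time TPA settings when
	 * the PF does not support CHANNEL_TLV_UPDATE_TPA.
	 */
	if (!(resp->pfdev_info.pf_cap & PFVF_CAP_TPA_UPDATE))
		return 0;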
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 000000000000..31f55a90a197
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_BCMGENET) += genet.o
2genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 000000000000..8f87fe001541
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2575 @@
1/*
2 * Broadcom GENET (Gigabit Ethernet) controller driver
3 *
4 * Copyright (c) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "bcmgenet: " fmt
21
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/types.h>
26#include <linux/fcntl.h>
27#include <linux/interrupt.h>
28#include <linux/string.h>
29#include <linux/if_ether.h>
30#include <linux/init.h>
31#include <linux/errno.h>
32#include <linux/delay.h>
33#include <linux/platform_device.h>
34#include <linux/dma-mapping.h>
35#include <linux/pm.h>
36#include <linux/clk.h>
37#include <linux/version.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_irq.h>
41#include <linux/of_net.h>
42#include <linux/of_platform.h>
43#include <net/arp.h>
44
45#include <linux/mii.h>
46#include <linux/ethtool.h>
47#include <linux/netdevice.h>
48#include <linux/inetdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/in.h>
52#include <linux/ip.h>
53#include <linux/ipv6.h>
54#include <linux/phy.h>
55
56#include <asm/unaligned.h>
57
58#include "bcmgenet.h"
59
60/* Maximum number of hardware queues, downsized if needed */
61#define GENET_MAX_MQ_CNT 4
62
63/* Default highest priority queue for multi queue support */
64#define GENET_Q0_PRIORITY 0
65
66#define GENET_DEFAULT_BD_CNT \
67 (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
68
69#define RX_BUF_LENGTH 2048
70#define SKB_ALIGNMENT 32
71
72/* Tx/Rx DMA register offset, skip 256 descriptors */
73#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
74#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
75
76#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
77 TOTAL_DESC * DMA_DESC_SIZE)
78
79#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
80 TOTAL_DESC * DMA_DESC_SIZE)
81
82static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
83 void __iomem *d, u32 value)
84{
85 __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
86}
87
88static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
89 void __iomem *d)
90{
91 return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
92}
93
94static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
95 void __iomem *d,
96 dma_addr_t addr)
97{
98 __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
99
100	/* Register writes to the GISB bus can take a couple hundred nanoseconds
101	 * and are done for each packet; save these expensive writes unless
102	 * the platform is explicitly configured for 64-bit/LPAE.
103 */
104#ifdef CONFIG_PHYS_ADDR_T_64BIT
105 if (priv->hw_params->flags & GENET_HAS_40BITS)
106 __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
107#endif
108}
109
110/* Combined address + length/status setter */
111static inline void dmadesc_set(struct bcmgenet_priv *priv,
112 void __iomem *d, dma_addr_t addr, u32 val)
113{
114 dmadesc_set_length_status(priv, d, val);
115 dmadesc_set_addr(priv, d, addr);
116}
117
118static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
119 void __iomem *d)
120{
121 dma_addr_t addr;
122
123 addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
124
125	/* Register writes to the GISB bus can take a couple hundred nanoseconds
126	 * and are done for each packet; save these expensive writes unless
127	 * the platform is explicitly configured for 64-bit/LPAE.
128 */
129#ifdef CONFIG_PHYS_ADDR_T_64BIT
130 if (priv->hw_params->flags & GENET_HAS_40BITS)
131 addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
132#endif
133 return addr;
134}
135
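The LO/HI handling in dmadesc_set_addr()/dmadesc_get_addr() is plain 32-bit slicing of a 40-bit bus address; a standalone user-space illustration with a made-up address:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x12abcd1234ULL;	/* 40-bit DMA address */
		uint32_t lo = (uint32_t)addr;		/* DMA_DESC_ADDRESS_LO */
		uint32_t hi = (uint32_t)(addr >> 32);	/* DMA_DESC_ADDRESS_HI */

		/* prints lo=0xabcd1234 hi=0x12 and recombines to the original */
		printf("lo=0x%08x hi=0x%02x back=0x%llx\n",
		       (unsigned)lo, (unsigned)hi,
		       (unsigned long long)(((uint64_t)hi << 32) | lo));
		return 0;
	}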
136#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
137
138#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
139 NETIF_MSG_LINK)
140
141static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
142{
143 if (GENET_IS_V1(priv))
144 return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
145 else
146 return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
147}
148
149static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
150{
151 if (GENET_IS_V1(priv))
152 bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
153 else
154 bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
155}
156
157/* These helpers deal with the register map change
158 * between GENET 1.1 and GENET2. Only those currently used
159 * by the driver are defined.
160 */
161static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
162{
163 if (GENET_IS_V1(priv))
164 return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
165 else
166 return __raw_readl(priv->base +
167 priv->hw_params->tbuf_offset + TBUF_CTRL);
168}
169
170static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
171{
172 if (GENET_IS_V1(priv))
173 bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
174 else
175 __raw_writel(val, priv->base +
176 priv->hw_params->tbuf_offset + TBUF_CTRL);
177}
178
179static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
180{
181 if (GENET_IS_V1(priv))
182 return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
183 else
184 return __raw_readl(priv->base +
185 priv->hw_params->tbuf_offset + TBUF_BP_MC);
186}
187
188static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
189{
190 if (GENET_IS_V1(priv))
191 bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
192 else
193 __raw_writel(val, priv->base +
194 priv->hw_params->tbuf_offset + TBUF_BP_MC);
195}
196
197/* RX/TX DMA register accessors */
198enum dma_reg {
199 DMA_RING_CFG = 0,
200 DMA_CTRL,
201 DMA_STATUS,
202 DMA_SCB_BURST_SIZE,
203 DMA_ARB_CTRL,
204 DMA_PRIORITY,
205 DMA_RING_PRIORITY,
206};
207
208static const u8 bcmgenet_dma_regs_v3plus[] = {
209 [DMA_RING_CFG] = 0x00,
210 [DMA_CTRL] = 0x04,
211 [DMA_STATUS] = 0x08,
212 [DMA_SCB_BURST_SIZE] = 0x0C,
213 [DMA_ARB_CTRL] = 0x2C,
214 [DMA_PRIORITY] = 0x30,
215 [DMA_RING_PRIORITY] = 0x38,
216};
217
218static const u8 bcmgenet_dma_regs_v2[] = {
219 [DMA_RING_CFG] = 0x00,
220 [DMA_CTRL] = 0x04,
221 [DMA_STATUS] = 0x08,
222 [DMA_SCB_BURST_SIZE] = 0x0C,
223 [DMA_ARB_CTRL] = 0x30,
224 [DMA_PRIORITY] = 0x34,
225 [DMA_RING_PRIORITY] = 0x3C,
226};
227
228static const u8 bcmgenet_dma_regs_v1[] = {
229 [DMA_CTRL] = 0x00,
230 [DMA_STATUS] = 0x04,
231 [DMA_SCB_BURST_SIZE] = 0x0C,
232 [DMA_ARB_CTRL] = 0x30,
233 [DMA_PRIORITY] = 0x34,
234 [DMA_RING_PRIORITY] = 0x3C,
235};
236
237/* Set at runtime once bcmgenet version is known */
238static const u8 *bcmgenet_dma_regs;
239
240static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
241{
242 return netdev_priv(dev_get_drvdata(dev));
243}
244
245static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
246 enum dma_reg r)
247{
248 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
249 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
250}
251
252static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
253 u32 val, enum dma_reg r)
254{
255 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
256 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
257}
258
259static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
260 enum dma_reg r)
261{
262 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
263 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
264}
265
266static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
267 u32 val, enum dma_reg r)
268{
269 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
270 DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
271}
272
273/* RDMA/TDMA ring registers and accessors
274 * we merge the common fields and just prefix with T/D the registers
275 * having different meaning depending on the direction
276 */
277enum dma_ring_reg {
278 TDMA_READ_PTR = 0,
279 RDMA_WRITE_PTR = TDMA_READ_PTR,
280 TDMA_READ_PTR_HI,
281 RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
282 TDMA_CONS_INDEX,
283 RDMA_PROD_INDEX = TDMA_CONS_INDEX,
284 TDMA_PROD_INDEX,
285 RDMA_CONS_INDEX = TDMA_PROD_INDEX,
286 DMA_RING_BUF_SIZE,
287 DMA_START_ADDR,
288 DMA_START_ADDR_HI,
289 DMA_END_ADDR,
290 DMA_END_ADDR_HI,
291 DMA_MBUF_DONE_THRESH,
292 TDMA_FLOW_PERIOD,
293 RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
294 TDMA_WRITE_PTR,
295 RDMA_READ_PTR = TDMA_WRITE_PTR,
296 TDMA_WRITE_PTR_HI,
297 RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
298};
299
300/* GENET v4 supports 40-bit pointer addressing.
301 * For obvious reasons the LO and HI word parts
302 * are contiguous, but this offsets the other
303 * registers.
304 */
305static const u8 genet_dma_ring_regs_v4[] = {
306 [TDMA_READ_PTR] = 0x00,
307 [TDMA_READ_PTR_HI] = 0x04,
308 [TDMA_CONS_INDEX] = 0x08,
309 [TDMA_PROD_INDEX] = 0x0C,
310 [DMA_RING_BUF_SIZE] = 0x10,
311 [DMA_START_ADDR] = 0x14,
312 [DMA_START_ADDR_HI] = 0x18,
313 [DMA_END_ADDR] = 0x1C,
314 [DMA_END_ADDR_HI] = 0x20,
315 [DMA_MBUF_DONE_THRESH] = 0x24,
316 [TDMA_FLOW_PERIOD] = 0x28,
317 [TDMA_WRITE_PTR] = 0x2C,
318 [TDMA_WRITE_PTR_HI] = 0x30,
319};
320
321static const u8 genet_dma_ring_regs_v123[] = {
322 [TDMA_READ_PTR] = 0x00,
323 [TDMA_CONS_INDEX] = 0x04,
324 [TDMA_PROD_INDEX] = 0x08,
325 [DMA_RING_BUF_SIZE] = 0x0C,
326 [DMA_START_ADDR] = 0x10,
327 [DMA_END_ADDR] = 0x14,
328 [DMA_MBUF_DONE_THRESH] = 0x18,
329 [TDMA_FLOW_PERIOD] = 0x1C,
330 [TDMA_WRITE_PTR] = 0x20,
331};
332
333/* Set at runtime once GENET version is known */
334static const u8 *genet_dma_ring_regs;
335
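The accessors below compute a ring register address as base + block offset + per-ring stride + per-version register offset; spelled out with made-up constants (the real tdma/rdma offsets and DMA_RING_SIZE come from bcmgenet.h and the per-version hw_params):

	#include <stdio.h>

	#define TDMA_REG_OFF	0x4000	/* assumed block offset */
	#define DMA_RING_SIZE	0x40	/* assumed per-ring stride */

	int main(void)
	{
		unsigned int ring = 3;
		unsigned int reg = 0x08;	/* e.g. TDMA_CONS_INDEX on v4 */

		/* prints "ring reg at base + 0x40c8" */
		printf("ring reg at base + 0x%x\n",
		       TDMA_REG_OFF + DMA_RING_SIZE * ring + reg);
		return 0;
	}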
336static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
337 unsigned int ring,
338 enum dma_ring_reg r)
339{
340 return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
341 (DMA_RING_SIZE * ring) +
342 genet_dma_ring_regs[r]);
343}
344
345static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
346 unsigned int ring,
347 u32 val,
348 enum dma_ring_reg r)
349{
350 __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
351 (DMA_RING_SIZE * ring) +
352 genet_dma_ring_regs[r]);
353}
354
355static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
356 unsigned int ring,
357 enum dma_ring_reg r)
358{
359 return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
360 (DMA_RING_SIZE * ring) +
361 genet_dma_ring_regs[r]);
362}
363
364static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
365 unsigned int ring,
366 u32 val,
367 enum dma_ring_reg r)
368{
369 __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
370 (DMA_RING_SIZE * ring) +
371 genet_dma_ring_regs[r]);
372}
373
374static int bcmgenet_get_settings(struct net_device *dev,
375 struct ethtool_cmd *cmd)
376{
377 struct bcmgenet_priv *priv = netdev_priv(dev);
378
379 if (!netif_running(dev))
380 return -EINVAL;
381
382 if (!priv->phydev)
383 return -ENODEV;
384
385 return phy_ethtool_gset(priv->phydev, cmd);
386}
387
388static int bcmgenet_set_settings(struct net_device *dev,
389 struct ethtool_cmd *cmd)
390{
391 struct bcmgenet_priv *priv = netdev_priv(dev);
392
393 if (!netif_running(dev))
394 return -EINVAL;
395
396 if (!priv->phydev)
397 return -ENODEV;
398
399 return phy_ethtool_sset(priv->phydev, cmd);
400}
401
402static int bcmgenet_set_rx_csum(struct net_device *dev,
403 netdev_features_t wanted)
404{
405 struct bcmgenet_priv *priv = netdev_priv(dev);
406 u32 rbuf_chk_ctrl;
407 bool rx_csum_en;
408
409 rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
410
411 rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
412
413 /* enable rx checksumming */
414 if (rx_csum_en)
415 rbuf_chk_ctrl |= RBUF_RXCHK_EN;
416 else
417 rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
418 priv->desc_rxchk_en = rx_csum_en;
419 bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
420
421 return 0;
422}
423
424static int bcmgenet_set_tx_csum(struct net_device *dev,
425 netdev_features_t wanted)
426{
427 struct bcmgenet_priv *priv = netdev_priv(dev);
428 bool desc_64b_en;
429 u32 tbuf_ctrl, rbuf_ctrl;
430
431 tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
432 rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
433
434 desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
435
436 /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
437 if (desc_64b_en) {
438 tbuf_ctrl |= RBUF_64B_EN;
439 rbuf_ctrl |= RBUF_64B_EN;
440 } else {
441 tbuf_ctrl &= ~RBUF_64B_EN;
442 rbuf_ctrl &= ~RBUF_64B_EN;
443 }
444 priv->desc_64b_en = desc_64b_en;
445
446 bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
447 bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
448
449 return 0;
450}
451
452static int bcmgenet_set_features(struct net_device *dev,
453 netdev_features_t features)
454{
455 netdev_features_t changed = features ^ dev->features;
456 netdev_features_t wanted = dev->wanted_features;
457 int ret = 0;
458
459 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
460 ret = bcmgenet_set_tx_csum(dev, wanted);
461 if (changed & (NETIF_F_RXCSUM))
462 ret = bcmgenet_set_rx_csum(dev, wanted);
463
464 return ret;
465}
466
467static u32 bcmgenet_get_msglevel(struct net_device *dev)
468{
469 struct bcmgenet_priv *priv = netdev_priv(dev);
470
471 return priv->msg_enable;
472}
473
474static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
475{
476 struct bcmgenet_priv *priv = netdev_priv(dev);
477
478 priv->msg_enable = level;
479}
480
481/* standard ethtool support functions. */
482enum bcmgenet_stat_type {
483 BCMGENET_STAT_NETDEV = -1,
484 BCMGENET_STAT_MIB_RX,
485 BCMGENET_STAT_MIB_TX,
486 BCMGENET_STAT_RUNT,
487 BCMGENET_STAT_MISC,
488};
489
490struct bcmgenet_stats {
491 char stat_string[ETH_GSTRING_LEN];
492 int stat_sizeof;
493 int stat_offset;
494 enum bcmgenet_stat_type type;
495 /* reg offset from UMAC base for misc counters */
496 u16 reg_offset;
497};
498
499#define STAT_NETDEV(m) { \
500 .stat_string = __stringify(m), \
501 .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
502 .stat_offset = offsetof(struct net_device_stats, m), \
503 .type = BCMGENET_STAT_NETDEV, \
504}
505
506#define STAT_GENET_MIB(str, m, _type) { \
507 .stat_string = str, \
508 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
509 .stat_offset = offsetof(struct bcmgenet_priv, m), \
510 .type = _type, \
511}
512
513#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
514#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
515#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
516
517#define STAT_GENET_MISC(str, m, offset) { \
518 .stat_string = str, \
519 .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
520 .stat_offset = offsetof(struct bcmgenet_priv, m), \
521 .type = BCMGENET_STAT_MISC, \
522 .reg_offset = offset, \
523}
524
525
526/* There is a 0xC gap between the end of RX and beginning of TX stats and then
527 * between the end of TX stats and the beginning of the RX RUNT
528 */
529#define BCMGENET_STAT_OFFSET 0xc
530
531/* Hardware counters must be kept in sync because the order/offset
532 * is important here (order in structure declaration = order in hardware)
533 */
534static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
535 /* general stats */
536 STAT_NETDEV(rx_packets),
537 STAT_NETDEV(tx_packets),
538 STAT_NETDEV(rx_bytes),
539 STAT_NETDEV(tx_bytes),
540 STAT_NETDEV(rx_errors),
541 STAT_NETDEV(tx_errors),
542 STAT_NETDEV(rx_dropped),
543 STAT_NETDEV(tx_dropped),
544 STAT_NETDEV(multicast),
545 /* UniMAC RSV counters */
546 STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
547 STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
548 STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
549 STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
550 STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
551 STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
552 STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
553 STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
554 STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
555 STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
556 STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
557 STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
558 STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
559 STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
560 STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
561 STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
562 STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
563 STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
564 STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
565 STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
566 STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
567 STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
568 STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
569 STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
570 STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
571 STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
572 STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
573 STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
574 STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
575 /* UniMAC TSV counters */
576 STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
577 STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
578 STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
579 STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
580 STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
581 STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
582 STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
583 STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
584 STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
585 STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
586 STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
587 STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
588 STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
589 STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
590 STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
591 STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
592 STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
593 STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
594 STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
595 STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
596 STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
597 STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
598 STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
599 STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
600 STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
601 STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
602 STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
603 STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
604 STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
605 /* UniMAC RUNT counters */
606 STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
607 STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
608 STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
609 STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
610 /* Misc UniMAC counters */
611 STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
612 UMAC_RBUF_OVFL_CNT),
613 STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
614 STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
615};
616
617#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
618
619static void bcmgenet_get_drvinfo(struct net_device *dev,
620 struct ethtool_drvinfo *info)
621{
622 strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
623 strlcpy(info->version, "v2.0", sizeof(info->version));
624 info->n_stats = BCMGENET_STATS_LEN;
625
626}
627
628static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
629{
630 switch (string_set) {
631 case ETH_SS_STATS:
632 return BCMGENET_STATS_LEN;
633 default:
634 return -EOPNOTSUPP;
635 }
636}
637
638static void bcmgenet_get_strings(struct net_device *dev,
639 u32 stringset, u8 *data)
640{
641 int i;
642
643 switch (stringset) {
644 case ETH_SS_STATS:
645 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
646 memcpy(data + i * ETH_GSTRING_LEN,
647 bcmgenet_gstrings_stats[i].stat_string,
648 ETH_GSTRING_LEN);
649 }
650 break;
651 }
652}
653
654static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
655{
656 int i, j = 0;
657
658 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
659 const struct bcmgenet_stats *s;
660 u8 offset = 0;
661 u32 val = 0;
662 char *p;
663
664 s = &bcmgenet_gstrings_stats[i];
665 switch (s->type) {
666 case BCMGENET_STAT_NETDEV:
667 continue;
668 case BCMGENET_STAT_MIB_RX:
669 case BCMGENET_STAT_MIB_TX:
670 case BCMGENET_STAT_RUNT:
671 if (s->type != BCMGENET_STAT_MIB_RX)
672 offset = BCMGENET_STAT_OFFSET;
673 val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
674 j + offset);
675 break;
676 case BCMGENET_STAT_MISC:
677 val = bcmgenet_umac_readl(priv, s->reg_offset);
678 /* clear if overflowed */
679 if (val == ~0)
680 bcmgenet_umac_writel(priv, 0, s->reg_offset);
681 break;
682 }
683
684 j += s->stat_sizeof;
685 p = (char *)priv + s->stat_offset;
686 *(u32 *)p = val;
687 }
688}
689
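The walk in bcmgenet_update_mib_counters() reads the hardware counters back-to-back from UMAC_MIB_START, inserting the 0xC gap once it leaves the RX block; a standalone sketch of that offset arithmetic (the register base and counter counts are made up here, the real ones live in bcmgenet.h and the table above):

	#include <stdio.h>

	#define UMAC_MIB_START	0x400	/* assumed base for illustration */
	#define STAT_GAP	0xc	/* BCMGENET_STAT_OFFSET */

	int main(void)
	{
		unsigned int j = 0;
		int i, nr_rx = 29;	/* pretend 29 RX counters, then TX */

		for (i = 0; i < 31; i++) {
			unsigned int off = (i < nr_rx) ? 0 : STAT_GAP;

			if (i == nr_rx - 1 || i == nr_rx)
				printf("stat %d read at 0x%x\n", i,
				       UMAC_MIB_START + j + off);
			j += 4;	/* each counter is sizeof(u32) */
		}
		return 0;
	}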
690static void bcmgenet_get_ethtool_stats(struct net_device *dev,
691 struct ethtool_stats *stats,
692 u64 *data)
693{
694 struct bcmgenet_priv *priv = netdev_priv(dev);
695 int i;
696
697 if (netif_running(dev))
698 bcmgenet_update_mib_counters(priv);
699
700 for (i = 0; i < BCMGENET_STATS_LEN; i++) {
701 const struct bcmgenet_stats *s;
702 char *p;
703
704 s = &bcmgenet_gstrings_stats[i];
705 if (s->type == BCMGENET_STAT_NETDEV)
706 p = (char *)&dev->stats;
707 else
708 p = (char *)priv;
709 p += s->stat_offset;
710 data[i] = *(u32 *)p;
711 }
712}
713
714/* standard ethtool support functions. */
715static struct ethtool_ops bcmgenet_ethtool_ops = {
716 .get_strings = bcmgenet_get_strings,
717 .get_sset_count = bcmgenet_get_sset_count,
718 .get_ethtool_stats = bcmgenet_get_ethtool_stats,
719 .get_settings = bcmgenet_get_settings,
720 .set_settings = bcmgenet_set_settings,
721 .get_drvinfo = bcmgenet_get_drvinfo,
722 .get_link = ethtool_op_get_link,
723 .get_msglevel = bcmgenet_get_msglevel,
724 .set_msglevel = bcmgenet_set_msglevel,
725};
726
727/* Power down the unimac, based on mode. */
728static void bcmgenet_power_down(struct bcmgenet_priv *priv,
729 enum bcmgenet_power_mode mode)
730{
731 u32 reg;
732
733 switch (mode) {
734 case GENET_POWER_CABLE_SENSE:
735 phy_detach(priv->phydev);
736 break;
737
738 case GENET_POWER_PASSIVE:
739 /* Power down LED */
740 bcmgenet_mii_reset(priv->dev);
741 if (priv->hw_params->flags & GENET_HAS_EXT) {
742 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
743 reg |= (EXT_PWR_DOWN_PHY |
744 EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
745 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
746 }
747 break;
748 default:
749 break;
750 }
751}
752
753static void bcmgenet_power_up(struct bcmgenet_priv *priv,
754 enum bcmgenet_power_mode mode)
755{
756 u32 reg;
757
758 if (!(priv->hw_params->flags & GENET_HAS_EXT))
759 return;
760
761 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
762
763 switch (mode) {
764 case GENET_POWER_PASSIVE:
765 reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
766 EXT_PWR_DOWN_BIAS);
767 /* fallthrough */
768 case GENET_POWER_CABLE_SENSE:
769 /* enable APD */
770 reg |= EXT_PWR_DN_EN_LD;
771 break;
772 default:
773 break;
774 }
775
776 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
777 bcmgenet_mii_reset(priv->dev);
778}
779
780/* ioctl handle special commands that are not present in ethtool. */
781static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
782{
783 struct bcmgenet_priv *priv = netdev_priv(dev);
784 int val = 0;
785
786 if (!netif_running(dev))
787 return -EINVAL;
788
789 switch (cmd) {
790 case SIOCGMIIPHY:
791 case SIOCGMIIREG:
792 case SIOCSMIIREG:
793 if (!priv->phydev)
794 val = -ENODEV;
795 else
796 val = phy_mii_ioctl(priv->phydev, rq, cmd);
797 break;
798
799 default:
800 val = -EINVAL;
801 break;
802 }
803
804 return val;
805}
806
807static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
808 struct bcmgenet_tx_ring *ring)
809{
810 struct enet_cb *tx_cb_ptr;
811
812 tx_cb_ptr = ring->cbs;
813 tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
814 tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
815 /* Advancing local write pointer */
816 if (ring->write_ptr == ring->end_ptr)
817 ring->write_ptr = ring->cb_ptr;
818 else
819 ring->write_ptr++;
820
821 return tx_cb_ptr;
822}
823
824/* Simple helper to free a control block's resources */
825static void bcmgenet_free_cb(struct enet_cb *cb)
826{
827 dev_kfree_skb_any(cb->skb);
828 cb->skb = NULL;
829 dma_unmap_addr_set(cb, dma_addr, 0);
830}
831
832static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
833 struct bcmgenet_tx_ring *ring)
834{
835 bcmgenet_intrl2_0_writel(priv,
836 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
837 INTRL2_CPU_MASK_SET);
838}
839
840static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
841 struct bcmgenet_tx_ring *ring)
842{
843 bcmgenet_intrl2_0_writel(priv,
844 UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
845 INTRL2_CPU_MASK_CLEAR);
846}
847
848static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
849 struct bcmgenet_tx_ring *ring)
850{
851 bcmgenet_intrl2_1_writel(priv,
852 (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
853 priv->int1_mask &= ~(1 << ring->index);
854}
855
856static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
857 struct bcmgenet_tx_ring *ring)
858{
859 bcmgenet_intrl2_1_writel(priv,
860 (1 << ring->index), INTRL2_CPU_MASK_SET);
861 priv->int1_mask |= (1 << ring->index);
862}
863
864/* Unlocked version of the reclaim routine */
865static void __bcmgenet_tx_reclaim(struct net_device *dev,
866 struct bcmgenet_tx_ring *ring)
867{
868 struct bcmgenet_priv *priv = netdev_priv(dev);
869 int last_tx_cn, last_c_index, num_tx_bds;
870 struct enet_cb *tx_cb_ptr;
871 struct netdev_queue *txq;
872 unsigned int c_index;
873
874	/* Compute how many buffers were transmitted since the last xmit call */
875 c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
876 txq = netdev_get_tx_queue(dev, ring->queue);
877
878 last_c_index = ring->c_index;
879 num_tx_bds = ring->size;
880
881 c_index &= (num_tx_bds - 1);
882
883 if (c_index >= last_c_index)
884 last_tx_cn = c_index - last_c_index;
885 else
886 last_tx_cn = num_tx_bds - last_c_index + c_index;
887
888 netif_dbg(priv, tx_done, dev,
889 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
890 __func__, ring->index,
891 c_index, last_tx_cn, last_c_index);
892
893 /* Reclaim transmitted buffers */
894 while (last_tx_cn-- > 0) {
895 tx_cb_ptr = ring->cbs + last_c_index;
896 if (tx_cb_ptr->skb) {
897 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
898 dma_unmap_single(&dev->dev,
899 dma_unmap_addr(tx_cb_ptr, dma_addr),
900 tx_cb_ptr->skb->len,
901 DMA_TO_DEVICE);
902 bcmgenet_free_cb(tx_cb_ptr);
903 } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
904 dev->stats.tx_bytes +=
905 dma_unmap_len(tx_cb_ptr, dma_len);
906 dma_unmap_page(&dev->dev,
907 dma_unmap_addr(tx_cb_ptr, dma_addr),
908 dma_unmap_len(tx_cb_ptr, dma_len),
909 DMA_TO_DEVICE);
910 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
911 }
912 dev->stats.tx_packets++;
913 ring->free_bds += 1;
914
915 last_c_index++;
916 last_c_index &= (num_tx_bds - 1);
917 }
918
919 if (ring->free_bds > (MAX_SKB_FRAGS + 1))
920 ring->int_disable(priv, ring);
921
922 if (netif_tx_queue_stopped(txq))
923 netif_tx_wake_queue(txq);
924
925 ring->c_index = c_index;
926}
927
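The reclaim count above is just modular distance on the consumer index; the wrap handling can be checked standalone (ring sizes are powers of two in this driver):

	#include <assert.h>

	static int tx_done(unsigned int c_index, unsigned int last_c_index,
			   unsigned int num_tx_bds)
	{
		c_index &= (num_tx_bds - 1);
		if (c_index >= last_c_index)
			return c_index - last_c_index;
		return num_tx_bds - last_c_index + c_index;
	}

	int main(void)
	{
		assert(tx_done(10, 5, 256) == 5);	/* no wrap */
		assert(tx_done(3, 250, 256) == 9);	/* wrapped past 255 */
		return 0;
	}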
928static void bcmgenet_tx_reclaim(struct net_device *dev,
929 struct bcmgenet_tx_ring *ring)
930{
931 unsigned long flags;
932
933 spin_lock_irqsave(&ring->lock, flags);
934 __bcmgenet_tx_reclaim(dev, ring);
935 spin_unlock_irqrestore(&ring->lock, flags);
936}
937
938static void bcmgenet_tx_reclaim_all(struct net_device *dev)
939{
940 struct bcmgenet_priv *priv = netdev_priv(dev);
941 int i;
942
943 if (netif_is_multiqueue(dev)) {
944 for (i = 0; i < priv->hw_params->tx_queues; i++)
945 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
946 }
947
948 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
949}
950
951/* Transmits a single SKB (either the head of a fragment list or a
952 * standalone SKB); the caller must hold the ring lock
953 */
954static int bcmgenet_xmit_single(struct net_device *dev,
955 struct sk_buff *skb,
956 u16 dma_desc_flags,
957 struct bcmgenet_tx_ring *ring)
958{
959 struct bcmgenet_priv *priv = netdev_priv(dev);
960 struct device *kdev = &priv->pdev->dev;
961 struct enet_cb *tx_cb_ptr;
962 unsigned int skb_len;
963 dma_addr_t mapping;
964 u32 length_status;
965 int ret;
966
967 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
968
969 if (unlikely(!tx_cb_ptr))
970 BUG();
971
972 tx_cb_ptr->skb = skb;
973
974 skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
975
976 mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
977 ret = dma_mapping_error(kdev, mapping);
978 if (ret) {
979 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
980 dev_kfree_skb(skb);
981 return ret;
982 }
983
984 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
985 dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
986 length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
987 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
988 DMA_TX_APPEND_CRC;
989
990 if (skb->ip_summed == CHECKSUM_PARTIAL)
991 length_status |= DMA_TX_DO_CSUM;
992
993 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
994
995 /* Decrement total BD count and advance our write pointer */
996 ring->free_bds -= 1;
997 ring->prod_index += 1;
998 ring->prod_index &= DMA_P_INDEX_MASK;
999
1000 return 0;
1001}
1002
1003/* Transmit an SKB fragment */
1004static int bcmgenet_xmit_frag(struct net_device *dev,
1005 skb_frag_t *frag,
1006 u16 dma_desc_flags,
1007 struct bcmgenet_tx_ring *ring)
1008{
1009 struct bcmgenet_priv *priv = netdev_priv(dev);
1010 struct device *kdev = &priv->pdev->dev;
1011 struct enet_cb *tx_cb_ptr;
1012 dma_addr_t mapping;
1013 int ret;
1014
1015 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
1016
1017 if (unlikely(!tx_cb_ptr))
1018 BUG();
1019 tx_cb_ptr->skb = NULL;
1020
1021 mapping = skb_frag_dma_map(kdev, frag, 0,
1022 skb_frag_size(frag), DMA_TO_DEVICE);
1023 ret = dma_mapping_error(kdev, mapping);
1024 if (ret) {
1025 netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
1026 __func__);
1027 return ret;
1028 }
1029
1030 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1031 dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
1032
1033 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
1034 (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
1035 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
1036
1037
1038 ring->free_bds -= 1;
1039 ring->prod_index += 1;
1040 ring->prod_index &= DMA_P_INDEX_MASK;
1041
1042 return 0;
1043}
1044
1045/* Reallocate the SKB to put enough headroom in front of it and insert
1046 * the transmit checksum offsets in the descriptors
1047 */
1048static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
1049{
1050 struct status_64 *status = NULL;
1051 struct sk_buff *new_skb;
1052 u16 offset;
1053 u8 ip_proto;
1054 u16 ip_ver;
1055 u32 tx_csum_info;
1056
1057 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1058		/* If the 64-byte status block is enabled, make sure the skb
1059		 * has enough headroom for us to insert it.
1060 */
1061 new_skb = skb_realloc_headroom(skb, sizeof(*status));
1062 dev_kfree_skb(skb);
1063 if (!new_skb) {
1064 dev->stats.tx_errors++;
1065 dev->stats.tx_dropped++;
1066 return -ENOMEM;
1067 }
1068 skb = new_skb;
1069 }
1070
1071 skb_push(skb, sizeof(*status));
1072 status = (struct status_64 *)skb->data;
1073
1074 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1075 ip_ver = htons(skb->protocol);
1076 switch (ip_ver) {
1077 case ETH_P_IP:
1078 ip_proto = ip_hdr(skb)->protocol;
1079 break;
1080 case ETH_P_IPV6:
1081 ip_proto = ipv6_hdr(skb)->nexthdr;
1082 break;
1083 default:
1084 return 0;
1085 }
1086
1087 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1088 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
1089 (offset + skb->csum_offset);
1090
1091		/* Set the length-valid bit for TCP and UDP, and additionally
1092		 * the special UDP flag for IPv4; otherwise leave it at 0.
1093 */
1094 if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
1095 tx_csum_info |= STATUS_TX_CSUM_LV;
1096 if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
1097 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
1098 } else
1099 tx_csum_info = 0;
1100
1101 status->tx_csum_info = tx_csum_info;
1102 }
1103
1104 return 0;
1105}
1106
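A worked example of the tx_csum_info packing above, for a TCP/IPv4 frame whose checksum start is 34 bytes into the packet; STATUS_TX_CSUM_START_SHIFT is taken as 16 purely for illustration, the authoritative value is in bcmgenet.h:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t csum_start = 98;	/* 64B status + 14B eth + 20B IP */
		uint16_t csum_offset = 16;	/* TCP checksum field offset */
		uint16_t offset = csum_start - 64;	/* status block stripped */
		uint32_t info = ((uint32_t)offset << 16) | (offset + csum_offset);

		printf("tx_csum_info = 0x%08x\n", info);	/* 0x00220032 */
		return 0;
	}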
1107static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1108{
1109 struct bcmgenet_priv *priv = netdev_priv(dev);
1110 struct bcmgenet_tx_ring *ring = NULL;
1111 struct netdev_queue *txq;
1112 unsigned long flags = 0;
1113 int nr_frags, index;
1114 u16 dma_desc_flags;
1115 int ret;
1116 int i;
1117
1118 index = skb_get_queue_mapping(skb);
1119 /* Mapping strategy:
1120	 * queue_mapping = 0, unclassified, packet transmitted through ring 16
1121	 * queue_mapping = 1, goes to ring 0 (highest priority queue).
1122 * queue_mapping = 2, goes to ring 1.
1123 * queue_mapping = 3, goes to ring 2.
1124 * queue_mapping = 4, goes to ring 3.
1125 */
1126 if (index == 0)
1127 index = DESC_INDEX;
1128 else
1129 index -= 1;
1130
1131 nr_frags = skb_shinfo(skb)->nr_frags;
1132 ring = &priv->tx_rings[index];
1133 txq = netdev_get_tx_queue(dev, ring->queue);
1134
1135 spin_lock_irqsave(&ring->lock, flags);
1136 if (ring->free_bds <= nr_frags + 1) {
1137 netif_tx_stop_queue(txq);
1138 netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
1139 __func__, index, ring->queue);
1140 ret = NETDEV_TX_BUSY;
1141 goto out;
1142 }
1143
1144 /* set the SKB transmit checksum */
1145 if (priv->desc_64b_en) {
1146 ret = bcmgenet_put_tx_csum(dev, skb);
1147 if (ret) {
1148 ret = NETDEV_TX_OK;
1149 goto out;
1150 }
1151 }
1152
1153 dma_desc_flags = DMA_SOP;
1154 if (nr_frags == 0)
1155 dma_desc_flags |= DMA_EOP;
1156
1157 /* Transmit single SKB or head of fragment list */
1158 ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
1159 if (ret) {
1160 ret = NETDEV_TX_OK;
1161 goto out;
1162 }
1163
1164 /* xmit fragment */
1165 for (i = 0; i < nr_frags; i++) {
1166 ret = bcmgenet_xmit_frag(dev,
1167 &skb_shinfo(skb)->frags[i],
1168 (i == nr_frags - 1) ? DMA_EOP : 0, ring);
1169 if (ret) {
1170 ret = NETDEV_TX_OK;
1171 goto out;
1172 }
1173 }
1174
1175 skb_tx_timestamp(skb);
1176
1177 /* we kept a software copy of how much we should advance the TDMA
1178	 * producer index; now write it to the hardware
1179 */
1180 bcmgenet_tdma_ring_writel(priv, ring->index,
1181 ring->prod_index, TDMA_PROD_INDEX);
1182
1183 if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
1184 netif_tx_stop_queue(txq);
1185 ring->int_enable(priv, ring);
1186 }
1187
1188out:
1189 spin_unlock_irqrestore(&ring->lock, flags);
1190
1191 return ret;
1192}
1193
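The mapping rule in bcmgenet_xmit() is easy to check standalone: mapping 0 is the unclassified default ring, mappings 1..4 feed priority rings 0..3 (DESC_INDEX is assumed to be 16, as in bcmgenet.h):

	#include <assert.h>

	#define DESC_INDEX 16	/* assumed, matches bcmgenet.h */

	static int map_to_ring(int queue_mapping)
	{
		return queue_mapping == 0 ? DESC_INDEX : queue_mapping - 1;
	}

	int main(void)
	{
		assert(map_to_ring(0) == 16);	/* unclassified */
		assert(map_to_ring(1) == 0);	/* highest priority ring */
		assert(map_to_ring(4) == 3);
		return 0;
	}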
1194
1195static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
1196 struct enet_cb *cb)
1197{
1198 struct device *kdev = &priv->pdev->dev;
1199 struct sk_buff *skb;
1200 dma_addr_t mapping;
1201 int ret;
1202
1203 skb = netdev_alloc_skb(priv->dev,
1204 priv->rx_buf_len + SKB_ALIGNMENT);
1205 if (!skb)
1206 return -ENOMEM;
1207
1208 /* a caller did not release this control block */
1209 WARN_ON(cb->skb != NULL);
1210 cb->skb = skb;
1211 mapping = dma_map_single(kdev, skb->data,
1212 priv->rx_buf_len, DMA_FROM_DEVICE);
1213 ret = dma_mapping_error(kdev, mapping);
1214 if (ret) {
1215 bcmgenet_free_cb(cb);
1216 netif_err(priv, rx_err, priv->dev,
1217 "%s DMA map failed\n", __func__);
1218 return ret;
1219 }
1220
1221 dma_unmap_addr_set(cb, dma_addr, mapping);
1222 /* assign packet, prepare descriptor, and advance pointer */
1223
1224 dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
1225
1226 /* turn on the newly assigned BD for DMA to use */
1227 priv->rx_bd_assign_index++;
1228 priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
1229
1230 priv->rx_bd_assign_ptr = priv->rx_bds +
1231 (priv->rx_bd_assign_index * DMA_DESC_SIZE);
1232
1233 return 0;
1234}
1235
1236/* bcmgenet_desc_rx - descriptor based rx process.
1237 * this can be called from the bottom half, or from the NAPI polling method.
1238 */
1239static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1240 unsigned int budget)
1241{
1242 struct net_device *dev = priv->dev;
1243 struct enet_cb *cb;
1244 struct sk_buff *skb;
1245 u32 dma_length_status;
1246 unsigned long dma_flag;
1247 int len, err;
1248 unsigned int rxpktprocessed = 0, rxpkttoprocess;
1249 unsigned int p_index;
1250 unsigned int chksum_ok = 0;
1251
1252 p_index = bcmgenet_rdma_ring_readl(priv,
1253 DESC_INDEX, RDMA_PROD_INDEX);
1254 p_index &= DMA_P_INDEX_MASK;
1255
1256 if (p_index < priv->rx_c_index)
1257 rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
1258 priv->rx_c_index + p_index;
1259 else
1260 rxpkttoprocess = p_index - priv->rx_c_index;
1261
1262 netif_dbg(priv, rx_status, dev,
1263 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
1264
1265 while ((rxpktprocessed < rxpkttoprocess) &&
1266 (rxpktprocessed < budget)) {
1267
1268 /* Unmap the packet contents such that we can use the
1269		 * RSV from the 64-byte descriptor when enabled and save
1270		 * a 32-bit register read
1271 */
1272 cb = &priv->rx_cbs[priv->rx_read_ptr];
1273 skb = cb->skb;
1274 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
1275 priv->rx_buf_len, DMA_FROM_DEVICE);
1276
1277 if (!priv->desc_64b_en) {
1278 dma_length_status = dmadesc_get_length_status(priv,
1279 priv->rx_bds +
1280 (priv->rx_read_ptr *
1281 DMA_DESC_SIZE));
1282 } else {
1283 struct status_64 *status;
1284 status = (struct status_64 *)skb->data;
1285 dma_length_status = status->length_status;
1286 }
1287
1288 /* DMA flags and length are still valid no matter how
1289 * we got the Receive Status Vector (64B RSB or register)
1290 */
1291 dma_flag = dma_length_status & 0xffff;
1292 len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
1293
1294 netif_dbg(priv, rx_status, dev,
1295 "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
1296 __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
1297 dma_length_status);
1298
1299 rxpktprocessed++;
1300
1301 priv->rx_read_ptr++;
1302 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
1303
1304 /* out of memory, just drop packets at the hardware level */
1305 if (unlikely(!skb)) {
1306 dev->stats.rx_dropped++;
1307 dev->stats.rx_errors++;
1308 goto refill;
1309 }
1310
1311 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1312 netif_err(priv, rx_status, dev,
1313				  "Dropping fragmented packet!\n");
1314 dev->stats.rx_dropped++;
1315 dev->stats.rx_errors++;
1316 dev_kfree_skb_any(cb->skb);
1317 cb->skb = NULL;
1318 goto refill;
1319 }
1320 /* report errors */
1321 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
1322 DMA_RX_OV |
1323 DMA_RX_NO |
1324 DMA_RX_LG |
1325 DMA_RX_RXER))) {
1326 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
1327 (unsigned int)dma_flag);
1328 if (dma_flag & DMA_RX_CRC_ERROR)
1329 dev->stats.rx_crc_errors++;
1330 if (dma_flag & DMA_RX_OV)
1331 dev->stats.rx_over_errors++;
1332 if (dma_flag & DMA_RX_NO)
1333 dev->stats.rx_frame_errors++;
1334 if (dma_flag & DMA_RX_LG)
1335 dev->stats.rx_length_errors++;
1336 dev->stats.rx_dropped++;
1337 dev->stats.rx_errors++;
1338
1339			/* discard the packet and advance consumer index. */
1340 dev_kfree_skb_any(cb->skb);
1341 cb->skb = NULL;
1342 goto refill;
1343 } /* error packet */
1344
1345 chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
1346 priv->desc_rxchk_en;
1347
1348 skb_put(skb, len);
1349 if (priv->desc_64b_en) {
1350 skb_pull(skb, 64);
1351 len -= 64;
1352 }
1353
1354 if (likely(chksum_ok))
1355 skb->ip_summed = CHECKSUM_UNNECESSARY;
1356
1357		/* remove the 2 bytes the hardware added for IP alignment */
1358 skb_pull(skb, 2);
1359 len -= 2;
1360
1361 if (priv->crc_fwd_en) {
1362 skb_trim(skb, len - ETH_FCS_LEN);
1363 len -= ETH_FCS_LEN;
1364 }
1365
1366		/* Finish setting up the received SKB and send it to the kernel */
1367 skb->protocol = eth_type_trans(skb, priv->dev);
1368 dev->stats.rx_packets++;
1369 dev->stats.rx_bytes += len;
1370 if (dma_flag & DMA_RX_MULT)
1371 dev->stats.multicast++;
1372
1373 /* Notify kernel */
1374 napi_gro_receive(&priv->napi, skb);
1375 cb->skb = NULL;
1376 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1377
1378 /* refill RX path on the current control block */
1379refill:
1380 err = bcmgenet_rx_refill(priv, cb);
1381 if (err)
1382 netif_err(priv, rx_err, dev, "Rx refill failed\n");
1383 }
1384
1385 return rxpktprocessed;
1386}
1387
1388/* Assign skb to RX DMA descriptor. */
1389static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1390{
1391 struct enet_cb *cb;
1392 int ret = 0;
1393 int i;
1394
1395 netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
1396
1397 /* loop here for each buffer needing assign */
1398 for (i = 0; i < priv->num_rx_bds; i++) {
1399 cb = &priv->rx_cbs[priv->rx_bd_assign_index];
1400 if (cb->skb)
1401 continue;
1402
1403		/* set the DMA descriptor length once and for all;
1404		 * it would only change if we supported dynamically sizing
1405		 * priv->rx_buf_len, which we do not
1406 */
1407 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
1408 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
1409
1410 ret = bcmgenet_rx_refill(priv, cb);
1411 if (ret)
1412 break;
1413
1414 }
1415
1416 return ret;
1417}
1418
1419static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1420{
1421 struct enet_cb *cb;
1422 int i;
1423
1424 for (i = 0; i < priv->num_rx_bds; i++) {
1425 cb = &priv->rx_cbs[i];
1426
1427 if (dma_unmap_addr(cb, dma_addr)) {
1428 dma_unmap_single(&priv->dev->dev,
1429 dma_unmap_addr(cb, dma_addr),
1430 priv->rx_buf_len, DMA_FROM_DEVICE);
1431 dma_unmap_addr_set(cb, dma_addr, 0);
1432 }
1433
1434 if (cb->skb)
1435 bcmgenet_free_cb(cb);
1436 }
1437}
1438
1439static int reset_umac(struct bcmgenet_priv *priv)
1440{
1441 struct device *kdev = &priv->pdev->dev;
1442 unsigned int timeout = 0;
1443 u32 reg;
1444
1445 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1446 bcmgenet_rbuf_ctrl_set(priv, 0);
1447 udelay(10);
1448
1449 /* disable MAC while updating its registers */
1450 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1451
1452 /* issue soft reset, wait for it to complete */
1453 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
1454 while (timeout++ < 1000) {
1455 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1456 if (!(reg & CMD_SW_RESET))
1457 return 0;
1458
1459 udelay(1);
1460 }
1461
1462	if (timeout >= 1000) {
1463		dev_err(kdev,
1464			"timeout waiting for MAC to come out of reset\n");
1465 return -ETIMEDOUT;
1466 }
1467
1468 return 0;
1469}
1470
1471static int init_umac(struct bcmgenet_priv *priv)
1472{
1473 struct device *kdev = &priv->pdev->dev;
1474 int ret;
1475 u32 reg, cpu_mask_clear;
1476
1477 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1478
1479 ret = reset_umac(priv);
1480 if (ret)
1481 return ret;
1482
1483 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
1484 /* clear tx/rx counter */
1485 bcmgenet_umac_writel(priv,
1486 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
1487 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1488
1489 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1490
1491 /* init rx registers, enable ip header optimization */
1492 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
1493 reg |= RBUF_ALIGN_2B;
1494 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1495
1496 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1497 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1498
1499 /* Mask all interrupts.*/
1500 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1501 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
1502 bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
1503
1504 cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
1505
1506 dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
1507
1508	/* Monitor cable plug/unplug events for internal PHY */
1509 if (phy_is_internal(priv->phydev))
1510 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
1511 else if (priv->ext_phy)
1512 cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
1513 else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1514 reg = bcmgenet_bp_mc_get(priv);
1515 reg |= BIT(priv->hw_params->bp_in_en_shift);
1516
1517 /* bp_mask: back pressure mask */
1518 if (netif_is_multiqueue(priv->dev))
1519 reg |= priv->hw_params->bp_in_mask;
1520 else
1521 reg &= ~priv->hw_params->bp_in_mask;
1522 bcmgenet_bp_mc_set(priv, reg);
1523 }
1524
1525 /* Enable MDIO interrupts on GENET v3+ */
1526 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
1527 cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
1528
1529 bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
1530 INTRL2_CPU_MASK_CLEAR);
1531
1532 /* Enable rx/tx engine.*/
1533 dev_dbg(kdev, "done init umac\n");
1534
1535 return 0;
1536}
1537
1538/* Initialize all house-keeping variables for a TX ring, along
1539 * with corresponding hardware registers
1540 */
1541static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
1542 unsigned int index, unsigned int size,
1543 unsigned int write_ptr, unsigned int end_ptr)
1544{
1545 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
1546 u32 words_per_bd = WORDS_PER_BD(priv);
1547 u32 flow_period_val = 0;
1548 unsigned int first_bd;
1549
1550 spin_lock_init(&ring->lock);
1551 ring->index = index;
1552 if (index == DESC_INDEX) {
1553 ring->queue = 0;
1554 ring->int_enable = bcmgenet_tx_ring16_int_enable;
1555 ring->int_disable = bcmgenet_tx_ring16_int_disable;
1556 } else {
1557 ring->queue = index + 1;
1558 ring->int_enable = bcmgenet_tx_ring_int_enable;
1559 ring->int_disable = bcmgenet_tx_ring_int_disable;
1560 }
1561 ring->cbs = priv->tx_cbs + write_ptr;
1562 ring->size = size;
1563 ring->c_index = 0;
1564 ring->free_bds = size;
1565 ring->write_ptr = write_ptr;
1566 ring->cb_ptr = write_ptr;
1567 ring->end_ptr = end_ptr - 1;
1568 ring->prod_index = 0;
1569
1570 /* Set flow period for ring != 16 */
1571 if (index != DESC_INDEX)
1572 flow_period_val = ENET_MAX_MTU_SIZE << 16;
1573
1574 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
1575 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
1576 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
1577 /* Disable rate control for now */
1578 bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
1579 TDMA_FLOW_PERIOD);
1580 /* Unclassified traffic goes to ring 16 */
1581 bcmgenet_tdma_ring_writel(priv, index,
1582 ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
1583 DMA_RING_BUF_SIZE);
1584
1585 first_bd = write_ptr;
1586
1587 /* Set start and end address, read and write pointers */
1588 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
1589 DMA_START_ADDR);
1590 bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
1591 TDMA_READ_PTR);
1592 bcmgenet_tdma_ring_writel(priv, index, first_bd,
1593 TDMA_WRITE_PTR);
1594 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
1595 DMA_END_ADDR);
1596}
1597
1598/* Initialize a RDMA ring */
1599static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
1600 unsigned int index, unsigned int size)
1601{
1602 u32 words_per_bd = WORDS_PER_BD(priv);
1603 int ret;
1604
1605 priv->num_rx_bds = TOTAL_DESC;
1606 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
1607 priv->rx_bd_assign_ptr = priv->rx_bds;
1608 priv->rx_bd_assign_index = 0;
1609 priv->rx_c_index = 0;
1610 priv->rx_read_ptr = 0;
1611 priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
1612 GFP_KERNEL);
1613 if (!priv->rx_cbs)
1614 return -ENOMEM;
1615
1616 ret = bcmgenet_alloc_rx_buffers(priv);
1617 if (ret) {
1618 kfree(priv->rx_cbs);
1619 return ret;
1620 }
1621
1622 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
1623 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
1624 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
1625 bcmgenet_rdma_ring_writel(priv, index,
1626 ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
1627 DMA_RING_BUF_SIZE);
1628 bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
1629 bcmgenet_rdma_ring_writel(priv, index,
1630 words_per_bd * size - 1, DMA_END_ADDR);
1631 bcmgenet_rdma_ring_writel(priv, index,
1632 (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
1633 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
1634 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
1635
1636 return ret;
1637}
1638
1639/* init multi xmit queues, only available for GENET2+
1640 * the queue is partitioned as follows:
1641 *
1642 * queues 0 - 3 are priority based, each one has 32 descriptors,
1643 * with queue 0 being the highest priority queue.
1644 *
1645 * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
1646 * descriptors: 256 - (number of tx queues * bds per queue) = 128
1647 * descriptors.
1648 *
1649 * The transmit control block pool is then partitioned as follows:
1650 * - tx_ring_cbs[0] points to tx_cbs[0..31]
1651 * - tx_ring_cbs[1] points to tx_cbs[32..63]
1652 * - tx_ring_cbs[2] points to tx_cbs[64..95]
1653 * - tx_ring_cbs[3] points to tx_cbs[96..127]
1654 * - tx_cbs[128..255] are for queue 16
1655 */
1656static void bcmgenet_init_multiq(struct net_device *dev)
1657{
1658 struct bcmgenet_priv *priv = netdev_priv(dev);
1659 unsigned int i, dma_enable;
1660 u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
1661
1662 if (!netif_is_multiqueue(dev)) {
1663 netdev_warn(dev, "called with non multi queue aware HW\n");
1664 return;
1665 }
1666
1667 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
1668 dma_enable = dma_ctrl & DMA_EN;
1669 dma_ctrl &= ~DMA_EN;
1670 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
1671
1672 /* Enable strict priority arbiter mode */
1673 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
1674
1675 for (i = 0; i < priv->hw_params->tx_queues; i++) {
1676 /* each priority ring claims bds_cnt tx_cbs; the remainder is
1677 * reserved for the default tx queue (ring 16)
1678 */
1679 bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
1680 i * priv->hw_params->bds_cnt,
1681 (i + 1) * priv->hw_params->bds_cnt);
1682
1683 /* Configure ring as descriptor ring and set up priority */
1684 ring_cfg |= 1 << i;
1685 dma_priority |= ((GENET_Q0_PRIORITY + i) <<
1686 (GENET_MAX_MQ_CNT + 1) * i);
1687 dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
1688 }
1689
1690 /* Enable rings */
1691 reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
1692 reg |= ring_cfg;
1693 bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
1694
1695 /* Use the configured ring priorities and set ring #16 priority */
1696 reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
1697 reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
1698 reg |= dma_priority;
1699 bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
1700
1701 /* Configure ring as descriptor ring and re-enable DMA if enabled */
1702 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1703 reg |= dma_ctrl;
1704 if (dma_enable)
1705 reg |= DMA_EN;
1706 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1707}
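/* Example (illustrative, assuming GENET_Q0_PRIORITY is 0 and
 * GENET_MAX_MQ_CNT is 4): each ring's priority field is
 * DMA_RING_BUF_PRIORITY_SHIFT (5) bits wide, so ring 0 lands in bits 4:0,
 * ring 1 in bits 9:5, ring 2 in bits 14:10, ring 3 in bits 19:15, and the
 * default ring 16 in bits 24:20 (the << 20 above).
 */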
1708
1709static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1710{
1711 int i;
1712
1713 /* disable DMA */
1714 bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
1715 bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
1716
1717 for (i = 0; i < priv->num_tx_bds; i++) {
1718 if (priv->tx_cbs[i].skb != NULL) {
1719 dev_kfree_skb(priv->tx_cbs[i].skb);
1720 priv->tx_cbs[i].skb = NULL;
1721 }
1722 }
1723
1724 bcmgenet_free_rx_buffers(priv);
1725 kfree(priv->rx_cbs);
1726 kfree(priv->tx_cbs);
1727}
1728
1729/* init_edma: Initialize DMA rings and control registers */
1730static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
1731{
1732 int ret;
1733
1734 netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
1735
1736 /* by default, enable ring 16 (descriptor based) */
1737 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
1738 if (ret) {
1739 netdev_err(priv->dev, "failed to initialize RX ring\n");
1740 return ret;
1741 }
1742
1743 /* init rDma */
1744 bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1745
1746 /* Init tDma */
1747 bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
1748
1749 /* Initialize common TX ring structures */
1750 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
1751 priv->num_tx_bds = TOTAL_DESC;
1752 priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
1753 GFP_KERNEL);
1754 if (!priv->tx_cbs) {
1755 bcmgenet_fini_dma(priv);
1756 return -ENOMEM;
1757 }
1758
1759 /* initialize multi xmit queue */
1760 bcmgenet_init_multiq(priv->dev);
1761
1762 /* initialize special ring 16 */
1763 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
1764 priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
1765 TOTAL_DESC);
1766
1767 return 0;
1768}
1769
1770/* NAPI polling method */
1771static int bcmgenet_poll(struct napi_struct *napi, int budget)
1772{
1773 struct bcmgenet_priv *priv = container_of(napi,
1774 struct bcmgenet_priv, napi);
1775 unsigned int work_done;
1776
1777 /* tx reclaim */
1778 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1779
1780 work_done = bcmgenet_desc_rx(priv, budget);
1781
1782 /* Advance our consumer index */
1783 priv->rx_c_index += work_done;
1784 priv->rx_c_index &= DMA_C_INDEX_MASK;
1785 bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
1786 priv->rx_c_index, RDMA_CONS_INDEX);
1787 if (work_done < budget) {
1788 napi_complete(napi);
1789 bcmgenet_intrl2_0_writel(priv,
1790 UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
1791 }
1792
1793 return work_done;
1794}
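/* Note (illustrative): the hardware producer/consumer indices are
 * free-running 16-bit counters, so the software copy is advanced by
 * work_done and wrapped with DMA_C_INDEX_MASK (0xffff) rather than by
 * the ring size.
 */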
1795
1796/* Interrupt bottom half */
1797static void bcmgenet_irq_task(struct work_struct *work)
1798{
1799 struct bcmgenet_priv *priv = container_of(
1800 work, struct bcmgenet_priv, bcmgenet_irq_work);
1801
1802 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
1803
1804 /* Link UP/DOWN event */
1805 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
1806 (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
1807 phy_mac_interrupt(priv->phydev,
1808 priv->irq0_stat & UMAC_IRQ_LINK_UP);
1809 priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
1810 }
1811}
1812
1813/* bcmgenet_isr1: interrupt handler for ring buffer. */
1814static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
1815{
1816 struct bcmgenet_priv *priv = dev_id;
1817 unsigned int index;
1818
1819 /* Save irq status for bottom-half processing. */
1820 priv->irq1_stat =
1821 bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
1822 ~priv->int1_mask;
1823 /* clear interrupts */
1824 bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
1825
1826 netif_dbg(priv, intr, priv->dev,
1827 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
1828 /* Check the MBDONE interrupts.
1829 * A packet has been transmitted; reclaim the descriptors.
1830 */
1831 if (priv->irq1_stat & 0x0000ffff) {
1833 for (index = 0; index < 16; index++) {
1834 if (priv->irq1_stat & (1 << index))
1835 bcmgenet_tx_reclaim(priv->dev,
1836 &priv->tx_rings[index]);
1837 }
1838 }
1839 return IRQ_HANDLED;
1840}
1841
1842/* bcmgenet_isr0: Handle various interrupts. */
1843static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
1844{
1845 struct bcmgenet_priv *priv = dev_id;
1846
1847 /* Save irq status for bottom-half processing. */
1848 priv->irq0_stat =
1849 bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
1850 ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
1851 /* clear interrupts */
1852 bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
1853
1854 netif_dbg(priv, intr, priv->dev,
1855 "IRQ=0x%x\n", priv->irq0_stat);
1856
1857 if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
1858 /* We use NAPI (software interrupt throttling) if
1859 * Rx descriptor throttling is not used.
1860 * Disable the interrupt; it will be re-enabled in the poll method.
1861 */
1862 if (likely(napi_schedule_prep(&priv->napi))) {
1863 bcmgenet_intrl2_0_writel(priv,
1864 UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
1865 __napi_schedule(&priv->napi);
1866 }
1867 }
1868 if (priv->irq0_stat &
1869 (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
1870 /* Tx reclaim */
1871 bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
1872 }
1873 if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
1874 UMAC_IRQ_PHY_DET_F |
1875 UMAC_IRQ_LINK_UP |
1876 UMAC_IRQ_LINK_DOWN |
1877 UMAC_IRQ_HFB_SM |
1878 UMAC_IRQ_HFB_MM |
1879 UMAC_IRQ_MPD_R)) {
1880 /* all other interested interrupts handled in bottom half */
1881 schedule_work(&priv->bcmgenet_irq_work);
1882 }
1883
1884 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
1885 priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
1886 priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
1887 wake_up(&priv->wq);
1888 }
1889
1890 return IRQ_HANDLED;
1891}
1892
1893static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
1894{
1895 u32 reg;
1896
1897 reg = bcmgenet_rbuf_ctrl_get(priv);
1898 reg |= BIT(1);
1899 bcmgenet_rbuf_ctrl_set(priv, reg);
1900 udelay(10);
1901
1902 reg &= ~BIT(1);
1903 bcmgenet_rbuf_ctrl_set(priv, reg);
1904 udelay(10);
1905}
1906
1907static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
1908 unsigned char *addr)
1909{
1910 bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
1911 (addr[2] << 8) | addr[3], UMAC_MAC0);
1912 bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
1913}
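/* Example (illustrative): for the address 00:10:18:aa:bb:cc the two writes
 * above are UMAC_MAC0 = 0x001018aa and UMAC_MAC1 = 0x0000bbcc.
 */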
1914
1915static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
1916{
1917 int ret;
1918
1919 /* From WOL-enabled suspend, switch to regular clock */
1920 clk_disable(priv->clk_wol);
1921 /* init umac registers to synchronize s/w with h/w */
1922 ret = init_umac(priv);
1923 if (ret)
1924 return ret;
1925
1926 phy_init_hw(priv->phydev);
1927 /* Speed settings must be restored */
1928 bcmgenet_mii_config(priv->dev);
1929
1930 return 0;
1931}
1932
1933/* Returns a reusable dma control register value */
1934static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
1935{
1936 u32 reg;
1937 u32 dma_ctrl;
1938
1939 /* disable DMA */
1940 dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
1941 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1942 reg &= ~dma_ctrl;
1943 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1944
1945 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1946 reg &= ~dma_ctrl;
1947 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1948
1949 bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
1950 udelay(10);
1951 bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
1952
1953 return dma_ctrl;
1954}
1955
1956static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
1957{
1958 u32 reg;
1959
1960 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1961 reg |= dma_ctrl;
1962 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1963
1964 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1965 reg |= dma_ctrl;
1966 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1967}
1968
1969static int bcmgenet_open(struct net_device *dev)
1970{
1971 struct bcmgenet_priv *priv = netdev_priv(dev);
1972 unsigned long dma_ctrl;
1973 u32 reg;
1974 int ret;
1975
1976 netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
1977
1978 /* Turn on the clock */
1979 if (!IS_ERR(priv->clk))
1980 clk_prepare_enable(priv->clk);
1981
1982 /* take MAC out of reset */
1983 bcmgenet_umac_reset(priv);
1984
1985 ret = init_umac(priv);
1986 if (ret)
1987 goto err_clk_disable;
1988
1989 /* disable ethernet MAC while updating its registers */
1990 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
1991 reg &= ~(CMD_TX_EN | CMD_RX_EN);
1992 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1993
1994 bcmgenet_set_hw_addr(priv, dev->dev_addr);
1995
1996 if (priv->wol_enabled) {
1997 ret = bcmgenet_wol_resume(priv);
1998 if (ret)
1999 return ret;
2000 }
2001
2002 if (phy_is_internal(priv->phydev)) {
2003 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
2004 reg |= EXT_ENERGY_DET_MASK;
2005 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
2006 }
2007
2008 /* Disable RX/TX DMA and flush TX queues */
2009 dma_ctrl = bcmgenet_dma_disable(priv);
2010
2011 /* Reinitialize TDMA and RDMA and SW housekeeping */
2012 ret = bcmgenet_init_dma(priv);
2013 if (ret) {
2014 netdev_err(dev, "failed to initialize DMA\n");
2015 goto err_fini_dma;
2016 }
2017
2018 /* Always enable ring 16 - descriptor ring */
2019 bcmgenet_enable_dma(priv, dma_ctrl);
2020
2021 ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
2022 dev->name, priv);
2023 if (ret < 0) {
2024 netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
2025 goto err_fini_dma;
2026 }
2027
2028 ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
2029 dev->name, priv);
2030 if (ret < 0) {
2031 netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
2032 goto err_irq0;
2033 }
2034
2035 /* Start the network engine */
2036 napi_enable(&priv->napi);
2037
2038 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2039 reg |= (CMD_TX_EN | CMD_RX_EN);
2040 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2041
2042 /* Make sure we reflect the value of CRC_CMD_FWD */
2043 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2044
2045 device_set_wakeup_capable(&dev->dev, 1);
2046
2047 if (phy_is_internal(priv->phydev))
2048 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2049
2050 netif_tx_start_all_queues(dev);
2051
2052 phy_start(priv->phydev);
2053
2054 return 0;
2055
2056err_irq0:
2057 free_irq(priv->irq0, dev);
2058err_fini_dma:
2059 bcmgenet_fini_dma(priv);
2060err_clk_disable:
2061 if (!IS_ERR(priv->clk))
2062 clk_disable_unprepare(priv->clk);
2063 return ret;
2064}
2065
2066static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2067{
2068 int ret = 0;
2069 int timeout = 0;
2070 u32 reg;
2071
2072 /* Disable TDMA to stop adding more frames to TX DMA */
2073 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2074 reg &= ~DMA_EN;
2075 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2076
2077 /* Check TDMA status register to confirm TDMA is disabled */
2078 while (timeout++ < DMA_TIMEOUT_VAL) {
2079 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2080 if (reg & DMA_DISABLED)
2081 break;
2082
2083 udelay(1);
2084 }
2085
2086 if (timeout == DMA_TIMEOUT_VAL) {
2087 netdev_warn(priv->dev,
2088 "Timed out while disabling TX DMA\n");
2089 ret = -ETIMEDOUT;
2090 }
2091
2092 /* Wait 10ms for packets to drain from both TX and RX DMA */
2093 usleep_range(10000, 20000);
2094
2095 /* Disable RDMA */
2096 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2097 reg &= ~DMA_EN;
2098 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2099
2100 timeout = 0;
2101 /* Check RDMA status register to confirm RDMA is disabled */
2102 while (timeout++ < DMA_TIMEOUT_VAL) {
2103 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2104 if (reg & DMA_DISABLED)
2105 break;
2106
2107 udelay(1);
2108 }
2109
2110 if (timeout == DMA_TIMEOUT_VAL) {
2111 netdev_warn(priv->dev,
2112 "Timed out while disabling RX DMA\n");
2113 ret = -ETIMEDOUT;
2114 }
2115
2116 return ret;
2117}
2118
2119static int bcmgenet_close(struct net_device *dev)
2120{
2121 struct bcmgenet_priv *priv = netdev_priv(dev);
2122 int ret;
2123 u32 reg;
2124
2125 netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
2126
2127 phy_stop(priv->phydev);
2128
2129 /* Disable MAC receive */
2130 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2131 reg &= ~CMD_RX_EN;
2132 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2133
2134 netif_tx_stop_all_queues(dev);
2135
2136 ret = bcmgenet_dma_teardown(priv);
2137 if (ret)
2138 return ret;
2139
2140 /* Disable MAC transmit. TX DMA must be disabled before this */
2141 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2142 reg &= ~CMD_TX_EN;
2143 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2144
2145 napi_disable(&priv->napi);
2146
2147 /* tx reclaim */
2148 bcmgenet_tx_reclaim_all(dev);
2149 bcmgenet_fini_dma(priv);
2150
2151 free_irq(priv->irq0, priv);
2152 free_irq(priv->irq1, priv);
2153
2154 /* Wait for pending work items to complete - we are stopping
2155 * the clock now. Since interrupts are disabled, no new work
2156 * will be scheduled.
2157 */
2158 cancel_work_sync(&priv->bcmgenet_irq_work);
2159
2160 if (phy_is_internal(priv->phydev))
2161 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
2162
2163 if (priv->wol_enabled)
2164 clk_enable(priv->clk_wol);
2165
2166 if (!IS_ERR(priv->clk))
2167 clk_disable_unprepare(priv->clk);
2168
2169 return 0;
2170}
2171
2172static void bcmgenet_timeout(struct net_device *dev)
2173{
2174 struct bcmgenet_priv *priv = netdev_priv(dev);
2175
2176 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
2177
2178 dev->trans_start = jiffies;
2179
2180 dev->stats.tx_errors++;
2181
2182 netif_tx_wake_all_queues(dev);
2183}
2184
2185#define MAX_MC_COUNT 16
2186
2187static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
2188 unsigned char *addr,
2189 int *i,
2190 int *mc)
2191{
2192 u32 reg;
2193
2194 bcmgenet_umac_writel(priv,
2195 addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
2196 bcmgenet_umac_writel(priv,
2197 addr[2] << 24 | addr[3] << 16 |
2198 addr[4] << 8 | addr[5],
2199 UMAC_MDF_ADDR + ((*i + 1) * 4));
2200 reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
2201 reg |= (1 << (MAX_MC_COUNT - *mc));
2202 bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
2203 *i += 2;
2204 (*mc)++;
2205}
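/* Example (illustrative): the multicast address 01:00:5e:00:00:01 occupies
 * two MDF words, 0x0100 and 0x5e000001; the first entry programmed
 * (*mc == 0) is enabled through bit 16 of UMAC_MDF_CTRL.
 */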
2206
2207static void bcmgenet_set_rx_mode(struct net_device *dev)
2208{
2209 struct bcmgenet_priv *priv = netdev_priv(dev);
2210 struct netdev_hw_addr *ha;
2211 int i, mc;
2212 u32 reg;
2213
2214 netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
2215
2216 /* Promiscuous mode */
2217 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2218 if (dev->flags & IFF_PROMISC) {
2219 reg |= CMD_PROMISC;
2220 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2221 bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
2222 return;
2223 } else {
2224 reg &= ~CMD_PROMISC;
2225 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2226 }
2227
2228 /* UniMAC doesn't support ALLMULTI */
2229 if (dev->flags & IFF_ALLMULTI) {
2230 netdev_warn(dev, "ALLMULTI is not supported\n");
2231 return;
2232 }
2233
2234 /* update MDF filter */
2235 i = 0;
2236 mc = 0;
2237 /* Broadcast */
2238 bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
2239 /* our own address */
2240 bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
2241 /* Unicast list */
2242 if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
2243 return;
2244
2245 if (!netdev_uc_empty(dev))
2246 netdev_for_each_uc_addr(ha, dev)
2247 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2248 /* Multicast */
2249 if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
2250 return;
2251
2252 netdev_for_each_mc_addr(ha, dev)
2253 bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
2254}
2255
2256/* Set the hardware MAC address. */
2257static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
2258{
2259 struct sockaddr *addr = p;
2260
2261 /* Setting the MAC address at the hardware level is not possible
2262 * without disabling the UniMAC RX/TX enable bits.
2263 */
2264 if (netif_running(dev))
2265 return -EBUSY;
2266
2267 ether_addr_copy(dev->dev_addr, addr->sa_data);
2268
2269 return 0;
2270}
2271
2272static const struct net_device_ops bcmgenet_netdev_ops = {
2273 .ndo_open = bcmgenet_open,
2274 .ndo_stop = bcmgenet_close,
2275 .ndo_start_xmit = bcmgenet_xmit,
2276 .ndo_tx_timeout = bcmgenet_timeout,
2277 .ndo_set_rx_mode = bcmgenet_set_rx_mode,
2278 .ndo_set_mac_address = bcmgenet_set_mac_addr,
2279 .ndo_do_ioctl = bcmgenet_ioctl,
2280 .ndo_set_features = bcmgenet_set_features,
2281};
2282
2283/* Array of GENET hardware parameters/characteristics */
2284static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
2285 [GENET_V1] = {
2286 .tx_queues = 0,
2287 .rx_queues = 0,
2288 .bds_cnt = 0,
2289 .bp_in_en_shift = 16,
2290 .bp_in_mask = 0xffff,
2291 .hfb_filter_cnt = 16,
2292 .qtag_mask = 0x1F,
2293 .hfb_offset = 0x1000,
2294 .rdma_offset = 0x2000,
2295 .tdma_offset = 0x3000,
2296 .words_per_bd = 2,
2297 },
2298 [GENET_V2] = {
2299 .tx_queues = 4,
2300 .rx_queues = 4,
2301 .bds_cnt = 32,
2302 .bp_in_en_shift = 16,
2303 .bp_in_mask = 0xffff,
2304 .hfb_filter_cnt = 16,
2305 .qtag_mask = 0x1F,
2306 .tbuf_offset = 0x0600,
2307 .hfb_offset = 0x1000,
2308 .hfb_reg_offset = 0x2000,
2309 .rdma_offset = 0x3000,
2310 .tdma_offset = 0x4000,
2311 .words_per_bd = 2,
2312 .flags = GENET_HAS_EXT,
2313 },
2314 [GENET_V3] = {
2315 .tx_queues = 4,
2316 .rx_queues = 4,
2317 .bds_cnt = 32,
2318 .bp_in_en_shift = 17,
2319 .bp_in_mask = 0x1ffff,
2320 .hfb_filter_cnt = 48,
2321 .qtag_mask = 0x3F,
2322 .tbuf_offset = 0x0600,
2323 .hfb_offset = 0x8000,
2324 .hfb_reg_offset = 0xfc00,
2325 .rdma_offset = 0x10000,
2326 .tdma_offset = 0x11000,
2327 .words_per_bd = 2,
2328 .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
2329 },
2330 [GENET_V4] = {
2331 .tx_queues = 4,
2332 .rx_queues = 4,
2333 .bds_cnt = 32,
2334 .bp_in_en_shift = 17,
2335 .bp_in_mask = 0x1ffff,
2336 .hfb_filter_cnt = 48,
2337 .qtag_mask = 0x3F,
2338 .tbuf_offset = 0x0600,
2339 .hfb_offset = 0x8000,
2340 .hfb_reg_offset = 0xfc00,
2341 .rdma_offset = 0x2000,
2342 .tdma_offset = 0x4000,
2343 .words_per_bd = 3,
2344 .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
2345 },
2346};
2347
2348/* Infer hardware parameters from the detected GENET version */
2349static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
2350{
2351 struct bcmgenet_hw_params *params;
2352 u32 reg;
2353 u8 major;
2354
2355 if (GENET_IS_V4(priv)) {
2356 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
2357 genet_dma_ring_regs = genet_dma_ring_regs_v4;
2358 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
2359 priv->version = GENET_V4;
2360 } else if (GENET_IS_V3(priv)) {
2361 bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
2362 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2363 priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
2364 priv->version = GENET_V3;
2365 } else if (GENET_IS_V2(priv)) {
2366 bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
2367 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2368 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
2369 priv->version = GENET_V2;
2370 } else if (GENET_IS_V1(priv)) {
2371 bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
2372 genet_dma_ring_regs = genet_dma_ring_regs_v123;
2373 priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
2374 priv->version = GENET_V1;
2375 }
2376
2377 /* enum genet_version starts at 1 */
2378 priv->hw_params = &bcmgenet_hw_params[priv->version];
2379 params = priv->hw_params;
2380
2381 /* Read GENET HW version */
2382 reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
2383 major = (reg >> 24) & 0x0f;
2384 if (major == 5)
2385 major = 4;
2386 else if (major == 0)
2387 major = 1;
2388 if (major != priv->version) {
2389 dev_err(&priv->pdev->dev,
2390 "GENET version mismatch, got: %d, configured for: %d\n",
2391 major, priv->version);
2392 }
2393
2394 /* Print the GENET core version */
2395 dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
2396 major, (reg >> 16) & 0x0f, reg & 0xffff);
2397
2398#ifdef CONFIG_PHYS_ADDR_T_64BIT
2399 if (!(params->flags & GENET_HAS_40BITS))
2400 pr_warn("GENET does not support 40-bit PA\n");
2401#endif
2402
2403 pr_debug("Configuration for version: %d\n"
2404 "TXq: %1d, RXq: %1d, BDs: %1d\n"
2405 "BP << en: %2d, BP msk: 0x%05x\n"
2406 "HFB count: %2d, QTAG msk: 0x%05x\n"
2407 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
2408 "RDMA: 0x%05x, TDMA: 0x%05x\n"
2409 "Words/BD: %d\n",
2410 priv->version,
2411 params->tx_queues, params->rx_queues, params->bds_cnt,
2412 params->bp_in_en_shift, params->bp_in_mask,
2413 params->hfb_filter_cnt, params->qtag_mask,
2414 params->tbuf_offset, params->hfb_offset,
2415 params->hfb_reg_offset,
2416 params->rdma_offset, params->tdma_offset,
2417 params->words_per_bd);
2418}
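/* Example (illustrative): a SYS_REV_CTRL value of 0x03000000 decodes to
 * major = 3 and should pair with a "brcm,genet-v3" compatible node;
 * hardware reporting major 5 is treated as GENET_V4 and major 0 as
 * GENET_V1.
 */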
2419
2420static const struct of_device_id bcmgenet_match[] = {
2421 { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
2422 { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
2423 { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
2424 { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
2425 { },
2426};
2427
2428static int bcmgenet_probe(struct platform_device *pdev)
2429{
2430 struct device_node *dn = pdev->dev.of_node;
2431 const struct of_device_id *of_id;
2432 struct bcmgenet_priv *priv;
2433 struct net_device *dev;
2434 const void *macaddr;
2435 struct resource *r;
2436 int err = -EIO;
2437
2438 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
2439 dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
2440 if (!dev) {
2441 dev_err(&pdev->dev, "can't allocate net device\n");
2442 return -ENOMEM;
2443 }
2444
2445 of_id = of_match_node(bcmgenet_match, dn);
2446 if (!of_id)
2447 return -EINVAL;
2448
2449 priv = netdev_priv(dev);
2450 priv->irq0 = platform_get_irq(pdev, 0);
2451 priv->irq1 = platform_get_irq(pdev, 1);
2452 if (!priv->irq0 || !priv->irq1) {
2453 dev_err(&pdev->dev, "can't find IRQs\n");
2454 err = -EINVAL;
2455 goto err;
2456 }
2457
2458 macaddr = of_get_mac_address(dn);
2459 if (!macaddr) {
2460 dev_err(&pdev->dev, "can't find MAC address\n");
2461 err = -EINVAL;
2462 goto err;
2463 }
2464
2465 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2466 priv->base = devm_ioremap_resource(&pdev->dev, r);
2467 if (IS_ERR(priv->base)) {
2468 err = PTR_ERR(priv->base);
2469 goto err;
2470 }
2471
2472 SET_NETDEV_DEV(dev, &pdev->dev);
2473 dev_set_drvdata(&pdev->dev, dev);
2474 ether_addr_copy(dev->dev_addr, macaddr);
2475 dev->watchdog_timeo = 2 * HZ;
2476 SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
2477 dev->netdev_ops = &bcmgenet_netdev_ops;
2478 netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
2479
2480 priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
2481
2482 /* Set hardware features */
2483 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
2484 NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
2485
2486 /* Set the needed headroom to account for any possible
2487 * features enabling/disabling at runtime
2488 */
2489 dev->needed_headroom += 64;
2490
2491 netdev_boot_setup_check(dev);
2492
2493 priv->dev = dev;
2494 priv->pdev = pdev;
2495 priv->version = (enum bcmgenet_version)of_id->data;
2496
2497 bcmgenet_set_hw_params(priv);
2498
2499 /* Mii wait queue */
2500 init_waitqueue_head(&priv->wq);
2501 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
2502 priv->rx_buf_len = RX_BUF_LENGTH;
2503 INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
2504
2505 priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
2506 if (IS_ERR(priv->clk))
2507 dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
2508
2509 priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
2510 if (IS_ERR(priv->clk_wol))
2511 dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
2512
2513 if (!IS_ERR(priv->clk))
2514 clk_prepare_enable(priv->clk);
2515
2516 err = reset_umac(priv);
2517 if (err)
2518 goto err_clk_disable;
2519
2520 err = bcmgenet_mii_init(dev);
2521 if (err)
2522 goto err_clk_disable;
2523
2524 /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
2525 * queues, just the ring 16 descriptor-based TX queue)
2526 */
2527 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2528 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2529
2530 err = register_netdev(dev);
2531 if (err)
2532 goto err_clk_disable;
2533
2534 /* Turn off the main clock, WOL clock is handled separately */
2535 if (!IS_ERR(priv->clk))
2536 clk_disable_unprepare(priv->clk);
2537
2538 return err;
2539
2540err_clk_disable:
2541 if (!IS_ERR(priv->clk))
2542 clk_disable_unprepare(priv->clk);
2543err:
2544 free_netdev(dev);
2545 return err;
2546}
2547
2548static int bcmgenet_remove(struct platform_device *pdev)
2549{
2550 struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
2551
2552 dev_set_drvdata(&pdev->dev, NULL);
2553 unregister_netdev(priv->dev);
2554 bcmgenet_mii_exit(priv->dev);
2555 free_netdev(priv->dev);
2556
2557 return 0;
2558}
2559
2560
2561static struct platform_driver bcmgenet_driver = {
2562 .probe = bcmgenet_probe,
2563 .remove = bcmgenet_remove,
2564 .driver = {
2565 .name = "bcmgenet",
2566 .owner = THIS_MODULE,
2567 .of_match_table = bcmgenet_match,
2568 },
2569};
2570module_platform_driver(bcmgenet_driver);
2571
2572MODULE_AUTHOR("Broadcom Corporation");
2573MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
2574MODULE_ALIAS("platform:bcmgenet");
2575MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 000000000000..0f117105fed1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,628 @@
1/*
2 * Copyright (c) 2014 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 *
18*/
19#ifndef __BCMGENET_H__
20#define __BCMGENET_H__
21
22#include <linux/skbuff.h>
23#include <linux/netdevice.h>
24#include <linux/spinlock.h>
25#include <linux/clk.h>
26#include <linux/mii.h>
27#include <linux/if_vlan.h>
28#include <linux/phy.h>
29
30/* total number of Buffer Descriptors, same for Rx/Tx */
31#define TOTAL_DESC 256
32
33/* which ring is descriptor based */
34#define DESC_INDEX 16
35
36/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528;
37 * adding ENET_PAD(8) gives 1536, which is a multiple of 256 bytes
38 */
39#define ENET_BRCM_TAG_LEN 6
40#define ENET_PAD 8
41#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
42 ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
43#define DMA_MAX_BURST_LENGTH 0x10
44
45/* misc. configuration */
46#define CLEAR_ALL_HFB 0xFF
47#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
48#define DMA_FC_THRESH_LO 5
49
50/* 64B receive/transmit status block */
51struct status_64 {
52 u32 length_status; /* length and peripheral status */
53 u32 ext_status; /* Extended status*/
54 u32 rx_csum; /* partial rx checksum */
55 u32 unused1[9]; /* unused */
56 u32 tx_csum_info; /* Tx checksum info. */
57 u32 unused2[3]; /* unused */
58};
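/* Note (illustrative): 16 32-bit words in total (1 + 1 + 1 + 9 + 1 + 3),
 * i.e. exactly the 64 bytes the hardware prepends to a frame when
 * RBUF_64B_EN is set.
 */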
59
60/* Rx status bits */
61#define STATUS_RX_EXT_MASK 0x1FFFFF
62#define STATUS_RX_CSUM_MASK 0xFFFF
63#define STATUS_RX_CSUM_OK 0x10000
64#define STATUS_RX_CSUM_FR 0x20000
65#define STATUS_RX_PROTO_TCP 0
66#define STATUS_RX_PROTO_UDP 1
67#define STATUS_RX_PROTO_ICMP 2
68#define STATUS_RX_PROTO_OTHER 3
69#define STATUS_RX_PROTO_MASK 3
70#define STATUS_RX_PROTO_SHIFT 18
71#define STATUS_FILTER_INDEX_MASK 0xFFFF
72/* Tx status bits */
73#define STATUS_TX_CSUM_START_MASK 0X7FFF
74#define STATUS_TX_CSUM_START_SHIFT 16
75#define STATUS_TX_CSUM_PROTO_UDP 0x8000
76#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
77#define STATUS_TX_CSUM_LV 0x80000000
78
79/* DMA Descriptor */
80#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
81#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
82#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
83
84/* Rx/Tx common counter group */
85struct bcmgenet_pkt_counters {
86 u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
87 u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
88 u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
89 u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
90 u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
91 u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
92 u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
93 u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */
94 u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */
95 u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */
96};
97
98/* RSV, Receive Status Vector */
99struct bcmgenet_rx_counters {
100 struct bcmgenet_pkt_counters pkt_cnt;
101 u32 pkt; /* RO (0x428) Received pkt count*/
102 u32 bytes; /* RO Received byte count */
103 u32 mca; /* RO # of Received multicast pkt */
104 u32 bca; /* RO # of Receive broadcast pkt */
105 u32 fcs; /* RO # of Received FCS error */
106 u32 cf; /* RO # of Received control frame pkt*/
107 u32 pf; /* RO # of Received pause frame pkt */
108 u32 uo; /* RO # of unknown op code pkt */
109 u32 aln; /* RO # of alignment error count */
110 u32 flr; /* RO # of frame length out of range count */
111 u32 cde; /* RO # of code error pkt */
112 u32 fcr; /* RO # of carrier sense error pkt */
113 u32 ovr; /* RO # of oversize pkt*/
114 u32 jbr; /* RO # of jabber count */
115 u32 mtue; /* RO # of MTU error pkt*/
116 u32 pok; /* RO # of Received good pkt */
117 u32 uc; /* RO # of unicast pkt */
118 u32 ppp; /* RO # of PPP pkt */
119 u32 rcrc; /* RO (0x470),# of CRC match pkt */
120};
121
122/* TSV, Transmit Status Vector */
123struct bcmgenet_tx_counters {
124 struct bcmgenet_pkt_counters pkt_cnt;
125 u32 pkts; /* RO (0x4a8) Transmitted pkt */
126 u32 mca; /* RO # of xmited multicast pkt */
127 u32 bca; /* RO # of xmited broadcast pkt */
128 u32 pf; /* RO # of xmited pause frame count */
129 u32 cf; /* RO # of xmited control frame count */
130 u32 fcs; /* RO # of xmited FCS error count */
131 u32 ovr; /* RO # of xmited oversize pkt */
132 u32 drf; /* RO # of xmited deferral pkt */
133 u32 edf; /* RO # of xmited Excessive deferral pkt*/
134 u32 scl; /* RO # of xmited single collision pkt */
135 u32 mcl; /* RO # of xmited multiple collision pkt*/
136 u32 lcl; /* RO # of xmited late collision pkt */
137 u32 ecl; /* RO # of xmited excessive collision pkt*/
138 u32 frg; /* RO # of xmited fragments pkt*/
139 u32 ncl; /* RO # of xmited total collision count */
140 u32 jbr; /* RO # of xmited jabber count*/
141 u32 bytes; /* RO # of xmited byte count */
142 u32 pok; /* RO # of xmited good pkt */
143 u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
144};
145
146struct bcmgenet_mib_counters {
147 struct bcmgenet_rx_counters rx;
148 struct bcmgenet_tx_counters tx;
149 u32 rx_runt_cnt;
150 u32 rx_runt_fcs;
151 u32 rx_runt_fcs_align;
152 u32 rx_runt_bytes;
153 u32 rbuf_ovflow_cnt;
154 u32 rbuf_err_cnt;
155 u32 mdf_err_cnt;
156};
157
158#define UMAC_HD_BKP_CTRL 0x004
159#define HD_FC_EN (1 << 0)
160#define HD_FC_BKOFF_OK (1 << 1)
161#define IPG_CONFIG_RX_SHIFT 2
162#define IPG_CONFIG_RX_MASK 0x1F
163
164#define UMAC_CMD 0x008
165#define CMD_TX_EN (1 << 0)
166#define CMD_RX_EN (1 << 1)
167#define UMAC_SPEED_10 0
168#define UMAC_SPEED_100 1
169#define UMAC_SPEED_1000 2
170#define UMAC_SPEED_2500 3
171#define CMD_SPEED_SHIFT 2
172#define CMD_SPEED_MASK 3
173#define CMD_PROMISC (1 << 4)
174#define CMD_PAD_EN (1 << 5)
175#define CMD_CRC_FWD (1 << 6)
176#define CMD_PAUSE_FWD (1 << 7)
177#define CMD_RX_PAUSE_IGNORE (1 << 8)
178#define CMD_TX_ADDR_INS (1 << 9)
179#define CMD_HD_EN (1 << 10)
180#define CMD_SW_RESET (1 << 13)
181#define CMD_LCL_LOOP_EN (1 << 15)
182#define CMD_AUTO_CONFIG (1 << 22)
183#define CMD_CNTL_FRM_EN (1 << 23)
184#define CMD_NO_LEN_CHK (1 << 24)
185#define CMD_RMT_LOOP_EN (1 << 25)
186#define CMD_PRBL_EN (1 << 27)
187#define CMD_TX_PAUSE_IGNORE (1 << 28)
188#define CMD_TX_RX_EN (1 << 29)
189#define CMD_RUNT_FILTER_DIS (1 << 30)
190
191#define UMAC_MAC0 0x00C
192#define UMAC_MAC1 0x010
193#define UMAC_MAX_FRAME_LEN 0x014
194
195#define UMAC_TX_FLUSH 0x334
196
197#define UMAC_MIB_START 0x400
198
199#define UMAC_MDIO_CMD 0x614
200#define MDIO_START_BUSY (1 << 29)
201#define MDIO_READ_FAIL (1 << 28)
202#define MDIO_RD (2 << 26)
203#define MDIO_WR (1 << 26)
204#define MDIO_PMD_SHIFT 21
205#define MDIO_PMD_MASK 0x1F
206#define MDIO_REG_SHIFT 16
207#define MDIO_REG_MASK 0x1F
208
209#define UMAC_RBUF_OVFL_CNT 0x61C
210
211#define UMAC_MPD_CTRL 0x620
212#define MPD_EN (1 << 0)
213#define MPD_PW_EN (1 << 27)
214#define MPD_MSEQ_LEN_SHIFT 16
215#define MPD_MSEQ_LEN_MASK 0xFF
216
217#define UMAC_MPD_PW_MS 0x624
218#define UMAC_MPD_PW_LS 0x628
219#define UMAC_RBUF_ERR_CNT 0x634
220#define UMAC_MDF_ERR_CNT 0x638
221#define UMAC_MDF_CTRL 0x650
222#define UMAC_MDF_ADDR 0x654
223#define UMAC_MIB_CTRL 0x580
224#define MIB_RESET_RX (1 << 0)
225#define MIB_RESET_RUNT (1 << 1)
226#define MIB_RESET_TX (1 << 2)
227
228#define RBUF_CTRL 0x00
229#define RBUF_64B_EN (1 << 0)
230#define RBUF_ALIGN_2B (1 << 1)
231#define RBUF_BAD_DIS (1 << 2)
232
233#define RBUF_STATUS 0x0C
234#define RBUF_STATUS_WOL (1 << 0)
235#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
236#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
237
238#define RBUF_CHK_CTRL 0x14
239#define RBUF_RXCHK_EN (1 << 0)
240#define RBUF_SKIP_FCS (1 << 4)
241
242#define RBUF_TBUF_SIZE_CTRL 0xb4
243
244#define RBUF_HFB_CTRL_V1 0x38
245#define RBUF_HFB_FILTER_EN_SHIFT 16
246#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
247#define RBUF_HFB_EN (1 << 0)
248#define RBUF_HFB_256B (1 << 1)
249#define RBUF_ACPI_EN (1 << 2)
250
251#define RBUF_HFB_LEN_V1 0x3C
252#define RBUF_FLTR_LEN_MASK 0xFF
253#define RBUF_FLTR_LEN_SHIFT 8
254
255#define TBUF_CTRL 0x00
256#define TBUF_BP_MC 0x0C
257
258#define TBUF_CTRL_V1 0x80
259#define TBUF_BP_MC_V1 0xA0
260
261#define HFB_CTRL 0x00
262#define HFB_FLT_ENABLE_V3PLUS 0x04
263#define HFB_FLT_LEN_V2 0x04
264#define HFB_FLT_LEN_V3PLUS 0x1C
265
266/* uniMac intrl2 registers */
267#define INTRL2_CPU_STAT 0x00
268#define INTRL2_CPU_SET 0x04
269#define INTRL2_CPU_CLEAR 0x08
270#define INTRL2_CPU_MASK_STATUS 0x0C
271#define INTRL2_CPU_MASK_SET 0x10
272#define INTRL2_CPU_MASK_CLEAR 0x14
273
274/* INTRL2 instance 0 definitions */
275#define UMAC_IRQ_SCB (1 << 0)
276#define UMAC_IRQ_EPHY (1 << 1)
277#define UMAC_IRQ_PHY_DET_R (1 << 2)
278#define UMAC_IRQ_PHY_DET_F (1 << 3)
279#define UMAC_IRQ_LINK_UP (1 << 4)
280#define UMAC_IRQ_LINK_DOWN (1 << 5)
281#define UMAC_IRQ_UMAC (1 << 6)
282#define UMAC_IRQ_UMAC_TSV (1 << 7)
283#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
284#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
285#define UMAC_IRQ_HFB_SM (1 << 10)
286#define UMAC_IRQ_HFB_MM (1 << 11)
287#define UMAC_IRQ_MPD_R (1 << 12)
288#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
289#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
290#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
291#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
292#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
293#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
294/* Only valid for GENETv3+ */
295#define UMAC_IRQ_MDIO_DONE (1 << 23)
296#define UMAC_IRQ_MDIO_ERROR (1 << 24)
297
298/* Register block offsets */
299#define GENET_SYS_OFF 0x0000
300#define GENET_GR_BRIDGE_OFF 0x0040
301#define GENET_EXT_OFF 0x0080
302#define GENET_INTRL2_0_OFF 0x0200
303#define GENET_INTRL2_1_OFF 0x0240
304#define GENET_RBUF_OFF 0x0300
305#define GENET_UMAC_OFF 0x0800
306
307/* SYS block offsets and register definitions */
308#define SYS_REV_CTRL 0x00
309#define SYS_PORT_CTRL 0x04
310#define PORT_MODE_INT_EPHY 0
311#define PORT_MODE_INT_GPHY 1
312#define PORT_MODE_EXT_EPHY 2
313#define PORT_MODE_EXT_GPHY 3
314#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
315#define PORT_MODE_EXT_RVMII_50 4
316#define LED_ACT_SOURCE_MAC (1 << 9)
317
318#define SYS_RBUF_FLUSH_CTRL 0x08
319#define SYS_TBUF_FLUSH_CTRL 0x0C
320#define RBUF_FLUSH_CTRL_V1 0x04
321
322/* Ext block register offsets and definitions */
323#define EXT_EXT_PWR_MGMT 0x00
324#define EXT_PWR_DOWN_BIAS (1 << 0)
325#define EXT_PWR_DOWN_DLL (1 << 1)
326#define EXT_PWR_DOWN_PHY (1 << 2)
327#define EXT_PWR_DN_EN_LD (1 << 3)
328#define EXT_ENERGY_DET (1 << 4)
329#define EXT_IDDQ_FROM_PHY (1 << 5)
330#define EXT_PHY_RESET (1 << 8)
331#define EXT_ENERGY_DET_MASK (1 << 12)
332
333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5)
337#define ID_MODE_DIS (1 << 16)
338
339#define EXT_GPHY_CTRL 0x1C
340#define EXT_CFG_IDDQ_BIAS (1 << 0)
341#define EXT_CFG_PWR_DOWN (1 << 1)
342#define EXT_GPHY_RESET (1 << 5)
343
344/* DMA rings size */
345#define DMA_RING_SIZE (0x40)
346#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
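/* i.e. 17 rings (0-15 plus the default ring 16) * 0x40 bytes = 0x440 */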
347
348/* DMA registers common definitions */
349#define DMA_RW_POINTER_MASK 0x1FF
350#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
351#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
352#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
353#define DMA_BUFFER_DONE_CNT_SHIFT 16
354#define DMA_P_INDEX_MASK 0xFFFF
355#define DMA_C_INDEX_MASK 0xFFFF
356
357/* DMA ring size register */
358#define DMA_RING_SIZE_MASK 0xFFFF
359#define DMA_RING_SIZE_SHIFT 16
360#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
361
362/* DMA interrupt threshold register */
363#define DMA_INTR_THRESHOLD_MASK 0x00FF
364
365/* DMA XON/XOFF register */
366#define DMA_XON_THREHOLD_MASK 0xFFFF
367#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
368#define DMA_XOFF_THRESHOLD_SHIFT 16
369
370/* DMA flow period register */
371#define DMA_FLOW_PERIOD_MASK 0xFFFF
372#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
373#define DMA_MAX_PKT_SIZE_SHIFT 16
374
375
376/* DMA control register */
377#define DMA_EN (1 << 0)
378#define DMA_RING_BUF_EN_SHIFT 0x01
379#define DMA_RING_BUF_EN_MASK 0xFFFF
380#define DMA_TSB_SWAP_EN (1 << 20)
381
382/* DMA status register */
383#define DMA_DISABLED (1 << 0)
384#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
385
386/* DMA SCB burst size register */
387#define DMA_SCB_BURST_SIZE_MASK 0x1F
388
389/* DMA activity vector register */
390#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
391
392/* DMA backpressure mask register */
393#define DMA_BACKPRESSURE_MASK 0x1FFFF
394#define DMA_PFC_ENABLE (1 << 31)
395
396/* DMA backpressure status register */
397#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
398
399/* DMA override register */
400#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
401#define DMA_REGISTER_MODE (1 << 1)
402
403/* DMA timeout register */
404#define DMA_TIMEOUT_MASK 0xFFFF
405#define DMA_TIMEOUT_VAL 5000 /* microseconds */
406
407/* TDMA rate limiting control register */
408#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
409
410/* TDMA arbitration control register */
411#define DMA_ARBITER_MODE_MASK 0x03
412#define DMA_RING_BUF_PRIORITY_MASK 0x1F
413#define DMA_RING_BUF_PRIORITY_SHIFT 5
414#define DMA_RATE_ADJ_MASK 0xFF
415
416/* Tx/Rx Dma Descriptor common bits*/
417#define DMA_BUFLENGTH_MASK 0x0fff
418#define DMA_BUFLENGTH_SHIFT 16
419#define DMA_OWN 0x8000
420#define DMA_EOP 0x4000
421#define DMA_SOP 0x2000
422#define DMA_WRAP 0x1000
423/* Tx specific Dma descriptor bits */
424#define DMA_TX_UNDERRUN 0x0200
425#define DMA_TX_APPEND_CRC 0x0040
426#define DMA_TX_OW_CRC 0x0020
427#define DMA_TX_DO_CSUM 0x0010
428#define DMA_TX_QTAG_SHIFT 7
429
430/* Rx Specific Dma descriptor bits */
431#define DMA_RX_CHK_V3PLUS 0x8000
432#define DMA_RX_CHK_V12 0x1000
433#define DMA_RX_BRDCAST 0x0040
434#define DMA_RX_MULT 0x0020
435#define DMA_RX_LG 0x0010
436#define DMA_RX_NO 0x0008
437#define DMA_RX_RXER 0x0004
438#define DMA_RX_CRC_ERROR 0x0002
439#define DMA_RX_OV 0x0001
440#define DMA_RX_FI_MASK 0x001F
441#define DMA_RX_FI_SHIFT 0x0007
442#define DMA_DESC_ALLOC_MASK 0x00FF
443
444#define DMA_ARBITER_RR 0x00
445#define DMA_ARBITER_WRR 0x01
446#define DMA_ARBITER_SP 0x02
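/* Example (illustrative): the Tx path would pack a fragment's
 * length_status as roughly (len << DMA_BUFLENGTH_SHIFT) |
 * (qtag << DMA_TX_QTAG_SHIFT) | DMA_TX_APPEND_CRC, with DMA_SOP/DMA_EOP
 * marking the first and last fragment of a frame.
 */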
447
448struct enet_cb {
449 struct sk_buff *skb;
450 void __iomem *bd_addr;
451 DEFINE_DMA_UNMAP_ADDR(dma_addr);
452 DEFINE_DMA_UNMAP_LEN(dma_len);
453};
454
455/* power management mode */
456enum bcmgenet_power_mode {
457 GENET_POWER_CABLE_SENSE = 0,
458 GENET_POWER_PASSIVE,
459};
460
461struct bcmgenet_priv;
462
463/* We support both runtime and compile-time GENET detection
464 * to optimize code-paths for a given hardware version
465 */
466enum bcmgenet_version {
467 GENET_V1 = 1,
468 GENET_V2,
469 GENET_V3,
470 GENET_V4
471};
472
473#define GENET_IS_V1(p) ((p)->version == GENET_V1)
474#define GENET_IS_V2(p) ((p)->version == GENET_V2)
475#define GENET_IS_V3(p) ((p)->version == GENET_V3)
476#define GENET_IS_V4(p) ((p)->version == GENET_V4)
477
478/* Hardware flags */
479#define GENET_HAS_40BITS (1 << 0)
480#define GENET_HAS_EXT (1 << 1)
481#define GENET_HAS_MDIO_INTR (1 << 2)
482
483/* BCMGENET hardware parameters, keep this structure nicely aligned
484 * since it is going to be used in hot paths
485 */
486struct bcmgenet_hw_params {
487 u8 tx_queues;
488 u8 rx_queues;
489 u8 bds_cnt;
490 u8 bp_in_en_shift;
491 u32 bp_in_mask;
492 u8 hfb_filter_cnt;
493 u8 qtag_mask;
494 u16 tbuf_offset;
495 u32 hfb_offset;
496 u32 hfb_reg_offset;
497 u32 rdma_offset;
498 u32 tdma_offset;
499 u32 words_per_bd;
500 u32 flags;
501};
502
503struct bcmgenet_tx_ring {
504 spinlock_t lock; /* ring lock */
505 unsigned int index; /* ring index */
506 unsigned int queue; /* queue index */
507 struct enet_cb *cbs; /* tx ring buffer control block */
508 unsigned int size; /* size of each tx ring */
509 unsigned int c_index; /* last consumer index of each ring */
510 unsigned int free_bds; /* # of free bds for each ring */
511 unsigned int write_ptr; /* Tx ring write pointer SW copy */
512 unsigned int prod_index; /* Tx ring producer index SW copy */
513 unsigned int cb_ptr; /* Tx ring initial CB ptr */
514 unsigned int end_ptr; /* Tx ring end CB ptr */
515 void (*int_enable)(struct bcmgenet_priv *priv,
516 struct bcmgenet_tx_ring *);
517 void (*int_disable)(struct bcmgenet_priv *priv,
518 struct bcmgenet_tx_ring *);
519};
520
521/* device context */
522struct bcmgenet_priv {
523 void __iomem *base;
524 enum bcmgenet_version version;
525 struct net_device *dev;
526 u32 int0_mask;
527 u32 int1_mask;
528
529 /* NAPI for descriptor based rx */
530 struct napi_struct napi ____cacheline_aligned;
531
532 /* transmit variables */
533 void __iomem *tx_bds;
534 struct enet_cb *tx_cbs;
535 unsigned int num_tx_bds;
536
537 struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
538
539 /* receive variables */
540 void __iomem *rx_bds;
541 void __iomem *rx_bd_assign_ptr;
542 int rx_bd_assign_index;
543 struct enet_cb *rx_cbs;
544 unsigned int num_rx_bds;
545 unsigned int rx_buf_len;
546 unsigned int rx_read_ptr;
547 unsigned int rx_c_index;
548
549 /* other misc variables */
550 struct bcmgenet_hw_params *hw_params;
551
552 /* MDIO bus variables */
553 wait_queue_head_t wq;
554 struct phy_device *phydev;
555 struct device_node *phy_dn;
556 struct mii_bus *mii_bus;
557
558 /* PHY device variables */
559 int old_duplex;
560 int old_link;
561 int old_pause;
562 phy_interface_t phy_interface;
563 int phy_addr;
564 int ext_phy;
565
566 /* Interrupt variables */
567 struct work_struct bcmgenet_irq_work;
568 int irq0;
569 int irq1;
570 unsigned int irq0_stat;
571 unsigned int irq1_stat;
572
573 /* HW descriptors/checksum variables */
574 bool desc_64b_en;
575 bool desc_rxchk_en;
576 bool crc_fwd_en;
577
578 unsigned int dma_rx_chk_bit;
579
580 u32 msg_enable;
581
582 struct clk *clk;
583 struct platform_device *pdev;
584
585 /* WOL */
586 unsigned long wol_enabled;
587 struct clk *clk_wol;
588 u32 wolopts;
589
590 struct bcmgenet_mib_counters mib;
591};
592
593#define GENET_IO_MACRO(name, offset) \
594static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
595 u32 off) \
596{ \
597 return __raw_readl(priv->base + offset + off); \
598} \
599static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
600 u32 val, u32 off) \
601{ \
602 __raw_writel(val, priv->base + offset + off); \
603}
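/* For example, GENET_IO_MACRO(umac, GENET_UMAC_OFF) expands to
 * bcmgenet_umac_readl()/bcmgenet_umac_writel(), which access
 * priv->base + 0x0800 + off.
 */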
604
605GENET_IO_MACRO(ext, GENET_EXT_OFF);
606GENET_IO_MACRO(umac, GENET_UMAC_OFF);
607GENET_IO_MACRO(sys, GENET_SYS_OFF);
608
609/* interrupt l2 registers accessors */
610GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
611GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
612
613/* HFB register accessors */
614GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
615
616/* GENET v2+ HFB control and filter len helpers */
617GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
618
619/* RBUF register accessors */
620GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
621
622/* MDIO routines */
623int bcmgenet_mii_init(struct net_device *dev);
624int bcmgenet_mii_config(struct net_device *dev);
625void bcmgenet_mii_exit(struct net_device *dev);
626void bcmgenet_mii_reset(struct net_device *dev);
627
628#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 000000000000..4608673beaff
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
1/*
2 * Broadcom GENET MDIO routines
3 *
4 * Copyright (c) 2014 Broadcom Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20
21#include <linux/types.h>
22#include <linux/delay.h>
23#include <linux/wait.h>
24#include <linux/mii.h>
25#include <linux/ethtool.h>
26#include <linux/bitops.h>
27#include <linux/netdevice.h>
28#include <linux/platform_device.h>
29#include <linux/phy.h>
30#include <linux/phy_fixed.h>
31#include <linux/brcmphy.h>
32#include <linux/of.h>
33#include <linux/of_net.h>
34#include <linux/of_mdio.h>
35
36#include "bcmgenet.h"
37
38/* read a value from the MII */
39static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
40{
41 int ret;
42 struct net_device *dev = bus->priv;
43 struct bcmgenet_priv *priv = netdev_priv(dev);
44 u32 reg;
45
46 bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
47 (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
48 /* Start MDIO transaction */
49 reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
50 reg |= MDIO_START_BUSY;
51 bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
52 wait_event_timeout(priv->wq,
53 !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
54 & MDIO_START_BUSY),
55 HZ / 100);
56 ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
57
58 if (ret & MDIO_READ_FAIL)
59 return -EIO;
60
61 return ret & 0xffff;
62}
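/* Example (illustrative): reading MII_BMSR (register 1) of the PHY at
 * address 1 first writes MDIO_RD | (1 << MDIO_PMD_SHIFT) |
 * (1 << MDIO_REG_SHIFT) == 0x08210000, then sets MDIO_START_BUSY.
 */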
63
64/* write a value to the MII */
65static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
66 int location, u16 val)
67{
68 struct net_device *dev = bus->priv;
69 struct bcmgenet_priv *priv = netdev_priv(dev);
70 u32 reg;
71
72 bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
73 (location << MDIO_REG_SHIFT) | (0xffff & val)),
74 UMAC_MDIO_CMD);
75 reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
76 reg |= MDIO_START_BUSY;
77 bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
78 wait_event_timeout(priv->wq,
79 !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
80 MDIO_START_BUSY),
81 HZ / 100);
82
83 return 0;
84}
85
86/* set up netdev link state when the PHY link status changes and
87 * update the UMAC and RGMII blocks when the link is up
88 */
89static void bcmgenet_mii_setup(struct net_device *dev)
90{
91 struct bcmgenet_priv *priv = netdev_priv(dev);
92 struct phy_device *phydev = priv->phydev;
93 u32 reg, cmd_bits = 0;
94 unsigned int status_changed = 0;
95
96 if (priv->old_link != phydev->link) {
97 status_changed = 1;
98 priv->old_link = phydev->link;
99 }
100
101 if (phydev->link) {
102 /* program UMAC and RGMII block based on established link
103 * speed, pause, and duplex.
104 * the speed set in umac->cmd tells the RGMII block which clock
105 * (25MHz for 100Mbps, 125MHz for 1Gbps) to use for transmit.
106 * the receive clock is provided by the PHY.
107 */
108 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
109 reg &= ~OOB_DISABLE;
110 reg |= RGMII_LINK;
111 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
112
113 /* speed */
114 if (phydev->speed == SPEED_1000)
115 cmd_bits = UMAC_SPEED_1000;
116 else if (phydev->speed == SPEED_100)
117 cmd_bits = UMAC_SPEED_100;
118 else
119 cmd_bits = UMAC_SPEED_10;
120 cmd_bits <<= CMD_SPEED_SHIFT;
121
122 if (priv->old_duplex != phydev->duplex) {
123 status_changed = 1;
124 priv->old_duplex = phydev->duplex;
125 }
126
127 /* duplex */
128 if (phydev->duplex != DUPLEX_FULL)
129 cmd_bits |= CMD_HD_EN;
130
131 if (priv->old_pause != phydev->pause) {
132 status_changed = 1;
133 priv->old_pause = phydev->pause;
134 }
135
136 /* pause capability */
137 if (!phydev->pause)
138 cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
139
140 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
141 reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
142 CMD_HD_EN |
143 CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
144 reg |= cmd_bits;
145 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
146 }
147
148 if (status_changed)
149 phy_print_status(phydev);
150}
151
152void bcmgenet_mii_reset(struct net_device *dev)
153{
154 struct bcmgenet_priv *priv = netdev_priv(dev);
155
156 if (priv->phydev) {
157 phy_init_hw(priv->phydev);
158 phy_start_aneg(priv->phydev);
159 }
160}
161
162static void bcmgenet_ephy_power_up(struct net_device *dev)
163{
164 struct bcmgenet_priv *priv = netdev_priv(dev);
165 u32 reg = 0;
166
167 /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
168 if (!GENET_IS_V4(priv))
169 return;
170
171 reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
172 reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
173 reg |= EXT_GPHY_RESET;
174 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
175 mdelay(2);
176
177 reg &= ~EXT_GPHY_RESET;
178 bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
179 udelay(20);
180}
181
182static void bcmgenet_internal_phy_setup(struct net_device *dev)
183{
184 struct bcmgenet_priv *priv = netdev_priv(dev);
185 u32 reg;
186
187 /* Power up EPHY */
188 bcmgenet_ephy_power_up(dev);
189 /* enable APD */
190 reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
191 reg |= EXT_PWR_DN_EN_LD;
192 bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
193 bcmgenet_mii_reset(dev);
194}
195
196static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
197{
198 u32 reg;
199
200 /* Speed settings are set in bcmgenet_mii_setup() */
201 reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
202 reg |= LED_ACT_SOURCE_MAC;
203 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
204}
205
206int bcmgenet_mii_config(struct net_device *dev)
207{
208 struct bcmgenet_priv *priv = netdev_priv(dev);
209 struct phy_device *phydev = priv->phydev;
210 struct device *kdev = &priv->pdev->dev;
211 const char *phy_name = NULL;
212 u32 id_mode_dis = 0;
213 u32 port_ctrl;
214 u32 reg;
215
216 priv->ext_phy = !phy_is_internal(priv->phydev) &&
217 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
218
219 if (phy_is_internal(priv->phydev))
220 priv->phy_interface = PHY_INTERFACE_MODE_NA;
221
222 switch (priv->phy_interface) {
223 case PHY_INTERFACE_MODE_NA:
224 case PHY_INTERFACE_MODE_MOCA:
225 /* Irrespective of the actually configured PHY speed (100 or
226 * 1000), GENETv4 only has an internal GPHY, so we will just end
227 * up masking the Gigabit features from what we support, not
228 * switching to the EPHY
229 */
230 if (GENET_IS_V4(priv))
231 port_ctrl = PORT_MODE_INT_GPHY;
232 else
233 port_ctrl = PORT_MODE_INT_EPHY;
234
235 bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
236
237 if (phy_is_internal(priv->phydev)) {
238 phy_name = "internal PHY";
239 bcmgenet_internal_phy_setup(dev);
240 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
241 phy_name = "MoCA";
242 bcmgenet_moca_phy_setup(priv);
243 }
244 break;
245
246 case PHY_INTERFACE_MODE_MII:
247 phy_name = "external MII";
248 phydev->supported &= PHY_BASIC_FEATURES;
249 bcmgenet_sys_writel(priv,
250 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
251 break;
252
253 case PHY_INTERFACE_MODE_REVMII:
254 phy_name = "external RvMII";
255 /* of_mdiobus_register took care of reading the 'max-speed'
256 * PHY property for us, effectively limiting the PHY supported
257 * capabilities, use that knowledge to also configure the
258 * Reverse MII interface correctly.
259 */
260 if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
261 PHY_BASIC_FEATURES)
262 port_ctrl = PORT_MODE_EXT_RVMII_25;
263 else
264 port_ctrl = PORT_MODE_EXT_RVMII_50;
265 bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
266 break;
267
268 case PHY_INTERFACE_MODE_RGMII:
269 /* RGMII_NO_ID: TXC transitions at the same time as TXD
270 * (requires PCB or receiver-side delay)
271 * RGMII: Add 2ns delay on TXC (90 degree shift)
272 *
273 * ID is implicitly disabled for 100Mbps (RG)MII operation.
274 */
275 id_mode_dis = BIT(16);
276 /* fall through */
277 case PHY_INTERFACE_MODE_RGMII_TXID:
278 if (id_mode_dis)
279 phy_name = "external RGMII (no delay)";
280 else
281 phy_name = "external RGMII (TX delay)";
282 reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
283 reg |= RGMII_MODE_EN | id_mode_dis;
284 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
285 bcmgenet_sys_writel(priv,
286 PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
287 break;
288 default:
289 dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
290 return -EINVAL;
291 }
292
293 dev_info(kdev, "configuring instance for %s\n", phy_name);
294
295 return 0;
296}
297
298static int bcmgenet_mii_probe(struct net_device *dev)
299{
300 struct bcmgenet_priv *priv = netdev_priv(dev);
301 struct phy_device *phydev;
302 unsigned int phy_flags;
303 int ret;
304
305 if (priv->phydev) {
306 pr_info("PHY already attached\n");
307 return 0;
308 }
309
310 if (priv->phy_dn)
311 phydev = of_phy_connect(dev, priv->phy_dn,
312 bcmgenet_mii_setup, 0,
313 priv->phy_interface);
314 else
315 phydev = of_phy_connect_fixed_link(dev,
316 bcmgenet_mii_setup,
317 priv->phy_interface);
318
319 if (!phydev) {
320 pr_err("could not attach to PHY\n");
321 return -ENODEV;
322 }
323
324 priv->old_link = -1;
325 priv->old_duplex = -1;
326 priv->old_pause = -1;
327 priv->phydev = phydev;
328
329 /* Configure the port multiplexer based on the probed PHY device, since
330 * reading the 'max-speed' property determines the maximum supported
331 * PHY speed, which is needed for bcmgenet_mii_config() to configure
332 * things appropriately.
333 */
334 ret = bcmgenet_mii_config(dev);
335 if (ret) {
336 phy_disconnect(priv->phydev);
337 return ret;
338 }
339
340 phy_flags = PHY_BRCM_100MBPS_WAR;
341
342 /* workarounds are only needed for 100Mbps PHYs, and
343 * never on GENET V1 hardware
344 */
	if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
		phy_flags = 0;

	phydev->dev_flags |= phy_flags;
	phydev->advertising = phydev->supported;

	/* The internal PHY has its link interrupts routed to the
	 * Ethernet MAC ISRs
	 */
	if (phy_is_internal(priv->phydev))
		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
	else
		priv->mii_bus->irq[phydev->addr] = PHY_POLL;

	pr_info("attached PHY at address %d [%s]\n",
		phydev->addr, phydev->drv->name);

	return 0;
}

static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
{
	struct mii_bus *bus;

	if (priv->mii_bus)
		return 0;

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		pr_err("failed to allocate\n");
		return -ENOMEM;
	}

	bus = priv->mii_bus;
	bus->priv = priv->dev;
	bus->name = "bcmgenet MII bus";
	bus->parent = &priv->pdev->dev;
	bus->read = bcmgenet_mii_read;
	bus->write = bcmgenet_mii_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
		 priv->pdev->name, priv->pdev->id);

	bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bus->irq) {
		mdiobus_free(priv->mii_bus);
		return -ENOMEM;
	}

	return 0;
}

static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct device *kdev = &priv->pdev->dev;
	struct device_node *mdio_dn;
	char *compat;
	int ret;

	compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
	if (!compat)
		return -ENOMEM;

	mdio_dn = of_find_compatible_node(dn, NULL, compat);
	kfree(compat);
	if (!mdio_dn) {
		dev_err(kdev, "unable to find MDIO bus node\n");
		return -ENODEV;
	}

	ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
	if (ret) {
		dev_err(kdev, "failed to register MDIO bus\n");
		return ret;
	}

	/* Fetch the PHY phandle */
	priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);

	/* Get the link mode */
	priv->phy_interface = of_get_phy_mode(dn);

	return 0;
}

int bcmgenet_mii_init(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret;

	ret = bcmgenet_mii_alloc(priv);
	if (ret)
		return ret;

	ret = bcmgenet_mii_of_init(priv);
	if (ret)
		goto out_free;

	ret = bcmgenet_mii_probe(dev);
	if (ret)
		goto out;

	return 0;

out:
	mdiobus_unregister(priv->mii_bus);
out_free:
	kfree(priv->mii_bus->irq);
	mdiobus_free(priv->mii_bus);
	return ret;
}

void bcmgenet_mii_exit(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	mdiobus_unregister(priv->mii_bus);
	kfree(priv->mii_bus->irq);
	mdiobus_free(priv->mii_bus);
}
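
A note on the PHY_IGNORE_INTERRUPT setting above: when the internal PHY's link interrupts arrive at the Ethernet MAC rather than at the MDIO bus, phylib stops polling and expects the MAC's own ISR to forward link changes via phy_mac_interrupt() (the phylib entry point for this case at the time of this series, taking the phydev and the new link state). The register names below are hypothetical stand-ins; this is a minimal sketch, not bcmgenet's actual ISR:

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/interrupt.h>

/* Hypothetical register layout, for illustration only */
#define MY_MAC_IRQ_STATUS	0x08
#define MY_MAC_LINK_EVENT	BIT(0)
#define MY_MAC_LINK_UP		BIT(1)

struct my_priv {
	void __iomem *base;
	struct phy_device *phydev;
};

static irqreturn_t my_mac_isr(int irq, void *dev_id)
{
	struct my_priv *priv = netdev_priv((struct net_device *)dev_id);
	u32 status = readl(priv->base + MY_MAC_IRQ_STATUS);

	if (status & MY_MAC_LINK_EVENT)
		/* phylib runs the adjust_link callback (here,
		 * bcmgenet_mii_setup) from its state machine.
		 */
		phy_mac_interrupt(priv->phydev,
				  !!(status & MY_MAC_LINK_UP));

	return IRQ_HANDLED;
}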
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b6d0ba86c71..37422af9ef13 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6322,6 +6322,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 	.n_alarm	= 0,
 	.n_ext_ts	= 0,
 	.n_per_out	= 1,
+	.n_pins		= 0,
 	.pps		= 0,
 	.adjfreq	= tg3_ptp_adjfreq,
 	.adjtime	= tg3_ptp_adjtime,
@@ -6593,7 +6594,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		pkts_compl++;
 		bytes_compl += skb->len;

-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);

 		if (unlikely(tx_bug)) {
 			tg3_tx_recover(tp);
@@ -6924,7 +6925,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)

 		if (len > (tp->dev->mtu + ETH_HLEN) &&
 		    skb->protocol != htons(ETH_P_8021Q)) {
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			goto drop_it_no_recycle;
 		}

@@ -7807,7 +7808,7 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 					   PCI_DMA_TODEVICE);
 		/* Make sure the mapping succeeded */
 		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
-			dev_kfree_skb(new_skb);
+			dev_kfree_skb_any(new_skb);
 			ret = -1;
 		} else {
 			u32 save_entry = *entry;
@@ -7822,13 +7823,13 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 					    new_skb->len, base_flags,
 					    mss, vlan)) {
 				tg3_tx_skb_unmap(tnapi, save_entry, -1);
-				dev_kfree_skb(new_skb);
+				dev_kfree_skb_any(new_skb);
 				ret = -1;
 			}
 		}
 	}

-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	*pskb = new_skb;
 	return ret;
 }
@@ -7871,7 +7872,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 	} while (segs);

 tg3_tso_bug_end:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);

 	return NETDEV_TX_OK;
 }
@@ -8093,7 +8094,7 @@ dma_error:
 	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 drop:
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 drop_nofree:
 	tp->tx_dropped++;
 	return NETDEV_TX_OK;
@@ -11361,12 +11362,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
 		msix_ent[i].vector = 0;
 	}

-	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
 	if (rc < 0) {
 		return false;
-	} else if (rc != 0) {
-		if (pci_enable_msix(tp->pdev, msix_ent, rc))
-			return false;
+	} else if (rc < tp->irq_cnt) {
 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
 			      tp->irq_cnt, rc);
 		tp->irq_cnt = rc;
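
The tg3 hunk above, and the bnad, cxgb3 and cxgb4 hunks further down, all perform the same conversion: the deprecated pci_enable_msix(), whose positive return value meant "only this many vectors available, retry", is replaced by pci_enable_msix_range(), which retries internally and returns either a vector count within [minvec, maxvec] or a negative errno. A minimal sketch of the two idioms under those semantics (MIN_VECS/MAX_VECS and the entry table are placeholders for driver-specific values):

#include <linux/pci.h>

static int enable_msix_sketch(struct pci_dev *pdev,
			      struct msix_entry *entries,
			      int min_vecs, int max_vecs)
{
	int nvec;

	/* Old style (removed by these patches): loop while
	 * pci_enable_msix() keeps returning a positive
	 * "only this many available" hint.
	 *
	 *	while ((nvec = pci_enable_msix(pdev, entries,
	 *				       max_vecs)) > 0)
	 *		max_vecs = nvec;
	 */

	/* New style: one call, the retry loop lives in the PCI core */
	nvec = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvec < 0)
		return nvec;	/* couldn't even get min_vecs */

	return nvec;		/* min_vecs <= nvec <= max_vecs */
}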
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187e82fb..a881e982a084 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2669,9 +2669,11 @@ bnad_enable_msix(struct bnad *bnad)
 	for (i = 0; i < bnad->msix_num; i++)
 		bnad->msix_table[i].entry = i;

-	ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
-	if (ret > 0) {
-		/* Not enough MSI-X vectors. */
+	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+				    1, bnad->msix_num);
+	if (ret < 0) {
+		goto intx_mode;
+	} else if (ret < bnad->msix_num) {
 		pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
 			ret, bnad->msix_num);

@@ -2684,18 +2686,11 @@ bnad_enable_msix(struct bnad *bnad)
 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
 			BNAD_MAILBOX_MSIX_VECTORS;

-		if (bnad->msix_num > ret)
-			goto intx_mode;
-
-		/* Try once more with adjusted numbers */
-		/* If this fails, fall back to INTx */
-		ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
-				      bnad->msix_num);
-		if (ret)
+		if (bnad->msix_num > ret) {
+			pci_disable_msix(bnad->pcidev);
 			goto intx_mode;
-
-	} else if (ret < 0)
-		goto intx_mode;
+		}
+	}

 	pci_intx(bnad->pcidev, 0);

@@ -2850,13 +2845,11 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 		}
 		if (unlikely((gso_size + skb_transport_offset(skb) +
 			      tcp_hdrlen(skb)) >= skb->len)) {
-			txqent->hdr.wi.opcode =
-				__constant_htons(BNA_TXQ_WI_SEND);
+			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
 			txqent->hdr.wi.lso_mss = 0;
 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
 		} else {
-			txqent->hdr.wi.opcode =
-				__constant_htons(BNA_TXQ_WI_SEND_LSO);
+			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
 			txqent->hdr.wi.lso_mss = htons(gso_size);
 		}

@@ -2870,7 +2863,7 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
 			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
 	} else {
-		txqent->hdr.wi.opcode = __constant_htons(BNA_TXQ_WI_SEND);
+		txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
 		txqent->hdr.wi.lso_mss = 0;

 		if (unlikely(skb->len > (bnad->netdev->mtu + ETH_HLEN))) {
@@ -2881,11 +2874,10 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		u8 proto = 0;

-		if (skb->protocol == __constant_htons(ETH_P_IP))
+		if (skb->protocol == htons(ETH_P_IP))
 			proto = ip_hdr(skb)->protocol;
 #ifdef NETIF_F_IPV6_CSUM
-		else if (skb->protocol ==
-			 __constant_htons(ETH_P_IPV6)) {
+		else if (skb->protocol == htons(ETH_P_IPV6)) {
 			/* nexthdr may not be TCP immediately. */
 			proto = ipv6_hdr(skb)->nexthdr;
 		}
@@ -2954,17 +2946,17 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Sanity checks for the skb */

 	if (unlikely(skb->len <= ETH_HLEN)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
 		return NETDEV_TX_OK;
 	}
 	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
 		return NETDEV_TX_OK;
 	}
 	if (unlikely(len == 0)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
 		return NETDEV_TX_OK;
 	}
@@ -2976,7 +2968,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	 * and the netif_tx_stop_all_queues() call.
 	 */
 	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
 		return NETDEV_TX_OK;
 	}
@@ -2989,7 +2981,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */

 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
 		return NETDEV_TX_OK;
 	}
@@ -3029,7 +3021,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)

 	/* Program the opcode, flags, frame_len, num_vectors in WI */
 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	txqent->hdr.wi.reserved = 0;
@@ -3055,7 +3047,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			/* Undo the changes starting at tcb->producer_index */
 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
 					   tcb->producer_index);
-			dev_kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
 			return NETDEV_TX_OK;
 		}
@@ -3067,8 +3059,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 			vect_id = 0;
 			BNA_QE_INDX_INC(prod, q_depth);
 			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
-			txqent->hdr.wi_ext.opcode =
-				__constant_htons(BNA_TXQ_WI_EXTENSION);
+			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
 			unmap = &unmap_q[prod];
 		}

@@ -3085,7 +3076,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	if (unlikely(len != skb->len)) {
 		/* Undo the changes starting at tcb->producer_index */
 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
 		return NETDEV_TX_OK;
 	}
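
The bnad hunks above also drop __constant_htons() in favor of plain htons(). Nothing changes at runtime: for a compile-time constant argument the kernel's byte-order helpers already fold htons() to a constant (they test __builtin_constant_p internally), so the __constant_* spelling is redundant in expression context, and checkpatch flags it. A small illustration:

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Both functions compile to the identical constant comparison; the
 * second form is the preferred spelling.
 */
static bool is_ipv4_old(const struct sk_buff *skb)
{
	return skb->protocol == __constant_htons(ETH_P_IP);
}

static bool is_ipv4_new(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);
}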
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index d0c38e01e99f..6116887d2880 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1045,7 +1045,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		mapping = dma_map_single(&bp->pdev->dev, skb->data,
 					 len, DMA_TO_DEVICE);
 		if (dma_mapping_error(&bp->pdev->dev, mapping)) {
-			kfree_skb(skb);
+			dev_kfree_skb_any(skb);
 			goto unlock;
 		}

diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index d2a183c3a6ce..521dfea44b83 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -897,7 +897,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		/* Check tx error on the last segment */
 		if (desc_get_tx_ls(p)) {
 			desc_get_tx_status(priv, p);
-			dev_kfree_skb(skb);
+			dev_consume_skb_any(skb);
 		}

 		priv->tx_skbuff[entry] = NULL;
@@ -1105,7 +1105,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	len = skb_headlen(skb);
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
@@ -1169,7 +1169,7 @@ dma_err:
 	desc = first;
 	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
 			 desc_get_buf_len(desc), DMA_TO_DEVICE);
-	dev_kfree_skb(skb);
+	dev_kfree_skb_any(skb);
 	return NETDEV_TX_OK;
 }

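
A pattern worth calling out across the tg3, bnad, macb and xgmac hunks: frees on the transmit path move from dev_kfree_skb()/kfree_skb() to dev_kfree_skb_any(), and frees of successfully sent packets to dev_consume_skb_any(). The reason is that ndo_start_xmit and TX completion can run with IRQs disabled (for instance via netpoll/netconsole), where dev_kfree_skb() is not safe; the _any variants check the context and defer the free when necessary. The consume/kfree split additionally keeps drop monitoring honest: consume marks a normal completion, kfree marks a genuine drop. A sketch of the rule of thumb:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->len < ETH_HLEN) {
		/* Error path: this is a *drop*, and we may be running
		 * with IRQs off (netpoll), so use the _any variant.
		 */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... queue the packet to hardware ... */

	return NETDEV_TX_OK;
}

static void my_tx_complete(struct sk_buff *skb)
{
	/* Success path: the packet was transmitted, so "consume" it;
	 * drop monitors (e.g. dropwatch) won't count it as a loss.
	 */
	dev_consume_skb_any(skb);
}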
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d77334d7d9..07bbb711b7e5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
 {
 	struct msix_entry entries[SGE_QSETS + 1];
 	int vectors;
-	int i, err;
+	int i;

 	vectors = ARRAY_SIZE(entries);
 	for (i = 0; i < vectors; ++i)
 		entries[i].entry = i;

-	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
-		vectors = err;
-
-	if (err < 0)
-		pci_disable_msix(adap->pdev);
-
-	if (!err && vectors < (adap->params.nports + 1)) {
-		pci_disable_msix(adap->pdev);
-		err = -1;
-	}
+	vectors = pci_enable_msix_range(adap->pdev, entries,
+					adap->params.nports + 1, vectors);
+	if (vectors < 0)
+		return vectors;

-	if (!err) {
-		for (i = 0; i < vectors; ++i)
-			adap->msix_info[i].vec = entries[i].vector;
-		adap->msix_nvectors = vectors;
-	}
+	for (i = 0; i < vectors; ++i)
+		adap->msix_info[i].vec = entries[i].vector;
+	adap->msix_nvectors = vectors;

-	return err;
+	return 0;
 }

 static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 632b318eb38a..8b069f96e920 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -298,7 +298,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
 		if (need_unmap)
 			unmap_skb(d->skb, q, cidx, pdev);
 		if (d->eop) {
-			kfree_skb(d->skb);
+			dev_consume_skb_any(d->skb);
 			d->skb = NULL;
 		}
 	}
@@ -1188,7 +1188,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
 		cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
 				      V_WR_TID(q->token));
 		wr_gen2(d, gen);
-		kfree_skb(skb);
+		dev_consume_skb_any(skb);
 		return;
 	}

@@ -1233,7 +1233,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * anything shorter than an Ethernet header.
 	 */
 	if (unlikely(skb->len < ETH_HLEN)) {
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b30b9ed..32db37709263 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
 	SERNUM_LEN = 24,	/* Serial # length */
 	EC_LEN     = 16,	/* E/C length */
 	ID_LEN     = 16,	/* ID length */
+	PN_LEN     = 16,	/* Part Number length */
 };

 enum {
@@ -254,6 +255,7 @@ struct vpd_params {
 	u8 ec[EC_LEN + 1];
 	u8 sn[SERNUM_LEN + 1];
 	u8 id[ID_LEN + 1];
+	u8 pn[PN_LEN + 1];
 };

 struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
 	unsigned char bypass;

 	unsigned int ofldq_wr_cred;
+	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */
 };

 #include "t4fw_api.h"
@@ -497,6 +500,7 @@ struct sge_txq {
 	spinlock_t db_lock;
 	int db_disabled;
 	unsigned short db_pidx;
+	unsigned short db_pidx_inc;
 	u64 udb;
 };

@@ -553,8 +557,13 @@ struct sge {
 	u32 pktshift;		    /* padding between CPL & packet data */
 	u32 fl_align;		    /* response queue message alignment */
 	u32 fl_starve_thres;	    /* Free List starvation threshold */
-	unsigned int starve_thres;
-	u8 idma_state[2];
+
+	/* State variables for detecting an SGE Ingress DMA hang */
+	unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
+	unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
+	unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
+	unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */
+
 	unsigned int egr_start;
 	unsigned int ingr_start;
 	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
@@ -957,7 +966,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
 	       u64 *parity);
 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
 		u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
@@ -1029,4 +1038,5 @@ void t4_db_dropped(struct adapter *adapter);
 int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
 			 u32 addr, u32 val);
+void t4_sge_decode_idma_state(struct adapter *adapter, int state);
 #endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488767d9..cc04d090354c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 	CH_DEVICE(0x5011, 4),
 	CH_DEVICE(0x5012, 4),
 	CH_DEVICE(0x5013, 4),
+	CH_DEVICE(0x5014, 4),
+	CH_DEVICE(0x5015, 4),
 	CH_DEVICE(0x5401, 4),
 	CH_DEVICE(0x5402, 4),
 	CH_DEVICE(0x5403, 4),
@@ -273,6 +275,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
 	CH_DEVICE(0x5411, 4),
 	CH_DEVICE(0x5412, 4),
 	CH_DEVICE(0x5413, 4),
+	CH_DEVICE(0x5414, 4),
+	CH_DEVICE(0x5415, 4),
 	{ 0, }
 };

@@ -423,15 +427,18 @@ static void link_report(struct net_device *dev)
 	const struct port_info *p = netdev_priv(dev);

 	switch (p->link_cfg.speed) {
-	case SPEED_10000:
+	case 10000:
 		s = "10Gbps";
 		break;
-	case SPEED_1000:
+	case 1000:
 		s = "1000Mbps";
 		break;
-	case SPEED_100:
+	case 100:
 		s = "100Mbps";
 		break;
+	case 40000:
+		s = "40Gbps";
+		break;
 	}

 	netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2068,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		0x40200, 0x40298,
 		0x402ac, 0x4033c,
 		0x403f8, 0x403fc,
-		0x41300, 0x413c4,
+		0x41304, 0x413c4,
 		0x41400, 0x4141c,
 		0x41480, 0x414d0,
 		0x44000, 0x44078,
@@ -2089,7 +2096,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
 		0x48200, 0x48298,
 		0x482ac, 0x4833c,
 		0x483f8, 0x483fc,
-		0x49300, 0x493c4,
+		0x49304, 0x493c4,
 		0x49400, 0x4941c,
 		0x49480, 0x494d0,
 		0x4c000, 0x4c078,
@@ -2199,6 +2206,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
 		v |= SUPPORTED_FIBRE;
+	else if (type == FW_PORT_TYPE_BP40_BA)
+		v |= SUPPORTED_40000baseSR4_Full;

 	if (caps & FW_PORT_CAP_ANEG)
 		v |= SUPPORTED_Autoneg;
@@ -2215,6 +2224,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
 		v |= FW_PORT_CAP_SPEED_1G;
 	if (caps & ADVERTISED_10000baseT_Full)
 		v |= FW_PORT_CAP_SPEED_10G;
+	if (caps & ADVERTISED_40000baseSR4_Full)
+		v |= FW_PORT_CAP_SPEED_40G;
 	return v;
 }
@@ -2263,12 +2274,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)

 static unsigned int speed_to_caps(int speed)
 {
-	if (speed == SPEED_100)
+	if (speed == 100)
 		return FW_PORT_CAP_SPEED_100M;
-	if (speed == SPEED_1000)
+	if (speed == 1000)
 		return FW_PORT_CAP_SPEED_1G;
-	if (speed == SPEED_10000)
+	if (speed == 10000)
 		return FW_PORT_CAP_SPEED_10G;
+	if (speed == 40000)
+		return FW_PORT_CAP_SPEED_40G;
 	return 0;
 }

@@ -2296,8 +2309,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	if (cmd->autoneg == AUTONEG_DISABLE) {
 		cap = speed_to_caps(speed);

-		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
-		    (speed == SPEED_10000))
+		if (!(lc->supported & cap) ||
+		    (speed == 1000) ||
+		    (speed == 10000) ||
+		    (speed == 40000))
 			return -EINVAL;
 		lc->requested_speed = cap;
 		lc->advertising = 0;
@@ -3563,14 +3578,25 @@ static void drain_db_fifo(struct adapter *adap, int usecs)

 static void disable_txq_db(struct sge_txq *q)
 {
-	spin_lock_irq(&q->db_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->db_lock, flags);
 	q->db_disabled = 1;
-	spin_unlock_irq(&q->db_lock);
+	spin_unlock_irqrestore(&q->db_lock, flags);
 }

-static void enable_txq_db(struct sge_txq *q)
+static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
 {
 	spin_lock_irq(&q->db_lock);
+	if (q->db_pidx_inc) {
+		/* Make sure that all writes to the TX descriptors
+		 * are committed before we tell HW about them.
+		 */
+		wmb();
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+			     QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
+		q->db_pidx_inc = 0;
+	}
 	q->db_disabled = 0;
 	spin_unlock_irq(&q->db_lock);
 }
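
The enable_txq_db()/disable_txq_db() change above pairs with the ring_tx_db() change further down in sge.c: while doorbells are disabled (during doorbell-FIFO recovery), producer-index increments are accumulated in the new db_pidx_inc field instead of being written to the hardware, and re-enabling flushes the accumulated count with a single doorbell write after a wmb(). In sketch form, the accumulate/flush protocol looks like this (all names illustrative; hw_doorbell() stands in for the real MMIO write):

#include <linux/spinlock.h>
#include <linux/types.h>

void hw_doorbell(u16 n);	/* hypothetical MMIO doorbell write */

struct txq_state {
	spinlock_t lock;
	bool db_disabled;
	u16 db_pidx_inc;	/* increments deferred while disabled */
};

static void ring(struct txq_state *q, u16 n)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	if (q->db_disabled)
		q->db_pidx_inc += n;	/* defer: hardware not told yet */
	else
		hw_doorbell(n);
	spin_unlock_irqrestore(&q->lock, flags);
}

static void enable(struct txq_state *q)
{
	spin_lock_irq(&q->lock);
	if (q->db_pidx_inc) {
		wmb();				/* descriptors first */
		hw_doorbell(q->db_pidx_inc);	/* one catch-up write */
		q->db_pidx_inc = 0;
	}
	q->db_disabled = false;
	spin_unlock_irq(&q->lock);
}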
@@ -3592,11 +3618,32 @@ static void enable_dbs(struct adapter *adap)
 	int i;

 	for_each_ethrxq(&adap->sge, i)
-		enable_txq_db(&adap->sge.ethtxq[i].q);
+		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
 	for_each_ofldrxq(&adap->sge, i)
-		enable_txq_db(&adap->sge.ofldtxq[i].q);
+		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
 	for_each_port(adap, i)
-		enable_txq_db(&adap->sge.ctrlq[i].q);
+		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
+}
+
+static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
+{
+	if (adap->uld_handle[CXGB4_ULD_RDMA])
+		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
+				cmd);
+}
+
+static void process_db_full(struct work_struct *work)
+{
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, db_full_task);
+
+	drain_db_fifo(adap, dbfifo_drain_delay);
+	enable_dbs(adap);
+	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
+	t4_set_reg_field(adap, SGE_INT_ENABLE3,
+			 DBFIFO_HP_INT | DBFIFO_LP_INT,
+			 DBFIFO_HP_INT | DBFIFO_LP_INT);
 }

 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -3604,7 +3651,7 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
 	u16 hw_pidx, hw_cidx;
 	int ret;

-	spin_lock_bh(&q->db_lock);
+	spin_lock_irq(&q->db_lock);
 	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
 	if (ret)
 		goto out;
@@ -3621,7 +3668,8 @@ static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
 	}
 out:
 	q->db_disabled = 0;
-	spin_unlock_bh(&q->db_lock);
+	q->db_pidx_inc = 0;
+	spin_unlock_irq(&q->db_lock);
 	if (ret)
 		CH_WARN(adap, "DB drop recovery failed.\n");
 }
@@ -3637,29 +3685,6 @@ static void recover_all_queues(struct adapter *adap)
 		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
 }

-static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
-{
-	mutex_lock(&uld_mutex);
-	if (adap->uld_handle[CXGB4_ULD_RDMA])
-		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
-				cmd);
-	mutex_unlock(&uld_mutex);
-}
-
-static void process_db_full(struct work_struct *work)
-{
-	struct adapter *adap;
-
-	adap = container_of(work, struct adapter, db_full_task);
-
-	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
-	drain_db_fifo(adap, dbfifo_drain_delay);
-	t4_set_reg_field(adap, SGE_INT_ENABLE3,
-			 DBFIFO_HP_INT | DBFIFO_LP_INT,
-			 DBFIFO_HP_INT | DBFIFO_LP_INT);
-	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
-}
-
 static void process_db_drop(struct work_struct *work)
 {
 	struct adapter *adap;
@@ -3667,11 +3692,13 @@ static void process_db_drop(struct work_struct *work)
 	adap = container_of(work, struct adapter, db_drop_task);

 	if (is_t4(adap->params.chip)) {
-		disable_dbs(adap);
+		drain_db_fifo(adap, dbfifo_drain_delay);
 		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
-		drain_db_fifo(adap, 1);
+		drain_db_fifo(adap, dbfifo_drain_delay);
 		recover_all_queues(adap);
+		drain_db_fifo(adap, dbfifo_drain_delay);
 		enable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
 	} else {
 		u32 dropped_db = t4_read_reg(adap, 0x010ac);
 		u16 qid = (dropped_db >> 15) & 0x1ffff;
@@ -3712,6 +3739,8 @@ static void process_db_drop(struct work_struct *work)
 void t4_db_full(struct adapter *adap)
 {
 	if (is_t4(adap->params.chip)) {
+		disable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
 		t4_set_reg_field(adap, SGE_INT_ENABLE3,
 				 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
 		queue_work(workq, &adap->db_full_task);
@@ -3720,8 +3749,11 @@ void t4_db_full(struct adapter *adap)

 void t4_db_dropped(struct adapter *adap)
 {
-	if (is_t4(adap->params.chip))
-		queue_work(workq, &adap->db_drop_task);
+	if (is_t4(adap->params.chip)) {
+		disable_dbs(adap);
+		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
+	}
+	queue_work(workq, &adap->db_drop_task);
 }

 static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3765,6 +3797,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
 	lli.sge_pktshift = adap->sge.pktshift;
 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;

 	handle = ulds[uld].add(&lli);
 	if (IS_ERR(handle)) {
@@ -5370,6 +5403,21 @@ static int adap_init0(struct adapter *adap)
 	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

 	/*
+	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+	 * capability.  Earlier versions of the firmware didn't have the
+	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+	 * permission to use ULPTX MEMWRITE DSGL.
+	 */
+	if (is_t4(adap->params.chip)) {
+		adap->params.ulptx_memwrite_dsgl = false;
+	} else {
+		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+				      1, params, val);
+		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+	}
+
+	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
 	 */
@@ -5603,9 +5651,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
 	.resume         = eeh_resume,
 };

-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
 {
-	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
 }

 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5678,7 @@ static void cfg_queues(struct adapter *adap)
 	int i, q10g = 0, n10g = 0, qidx = 0;

 	for_each_port(adap, i)
-		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);

 	/*
 	 * We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5693,7 @@ static void cfg_queues(struct adapter *adap)
 		struct port_info *pi = adap2pinfo(adap, i);

 		pi->first_qset = qidx;
-		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
 		qidx += pi->nqsets;
 	}

@@ -5737,7 +5786,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
 static int enable_msix(struct adapter *adap)
 {
 	int ofld_need = 0;
-	int i, err, want, need;
+	int i, want, need;
 	struct sge *s = &adap->sge;
 	unsigned int nchan = adap->params.nports;
 	struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5802,30 @@ static int enable_msix(struct adapter *adap)
 	}
 	need = adap->params.nports + EXTRA_VECS + ofld_need;

-	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
-		want = err;
+	want = pci_enable_msix_range(adap->pdev, entries, need, want);
+	if (want < 0)
+		return want;

-	if (!err) {
-		/*
-		 * Distribute available vectors to the various queue groups.
-		 * Every group gets its minimum requirement and NIC gets top
-		 * priority for leftovers.
-		 */
-		i = want - EXTRA_VECS - ofld_need;
-		if (i < s->max_ethqsets) {
-			s->max_ethqsets = i;
-			if (i < s->ethqsets)
-				reduce_ethqs(adap, i);
-		}
-		if (is_offload(adap)) {
-			i = want - EXTRA_VECS - s->max_ethqsets;
-			i -= ofld_need - nchan;
-			s->ofldqsets = (i / nchan) * nchan;  /* round down */
-		}
-		for (i = 0; i < want; ++i)
-			adap->msix_info[i].vec = entries[i].vector;
-	} else if (err > 0)
-		dev_info(adap->pdev_dev,
-			 "only %d MSI-X vectors left, not using MSI-X\n", err);
-	return err;
+	/*
+	 * Distribute available vectors to the various queue groups.
+	 * Every group gets its minimum requirement and NIC gets top
+	 * priority for leftovers.
+	 */
+	i = want - EXTRA_VECS - ofld_need;
+	if (i < s->max_ethqsets) {
+		s->max_ethqsets = i;
+		if (i < s->ethqsets)
+			reduce_ethqs(adap, i);
+	}
+	if (is_offload(adap)) {
+		i = want - EXTRA_VECS - s->max_ethqsets;
+		i -= ofld_need - nchan;
+		s->ofldqsets = (i / nchan) * nchan;  /* round down */
+	}
+	for (i = 0; i < want; ++i)
+		adap->msix_info[i].vec = entries[i].vector;
+
+	return 0;
 }

 #undef EXTRA_VECS
@@ -5801,11 +5848,6 @@ static int init_rss(struct adapter *adap)

 static void print_port_info(const struct net_device *dev)
 {
-	static const char *base[] = {
-		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
-		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
-	};
-
 	char buf[80];
 	char *bufp = buf;
 	const char *spd = "";
@@ -5823,9 +5865,11 @@ static void print_port_info(const struct net_device *dev)
 		bufp += sprintf(bufp, "1000/");
 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
 		bufp += sprintf(bufp, "10G/");
+	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+		bufp += sprintf(bufp, "40G/");
 	if (bufp != buf)
 		--bufp;
-	sprintf(bufp, "BASE-%s", base[pi->port_type]);
+	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
 		    adap->params.vpd.id,
@@ -5833,8 +5877,8 @@ static void print_port_info(const struct net_device *dev)
 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
 		    (adap->flags & USING_MSIX) ? " MSI-X" :
 		    (adap->flags & USING_MSI) ? " MSI" : "");
-	netdev_info(dev, "S/N: %s, E/C: %s\n",
-		    adap->params.vpd.sn, adap->params.vpd.ec);
+	netdev_info(dev, "S/N: %s, P/N: %s\n",
+		    adap->params.vpd.sn, adap->params.vpd.pn);
 }

 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82533e4..e274a047528f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
 					     /* packet data */
 	bool enable_fw_ofld_conn;            /* Enable connection through fw */
 					     /* WR */
+	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };

 struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64fcf19..ca95cf2954eb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -93,6 +93,16 @@
  */
 #define TX_QCHECK_PERIOD (HZ / 2)

+/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
+ * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
+ * State Machines in the same state for this amount of time (in HZ) then we'll
+ * issue a warning about a potential hang.  We'll repeat the warning as the
+ * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
+ * the situation clears.  If the situation clears, we'll note that as well.
+ */
+#define SGE_IDMA_WARN_THRESH (1 * HZ)
+#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
+
 /*
  * Max number of Tx descriptors to be reclaimed by the Tx timer.
  */
@@ -373,7 +383,7 @@ static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
 		if (d->skb) {                       /* an SGL is present */
 			if (unmap)
 				unmap_sgl(dev, d->skb, d->sgl, q);
-			kfree_skb(d->skb);
+			dev_consume_skb_any(d->skb);
 			d->skb = NULL;
 		}
 		++d;
@@ -706,11 +716,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
  *	@skb: the packet
  *
  *	Returns whether an Ethernet packet is small enough to fit as
- *	immediate data.
+ *	immediate data. Return value corresponds to headroom required.
  */
 static inline int is_eth_imm(const struct sk_buff *skb)
 {
-	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+	int hdrlen = skb_shinfo(skb)->gso_size ?
+			sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+	hdrlen += sizeof(struct cpl_tx_pkt);
+	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+		return hdrlen;
+	return 0;
 }

 /**
@@ -723,9 +739,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
 static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
 {
 	unsigned int flits;
+	int hdrlen = is_eth_imm(skb);

-	if (is_eth_imm(skb))
-		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+	if (hdrlen)
+		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

 	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
 	if (skb_shinfo(skb)->gso_size)
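
The reworked is_eth_imm() now returns the CPL header length instead of a boolean, so calc_tx_flits() can account for the extra LSO CPL that a GSO packet carries even when inlined. A worked illustration, with made-up sizes (the real MAX_IMM_TX_PKT_LEN and structure sizes differ; these are placeholders chosen only to show the arithmetic):

/* Illustrative arithmetic only; all three sizes below are made up. */
enum {
	IMM_LEN = 256,	/* stand-in for MAX_IMM_TX_PKT_LEN */
	CPL_LEN = 16,	/* stand-in for sizeof(struct cpl_tx_pkt) */
	LSO_LEN = 24,	/* stand-in for sizeof(cpl_tx_pkt_lso_core) */
};

static int is_imm(unsigned int skb_len, int gso)
{
	int hdrlen = (gso ? LSO_LEN : 0) + CPL_LEN;

	return skb_len <= IMM_LEN - hdrlen ? hdrlen : 0;
}

/* A 230-byte plain packet: hdrlen = 16, and 230 <= 240, so it is
 * inlined and needs DIV_ROUND_UP(230 + 16, 8) = 31 flits (8-byte
 * words).  The same 230 bytes as a GSO packet: hdrlen = 40, and
 * 230 > 216, so is_imm() returns 0 and the SGL path is taken.
 */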
@@ -843,9 +860,10 @@ static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 {
 	unsigned int *wr, index;
+	unsigned long flags;

 	wmb();            /* write descriptors before telling HW */
-	spin_lock(&q->db_lock);
+	spin_lock_irqsave(&q->db_lock, flags);
 	if (!q->db_disabled) {
 		if (is_t4(adap->params.chip)) {
 			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
@@ -861,9 +879,10 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
 			writel(n, adap->bar2 + q->udb + 8);
 			wmb();
 		}
-	}
+	} else
+		q->db_pidx_inc += n;
 	q->db_pidx = q->pidx;
-	spin_unlock(&q->db_lock);
+	spin_unlock_irqrestore(&q->db_lock, flags);
 }
868 887
869/** 888/**
@@ -971,6 +990,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
  */
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	int len;
 	u32 wr_mid;
 	u64 cntrl, *end;
 	int qidx, credits;
@@ -982,13 +1002,14 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct cpl_tx_pkt_core *cpl;
 	const struct skb_shared_info *ssi;
 	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+	bool immediate = false;

 	/*
 	 * The chip min packet length is 10 octets but play safe and reject
 	 * anything shorter than an Ethernet header.
 	 */
 	if (unlikely(skb->len < ETH_HLEN)) {
-out_free:	dev_kfree_skb(skb);
+out_free:	dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}

@@ -1011,7 +1032,10 @@ out_free: dev_kfree_skb(skb);
 		return NETDEV_TX_BUSY;
 	}

-	if (!is_eth_imm(skb) &&
+	if (is_eth_imm(skb))
+		immediate = true;
+
+	if (!immediate &&
 	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
 		q->mapping_err++;
 		goto out_free;
@@ -1028,6 +1052,7 @@ out_free: dev_kfree_skb(skb);
 	wr->r3 = cpu_to_be64(0);
 	end = (u64 *)wr + flits;

+	len = immediate ? skb->len : 0;
 	ssi = skb_shinfo(skb);
 	if (ssi->gso_size) {
 		struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1060,9 @@ out_free: dev_kfree_skb(skb);
 		int l3hdr_len = skb_network_header_len(skb);
 		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

+		len += sizeof(*lso);
 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
-				       FW_WR_IMMDLEN(sizeof(*lso)));
+				       FW_WR_IMMDLEN(len));
 		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
 					LSO_FIRST_SLICE | LSO_LAST_SLICE |
 					LSO_IPV6(v6) |
@@ -1054,9 +1080,7 @@ out_free: dev_kfree_skb(skb);
 		q->tso++;
 		q->tx_cso += ssi->gso_segs;
 	} else {
-		int len;
-
-		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+		len += sizeof(*cpl);
 		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
 				       FW_WR_IMMDLEN(len));
 		cpl = (void *)(wr + 1);
@@ -1078,9 +1102,9 @@ out_free: dev_kfree_skb(skb);
 	cpl->len = htons(skb->len);
 	cpl->ctrl1 = cpu_to_be64(cntrl);

-	if (is_eth_imm(skb)) {
+	if (immediate) {
 		inline_tx_skb(skb, &q->q, cpl + 1);
-		dev_kfree_skb(skb);
+		dev_consume_skb_any(skb);
 	} else {
 		int last_desc;

@@ -1467,8 +1491,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
 {
 	unsigned int idx = skb_txq(skb);

-	if (unlikely(is_ctrl_pkt(skb)))
+	if (unlikely(is_ctrl_pkt(skb))) {
+		/* Single ctrl queue is a requirement for LE workaround path */
+		if (adap->tids.nsftids)
+			idx = 0;
 		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+	}
 	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
 }

@@ -1992,7 +2020,7 @@ irq_handler_t t4_intr_handler(struct adapter *adap)
 static void sge_rx_timer_cb(unsigned long data)
 {
 	unsigned long m;
-	unsigned int i, cnt[2];
+	unsigned int i, idma_same_state_cnt[2];
 	struct adapter *adap = (struct adapter *)data;
 	struct sge *s = &adap->sge;

@@ -2015,21 +2043,64 @@ static void sge_rx_timer_cb(unsigned long data)
 	}

 	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
-	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
-	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);

-	for (i = 0; i < 2; i++)
-		if (cnt[i] >= s->starve_thres) {
-			if (s->idma_state[i] || cnt[i] == 0xffffffff)
-				continue;
-			s->idma_state[i] = 1;
-			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
-			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
-			dev_warn(adap->pdev_dev,
-				 "SGE idma%u starvation detected for "
-				 "queue %lu\n", i, m & 0xffff);
-		} else if (s->idma_state[i])
-			s->idma_state[i] = 0;
+	for (i = 0; i < 2; i++) {
+		u32 debug0, debug11;
+
+		/* If the Ingress DMA Same State Counter ("timer") is less
+		 * than 1s, then we can reset our synthesized Stall Timer and
+		 * continue.  If we have previously emitted warnings about a
+		 * potential stalled Ingress Queue, issue a note indicating
+		 * that the Ingress Queue has resumed forward progress.
+		 */
+		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
+			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
+				CH_WARN(adap, "SGE idma%d, queue%u,resumed after %d sec\n",
+					i, s->idma_qid[i],
+					s->idma_stalled[i]/HZ);
+			s->idma_stalled[i] = 0;
+			continue;
+		}
+
+		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
+		 * domain.  The first time we get here it'll be because we
+		 * passed the 1s Threshold; each additional time it'll be
+		 * because the RX Timer Callback is being fired on its regular
+		 * schedule.
+		 *
+		 * If the stall is below our Potential Hung Ingress Queue
+		 * Warning Threshold, continue.
+		 */
+		if (s->idma_stalled[i] == 0)
+			s->idma_stalled[i] = HZ;
+		else
+			s->idma_stalled[i] += RX_QCHECK_PERIOD;
+
+		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
+			continue;
+
+		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
+		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
+			continue;
+
+		/* Read and save the SGE IDMA State and Queue ID information.
+		 * We do this every time in case it changes across time ...
+		 */
+		t4_write_reg(adap, SGE_DEBUG_INDEX, 0);
+		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
+
+		t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
+
+		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
+			i, s->idma_qid[i], s->idma_state[i],
+			s->idma_stalled[i]/HZ, debug0, debug11);
+		t4_sge_decode_idma_state(adap, s->idma_state[i]);
+	}

 	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
 }
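
The synthesized stall timer above advances in RX_QCHECK_PERIOD steps once the hardware same-state counter crosses the 1-second threshold, and the `(idma_stalled - HZ) % SGE_IDMA_WARN_REPEAT` test throttles the warning. A quick host-side C sketch of the cadence, assuming HZ = 1000 (the constants mirror the hunk above; this is not kernel code):

#include <stdio.h>

#define HZ 1000			/* assumption for this sketch */
#define RX_QCHECK_PERIOD (HZ / 2)
#define SGE_IDMA_WARN_THRESH (1 * HZ)
#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)

int main(void)
{
	unsigned int stalled = 0;
	int tick;

	/* 50 timer callbacks with the counter continuously stalled */
	for (tick = 0; tick < 50; tick++) {
		stalled = stalled ? stalled + RX_QCHECK_PERIOD : HZ;
		if (stalled < SGE_IDMA_WARN_THRESH)
			continue;
		if ((stalled - HZ) % SGE_IDMA_WARN_REPEAT == 0)
			printf("warn at %us\n", stalled / HZ);
	}
	return 0;	/* warns at 1s, then every 10s: 11s, 21s, ... */
}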
@@ -2580,11 +2651,19 @@ static int t4_sge_init_soft(struct adapter *adap)
2580 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF); 2651 fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2581 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF); 2652 fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2582 2653
2654 /* We only bother using the Large Page logic if the Large Page Buffer
2655 * is larger than our Page Size Buffer.
2656 */
2657 if (fl_large_pg <= fl_small_pg)
2658 fl_large_pg = 0;
2659
2583 #undef READ_FL_BUF 2660 #undef READ_FL_BUF
2584 2661
2662 /* The Page Size Buffer must be exactly equal to our Page Size and the
2663 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2664 */
2585 	if (fl_small_pg != PAGE_SIZE ||
2586 	    (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
2587 				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
2665 	if (fl_small_pg != PAGE_SIZE ||
2666 	    (fl_large_pg & (fl_large_pg-1)) != 0) {
2588 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", 2667 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2589 fl_small_pg, fl_large_pg); 2668 fl_small_pg, fl_large_pg);
2590 return -EINVAL; 2669 return -EINVAL;
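
The tightened check above works because the new clamp guarantees fl_large_pg is either 0 (Large Page logic disabled) or strictly larger than fl_small_pg, so the remaining test only needs the classic power-of-two idiom, which conveniently also accepts 0. The acceptance test in isolation (helper name is illustrative):

#include <stdbool.h>

/* True for 0 (feature disabled) or any power of two; x & (x - 1)
 * clears the lowest set bit, so only 0 and powers of two yield 0. */
static bool zero_or_pow2(unsigned long x)
{
	return (x & (x - 1)) == 0;
}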
@@ -2699,8 +2778,8 @@ static int t4_sge_init_hard(struct adapter *adap)
2699int t4_sge_init(struct adapter *adap) 2778int t4_sge_init(struct adapter *adap)
2700{ 2779{
2701 struct sge *s = &adap->sge; 2780 struct sge *s = &adap->sge;
2702 	u32 sge_control;
2703 	int ret;
2781 	u32 sge_control, sge_conm_ctrl;
2782 	int ret, egress_threshold;
2704 2783
2705 /* 2784 /*
2706 * Ingress Padding Boundary and Egress Status Page Size are set up by 2785 * Ingress Padding Boundary and Egress Status Page Size are set up by
@@ -2725,15 +2804,24 @@ int t4_sge_init(struct adapter *adap)
2725 * SGE's Egress Congestion Threshold. If it isn't, then we can get 2804 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2726 * stuck waiting for new packets while the SGE is waiting for us to 2805 * stuck waiting for new packets while the SGE is waiting for us to
2727 * give it more Free List entries. (Note that the SGE's Egress 2806 * give it more Free List entries. (Note that the SGE's Egress
2728 	 * Congestion Threshold is in units of 2 Free List pointers.)
2807 	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
2808 * there was only a single field to control this. For T5 there's the
2809 * original field which now only applies to Unpacked Mode Free List
2810 * buffers and a new field which only applies to Packed Mode Free List
2811 * buffers.
2729 */ 2812 */
2730 	s->fl_starve_thres
2731 		= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
2813 	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL);
2814 	if (is_t4(adap->params.chip))
2815 egress_threshold = EGRTHRESHOLD_GET(sge_conm_ctrl);
2816 else
2817 egress_threshold = EGRTHRESHOLDPACKING_GET(sge_conm_ctrl);
2818 s->fl_starve_thres = 2*egress_threshold + 1;
2732 2819
2733 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); 2820 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
2734 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); 2821 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
2735 	s->starve_thres = core_ticks_per_usec(adap) * 1000000;	/* 1 s */
2736 	s->idma_state[0] = s->idma_state[1] = 0;
2822 	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */
2823 	s->idma_stalled[0] = 0;
2824 s->idma_stalled[1] = 0;
2737 spin_lock_init(&s->intrq_lock); 2825 spin_lock_init(&s->intrq_lock);
2738 2826
2739 return 0; 2827 return 0;
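
The reworked t4_sge_init() reads SGE_CONM_CTRL once and picks the field by chip revision, using the EGRTHRESHOLD_GET/EGRTHRESHOLDPACKING_GET accessors (the latter added to t4_regs.h further down in this diff); the final 2*threshold + 1 converts the hardware's units of two Free List pointers into entries, with one entry of headroom. A compact sketch of the same shift-and-mask selection (the field offsets and widths here are made up, not the real register layout):

#include <stdint.h>

#define EGR_SHIFT      8       /* illustrative offsets/widths */
#define EGR_MASK       0x3fU
#define EGR_PACK_SHIFT 14
#define EGR_PACK_MASK  0x3fU

static unsigned int fl_starve_threshold(uint32_t conm_ctrl, int is_t4_chip)
{
	unsigned int thr = is_t4_chip ?
		(conm_ctrl >> EGR_SHIFT) & EGR_MASK :
		(conm_ctrl >> EGR_PACK_SHIFT) & EGR_PACK_MASK;

	/* HW counts in units of 2 FL pointers; keep one entry of slack. */
	return 2 * thr + 1;
}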
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c109343d570..fb2fe65903c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
573{ 573{
574 u32 cclk_param, cclk_val; 574 u32 cclk_param, cclk_val;
575 int i, ret, addr; 575 int i, ret, addr;
576 	int ec, sn;
576 	int ec, sn, pn;
577 u8 *vpd, csum; 577 u8 *vpd, csum;
578 unsigned int vpdr_len, kw_offset, id_len; 578 unsigned int vpdr_len, kw_offset, id_len;
579 579
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
638 638
639 FIND_VPD_KW(ec, "EC"); 639 FIND_VPD_KW(ec, "EC");
640 FIND_VPD_KW(sn, "SN"); 640 FIND_VPD_KW(sn, "SN");
641 FIND_VPD_KW(pn, "PN");
641#undef FIND_VPD_KW 642#undef FIND_VPD_KW
642 643
643 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); 644 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
647 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 648 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
648 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 649 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
649 strim(p->sn); 650 strim(p->sn);
651 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
652 strim(p->pn);
650 653
651 /* 654 /*
652 * Ask firmware for the Core Clock since it knows how to translate the 655 * Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
1155} 1158}
1156 1159
1157#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ 1160#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
1158 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
1161 		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
1162 FW_PORT_CAP_ANEG)
1159 1163
1160/** 1164/**
1161 * t4_link_start - apply link configuration to MAC/PHY 1165 * t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
2247} 2251}
2248 2252
2249/** 2253/**
2254 * t4_get_port_type_description - return Port Type string description
2255 * @port_type: firmware Port Type enumeration
2256 */
2257const char *t4_get_port_type_description(enum fw_port_type port_type)
2258{
2259 static const char *const port_type_description[] = {
2260 "R XFI",
2261 "R XAUI",
2262 "T SGMII",
2263 "T XFI",
2264 "T XAUI",
2265 "KX4",
2266 "CX4",
2267 "KX",
2268 "KR",
2269 "R SFP+",
2270 "KR/KX",
2271 "KR/KX/KX4",
2272 "R QSFP_10G",
2273 "",
2274 "R QSFP",
2275 "R BP40_BA",
2276 };
2277
2278 if (port_type < ARRAY_SIZE(port_type_description))
2279 return port_type_description[port_type];
2280 return "UNKNOWN";
2281}
2282
2283/**
2250 * t4_get_port_stats - collect port statistics 2284 * t4_get_port_stats - collect port statistics
2251 * @adap: the adapter 2285 * @adap: the adapter
2252 * @idx: the port index 2286 * @idx: the port index
@@ -2563,6 +2597,112 @@ int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2563} 2597}
2564 2598
2565/** 2599/**
2600 * t4_sge_decode_idma_state - decode the idma state
2601 * @adap: the adapter
2602 * @state: the state idma is stuck in
2603 */
2604void t4_sge_decode_idma_state(struct adapter *adapter, int state)
2605{
2606 static const char * const t4_decode[] = {
2607 "IDMA_IDLE",
2608 "IDMA_PUSH_MORE_CPL_FIFO",
2609 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2610 "Not used",
2611 "IDMA_PHYSADDR_SEND_PCIEHDR",
2612 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2613 "IDMA_PHYSADDR_SEND_PAYLOAD",
2614 "IDMA_SEND_FIFO_TO_IMSG",
2615 "IDMA_FL_REQ_DATA_FL_PREP",
2616 "IDMA_FL_REQ_DATA_FL",
2617 "IDMA_FL_DROP",
2618 "IDMA_FL_H_REQ_HEADER_FL",
2619 "IDMA_FL_H_SEND_PCIEHDR",
2620 "IDMA_FL_H_PUSH_CPL_FIFO",
2621 "IDMA_FL_H_SEND_CPL",
2622 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2623 "IDMA_FL_H_SEND_IP_HDR",
2624 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2625 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2626 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2627 "IDMA_FL_D_SEND_PCIEHDR",
2628 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2629 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2630 "IDMA_FL_SEND_PCIEHDR",
2631 "IDMA_FL_PUSH_CPL_FIFO",
2632 "IDMA_FL_SEND_CPL",
2633 "IDMA_FL_SEND_PAYLOAD_FIRST",
2634 "IDMA_FL_SEND_PAYLOAD",
2635 "IDMA_FL_REQ_NEXT_DATA_FL",
2636 "IDMA_FL_SEND_NEXT_PCIEHDR",
2637 "IDMA_FL_SEND_PADDING",
2638 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2639 "IDMA_FL_SEND_FIFO_TO_IMSG",
2640 "IDMA_FL_REQ_DATAFL_DONE",
2641 "IDMA_FL_REQ_HEADERFL_DONE",
2642 };
2643 static const char * const t5_decode[] = {
2644 "IDMA_IDLE",
2645 "IDMA_ALMOST_IDLE",
2646 "IDMA_PUSH_MORE_CPL_FIFO",
2647 "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
2648 "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
2649 "IDMA_PHYSADDR_SEND_PCIEHDR",
2650 "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
2651 "IDMA_PHYSADDR_SEND_PAYLOAD",
2652 "IDMA_SEND_FIFO_TO_IMSG",
2653 "IDMA_FL_REQ_DATA_FL",
2654 "IDMA_FL_DROP",
2655 "IDMA_FL_DROP_SEND_INC",
2656 "IDMA_FL_H_REQ_HEADER_FL",
2657 "IDMA_FL_H_SEND_PCIEHDR",
2658 "IDMA_FL_H_PUSH_CPL_FIFO",
2659 "IDMA_FL_H_SEND_CPL",
2660 "IDMA_FL_H_SEND_IP_HDR_FIRST",
2661 "IDMA_FL_H_SEND_IP_HDR",
2662 "IDMA_FL_H_REQ_NEXT_HEADER_FL",
2663 "IDMA_FL_H_SEND_NEXT_PCIEHDR",
2664 "IDMA_FL_H_SEND_IP_HDR_PADDING",
2665 "IDMA_FL_D_SEND_PCIEHDR",
2666 "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
2667 "IDMA_FL_D_REQ_NEXT_DATA_FL",
2668 "IDMA_FL_SEND_PCIEHDR",
2669 "IDMA_FL_PUSH_CPL_FIFO",
2670 "IDMA_FL_SEND_CPL",
2671 "IDMA_FL_SEND_PAYLOAD_FIRST",
2672 "IDMA_FL_SEND_PAYLOAD",
2673 "IDMA_FL_REQ_NEXT_DATA_FL",
2674 "IDMA_FL_SEND_NEXT_PCIEHDR",
2675 "IDMA_FL_SEND_PADDING",
2676 "IDMA_FL_SEND_COMPLETION_TO_IMSG",
2677 };
2678 static const u32 sge_regs[] = {
2679 SGE_DEBUG_DATA_LOW_INDEX_2,
2680 SGE_DEBUG_DATA_LOW_INDEX_3,
2681 SGE_DEBUG_DATA_HIGH_INDEX_10,
2682 };
2683 const char **sge_idma_decode;
2684 int sge_idma_decode_nstates;
2685 int i;
2686
2687 if (is_t4(adapter->params.chip)) {
2688 sge_idma_decode = (const char **)t4_decode;
2689 sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
2690 } else {
2691 sge_idma_decode = (const char **)t5_decode;
2692 sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
2693 }
2694
2695 if (state < sge_idma_decode_nstates)
2696 CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
2697 else
2698 CH_WARN(adapter, "idma state %d unknown\n", state);
2699
2700 for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
2701 CH_WARN(adapter, "SGE register %#x value %#x\n",
2702 sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
2703}
2704
2705/**
2566 * t4_fw_hello - establish communication with FW 2706 * t4_fw_hello - establish communication with FW
2567 * @adap: the adapter 2707 * @adap: the adapter
2568 * @mbox: mailbox to use for the FW command 2708 * @mbox: mailbox to use for the FW command
@@ -3533,11 +3673,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3533 if (stat & FW_PORT_CMD_TXPAUSE) 3673 if (stat & FW_PORT_CMD_TXPAUSE)
3534 fc |= PAUSE_TX; 3674 fc |= PAUSE_TX;
3535 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M)) 3675 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
3536 			speed = SPEED_100;
3676 			speed = 100;
3537 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G)) 3677 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
3538 			speed = SPEED_1000;
3678 			speed = 1000;
3539 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G)) 3679 		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
3540 			speed = SPEED_10000;
3680 			speed = 10000;
3681 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
3682 speed = 40000;
3541 3683
3542 if (link_ok != lc->link_ok || speed != lc->speed || 3684 if (link_ok != lc->link_ok || speed != lc->speed ||
3543 fc != lc->fc) { /* something changed */ 3685 fc != lc->fc) { /* something changed */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index cd6874b571ee..f2738c710789 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -116,6 +116,7 @@ enum CPL_error {
116 CPL_ERR_KEEPALIVE_TIMEDOUT = 34, 116 CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
117 CPL_ERR_RTX_NEG_ADVICE = 35, 117 CPL_ERR_RTX_NEG_ADVICE = 35,
118 CPL_ERR_PERSIST_NEG_ADVICE = 36, 118 CPL_ERR_PERSIST_NEG_ADVICE = 36,
119 CPL_ERR_KEEPALV_NEG_ADVICE = 37,
119 CPL_ERR_ABORT_FAILED = 42, 120 CPL_ERR_ABORT_FAILED = 42,
120 CPL_ERR_IWARP_FLM = 50, 121 CPL_ERR_IWARP_FLM = 50,
121}; 122};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 4082522d8140..225ad8a5722d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -230,6 +230,12 @@
230#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift) 230#define EGRTHRESHOLD(x) ((x) << EGRTHRESHOLDshift)
231#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift) 231#define EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
232 232
233#define EGRTHRESHOLDPACKING_MASK 0x3fU
234#define EGRTHRESHOLDPACKING_SHIFT 14
235#define EGRTHRESHOLDPACKING(x) ((x) << EGRTHRESHOLDPACKING_SHIFT)
236#define EGRTHRESHOLDPACKING_GET(x) (((x) >> EGRTHRESHOLDPACKING_SHIFT) & \
237 EGRTHRESHOLDPACKING_MASK)
238
233#define SGE_DBFIFO_STATUS 0x10a4 239#define SGE_DBFIFO_STATUS 0x10a4
234#define HP_INT_THRESH_SHIFT 28 240#define HP_INT_THRESH_SHIFT 28
235#define HP_INT_THRESH_MASK 0xfU 241#define HP_INT_THRESH_MASK 0xfU
@@ -278,6 +284,9 @@
278#define SGE_DEBUG_INDEX 0x10cc 284#define SGE_DEBUG_INDEX 0x10cc
279#define SGE_DEBUG_DATA_HIGH 0x10d0 285#define SGE_DEBUG_DATA_HIGH 0x10d0
280#define SGE_DEBUG_DATA_LOW 0x10d4 286#define SGE_DEBUG_DATA_LOW 0x10d4
287#define SGE_DEBUG_DATA_LOW_INDEX_2 0x12c8
288#define SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc
289#define SGE_DEBUG_DATA_HIGH_INDEX_10 0x12a8
281#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4 290#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
282 291
283#define S_HP_INT_THRESH 28 292#define S_HP_INT_THRESH 28
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74ce0aa..9cc973fbcf26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
932 FW_PARAMS_PARAM_DEV_FWREV = 0x0B, 932 FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
933 FW_PARAMS_PARAM_DEV_TPREV = 0x0C, 933 FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
934 FW_PARAMS_PARAM_DEV_CF = 0x0D, 934 FW_PARAMS_PARAM_DEV_CF = 0x0D,
935 FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
935}; 936};
936 937
937/* 938/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
1742 FW_PORT_TYPE_SFP, 1743 FW_PORT_TYPE_SFP,
1743 FW_PORT_TYPE_BP_AP, 1744 FW_PORT_TYPE_BP_AP,
1744 FW_PORT_TYPE_BP4_AP, 1745 FW_PORT_TYPE_BP4_AP,
1746 FW_PORT_TYPE_QSFP_10G,
1747 FW_PORT_TYPE_QSFP,
1748 FW_PORT_TYPE_BP40_BA,
1745 1749
1746 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK 1750 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1747}; 1751};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c0983594..1d0fe9b60312 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
2444 */ 2444 */
2445static int enable_msix(struct adapter *adapter) 2445static int enable_msix(struct adapter *adapter)
2446{ 2446{
2447 	int i, err, want, need;
2447 	int i, want, need, nqsets;
2448 struct msix_entry entries[MSIX_ENTRIES]; 2448 struct msix_entry entries[MSIX_ENTRIES];
2449 struct sge *s = &adapter->sge; 2449 struct sge *s = &adapter->sge;
2450 2450
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
2460 */ 2460 */
2461 want = s->max_ethqsets + MSIX_EXTRAS; 2461 want = s->max_ethqsets + MSIX_EXTRAS;
2462 need = adapter->params.nports + MSIX_EXTRAS; 2462 need = adapter->params.nports + MSIX_EXTRAS;
2463 while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
2464 want = err;
2465 2463
2466 	if (err == 0) {
2467 		int nqsets = want - MSIX_EXTRAS;
2468 		if (nqsets < s->max_ethqsets) {
2469 			dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2470 				 " for %d Queue Sets\n", nqsets);
2471 			s->max_ethqsets = nqsets;
2472 			if (nqsets < s->ethqsets)
2473 				reduce_ethqs(adapter, nqsets);
2474 		}
2475 		for (i = 0; i < want; ++i)
2476 			adapter->msix_info[i].vec = entries[i].vector;
2477 	} else if (err > 0) {
2478 		pci_disable_msix(adapter->pdev);
2479 		dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
2480 			 " not using MSI-X\n", err);
2481 	}
2482 	return err;
2464 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2465 	if (want < 0)
2466 		return want;
2467
2468 	nqsets = want - MSIX_EXTRAS;
2469 	if (nqsets < s->max_ethqsets) {
2470 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2471 			 " for %d Queue Sets\n", nqsets);
2472 		s->max_ethqsets = nqsets;
2473 		if (nqsets < s->ethqsets)
2474 			reduce_ethqs(adapter, nqsets);
2475 	}
2476 	for (i = 0; i < want; ++i)
2477 		adapter->msix_info[i].vec = entries[i].vector;
2478
2479 	return 0;
2483} 2480}
2484 2481
2485static const struct net_device_ops cxgb4vf_netdev_ops = { 2482static const struct net_device_ops cxgb4vf_netdev_ops = {
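
pci_enable_msix_range() folds the old retry loop into one call: it returns the number of vectors actually granted, anywhere between the minimum and maximum requested, or a negative errno if even the minimum is unavailable. A sketch of that negotiation outside the driver (the array size and names are illustrative):

#include <linux/pci.h>

static int sketch_enable_msix(struct pci_dev *pdev, int need, int want)
{
	struct msix_entry entries[32];	/* assumes want <= 32 */
	int i, got;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* One call replaces the old pci_enable_msix() retry loop. */
	got = pci_enable_msix_range(pdev, entries, need, want);
	if (got < 0)
		return got;	/* errno: fall back to MSI or INTx */

	/* need <= got <= want: size the queue plan to what we got. */
	return got;
}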
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0a89963c48ce..9cfa4b4bb089 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -401,7 +401,7 @@ static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
401 if (sdesc->skb) { 401 if (sdesc->skb) {
402 if (need_unmap) 402 if (need_unmap)
403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq); 403 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
404 			kfree_skb(sdesc->skb);
404 			dev_consume_skb_any(sdesc->skb);
405 sdesc->skb = NULL; 405 sdesc->skb = NULL;
406 } 406 }
407 407
@@ -1275,7 +1275,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1275 * need it any longer. 1275 * need it any longer.
1276 */ 1276 */
1277 inline_tx_skb(skb, &txq->q, cpl + 1); 1277 inline_tx_skb(skb, &txq->q, cpl + 1);
1278 		dev_kfree_skb(skb);
1278 		dev_consume_skb_any(skb);
1279 } else { 1279 } else {
1280 /* 1280 /*
1281 * Write the skb's Scatter/Gather list into the TX Packet CPL 1281 * Write the skb's Scatter/Gather list into the TX Packet CPL
@@ -1354,7 +1354,7 @@ out_free:
1354 * An error of some sort happened. Free the TX skb and tell the 1354 * An error of some sort happened. Free the TX skb and tell the
1355 * OS that we've "dealt" with the packet ... 1355 * OS that we've "dealt" with the packet ...
1356 */ 1356 */
1357 	dev_kfree_skb(skb);
1357 	dev_kfree_skb_any(skb);
1358 return NETDEV_TX_OK; 1358 return NETDEV_TX_OK;
1359} 1359}
1360 1360
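
This and the similar hunks below split skb freeing by intent: dev_consume_skb_any() for buffers the hardware handled successfully and dev_kfree_skb_any() for error-path drops, so tools watching the kfree_skb tracepoint (e.g. dropwatch) only flag real drops; the _any variants are safe regardless of whether the caller runs in hard-irq context. The rule in isolation (the helper is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Free an skb with the API that matches why it is being freed. */
static void sketch_free_tx_skb(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_any(skb);	/* normal completion, not a drop */
	else
		dev_kfree_skb_any(skb);		/* drop: visible to drop monitors */
}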
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 19f642a45f40..fe84fbabc0d4 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1174,7 +1174,7 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev)
1174 writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1); 1174 writewords(lp, TX_FRAME_PORT, skb->data, (skb->len + 1) >> 1);
1175 spin_unlock_irqrestore(&lp->lock, flags); 1175 spin_unlock_irqrestore(&lp->lock, flags);
1176 dev->stats.tx_bytes += skb->len; 1176 dev->stats.tx_bytes += skb->len;
1177 	dev_kfree_skb(skb);
1177 	dev_consume_skb_any(skb);
1178 1178
1179 /* We DO NOT call netif_wake_queue() here. 1179 /* We DO NOT call netif_wake_queue() here.
1180 * We also DO NOT call netif_start_queue(). 1180 * We also DO NOT call netif_start_queue().
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfce72ef..2945718ce806 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -521,7 +521,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
521 unsigned int txq_map; 521 unsigned int txq_map;
522 522
523 if (skb->len <= 0) { 523 if (skb->len <= 0) {
524 		dev_kfree_skb(skb);
524 		dev_kfree_skb_any(skb);
525 return NETDEV_TX_OK; 525 return NETDEV_TX_OK;
526 } 526 }
527 527
@@ -536,7 +536,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
536 if (skb_shinfo(skb)->gso_size == 0 && 536 if (skb_shinfo(skb)->gso_size == 0 &&
537 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC && 537 skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
538 skb_linearize(skb)) { 538 skb_linearize(skb)) {
539 		dev_kfree_skb(skb);
539 		dev_kfree_skb_any(skb);
540 return NETDEV_TX_OK; 540 return NETDEV_TX_OK;
541 } 541 }
542 542
@@ -1086,14 +1086,15 @@ static int enic_poll(struct napi_struct *napi, int budget)
1086 unsigned int intr = enic_legacy_io_intr(); 1086 unsigned int intr = enic_legacy_io_intr();
1087 unsigned int rq_work_to_do = budget; 1087 unsigned int rq_work_to_do = budget;
1088 unsigned int wq_work_to_do = -1; /* no limit */ 1088 unsigned int wq_work_to_do = -1; /* no limit */
1089 	unsigned int work_done, rq_work_done, wq_work_done;
1089 	unsigned int work_done, rq_work_done = 0, wq_work_done;
1090 int err; 1090 int err;
1091 1091
1092 /* Service RQ (first) and WQ 1092 /* Service RQ (first) and WQ
1093 */ 1093 */
1094 1094
1095 	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
1096 		rq_work_to_do, enic_rq_service, NULL);
1095 	if (budget > 0)
1096 		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
1097 rq_work_to_do, enic_rq_service, NULL);
1097 1098
1098 wq_work_done = vnic_cq_service(&enic->cq[cq_wq], 1099 wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
1099 wq_work_to_do, enic_wq_service, NULL); 1100 wq_work_to_do, enic_wq_service, NULL);
@@ -1141,14 +1142,15 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
1141 unsigned int cq = enic_cq_rq(enic, rq); 1142 unsigned int cq = enic_cq_rq(enic, rq);
1142 unsigned int intr = enic_msix_rq_intr(enic, rq); 1143 unsigned int intr = enic_msix_rq_intr(enic, rq);
1143 unsigned int work_to_do = budget; 1144 unsigned int work_to_do = budget;
1144 	unsigned int work_done;
1145 	unsigned int work_done = 0;
1145 int err; 1146 int err;
1146 1147
1147 /* Service RQ 1148 /* Service RQ
1148 */ 1149 */
1149 1150
1150 	work_done = vnic_cq_service(&enic->cq[cq],
1151 		work_to_do, enic_rq_service, NULL);
1151 	if (budget > 0)
1152 		work_done = vnic_cq_service(&enic->cq[cq],
1153 work_to_do, enic_rq_service, NULL);
1152 1154
1153 /* Return intr event credits for this polling 1155 /* Return intr event credits for this polling
1154 * cycle. An intr event is the completion of a 1156 * cycle. An intr event is the completion of a
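
The budget guards above matter because netpoll can invoke a NAPI handler with budget == 0, expecting TX completion processing only; unconditionally servicing the RX queue could then return more work than was budgeted. A minimal poll skeleton honoring that contract (service_rx()/service_tx() are illustrative stand-ins for per-driver work functions, declared here only to keep the sketch self-contained):

#include <linux/netdevice.h>

static int service_rx(struct napi_struct *napi, int budget);	/* driver work */
static void service_tx(struct napi_struct *napi);		/* unbudgeted */

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;

	if (budget > 0)		/* netpoll may pass budget == 0 */
		rx_done = service_rx(napi, budget);
	service_tx(napi);

	if (rx_done < budget)
		napi_complete(napi);	/* done: re-enable interrupts */
	return rx_done;
}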
@@ -1796,7 +1798,8 @@ static int enic_set_intr_mode(struct enic *enic)
1796 enic->cq_count >= n + m && 1798 enic->cq_count >= n + m &&
1797 enic->intr_count >= n + m + 2) { 1799 enic->intr_count >= n + m + 2) {
1798 1800
1799 		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
1801 		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1802 n + m + 2, n + m + 2) > 0) {
1800 1803
1801 enic->rq_count = n; 1804 enic->rq_count = n;
1802 enic->wq_count = m; 1805 enic->wq_count = m;
@@ -1815,7 +1818,8 @@ static int enic_set_intr_mode(struct enic *enic)
1815 enic->wq_count >= m && 1818 enic->wq_count >= m &&
1816 enic->cq_count >= 1 + m && 1819 enic->cq_count >= 1 + m &&
1817 enic->intr_count >= 1 + m + 2) { 1820 enic->intr_count >= 1 + m + 2) {
1818 		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
1821 		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
1822 1 + m + 2, 1 + m + 2) > 0) {
1819 1823
1820 enic->rq_count = 1; 1824 enic->rq_count = 1;
1821 enic->wq_count = m; 1825 enic->wq_count = m;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a1a2b4028a5c..8c4b93be333b 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1033,7 +1033,7 @@ dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1033 spin_unlock_irqrestore(&db->lock, flags); 1033 spin_unlock_irqrestore(&db->lock, flags);
1034 1034
1035 /* free this SKB */ 1035 /* free this SKB */
1036 	dev_kfree_skb(skb);
1036 	dev_consume_skb_any(skb);
1037 1037
1038 return NETDEV_TX_OK; 1038 return NETDEV_TX_OK;
1039} 1039}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 5ad9e3e3c0b8..53f0c618045c 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -696,7 +696,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
696 /* Too large packet check */ 696 /* Too large packet check */
697 if (skb->len > MAX_PACKET_SIZE) { 697 if (skb->len > MAX_PACKET_SIZE) {
698 pr_err("big packet = %d\n", (u16)skb->len); 698 pr_err("big packet = %d\n", (u16)skb->len);
699 		dev_kfree_skb(skb);
699 		dev_kfree_skb_any(skb);
700 return NETDEV_TX_OK; 700 return NETDEV_TX_OK;
701 } 701 }
702 702
@@ -743,7 +743,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
743 dw32(DCR7, db->cr7_data); 743 dw32(DCR7, db->cr7_data);
744 744
745 /* free this SKB */ 745 /* free this SKB */
746 	dev_kfree_skb(skb);
746 	dev_consume_skb_any(skb);
747 747
748 return NETDEV_TX_OK; 748 return NETDEV_TX_OK;
749} 749}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa4ee385091f..aa801a6af7b9 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -607,7 +607,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
607 /* Too large packet check */ 607 /* Too large packet check */
608 if (skb->len > MAX_PACKET_SIZE) { 608 if (skb->len > MAX_PACKET_SIZE) {
609 netdev_err(dev, "big packet = %d\n", (u16)skb->len); 609 netdev_err(dev, "big packet = %d\n", (u16)skb->len);
610 		dev_kfree_skb(skb);
610 		dev_kfree_skb_any(skb);
611 return NETDEV_TX_OK; 611 return NETDEV_TX_OK;
612 } 612 }
613 613
@@ -648,7 +648,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
648 uw32(DCR7, db->cr7_data); 648 uw32(DCR7, db->cr7_data);
649 649
650 /* free this SKB */ 650 /* free this SKB */
651 	dev_kfree_skb(skb);
651 	dev_consume_skb_any(skb);
652 652
653 return NETDEV_TX_OK; 653 return NETDEV_TX_OK;
654} 654}
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 113cd799a131..d9e5ca0d48c1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1137,7 +1137,7 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1137 return NETDEV_TX_OK; 1137 return NETDEV_TX_OK;
1138 1138
1139drop_frame: 1139drop_frame:
1140 	dev_kfree_skb(skb);
1140 	dev_kfree_skb_any(skb);
1141 np->tx_skbuff[entry] = NULL; 1141 np->tx_skbuff[entry] = NULL;
1142 dev->stats.tx_dropped++; 1142 dev->stats.tx_dropped++;
1143 return NETDEV_TX_OK; 1143 return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 05529e273050..a587c8aa27ed 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -34,7 +34,7 @@
34#include "be_hw.h" 34#include "be_hw.h"
35#include "be_roce.h" 35#include "be_roce.h"
36 36
37#define DRV_VER			"10.0.600.0u"
37#define DRV_VER			"10.2u"
38#define DRV_NAME "be2net" 38#define DRV_NAME "be2net"
39#define BE_NAME "Emulex BladeEngine2" 39#define BE_NAME "Emulex BladeEngine2"
40#define BE3_NAME "Emulex BladeEngine3" 40#define BE3_NAME "Emulex BladeEngine3"
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
88#define BE_MIN_MTU 256 88#define BE_MIN_MTU 256
89 89
90#define BE_NUM_VLANS_SUPPORTED 64 90#define BE_NUM_VLANS_SUPPORTED 64
91#define BE_UMC_NUM_VLANS_SUPPORTED 15
92#define BE_MAX_EQD 128u 91#define BE_MAX_EQD 128u
93#define BE_MAX_TX_FRAG_COUNT 30 92#define BE_MAX_TX_FRAG_COUNT 30
94 93
@@ -262,9 +261,10 @@ struct be_tx_obj {
262/* Struct to remember the pages posted for rx frags */ 261/* Struct to remember the pages posted for rx frags */
263struct be_rx_page_info { 262struct be_rx_page_info {
264 struct page *page; 263 struct page *page;
264 /* set to page-addr for last frag of the page & frag-addr otherwise */
265 DEFINE_DMA_UNMAP_ADDR(bus); 265 DEFINE_DMA_UNMAP_ADDR(bus);
266 u16 page_offset; 266 u16 page_offset;
267 	bool last_page_user;
267 	bool last_frag;		/* last frag of the page */
268}; 268};
269 269
270struct be_rx_stats { 270struct be_rx_stats {
@@ -293,7 +293,7 @@ struct be_rx_compl_info {
293 u8 ip_csum; 293 u8 ip_csum;
294 u8 l4_csum; 294 u8 l4_csum;
295 u8 ipv6; 295 u8 ipv6;
296 	u8 vtm;
296 	u8 qnq;
297 u8 pkt_type; 297 u8 pkt_type;
298 u8 ip_frag; 298 u8 ip_frag;
299}; 299};
@@ -359,6 +359,7 @@ struct be_vf_cfg {
359 int pmac_id; 359 int pmac_id;
360 u16 vlan_tag; 360 u16 vlan_tag;
361 u32 tx_rate; 361 u32 tx_rate;
362 u32 plink_tracking;
362}; 363};
363 364
364enum vf_state { 365enum vf_state {
@@ -467,6 +468,7 @@ struct be_adapter {
467 468
468 u32 port_num; 469 u32 port_num;
469 bool promiscuous; 470 bool promiscuous;
471 u8 mc_type;
470 u32 function_mode; 472 u32 function_mode;
471 u32 function_caps; 473 u32 function_caps;
472 u32 rx_fc; /* Rx flow control */ 474 u32 rx_fc; /* Rx flow control */
@@ -536,6 +538,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
536 return min_t(u16, num, num_online_cpus()); 538 return min_t(u16, num, num_online_cpus());
537} 539}
538 540
541/* Is BE in pvid_tagging mode */
542#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
543
544/* Is BE in QNQ multi-channel mode */
545#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
546 adapter->mc_type == vNIC1 || \
547 adapter->mc_type == UFP)
548
539#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ 549#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
540 adapter->pdev->device == OC_DEVICE_ID4) 550 adapter->pdev->device == OC_DEVICE_ID4)
541 551
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6370c3..cf5afe72f12f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -202,8 +202,12 @@ static void be_async_link_state_process(struct be_adapter *adapter,
202 /* When link status changes, link speed must be re-queried from FW */ 202 /* When link status changes, link speed must be re-queried from FW */
203 adapter->phy.link_speed = -1; 203 adapter->phy.link_speed = -1;
204 204
205 	/* Ignore physical link event */
206 	if (lancer_chip(adapter) &&
205 	/* On BEx the FW does not send a separate link status
206 	 * notification for physical and logical link.
207 * On other chips just process the logical link
208 * status notification
209 */
210 if (!BEx_chip(adapter) &&
207 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) 211 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
208 return; 212 return;
209 213
@@ -211,7 +215,8 @@ static void be_async_link_state_process(struct be_adapter *adapter,
211 * it may not be received in some cases. 215 * it may not be received in some cases.
212 */ 216 */
213 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 217 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
214 		be_link_status_update(adapter, evt->port_link_status);
218 		be_link_status_update(adapter,
219 evt->port_link_status & LINK_STATUS_MASK);
215} 220}
216 221
217/* Grp5 CoS Priority evt */ 222/* Grp5 CoS Priority evt */
@@ -239,10 +244,12 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
239static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 244static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
240 struct be_async_event_grp5_pvid_state *evt) 245 struct be_async_event_grp5_pvid_state *evt)
241{ 246{
242 	if (evt->enabled)
247 	if (evt->enabled) {
243 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 248 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
244 	else
249 		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
250 } else {
245 adapter->pvid = 0; 251 adapter->pvid = 0;
252 }
246} 253}
247 254
248static void be_async_grp5_evt_process(struct be_adapter *adapter, 255static void be_async_grp5_evt_process(struct be_adapter *adapter,
@@ -3296,6 +3303,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3296 return NULL; 3303 return NULL;
3297} 3304}
3298 3305
3306static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3307{
3308 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3309 int i;
3310
3311 for (i = 0; i < desc_count; i++) {
3312 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3313 return (struct be_port_res_desc *)hdr;
3314
3315 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3316 hdr = (void *)hdr + hdr->desc_len;
3317 }
3318 return NULL;
3319}
3320
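
be_get_port_desc() above (like the existing NIC and PCIe lookups) walks a packed buffer of variable-length resource descriptors, advancing by each header's desc_len and falling back to the fixed V0 size when the length field is zero so the walk always makes progress. The pattern in isolation (types and names here are illustrative):

#include <stdint.h>
#include <stddef.h>

struct desc_hdr {
	uint8_t desc_type;
	uint8_t desc_len;	/* length of the whole descriptor, in bytes */
};

static struct desc_hdr *find_desc(uint8_t *buf, uint32_t desc_count,
				  uint8_t wanted, uint8_t legacy_len)
{
	struct desc_hdr *hdr = (struct desc_hdr *)buf;
	uint32_t i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == wanted)
			return hdr;
		/* zero desc_len means a legacy fixed-size descriptor */
		hdr = (struct desc_hdr *)((uint8_t *)hdr +
				(hdr->desc_len ? hdr->desc_len : legacy_len));
	}
	return NULL;
}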
3299static void be_copy_nic_desc(struct be_resources *res, 3321static void be_copy_nic_desc(struct be_resources *res,
3300 struct be_nic_res_desc *desc) 3322 struct be_nic_res_desc *desc)
3301{ 3323{
@@ -3439,6 +3461,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3439{ 3461{
3440 struct be_cmd_resp_get_profile_config *resp; 3462 struct be_cmd_resp_get_profile_config *resp;
3441 struct be_pcie_res_desc *pcie; 3463 struct be_pcie_res_desc *pcie;
3464 struct be_port_res_desc *port;
3442 struct be_nic_res_desc *nic; 3465 struct be_nic_res_desc *nic;
3443 struct be_queue_info *mccq = &adapter->mcc_obj.q; 3466 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3444 struct be_dma_mem cmd; 3467 struct be_dma_mem cmd;
@@ -3466,6 +3489,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
3466 if (pcie) 3489 if (pcie)
3467 res->max_vfs = le16_to_cpu(pcie->num_vfs); 3490 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3468 3491
3492 port = be_get_port_desc(resp->func_param, desc_count);
3493 if (port)
3494 adapter->mc_type = port->mc_type;
3495
3469 nic = be_get_nic_desc(resp->func_param, desc_count); 3496 nic = be_get_nic_desc(resp->func_param, desc_count);
3470 if (nic) 3497 if (nic)
3471 be_copy_nic_desc(res, nic); 3498 be_copy_nic_desc(res, nic);
@@ -3723,6 +3750,45 @@ err:
3723 return status; 3750 return status;
3724} 3751}
3725 3752
3753int be_cmd_set_logical_link_config(struct be_adapter *adapter,
3754 int link_state, u8 domain)
3755{
3756 struct be_mcc_wrb *wrb;
3757 struct be_cmd_req_set_ll_link *req;
3758 int status;
3759
3760 if (BEx_chip(adapter) || lancer_chip(adapter))
3761 return 0;
3762
3763 spin_lock_bh(&adapter->mcc_lock);
3764
3765 wrb = wrb_from_mccq(adapter);
3766 if (!wrb) {
3767 status = -EBUSY;
3768 goto err;
3769 }
3770
3771 req = embedded_payload(wrb);
3772
3773 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3774 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
3775 sizeof(*req), wrb, NULL);
3776
3777 req->hdr.version = 1;
3778 req->hdr.domain = domain;
3779
3780 if (link_state == IFLA_VF_LINK_STATE_ENABLE)
3781 req->link_config |= 1;
3782
3783 if (link_state == IFLA_VF_LINK_STATE_AUTO)
3784 req->link_config |= 1 << PLINK_TRACK_SHIFT;
3785
3786 status = be_mcc_notify_wait(adapter);
3787err:
3788 spin_unlock_bh(&adapter->mcc_lock);
3789 return status;
3790}
3791
3726int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3792int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3727 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3793 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3728{ 3794{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076dc202..fda3e8851e17 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -203,6 +203,7 @@ struct be_mcc_mailbox {
203#define OPCODE_COMMON_GET_BEACON_STATE 70 203#define OPCODE_COMMON_GET_BEACON_STATE 70
204#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 204#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
205#define OPCODE_COMMON_GET_PORT_NAME 77 205#define OPCODE_COMMON_GET_PORT_NAME 77
206#define OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG 80
206#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89 207#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
207#define OPCODE_COMMON_SET_FN_PRIVILEGES 100 208#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
208#define OPCODE_COMMON_GET_PHY_DETAILS 102 209#define OPCODE_COMMON_GET_PHY_DETAILS 102
@@ -1098,14 +1099,6 @@ struct be_cmd_resp_query_fw_cfg {
1098 u32 function_caps; 1099 u32 function_caps;
1099}; 1100};
1100 1101
1101/* Is BE in a multi-channel mode */
1102static inline bool be_is_mc(struct be_adapter *adapter)
1103{
1104 return adapter->function_mode & FLEX10_MODE ||
1105 adapter->function_mode & VNIC_MODE ||
1106 adapter->function_mode & UMC_ENABLED;
1107}
1108
1109/******************** RSS Config ****************************************/ 1102/******************** RSS Config ****************************************/
1110/* RSS type Input parameters used to compute RX hash 1103/* RSS type Input parameters used to compute RX hash
1111 * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4 1104 * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,6 +1821,7 @@ struct be_cmd_req_set_ext_fat_caps {
1828#define NIC_RESOURCE_DESC_TYPE_V0 0x41 1821#define NIC_RESOURCE_DESC_TYPE_V0 0x41
1829#define PCIE_RESOURCE_DESC_TYPE_V1 0x50 1822#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
1830#define NIC_RESOURCE_DESC_TYPE_V1 0x51 1823#define NIC_RESOURCE_DESC_TYPE_V1 0x51
1824#define PORT_RESOURCE_DESC_TYPE_V1 0x55
1831#define MAX_RESOURCE_DESC 264 1825#define MAX_RESOURCE_DESC 264
1832 1826
1833/* QOS unit number */ 1827/* QOS unit number */
@@ -1891,6 +1885,33 @@ struct be_nic_res_desc {
1891 u32 rsvd8[7]; 1885 u32 rsvd8[7];
1892} __packed; 1886} __packed;
1893 1887
1888/************ Multi-Channel type ***********/
1889enum mc_type {
1890 MC_NONE = 0x01,
1891 UMC = 0x02,
1892 FLEX10 = 0x03,
1893 vNIC1 = 0x04,
1894 nPAR = 0x05,
1895 UFP = 0x06,
1896 vNIC2 = 0x07
1897};
1898
1899struct be_port_res_desc {
1900 struct be_res_desc_hdr hdr;
1901 u8 rsvd0;
1902 u8 flags;
1903 u8 rsvd1;
1904 u8 mc_type;
1905 u16 rsvd2;
1906 u32 rsvd3[20];
1907} __packed;
1908
1909/* Is BE in a multi-channel mode */
1910static inline bool be_is_mc(struct be_adapter *adapter)
1911{
1912 return adapter->mc_type > MC_NONE;
1913}
1914
1894struct be_cmd_req_get_func_config { 1915struct be_cmd_req_get_func_config {
1895 struct be_cmd_req_hdr hdr; 1916 struct be_cmd_req_hdr hdr;
1896}; 1917};
@@ -1971,6 +1992,13 @@ struct be_cmd_resp_get_iface_list {
1971 struct be_if_desc if_desc; 1992 struct be_if_desc if_desc;
1972}; 1993};
1973 1994
1995/*************** Set logical link ********************/
1996#define PLINK_TRACK_SHIFT 8
1997struct be_cmd_req_set_ll_link {
1998 struct be_cmd_req_hdr hdr;
1999 u32 link_config; /* Bit 0: UP_DOWN, Bit 9: PLINK */
2000};
2001
1974int be_pci_fnum_get(struct be_adapter *adapter); 2002int be_pci_fnum_get(struct be_adapter *adapter);
1975int be_fw_wait_ready(struct be_adapter *adapter); 2003int be_fw_wait_ready(struct be_adapter *adapter);
1976int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 2004int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -2092,3 +2120,5 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
2092 int vf_num); 2120 int vf_num);
2093int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); 2121int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
2094int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable); 2122int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
2123int be_cmd_set_logical_link_config(struct be_adapter *adapter,
2124 int link_state, u8 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be0070f55f..15ba96cba65d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -357,10 +357,10 @@ be_get_ethtool_stats(struct net_device *netdev,
357 struct be_rx_stats *stats = rx_stats(rxo); 357 struct be_rx_stats *stats = rx_stats(rxo);
358 358
359 do { 359 do {
360 			start = u64_stats_fetch_begin_bh(&stats->sync);
360 			start = u64_stats_fetch_begin_irq(&stats->sync);
361 			data[base] = stats->rx_bytes; 361 			data[base] = stats->rx_bytes;
362 			data[base + 1] = stats->rx_pkts; 362 			data[base + 1] = stats->rx_pkts;
363 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
363 		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
364 364
365 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { 365 for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) {
366 p = (u8 *)stats + et_rx_stats[i].offset; 366 p = (u8 *)stats + et_rx_stats[i].offset;
@@ -373,19 +373,19 @@ be_get_ethtool_stats(struct net_device *netdev,
373 struct be_tx_stats *stats = tx_stats(txo); 373 struct be_tx_stats *stats = tx_stats(txo);
374 374
375 do { 375 do {
376 			start = u64_stats_fetch_begin_bh(&stats->sync_compl);
376 			start = u64_stats_fetch_begin_irq(&stats->sync_compl);
377 			data[base] = stats->tx_compl; 377 			data[base] = stats->tx_compl;
378 		} while (u64_stats_fetch_retry_bh(&stats->sync_compl, start));
378 		} while (u64_stats_fetch_retry_irq(&stats->sync_compl, start));
379 379
380 		do { 380 		do {
381 			start = u64_stats_fetch_begin_bh(&stats->sync);
381 			start = u64_stats_fetch_begin_irq(&stats->sync);
382 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { 382 			for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) {
383 				p = (u8 *)stats + et_tx_stats[i].offset; 383 				p = (u8 *)stats + et_tx_stats[i].offset;
384 				data[base + i] = 384 				data[base + i] =
385 					(et_tx_stats[i].size == sizeof(u64)) ? 385 					(et_tx_stats[i].size == sizeof(u64)) ?
386 						*(u64 *)p : *(u32 *)p; 386 						*(u64 *)p : *(u32 *)p;
387 			} 387 			}
388 		} while (u64_stats_fetch_retry_bh(&stats->sync, start));
388 		} while (u64_stats_fetch_retry_irq(&stats->sync, start));
389 base += ETHTOOL_TXSTATS_NUM; 389 base += ETHTOOL_TXSTATS_NUM;
390 } 390 }
391} 391}
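
The _bh to _irq conversions in this file (and in be_main.c below) keep the same reader pattern: u64_stats_fetch_begin/retry implement a seqcount read loop, snapshotting the 64-bit counters and retrying if a writer updated them mid-read. The shape of every such loop, reduced to essentials (struct and field names are illustrative):

#include <linux/u64_stats_sync.h>

struct sketch_stats {
	u64 pkts;
	u64 bytes;
	struct u64_stats_sync sync;
};

/* Consistent 64-bit snapshot on 32-bit SMP: retry while a writer races. */
static void sketch_snapshot(struct sketch_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->sync);
		*pkts = s->pkts;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->sync, start));
}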
@@ -802,16 +802,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
802 802
803 if (test->flags & ETH_TEST_FL_OFFLINE) { 803 if (test->flags & ETH_TEST_FL_OFFLINE) {
804 if (be_loopback_test(adapter, BE_MAC_LOOPBACK, 804 if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
805 				      &data[0]) != 0) {
805 				      &data[0]) != 0)
806 			test->flags |= ETH_TEST_FL_FAILED; 806 			test->flags |= ETH_TEST_FL_FAILED;
807 		}
807
808 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK, 808 		if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
809 				      &data[1]) != 0) {
809 				      &data[1]) != 0)
810 test->flags |= ETH_TEST_FL_FAILED;
811 }
812 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
813 &data[2]) != 0) {
814 test->flags |= ETH_TEST_FL_FAILED; 810 test->flags |= ETH_TEST_FL_FAILED;
811
812 if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
813 if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
814 &data[2]) != 0)
815 test->flags |= ETH_TEST_FL_FAILED;
816 test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
815 } 817 }
816 } 818 }
817 819
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782185f2..28ac8dd0beaa 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
368 u8 numfrags[3]; /* dword 1 */ 368 u8 numfrags[3]; /* dword 1 */
369 u8 rss_flush; /* dword 2 */ 369 u8 rss_flush; /* dword 2 */
370 u8 cast_enc[2]; /* dword 2 */ 370 u8 cast_enc[2]; /* dword 2 */
371 	u8 vtm;		/* dword 2 */
371 	u8 qnq;		/* dword 2 */
372 u8 rss_bank; /* dword 2 */ 372 u8 rss_bank; /* dword 2 */
373 u8 rsvd1[23]; /* dword 2 */ 373 u8 rsvd1[23]; /* dword 2 */
374 u8 lro_pkt; /* dword 2 */ 374 u8 lro_pkt; /* dword 2 */
@@ -401,7 +401,7 @@ struct amap_eth_rx_compl_v1 {
401 u8 numfrags[3]; /* dword 1 */ 401 u8 numfrags[3]; /* dword 1 */
402 u8 rss_flush; /* dword 2 */ 402 u8 rss_flush; /* dword 2 */
403 u8 cast_enc[2]; /* dword 2 */ 403 u8 cast_enc[2]; /* dword 2 */
404 	u8 vtm;		/* dword 2 */
404 	u8 qnq;		/* dword 2 */
405 u8 rss_bank; /* dword 2 */ 405 u8 rss_bank; /* dword 2 */
406 u8 port[2]; /* dword 2 */ 406 u8 port[2]; /* dword 2 */
407 u8 vntagp; /* dword 2 */ 407 u8 vntagp; /* dword 2 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 36c80612e21a..a61f967f9ca1 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex
2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -591,10 +591,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
591 for_all_rx_queues(adapter, rxo, i) { 591 for_all_rx_queues(adapter, rxo, i) {
592 const struct be_rx_stats *rx_stats = rx_stats(rxo); 592 const struct be_rx_stats *rx_stats = rx_stats(rxo);
593 do { 593 do {
594 			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
594 			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
595 			pkts = rx_stats(rxo)->rx_pkts; 595 			pkts = rx_stats(rxo)->rx_pkts;
596 			bytes = rx_stats(rxo)->rx_bytes; 596 			bytes = rx_stats(rxo)->rx_bytes;
597 		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
597 		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
598 stats->rx_packets += pkts; 598 stats->rx_packets += pkts;
599 stats->rx_bytes += bytes; 599 stats->rx_bytes += bytes;
600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts; 600 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
@@ -605,10 +605,10 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
605 for_all_tx_queues(adapter, txo, i) { 605 for_all_tx_queues(adapter, txo, i) {
606 const struct be_tx_stats *tx_stats = tx_stats(txo); 606 const struct be_tx_stats *tx_stats = tx_stats(txo);
607 do { 607 do {
608 			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
608 			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
609 			pkts = tx_stats(txo)->tx_pkts; 609 			pkts = tx_stats(txo)->tx_pkts;
610 			bytes = tx_stats(txo)->tx_bytes; 610 			bytes = tx_stats(txo)->tx_bytes;
611 		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
611 		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
612 stats->tx_packets += pkts; 612 stats->tx_packets += pkts;
613 stats->tx_bytes += bytes; 613 stats->tx_bytes += bytes;
614 } 614 }
@@ -652,7 +652,7 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT; 652 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
653 } 653 }
654 654
655 	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
655 	if (link_status)
656 netif_carrier_on(netdev); 656 netif_carrier_on(netdev);
657 else 657 else
658 netif_carrier_off(netdev); 658 netif_carrier_off(netdev);
@@ -935,9 +935,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
935 } 935 }
936 936
937 /* If vlan tag is already inlined in the packet, skip HW VLAN 937 /* If vlan tag is already inlined in the packet, skip HW VLAN
938 	 * tagging in UMC mode
938 	 * tagging in pvid-tagging mode
939 	 */ 939 	 */
940 	if ((adapter->function_mode & UMC_ENABLED) &&
940 	if (be_pvid_tagging_enabled(adapter) &&
941 veh->h_vlan_proto == htons(ETH_P_8021Q)) 941 veh->h_vlan_proto == htons(ETH_P_8021Q))
942 *skip_hw_vlan = true; 942 *skip_hw_vlan = true;
943 943
@@ -1138,7 +1138,10 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1138 1138
1139 /* Packets with VID 0 are always received by Lancer by default */ 1139 /* Packets with VID 0 are always received by Lancer by default */
1140 if (lancer_chip(adapter) && vid == 0) 1140 if (lancer_chip(adapter) && vid == 0)
1141 		goto ret;
1141 		return status;
1142
1143 if (adapter->vlan_tag[vid])
1144 return status;
1142 1145
1143 adapter->vlan_tag[vid] = 1; 1146 adapter->vlan_tag[vid] = 1;
1144 adapter->vlans_added++; 1147 adapter->vlans_added++;
@@ -1148,7 +1151,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1148 adapter->vlans_added--; 1151 adapter->vlans_added--;
1149 adapter->vlan_tag[vid] = 0; 1152 adapter->vlan_tag[vid] = 0;
1150 } 1153 }
1151ret:
1154
1152 return status; 1155 return status;
1153} 1156}
1154 1157
@@ -1288,6 +1291,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
1288 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK; 1291 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1289 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT; 1292 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1290 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN); 1293 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1294 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1291 1295
1292 return 0; 1296 return 0;
1293} 1297}
@@ -1354,6 +1358,24 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
1354 adapter->vf_cfg[vf].tx_rate = rate; 1358 adapter->vf_cfg[vf].tx_rate = rate;
1355 return status; 1359 return status;
1356} 1360}
1361static int be_set_vf_link_state(struct net_device *netdev, int vf,
1362 int link_state)
1363{
1364 struct be_adapter *adapter = netdev_priv(netdev);
1365 int status;
1366
1367 if (!sriov_enabled(adapter))
1368 return -EPERM;
1369
1370 if (vf >= adapter->num_vfs)
1371 return -EINVAL;
1372
1373 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1374 if (!status)
1375 adapter->vf_cfg[vf].plink_tracking = link_state;
1376
1377 return status;
1378}
1357 1379
1358static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts, 1380static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1359 ulong now) 1381 ulong now)
@@ -1386,15 +1408,15 @@ static void be_eqd_update(struct be_adapter *adapter)
1386 1408
1387 rxo = &adapter->rx_obj[eqo->idx]; 1409 rxo = &adapter->rx_obj[eqo->idx];
1388 do { 1410 do {
1389 			start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1411 			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1390 			rx_pkts = rxo->stats.rx_pkts; 1412 			rx_pkts = rxo->stats.rx_pkts;
1391 		} while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1413 		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1392 1414
1393 		txo = &adapter->tx_obj[eqo->idx]; 1415 		txo = &adapter->tx_obj[eqo->idx];
1394 		do { 1416 		do {
1395 			start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1417 			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1396 			tx_pkts = txo->stats.tx_reqs; 1418 			tx_pkts = txo->stats.tx_reqs;
1397 		} while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1419 		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1398 1420
1399 1421
1400 /* Skip, if wrapped around or first calculation */ 1422 /* Skip, if wrapped around or first calculation */
@@ -1464,11 +1486,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1464 rx_page_info = &rxo->page_info_tbl[frag_idx]; 1486 rx_page_info = &rxo->page_info_tbl[frag_idx];
1465 BUG_ON(!rx_page_info->page); 1487 BUG_ON(!rx_page_info->page);
1466 1488
1467 	if (rx_page_info->last_page_user) {
1489 	if (rx_page_info->last_frag) {
1468 		dma_unmap_page(&adapter->pdev->dev, 1490 		dma_unmap_page(&adapter->pdev->dev,
1469 			       dma_unmap_addr(rx_page_info, bus), 1491 			       dma_unmap_addr(rx_page_info, bus),
1470 			       adapter->big_page_size, DMA_FROM_DEVICE); 1492 			       adapter->big_page_size, DMA_FROM_DEVICE);
1471 		rx_page_info->last_page_user = false;
1493 		rx_page_info->last_frag = false;
1494 } else {
1495 dma_sync_single_for_cpu(&adapter->pdev->dev,
1496 dma_unmap_addr(rx_page_info, bus),
1497 rx_frag_size, DMA_FROM_DEVICE);
1472 } 1498 }
1473 1499
1474 queue_tail_inc(rxq); 1500 queue_tail_inc(rxq);
@@ -1676,7 +1702,7 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1676 rxcp->rss_hash = 1702 rxcp->rss_hash =
1677 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl); 1703 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1678 if (rxcp->vlanf) { 1704 if (rxcp->vlanf) {
1679 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1705 		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1680 compl); 1706 compl);
1681 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, 1707 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1682 compl); 1708 compl);
@@ -1706,7 +1732,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1706 rxcp->rss_hash = 1732 rxcp->rss_hash =
1707 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl); 1733 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1708 if (rxcp->vlanf) { 1734 if (rxcp->vlanf) {
1709 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1735 		rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1710 compl); 1736 compl);
1711 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, 1737 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1712 compl); 1738 compl);
@@ -1739,9 +1765,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1739 rxcp->l4_csum = 0; 1765 rxcp->l4_csum = 0;
1740 1766
1741 if (rxcp->vlanf) { 1767 if (rxcp->vlanf) {
1742 		/* vlanf could be wrongly set in some cards.
1743 		 * ignore if vtm is not set */
1744 		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1768 		/* In QNQ modes, if qnq bit is not set, then the packet was
1769 		 * tagged only with the transparent outer vlan-tag and must
1770 		 * not be treated as a vlan packet by host
1771 */
1772 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1745 rxcp->vlanf = 0; 1773 rxcp->vlanf = 0;
1746 1774
1747 if (!lancer_chip(adapter)) 1775 if (!lancer_chip(adapter))
@@ -1800,17 +1828,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1800 rx_stats(rxo)->rx_post_fail++; 1828 rx_stats(rxo)->rx_post_fail++;
1801 break; 1829 break;
1802 } 1830 }
1803 			page_info->page_offset = 0;
1831 			page_offset = 0;
1804 		} else { 1832 		} else {
1805 			get_page(pagep); 1833 			get_page(pagep);
1806 			page_info->page_offset = page_offset + rx_frag_size;
1834 			page_offset += rx_frag_size;
1807 		} 1835 		}
1808 		page_offset = page_info->page_offset;
1836 		page_info->page_offset = page_offset;
1809 page_info->page = pagep; 1837 page_info->page = pagep;
1810 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1811 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1812 1838
1813 rxd = queue_head_node(rxq); 1839 rxd = queue_head_node(rxq);
1840 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1814 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF); 1841 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1815 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr)); 1842 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1816 1843
@@ -1818,15 +1845,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1818 if ((page_offset + rx_frag_size + rx_frag_size) > 1845 if ((page_offset + rx_frag_size + rx_frag_size) >
1819 adapter->big_page_size) { 1846 adapter->big_page_size) {
1820 pagep = NULL; 1847 pagep = NULL;
1821 			page_info->last_page_user = true;
1848 			page_info->last_frag = true;
1849 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1850 } else {
1851 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1822 } 1852 }
1823 1853
1824 prev_page_info = page_info; 1854 prev_page_info = page_info;
1825 queue_head_inc(rxq); 1855 queue_head_inc(rxq);
1826 page_info = &rxo->page_info_tbl[rxq->head]; 1856 page_info = &rxo->page_info_tbl[rxq->head];
1827 } 1857 }
1828 	if (pagep)
1829 		prev_page_info->last_page_user = true;
1858
1859 	/* Mark the last frag of a page when we break out of the above loop
1860 * with no more slots available in the RXQ
1861 */
1862 if (pagep) {
1863 prev_page_info->last_frag = true;
1864 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1865 }
1830 1866
1831 if (posted) { 1867 if (posted) {
1832 atomic_add(posted, &rxq->used); 1868 atomic_add(posted, &rxq->used);
@@ -1883,7 +1919,7 @@ static u16 be_tx_compl_process(struct be_adapter *adapter,
1883 queue_tail_inc(txq); 1919 queue_tail_inc(txq);
1884 } while (cur_index != last_index); 1920 } while (cur_index != last_index);
1885 1921
1886 	kfree_skb(sent_skb);
1922 	dev_kfree_skb_any(sent_skb);
1887 return num_wrbs; 1923 return num_wrbs;
1888} 1924}
1889 1925
@@ -2439,6 +2475,9 @@ void be_detect_error(struct be_adapter *adapter)
2439 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0; 2475 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2440 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; 2476 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2441 u32 i; 2477 u32 i;
2478 bool error_detected = false;
2479 struct device *dev = &adapter->pdev->dev;
2480 struct net_device *netdev = adapter->netdev;
2442 2481
2443 if (be_hw_error(adapter)) 2482 if (be_hw_error(adapter))
2444 return; 2483 return;
@@ -2450,6 +2489,21 @@ void be_detect_error(struct be_adapter *adapter)
2450 SLIPORT_ERROR1_OFFSET); 2489 SLIPORT_ERROR1_OFFSET);
2451 sliport_err2 = ioread32(adapter->db + 2490 sliport_err2 = ioread32(adapter->db +
2452 SLIPORT_ERROR2_OFFSET); 2491 SLIPORT_ERROR2_OFFSET);
2492 adapter->hw_error = true;
2493 /* Do not log error messages if it's a FW reset */
2494 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2495 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2496 dev_info(dev, "Firmware update in progress\n");
2497 } else {
2498 error_detected = true;
2499 dev_err(dev, "Error detected in the card\n");
2500 dev_err(dev, "ERR: sliport status 0x%x\n",
2501 sliport_status);
2502 dev_err(dev, "ERR: sliport error1 0x%x\n",
2503 sliport_err1);
2504 dev_err(dev, "ERR: sliport error2 0x%x\n",
2505 sliport_err2);
2506 }
2453 } 2507 }
2454 } else { 2508 } else {
2455 pci_read_config_dword(adapter->pdev, 2509 pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2517,33 @@ void be_detect_error(struct be_adapter *adapter)
2463 2517
2464 ue_lo = (ue_lo & ~ue_lo_mask); 2518 ue_lo = (ue_lo & ~ue_lo_mask);
2465 ue_hi = (ue_hi & ~ue_hi_mask); 2519 ue_hi = (ue_hi & ~ue_hi_mask);
2466 }
2467
2468 /* On certain platforms BE hardware can indicate spurious UEs.
2469 * Allow the h/w to stop working completely in case of a real UE.
2470 * Hence not setting the hw_error for UE detection.
2471 */
2472 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2473 adapter->hw_error = true;
2474 /* Do not log error messages if its a FW reset */
2475 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2476 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2477 dev_info(&adapter->pdev->dev,
2478 "Firmware update in progress\n");
2479 return;
2480 } else {
2481 dev_err(&adapter->pdev->dev,
2482 "Error detected in the card\n");
2483 }
2484 }
2485
2486 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2487 dev_err(&adapter->pdev->dev,
2488 "ERR: sliport status 0x%x\n", sliport_status);
2489 dev_err(&adapter->pdev->dev,
2490 "ERR: sliport error1 0x%x\n", sliport_err1);
2491 dev_err(&adapter->pdev->dev,
2492 "ERR: sliport error2 0x%x\n", sliport_err2);
2493 }
2494 2520
2495 if (ue_lo) { 2521 /* On certain platforms BE hardware can indicate spurious UEs.
2496 for (i = 0; ue_lo; ue_lo >>= 1, i++) { 2522 * Allow HW to stop working completely in case of a real UE.
2497 if (ue_lo & 1) 2523 * Hence not setting the hw_error for UE detection.
2498 dev_err(&adapter->pdev->dev, 2524 */
2499 "UE: %s bit set\n", ue_status_low_desc[i]);
2500 }
2501 }
2502 2525
2503 if (ue_hi) { 2526 if (ue_lo || ue_hi) {
2504 for (i = 0; ue_hi; ue_hi >>= 1, i++) { 2527 error_detected = true;
2505 if (ue_hi & 1) 2528 dev_err(dev,
2506 dev_err(&adapter->pdev->dev, 2529 "Unrecoverable Error detected in the adapter");
2507 "UE: %s bit set\n", ue_status_hi_desc[i]); 2530 dev_err(dev, "Please reboot server to recover");
2531 if (skyhawk_chip(adapter))
2532 adapter->hw_error = true;
2533 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2534 if (ue_lo & 1)
2535 dev_err(dev, "UE: %s bit set\n",
2536 ue_status_low_desc[i]);
2537 }
2538 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2539 if (ue_hi & 1)
2540 dev_err(dev, "UE: %s bit set\n",
2541 ue_status_hi_desc[i]);
2542 }
2508 } 2543 }
2509 } 2544 }
2510 2545 if (error_detected)
2546 netif_carrier_off(netdev);
2511} 2547}
2512 2548
2513static void be_msix_disable(struct be_adapter *adapter) 2549static void be_msix_disable(struct be_adapter *adapter)
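The consolidated UE path names every set bit in each unmasked status word with the same shift-and-test loop. Factored into a hedged helper for clarity (the driver keeps the two loops inline, indexing ue_status_low_desc[] and ue_status_hi_desc[]):

/* Sketch: report every set bit of an unmasked UE status word */
static void dump_ue_bits(struct device *dev, u32 ue,
			 const char * const *desc)
{
	int i;

	for (i = 0; ue; ue >>= 1, i++)
		if (ue & 1)
			dev_err(dev, "UE: %s bit set\n", desc[i]);
}

This would be called once with ue_lo and ue_status_low_desc, then again with ue_hi and ue_status_hi_desc.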
@@ -2521,7 +2557,7 @@ static void be_msix_disable(struct be_adapter *adapter)
2521 2557
2522static int be_msix_enable(struct be_adapter *adapter) 2558static int be_msix_enable(struct be_adapter *adapter)
2523{ 2559{
2524 int i, status, num_vec; 2560 int i, num_vec;
2525 struct device *dev = &adapter->pdev->dev; 2561 struct device *dev = &adapter->pdev->dev;
2526 2562
2527 /* If RoCE is supported, program the max number of NIC vectors that 2563 /* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2573,11 @@ static int be_msix_enable(struct be_adapter *adapter)
2537 for (i = 0; i < num_vec; i++) 2573 for (i = 0; i < num_vec; i++)
2538 adapter->msix_entries[i].entry = i; 2574 adapter->msix_entries[i].entry = i;
2539 2575
2540 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec); 2576 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2541 if (status == 0) { 2577 MIN_MSIX_VECTORS, num_vec);
2542 goto done; 2578 if (num_vec < 0)
2543 } else if (status >= MIN_MSIX_VECTORS) { 2579 goto fail;
2544 num_vec = status;
2545 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2546 num_vec);
2547 if (!status)
2548 goto done;
2549 }
2550
2551 dev_warn(dev, "MSIx enable failed\n");
2552 2580
2553 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2554 if (!be_physfn(adapter))
2555 return status;
2556 return 0;
2557done:
2558 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) { 2581 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2559 adapter->num_msix_roce_vec = num_vec / 2; 2582 adapter->num_msix_roce_vec = num_vec / 2;
2560 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n", 2583 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2589,14 @@ done:
2566 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n", 2589 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2567 adapter->num_msix_vec); 2590 adapter->num_msix_vec);
2568 return 0; 2591 return 0;
2592
2593fail:
2594 dev_warn(dev, "MSIx enable failed\n");
2595
2596 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2597 if (!be_physfn(adapter))
2598 return num_vec;
2599 return 0;
2569} 2600}
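pci_enable_msix_range() folds the old request/retry dance into the PCI core: the call returns the number of vectors actually granted, anywhere between minvec and maxvec, or a negative errno when not even minvec fits. The pattern as a standalone sketch (MIN_MSIX_VECTORS is the driver's own define):

/* Sketch: range-based MSI-X enable replacing the try/retry loop */
static int enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
		       int min_vec, int max_vec)
{
	int i, num_vec;

	for (i = 0; i < max_vec; i++)
		entries[i].entry = i;

	/* the PCI core itself retries downward until min_vec */
	num_vec = pci_enable_msix_range(pdev, entries, min_vec, max_vec);
	return num_vec;	/* vectors granted, or a negative errno */
}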
2570 2601
2571static inline int be_msix_vec_get(struct be_adapter *adapter, 2602static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -3100,8 +3131,12 @@ static int be_vf_setup(struct be_adapter *adapter)
3100 if (!status) 3131 if (!status)
3101 vf_cfg->tx_rate = lnk_speed; 3132 vf_cfg->tx_rate = lnk_speed;
3102 3133
3103 if (!old_vfs) 3134 if (!old_vfs) {
3104 be_cmd_enable_vf(adapter, vf + 1); 3135 be_cmd_enable_vf(adapter, vf + 1);
3136 be_cmd_set_logical_link_config(adapter,
3137 IFLA_VF_LINK_STATE_AUTO,
3138 vf + 1);
3139 }
3105 } 3140 }
3106 3141
3107 if (!old_vfs) { 3142 if (!old_vfs) {
@@ -3119,19 +3154,38 @@ err:
3119 return status; 3154 return status;
3120} 3155}
3121 3156
3157/* Converting function_mode bits on BE3 to SH mc_type enums */
3158
3159static u8 be_convert_mc_type(u32 function_mode)
3160{
3161 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
3162 return vNIC1;
3163 else if (function_mode & FLEX10_MODE)
3164 return FLEX10;
3165 else if (function_mode & VNIC_MODE)
3166 return vNIC2;
3167 else if (function_mode & UMC_ENABLED)
3168 return UMC;
3169 else
3170 return MC_NONE;
3171}
3172
3122/* On BE2/BE3 FW does not suggest the supported limits */ 3173/* On BE2/BE3 FW does not suggest the supported limits */
3123static void BEx_get_resources(struct be_adapter *adapter, 3174static void BEx_get_resources(struct be_adapter *adapter,
3124 struct be_resources *res) 3175 struct be_resources *res)
3125{ 3176{
3126 struct pci_dev *pdev = adapter->pdev; 3177 struct pci_dev *pdev = adapter->pdev;
3127 bool use_sriov = false; 3178 bool use_sriov = false;
3128 int max_vfs; 3179 int max_vfs = 0;
3129 3180
3130 max_vfs = pci_sriov_get_totalvfs(pdev); 3181 if (be_physfn(adapter) && BE3_chip(adapter)) {
3131 3182 be_cmd_get_profile_config(adapter, res, 0);
3132 if (BE3_chip(adapter) && sriov_want(adapter)) { 3183 /* Some old versions of BE3 FW don't report max_vfs value */
3133 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0; 3184 if (res->max_vfs == 0) {
3134 use_sriov = res->max_vfs; 3185 max_vfs = pci_sriov_get_totalvfs(pdev);
3186 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3187 }
3188 use_sriov = res->max_vfs && sriov_want(adapter);
3135 } 3189 }
3136 3190
3137 if (be_physfn(adapter)) 3191 if (be_physfn(adapter))
@@ -3139,17 +3193,32 @@ static void BEx_get_resources(struct be_adapter *adapter,
3139 else 3193 else
3140 res->max_uc_mac = BE_VF_UC_PMAC_COUNT; 3194 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3141 3195
3142 if (adapter->function_mode & FLEX10_MODE) 3196 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3143 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8; 3197
3144 else if (adapter->function_mode & UMC_ENABLED) 3198 if (be_is_mc(adapter)) {
3145 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED; 3199 /* Assuming that there are 4 channels per port,
3146 else 3200 * when multi-channel is enabled
3201 */
3202 if (be_is_qnq_mode(adapter))
3203 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3204 else
3205 /* In a non-qnq multichannel mode, the pvid
3206 * takes up one vlan entry
3207 */
3208 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3209 } else {
3147 res->max_vlans = BE_NUM_VLANS_SUPPORTED; 3210 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3211 }
3212
3148 res->max_mcast_mac = BE_MAX_MC; 3213 res->max_mcast_mac = BE_MAX_MC;
3149 3214
3150 /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */ 3215 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3151 if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) || 3216 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3152 !be_physfn(adapter) || (adapter->port_num > 1)) 3217 * *only* if it is RSS-capable.
3218 */
3219 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3220 !be_physfn(adapter) || (be_is_mc(adapter) &&
3221 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3153 res->max_tx_qs = 1; 3222 res->max_tx_qs = 1;
3154 else 3223 else
3155 res->max_tx_qs = BE3_MAX_TX_QS; 3224 res->max_tx_qs = BE3_MAX_TX_QS;
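Worked numbers for the max_vlans cases above, assuming the usual BE_NUM_VLANS_SUPPORTED of 64 from be.h:

/*   QnQ multi-channel:      64 / 8     =  8 vlan filter entries
 *   non-QnQ multi-channel:  64 / 4 - 1 = 15 (the pvid takes one of
 *                                            the 16 entries per channel)
 *   single-channel:         64
 */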
@@ -3161,7 +3230,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
3161 res->max_rx_qs = res->max_rss_qs + 1; 3230 res->max_rx_qs = res->max_rss_qs + 1;
3162 3231
3163 if (be_physfn(adapter)) 3232 if (be_physfn(adapter))
3164 res->max_evt_qs = (max_vfs > 0) ? 3233 res->max_evt_qs = (res->max_vfs > 0) ?
3165 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS; 3234 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3166 else 3235 else
3167 res->max_evt_qs = 1; 3236 res->max_evt_qs = 1;
@@ -3252,9 +3321,8 @@ static int be_get_config(struct be_adapter *adapter)
3252 if (status) 3321 if (status)
3253 return status; 3322 return status;
3254 3323
3255 /* primary mac needs 1 pmac entry */ 3324 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3256 adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32), 3325 sizeof(*adapter->pmac_id), GFP_KERNEL);
3257 GFP_KERNEL);
3258 if (!adapter->pmac_id) 3326 if (!adapter->pmac_id)
3259 return -ENOMEM; 3327 return -ENOMEM;
3260 3328
@@ -3428,6 +3496,10 @@ static int be_setup(struct be_adapter *adapter)
3428 be_cmd_set_flow_control(adapter, adapter->tx_fc, 3496 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3429 adapter->rx_fc); 3497 adapter->rx_fc);
3430 3498
3499 if (be_physfn(adapter))
3500 be_cmd_set_logical_link_config(adapter,
3501 IFLA_VF_LINK_STATE_AUTO, 0);
3502
3431 if (sriov_want(adapter)) { 3503 if (sriov_want(adapter)) {
3432 if (be_max_vfs(adapter)) 3504 if (be_max_vfs(adapter))
3433 be_vf_setup(adapter); 3505 be_vf_setup(adapter);
@@ -4067,6 +4139,7 @@ static const struct net_device_ops be_netdev_ops = {
4067 .ndo_set_vf_vlan = be_set_vf_vlan, 4139 .ndo_set_vf_vlan = be_set_vf_vlan,
4068 .ndo_set_vf_tx_rate = be_set_vf_tx_rate, 4140 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
4069 .ndo_get_vf_config = be_get_vf_config, 4141 .ndo_get_vf_config = be_get_vf_config,
4142 .ndo_set_vf_link_state = be_set_vf_link_state,
4070#ifdef CONFIG_NET_POLL_CONTROLLER 4143#ifdef CONFIG_NET_POLL_CONTROLLER
4071 .ndo_poll_controller = be_netpoll, 4144 .ndo_poll_controller = be_netpoll,
4072#endif 4145#endif
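Wiring .ndo_set_vf_link_state lets 'ip link set <pf> vf <n> state auto|enable|disable' reach the driver. The handler's body is not part of this hunk; a sketch of its likely shape, dispatching on the IFLA_VF_LINK_STATE_* values from include/uapi/linux/if_link.h:

static int sketch_set_vf_link_state(struct net_device *netdev, int vf,
				    int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:	 /* VF link mirrors the PF link */
	case IFLA_VF_LINK_STATE_ENABLE:	 /* force the VF link up */
	case IFLA_VF_LINK_STATE_DISABLE: /* force the VF link down */
		return be_cmd_set_logical_link_config(adapter, link_state,
						      vf + 1);
	default:
		return -EINVAL;
	}
}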
@@ -4427,14 +4500,32 @@ static bool be_reset_required(struct be_adapter *adapter)
4427 4500
4428static char *mc_name(struct be_adapter *adapter) 4501static char *mc_name(struct be_adapter *adapter)
4429{ 4502{
4430 if (adapter->function_mode & FLEX10_MODE) 4503 char *str = ""; /* default */
4431 return "FLEX10"; 4504
4432 else if (adapter->function_mode & VNIC_MODE) 4505 switch (adapter->mc_type) {
4433 return "vNIC"; 4506 case UMC:
4434 else if (adapter->function_mode & UMC_ENABLED) 4507 str = "UMC";
4435 return "UMC"; 4508 break;
4436 else 4509 case FLEX10:
4437 return ""; 4510 str = "FLEX10";
4511 break;
4512 case vNIC1:
4513 str = "vNIC-1";
4514 break;
4515 case nPAR:
4516 str = "nPAR";
4517 break;
4518 case UFP:
4519 str = "UFP";
4520 break;
4521 case vNIC2:
4522 str = "vNIC-2";
4523 break;
4524 default:
4525 str = "";
4526 }
4527
4528 return str;
4438} 4529}
4439 4530
4440static inline char *func_name(struct be_adapter *adapter) 4531static inline char *func_name(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415fe017..a5dae4a62bb3 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129e19af..a3ef8f804b9e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2013 Emulex 2 * Copyright (C) 2005 - 2014 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13b92ac..71debd1c18c9 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
14obj-$(CONFIG_GIANFAR) += gianfar_driver.o 14obj-$(CONFIG_GIANFAR) += gianfar_driver.o
15obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o 15obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
16gianfar_driver-objs := gianfar.o \ 16gianfar_driver-objs := gianfar.o \
17 gianfar_ethtool.o \ 17 gianfar_ethtool.o
18 gianfar_sysfs.o
19obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o 18obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
20ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o 19ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 03a351300013..f9f8a589cdef 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -338,7 +338,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
338 338
339 /* Protocol checksum off-load for TCP and UDP. */ 339 /* Protocol checksum off-load for TCP and UDP. */
340 if (fec_enet_clear_csum(skb, ndev)) { 340 if (fec_enet_clear_csum(skb, ndev)) {
341 kfree_skb(skb); 341 dev_kfree_skb_any(skb);
342 return NETDEV_TX_OK; 342 return NETDEV_TX_OK;
343 } 343 }
344 344
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 89ccb5b08708..82386b29914a 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -372,6 +372,7 @@ void fec_ptp_init(struct platform_device *pdev)
372 fep->ptp_caps.n_alarm = 0; 372 fep->ptp_caps.n_alarm = 0;
373 fep->ptp_caps.n_ext_ts = 0; 373 fep->ptp_caps.n_ext_ts = 0;
374 fep->ptp_caps.n_per_out = 0; 374 fep->ptp_caps.n_per_out = 0;
375 fep->ptp_caps.n_pins = 0;
375 fep->ptp_caps.pps = 0; 376 fep->ptp_caps.pps = 0;
376 fep->ptp_caps.adjfreq = fec_ptp_adjfreq; 377 fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
377 fep->ptp_caps.adjtime = fec_ptp_adjtime; 378 fep->ptp_caps.adjtime = fec_ptp_adjtime;
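struct ptp_clock_info grew pin-programming fields (n_pins, pin_config and a verify() callback) in this kernel cycle, so existing clock drivers must initialize them; a clock without programmable pins simply zeroes them, as fec does here. Field names per include/linux/ptp_clock_kernel.h:

fep->ptp_caps.n_pins = 0;		/* no programmable pins */
fep->ptp_caps.pin_config = NULL;
fep->ptp_caps.verify = NULL;		/* nothing to validate */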
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 62f042d4aaa9..dc80db41d6b3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
91 u16 pkt_len, sc; 91 u16 pkt_len, sc;
92 int curidx; 92 int curidx;
93 93
94 if (budget <= 0)
95 return received;
96
94 /* 97 /*
95 * First, grab all of the stats for the incoming packet. 98 * First, grab all of the stats for the incoming packet.
96 * These get messed up if we get called due to a busy condition. 99 * These get messed up if we get called due to a busy condition.
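The early return matters because netpoll may invoke a NAPI handler with budget == 0, expecting Tx-only work; consuming Rx packets or calling napi_complete() in that case corrupts the accounting. The guard in a generic poll skeleton:

/* Sketch: a NAPI poll handler that tolerates a zero budget */
static int sketch_napi_poll(struct napi_struct *napi, int budget)
{
	int received = 0;

	if (budget <= 0)	/* netpoll Tx-only invocation */
		return received;

	/* ... receive up to 'budget' packets, bumping 'received' ... */

	if (received < budget)
		napi_complete(napi);	/* done: re-enable interrupts */

	return received;
}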
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aadc7e1..9125d9abf099 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * Copyright 2007 MontaVista Software, Inc. 13 * Copyright 2007 MontaVista Software, Inc.
14 * 14 *
15 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
121static irqreturn_t gfar_transmit(int irq, void *dev_id); 121static irqreturn_t gfar_transmit(int irq, void *dev_id);
122static irqreturn_t gfar_interrupt(int irq, void *dev_id); 122static irqreturn_t gfar_interrupt(int irq, void *dev_id);
123static void adjust_link(struct net_device *dev); 123static void adjust_link(struct net_device *dev);
124static void init_registers(struct net_device *dev);
125static int init_phy(struct net_device *dev); 124static int init_phy(struct net_device *dev);
126static int gfar_probe(struct platform_device *ofdev); 125static int gfar_probe(struct platform_device *ofdev);
127static int gfar_remove(struct platform_device *ofdev); 126static int gfar_remove(struct platform_device *ofdev);
@@ -129,8 +128,10 @@ static void free_skb_resources(struct gfar_private *priv);
129static void gfar_set_multi(struct net_device *dev); 128static void gfar_set_multi(struct net_device *dev);
130static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 129static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
131static void gfar_configure_serdes(struct net_device *dev); 130static void gfar_configure_serdes(struct net_device *dev);
132static int gfar_poll(struct napi_struct *napi, int budget); 131static int gfar_poll_rx(struct napi_struct *napi, int budget);
133static int gfar_poll_sq(struct napi_struct *napi, int budget); 132static int gfar_poll_tx(struct napi_struct *napi, int budget);
133static int gfar_poll_rx_sq(struct napi_struct *napi, int budget);
134static int gfar_poll_tx_sq(struct napi_struct *napi, int budget);
134#ifdef CONFIG_NET_POLL_CONTROLLER 135#ifdef CONFIG_NET_POLL_CONTROLLER
135static void gfar_netpoll(struct net_device *dev); 136static void gfar_netpoll(struct net_device *dev);
136#endif 137#endif
@@ -138,9 +139,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
138static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 139static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
139static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 140static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
140 int amount_pull, struct napi_struct *napi); 141 int amount_pull, struct napi_struct *napi);
141void gfar_halt(struct net_device *dev); 142static void gfar_halt_nodisable(struct gfar_private *priv);
142static void gfar_halt_nodisable(struct net_device *dev);
143void gfar_start(struct net_device *dev);
144static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
145static void gfar_set_mac_for_addr(struct net_device *dev, int num, 144static void gfar_set_mac_for_addr(struct net_device *dev, int num,
146 const u8 *addr); 145 const u8 *addr);
@@ -332,72 +331,76 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
332 } 331 }
333} 332}
334 333
335static void gfar_init_mac(struct net_device *ndev) 334static void gfar_rx_buff_size_config(struct gfar_private *priv)
336{ 335{
337 struct gfar_private *priv = netdev_priv(ndev); 336 int frame_size = priv->ndev->mtu + ETH_HLEN;
338 struct gfar __iomem *regs = priv->gfargrp[0].regs;
339 u32 rctrl = 0;
340 u32 tctrl = 0;
341 u32 attrs = 0;
342
343 /* write the tx/rx base registers */
344 gfar_init_tx_rx_base(priv);
345
346 /* Configure the coalescing support */
347 gfar_configure_coalescing_all(priv);
348 337
349 /* set this when rx hw offload (TOE) functions are being used */ 338 /* set this when rx hw offload (TOE) functions are being used */
350 priv->uses_rxfcb = 0; 339 priv->uses_rxfcb = 0;
351 340
341 if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
342 priv->uses_rxfcb = 1;
343
344 if (priv->hwts_rx_en)
345 priv->uses_rxfcb = 1;
346
347 if (priv->uses_rxfcb)
348 frame_size += GMAC_FCB_LEN;
349
350 frame_size += priv->padding;
351
352 frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
353 INCREMENTAL_BUFFER_SIZE;
354
355 priv->rx_buffer_size = frame_size;
356}
357
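The rounding above aligns the buffer size up to the next INCREMENTAL_BUFFER_SIZE boundary and always adds a full increment, even when the size is already aligned. Worked numbers, assuming the usual 512-byte increment from gianfar.h:

/* (frame_size & ~(512 - 1)) + 512:
 *   frame_size = 1600 -> (1600 & ~511) + 512 = 1536 + 512 = 2048
 *   frame_size = 1536 -> (1536 & ~511) + 512 = 1536 + 512 = 2048
 * so even an exact multiple gains one increment of headroom.
 */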
358static void gfar_mac_rx_config(struct gfar_private *priv)
359{
360 struct gfar __iomem *regs = priv->gfargrp[0].regs;
361 u32 rctrl = 0;
362
352 if (priv->rx_filer_enable) { 363 if (priv->rx_filer_enable) {
353 rctrl |= RCTRL_FILREN; 364 rctrl |= RCTRL_FILREN;
354 /* Program the RIR0 reg with the required distribution */ 365 /* Program the RIR0 reg with the required distribution */
355 gfar_write(&regs->rir0, DEFAULT_RIR0); 366 if (priv->poll_mode == GFAR_SQ_POLLING)
367 gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
368 else /* GFAR_MQ_POLLING */
369 gfar_write(&regs->rir0, DEFAULT_8RXQ_RIR0);
356 } 370 }
357 371
358 /* Restore PROMISC mode */ 372 /* Restore PROMISC mode */
359 if (ndev->flags & IFF_PROMISC) 373 if (priv->ndev->flags & IFF_PROMISC)
360 rctrl |= RCTRL_PROM; 374 rctrl |= RCTRL_PROM;
361 375
362 if (ndev->features & NETIF_F_RXCSUM) { 376 if (priv->ndev->features & NETIF_F_RXCSUM)
363 rctrl |= RCTRL_CHECKSUMMING; 377 rctrl |= RCTRL_CHECKSUMMING;
364 priv->uses_rxfcb = 1;
365 }
366 378
367 if (priv->extended_hash) { 379 if (priv->extended_hash)
368 rctrl |= RCTRL_EXTHASH; 380 rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
369
370 gfar_clear_exact_match(ndev);
371 rctrl |= RCTRL_EMEN;
372 }
373 381
374 if (priv->padding) { 382 if (priv->padding) {
375 rctrl &= ~RCTRL_PAL_MASK; 383 rctrl &= ~RCTRL_PAL_MASK;
376 rctrl |= RCTRL_PADDING(priv->padding); 384 rctrl |= RCTRL_PADDING(priv->padding);
377 } 385 }
378 386
379 /* Insert receive time stamps into padding alignment bytes */
380 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
381 rctrl &= ~RCTRL_PAL_MASK;
382 rctrl |= RCTRL_PADDING(8);
383 priv->padding = 8;
384 }
385
386 /* Enable HW time stamping if requested from user space */ 387 /* Enable HW time stamping if requested from user space */
387 if (priv->hwts_rx_en) { 388 if (priv->hwts_rx_en)
388 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE; 389 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
389 priv->uses_rxfcb = 1;
390 }
391 390
392 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) { 391 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
393 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 392 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
394 priv->uses_rxfcb = 1;
395 }
396 393
397 /* Init rctrl based on our settings */ 394 /* Init rctrl based on our settings */
398 gfar_write(&regs->rctrl, rctrl); 395 gfar_write(&regs->rctrl, rctrl);
396}
397
398static void gfar_mac_tx_config(struct gfar_private *priv)
399{
400 struct gfar __iomem *regs = priv->gfargrp[0].regs;
401 u32 tctrl = 0;
399 402
400 if (ndev->features & NETIF_F_IP_CSUM) 403 if (priv->ndev->features & NETIF_F_IP_CSUM)
401 tctrl |= TCTRL_INIT_CSUM; 404 tctrl |= TCTRL_INIT_CSUM;
402 405
403 if (priv->prio_sched_en) 406 if (priv->prio_sched_en)
@@ -408,30 +411,51 @@ static void gfar_init_mac(struct net_device *ndev)
408 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT); 411 gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
409 } 412 }
410 413
411 gfar_write(&regs->tctrl, tctrl); 414 if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
415 tctrl |= TCTRL_VLINS;
412 416
413 /* Set the extraction length and index */ 417 gfar_write(&regs->tctrl, tctrl);
414 attrs = ATTRELI_EL(priv->rx_stash_size) | 418}
415 ATTRELI_EI(priv->rx_stash_index);
416 419
417 gfar_write(&regs->attreli, attrs); 420static void gfar_configure_coalescing(struct gfar_private *priv,
421 unsigned long tx_mask, unsigned long rx_mask)
422{
423 struct gfar __iomem *regs = priv->gfargrp[0].regs;
424 u32 __iomem *baddr;
418 425
419 /* Start with defaults, and add stashing or locking 426 if (priv->mode == MQ_MG_MODE) {
420 * depending on the approprate variables 427 int i = 0;
421 */
422 attrs = ATTR_INIT_SETTINGS;
423 428
424 if (priv->bd_stash_en) 429 baddr = &regs->txic0;
425 attrs |= ATTR_BDSTASH; 430 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
431 gfar_write(baddr + i, 0);
432 if (likely(priv->tx_queue[i]->txcoalescing))
433 gfar_write(baddr + i, priv->tx_queue[i]->txic);
434 }
426 435
427 if (priv->rx_stash_size != 0) 436 baddr = &regs->rxic0;
428 attrs |= ATTR_BUFSTASH; 437 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
438 gfar_write(baddr + i, 0);
439 if (likely(priv->rx_queue[i]->rxcoalescing))
440 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
441 }
442 } else {
443 /* Backward compatible case -- even if we enable
444 * multiple queues, there's only single reg to program
445 */
446 gfar_write(&regs->txic, 0);
447 if (likely(priv->tx_queue[0]->txcoalescing))
448 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
429 449
430 gfar_write(&regs->attr, attrs); 450 gfar_write(&regs->rxic, 0);
451 if (unlikely(priv->rx_queue[0]->rxcoalescing))
452 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
453 }
454}
431 455
432 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold); 456void gfar_configure_coalescing_all(struct gfar_private *priv)
433 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve); 457{
434 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off); 458 gfar_configure_coalescing(priv, 0xFF, 0xFF);
435} 459}
436 460
437static struct net_device_stats *gfar_get_stats(struct net_device *dev) 461static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +503,27 @@ static const struct net_device_ops gfar_netdev_ops = {
479#endif 503#endif
480}; 504};
481 505
482void lock_rx_qs(struct gfar_private *priv) 506static void gfar_ints_disable(struct gfar_private *priv)
483{ 507{
484 int i; 508 int i;
509 for (i = 0; i < priv->num_grps; i++) {
510 struct gfar __iomem *regs = priv->gfargrp[i].regs;
511 /* Clear IEVENT */
512 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
485 513
486 for (i = 0; i < priv->num_rx_queues; i++) 514 /* Initialize IMASK */
487 spin_lock(&priv->rx_queue[i]->rxlock); 515 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
516 }
517}
518
519static void gfar_ints_enable(struct gfar_private *priv)
520{
521 int i;
522 for (i = 0; i < priv->num_grps; i++) {
523 struct gfar __iomem *regs = priv->gfargrp[i].regs;
524 /* Unmask the interrupts we look for */
525 gfar_write(&regs->imask, IMASK_DEFAULT);
526 }
488} 527}
489 528
490void lock_tx_qs(struct gfar_private *priv) 529void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +534,50 @@ void lock_tx_qs(struct gfar_private *priv)
495 spin_lock(&priv->tx_queue[i]->txlock); 534 spin_lock(&priv->tx_queue[i]->txlock);
496} 535}
497 536
498void unlock_rx_qs(struct gfar_private *priv) 537void unlock_tx_qs(struct gfar_private *priv)
499{ 538{
500 int i; 539 int i;
501 540
502 for (i = 0; i < priv->num_rx_queues; i++) 541 for (i = 0; i < priv->num_tx_queues; i++)
503 spin_unlock(&priv->rx_queue[i]->rxlock); 542 spin_unlock(&priv->tx_queue[i]->txlock);
504} 543}
505 544
506void unlock_tx_qs(struct gfar_private *priv) 545static int gfar_alloc_tx_queues(struct gfar_private *priv)
507{ 546{
508 int i; 547 int i;
509 548
510 for (i = 0; i < priv->num_tx_queues; i++) 549 for (i = 0; i < priv->num_tx_queues; i++) {
511 spin_unlock(&priv->tx_queue[i]->txlock); 550 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
551 GFP_KERNEL);
552 if (!priv->tx_queue[i])
553 return -ENOMEM;
554
555 priv->tx_queue[i]->tx_skbuff = NULL;
556 priv->tx_queue[i]->qindex = i;
557 priv->tx_queue[i]->dev = priv->ndev;
558 spin_lock_init(&(priv->tx_queue[i]->txlock));
559 }
560 return 0;
512} 561}
513 562
514static void free_tx_pointers(struct gfar_private *priv) 563static int gfar_alloc_rx_queues(struct gfar_private *priv)
564{
565 int i;
566
567 for (i = 0; i < priv->num_rx_queues; i++) {
568 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
569 GFP_KERNEL);
570 if (!priv->rx_queue[i])
571 return -ENOMEM;
572
573 priv->rx_queue[i]->rx_skbuff = NULL;
574 priv->rx_queue[i]->qindex = i;
575 priv->rx_queue[i]->dev = priv->ndev;
576 }
577 return 0;
578}
579
580static void gfar_free_tx_queues(struct gfar_private *priv)
515{ 581{
516 int i; 582 int i;
517 583
@@ -519,7 +585,7 @@ static void free_tx_pointers(struct gfar_private *priv)
519 kfree(priv->tx_queue[i]); 585 kfree(priv->tx_queue[i]);
520} 586}
521 587
522static void free_rx_pointers(struct gfar_private *priv) 588static void gfar_free_rx_queues(struct gfar_private *priv)
523{ 589{
524 int i; 590 int i;
525 591
@@ -553,23 +619,26 @@ static void disable_napi(struct gfar_private *priv)
553{ 619{
554 int i; 620 int i;
555 621
556 for (i = 0; i < priv->num_grps; i++) 622 for (i = 0; i < priv->num_grps; i++) {
557 napi_disable(&priv->gfargrp[i].napi); 623 napi_disable(&priv->gfargrp[i].napi_rx);
624 napi_disable(&priv->gfargrp[i].napi_tx);
625 }
558} 626}
559 627
560static void enable_napi(struct gfar_private *priv) 628static void enable_napi(struct gfar_private *priv)
561{ 629{
562 int i; 630 int i;
563 631
564 for (i = 0; i < priv->num_grps; i++) 632 for (i = 0; i < priv->num_grps; i++) {
565 napi_enable(&priv->gfargrp[i].napi); 633 napi_enable(&priv->gfargrp[i].napi_rx);
634 napi_enable(&priv->gfargrp[i].napi_tx);
635 }
566} 636}
567 637
568static int gfar_parse_group(struct device_node *np, 638static int gfar_parse_group(struct device_node *np,
569 struct gfar_private *priv, const char *model) 639 struct gfar_private *priv, const char *model)
570{ 640{
571 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps]; 641 struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
572 u32 *queue_mask;
573 int i; 642 int i;
574 643
575 for (i = 0; i < GFAR_NUM_IRQS; i++) { 644 for (i = 0; i < GFAR_NUM_IRQS; i++) {
@@ -598,16 +667,52 @@ static int gfar_parse_group(struct device_node *np,
598 grp->priv = priv; 667 grp->priv = priv;
599 spin_lock_init(&grp->grplock); 668 spin_lock_init(&grp->grplock);
600 if (priv->mode == MQ_MG_MODE) { 669 if (priv->mode == MQ_MG_MODE) {
601 queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL); 670 u32 *rxq_mask, *txq_mask;
602 grp->rx_bit_map = queue_mask ? 671 rxq_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
603 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 672 txq_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
604 queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL); 673
605 grp->tx_bit_map = queue_mask ? 674 if (priv->poll_mode == GFAR_SQ_POLLING) {
606 *queue_mask : (DEFAULT_MAPPING >> priv->num_grps); 675 /* One Q per interrupt group: Q0 to G0, Q1 to G1 */
676 grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
677 grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
678 } else { /* GFAR_MQ_POLLING */
679 grp->rx_bit_map = rxq_mask ?
680 *rxq_mask : (DEFAULT_MAPPING >> priv->num_grps);
681 grp->tx_bit_map = txq_mask ?
682 *txq_mask : (DEFAULT_MAPPING >> priv->num_grps);
683 }
607 } else { 684 } else {
608 grp->rx_bit_map = 0xFF; 685 grp->rx_bit_map = 0xFF;
609 grp->tx_bit_map = 0xFF; 686 grp->tx_bit_map = 0xFF;
610 } 687 }
688
689 /* bit_map's MSB is q0 (from q0 to q7) but for_each_set_bit parses
690 * right to left, so we need to reverse the 8 bits to get the q index
691 */
692 grp->rx_bit_map = bitrev8(grp->rx_bit_map);
693 grp->tx_bit_map = bitrev8(grp->tx_bit_map);
694
695 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
696 * also assign queues to groups
697 */
698 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
699 if (!grp->rx_queue)
700 grp->rx_queue = priv->rx_queue[i];
701 grp->num_rx_queues++;
702 grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
703 priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
704 priv->rx_queue[i]->grp = grp;
705 }
706
707 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
708 if (!grp->tx_queue)
709 grp->tx_queue = priv->tx_queue[i];
710 grp->num_tx_queues++;
711 grp->tstat |= (TSTAT_CLEAR_THALT >> i);
712 priv->tqueue |= (TQUEUE_EN0 >> i);
713 priv->tx_queue[i]->grp = grp;
714 }
715
611 priv->num_grps++; 716 priv->num_grps++;
612 717
613 return 0; 718 return 0;
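The bitrev8() calls exist because the device-tree masks place q0 in the most significant bit of the byte, while for_each_set_bit() scans from bit 0 upward; reversing the byte makes bit i correspond to queue i. A tiny demonstration (bitrev8() is from linux/bitrev.h):

unsigned long map = bitrev8(0x80);	/* DT mask with only q0 set */
int i;

for_each_set_bit(i, &map, 8)
	pr_info("queue %d enabled\n", i);	/* prints: queue 0 enabled */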
@@ -628,13 +733,45 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
628 const u32 *stash_idx; 733 const u32 *stash_idx;
629 unsigned int num_tx_qs, num_rx_qs; 734 unsigned int num_tx_qs, num_rx_qs;
630 u32 *tx_queues, *rx_queues; 735 u32 *tx_queues, *rx_queues;
736 unsigned short mode, poll_mode;
631 737
632 if (!np || !of_device_is_available(np)) 738 if (!np || !of_device_is_available(np))
633 return -ENODEV; 739 return -ENODEV;
634 740
635 /* parse the num of tx and rx queues */ 741 if (of_device_is_compatible(np, "fsl,etsec2")) {
742 mode = MQ_MG_MODE;
743 poll_mode = GFAR_SQ_POLLING;
744 } else {
745 mode = SQ_SG_MODE;
746 poll_mode = GFAR_SQ_POLLING;
747 }
748
749 /* parse the num of HW tx and rx queues */
636 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL); 750 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
637 num_tx_qs = tx_queues ? *tx_queues : 1; 751 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
752
753 if (mode == SQ_SG_MODE) {
754 num_tx_qs = 1;
755 num_rx_qs = 1;
756 } else { /* MQ_MG_MODE */
757 /* get the actual number of supported groups */
758 unsigned int num_grps = of_get_available_child_count(np);
759
760 if (num_grps == 0 || num_grps > MAXGROUPS) {
761 dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
762 num_grps);
763 pr_err("Cannot do alloc_etherdev, aborting\n");
764 return -EINVAL;
765 }
766
767 if (poll_mode == GFAR_SQ_POLLING) {
768 num_tx_qs = num_grps; /* one txq per int group */
769 num_rx_qs = num_grps; /* one rxq per int group */
770 } else { /* GFAR_MQ_POLLING */
771 num_tx_qs = tx_queues ? *tx_queues : 1;
772 num_rx_qs = rx_queues ? *rx_queues : 1;
773 }
774 }
638 775
639 if (num_tx_qs > MAX_TX_QS) { 776 if (num_tx_qs > MAX_TX_QS) {
640 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n", 777 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
@@ -643,9 +780,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
643 return -EINVAL; 780 return -EINVAL;
644 } 781 }
645 782
646 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
647 num_rx_qs = rx_queues ? *rx_queues : 1;
648
649 if (num_rx_qs > MAX_RX_QS) { 783 if (num_rx_qs > MAX_RX_QS) {
650 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n", 784 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
651 num_rx_qs, MAX_RX_QS); 785 num_rx_qs, MAX_RX_QS);
@@ -661,10 +795,20 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
661 priv = netdev_priv(dev); 795 priv = netdev_priv(dev);
662 priv->ndev = dev; 796 priv->ndev = dev;
663 797
798 priv->mode = mode;
799 priv->poll_mode = poll_mode;
800
664 priv->num_tx_queues = num_tx_qs; 801 priv->num_tx_queues = num_tx_qs;
665 netif_set_real_num_rx_queues(dev, num_rx_qs); 802 netif_set_real_num_rx_queues(dev, num_rx_qs);
666 priv->num_rx_queues = num_rx_qs; 803 priv->num_rx_queues = num_rx_qs;
667 priv->num_grps = 0x0; 804
805 err = gfar_alloc_tx_queues(priv);
806 if (err)
807 goto tx_alloc_failed;
808
809 err = gfar_alloc_rx_queues(priv);
810 if (err)
811 goto rx_alloc_failed;
668 812
669 /* Init Rx queue filer rule set linked list */ 813 /* Init Rx queue filer rule set linked list */
670 INIT_LIST_HEAD(&priv->rx_list.list); 814 INIT_LIST_HEAD(&priv->rx_list.list);
@@ -677,52 +821,18 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
677 priv->gfargrp[i].regs = NULL; 821 priv->gfargrp[i].regs = NULL;
678 822
679 /* Parse and initialize group specific information */ 823 /* Parse and initialize group specific information */
680 if (of_device_is_compatible(np, "fsl,etsec2")) { 824 if (priv->mode == MQ_MG_MODE) {
681 priv->mode = MQ_MG_MODE;
682 for_each_child_of_node(np, child) { 825 for_each_child_of_node(np, child) {
683 err = gfar_parse_group(child, priv, model); 826 err = gfar_parse_group(child, priv, model);
684 if (err) 827 if (err)
685 goto err_grp_init; 828 goto err_grp_init;
686 } 829 }
687 } else { 830 } else { /* SQ_SG_MODE */
688 priv->mode = SQ_SG_MODE;
689 err = gfar_parse_group(np, priv, model); 831 err = gfar_parse_group(np, priv, model);
690 if (err) 832 if (err)
691 goto err_grp_init; 833 goto err_grp_init;
692 } 834 }
693 835
694 for (i = 0; i < priv->num_tx_queues; i++)
695 priv->tx_queue[i] = NULL;
696 for (i = 0; i < priv->num_rx_queues; i++)
697 priv->rx_queue[i] = NULL;
698
699 for (i = 0; i < priv->num_tx_queues; i++) {
700 priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
701 GFP_KERNEL);
702 if (!priv->tx_queue[i]) {
703 err = -ENOMEM;
704 goto tx_alloc_failed;
705 }
706 priv->tx_queue[i]->tx_skbuff = NULL;
707 priv->tx_queue[i]->qindex = i;
708 priv->tx_queue[i]->dev = dev;
709 spin_lock_init(&(priv->tx_queue[i]->txlock));
710 }
711
712 for (i = 0; i < priv->num_rx_queues; i++) {
713 priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
714 GFP_KERNEL);
715 if (!priv->rx_queue[i]) {
716 err = -ENOMEM;
717 goto rx_alloc_failed;
718 }
719 priv->rx_queue[i]->rx_skbuff = NULL;
720 priv->rx_queue[i]->qindex = i;
721 priv->rx_queue[i]->dev = dev;
722 spin_lock_init(&(priv->rx_queue[i]->rxlock));
723 }
724
725
726 stash = of_get_property(np, "bd-stash", NULL); 836 stash = of_get_property(np, "bd-stash", NULL);
727 837
728 if (stash) { 838 if (stash) {
@@ -749,17 +859,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
749 memcpy(dev->dev_addr, mac_addr, ETH_ALEN); 859 memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
750 860
751 if (model && !strcasecmp(model, "TSEC")) 861 if (model && !strcasecmp(model, "TSEC"))
752 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 862 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
753 FSL_GIANFAR_DEV_HAS_COALESCE | 863 FSL_GIANFAR_DEV_HAS_COALESCE |
754 FSL_GIANFAR_DEV_HAS_RMON | 864 FSL_GIANFAR_DEV_HAS_RMON |
755 FSL_GIANFAR_DEV_HAS_MULTI_INTR; 865 FSL_GIANFAR_DEV_HAS_MULTI_INTR;
756 866
757 if (model && !strcasecmp(model, "eTSEC")) 867 if (model && !strcasecmp(model, "eTSEC"))
758 priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT | 868 priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
759 FSL_GIANFAR_DEV_HAS_COALESCE | 869 FSL_GIANFAR_DEV_HAS_COALESCE |
760 FSL_GIANFAR_DEV_HAS_RMON | 870 FSL_GIANFAR_DEV_HAS_RMON |
761 FSL_GIANFAR_DEV_HAS_MULTI_INTR | 871 FSL_GIANFAR_DEV_HAS_MULTI_INTR |
762 FSL_GIANFAR_DEV_HAS_PADDING |
763 FSL_GIANFAR_DEV_HAS_CSUM | 872 FSL_GIANFAR_DEV_HAS_CSUM |
764 FSL_GIANFAR_DEV_HAS_VLAN | 873 FSL_GIANFAR_DEV_HAS_VLAN |
765 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 874 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +893,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
784 893
785 return 0; 894 return 0;
786 895
787rx_alloc_failed:
788 free_rx_pointers(priv);
789tx_alloc_failed:
790 free_tx_pointers(priv);
791err_grp_init: 896err_grp_init:
792 unmap_group_regs(priv); 897 unmap_group_regs(priv);
898rx_alloc_failed:
899 gfar_free_rx_queues(priv);
900tx_alloc_failed:
901 gfar_free_tx_queues(priv);
793 free_gfar_dev(priv); 902 free_gfar_dev(priv);
794 return err; 903 return err;
795} 904}
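The relocated labels restore the usual goto-ladder rule: unwind in the reverse order of setup, each label undoing only what had already succeeded when the failure hit. Condensed from the surrounding code:

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = gfar_parse_group(np, priv, model);	/* last setup step */
	if (err)
		goto err_grp_init;

	return 0;

err_grp_init:
	unmap_group_regs(priv);		/* undo group parsing */
rx_alloc_failed:
	gfar_free_rx_queues(priv);	/* then the rx queues */
tx_alloc_failed:
	gfar_free_tx_queues(priv);	/* then the tx queues */
	free_gfar_dev(priv);
	return err;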
@@ -822,18 +931,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
822 switch (config.rx_filter) { 931 switch (config.rx_filter) {
823 case HWTSTAMP_FILTER_NONE: 932 case HWTSTAMP_FILTER_NONE:
824 if (priv->hwts_rx_en) { 933 if (priv->hwts_rx_en) {
825 stop_gfar(netdev);
826 priv->hwts_rx_en = 0; 934 priv->hwts_rx_en = 0;
827 startup_gfar(netdev); 935 reset_gfar(netdev);
828 } 936 }
829 break; 937 break;
830 default: 938 default:
831 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) 939 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
832 return -ERANGE; 940 return -ERANGE;
833 if (!priv->hwts_rx_en) { 941 if (!priv->hwts_rx_en) {
834 stop_gfar(netdev);
835 priv->hwts_rx_en = 1; 942 priv->hwts_rx_en = 1;
836 startup_gfar(netdev); 943 reset_gfar(netdev);
837 } 944 }
838 config.rx_filter = HWTSTAMP_FILTER_ALL; 945 config.rx_filter = HWTSTAMP_FILTER_ALL;
839 break; 946 break;
@@ -875,19 +982,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
875 return phy_mii_ioctl(priv->phydev, rq, cmd); 982 return phy_mii_ioctl(priv->phydev, rq, cmd);
876} 983}
877 984
878static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
879{
880 unsigned int new_bit_map = 0x0;
881 int mask = 0x1 << (max_qs - 1), i;
882
883 for (i = 0; i < max_qs; i++) {
884 if (bit_map & mask)
885 new_bit_map = new_bit_map + (1 << i);
886 mask = mask >> 0x1;
887 }
888 return new_bit_map;
889}
890
891static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar, 985static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
892 u32 class) 986 u32 class)
893{ 987{
@@ -1005,99 +1099,140 @@ static void gfar_detect_errata(struct gfar_private *priv)
1005 priv->errata); 1099 priv->errata);
1006} 1100}
1007 1101
1008/* Set up the ethernet device structure, private data, 1102void gfar_mac_reset(struct gfar_private *priv)
1009 * and anything else we need before we start
1010 */
1011static int gfar_probe(struct platform_device *ofdev)
1012{ 1103{
1104 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1013 u32 tempval; 1105 u32 tempval;
1014 struct net_device *dev = NULL;
1015 struct gfar_private *priv = NULL;
1016 struct gfar __iomem *regs = NULL;
1017 int err = 0, i, grp_idx = 0;
1018 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
1019 u32 isrg = 0;
1020 u32 __iomem *baddr;
1021
1022 err = gfar_of_init(ofdev, &dev);
1023
1024 if (err)
1025 return err;
1026
1027 priv = netdev_priv(dev);
1028 priv->ndev = dev;
1029 priv->ofdev = ofdev;
1030 priv->dev = &ofdev->dev;
1031 SET_NETDEV_DEV(dev, &ofdev->dev);
1032
1033 spin_lock_init(&priv->bflock);
1034 INIT_WORK(&priv->reset_task, gfar_reset_task);
1035
1036 platform_set_drvdata(ofdev, priv);
1037 regs = priv->gfargrp[0].regs;
1038
1039 gfar_detect_errata(priv);
1040
1041 /* Stop the DMA engine now, in case it was running before
1042 * (The firmware could have used it, and left it running).
1043 */
1044 gfar_halt(dev);
1045 1106
1046 /* Reset MAC layer */ 1107 /* Reset MAC layer */
1047 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET); 1108 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
1048 1109
1049 /* We need to delay at least 3 TX clocks */ 1110 /* We need to delay at least 3 TX clocks */
1050 udelay(2); 1111 udelay(3);
1051 1112
1052 tempval = 0;
1053 if (!priv->pause_aneg_en && priv->tx_pause_en)
1054 tempval |= MACCFG1_TX_FLOW;
1055 if (!priv->pause_aneg_en && priv->rx_pause_en)
1056 tempval |= MACCFG1_RX_FLOW;
1057 /* the soft reset bit is not self-resetting, so we need to 1113 /* the soft reset bit is not self-resetting, so we need to
1058 * clear it before resuming normal operation 1114 * clear it before resuming normal operation
1059 */ 1115 */
1060 gfar_write(&regs->maccfg1, tempval); 1116 gfar_write(&regs->maccfg1, 0);
1117
1118 udelay(3);
1119
1120 /* Compute rx_buff_size based on config flags */
1121 gfar_rx_buff_size_config(priv);
1122
1123 /* Initialize the max receive frame/buffer lengths */
1124 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1125 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1126
1127 /* Initialize the Minimum Frame Length Register */
1128 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1061 1129
1062 /* Initialize MACCFG2. */ 1130 /* Initialize MACCFG2. */
1063 tempval = MACCFG2_INIT_SETTINGS; 1131 tempval = MACCFG2_INIT_SETTINGS;
1064 if (gfar_has_errata(priv, GFAR_ERRATA_74)) 1132
1133 /* If the mtu is larger than the max size for standard
1134 * ethernet frames (ie, a jumbo frame), then set maccfg2
1135 * to allow huge frames, and to check the length
1136 */
1137 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
1138 gfar_has_errata(priv, GFAR_ERRATA_74))
1065 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK; 1139 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
1140
1066 gfar_write(&regs->maccfg2, tempval); 1141 gfar_write(&regs->maccfg2, tempval);
1067 1142
1143 /* Clear mac addr hash registers */
1144 gfar_write(&regs->igaddr0, 0);
1145 gfar_write(&regs->igaddr1, 0);
1146 gfar_write(&regs->igaddr2, 0);
1147 gfar_write(&regs->igaddr3, 0);
1148 gfar_write(&regs->igaddr4, 0);
1149 gfar_write(&regs->igaddr5, 0);
1150 gfar_write(&regs->igaddr6, 0);
1151 gfar_write(&regs->igaddr7, 0);
1152
1153 gfar_write(&regs->gaddr0, 0);
1154 gfar_write(&regs->gaddr1, 0);
1155 gfar_write(&regs->gaddr2, 0);
1156 gfar_write(&regs->gaddr3, 0);
1157 gfar_write(&regs->gaddr4, 0);
1158 gfar_write(&regs->gaddr5, 0);
1159 gfar_write(&regs->gaddr6, 0);
1160 gfar_write(&regs->gaddr7, 0);
1161
1162 if (priv->extended_hash)
1163 gfar_clear_exact_match(priv->ndev);
1164
1165 gfar_mac_rx_config(priv);
1166
1167 gfar_mac_tx_config(priv);
1168
1169 gfar_set_mac_address(priv->ndev);
1170
1171 gfar_set_multi(priv->ndev);
1172
1173 /* clear ievent and imask before configuring coalescing */
1174 gfar_ints_disable(priv);
1175
1176 /* Configure the coalescing support */
1177 gfar_configure_coalescing_all(priv);
1178}
1179
1180static void gfar_hw_init(struct gfar_private *priv)
1181{
1182 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1183 u32 attrs;
1184
1185 /* Stop the DMA engine now, in case it was running before
1186 * (The firmware could have used it, and left it running).
1187 */
1188 gfar_halt(priv);
1189
1190 gfar_mac_reset(priv);
1191
1192 /* Zero out the rmon mib registers if it has them */
1193 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1194 memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
1195
1196 /* Mask off the CAM interrupts */
1197 gfar_write(&regs->rmon.cam1, 0xffffffff);
1198 gfar_write(&regs->rmon.cam2, 0xffffffff);
1199 }
1200
1068 /* Initialize ECNTRL */ 1201 /* Initialize ECNTRL */
1069 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS); 1202 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
1070 1203
1071 /* Set the dev->base_addr to the gfar reg region */ 1204 /* Set the extraction length and index */
1072 dev->base_addr = (unsigned long) regs; 1205 attrs = ATTRELI_EL(priv->rx_stash_size) |
1206 ATTRELI_EI(priv->rx_stash_index);
1073 1207
1074 /* Fill in the dev structure */ 1208 gfar_write(&regs->attreli, attrs);
1075 dev->watchdog_timeo = TX_TIMEOUT;
1076 dev->mtu = 1500;
1077 dev->netdev_ops = &gfar_netdev_ops;
1078 dev->ethtool_ops = &gfar_ethtool_ops;
1079 1209
1080 /* Register for napi ...We are registering NAPI for each grp */ 1210 /* Start with defaults, and add stashing
1081 if (priv->mode == SQ_SG_MODE) 1211 * depending on driver parameters
1082 netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq, 1212 */
1083 GFAR_DEV_WEIGHT); 1213 attrs = ATTR_INIT_SETTINGS;
1084 else
1085 for (i = 0; i < priv->num_grps; i++)
1086 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
1087 GFAR_DEV_WEIGHT);
1088 1214
1089 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 1215 if (priv->bd_stash_en)
1090 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | 1216 attrs |= ATTR_BDSTASH;
1091 NETIF_F_RXCSUM;
1092 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1093 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1094 }
1095 1217
1096 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { 1218 if (priv->rx_stash_size != 0)
1097 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | 1219 attrs |= ATTR_BUFSTASH;
1098 NETIF_F_HW_VLAN_CTAG_RX; 1220
1099 dev->features |= NETIF_F_HW_VLAN_CTAG_RX; 1221 gfar_write(&regs->attr, attrs);
1100 } 1222
1223 /* FIFO configs */
1224 gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
1225 gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
1226 gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
1227
1228 /* Program the interrupt steering regs, only for MG devices */
1229 if (priv->num_grps > 1)
1230 gfar_write_isrg(priv);
1231}
1232
1233static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
1234{
1235 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1101 1236
1102 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { 1237 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
1103 priv->extended_hash = 1; 1238 priv->extended_hash = 1;
@@ -1133,68 +1268,81 @@ static int gfar_probe(struct platform_device *ofdev)
1133 priv->hash_regs[6] = &regs->gaddr6; 1268 priv->hash_regs[6] = &regs->gaddr6;
1134 priv->hash_regs[7] = &regs->gaddr7; 1269 priv->hash_regs[7] = &regs->gaddr7;
1135 } 1270 }
1271}
1136 1272
1137 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 1273/* Set up the ethernet device structure, private data,
1138 priv->padding = DEFAULT_PADDING; 1274 * and anything else we need before we start
1139 else 1275 */
1140 priv->padding = 0; 1276static int gfar_probe(struct platform_device *ofdev)
1277{
1278 struct net_device *dev = NULL;
1279 struct gfar_private *priv = NULL;
1280 int err = 0, i;
1141 1281
1142 if (dev->features & NETIF_F_IP_CSUM || 1282 err = gfar_of_init(ofdev, &dev);
1143 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1144 dev->needed_headroom = GMAC_FCB_LEN;
1145 1283
1146 /* Program the isrg regs only if number of grps > 1 */ 1284 if (err)
1147 if (priv->num_grps > 1) { 1285 return err;
1148 baddr = &regs->isrg0; 1286
1149 for (i = 0; i < priv->num_grps; i++) { 1287 priv = netdev_priv(dev);
1150 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX); 1288 priv->ndev = dev;
1151 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX); 1289 priv->ofdev = ofdev;
1152 gfar_write(baddr, isrg); 1290 priv->dev = &ofdev->dev;
1153 baddr++; 1291 SET_NETDEV_DEV(dev, &ofdev->dev);
1154 isrg = 0x0; 1292
1293 spin_lock_init(&priv->bflock);
1294 INIT_WORK(&priv->reset_task, gfar_reset_task);
1295
1296 platform_set_drvdata(ofdev, priv);
1297
1298 gfar_detect_errata(priv);
1299
1300 /* Set the dev->base_addr to the gfar reg region */
1301 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
1302
1303 /* Fill in the dev structure */
1304 dev->watchdog_timeo = TX_TIMEOUT;
1305 dev->mtu = 1500;
1306 dev->netdev_ops = &gfar_netdev_ops;
1307 dev->ethtool_ops = &gfar_ethtool_ops;
1308
1309 /* Register for napi ...We are registering NAPI for each grp */
1310 for (i = 0; i < priv->num_grps; i++) {
1311 if (priv->poll_mode == GFAR_SQ_POLLING) {
1312 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1313 gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
1314 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1315 gfar_poll_tx_sq, 2);
1316 } else {
1317 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
1318 gfar_poll_rx, GFAR_DEV_WEIGHT);
1319 netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
1320 gfar_poll_tx, 2);
1155 } 1321 }
1156 } 1322 }
1157 1323
1158 /* Need to reverse the bit maps as bit_map's MSB is q0 1324 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
1159 * but, for_each_set_bit parses from right to left, which 1325 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
1160 * basically reverses the queue numbers 1326 NETIF_F_RXCSUM;
1161 */ 1327 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
1162 for (i = 0; i< priv->num_grps; i++) { 1328 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
1163 priv->gfargrp[i].tx_bit_map =
1164 reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
1165 priv->gfargrp[i].rx_bit_map =
1166 reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
1167 } 1329 }
1168 1330
1169 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values, 1331 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
1170 * also assign queues to groups 1332 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
1171 */ 1333 NETIF_F_HW_VLAN_CTAG_RX;
1172 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) { 1334 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
1173 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
1174
1175 for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
1176 priv->num_rx_queues) {
1177 priv->gfargrp[grp_idx].num_rx_queues++;
1178 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
1179 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
1180 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
1181 }
1182 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
1183
1184 for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
1185 priv->num_tx_queues) {
1186 priv->gfargrp[grp_idx].num_tx_queues++;
1187 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
1188 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
1189 tqueue = tqueue | (TQUEUE_EN0 >> i);
1190 }
1191 priv->gfargrp[grp_idx].rstat = rstat;
1192 priv->gfargrp[grp_idx].tstat = tstat;
1193 rstat = tstat =0;
1194 } 1335 }
1195 1336
1196 gfar_write(&regs->rqueue, rqueue); 1337 gfar_init_addr_hash_table(priv);
1197 gfar_write(&regs->tqueue, tqueue); 1338
1339 /* Insert receive time stamps into padding alignment bytes */
1340 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1341 priv->padding = 8;
1342
1343 if (dev->features & NETIF_F_IP_CSUM ||
1344 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
1345 dev->needed_headroom = GMAC_FCB_LEN;
1198 1346
1199 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1347 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
1200 1348
@@ -1220,8 +1368,9 @@ static int gfar_probe(struct platform_device *ofdev)
1220 if (priv->num_tx_queues == 1) 1368 if (priv->num_tx_queues == 1)
1221 priv->prio_sched_en = 1; 1369 priv->prio_sched_en = 1;
1222 1370
1223 /* Carrier starts down, phylib will bring it up */ 1371 set_bit(GFAR_DOWN, &priv->state);
1224 netif_carrier_off(dev); 1372
1373 gfar_hw_init(priv);
1225 1374
1226 err = register_netdev(dev); 1375 err = register_netdev(dev);
1227 1376
@@ -1230,6 +1379,9 @@ static int gfar_probe(struct platform_device *ofdev)
1230 goto register_fail; 1379 goto register_fail;
1231 } 1380 }
1232 1381
1382 /* Carrier starts down, phylib will bring it up */
1383 netif_carrier_off(dev);
1384
1233 device_init_wakeup(&dev->dev, 1385 device_init_wakeup(&dev->dev,
1234 priv->device_flags & 1386 priv->device_flags &
1235 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1387 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1403,6 @@ static int gfar_probe(struct platform_device *ofdev)
1251 /* Initialize the filer table */ 1403 /* Initialize the filer table */
1252 gfar_init_filer_table(priv); 1404 gfar_init_filer_table(priv);
1253 1405
1254 /* Create all the sysfs files */
1255 gfar_init_sysfs(dev);
1256
1257 /* Print out the device info */ 1406 /* Print out the device info */
1258 netdev_info(dev, "mac: %pM\n", dev->dev_addr); 1407 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
1259 1408
@@ -1272,8 +1421,8 @@ static int gfar_probe(struct platform_device *ofdev)
1272 1421
1273register_fail: 1422register_fail:
1274 unmap_group_regs(priv); 1423 unmap_group_regs(priv);
1275 free_tx_pointers(priv); 1424 gfar_free_rx_queues(priv);
1276 free_rx_pointers(priv); 1425 gfar_free_tx_queues(priv);
1277 if (priv->phy_node) 1426 if (priv->phy_node)
1278 of_node_put(priv->phy_node); 1427 of_node_put(priv->phy_node);
1279 if (priv->tbi_node) 1428 if (priv->tbi_node)
@@ -1293,6 +1442,8 @@ static int gfar_remove(struct platform_device *ofdev)
1293 1442
1294 unregister_netdev(priv->ndev); 1443 unregister_netdev(priv->ndev);
1295 unmap_group_regs(priv); 1444 unmap_group_regs(priv);
1445 gfar_free_rx_queues(priv);
1446 gfar_free_tx_queues(priv);
1296 free_gfar_dev(priv); 1447 free_gfar_dev(priv);
1297 1448
1298 return 0; 1449 return 0;
@@ -1318,9 +1469,8 @@ static int gfar_suspend(struct device *dev)
1318 1469
1319 local_irq_save(flags); 1470 local_irq_save(flags);
1320 lock_tx_qs(priv); 1471 lock_tx_qs(priv);
1321 lock_rx_qs(priv);
1322 1472
1323 gfar_halt_nodisable(ndev); 1473 gfar_halt_nodisable(priv);
1324 1474
1325 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1475 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1326 tempval = gfar_read(&regs->maccfg1); 1476 tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1482,6 @@ static int gfar_suspend(struct device *dev)
1332 1482
1333 gfar_write(&regs->maccfg1, tempval); 1483 gfar_write(&regs->maccfg1, tempval);
1334 1484
1335 unlock_rx_qs(priv);
1336 unlock_tx_qs(priv); 1485 unlock_tx_qs(priv);
1337 local_irq_restore(flags); 1486 local_irq_restore(flags);
1338 1487
@@ -1378,15 +1527,13 @@ static int gfar_resume(struct device *dev)
1378 */ 1527 */
1379 local_irq_save(flags); 1528 local_irq_save(flags);
1380 lock_tx_qs(priv); 1529 lock_tx_qs(priv);
1381 lock_rx_qs(priv);
1382 1530
1383 tempval = gfar_read(&regs->maccfg2); 1531 tempval = gfar_read(&regs->maccfg2);
1384 tempval &= ~MACCFG2_MPEN; 1532 tempval &= ~MACCFG2_MPEN;
1385 gfar_write(&regs->maccfg2, tempval); 1533 gfar_write(&regs->maccfg2, tempval);
1386 1534
1387 gfar_start(ndev); 1535 gfar_start(priv);
1388 1536
1389 unlock_rx_qs(priv);
1390 unlock_tx_qs(priv); 1537 unlock_tx_qs(priv);
1391 local_irq_restore(flags); 1538 local_irq_restore(flags);
1392 1539
@@ -1413,10 +1560,11 @@ static int gfar_restore(struct device *dev)
1413 return -ENOMEM; 1560 return -ENOMEM;
1414 } 1561 }
1415 1562
1416 init_registers(ndev); 1563 gfar_mac_reset(priv);
1417 gfar_set_mac_address(ndev); 1564
1418 gfar_init_mac(ndev); 1565 gfar_init_tx_rx_base(priv);
1419 gfar_start(ndev); 1566
1567 gfar_start(priv);
1420 1568
1421 priv->oldlink = 0; 1569 priv->oldlink = 0;
1422 priv->oldspeed = 0; 1570 priv->oldspeed = 0;
@@ -1574,57 +1722,6 @@ static void gfar_configure_serdes(struct net_device *dev)
1574 BMCR_SPEED1000); 1722 BMCR_SPEED1000);
1575} 1723}
1576 1724
1577static void init_registers(struct net_device *dev)
1578{
1579 struct gfar_private *priv = netdev_priv(dev);
1580 struct gfar __iomem *regs = NULL;
1581 int i;
1582
1583 for (i = 0; i < priv->num_grps; i++) {
1584 regs = priv->gfargrp[i].regs;
1585 /* Clear IEVENT */
1586 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1587
1588 /* Initialize IMASK */
1589 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1590 }
1591
1592 regs = priv->gfargrp[0].regs;
1593 /* Init hash registers to zero */
1594 gfar_write(&regs->igaddr0, 0);
1595 gfar_write(&regs->igaddr1, 0);
1596 gfar_write(&regs->igaddr2, 0);
1597 gfar_write(&regs->igaddr3, 0);
1598 gfar_write(&regs->igaddr4, 0);
1599 gfar_write(&regs->igaddr5, 0);
1600 gfar_write(&regs->igaddr6, 0);
1601 gfar_write(&regs->igaddr7, 0);
1602
1603 gfar_write(&regs->gaddr0, 0);
1604 gfar_write(&regs->gaddr1, 0);
1605 gfar_write(&regs->gaddr2, 0);
1606 gfar_write(&regs->gaddr3, 0);
1607 gfar_write(&regs->gaddr4, 0);
1608 gfar_write(&regs->gaddr5, 0);
1609 gfar_write(&regs->gaddr6, 0);
1610 gfar_write(&regs->gaddr7, 0);
1611
1612 /* Zero out the rmon mib registers if it has them */
1613 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
1614 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
1615
1616 /* Mask off the CAM interrupts */
1617 gfar_write(&regs->rmon.cam1, 0xffffffff);
1618 gfar_write(&regs->rmon.cam2, 0xffffffff);
1619 }
1620
1621 /* Initialize the max receive buffer length */
1622 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1623
1624 /* Initialize the Minimum Frame Length Register */
1625 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
1626}
1627
1628static int __gfar_is_rx_idle(struct gfar_private *priv) 1725static int __gfar_is_rx_idle(struct gfar_private *priv)
1629{ 1726{
1630 u32 res; 1727 u32 res;
@@ -1648,23 +1745,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
1648} 1745}
1649 1746
1650/* Halt the receive and transmit queues */ 1747/* Halt the receive and transmit queues */
1651static void gfar_halt_nodisable(struct net_device *dev) 1748static void gfar_halt_nodisable(struct gfar_private *priv)
1652{ 1749{
1653 struct gfar_private *priv = netdev_priv(dev); 1750 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1654 struct gfar __iomem *regs = NULL;
1655 u32 tempval; 1751 u32 tempval;
1656 int i;
1657
1658 for (i = 0; i < priv->num_grps; i++) {
1659 regs = priv->gfargrp[i].regs;
1660 /* Mask all interrupts */
1661 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1662 1752
1663 /* Clear all interrupts */ 1753 gfar_ints_disable(priv);
1664 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1665 }
1666 1754
1667 regs = priv->gfargrp[0].regs;
1668 /* Stop the DMA, and wait for it to stop */ 1755 /* Stop the DMA, and wait for it to stop */
1669 tempval = gfar_read(&regs->dmactrl); 1756 tempval = gfar_read(&regs->dmactrl);
1670 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) != 1757 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1772,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
1685} 1772}
1686 1773
1687/* Halt the receive and transmit queues */ 1774/* Halt the receive and transmit queues */
1688void gfar_halt(struct net_device *dev) 1775void gfar_halt(struct gfar_private *priv)
1689{ 1776{
1690 struct gfar_private *priv = netdev_priv(dev);
1691 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1777 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1692 u32 tempval; 1778 u32 tempval;
1693 1779
1694 gfar_halt_nodisable(dev); 1780 /* Disable the Rx/Tx hw queues */
1781 gfar_write(&regs->rqueue, 0);
1782 gfar_write(&regs->tqueue, 0);
1695 1783
1696 /* Disable Rx and Tx */ 1784 mdelay(10);
1785
1786 gfar_halt_nodisable(priv);
1787
1788 /* Disable Rx/Tx DMA */
1697 tempval = gfar_read(&regs->maccfg1); 1789 tempval = gfar_read(&regs->maccfg1);
1698 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN); 1790 tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
1699 gfar_write(&regs->maccfg1, tempval); 1791 gfar_write(&regs->maccfg1, tempval);
1700} 1792}
1701 1793
1702static void free_grp_irqs(struct gfar_priv_grp *grp)
1703{
1704 free_irq(gfar_irq(grp, TX)->irq, grp);
1705 free_irq(gfar_irq(grp, RX)->irq, grp);
1706 free_irq(gfar_irq(grp, ER)->irq, grp);
1707}
1708
1709void stop_gfar(struct net_device *dev) 1794void stop_gfar(struct net_device *dev)
1710{ 1795{
1711 struct gfar_private *priv = netdev_priv(dev); 1796 struct gfar_private *priv = netdev_priv(dev);
1712 unsigned long flags;
1713 int i;
1714
1715 phy_stop(priv->phydev);
1716 1797
1798 netif_tx_stop_all_queues(dev);
1717 1799
1718 /* Lock it down */ 1800 smp_mb__before_clear_bit();
1719 local_irq_save(flags); 1801 set_bit(GFAR_DOWN, &priv->state);
1720 lock_tx_qs(priv); 1802 smp_mb__after_clear_bit();
1721 lock_rx_qs(priv);
1722 1803
1723 gfar_halt(dev); 1804 disable_napi(priv);
1724 1805
1725 unlock_rx_qs(priv); 1806 /* disable ints and gracefully shut down Rx/Tx DMA */
1726 unlock_tx_qs(priv); 1807 gfar_halt(priv);
1727 local_irq_restore(flags);
1728 1808
1729 /* Free the IRQs */ 1809 phy_stop(priv->phydev);
1730 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1731 for (i = 0; i < priv->num_grps; i++)
1732 free_grp_irqs(&priv->gfargrp[i]);
1733 } else {
1734 for (i = 0; i < priv->num_grps; i++)
1735 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
1736 &priv->gfargrp[i]);
1737 }
1738 1810
1739 free_skb_resources(priv); 1811 free_skb_resources(priv);
1740} 1812}
@@ -1825,17 +1897,15 @@ static void free_skb_resources(struct gfar_private *priv)
1825 priv->tx_queue[0]->tx_bd_dma_base); 1897 priv->tx_queue[0]->tx_bd_dma_base);
1826} 1898}
1827 1899
1828void gfar_start(struct net_device *dev) 1900void gfar_start(struct gfar_private *priv)
1829{ 1901{
1830 struct gfar_private *priv = netdev_priv(dev);
1831 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1902 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1832 u32 tempval; 1903 u32 tempval;
1833 int i = 0; 1904 int i = 0;
1834 1905
1835 /* Enable Rx and Tx in MACCFG1 */ 1906 /* Enable Rx/Tx hw queues */
1836 tempval = gfar_read(&regs->maccfg1); 1907 gfar_write(&regs->rqueue, priv->rqueue);
1837 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN); 1908 gfar_write(&regs->tqueue, priv->tqueue);
1838 gfar_write(&regs->maccfg1, tempval);
1839 1909
1840 /* Initialize DMACTRL to have WWR and WOP */ 1910 /* Initialize DMACTRL to have WWR and WOP */
1841 tempval = gfar_read(&regs->dmactrl); 1911 tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1922,23 @@ void gfar_start(struct net_device *dev)
1852 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1922 /* Clear THLT/RHLT, so that the DMA starts polling now */
1853 gfar_write(&regs->tstat, priv->gfargrp[i].tstat); 1923 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
1854 gfar_write(&regs->rstat, priv->gfargrp[i].rstat); 1924 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
1855 /* Unmask the interrupts we look for */
1856 gfar_write(&regs->imask, IMASK_DEFAULT);
1857 } 1925 }
1858 1926
1859 dev->trans_start = jiffies; /* prevent tx timeout */ 1927 /* Enable Rx/Tx DMA */
1860} 1928 tempval = gfar_read(&regs->maccfg1);
1861 1929 tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
1862static void gfar_configure_coalescing(struct gfar_private *priv, 1930 gfar_write(&regs->maccfg1, tempval);
1863 unsigned long tx_mask, unsigned long rx_mask)
1864{
1865 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1866 u32 __iomem *baddr;
1867
1868 if (priv->mode == MQ_MG_MODE) {
1869 int i = 0;
1870
1871 baddr = &regs->txic0;
1872 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1873 gfar_write(baddr + i, 0);
1874 if (likely(priv->tx_queue[i]->txcoalescing))
1875 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1876 }
1877 1931
1878 baddr = &regs->rxic0; 1932 gfar_ints_enable(priv);
1879 for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
1880 gfar_write(baddr + i, 0);
1881 if (likely(priv->rx_queue[i]->rxcoalescing))
1882 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1883 }
1884 } else {
1885 /* Backward compatible case -- even if we enable
1886 * multiple queues, there's only single reg to program
1887 */
1888 gfar_write(&regs->txic, 0);
1889 if (likely(priv->tx_queue[0]->txcoalescing))
1890 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1891 1933
1892 gfar_write(&regs->rxic, 0); 1934 priv->ndev->trans_start = jiffies; /* prevent tx timeout */
1893 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1894 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1895 }
1896} 1935}
1897 1936
1898void gfar_configure_coalescing_all(struct gfar_private *priv) 1937static void free_grp_irqs(struct gfar_priv_grp *grp)
1899{ 1938{
1900 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1939 free_irq(gfar_irq(grp, TX)->irq, grp);
1940 free_irq(gfar_irq(grp, RX)->irq, grp);
1941 free_irq(gfar_irq(grp, ER)->irq, grp);
1901} 1942}
1902 1943
1903static int register_grp_irqs(struct gfar_priv_grp *grp) 1944static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1997,65 @@ err_irq_fail:
1956 1997
1957} 1998}
1958 1999
1959/* Bring the controller up and running */ 2000static void gfar_free_irq(struct gfar_private *priv)
1960int startup_gfar(struct net_device *ndev)
1961{ 2001{
1962 struct gfar_private *priv = netdev_priv(ndev); 2002 int i;
1963 struct gfar __iomem *regs = NULL;
1964 int err, i, j;
1965 2003
1966 for (i = 0; i < priv->num_grps; i++) { 2004 /* Free the IRQs */
1967 regs = priv->gfargrp[i].regs; 2005 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1968 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 2006 for (i = 0; i < priv->num_grps; i++)
2007 free_grp_irqs(&priv->gfargrp[i]);
2008 } else {
2009 for (i = 0; i < priv->num_grps; i++)
2010 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2011 &priv->gfargrp[i]);
1969 } 2012 }
2013}
1970 2014
1971 regs = priv->gfargrp[0].regs; 2015static int gfar_request_irq(struct gfar_private *priv)
1972 err = gfar_alloc_skb_resources(ndev); 2016{
1973 if (err) 2017 int err, i, j;
1974 return err;
1975
1976 gfar_init_mac(ndev);
1977 2018
1978 for (i = 0; i < priv->num_grps; i++) { 2019 for (i = 0; i < priv->num_grps; i++) {
1979 err = register_grp_irqs(&priv->gfargrp[i]); 2020 err = register_grp_irqs(&priv->gfargrp[i]);
1980 if (err) { 2021 if (err) {
1981 for (j = 0; j < i; j++) 2022 for (j = 0; j < i; j++)
1982 free_grp_irqs(&priv->gfargrp[j]); 2023 free_grp_irqs(&priv->gfargrp[j]);
1983 goto irq_fail; 2024 return err;
1984 } 2025 }
1985 } 2026 }
1986 2027
1987 /* Start the controller */ 2028 return 0;
1988 gfar_start(ndev); 2029}
2030
2031/* Bring the controller up and running */
2032int startup_gfar(struct net_device *ndev)
2033{
2034 struct gfar_private *priv = netdev_priv(ndev);
2035 int err;
2036
2037 gfar_mac_reset(priv);
2038
2039 err = gfar_alloc_skb_resources(ndev);
2040 if (err)
2041 return err;
2042
2043 gfar_init_tx_rx_base(priv);
2044
2045 smp_mb__before_clear_bit();
2046 clear_bit(GFAR_DOWN, &priv->state);
2047 smp_mb__after_clear_bit();
2048
2049 /* Start Rx/Tx DMA and enable the interrupts */
2050 gfar_start(priv);
1989 2051
1990 phy_start(priv->phydev); 2052 phy_start(priv->phydev);
1991 2053
1992 gfar_configure_coalescing_all(priv); 2054 enable_napi(priv);
1993 2055
1994 return 0; 2056 netif_tx_wake_all_queues(ndev);
1995 2057
1996irq_fail: 2058 return 0;
1997 free_skb_resources(priv);
1998 return err;
1999} 2059}
2000 2060
2001/* Called when something needs to use the ethernet device 2061/* Called when something needs to use the ethernet device
@@ -2006,27 +2066,17 @@ static int gfar_enet_open(struct net_device *dev)
2006 struct gfar_private *priv = netdev_priv(dev); 2066 struct gfar_private *priv = netdev_priv(dev);
2007 int err; 2067 int err;
2008 2068
2009 enable_napi(priv);
2010
2011 /* Initialize a bunch of registers */
2012 init_registers(dev);
2013
2014 gfar_set_mac_address(dev);
2015
2016 err = init_phy(dev); 2069 err = init_phy(dev);
2070 if (err)
2071 return err;
2017 2072
2018 if (err) { 2073 err = gfar_request_irq(priv);
2019 disable_napi(priv); 2074 if (err)
2020 return err; 2075 return err;
2021 }
2022 2076
2023 err = startup_gfar(dev); 2077 err = startup_gfar(dev);
2024 if (err) { 2078 if (err)
2025 disable_napi(priv);
2026 return err; 2079 return err;
2027 }
2028
2029 netif_tx_start_all_queues(dev);
2030 2080
2031 device_set_wakeup_enable(&dev->dev, priv->wol_en); 2081 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2032 2082
@@ -2152,13 +2202,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2152 skb_new = skb_realloc_headroom(skb, fcb_len); 2202 skb_new = skb_realloc_headroom(skb, fcb_len);
2153 if (!skb_new) { 2203 if (!skb_new) {
2154 dev->stats.tx_errors++; 2204 dev->stats.tx_errors++;
2155 kfree_skb(skb); 2205 dev_kfree_skb_any(skb);
2156 return NETDEV_TX_OK; 2206 return NETDEV_TX_OK;
2157 } 2207 }
2158 2208
2159 if (skb->sk) 2209 if (skb->sk)
2160 skb_set_owner_w(skb_new, skb->sk); 2210 skb_set_owner_w(skb_new, skb->sk);
2161 consume_skb(skb); 2211 dev_consume_skb_any(skb);
2162 skb = skb_new; 2212 skb = skb_new;
2163 } 2213 }
2164 2214
@@ -2351,8 +2401,6 @@ static int gfar_close(struct net_device *dev)
2351{ 2401{
2352 struct gfar_private *priv = netdev_priv(dev); 2402 struct gfar_private *priv = netdev_priv(dev);
2353 2403
2354 disable_napi(priv);
2355
2356 cancel_work_sync(&priv->reset_task); 2404 cancel_work_sync(&priv->reset_task);
2357 stop_gfar(dev); 2405 stop_gfar(dev);
2358 2406
@@ -2360,7 +2408,7 @@ static int gfar_close(struct net_device *dev)
2360 phy_disconnect(priv->phydev); 2408 phy_disconnect(priv->phydev);
2361 priv->phydev = NULL; 2409 priv->phydev = NULL;
2362 2410
2363 netif_tx_stop_all_queues(dev); 2411 gfar_free_irq(priv);
2364 2412
2365 return 0; 2413 return 0;
2366} 2414}
@@ -2373,77 +2421,9 @@ static int gfar_set_mac_address(struct net_device *dev)
2373 return 0; 2421 return 0;
2374} 2422}
2375 2423
2376/* Check if rx parser should be activated */
2377void gfar_check_rx_parser_mode(struct gfar_private *priv)
2378{
2379 struct gfar __iomem *regs;
2380 u32 tempval;
2381
2382 regs = priv->gfargrp[0].regs;
2383
2384 tempval = gfar_read(&regs->rctrl);
2385 /* If parse is no longer required, then disable parser */
2386 if (tempval & RCTRL_REQ_PARSER) {
2387 tempval |= RCTRL_PRSDEP_INIT;
2388 priv->uses_rxfcb = 1;
2389 } else {
2390 tempval &= ~RCTRL_PRSDEP_INIT;
2391 priv->uses_rxfcb = 0;
2392 }
2393 gfar_write(&regs->rctrl, tempval);
2394}
2395
2396/* Enables and disables VLAN insertion/extraction */
2397void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
2398{
2399 struct gfar_private *priv = netdev_priv(dev);
2400 struct gfar __iomem *regs = NULL;
2401 unsigned long flags;
2402 u32 tempval;
2403
2404 regs = priv->gfargrp[0].regs;
2405 local_irq_save(flags);
2406 lock_rx_qs(priv);
2407
2408 if (features & NETIF_F_HW_VLAN_CTAG_TX) {
2409 /* Enable VLAN tag insertion */
2410 tempval = gfar_read(&regs->tctrl);
2411 tempval |= TCTRL_VLINS;
2412 gfar_write(&regs->tctrl, tempval);
2413 } else {
2414 /* Disable VLAN tag insertion */
2415 tempval = gfar_read(&regs->tctrl);
2416 tempval &= ~TCTRL_VLINS;
2417 gfar_write(&regs->tctrl, tempval);
2418 }
2419
2420 if (features & NETIF_F_HW_VLAN_CTAG_RX) {
2421 /* Enable VLAN tag extraction */
2422 tempval = gfar_read(&regs->rctrl);
2423 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
2424 gfar_write(&regs->rctrl, tempval);
2425 priv->uses_rxfcb = 1;
2426 } else {
2427 /* Disable VLAN tag extraction */
2428 tempval = gfar_read(&regs->rctrl);
2429 tempval &= ~RCTRL_VLEX;
2430 gfar_write(&regs->rctrl, tempval);
2431
2432 gfar_check_rx_parser_mode(priv);
2433 }
2434
2435 gfar_change_mtu(dev, dev->mtu);
2436
2437 unlock_rx_qs(priv);
2438 local_irq_restore(flags);
2439}
2440
2441static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2424static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2442{ 2425{
2443 int tempsize, tempval;
2444 struct gfar_private *priv = netdev_priv(dev); 2426 struct gfar_private *priv = netdev_priv(dev);
2445 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2446 int oldsize = priv->rx_buffer_size;
2447 int frame_size = new_mtu + ETH_HLEN; 2427 int frame_size = new_mtu + ETH_HLEN;
2448 2428
2449 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) { 2429 if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2431,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2451 return -EINVAL; 2431 return -EINVAL;
2452 } 2432 }
2453 2433
2454 if (priv->uses_rxfcb) 2434 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2455 frame_size += GMAC_FCB_LEN; 2435 cpu_relax();
2456 2436
2457 frame_size += priv->padding; 2437 if (dev->flags & IFF_UP)
2458
2459 tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
2460 INCREMENTAL_BUFFER_SIZE;
2461
2462 /* Only stop and start the controller if it isn't already
2463 * stopped, and we changed something
2464 */
2465 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
2466 stop_gfar(dev); 2438 stop_gfar(dev);
2467 2439
2468 priv->rx_buffer_size = tempsize;
2469
2470 dev->mtu = new_mtu; 2440 dev->mtu = new_mtu;
2471 2441
2472 gfar_write(&regs->mrblr, priv->rx_buffer_size); 2442 if (dev->flags & IFF_UP)
2473 gfar_write(&regs->maxfrm, priv->rx_buffer_size); 2443 startup_gfar(dev);
2474 2444
2475 /* If the mtu is larger than the max size for standard 2445 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2476 * ethernet frames (ie, a jumbo frame), then set maccfg2
2477 * to allow huge frames, and to check the length
2478 */
2479 tempval = gfar_read(&regs->maccfg2);
2480 2446
2481 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE || 2447 return 0;
2482 gfar_has_errata(priv, GFAR_ERRATA_74)) 2448}
2483 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2484 else
2485 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
2486 2449
2487 gfar_write(&regs->maccfg2, tempval); 2450void reset_gfar(struct net_device *ndev)
2451{
2452 struct gfar_private *priv = netdev_priv(ndev);
2488 2453
2489 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2454 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2490 startup_gfar(dev); 2455 cpu_relax();
2491 2456
2492 return 0; 2457 stop_gfar(ndev);
2458 startup_gfar(ndev);
2459
2460 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2493} 2461}
2494 2462
2495/* gfar_reset_task gets scheduled when a packet has not been 2463/* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2469,7 @@ static void gfar_reset_task(struct work_struct *work)
2501{ 2469{
2502 struct gfar_private *priv = container_of(work, struct gfar_private, 2470 struct gfar_private *priv = container_of(work, struct gfar_private,
2503 reset_task); 2471 reset_task);
2504 struct net_device *dev = priv->ndev; 2472 reset_gfar(priv->ndev);
2505
2506 if (dev->flags & IFF_UP) {
2507 netif_tx_stop_all_queues(dev);
2508 stop_gfar(dev);
2509 startup_gfar(dev);
2510 netif_tx_start_all_queues(dev);
2511 }
2512
2513 netif_tx_schedule_all(dev);
2514} 2473}
2515 2474
2516static void gfar_timeout(struct net_device *dev) 2475static void gfar_timeout(struct net_device *dev)
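The two hunks above replace the old local_irq_save()/lock_tx_qs() dance with a single GFAR_RESETTING bit that serializes every full reconfiguration path (MTU change, the reset task, and the ethtool paths further down). A minimal userspace analogue of that bit-lock pattern, using only C11 atomics; the names below are illustrative, not driver API:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_flag resetting = ATOMIC_FLAG_INIT;	/* ~ GFAR_RESETTING */

	static void reset_device(void)
	{
		/* Analogue of:
		 *   while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
		 *           cpu_relax();
		 * i.e. spin with acquire semantics until we own the flag.
		 */
		while (atomic_flag_test_and_set_explicit(&resetting,
							 memory_order_acquire))
			;	/* cpu_relax() stand-in */

		puts("stop_gfar + startup_gfar run with exclusive ownership");

		/* clear_bit_unlock() analogue: release the flag. */
		atomic_flag_clear_explicit(&resetting, memory_order_release);
	}

	int main(void)
	{
		reset_device();
		return 0;
	}

In the kernel, test_and_set_bit_lock()/clear_bit_unlock() provide the same acquire/release pairing, so a second resetter spins in cpu_relax() until the first one finishes.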
@@ -2623,8 +2582,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2623 } 2582 }
2624 2583
2625 /* If we freed a buffer, we can restart transmission, if necessary */ 2584 /* If we freed a buffer, we can restart transmission, if necessary */
2626 if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree) 2585 if (tx_queue->num_txbdfree &&
2627 netif_wake_subqueue(dev, tqi); 2586 netif_tx_queue_stopped(txq) &&
2587 !(test_bit(GFAR_DOWN, &priv->state)))
2588 netif_wake_subqueue(priv->ndev, tqi);
2628 2589
2629 /* Update dirty indicators */ 2590 /* Update dirty indicators */
2630 tx_queue->skb_dirtytx = skb_dirtytx; 2591 tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2633,31 +2594,6 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2633 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2594 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2634} 2595}
2635 2596
2636static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
2637{
2638 unsigned long flags;
2639
2640 spin_lock_irqsave(&gfargrp->grplock, flags);
2641 if (napi_schedule_prep(&gfargrp->napi)) {
2642 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
2643 __napi_schedule(&gfargrp->napi);
2644 } else {
2645 /* Clear IEVENT, so interrupts aren't called again
2646 * because of the packets that have already arrived.
2647 */
2648 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
2649 }
2650 spin_unlock_irqrestore(&gfargrp->grplock, flags);
2651
2652}
2653
2654/* Interrupt Handler for Transmit complete */
2655static irqreturn_t gfar_transmit(int irq, void *grp_id)
2656{
2657 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
2658 return IRQ_HANDLED;
2659}
2660
2661static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp, 2597static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
2662 struct sk_buff *skb) 2598 struct sk_buff *skb)
2663{ 2599{
@@ -2728,7 +2664,48 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
2728 2664
2729irqreturn_t gfar_receive(int irq, void *grp_id) 2665irqreturn_t gfar_receive(int irq, void *grp_id)
2730{ 2666{
2731 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id); 2667 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2668 unsigned long flags;
2669 u32 imask;
2670
2671 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2672 spin_lock_irqsave(&grp->grplock, flags);
2673 imask = gfar_read(&grp->regs->imask);
2674 imask &= IMASK_RX_DISABLED;
2675 gfar_write(&grp->regs->imask, imask);
2676 spin_unlock_irqrestore(&grp->grplock, flags);
2677 __napi_schedule(&grp->napi_rx);
2678 } else {
2679 /* Clear IEVENT, so interrupts aren't called again
2680 * because of the packets that have already arrived.
2681 */
2682 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2683 }
2684
2685 return IRQ_HANDLED;
2686}
2687
2688/* Interrupt Handler for Transmit complete */
2689static irqreturn_t gfar_transmit(int irq, void *grp_id)
2690{
2691 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2692 unsigned long flags;
2693 u32 imask;
2694
2695 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2696 spin_lock_irqsave(&grp->grplock, flags);
2697 imask = gfar_read(&grp->regs->imask);
2698 imask &= IMASK_TX_DISABLED;
2699 gfar_write(&grp->regs->imask, imask);
2700 spin_unlock_irqrestore(&grp->grplock, flags);
2701 __napi_schedule(&grp->napi_tx);
2702 } else {
2703 /* Clear IEVENT, so interrupts aren't called again
2704 * because of the packets that have already arrived.
2705 */
2706 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2707 }
2708
2732 return IRQ_HANDLED; 2709 return IRQ_HANDLED;
2733} 2710}
2734 2711
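The split handlers above mask only their own event sources: gfar_receive() does imask &= IMASK_RX_DISABLED and gfar_transmit() does imask &= IMASK_TX_DISABLED, while the matching poll routines restore IMASK_RX_DEFAULT/IMASK_TX_DEFAULT (the masks are defined in the gianfar.h hunk below). A small self-checking sketch of that mask algebra; the bit values here are placeholders, not the hardware-defined IMASK_* positions:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder bit values, for illustration only. */
	#define IMASK_RXFEN0	0x08000000u
	#define IMASK_BSY	0x20000000u
	#define IMASK_TXFEN	0x00800000u
	#define IMASK_TXBEN	0x00400000u
	#define IMASK_EBERR	0x00000400u	/* stands in for the other sources */

	#define IMASK_DEFAULT	(IMASK_RXFEN0 | IMASK_BSY | IMASK_TXFEN | \
				 IMASK_TXBEN | IMASK_EBERR)
	#define IMASK_RX_DEFAULT  (IMASK_RXFEN0 | IMASK_BSY)
	#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)

	int main(void)
	{
		uint32_t imask = IMASK_DEFAULT;

		imask &= IMASK_RX_DISABLED;	/* gfar_receive(): mask Rx only */
		assert(!(imask & IMASK_RX_DEFAULT));		/* Rx gone */
		assert(imask & (IMASK_TXFEN | IMASK_EBERR));	/* rest kept */

		imask |= IMASK_RX_DEFAULT;	/* gfar_poll_rx_sq(): unmask Rx */
		assert(imask == IMASK_DEFAULT);			/* restored */

		printf("imask round-trip ok: 0x%08x\n", imask);
		return 0;
	}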
@@ -2852,7 +2829,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2852 rx_queue->stats.rx_bytes += pkt_len; 2829 rx_queue->stats.rx_bytes += pkt_len;
2853 skb_record_rx_queue(skb, rx_queue->qindex); 2830 skb_record_rx_queue(skb, rx_queue->qindex);
2854 gfar_process_frame(dev, skb, amount_pull, 2831 gfar_process_frame(dev, skb, amount_pull,
2855 &rx_queue->grp->napi); 2832 &rx_queue->grp->napi_rx);
2856 2833
2857 } else { 2834 } else {
2858 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2835 netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2881,66 +2858,81 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2881 return howmany; 2858 return howmany;
2882} 2859}
2883 2860
2884static int gfar_poll_sq(struct napi_struct *napi, int budget) 2861static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2885{ 2862{
2886 struct gfar_priv_grp *gfargrp = 2863 struct gfar_priv_grp *gfargrp =
2887 container_of(napi, struct gfar_priv_grp, napi); 2864 container_of(napi, struct gfar_priv_grp, napi_rx);
2888 struct gfar __iomem *regs = gfargrp->regs; 2865 struct gfar __iomem *regs = gfargrp->regs;
2889 struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0]; 2866 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2890 struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
2891 int work_done = 0; 2867 int work_done = 0;
2892 2868
2893 /* Clear IEVENT, so interrupts aren't called again 2869 /* Clear IEVENT, so interrupts aren't called again
2894 * because of the packets that have already arrived 2870 * because of the packets that have already arrived
2895 */ 2871 */
2896 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2872 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2897
2898 /* run Tx cleanup to completion */
2899 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2900 gfar_clean_tx_ring(tx_queue);
2901 2873
2902 work_done = gfar_clean_rx_ring(rx_queue, budget); 2874 work_done = gfar_clean_rx_ring(rx_queue, budget);
2903 2875
2904 if (work_done < budget) { 2876 if (work_done < budget) {
2877 u32 imask;
2905 napi_complete(napi); 2878 napi_complete(napi);
2906 /* Clear the halt bit in RSTAT */ 2879 /* Clear the halt bit in RSTAT */
2907 gfar_write(&regs->rstat, gfargrp->rstat); 2880 gfar_write(&regs->rstat, gfargrp->rstat);
2908 2881
2909 gfar_write(&regs->imask, IMASK_DEFAULT); 2882 spin_lock_irq(&gfargrp->grplock);
2910 2883 imask = gfar_read(&regs->imask);
2911 /* If we are coalescing interrupts, update the timer 2884 imask |= IMASK_RX_DEFAULT;
2912 * Otherwise, clear it 2885 gfar_write(&regs->imask, imask);
2913 */ 2886 spin_unlock_irq(&gfargrp->grplock);
2914 gfar_write(&regs->txic, 0);
2915 if (likely(tx_queue->txcoalescing))
2916 gfar_write(&regs->txic, tx_queue->txic);
2917
2918 gfar_write(&regs->rxic, 0);
2919 if (unlikely(rx_queue->rxcoalescing))
2920 gfar_write(&regs->rxic, rx_queue->rxic);
2921 } 2887 }
2922 2888
2923 return work_done; 2889 return work_done;
2924} 2890}
2925 2891
2926static int gfar_poll(struct napi_struct *napi, int budget) 2892static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2893{
2894 struct gfar_priv_grp *gfargrp =
2895 container_of(napi, struct gfar_priv_grp, napi_tx);
2896 struct gfar __iomem *regs = gfargrp->regs;
2897 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2898 u32 imask;
2899
2900 /* Clear IEVENT, so interrupts aren't called again
2901 * because of the packets that have already arrived
2902 */
2903 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2904
2905 /* run Tx cleanup to completion */
2906 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2907 gfar_clean_tx_ring(tx_queue);
2908
2909 napi_complete(napi);
2910
2911 spin_lock_irq(&gfargrp->grplock);
2912 imask = gfar_read(&regs->imask);
2913 imask |= IMASK_TX_DEFAULT;
2914 gfar_write(&regs->imask, imask);
2915 spin_unlock_irq(&gfargrp->grplock);
2916
2917 return 0;
2918}
2919
2920static int gfar_poll_rx(struct napi_struct *napi, int budget)
2927{ 2921{
2928 struct gfar_priv_grp *gfargrp = 2922 struct gfar_priv_grp *gfargrp =
2929 container_of(napi, struct gfar_priv_grp, napi); 2923 container_of(napi, struct gfar_priv_grp, napi_rx);
2930 struct gfar_private *priv = gfargrp->priv; 2924 struct gfar_private *priv = gfargrp->priv;
2931 struct gfar __iomem *regs = gfargrp->regs; 2925 struct gfar __iomem *regs = gfargrp->regs;
2932 struct gfar_priv_tx_q *tx_queue = NULL;
2933 struct gfar_priv_rx_q *rx_queue = NULL; 2926 struct gfar_priv_rx_q *rx_queue = NULL;
2934 int work_done = 0, work_done_per_q = 0; 2927 int work_done = 0, work_done_per_q = 0;
2935 int i, budget_per_q = 0; 2928 int i, budget_per_q = 0;
2936 int has_tx_work = 0;
2937 unsigned long rstat_rxf; 2929 unsigned long rstat_rxf;
2938 int num_act_queues; 2930 int num_act_queues;
2939 2931
2940 /* Clear IEVENT, so interrupts aren't called again 2932 /* Clear IEVENT, so interrupts aren't called again
2941 * because of the packets that have already arrived 2933 * because of the packets that have already arrived
2942 */ 2934 */
2943 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2935 gfar_write(&regs->ievent, IEVENT_RX_MASK);
2944 2936
2945 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK; 2937 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2946 2938
@@ -2948,15 +2940,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2948 if (num_act_queues) 2940 if (num_act_queues)
2949 budget_per_q = budget/num_act_queues; 2941 budget_per_q = budget/num_act_queues;
2950 2942
2951 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2952 tx_queue = priv->tx_queue[i];
2953 /* run Tx cleanup to completion */
2954 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2955 gfar_clean_tx_ring(tx_queue);
2956 has_tx_work = 1;
2957 }
2958 }
2959
2960 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2943 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2961 /* skip queue if not active */ 2944 /* skip queue if not active */
2962 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i))) 2945 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
@@ -2979,25 +2962,62 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2979 } 2962 }
2980 } 2963 }
2981 2964
2982 if (!num_act_queues && !has_tx_work) { 2965 if (!num_act_queues) {
2983 2966 u32 imask;
2984 napi_complete(napi); 2967 napi_complete(napi);
2985 2968
2986 /* Clear the halt bit in RSTAT */ 2969 /* Clear the halt bit in RSTAT */
2987 gfar_write(&regs->rstat, gfargrp->rstat); 2970 gfar_write(&regs->rstat, gfargrp->rstat);
2988 2971
2989 gfar_write(&regs->imask, IMASK_DEFAULT); 2972 spin_lock_irq(&gfargrp->grplock);
2990 2973 imask = gfar_read(&regs->imask);
2991 /* If we are coalescing interrupts, update the timer 2974 imask |= IMASK_RX_DEFAULT;
2992 * Otherwise, clear it 2975 gfar_write(&regs->imask, imask);
2993 */ 2976 spin_unlock_irq(&gfargrp->grplock);
2994 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2995 gfargrp->tx_bit_map);
2996 } 2977 }
2997 2978
2998 return work_done; 2979 return work_done;
2999} 2980}
3000 2981
2982static int gfar_poll_tx(struct napi_struct *napi, int budget)
2983{
2984 struct gfar_priv_grp *gfargrp =
2985 container_of(napi, struct gfar_priv_grp, napi_tx);
2986 struct gfar_private *priv = gfargrp->priv;
2987 struct gfar __iomem *regs = gfargrp->regs;
2988 struct gfar_priv_tx_q *tx_queue = NULL;
2989 int has_tx_work = 0;
2990 int i;
2991
2992 /* Clear IEVENT, so interrupts aren't called again
2993 * because of the packets that have already arrived
2994 */
2995 gfar_write(&regs->ievent, IEVENT_TX_MASK);
2996
2997 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2998 tx_queue = priv->tx_queue[i];
2999 /* run Tx cleanup to completion */
3000 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
3001 gfar_clean_tx_ring(tx_queue);
3002 has_tx_work = 1;
3003 }
3004 }
3005
3006 if (!has_tx_work) {
3007 u32 imask;
3008 napi_complete(napi);
3009
3010 spin_lock_irq(&gfargrp->grplock);
3011 imask = gfar_read(&regs->imask);
3012 imask |= IMASK_TX_DEFAULT;
3013 gfar_write(&regs->imask, imask);
3014 spin_unlock_irq(&gfargrp->grplock);
3015 }
3016
3017 return 0;
3018}
3019
3020
3001#ifdef CONFIG_NET_POLL_CONTROLLER 3021#ifdef CONFIG_NET_POLL_CONTROLLER
3002/* Polling 'interrupt' - used by things like netconsole to send skbs 3022/* Polling 'interrupt' - used by things like netconsole to send skbs
3003 * without having to re-enable interrupts. It's not called while 3023 * without having to re-enable interrupts. It's not called while
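Both single-queue poll routines above follow the standard NAPI contract: consume at most budget frames per call, and only when fewer than budget were consumed call napi_complete() and unmask the event sources; returning exactly budget keeps interrupts masked and the poll scheduled. A toy model of that contract (ring depth and budget are invented for the example):

	#include <stdio.h>

	static int ring_pending = 37;	/* pretend 37 frames are queued */

	/* Simplified gfar_poll_rx_sq(): never exceed the budget. */
	static int poll(int budget)
	{
		int work = ring_pending < budget ? ring_pending : budget;

		ring_pending -= work;
		if (work < budget)
			puts("napi_complete + unmask Rx sources");
		return work;
	}

	int main(void)
	{
		int budget = 16, done;

		do {	/* the softirq reschedules while poll() uses the full budget */
			done = poll(budget);
			printf("polled %d, %d left\n", done, ring_pending);
		} while (done == budget);

		return 0;
	}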
@@ -3101,12 +3121,11 @@ static void adjust_link(struct net_device *dev)
3101{ 3121{
3102 struct gfar_private *priv = netdev_priv(dev); 3122 struct gfar_private *priv = netdev_priv(dev);
3103 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3123 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3104 unsigned long flags;
3105 struct phy_device *phydev = priv->phydev; 3124 struct phy_device *phydev = priv->phydev;
3106 int new_state = 0; 3125 int new_state = 0;
3107 3126
3108 local_irq_save(flags); 3127 if (test_bit(GFAR_RESETTING, &priv->state))
3109 lock_tx_qs(priv); 3128 return;
3110 3129
3111 if (phydev->link) { 3130 if (phydev->link) {
3112 u32 tempval1 = gfar_read(&regs->maccfg1); 3131 u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3197,6 @@ static void adjust_link(struct net_device *dev)
3178 3197
3179 if (new_state && netif_msg_link(priv)) 3198 if (new_state && netif_msg_link(priv))
3180 phy_print_status(phydev); 3199 phy_print_status(phydev);
3181 unlock_tx_qs(priv);
3182 local_irq_restore(flags);
3183} 3200}
3184 3201
3185/* Update the hash table based on the current list of multicast 3202/* Update the hash table based on the current list of multicast
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0195cc..84632c569f2c 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -377,8 +377,11 @@ extern const char gfar_driver_version[];
377 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 377 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
378 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 378 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
379 | IMASK_PERR) 379 | IMASK_PERR)
380#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \ 380#define IMASK_RX_DEFAULT (IMASK_RXFEN0 | IMASK_BSY)
381 & IMASK_DEFAULT) 381#define IMASK_TX_DEFAULT (IMASK_TXFEN | IMASK_TXBEN)
382
383#define IMASK_RX_DISABLED ((~(IMASK_RX_DEFAULT)) & IMASK_DEFAULT)
384#define IMASK_TX_DISABLED ((~(IMASK_TX_DEFAULT)) & IMASK_DEFAULT)
382 385
383/* Fifo management */ 386/* Fifo management */
384#define FIFO_TX_THR_MASK 0x01ff 387#define FIFO_TX_THR_MASK 0x01ff
@@ -409,7 +412,9 @@ extern const char gfar_driver_version[];
409 412
410/* This default RIR value directly corresponds 413/* This default RIR value directly corresponds
411 * to the 3-bit hash value generated */ 414 * to the 3-bit hash value generated */
412#define DEFAULT_RIR0 0x05397700 415#define DEFAULT_8RXQ_RIR0 0x05397700
416/* Map even hash values to Q0, and odd ones to Q1 */
417#define DEFAULT_2RXQ_RIR0 0x04104100
413 418
414/* RQFCR register bits */ 419/* RQFCR register bits */
415#define RQFCR_GPI 0x80000000 420#define RQFCR_GPI 0x80000000
@@ -880,7 +885,6 @@ struct gfar {
880#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010 885#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
881#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020 886#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
882#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040 887#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
883#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
884#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 888#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
885#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 889#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
886#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 890#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +896,8 @@ struct gfar {
892#define DEFAULT_MAPPING 0xFF 896#define DEFAULT_MAPPING 0xFF
893#endif 897#endif
894 898
895#define ISRG_SHIFT_TX 0x10 899#define ISRG_RR0 0x80000000
896#define ISRG_SHIFT_RX 0x18 900#define ISRG_TR0 0x00800000
897 901
898/* The same driver can operate in two modes */ 902/* The same driver can operate in two modes */
899/* SQ_SG_MODE: Single Queue Single Group Mode 903/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -905,6 +909,22 @@ enum {
905 MQ_MG_MODE 909 MQ_MG_MODE
906}; 910};
907 911
912/* GFAR_SQ_POLLING: Single Queue NAPI polling mode
913 * The driver supports a single pair of Rx/Tx queues
914 * per interrupt group (Rx/Tx int line). MQ_MG mode
915 * devices have 2 interrupt groups, so the device will
916 * have a total of 2 Tx and 2 Rx queues in this case.
917 * GFAR_MQ_POLLING: Multi Queue NAPI polling mode
918 * The driver supports all 8 Rx and Tx HW queues,
919 * each queue mapped by the Device Tree to one of
920 * the 2 interrupt groups. This mode implies significant
921 * processing overhead (CPU and controller level).
922 */
923enum gfar_poll_mode {
924 GFAR_SQ_POLLING = 0,
925 GFAR_MQ_POLLING
926};
927
908/* 928/*
909 * Per TX queue stats 929 * Per TX queue stats
910 */ 930 */
@@ -966,7 +986,6 @@ struct rx_q_stats {
966 986
967/** 987/**
968 * struct gfar_priv_rx_q - per rx queue structure 988 * struct gfar_priv_rx_q - per rx queue structure
969 * @rxlock: per queue rx spin lock
970 * @rx_skbuff: skb pointers 989 * @rx_skbuff: skb pointers
971 * @skb_currx: currently use skb pointer 990 * @skb_currx: currently use skb pointer
972 * @rx_bd_base: First rx buffer descriptor 991 * @rx_bd_base: First rx buffer descriptor
@@ -979,8 +998,7 @@ struct rx_q_stats {
979 */ 998 */
980 999
981struct gfar_priv_rx_q { 1000struct gfar_priv_rx_q {
982 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES))); 1001 struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
983 struct sk_buff ** rx_skbuff;
984 dma_addr_t rx_bd_dma_base; 1002 dma_addr_t rx_bd_dma_base;
985 struct rxbd8 *rx_bd_base; 1003 struct rxbd8 *rx_bd_base;
986 struct rxbd8 *cur_rx; 1004 struct rxbd8 *cur_rx;
@@ -1016,17 +1034,20 @@ struct gfar_irqinfo {
1016 */ 1034 */
1017 1035
1018struct gfar_priv_grp { 1036struct gfar_priv_grp {
1019 spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES))); 1037 spinlock_t grplock __aligned(SMP_CACHE_BYTES);
1020 struct napi_struct napi; 1038 struct napi_struct napi_rx;
1021 struct gfar_private *priv; 1039 struct napi_struct napi_tx;
1022 struct gfar __iomem *regs; 1040 struct gfar __iomem *regs;
1023 unsigned int rstat; 1041 struct gfar_priv_tx_q *tx_queue;
1024 unsigned long num_rx_queues; 1042 struct gfar_priv_rx_q *rx_queue;
1025 unsigned long rx_bit_map;
1026 /* cacheline 3 */
1027 unsigned int tstat; 1043 unsigned int tstat;
1044 unsigned int rstat;
1045
1046 struct gfar_private *priv;
1028 unsigned long num_tx_queues; 1047 unsigned long num_tx_queues;
1029 unsigned long tx_bit_map; 1048 unsigned long tx_bit_map;
1049 unsigned long num_rx_queues;
1050 unsigned long rx_bit_map;
1030 1051
1031 struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS]; 1052 struct gfar_irqinfo *irqinfo[GFAR_NUM_IRQS];
1032}; 1053};
@@ -1041,6 +1062,11 @@ enum gfar_errata {
1041 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */ 1062 GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
1042}; 1063};
1043 1064
1065enum gfar_dev_state {
1066 GFAR_DOWN = 1,
1067 GFAR_RESETTING
1068};
1069
1044/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1070/* Struct stolen almost completely (and shamelessly) from the FCC enet source
1045 * (Ok, that's not so true anymore, but there is a family resemblance) 1071 * (Ok, that's not so true anymore, but there is a family resemblance)
1046 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1072 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1051,8 +1077,6 @@ enum gfar_errata {
1051 * the buffer descriptor determines the actual condition. 1077 * the buffer descriptor determines the actual condition.
1052 */ 1078 */
1053struct gfar_private { 1079struct gfar_private {
1054 unsigned int num_rx_queues;
1055
1056 struct device *dev; 1080 struct device *dev;
1057 struct net_device *ndev; 1081 struct net_device *ndev;
1058 enum gfar_errata errata; 1082 enum gfar_errata errata;
@@ -1060,6 +1084,7 @@ struct gfar_private {
1060 1084
1061 u16 uses_rxfcb; 1085 u16 uses_rxfcb;
1062 u16 padding; 1086 u16 padding;
1087 u32 device_flags;
1063 1088
1064 /* HW time stamping enabled flag */ 1089 /* HW time stamping enabled flag */
1065 int hwts_rx_en; 1090 int hwts_rx_en;
@@ -1069,10 +1094,12 @@ struct gfar_private {
1069 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS]; 1094 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
1070 struct gfar_priv_grp gfargrp[MAXGROUPS]; 1095 struct gfar_priv_grp gfargrp[MAXGROUPS];
1071 1096
1072 u32 device_flags; 1097 unsigned long state;
1073 1098
1074 unsigned int mode; 1099 unsigned short mode;
1100 unsigned short poll_mode;
1075 unsigned int num_tx_queues; 1101 unsigned int num_tx_queues;
1102 unsigned int num_rx_queues;
1076 unsigned int num_grps; 1103 unsigned int num_grps;
1077 1104
1078 /* Network Statistics */ 1105 /* Network Statistics */
@@ -1113,6 +1140,9 @@ struct gfar_private {
1113 unsigned int total_tx_ring_size; 1140 unsigned int total_tx_ring_size;
1114 unsigned int total_rx_ring_size; 1141 unsigned int total_rx_ring_size;
1115 1142
1143 u32 rqueue;
1144 u32 tqueue;
1145
1116 /* RX per device parameters */ 1146 /* RX per device parameters */
1117 unsigned int rx_stash_size; 1147 unsigned int rx_stash_size;
1118 unsigned int rx_stash_index; 1148 unsigned int rx_stash_index;
@@ -1127,11 +1157,6 @@ struct gfar_private {
1127 u32 __iomem *hash_regs[16]; 1157 u32 __iomem *hash_regs[16];
1128 int hash_width; 1158 int hash_width;
1129 1159
1130 /* global parameters */
1131 unsigned int fifo_threshold;
1132 unsigned int fifo_starve;
1133 unsigned int fifo_starve_off;
1134
1135 /*Filer table*/ 1160 /*Filer table*/
1136 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 1161 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1137 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1]; 1162 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1201,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
1176 *fpr = gfar_read(&regs->rqfpr); 1201 *fpr = gfar_read(&regs->rqfpr);
1177} 1202}
1178 1203
1179void lock_rx_qs(struct gfar_private *priv); 1204static inline void gfar_write_isrg(struct gfar_private *priv)
1180void lock_tx_qs(struct gfar_private *priv); 1205{
1181void unlock_rx_qs(struct gfar_private *priv); 1206 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1182void unlock_tx_qs(struct gfar_private *priv); 1207 u32 __iomem *baddr = &regs->isrg0;
1208 u32 isrg = 0;
1209 int grp_idx, i;
1210
1211 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
1212 struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
1213
1214 for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
1215 isrg |= (ISRG_RR0 >> i);
1216 }
1217
1218 for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
1219 isrg |= (ISRG_TR0 >> i);
1220 }
1221
1222 gfar_write(baddr, isrg);
1223
1224 baddr++;
1225 isrg = 0;
1226 }
1227}
1228
1183irqreturn_t gfar_receive(int irq, void *dev_id); 1229irqreturn_t gfar_receive(int irq, void *dev_id);
1184int startup_gfar(struct net_device *dev); 1230int startup_gfar(struct net_device *dev);
1185void stop_gfar(struct net_device *dev); 1231void stop_gfar(struct net_device *dev);
1186void gfar_halt(struct net_device *dev); 1232void reset_gfar(struct net_device *dev);
1233void gfar_mac_reset(struct gfar_private *priv);
1234void gfar_halt(struct gfar_private *priv);
1235void gfar_start(struct gfar_private *priv);
1187void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable, 1236void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
1188 u32 regnum, u32 read); 1237 u32 regnum, u32 read);
1189void gfar_configure_coalescing_all(struct gfar_private *priv); 1238void gfar_configure_coalescing_all(struct gfar_private *priv);
1190void gfar_init_sysfs(struct net_device *dev);
1191int gfar_set_features(struct net_device *dev, netdev_features_t features); 1239int gfar_set_features(struct net_device *dev, netdev_features_t features);
1192void gfar_check_rx_parser_mode(struct gfar_private *priv);
1193void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
1194 1240
1195extern const struct ethtool_ops gfar_ethtool_ops; 1241extern const struct ethtool_ops gfar_ethtool_ops;
1196 1242
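The new gfar_write_isrg() inline above programs one ISRGn register per interrupt group, steering each Rx/Tx queue's interrupt to its group by shifting the queue index off the ISRG_RR0/ISRG_TR0 anchor bits that replace the old ISRG_SHIFT_* constants. A standalone illustration of the resulting register layout; the example bitmaps are arbitrary:

	#include <stdint.h>
	#include <stdio.h>

	#define ISRG_RR0 0x80000000u	/* Rx queue 0 routing bit */
	#define ISRG_TR0 0x00800000u	/* Tx queue 0 routing bit */

	int main(void)
	{
		/* Say group 0 owns Rx queues {0, 2} and Tx queues {0, 1}. */
		uint32_t rx_bit_map = 0x5, tx_bit_map = 0x3, isrg = 0;

		for (int i = 0; i < 8; i++) {	/* for_each_set_bit() stand-in */
			if (rx_bit_map & (1u << i))
				isrg |= ISRG_RR0 >> i;
			if (tx_bit_map & (1u << i))
				isrg |= ISRG_TR0 >> i;
		}
		printf("ISRG0 = 0x%08x\n", isrg);	/* prints 0xa0c00000 */
		return 0;
	}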
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d234419cc1..891dbee6e6c1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
44 44
45#include "gianfar.h" 45#include "gianfar.h"
46 46
47extern void gfar_start(struct net_device *dev);
48extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
49 int rx_work_limit);
50
51#define GFAR_MAX_COAL_USECS 0xffff 47#define GFAR_MAX_COAL_USECS 0xffff
52#define GFAR_MAX_COAL_FRAMES 0xff 48#define GFAR_MAX_COAL_FRAMES 0xff
53static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, 49static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
364 struct ethtool_coalesce *cvals) 360 struct ethtool_coalesce *cvals)
365{ 361{
366 struct gfar_private *priv = netdev_priv(dev); 362 struct gfar_private *priv = netdev_priv(dev);
367 int i = 0; 363 int i, err = 0;
368 364
369 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 365 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
370 return -EOPNOTSUPP; 366 return -EOPNOTSUPP;
371 367
372 /* Set up rx coalescing */
373 /* As of now, we will enable/disable coalescing for all
374 * queues together in case of eTSEC2, this will be modified
375 * along with the ethtool interface
376 */
377 if ((cvals->rx_coalesce_usecs == 0) ||
378 (cvals->rx_max_coalesced_frames == 0)) {
379 for (i = 0; i < priv->num_rx_queues; i++)
380 priv->rx_queue[i]->rxcoalescing = 0;
381 } else {
382 for (i = 0; i < priv->num_rx_queues; i++)
383 priv->rx_queue[i]->rxcoalescing = 1;
384 }
385
386 if (NULL == priv->phydev) 368 if (NULL == priv->phydev)
387 return -ENODEV; 369 return -ENODEV;
388 370
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
399 return -EINVAL; 381 return -EINVAL;
400 } 382 }
401 383
384 /* Check the bounds of the values */
385 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
386 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
387 GFAR_MAX_COAL_USECS);
388 return -EINVAL;
389 }
390
391 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
392 netdev_info(dev, "Coalescing is limited to %d frames\n",
393 GFAR_MAX_COAL_FRAMES);
394 return -EINVAL;
395 }
396
397 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
398 cpu_relax();
399
400 /* Set up rx coalescing */
401 if ((cvals->rx_coalesce_usecs == 0) ||
402 (cvals->rx_max_coalesced_frames == 0)) {
403 for (i = 0; i < priv->num_rx_queues; i++)
404 priv->rx_queue[i]->rxcoalescing = 0;
405 } else {
406 for (i = 0; i < priv->num_rx_queues; i++)
407 priv->rx_queue[i]->rxcoalescing = 1;
408 }
409
402 for (i = 0; i < priv->num_rx_queues; i++) { 410 for (i = 0; i < priv->num_rx_queues; i++) {
403 priv->rx_queue[i]->rxic = mk_ic_value( 411 priv->rx_queue[i]->rxic = mk_ic_value(
404 cvals->rx_max_coalesced_frames, 412 cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
415 priv->tx_queue[i]->txcoalescing = 1; 423 priv->tx_queue[i]->txcoalescing = 1;
416 } 424 }
417 425
418 /* Check the bounds of the values */
419 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
420 netdev_info(dev, "Coalescing is limited to %d microseconds\n",
421 GFAR_MAX_COAL_USECS);
422 return -EINVAL;
423 }
424
425 if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
426 netdev_info(dev, "Coalescing is limited to %d frames\n",
427 GFAR_MAX_COAL_FRAMES);
428 return -EINVAL;
429 }
430
431 for (i = 0; i < priv->num_tx_queues; i++) { 426 for (i = 0; i < priv->num_tx_queues; i++) {
432 priv->tx_queue[i]->txic = mk_ic_value( 427 priv->tx_queue[i]->txic = mk_ic_value(
433 cvals->tx_max_coalesced_frames, 428 cvals->tx_max_coalesced_frames,
434 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 429 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
435 } 430 }
436 431
437 gfar_configure_coalescing_all(priv); 432 if (dev->flags & IFF_UP) {
433 stop_gfar(dev);
434 err = startup_gfar(dev);
435 } else {
436 gfar_mac_reset(priv);
437 }
438
439 clear_bit_unlock(GFAR_RESETTING, &priv->state);
438 440
439 return 0; 441 return err;
440} 442}
441 443
442/* Fills in rvals with the current ring parameters. Currently, 444/* Fills in rvals with the current ring parameters. Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
467} 469}
468 470
469/* Change the current ring parameters, stopping the controller if 471/* Change the current ring parameters, stopping the controller if
470 * necessary so that we don't mess things up while we're in 472 * necessary so that we don't mess things up while we're in motion.
471 * motion. We wait for the ring to be clean before reallocating
472 * the rings.
473 */ 473 */
474static int gfar_sringparam(struct net_device *dev, 474static int gfar_sringparam(struct net_device *dev,
475 struct ethtool_ringparam *rvals) 475 struct ethtool_ringparam *rvals)
476{ 476{
477 struct gfar_private *priv = netdev_priv(dev); 477 struct gfar_private *priv = netdev_priv(dev);
478 int err = 0, i = 0; 478 int err = 0, i;
479 479
480 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) 480 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
481 return -EINVAL; 481 return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
493 return -EINVAL; 493 return -EINVAL;
494 } 494 }
495 495
496 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
497 cpu_relax();
496 498
497 if (dev->flags & IFF_UP) { 499 if (dev->flags & IFF_UP)
498 unsigned long flags;
499
500 /* Halt TX and RX, and process the frames which
501 * have already been received
502 */
503 local_irq_save(flags);
504 lock_tx_qs(priv);
505 lock_rx_qs(priv);
506
507 gfar_halt(dev);
508
509 unlock_rx_qs(priv);
510 unlock_tx_qs(priv);
511 local_irq_restore(flags);
512
513 for (i = 0; i < priv->num_rx_queues; i++)
514 gfar_clean_rx_ring(priv->rx_queue[i],
515 priv->rx_queue[i]->rx_ring_size);
516
517 /* Now we take down the rings to rebuild them */
518 stop_gfar(dev); 500 stop_gfar(dev);
519 }
520 501
521 /* Change the size */ 502 /* Change the sizes */
522 for (i = 0; i < priv->num_rx_queues; i++) { 503 for (i = 0; i < priv->num_rx_queues; i++)
523 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending; 504 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
505
506 for (i = 0; i < priv->num_tx_queues; i++)
524 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending; 507 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
525 priv->tx_queue[i]->num_txbdfree =
526 priv->tx_queue[i]->tx_ring_size;
527 }
528 508
529 /* Rebuild the rings with the new size */ 509 /* Rebuild the rings with the new size */
530 if (dev->flags & IFF_UP) { 510 if (dev->flags & IFF_UP)
531 err = startup_gfar(dev); 511 err = startup_gfar(dev);
532 netif_tx_wake_all_queues(dev); 512
533 } 513 clear_bit_unlock(GFAR_RESETTING, &priv->state);
514
534 return err; 515 return err;
535} 516}
536 517
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
608 589
609int gfar_set_features(struct net_device *dev, netdev_features_t features) 590int gfar_set_features(struct net_device *dev, netdev_features_t features)
610{ 591{
611 struct gfar_private *priv = netdev_priv(dev);
612 unsigned long flags;
613 int err = 0, i = 0;
614 netdev_features_t changed = dev->features ^ features; 592 netdev_features_t changed = dev->features ^ features;
593 struct gfar_private *priv = netdev_priv(dev);
594 int err = 0;
615 595
616 if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX)) 596 if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
617 gfar_vlan_mode(dev, features); 597 NETIF_F_RXCSUM)))
618
619 if (!(changed & NETIF_F_RXCSUM))
620 return 0; 598 return 0;
621 599
622 if (dev->flags & IFF_UP) { 600 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
623 /* Halt TX and RX, and process the frames which 601 cpu_relax();
624 * have already been received
625 */
626 local_irq_save(flags);
627 lock_tx_qs(priv);
628 lock_rx_qs(priv);
629
630 gfar_halt(dev);
631 602
632 unlock_tx_qs(priv); 603 dev->features = features;
633 unlock_rx_qs(priv);
634 local_irq_restore(flags);
635
636 for (i = 0; i < priv->num_rx_queues; i++)
637 gfar_clean_rx_ring(priv->rx_queue[i],
638 priv->rx_queue[i]->rx_ring_size);
639 604
605 if (dev->flags & IFF_UP) {
640 /* Now we take down the rings to rebuild them */ 606 /* Now we take down the rings to rebuild them */
641 stop_gfar(dev); 607 stop_gfar(dev);
642
643 dev->features = features;
644
645 err = startup_gfar(dev); 608 err = startup_gfar(dev);
646 netif_tx_wake_all_queues(dev); 609 } else {
610 gfar_mac_reset(priv);
647 } 611 }
612
613 clear_bit_unlock(GFAR_RESETTING, &priv->state);
614
648 return err; 615 return err;
649} 616}
650 617
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1610 if (tab->index > MAX_FILER_IDX - 1) 1577 if (tab->index > MAX_FILER_IDX - 1)
1611 return -EBUSY; 1578 return -EBUSY;
1612 1579
1613 /* Avoid inconsistent filer table to be processed */
1614 lock_rx_qs(priv);
1615
1616 /* Fill regular entries */ 1580 /* Fill regular entries */
1617 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); 1581 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
1618 i++) 1582 i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1625 */ 1589 */
1626 gfar_write_filer(priv, i, 0x20, 0x0); 1590 gfar_write_filer(priv, i, 0x20, 0x0);
1627 1591
1628 unlock_rx_qs(priv);
1629
1630 return 0; 1592 return 0;
1631} 1593}
1632 1594
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1831 struct gfar_private *priv = netdev_priv(dev); 1793 struct gfar_private *priv = netdev_priv(dev);
1832 int ret = 0; 1794 int ret = 0;
1833 1795
1796 if (test_bit(GFAR_RESETTING, &priv->state))
1797 return -EBUSY;
1798
1834 mutex_lock(&priv->rx_queue_access); 1799 mutex_lock(&priv->rx_queue_access);
1835 1800
1836 switch (cmd->cmd) { 1801 switch (cmd->cmd) {
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index abc28da27042..bb568006f37d 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -414,6 +414,7 @@ static struct ptp_clock_info ptp_gianfar_caps = {
414 .n_alarm = 0, 414 .n_alarm = 0,
415 .n_ext_ts = N_EXT_TS, 415 .n_ext_ts = N_EXT_TS,
416 .n_per_out = 0, 416 .n_per_out = 0,
417 .n_pins = 0,
417 .pps = 1, 418 .pps = 1,
418 .adjfreq = ptp_gianfar_adjfreq, 419 .adjfreq = ptp_gianfar_adjfreq,
419 .adjtime = ptp_gianfar_adjtime, 420 .adjtime = ptp_gianfar_adjtime,
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd1378751..000000000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
-				  struct device_attribute *attr, char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
-				 struct device_attribute *attr,
-				 const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	int new_setting = 0;
-	u32 temp;
-	unsigned long flags;
-
-	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
-		return count;
-
-
-	/* Find out the new setting */
-	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
-		new_setting = 1;
-	else if (!strncmp("off", buf, count - 1) ||
-		 !strncmp("0", buf, count - 1))
-		new_setting = 0;
-	else
-		return count;
-
-
-	local_irq_save(flags);
-	lock_rx_qs(priv);
-
-	/* Set the new stashing value */
-	priv->bd_stash_en = new_setting;
-
-	temp = gfar_read(&regs->attr);
-
-	if (new_setting)
-		temp |= ATTR_BDSTASH;
-	else
-		temp &= ~(ATTR_BDSTASH);
-
-	gfar_write(&regs->attr, temp);
-
-	unlock_rx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
-				       struct device_attribute *attr, char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
-				      struct device_attribute *attr,
-				      const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned int length = simple_strtoul(buf, NULL, 0);
-	u32 temp;
-	unsigned long flags;
-
-	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
-		return count;
-
-	local_irq_save(flags);
-	lock_rx_qs(priv);
-
-	if (length > priv->rx_buffer_size)
-		goto out;
-
-	if (length == priv->rx_stash_size)
-		goto out;
-
-	priv->rx_stash_size = length;
-
-	temp = gfar_read(&regs->attreli);
-	temp &= ~ATTRELI_EL_MASK;
-	temp |= ATTRELI_EL(length);
-	gfar_write(&regs->attreli, temp);
-
-	/* Turn stashing on/off as appropriate */
-	temp = gfar_read(&regs->attr);
-
-	if (length)
-		temp |= ATTR_BUFSTASH;
-	else
-		temp &= ~(ATTR_BUFSTASH);
-
-	gfar_write(&regs->attr, temp);
-
-out:
-	unlock_rx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
-		   gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned short index = simple_strtoul(buf, NULL, 0);
-	u32 temp;
-	unsigned long flags;
-
-	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
-		return count;
-
-	local_irq_save(flags);
-	lock_rx_qs(priv);
-
-	if (index > priv->rx_stash_size)
-		goto out;
-
-	if (index == priv->rx_stash_index)
-		goto out;
-
-	priv->rx_stash_index = index;
-
-	temp = gfar_read(&regs->attreli);
-	temp &= ~ATTRELI_EI_MASK;
-	temp |= ATTRELI_EI(index);
-	gfar_write(&regs->attreli, temp);
-
-out:
-	unlock_rx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
-		   gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned int length = simple_strtoul(buf, NULL, 0);
-	u32 temp;
-	unsigned long flags;
-
-	if (length > GFAR_MAX_FIFO_THRESHOLD)
-		return count;
-
-	local_irq_save(flags);
-	lock_tx_qs(priv);
-
-	priv->fifo_threshold = length;
-
-	temp = gfar_read(&regs->fifo_tx_thr);
-	temp &= ~FIFO_TX_THR_MASK;
-	temp |= length;
-	gfar_write(&regs->fifo_tx_thr, temp);
-
-	unlock_tx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
-		   gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
-				     struct device_attribute *attr, char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
-				    struct device_attribute *attr,
-				    const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned int num = simple_strtoul(buf, NULL, 0);
-	u32 temp;
-	unsigned long flags;
-
-	if (num > GFAR_MAX_FIFO_STARVE)
-		return count;
-
-	local_irq_save(flags);
-	lock_tx_qs(priv);
-
-	priv->fifo_starve = num;
-
-	temp = gfar_read(&regs->fifo_tx_starve);
-	temp &= ~FIFO_TX_STARVE_MASK;
-	temp |= num;
-	gfar_write(&regs->fifo_tx_starve, temp);
-
-	unlock_tx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
-		   gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
-					 struct device_attribute *attr,
-					 char *buf)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
-	return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
-{
-	struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
-	unsigned int num = simple_strtoul(buf, NULL, 0);
-	u32 temp;
-	unsigned long flags;
-
-	if (num > GFAR_MAX_FIFO_STARVE_OFF)
-		return count;
-
-	local_irq_save(flags);
-	lock_tx_qs(priv);
-
-	priv->fifo_starve_off = num;
-
-	temp = gfar_read(&regs->fifo_tx_starve_shutoff);
-	temp &= ~FIFO_TX_STARVE_OFF_MASK;
-	temp |= num;
-	gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
-	unlock_tx_qs(priv);
-	local_irq_restore(flags);
-
-	return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
-		   gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	int rc;
-
-	/* Initialize the default values */
-	priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
-	priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
-	priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
-	/* Create our sysfs files */
-	rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
-	rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
-	rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
-	rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
-	rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
-	rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
-	if (rc)
-		dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 72291a8904a9..c8299c31b21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3261,7 +3261,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
 	dev->stats.tx_packets++;
 
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 
 	ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 	ugeth->skb_dirtytx[txQ] =
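
Editor's note: this and the dev_kfree_skb() conversions in the next few drivers fold two fixes into one call: the *_any() variants are safe to call from any context (hard IRQ or not), and dev_consume_skb_any() records a successfully transmitted buffer instead of a drop, which keeps the kfree_skb drop tracepoint meaningful for debugging. lib82596 below keeps dev_kfree_skb_any() precisely because its path really is a drop. A small runnable model of the accounting distinction (the types and counters here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    struct sk_buff { char *data; };

    static unsigned long dropped, consumed;

    /* Model of dev_kfree_skb_any(): freeing because the packet failed. */
    static void kfree_skb_model(struct sk_buff *skb)
    {
        dropped++;                 /* a drop tracepoint would fire here */
        free(skb->data);
        free(skb);
    }

    /* Model of dev_consume_skb_any(): freeing after successful TX. */
    static void consume_skb_model(struct sk_buff *skb)
    {
        consumed++;                /* no drop is recorded */
        free(skb->data);
        free(skb);
    }

    static struct sk_buff *alloc_skb_model(void)
    {
        struct sk_buff *skb = malloc(sizeof(*skb));
        skb->data = malloc(64);
        return skb;
    }

    int main(void)
    {
        consume_skb_model(alloc_skb_model());  /* TX completion path */
        kfree_skb_model(alloc_skb_model());    /* error/drop path */
        printf("consumed=%lu dropped=%lu\n", consumed, dropped);
        return 0;
    }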
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index 17fca323c143..c984998b34a0 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -993,7 +993,7 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			       dev->name));
 		dev->stats.tx_dropped++;
 
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(skb);
 	} else {
 		if (++lp->next_tx_cmd == TX_RING_SIZE)
 			lp->next_tx_cmd = 0;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 7628e0fd8455..538903bf13bc 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -490,7 +490,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
 		skb_arr[index] = skb;
 		tmp_addr = ehea_map_vaddr(skb->data);
 		if (tmp_addr == -1) {
-			dev_kfree_skb(skb);
+			dev_consume_skb_any(skb);
 			q_skba->os_skbs = fill_wqes - i;
 			ret = 0;
 			break;
@@ -856,7 +856,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 
 			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 			skb = pr->sq_skba.arr[index];
-			dev_kfree_skb(skb);
+			dev_consume_skb_any(skb);
 			pr->sq_skba.arr[index] = NULL;
 		}
 
@@ -2044,7 +2044,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	skb_copy_bits(skb, 0, imm_data, skb->len);
 
 	swqe->immediate_data_length = skb->len;
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 }
 
 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 1fc8334fc181..c9127562bd22 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1044,7 +1044,7 @@ retry_bounce:
 			       DMA_TO_DEVICE);
 
 out:
-	dev_kfree_skb(skb);
+	dev_consume_skb_any(skb);
 	return NETDEV_TX_OK;
 
 map_failed_frags:
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	unsigned long lpar_rc;
 
 restart_poll:
-	do {
+	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
 
@@ -1121,7 +1121,7 @@ restart_poll:
 			netdev->stats.rx_bytes += length;
 			frames_processed++;
 		}
-	} while (frames_processed < budget);
+	}
 
 	ibmveth_replenish_task(adapter);
 
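
Editor's note: converting ibmveth_poll()'s post-test do { } while (frames_processed < budget) into a pre-test while loop matters for the budget == 0 case: the old form always processed at least one frame, whereas NAPI callers (netpoll, for instance) may legitimately pass a zero budget and expect no RX work at all. A minimal demonstration of the off-by-one (the real loop also breaks early when no buffer is pending, which this sketch omits):

    #include <stdio.h>

    static int poll_post_test(int budget)
    {
        int done = 0;
        do {
            done++;                  /* would process one frame */
        } while (done < budget);
        return done;                 /* returns 1 even when budget == 0 */
    }

    static int poll_pre_test(int budget)
    {
        int done = 0;
        while (done < budget)
            done++;
        return done;                 /* returns 0 when budget == 0 */
    }

    int main(void)
    {
        printf("budget=0: post-test=%d pre-test=%d\n",
               poll_post_test(0), poll_pre_test(0));
        return 0;
    }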
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index bf7a01ef9a57..b56461ce674c 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1778,9 +1778,9 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
 	 * testing, ie sending frames with bad CRC.
 	 */
 	if (unlikely(skb->no_fcs))
-		cb->command |= __constant_cpu_to_le16(cb_tx_nc);
+		cb->command |= cpu_to_le16(cb_tx_nc);
 	else
-		cb->command &= ~__constant_cpu_to_le16(cb_tx_nc);
+		cb->command &= ~cpu_to_le16(cb_tx_nc);
 
 	/* interrupt every 16 packets regardless of delay */
 	if ((nic->cbs_avail & ~15) == nic->cbs_avail)
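
Editor's note: the __constant_cpu_to_le16() form was only ever a hint for constant folding; plain cpu_to_le16() compiles to the same code for constant arguments, so the unprefixed macro is preferred. The byte-order conversion itself, shown in portable user-space C with glibc's htole16()/le16toh() from <endian.h> standing in for the kernel macros (the CB_TX_NC value below is illustrative, not e100's real bit):

    #include <endian.h>   /* glibc: htole16()/le16toh() */
    #include <stdint.h>
    #include <stdio.h>

    #define CB_TX_NC 0x0010   /* illustrative bit, not e100's real value */

    int main(void)
    {
        uint16_t command = 0;

        /* Equivalent of: cb->command |= cpu_to_le16(cb_tx_nc); */
        command |= htole16(CB_TX_NC);

        /* And of: cb->command &= ~cpu_to_le16(cb_tx_nc); */
        command &= ~htole16(CB_TX_NC);

        printf("command=0x%04x\n", le16toh(command));
        return 0;
    }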
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index ff2d806eaef7..a5f6b11d6992 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 80003ES2LAN Gigabit Ethernet Controller (Copper)
  * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 90d363b2d280..535a9430976d 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_80003ES2LAN_H_
 #define _E1000E_80003ES2LAN_H_
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 8fed74e3fa53..e0aa7f1efb08 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 82571EB Gigabit Ethernet Controller
  * 82571EB Gigabit Ethernet Controller (Copper)
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 08e24dc3dc0e..2e758f796d60 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000E_82571_H_
 #define _E1000E_82571_H_
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index c2dcfcc10857..106de493373c 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel PRO/1000 Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -12,9 +12,8 @@
 # FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 # more details.
 #
-# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
 #
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351c94a0cf74..d18e89212575 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_DEFINES_H_
 #define _E1000_DEFINES_H_
@@ -35,9 +28,11 @@
 
 /* Definitions for power management and wakeup registers */
 /* Wake Up Control */
 #define E1000_WUC_APME		0x00000001 /* APM Enable */
 #define E1000_WUC_PME_EN	0x00000002 /* PME Enable */
-#define E1000_WUC_PHY_WAKE	0x00000100 /* if PHY supports wakeup */
+#define E1000_WUC_PME_STATUS	0x00000004 /* PME Status */
+#define E1000_WUC_APMPME	0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_PHY_WAKE	0x00000100 /* if PHY supports wakeup */
 
 /* Wake Up Filter Control */
 #define E1000_WUFC_LNKC	0x00000001 /* Link Status Change Wakeup Enable */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0150f7fc893d..5325e3e2154e 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* Linux PRO/1000 Ethernet Driver main header file */
 
@@ -333,7 +326,6 @@ struct e1000_adapter {
 	struct work_struct update_phy_task;
 	struct work_struct print_hang_task;
 
-	bool idle_check;
 	int phy_hang_count;
 
 	u16 tx_ring_count;
@@ -476,7 +468,7 @@ void e1000e_check_options(struct e1000_adapter *adapter);
 void e1000e_set_ethtool_ops(struct net_device *netdev);
 
 int e1000e_up(struct e1000_adapter *adapter);
-void e1000e_down(struct e1000_adapter *adapter);
+void e1000e_down(struct e1000_adapter *adapter, bool reset);
 void e1000e_reinit_locked(struct e1000_adapter *adapter);
 void e1000e_reset(struct e1000_adapter *adapter);
 void e1000e_power_up_phy(struct e1000_adapter *adapter);
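
Editor's note: threading a bool reset through e1000e_down() lets each caller say whether teardown should end in a hardware reset; the ethtool paths below all pass true explicitly since they reconfigure and bring the interface straight back up. The shape of the change as a runnable sketch (the function bodies are stand-ins, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    struct adapter { const char *name; };

    static void hw_reset(struct adapter *a)
    {
        printf("%s: hardware reset\n", a->name);
    }

    /* Old: void down(struct adapter *a); every caller got one behavior.
     * New: the caller states whether teardown should end in a reset. */
    static void down(struct adapter *a, bool reset)
    {
        printf("%s: stopping queues, freeing rings\n", a->name);
        if (reset)
            hw_reset(a);
    }

    int main(void)
    {
        struct adapter a = { "eth0" };
        down(&a, true);   /* e.g. an ethtool ring-size change */
        return 0;
    }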
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d14c8f53384c..3c2898d0c2aa 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* ethtool support for e1000 */
 
@@ -332,7 +325,7 @@ static int e1000_set_settings(struct net_device *netdev,
 
 	/* reset the link */
 	if (netif_running(adapter->netdev)) {
-		e1000e_down(adapter);
+		e1000e_down(adapter, true);
 		e1000e_up(adapter);
 	} else {
 		e1000e_reset(adapter);
@@ -380,7 +373,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
 		hw->fc.requested_mode = e1000_fc_default;
 		if (netif_running(adapter->netdev)) {
-			e1000e_down(adapter);
+			e1000e_down(adapter, true);
 			e1000e_up(adapter);
 		} else {
 			e1000e_reset(adapter);
@@ -726,7 +719,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
 
 	pm_runtime_get_sync(netdev->dev.parent);
 
-	e1000e_down(adapter);
+	e1000e_down(adapter, true);
 
 	/* We can't just free everything and then setup again, because the
 	 * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
@@ -924,15 +917,21 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 		}
 		if (mac->type == e1000_pch2lan) {
 			/* SHRAH[0,1,2] different than previous */
-			if (i == 7)
+			if (i == 1)
 				mask &= 0xFFF4FFFF;
 			/* SHRAH[3] different than SHRAH[0,1,2] */
-			if (i == 10)
+			if (i == 4)
 				mask |= (1 << 30);
+			/* RAR[1-6] owned by management engine - skipping */
+			if (i > 0)
+				i += 6;
 		}
 
 		REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask,
 				       0xFFFFFFFF);
+		/* reset index to actual value */
+		if ((mac->type == e1000_pch2lan) && (i > 6))
+			i -= 6;
 	}
 
 	for (i = 0; i < mac->mta_reg_count; i++)
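
Editor's note: on pch2lan parts, RAR[1-6] now belong to the management engine, so the register self-test jumps the loop index past them before poking the array and restores it afterwards so the iteration count is unchanged. The pattern, runnable in isolation (the register count and print statement are made up for illustration; the driver adjusts i in place rather than using a separate idx):

    #include <stdio.h>

    #define RAR_COUNT   11
    #define ME_RESERVED 6   /* RAR[1..6] owned by the ME: do not touch */

    int main(void)
    {
        for (int i = 0; i < RAR_COUNT - ME_RESERVED; i++) {
            int idx = i;

            if (idx > 0)
                idx += ME_RESERVED;  /* skip ME-owned entries */

            printf("testing RAR[%d]\n", idx);
            /* the driver's equivalent:
             *   if (i > 0) i += 6;  ... test ...  if (i > 6) i -= 6;
             */
        }
        return 0;
    }

This visits RAR[0], then RAR[7] through RAR[10], exactly the entries the host still owns.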
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b7f38435d1fd..6b3de5f39a97 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 #ifndef _E1000_HW_H_
 #define _E1000_HW_H_
@@ -655,12 +648,20 @@ struct e1000_shadow_ram {
 
 #define E1000_ICH8_SHADOW_RAM_WORDS		2048
 
+/* I218 PHY Ultra Low Power (ULP) states */
+enum e1000_ulp_state {
+	e1000_ulp_state_unknown,
+	e1000_ulp_state_off,
+	e1000_ulp_state_on,
+};
+
 struct e1000_dev_spec_ich8lan {
 	bool kmrn_lock_loss_workaround_enabled;
 	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
 	bool nvm_k1_enabled;
 	bool eee_disable;
 	u16 eee_lp_ability;
+	enum e1000_ulp_state ulp_state;
 };
 
 struct e1000_hw {
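
Editor's note: ulp_state is deliberately a tri-state rather than a bool: at init time the driver cannot know whether firmware left ULP engaged, so it starts from e1000_ulp_state_unknown and forcibly disables (see the ich8lan.c changes below), skipping work only once the state is positively known. A compact runnable model of that state machine:

    #include <stdio.h>

    enum ulp_state { ULP_UNKNOWN, ULP_OFF, ULP_ON };

    static enum ulp_state state = ULP_UNKNOWN;

    static void ulp_disable(int force)
    {
        /* Skip only when we positively know ULP is off. */
        if (state == ULP_OFF)
            return;
        printf("disabling ULP (force=%d, prior state=%d)\n", force, state);
        state = ULP_OFF;
    }

    int main(void)
    {
        ulp_disable(1);   /* init: state unknown -> must run */
        ulp_disable(0);   /* now known off -> early return */
        return 0;
    }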
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 42f0f6717511..9866f264f55e 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,30 +1,23 @@
-/*******************************************************************************
-
-  Intel PRO/1000 Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
-
-  This program is free software; you can redistribute it and/or modify it
-  under the terms and conditions of the GNU General Public License,
-  version 2, as published by the Free Software Foundation.
-
-  This program is distributed in the hope it will be useful, but WITHOUT
-  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-  more details.
-
-  You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
-
-  The full GNU General Public License is included in this distribution in
-  the file called "COPYING".
-
-  Contact Information:
-  Linux NICS <linux.nics@intel.com>
-  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
-  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
 
 /* 82562G 10/100 Network Connection
  * 82562G-2 10/100 Network Connection
@@ -53,6 +46,14 @@
  * 82578DC Gigabit Network Connection
  * 82579LM Gigabit Network Connection
  * 82579V Gigabit Network Connection
+ * Ethernet Connection I217-LM
+ * Ethernet Connection I217-V
+ * Ethernet Connection I218-V
+ * Ethernet Connection I218-LM
+ * Ethernet Connection (2) I218-LM
+ * Ethernet Connection (2) I218-V
+ * Ethernet Connection (3) I218-LM
+ * Ethernet Connection (3) I218-V
  */
 
 #include "e1000.h"
@@ -142,7 +143,9 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -239,6 +242,47 @@ out:
 }
 
 /**
+ * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
+ * @hw: pointer to the HW structure
+ *
+ * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
+ * used to reset the PHY to a quiescent state when necessary.
+ **/
+static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
+{
+	u32 mac_reg;
+
+	/* Set Phy Config Counter to 50msec */
+	mac_reg = er32(FEXTNVM3);
+	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
+	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
+	ew32(FEXTNVM3, mac_reg);
+
+	/* Toggle LANPHYPC Value bit */
+	mac_reg = er32(CTRL);
+	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
+	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
+	ew32(CTRL, mac_reg);
+	e1e_flush();
+	usleep_range(10, 20);
+	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+	ew32(CTRL, mac_reg);
+	e1e_flush();
+
+	if (hw->mac.type < e1000_pch_lpt) {
+		msleep(50);
+	} else {
+		u16 count = 20;
+
+		do {
+			usleep_range(5000, 10000);
+		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);
+
+		msleep(30);
+	}
+}
+
+/**
  * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
  * @hw: pointer to the HW structure
 
@@ -247,6 +291,7 @@ out:
  **/
 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 {
+	struct e1000_adapter *adapter = hw->adapter;
 	u32 mac_reg, fwsm = er32(FWSM);
 	s32 ret_val;
 
@@ -255,6 +300,12 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 	 */
 	e1000_gate_hw_phy_config_ich8lan(hw, true);
 
+	/* It is not possible to be certain of the current state of ULP
+	 * so forcibly disable it.
+	 */
+	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
+	e1000_disable_ulp_lpt_lp(hw, true);
+
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val) {
 		e_dbg("Failed to initialize PHY flow\n");
@@ -300,33 +351,9 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		break;
 	}
 
-	e_dbg("Toggling LANPHYPC\n");
-
-	/* Set Phy Config Counter to 50msec */
-	mac_reg = er32(FEXTNVM3);
-	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
-	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
-	ew32(FEXTNVM3, mac_reg);
-
 	/* Toggle LANPHYPC Value bit */
-	mac_reg = er32(CTRL);
-	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
-	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
-	ew32(CTRL, mac_reg);
-	e1e_flush();
-	usleep_range(10, 20);
-	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
-	ew32(CTRL, mac_reg);
-	e1e_flush();
-	if (hw->mac.type < e1000_pch_lpt) {
-		msleep(50);
-	} else {
-		u16 count = 20;
-		do {
-			usleep_range(5000, 10000);
-		} while (!(er32(CTRL_EXT) &
-			   E1000_CTRL_EXT_LPCD) && count--);
-		usleep_range(30000, 60000);
+	e1000_toggle_lanphypc_pch_lpt(hw);
+	if (hw->mac.type >= e1000_pch_lpt) {
 		if (e1000_phy_is_accessible_pchlan(hw))
 			break;
 
@@ -349,12 +376,31 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 
 	hw->phy.ops.release(hw);
 	if (!ret_val) {
+
+		/* Check to see if able to reset PHY.  Print error if not */
+		if (hw->phy.ops.check_reset_block(hw)) {
+			e_err("Reset blocked by ME\n");
+			goto out;
+		}
+
 		/* Reset the PHY before any access to it.  Doing so, ensures
 		 * that the PHY is in a known good state before we read/write
 		 * PHY registers.  The generic reset is sufficient here,
 		 * because we haven't determined the PHY type yet.
 		 */
 		ret_val = e1000e_phy_hw_reset_generic(hw);
+		if (ret_val)
+			goto out;
+
+		/* On a successful reset, possibly need to wait for the PHY
+		 * to quiesce to an accessible state before returning control
+		 * to the calling function.  If the PHY does not quiesce, then
+		 * return E1000E_BLK_PHY_RESET, as this is the condition that
+		 * the PHY is in.
+		 */
+		ret_val = hw->phy.ops.check_reset_block(hw);
+		if (ret_val)
+			e_err("ME blocked access to PHY after reset\n");
 	}
 
 out:
@@ -724,8 +770,14 @@ s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
  * Enable/disable EEE based on setting in dev_spec structure, the duplex of
  * the link and the EEE capabilities of the link partner.  The LPI Control
  * register bits will remain set only if/when link is up.
+ *
+ * EEE LPI must not be asserted earlier than one second after link is up.
+ * On 82579, EEE LPI should not be enabled until such time otherwise there
+ * can be link issues with some switches.  Other devices can have EEE LPI
+ * enabled immediately upon link up since they have a timer in hardware which
+ * prevents LPI from being asserted too early.
  **/
-static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	s32 ret_val;
@@ -979,6 +1031,253 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
979} 1031}
980 1032
981/** 1033/**
1034 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1035 * @hw: pointer to the HW structure
1036 * @to_sx: boolean indicating a system power state transition to Sx
1037 *
1038 * When link is down, configure ULP mode to significantly reduce the power
1039 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1040 * ME firmware to start the ULP configuration. If not on an ME enabled
1041 * system, configure the ULP mode by software.
1042 */
1043s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1044{
1045 u32 mac_reg;
1046 s32 ret_val = 0;
1047 u16 phy_reg;
1048
1049 if ((hw->mac.type < e1000_pch_lpt) ||
1050 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1051 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1052 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1053 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1054 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1055 return 0;
1056
1057 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1058 /* Request ME configure ULP mode in the PHY */
1059 mac_reg = er32(H2ME);
1060 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1061 ew32(H2ME, mac_reg);
1062
1063 goto out;
1064 }
1065
1066 if (!to_sx) {
1067 int i = 0;
1068
1069 /* Poll up to 5 seconds for Cable Disconnected indication */
1070 while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1071 /* Bail if link is re-acquired */
1072 if (er32(STATUS) & E1000_STATUS_LU)
1073 return -E1000_ERR_PHY;
1074
1075 if (i++ == 100)
1076 break;
1077
1078 msleep(50);
1079 }
1080 e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1081 (er32(FEXT) &
1082 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1083 }
1084
1085 ret_val = hw->phy.ops.acquire(hw);
1086 if (ret_val)
1087 goto out;
1088
1089 /* Force SMBus mode in PHY */
1090 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1091 if (ret_val)
1092 goto release;
1093 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1094 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1095
1096 /* Force SMBus mode in MAC */
1097 mac_reg = er32(CTRL_EXT);
1098 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1099 ew32(CTRL_EXT, mac_reg);
1100
1101 /* Set Inband ULP Exit, Reset to SMBus mode and
1102 * Disable SMBus Release on PERST# in PHY
1103 */
1104 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1105 if (ret_val)
1106 goto release;
1107 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1108 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1109 if (to_sx) {
1110 if (er32(WUFC) & E1000_WUFC_LNKC)
1111 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1112
1113 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1114 } else {
1115 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1116 }
1117 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1118
1119 /* Set Disable SMBus Release on PERST# in MAC */
1120 mac_reg = er32(FEXTNVM7);
1121 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1122 ew32(FEXTNVM7, mac_reg);
1123
1124 /* Commit ULP changes in PHY by starting auto ULP configuration */
1125 phy_reg |= I218_ULP_CONFIG1_START;
1126 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1127release:
1128 hw->phy.ops.release(hw);
1129out:
1130 if (ret_val)
1131 e_dbg("Error in ULP enable flow: %d\n", ret_val);
1132 else
1133 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1134
1135 return ret_val;
1136}
1137
1138/**
1139 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1140 * @hw: pointer to the HW structure
1141 * @force: boolean indicating whether or not to force disabling ULP
1142 *
1143 * Un-configure ULP mode when link is up, the system is transitioned from
1144 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1145 * system, poll for an indication from ME that ULP has been un-configured.
1146 * If not on an ME enabled system, un-configure the ULP mode by software.
1147 *
1148 * During nominal operation, this function is called when link is acquired
1149 * to disable ULP mode (force=false); otherwise, for example when unloading
1150 * the driver or during Sx->S0 transitions, this is called with force=true
1151 * to forcibly disable ULP.
1152 */
1153static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1154{
1155 s32 ret_val = 0;
1156 u32 mac_reg;
1157 u16 phy_reg;
1158 int i = 0;
1159
1160 if ((hw->mac.type < e1000_pch_lpt) ||
1161 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1162 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1163 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1164 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1165 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1166 return 0;
1167
1168 if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1169 if (force) {
1170 /* Request ME un-configure ULP mode in the PHY */
1171 mac_reg = er32(H2ME);
1172 mac_reg &= ~E1000_H2ME_ULP;
1173 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1174 ew32(H2ME, mac_reg);
1175 }
1176
1177 /* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1178 while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1179 if (i++ == 10) {
1180 ret_val = -E1000_ERR_PHY;
1181 goto out;
1182 }
1183
1184 usleep_range(10000, 20000);
1185 }
1186 e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1187
1188 if (force) {
1189 mac_reg = er32(H2ME);
1190 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1191 ew32(H2ME, mac_reg);
1192 } else {
1193 /* Clear H2ME.ULP after ME ULP configuration */
1194 mac_reg = er32(H2ME);
1195 mac_reg &= ~E1000_H2ME_ULP;
1196 ew32(H2ME, mac_reg);
1197 }
1198
1199 goto out;
1200 }
1201
1202 ret_val = hw->phy.ops.acquire(hw);
1203 if (ret_val)
1204 goto out;
1205
1206 if (force)
1207 /* Toggle LANPHYPC Value bit */
1208 e1000_toggle_lanphypc_pch_lpt(hw);
1209
1210 /* Unforce SMBus mode in PHY */
1211 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1212 if (ret_val) {
1213 /* The MAC might be in PCIe mode, so temporarily force to
1214 * SMBus mode in order to access the PHY.
1215 */
1216 mac_reg = er32(CTRL_EXT);
1217 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1218 ew32(CTRL_EXT, mac_reg);
1219
1220 msleep(50);
1221
1222 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1223 &phy_reg);
1224 if (ret_val)
1225 goto release;
1226 }
1227 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1228 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1229
1230 /* Unforce SMBus mode in MAC */
1231 mac_reg = er32(CTRL_EXT);
1232 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1233 ew32(CTRL_EXT, mac_reg);
1234
1235 /* When ULP mode was previously entered, K1 was disabled by the
1236 * hardware. Re-Enable K1 in the PHY when exiting ULP.
1237 */
1238 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1239 if (ret_val)
1240 goto release;
1241 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1242 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1243
1244 /* Clear ULP enabled configuration */
1245 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1246 if (ret_val)
1247 goto release;
1248 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1249 I218_ULP_CONFIG1_STICKY_ULP |
1250 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1251 I218_ULP_CONFIG1_WOL_HOST |
1252 I218_ULP_CONFIG1_INBAND_EXIT |
1253 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1254 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1255
1256 /* Commit ULP changes by starting auto ULP configuration */
1257 phy_reg |= I218_ULP_CONFIG1_START;
1258 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1259
1260 /* Clear Disable SMBus Release on PERST# in MAC */
1261 mac_reg = er32(FEXTNVM7);
1262 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1263 ew32(FEXTNVM7, mac_reg);
1264
1265release:
1266 hw->phy.ops.release(hw);
1267 if (force) {
1268 e1000_phy_hw_reset(hw);
1269 msleep(50);
1270 }
1271out:
1272 if (ret_val)
1273 e_dbg("Error in ULP disable flow: %d\n", ret_val);
1274 else
1275 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1276
1277 return ret_val;
1278}
+
+/**
  * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  * @hw: pointer to the HW structure
  *
@@ -1106,9 +1405,11 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 	e1000e_check_downshift(hw);
 
 	/* Enable/Disable EEE after link up */
-	ret_val = e1000_set_eee_pchlan(hw);
-	if (ret_val)
-		return ret_val;
+	if (hw->phy.type > e1000_phy_82579) {
+		ret_val = e1000_set_eee_pchlan(hw);
+		if (ret_val)
+			return ret_val;
+	}
 
 	/* If we are forcing speed/duplex, then we simply return since
 	 * we have already determined whether we have link or not.
@@ -1374,7 +1675,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
 	/* RAR[1-6] are owned by manageability.  Skip those and program the
 	 * next address into the SHRA register array.
 	 */
-	if (index < (u32)(hw->mac.rar_entry_count - 6)) {
+	if (index < (u32)(hw->mac.rar_entry_count)) {
 		s32 ret_val;
 
 		ret_val = e1000_acquire_swflag_ich8lan(hw);
@@ -1484,11 +1785,13 @@ out:
  **/
 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
 {
-	u32 fwsm;
+	bool blocked = false;
+	int i = 0;
 
-	fwsm = er32(FWSM);
-
-	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+	while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
+	       (i++ < 10))
+		usleep_range(10000, 20000);
+	return blocked ? E1000_BLK_PHY_RESET : 0;
 }
 
 /**
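
The reworked reset-block check above no longer samples FWSM once; it re-reads the register up to ten times with a 10-20 ms sleep between attempts, so a blocked PHY reset is reported only after roughly 100-200 ms. A generic sketch of that bounded-poll shape (illustrative only, not driver code):

static bool poll_while_blocked(bool (*is_blocked)(void *ctx), void *ctx,
                               int tries)
{
        bool blocked;
        int i = 0;

        /* keep re-reading the condition, sleeping between attempts */
        while ((blocked = is_blocked(ctx)) && (i++ < tries))
                usleep_range(10000, 20000);

        return blocked; /* still blocked after all tries */
}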
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 217090df33e7..bead50f9187b 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_ICH8LAN_H_ 22#ifndef _E1000E_ICH8LAN_H_
30#define _E1000E_ICH8LAN_H_ 23#define _E1000E_ICH8LAN_H_
@@ -65,11 +58,16 @@
 
 #define E1000_FWSM_WLOCK_MAC_MASK	0x0380
 #define E1000_FWSM_WLOCK_MAC_SHIFT	7
+#define E1000_FWSM_ULP_CFG_DONE	0x00000400	/* Low power cfg done */
 
 /* Shared Receive Address Registers */
 #define E1000_SHRAL_PCH_LPT(_i)	(0x05408 + ((_i) * 8))
 #define E1000_SHRAH_PCH_LPT(_i)	(0x0540C + ((_i) * 8))
 
+#define E1000_H2ME		0x05B50	/* Host to ME */
+#define E1000_H2ME_ULP		0x00000800	/* ULP Indication Bit */
+#define E1000_H2ME_ENFORCE_SETTINGS	0x00001000	/* Enforce Settings */
+
 #define ID_LED_DEFAULT_ICH8LAN	((ID_LED_DEF1_DEF2 << 12) | \
 				 (ID_LED_OFF1_OFF2 <<  8) | \
 				 (ID_LED_OFF1_ON2  <<  4) | \
@@ -82,6 +80,9 @@
 
 #define E1000_ICH8_LAN_INIT_TIMEOUT	1500
 
+/* FEXT register bit definition */
+#define E1000_FEXT_PHY_CABLE_DISCONNECTED	0x00000004
+
 #define E1000_FEXTNVM_SW_CONFIG	1
 #define E1000_FEXTNVM_SW_CONFIG_ICH8M	(1 << 27)	/* different on ICH8M */
 
@@ -95,10 +96,12 @@
 #define E1000_FEXTNVM6_REQ_PLL_CLK	0x00000100
 #define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION	0x00000200
 
+#define E1000_FEXTNVM7_DISABLE_SMB_PERST	0x00000020
+
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES	7
-#define E1000_PCH2_RAR_ENTRIES	11	/* RAR[0-6], SHRA[0-3] */
+#define E1000_PCH2_RAR_ENTRIES	5	/* RAR[0], SHRA[0-3] */
 #define E1000_PCH_LPT_RAR_ENTRIES	12	/* RAR[0], SHRA[0-10] */
 
 #define PHY_PAGE_SHIFT		5
@@ -161,6 +164,16 @@
 #define CV_SMB_CTRL		PHY_REG(769, 23)
 #define CV_SMB_CTRL_FORCE_SMBUS	0x0001
 
+/* I218 Ultra Low Power Configuration 1 Register */
+#define I218_ULP_CONFIG1		PHY_REG(779, 16)
+#define I218_ULP_CONFIG1_START		0x0001	/* Start auto ULP config */
+#define I218_ULP_CONFIG1_IND		0x0004	/* Pwr up from ULP indication */
+#define I218_ULP_CONFIG1_STICKY_ULP	0x0010	/* Set sticky ULP mode */
+#define I218_ULP_CONFIG1_INBAND_EXIT	0x0020	/* Inband on ULP exit */
+#define I218_ULP_CONFIG1_WOL_HOST	0x0040	/* WoL Host on ULP exit */
+#define I218_ULP_CONFIG1_RESET_TO_SMBUS	0x0100	/* Reset to SMBus mode */
+#define I218_ULP_CONFIG1_DISABLE_SMB_PERST	0x1000	/* Disable on PERST# */
+
 /* SMBus Address Phy Register */
 #define HV_SMB_ADDR		PHY_REG(768, 26)
 #define HV_SMB_ADDR_MASK	0x007F
@@ -195,6 +208,7 @@
 /* PHY Power Management Control */
 #define HV_PM_CTRL		PHY_REG(770, 17)
 #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
+#define HV_PM_CTRL_K1_ENABLE	0x4000
 
 #define SW_FLAG_TIMEOUT		1000	/* SW Semaphore flag timeout in ms */
 
@@ -268,4 +282,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
+s32 e1000_set_eee_pchlan(struct e1000_hw *hw);
+s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx);
 #endif /* _E1000E_ICH8LAN_H_ */
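
The I218_ULP_CONFIG1 bits defined above are the ones e1000_enable_ulp_lpt_lp() ORs together before setting the start bit. As a sketch, the value composed for an Sx entry with link-change wake looks roughly like this (constants from this header; control flow simplified):

u16 ulp_cfg = I218_ULP_CONFIG1_RESET_TO_SMBUS |		/* SMBus mode after reset */
              I218_ULP_CONFIG1_DISABLE_SMB_PERST |	/* keep SMBus across PERST# */
              I218_ULP_CONFIG1_STICKY_ULP |		/* stay in ULP across Sx */
              I218_ULP_CONFIG1_WOL_HOST;		/* only when WUFC.LNKC is set */

/* written once, then re-written with the start bit to commit */
ulp_cfg |= I218_ULP_CONFIG1_START;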
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 2480c1091873..baa0a466d1d0 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index a61fee404ebe..4e81c2825b7a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_MAC_H_ 22#ifndef _E1000E_MAC_H_
30#define _E1000E_MAC_H_ 23#define _E1000E_MAC_H_
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index e4b0f1ef92f6..cb37ff1f1321 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index 326897c29ea8..a8c27f98f7b0 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_MANAGE_H_ 22#ifndef _E1000E_MANAGE_H_
30#define _E1000E_MANAGE_H_ 23#define _E1000E_MANAGE_H_
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933c4cdd..6bd1832e3f3e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 23
@@ -885,7 +878,7 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
 				 struct sk_buff *skb)
 {
 	if (netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = le32_to_cpu(rss);
+		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
 }
 
 /**
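
skb_set_hash() supersedes open-coded stores to skb->rxhash: it records the hash value together with how much of the packet it covers (PKT_HASH_TYPE_NONE/L2/L3/L4). PKT_HASH_TYPE_L3 is the conservative choice here since the reported RSS hash is not guaranteed to include port numbers. A self-contained sketch of the call:

#include <linux/skbuff.h>

static void report_rx_hash(struct sk_buff *skb, __le32 rss, bool rxhash_on)
{
        /* tell the stack both the hash and how much of the packet it covers */
        if (rxhash_on)
                skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}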
@@ -1097,8 +1090,14 @@ static void e1000_print_hw_hang(struct work_struct *work)
 		adapter->tx_hang_recheck = true;
 		return;
 	}
-	/* Real hang detected */
 	adapter->tx_hang_recheck = false;
+
+	if (er32(TDH(0)) == er32(TDT(0))) {
+		e_dbg("false hang detected, ignoring\n");
+		return;
+	}
+
+	/* Real hang detected */
 	netif_stop_queue(netdev);
 
 	e1e_rphy(hw, MII_BMSR, &phy_status);
@@ -1128,6 +1127,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
 	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
 	      phy_status, phy_1000t_status, phy_ext_status, pci_status);
 
+	e1000e_dump(adapter);
+
 	/* Suggest workaround for known h/w issue */
 	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
 		e_err("Try turning off Tx pause (flow control) via ethtool\n");
@@ -1701,7 +1702,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
 	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, rx_ring->head);
-	if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 		e1000e_update_rdt_wa(rx_ring, 0);
 	else
 		writel(0, rx_ring->tail);
@@ -2038,13 +2039,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 					       msix_entry),
 					GFP_KERNEL);
 		if (adapter->msix_entries) {
+			struct e1000_adapter *a = adapter;
+
 			for (i = 0; i < adapter->num_vectors; i++)
 				adapter->msix_entries[i].entry = i;
 
-			err = pci_enable_msix(adapter->pdev,
-					      adapter->msix_entries,
-					      adapter->num_vectors);
-			if (err == 0)
+			err = pci_enable_msix_range(a->pdev,
+						    a->msix_entries,
+						    a->num_vectors,
+						    a->num_vectors);
+			if (err > 0)
 				return;
 		}
 		/* MSI-X failed, so fall through and try MSI */
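
pci_enable_msix() returned 0 on success but a positive vector count on failure, which callers routinely mishandled; pci_enable_msix_range() takes a [minvec, maxvec] window and returns the number of vectors actually allocated or a negative errno, hence the err > 0 test above. Passing num_vectors for both bounds preserves the old all-or-nothing behavior. Sketch of the call pattern:

/* request exactly nvec vectors; min == max means all-or-nothing */
int got = pci_enable_msix_range(pdev, entries, nvec, nvec);

if (got > 0)
        pr_debug("MSI-X enabled with %d vectors\n", got);
else
        pr_debug("MSI-X unavailable (%d), falling back to MSI\n", got);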
@@ -2402,7 +2406,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
 	tx_ring->next_to_clean = 0;
 
 	writel(0, tx_ring->head);
-	if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
 		e1000e_update_tdt_wa(tx_ring, 0);
 	else
 		writel(0, tx_ring->tail);
@@ -2894,7 +2898,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_ring *tx_ring = adapter->tx_ring;
 	u64 tdba;
-	u32 tdlen, tarc;
+	u32 tdlen, tctl, tarc;
 
 	/* Setup the HW Tx Head and Tail descriptor pointers */
 	tdba = tx_ring->dma;
@@ -2931,6 +2935,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	/* erratum work around: set txdctl the same for both queues */
 	ew32(TXDCTL(1), er32(TXDCTL(0)));
 
+	/* Program the Transmit Control Register */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
 	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
 		tarc = er32(TARC(0));
 		/* set the speed mode bit, we'll clear it if we're not at
@@ -2961,6 +2971,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
 	/* enable Report Status bit */
 	adapter->txd_cmd |= E1000_TXD_CMD_RS;
 
+	ew32(TCTL, tctl);
+
 	hw->mac.ops.config_collision_dist(hw);
 }
 
@@ -3331,6 +3343,9 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl;
 
+	if (pm_runtime_suspended(netdev->dev.parent))
+		return;
+
 	/* Check for Promiscuous and All Multicast modes */
 	rctl = er32(RCTL);
 
@@ -3691,10 +3706,6 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
  */
 static void e1000_power_down_phy(struct e1000_adapter *adapter)
 {
-	/* WoL is enabled */
-	if (adapter->wol)
-		return;
-
 	if (adapter->hw.phy.ops.power_down)
 		adapter->hw.phy.ops.power_down(&adapter->hw);
 }
@@ -3911,10 +3922,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	}
 
 	if (!netif_running(adapter->netdev) &&
-	    !test_bit(__E1000_TESTING, &adapter->state)) {
+	    !test_bit(__E1000_TESTING, &adapter->state))
 		e1000_power_down_phy(adapter);
-		return;
-	}
 
 	e1000_get_phy_info(hw);
 
@@ -3981,7 +3990,12 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
 
 static void e1000e_update_stats(struct e1000_adapter *adapter);
 
-void e1000e_down(struct e1000_adapter *adapter)
+/**
+ * e1000e_down - quiesce the device and optionally reset the hardware
+ * @adapter: board private structure
+ * @reset: boolean flag to reset the hardware or not
+ */
+void e1000e_down(struct e1000_adapter *adapter, bool reset)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
@@ -4035,12 +4049,8 @@ void e1000e_down(struct e1000_adapter *adapter)
 	    e1000_lv_jumbo_workaround_ich8lan(hw, false))
 		e_dbg("failed to disable jumbo frame workaround mode\n");
 
-	if (!pci_channel_offline(adapter->pdev))
+	if (reset && !pci_channel_offline(adapter->pdev))
 		e1000e_reset(adapter);
-
-	/* TODO: for power management, we could drop the link and
-	 * pci_disable_device here.
-	 */
 }
 
 void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4048,7 +4058,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
 	might_sleep();
 	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
 		usleep_range(1000, 2000);
-	e1000e_down(adapter);
+	e1000e_down(adapter, true);
 	e1000e_up(adapter);
 	clear_bit(__E1000_RESETTING, &adapter->state);
 }
@@ -4326,7 +4336,6 @@ static int e1000_open(struct net_device *netdev)
 	adapter->tx_hang_recheck = false;
 	netif_start_queue(netdev);
 
-	adapter->idle_check = true;
 	hw->mac.get_link_status = true;
 	pm_runtime_put(&pdev->dev);
 
@@ -4376,14 +4385,15 @@ static int e1000_close(struct net_device *netdev)
 	pm_runtime_get_sync(&pdev->dev);
 
 	if (!test_bit(__E1000_DOWN, &adapter->state)) {
-		e1000e_down(adapter);
+		e1000e_down(adapter, true);
 		e1000_free_irq(adapter);
+
+		/* Link status message must follow this format */
+		pr_info("%s NIC Link is Down\n", adapter->netdev->name);
 	}
 
 	napi_disable(&adapter->napi);
 
-	e1000_power_down_phy(adapter);
-
 	e1000e_free_tx_resources(adapter->tx_ring);
 	e1000e_free_rx_resources(adapter->rx_ring);
 
@@ -4460,11 +4470,16 @@ static void e1000e_update_phy_task(struct work_struct *work)
 	struct e1000_adapter *adapter = container_of(work,
 						     struct e1000_adapter,
 						     update_phy_task);
+	struct e1000_hw *hw = &adapter->hw;
 
 	if (test_bit(__E1000_DOWN, &adapter->state))
 		return;
 
-	e1000_get_phy_info(&adapter->hw);
+	e1000_get_phy_info(hw);
+
+	/* Enable EEE on 82579 after link up */
+	if (hw->phy.type == e1000_phy_82579)
+		e1000_set_eee_pchlan(hw);
 }
 
 /**
@@ -4799,6 +4814,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
 
 	if (adapter->phy_hang_count > 1) {
 		adapter->phy_hang_count = 0;
+		e_dbg("PHY appears hung - resetting\n");
 		schedule_work(&adapter->reset_task);
 	}
 }
@@ -4957,15 +4973,11 @@ static void e1000_watchdog_task(struct work_struct *work)
 			mod_timer(&adapter->phy_info_timer,
 				  round_jiffies(jiffies + 2 * HZ));
 
-			/* The link is lost so the controller stops DMA.
-			 * If there is queued Tx work that cannot be done
-			 * or if on an 8000ES2LAN which requires a Rx packet
-			 * buffer work-around on link down event, reset the
-			 * controller to flush the Tx/Rx packet buffers.
-			 * (Do the reset outside of interrupt context).
+			/* 8000ES2LAN requires a Rx packet buffer work-around
+			 * on link down event; reset the controller to flush
+			 * the Rx packet buffer.
 			 */
-			if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
-			    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
 				adapter->flags |= FLAG_RESTART_NOW;
 			else
 				pm_schedule_suspend(netdev->dev.parent,
@@ -4988,6 +5000,15 @@ link_up:
 	adapter->gotc_old = adapter->stats.gotc;
 	spin_unlock(&adapter->stats64_lock);
 
+	/* If the link is lost the controller stops DMA, but
+	 * if there is queued Tx work it cannot be done.  So
+	 * reset the controller to flush the Tx packet buffers.
+	 */
+	if (!netif_carrier_ok(netdev) &&
+	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
+		adapter->flags |= FLAG_RESTART_NOW;
+
+	/* If reset is necessary, do it outside of interrupt context. */
 	if (adapter->flags & FLAG_RESTART_NOW) {
 		schedule_work(&adapter->reset_task);
 		/* return immediately since reset is imminent */
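
The Tx-flush test that used to hide in the link-down branch now runs on every watchdog pass, keyed off netif_carrier_ok(). The "queued Tx work" predicate relies on the ring invariant that one descriptor slot always stays empty, so an idle ring reports count - 1 unused descriptors; anything less means frames are still queued while DMA is stopped. Just that predicate, as an illustrative sketch:

/* true when descriptors are still consumed, i.e. Tx work is queued */
static bool tx_work_queued(unsigned int unused, unsigned int ring_count)
{
        return unused + 1 < ring_count;
}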
@@ -5684,8 +5705,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	adapter->max_frame_size = max_frame;
 	e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
 	netdev->mtu = new_mtu;
+
+	pm_runtime_get_sync(netdev->dev.parent);
+
 	if (netif_running(netdev))
-		e1000e_down(adapter);
+		e1000e_down(adapter, true);
 
 	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 	 * means we reserve 2 more, this pushes us to allocate from the next
@@ -5711,6 +5735,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 	else
 		e1000e_reset(adapter);
 
+	pm_runtime_put_sync(netdev->dev.parent);
+
 	clear_bit(__E1000_RESETTING, &adapter->state);
 
 	return 0;
@@ -5852,7 +5878,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 {
 	struct e1000_hw *hw = &adapter->hw;
-	u32 i, mac_reg;
+	u32 i, mac_reg, wuc;
 	u16 phy_reg, wuc_enable;
 	int retval;
 
@@ -5899,13 +5925,18 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
 		phy_reg |= BM_RCTL_RFCE;
 	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
 
+	wuc = E1000_WUC_PME_EN;
+	if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
+		wuc |= E1000_WUC_APME;
+
 	/* enable PHY wakeup in MAC register */
 	ew32(WUFC, wufc);
-	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+	ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
+		   E1000_WUC_PME_STATUS | wuc));
 
 	/* configure and enable PHY wakeup in PHY registers */
 	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
-	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
 
 	/* activate PHY wakeup */
 	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
@@ -5918,15 +5949,10 @@ release:
 	return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+static int e1000e_pm_freeze(struct device *dev)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	u32 ctrl, ctrl_ext, rctl, status;
-	/* Runtime suspend should only enable wakeup for link changes */
-	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-	int retval = 0;
 
 	netif_device_detach(netdev);
 
@@ -5937,11 +5963,29 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 			usleep_range(10000, 20000);
 
 		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
-		e1000e_down(adapter);
+
+		/* Quiesce the device without resetting the hardware */
+		e1000e_down(adapter, false);
 		e1000_free_irq(adapter);
 	}
 	e1000e_reset_interrupt_capability(adapter);
 
+	/* Allow time for pending master requests to run */
+	e1000e_disable_pcie_master(&adapter->hw);
+
+	return 0;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, ctrl_ext, rctl, status;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+	int retval = 0;
+
 	status = er32(STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
@@ -5972,12 +6016,12 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 		ew32(CTRL_EXT, ctrl_ext);
 	}
 
+	if (!runtime)
+		e1000e_power_up_phy(adapter);
+
 	if (adapter->flags & FLAG_IS_ICH)
 		e1000_suspend_workarounds_ich8lan(&adapter->hw);
 
-	/* Allow time for pending master requests to run */
-	e1000e_disable_pcie_master(&adapter->hw);
-
 	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 		/* enable wakeup by the PHY */
 		retval = e1000_init_phy_wakeup(adapter, wufc);
@@ -5991,10 +6035,23 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 	} else {
 		ew32(WUC, 0);
 		ew32(WUFC, 0);
+
+		e1000_power_down_phy(adapter);
 	}
 
-	if (adapter->hw.phy.type == e1000_phy_igp_3)
+	if (adapter->hw.phy.type == e1000_phy_igp_3) {
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+	} else if (hw->mac.type == e1000_pch_lpt) {
+		if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
+			/* ULP does not support wake from unicast, multicast
+			 * or broadcast.
+			 */
+			retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
+
+		if (retval)
+			return retval;
+	}
+
 
 	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
 	 * would have already happened in close and is redundant.
@@ -6102,18 +6159,12 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
 }
 
 #ifdef CONFIG_PM
-static bool e1000e_pm_ready(struct e1000_adapter *adapter)
-{
-	return !!adapter->tx_ring->buffer_info;
-}
-
 static int __e1000_resume(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u16 aspm_disable_flag = 0;
-	u32 err;
 
 	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
 		aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6124,13 +6175,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 
 	pci_set_master(pdev);
 
-	e1000e_set_interrupt_capability(adapter);
-	if (netif_running(netdev)) {
-		err = e1000_request_irq(adapter);
-		if (err)
-			return err;
-	}
-
 	if (hw->mac.type >= e1000_pch2lan)
 		e1000_resume_workarounds_pchlan(&adapter->hw);
 
@@ -6169,11 +6213,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 
 	e1000_init_manageability_pt(adapter);
 
-	if (netif_running(netdev))
-		e1000e_up(adapter);
-
-	netif_device_attach(netdev);
-
 	/* If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up.  For all other cases, let the f/w know that the h/w is now
 	 * under the control of the driver.
@@ -6184,75 +6223,111 @@ static int __e1000_resume(struct pci_dev *pdev)
 	return 0;
 }
 
+static int e1000e_pm_thaw(struct device *dev)
+{
+	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000e_set_interrupt_capability(adapter);
+	if (netif_running(netdev)) {
+		u32 err = e1000_request_irq(adapter);
+
+		if (err)
+			return err;
+
+		e1000e_up(adapter);
+	}
+
+	netif_device_attach(netdev);
+
+	return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
-static int e1000_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 
+	e1000e_pm_freeze(dev);
+
 	return __e1000_shutdown(pdev, false);
 }
 
-static int e1000_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int rc;
 
-	if (e1000e_pm_ready(adapter))
-		adapter->idle_check = true;
+	rc = __e1000_resume(pdev);
+	if (rc)
+		return rc;
 
-	return __e1000_resume(pdev);
+	return e1000e_pm_thaw(dev);
 }
 #endif /* CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM_RUNTIME
-static int e1000_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_idle(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if (!e1000e_pm_ready(adapter))
-		return 0;
+	if (!e1000e_has_link(adapter))
+		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
 
-	return __e1000_shutdown(pdev, true);
+	return -EBUSY;
 }
 
-static int e1000_idle(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int rc;
 
-	if (!e1000e_pm_ready(adapter))
-		return 0;
+	rc = __e1000_resume(pdev);
+	if (rc)
+		return rc;
 
-	if (adapter->idle_check) {
-		adapter->idle_check = false;
-		if (!e1000e_has_link(adapter))
-			pm_schedule_suspend(dev, MSEC_PER_SEC);
-	}
+	if (netdev->flags & IFF_UP)
+		rc = e1000e_up(adapter);
 
-	return -EBUSY;
+	return rc;
 }
 
-static int e1000_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if (!e1000e_pm_ready(adapter))
-		return 0;
+	if (netdev->flags & IFF_UP) {
+		int count = E1000_CHECK_RESET_COUNT;
 
-	adapter->idle_check = !dev->power.runtime_auto;
-	return __e1000_resume(pdev);
+		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
+			usleep_range(10000, 20000);
+
+		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+		/* Down the device without resetting the hardware */
+		e1000e_down(adapter, false);
+	}
+
+	if (__e1000_shutdown(pdev, true)) {
+		e1000e_pm_runtime_resume(dev);
+		return -EBUSY;
+	}
+
+	return 0;
 }
 #endif /* CONFIG_PM_RUNTIME */
 #endif /* CONFIG_PM */
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
+	e1000e_pm_freeze(&pdev->dev);
+
 	__e1000_shutdown(pdev, false);
 }
 
@@ -6338,7 +6413,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 		return PCI_ERS_RESULT_DISCONNECT;
 
 	if (netif_running(netdev))
-		e1000e_down(adapter);
+		e1000e_down(adapter, true);
 	pci_disable_device(pdev);
 
 	/* Request a slot reset. */
@@ -6350,7 +6425,7 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
  * @pdev: Pointer to PCI device
  *
  * Restart the card from scratch, as if from a cold-boot. Implementation
- * resembles the first-half of the e1000_resume routine.
+ * resembles the first-half of the e1000e_pm_resume routine.
  */
 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
 {
@@ -6397,7 +6472,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
  *
  * This callback is called when the error recovery driver tells us that
  * it's OK to resume normal operation. Implementation resembles the
- * second-half of the e1000_resume routine.
+ * second-half of the e1000e_pm_resume routine.
  */
 static void e1000_io_resume(struct pci_dev *pdev)
 {
@@ -6902,9 +6977,6 @@ static void e1000_remove(struct pci_dev *pdev)
 		}
 	}
 
-	if (!(netdev->flags & IFF_UP))
-		e1000_power_down_phy(adapter);
-
 	/* Don't lie to e1000_close() down the road. */
 	if (!down)
 		clear_bit(__E1000_DOWN, &adapter->state);
@@ -7026,9 +7098,16 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
 
 static const struct dev_pm_ops e1000_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
-	SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
-			   e1000_idle)
+#ifdef CONFIG_PM_SLEEP
+	.suspend	= e1000e_pm_suspend,
+	.resume		= e1000e_pm_resume,
+	.freeze		= e1000e_pm_freeze,
+	.thaw		= e1000e_pm_thaw,
+	.poweroff	= e1000e_pm_suspend,
+	.restore	= e1000e_pm_resume,
+#endif
+	SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+			   e1000e_pm_runtime_idle)
 };
 
 /* PCI Device API Driver */
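
The sleep callbacks are open-coded above because SET_SYSTEM_SLEEP_PM_OPS() assigns a single suspend routine to .suspend, .freeze and .poweroff alike, while this driver now needs .freeze (quiesce only) to differ from .suspend (quiesce plus wake-up configuration). For contrast, the macro would have expanded to roughly:

/* what SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) fills in */
.suspend  = suspend_fn,	.resume  = resume_fn,
.freeze   = suspend_fn,	.thaw    = resume_fn,
.poweroff = suspend_fn,	.restore = resume_fn,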
@@ -7055,7 +7134,7 @@ static int __init e1000_init_module(void)
 	int ret;
 	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 		e1000e_driver_version);
-	pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n");
+	pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
 	ret = pci_register_driver(&e1000_driver);
 
 	return ret;
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index d70a03906ac0..a9a976f04bff 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 45fc69561627..342bf69efab5 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_NVM_H_ 22#ifndef _E1000E_NVM_H_
30#define _E1000E_NVM_H_ 23#define _E1000E_NVM_H_
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index c16bd75b6caa..d0ac0f3249c8 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include <linux/netdevice.h> 22#include <linux/netdevice.h>
30#include <linux/module.h> 23#include <linux/module.h>
@@ -381,6 +374,12 @@ void e1000e_check_options(struct e1000_adapter *adapter)
381 "%s set to dynamic mode\n", opt.name); 374 "%s set to dynamic mode\n", opt.name);
382 adapter->itr = 20000; 375 adapter->itr = 20000;
383 break; 376 break;
377 case 2:
378 dev_info(&adapter->pdev->dev,
379 "%s Invalid mode - setting default\n",
380 opt.name);
381 adapter->itr_setting = opt.def;
382 /* fall-through */
384 case 3: 383 case 3:
385 dev_info(&adapter->pdev->dev, 384 dev_info(&adapter->pdev->dev,
386 "%s set to dynamic conservative mode\n", 385 "%s set to dynamic conservative mode\n",
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 20e71f4ca426..00b3fc98bf30 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#include "e1000.h" 22#include "e1000.h"
30 23
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index f4f71b9991e3..3841bccf058c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_PHY_H_ 22#ifndef _E1000E_PHY_H_
30#define _E1000E_PHY_H_ 23#define _E1000E_PHY_H_
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 065f8c80d4f2..fb1a914a3ad4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29/* PTP 1588 Hardware Clock (PHC) 22/* PTP 1588 Hardware Clock (PHC)
30 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) 23 * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb)
@@ -47,6 +40,7 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
47 ptp_clock_info); 40 ptp_clock_info);
48 struct e1000_hw *hw = &adapter->hw; 41 struct e1000_hw *hw = &adapter->hw;
49 bool neg_adj = false; 42 bool neg_adj = false;
43 unsigned long flags;
50 u64 adjustment; 44 u64 adjustment;
51 u32 timinca, incvalue; 45 u32 timinca, incvalue;
52 s32 ret_val; 46 s32 ret_val;
@@ -64,6 +58,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
64 if (ret_val) 58 if (ret_val)
65 return ret_val; 59 return ret_val;
66 60
61 spin_lock_irqsave(&adapter->systim_lock, flags);
62
67 incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; 63 incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK;
68 64
69 adjustment = incvalue; 65 adjustment = incvalue;
@@ -77,6 +73,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
77 73
78 ew32(TIMINCA, timinca); 74 ew32(TIMINCA, timinca);
79 75
76 spin_unlock_irqrestore(&adapter->systim_lock, flags);
77
80 return 0; 78 return 0;
81} 79}
82 80
@@ -191,6 +189,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
191 .n_alarm = 0, 189 .n_alarm = 0,
192 .n_ext_ts = 0, 190 .n_ext_ts = 0,
193 .n_per_out = 0, 191 .n_per_out = 0,
192 .n_pins = 0,
194 .pps = 0, 193 .pps = 0,
195 .adjfreq = e1000e_phc_adjfreq, 194 .adjfreq = e1000e_phc_adjfreq,
196 .adjtime = e1000e_phc_adjtime, 195 .adjtime = e1000e_phc_adjtime,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index a7e6a3e37257..ea235bbe50d3 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,30 +1,23 @@
1/******************************************************************************* 1/* Intel PRO/1000 Linux driver
2 2 * Copyright(c) 1999 - 2014 Intel Corporation.
3 Intel PRO/1000 Linux driver 3 *
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 * This program is free software; you can redistribute it and/or modify it
5 5 * under the terms and conditions of the GNU General Public License,
6 This program is free software; you can redistribute it and/or modify it 6 * version 2, as published by the Free Software Foundation.
7 under the terms and conditions of the GNU General Public License, 7 *
8 version 2, as published by the Free Software Foundation. 8 * This program is distributed in the hope it will be useful, but WITHOUT
9 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 This program is distributed in the hope it will be useful, but WITHOUT 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * more details.
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 *
13 more details. 13 * The full GNU General Public License is included in this distribution in
14 14 * the file called "COPYING".
15 You should have received a copy of the GNU General Public License along with 15 *
16 this program; if not, write to the Free Software Foundation, Inc., 16 * Contact Information:
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 * Linux NICS <linux.nics@intel.com>
18 18 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
19 The full GNU General Public License is included in this distribution in 19 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
20 the file called "COPYING". 20 */
21
22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27*******************************************************************************/
28 21
29#ifndef _E1000E_REGS_H_ 22#ifndef _E1000E_REGS_H_
30#define _E1000E_REGS_H_ 23#define _E1000E_REGS_H_
@@ -39,6 +32,7 @@
39#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 32#define E1000_SCTL 0x00024 /* SerDes Control - RW */
40#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 33#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
41#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ 34#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */
35#define E1000_FEXT 0x0002C /* Future Extended - RW */
42#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ 36#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */
43#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ 37#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */
44#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ 38#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 72dae4d97b43..33cd8b67535d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -86,12 +86,12 @@
86 86
87#define I40E_NVM_VERSION_LO_SHIFT 0 87#define I40E_NVM_VERSION_LO_SHIFT 0
88#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) 88#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
89#define I40E_NVM_VERSION_HI_SHIFT 8 89#define I40E_NVM_VERSION_HI_SHIFT 12
90#define I40E_NVM_VERSION_HI_MASK (0xff << I40E_NVM_VERSION_HI_SHIFT) 90#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
91 91
92/* The values in here are decimal coded as hex as is the case in the NVM map*/ 92/* The values in here are decimal coded as hex as is the case in the NVM map*/
93#define I40E_CURRENT_NVM_VERSION_HI 0x2 93#define I40E_CURRENT_NVM_VERSION_HI 0x2
94#define I40E_CURRENT_NVM_VERSION_LO 0x30 94#define I40E_CURRENT_NVM_VERSION_LO 0x40
95 95
96/* magic for getting defines into strings */ 96/* magic for getting defines into strings */
97#define STRINGIFY(foo) #foo 97#define STRINGIFY(foo) #foo
@@ -152,8 +152,21 @@ struct i40e_lump_tracking {
152}; 152};
153 153
154#define I40E_DEFAULT_ATR_SAMPLE_RATE 20 154#define I40E_DEFAULT_ATR_SAMPLE_RATE 20
155#define I40E_FDIR_MAX_RAW_PACKET_LOOKUP 512 155#define I40E_FDIR_MAX_RAW_PACKET_SIZE 512
156struct i40e_fdir_data { 156#define I40E_FDIR_BUFFER_FULL_MARGIN 10
157#define I40E_FDIR_BUFFER_HEAD_ROOM 200
158
159struct i40e_fdir_filter {
160 struct hlist_node fdir_node;
 161	/* filter input set */
162 u8 flow_type;
163 u8 ip4_proto;
164 __be32 dst_ip[4];
165 __be32 src_ip[4];
166 __be16 src_port;
167 __be16 dst_port;
168 __be32 sctp_v_tag;
169 /* filter control */
157 u16 q_index; 170 u16 q_index;
158 u8 flex_off; 171 u8 flex_off;
159 u8 pctype; 172 u8 pctype;
@@ -162,7 +175,6 @@ struct i40e_fdir_data {
162 u8 fd_status; 175 u8 fd_status;
163 u16 cnt_index; 176 u16 cnt_index;
164 u32 fd_id; 177 u32 fd_id;
165 u8 *raw_packet;
166}; 178};
167 179
168#define I40E_ETH_P_LLDP 0x88cc 180#define I40E_ETH_P_LLDP 0x88cc
@@ -196,7 +208,7 @@ struct i40e_pf {
196 bool fc_autoneg_status; 208 bool fc_autoneg_status;
197 209
198 u16 eeprom_version; 210 u16 eeprom_version;
199 u16 num_vmdq_vsis; /* num vmdq pools this pf has set up */ 211 u16 num_vmdq_vsis; /* num vmdq vsis this pf has set up */
200 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */ 212 u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
201 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */ 213 u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
202 u16 num_req_vfs; /* num vfs requested for this vf */ 214 u16 num_req_vfs; /* num vfs requested for this vf */
@@ -210,6 +222,9 @@ struct i40e_pf {
210 u8 atr_sample_rate; 222 u8 atr_sample_rate;
211 bool wol_en; 223 bool wol_en;
212 224
225 struct hlist_head fdir_filter_list;
226 u16 fdir_pf_active_filters;
227
213#ifdef CONFIG_I40E_VXLAN 228#ifdef CONFIG_I40E_VXLAN
214 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS]; 229 __be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
215 u16 pending_vxlan_bitmap; 230 u16 pending_vxlan_bitmap;
@@ -251,6 +266,9 @@ struct i40e_pf {
251#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27) 266#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
252#endif 267#endif
253 268
269 /* tracks features that get auto disabled by errors */
270 u64 auto_disable_flags;
271
254 bool stat_offsets_loaded; 272 bool stat_offsets_loaded;
255 struct i40e_hw_port_stats stats; 273 struct i40e_hw_port_stats stats;
256 struct i40e_hw_port_stats stats_offsets; 274 struct i40e_hw_port_stats stats_offsets;
@@ -477,10 +495,10 @@ static inline char *i40e_fw_version_str(struct i40e_hw *hw)
477 "f%d.%d a%d.%d n%02x.%02x e%08x", 495 "f%d.%d a%d.%d n%02x.%02x e%08x",
478 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, 496 hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
479 hw->aq.api_maj_ver, hw->aq.api_min_ver, 497 hw->aq.api_maj_ver, hw->aq.api_min_ver,
480 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) 498 (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
481 >> I40E_NVM_VERSION_HI_SHIFT, 499 I40E_NVM_VERSION_HI_SHIFT,
482 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) 500 (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
483 >> I40E_NVM_VERSION_LO_SHIFT, 501 I40E_NVM_VERSION_LO_SHIFT,
484 hw->nvm.eetrack); 502 hw->nvm.eetrack);
485 503
486 return buf; 504 return buf;
@@ -534,9 +552,13 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi);
534int i40e_fetch_switch_configuration(struct i40e_pf *pf, 552int i40e_fetch_switch_configuration(struct i40e_pf *pf,
535 bool printconfig); 553 bool printconfig);
536 554
537int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, 555int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
538 struct i40e_pf *pf, bool add); 556 struct i40e_pf *pf, bool add);
539 557int i40e_add_del_fdir(struct i40e_vsi *vsi,
558 struct i40e_fdir_filter *input, bool add);
559void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
560int i40e_get_current_fd_count(struct i40e_pf *pf);
561bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
540void i40e_set_ethtool_ops(struct net_device *netdev); 562void i40e_set_ethtool_ops(struct net_device *netdev);
541struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, 563struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
542 u8 *macaddr, s16 vlan, 564 u8 *macaddr, s16 vlan,
@@ -575,6 +597,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
575void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); 597void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
576void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); 598void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
577int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 599int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
600int i40e_vsi_open(struct i40e_vsi *vsi);
578void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); 601void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
579int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); 602int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
580int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid); 603int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid);
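
With the mask change above, the NVM version's high field now lives in the top nibble (4 bits at shift 12) rather than a full byte at shift 8, while the low field stays in the bottom byte. A hypothetical unpacking helper mirroring the new masks:

static inline void example_nvm_version(u16 version, u8 *hi, u8 *lo)
{
	*hi = (version & (0xf << 12)) >> 12;	/* I40E_NVM_VERSION_HI_* */
	*lo = (version & (0xff << 0)) >> 0;	/* I40E_NVM_VERSION_LO_* */
}
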
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3479ae..ed3902bf249b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
647 desc_cb = *desc; 647 desc_cb = *desc;
648 cb_func(hw, &desc_cb); 648 cb_func(hw, &desc_cb);
649 } 649 }
650 memset((void *)desc, 0, sizeof(struct i40e_aq_desc)); 650 memset(desc, 0, sizeof(*desc));
651 memset((void *)details, 0, 651 memset(details, 0, sizeof(*details));
652 sizeof(struct i40e_asq_cmd_details));
653 ntc++; 652 ntc++;
654 if (ntc == asq->count) 653 if (ntc == asq->count)
655 ntc = 0; 654 ntc = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index e7f38b57834d..bb948dd92474 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -162,6 +162,372 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
162 return status; 162 return status;
163} 163}
164 164
165/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
166 * hardware to a bit-field that can be used by SW to more easily determine the
167 * packet type.
168 *
169 * Macros are used to shorten the table lines and make this table human
170 * readable.
171 *
172 * We store the PTYPE in the top byte of the bit field - this is just so that
173 * we can check that the table doesn't have a row missing, as the index into
174 * the table should be the PTYPE.
175 *
176 * Typical work flow:
177 *
178 * IF NOT i40e_ptype_lookup[ptype].known
179 * THEN
180 * Packet is unknown
181 * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
182 * Use the rest of the fields to look at the tunnels, inner protocols, etc
183 * ELSE
184 * Use the enum i40e_rx_l2_ptype to decode the packet type
185 * ENDIF
186 */
187
188/* macro to make the table lines short */
189#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
190 { PTYPE, \
191 1, \
192 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
193 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
194 I40E_RX_PTYPE_##OUTER_FRAG, \
195 I40E_RX_PTYPE_TUNNEL_##T, \
196 I40E_RX_PTYPE_TUNNEL_END_##TE, \
197 I40E_RX_PTYPE_##TEF, \
198 I40E_RX_PTYPE_INNER_PROT_##I, \
199 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
200
201#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
202 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
203
 204/* shorter macros make the table fit but are terse */
205#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
206#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
207#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
208
209/* Lookup table mapping the HW PTYPE to the bit field for decoding */
210struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
211 /* L2 Packet types */
212 I40E_PTT_UNUSED_ENTRY(0),
213 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
214 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
215 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
216 I40E_PTT_UNUSED_ENTRY(4),
217 I40E_PTT_UNUSED_ENTRY(5),
218 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
219 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
220 I40E_PTT_UNUSED_ENTRY(8),
221 I40E_PTT_UNUSED_ENTRY(9),
222 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
223 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
224 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
225 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
226 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
227 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
228 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
229 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
230 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
231 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
232 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
233 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
234
235 /* Non Tunneled IPv4 */
236 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
237 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
238 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
239 I40E_PTT_UNUSED_ENTRY(25),
240 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
241 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
242 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
243
244 /* IPv4 --> IPv4 */
245 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
246 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
247 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
248 I40E_PTT_UNUSED_ENTRY(32),
249 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
250 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
251 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
252
253 /* IPv4 --> IPv6 */
254 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
255 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
256 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
257 I40E_PTT_UNUSED_ENTRY(39),
258 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
259 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
260 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
261
262 /* IPv4 --> GRE/NAT */
263 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
264
265 /* IPv4 --> GRE/NAT --> IPv4 */
266 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
267 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
268 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
269 I40E_PTT_UNUSED_ENTRY(47),
270 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
271 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
272 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
273
274 /* IPv4 --> GRE/NAT --> IPv6 */
275 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
276 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
277 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
278 I40E_PTT_UNUSED_ENTRY(54),
279 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
280 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
281 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
282
283 /* IPv4 --> GRE/NAT --> MAC */
284 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
285
286 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
287 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
288 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
289 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
290 I40E_PTT_UNUSED_ENTRY(62),
291 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
292 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
293 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
294
295 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
296 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
297 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
298 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
299 I40E_PTT_UNUSED_ENTRY(69),
300 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
301 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
302 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
303
304 /* IPv4 --> GRE/NAT --> MAC/VLAN */
305 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
306
307 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
308 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
309 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
310 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
311 I40E_PTT_UNUSED_ENTRY(77),
312 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
313 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
314 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
315
316 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
317 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
318 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
319 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
320 I40E_PTT_UNUSED_ENTRY(84),
321 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
322 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
323 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
324
325 /* Non Tunneled IPv6 */
326 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
327 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
328 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
329 I40E_PTT_UNUSED_ENTRY(91),
330 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
331 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
332 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
333
334 /* IPv6 --> IPv4 */
335 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
336 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
337 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
338 I40E_PTT_UNUSED_ENTRY(98),
339 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
340 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
341 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
342
343 /* IPv6 --> IPv6 */
344 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
345 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
346 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
347 I40E_PTT_UNUSED_ENTRY(105),
348 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
349 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
350 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
351
352 /* IPv6 --> GRE/NAT */
353 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
354
355 /* IPv6 --> GRE/NAT -> IPv4 */
356 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
357 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
358 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
359 I40E_PTT_UNUSED_ENTRY(113),
360 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
361 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
362 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
363
364 /* IPv6 --> GRE/NAT -> IPv6 */
365 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
366 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
367 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
368 I40E_PTT_UNUSED_ENTRY(120),
369 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
370 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
371 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
372
373 /* IPv6 --> GRE/NAT -> MAC */
374 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
375
376 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
377 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
378 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
379 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
380 I40E_PTT_UNUSED_ENTRY(128),
381 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
382 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
383 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
384
385 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
386 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
387 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
388 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
389 I40E_PTT_UNUSED_ENTRY(135),
390 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
391 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
392 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
393
394 /* IPv6 --> GRE/NAT -> MAC/VLAN */
395 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
396
397 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
398 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
399 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
400 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
401 I40E_PTT_UNUSED_ENTRY(143),
402 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
403 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
404 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
405
406 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
407 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
408 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
409 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
410 I40E_PTT_UNUSED_ENTRY(150),
411 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
412 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
413 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
414
415 /* unused entries */
416 I40E_PTT_UNUSED_ENTRY(154),
417 I40E_PTT_UNUSED_ENTRY(155),
418 I40E_PTT_UNUSED_ENTRY(156),
419 I40E_PTT_UNUSED_ENTRY(157),
420 I40E_PTT_UNUSED_ENTRY(158),
421 I40E_PTT_UNUSED_ENTRY(159),
422
423 I40E_PTT_UNUSED_ENTRY(160),
424 I40E_PTT_UNUSED_ENTRY(161),
425 I40E_PTT_UNUSED_ENTRY(162),
426 I40E_PTT_UNUSED_ENTRY(163),
427 I40E_PTT_UNUSED_ENTRY(164),
428 I40E_PTT_UNUSED_ENTRY(165),
429 I40E_PTT_UNUSED_ENTRY(166),
430 I40E_PTT_UNUSED_ENTRY(167),
431 I40E_PTT_UNUSED_ENTRY(168),
432 I40E_PTT_UNUSED_ENTRY(169),
433
434 I40E_PTT_UNUSED_ENTRY(170),
435 I40E_PTT_UNUSED_ENTRY(171),
436 I40E_PTT_UNUSED_ENTRY(172),
437 I40E_PTT_UNUSED_ENTRY(173),
438 I40E_PTT_UNUSED_ENTRY(174),
439 I40E_PTT_UNUSED_ENTRY(175),
440 I40E_PTT_UNUSED_ENTRY(176),
441 I40E_PTT_UNUSED_ENTRY(177),
442 I40E_PTT_UNUSED_ENTRY(178),
443 I40E_PTT_UNUSED_ENTRY(179),
444
445 I40E_PTT_UNUSED_ENTRY(180),
446 I40E_PTT_UNUSED_ENTRY(181),
447 I40E_PTT_UNUSED_ENTRY(182),
448 I40E_PTT_UNUSED_ENTRY(183),
449 I40E_PTT_UNUSED_ENTRY(184),
450 I40E_PTT_UNUSED_ENTRY(185),
451 I40E_PTT_UNUSED_ENTRY(186),
452 I40E_PTT_UNUSED_ENTRY(187),
453 I40E_PTT_UNUSED_ENTRY(188),
454 I40E_PTT_UNUSED_ENTRY(189),
455
456 I40E_PTT_UNUSED_ENTRY(190),
457 I40E_PTT_UNUSED_ENTRY(191),
458 I40E_PTT_UNUSED_ENTRY(192),
459 I40E_PTT_UNUSED_ENTRY(193),
460 I40E_PTT_UNUSED_ENTRY(194),
461 I40E_PTT_UNUSED_ENTRY(195),
462 I40E_PTT_UNUSED_ENTRY(196),
463 I40E_PTT_UNUSED_ENTRY(197),
464 I40E_PTT_UNUSED_ENTRY(198),
465 I40E_PTT_UNUSED_ENTRY(199),
466
467 I40E_PTT_UNUSED_ENTRY(200),
468 I40E_PTT_UNUSED_ENTRY(201),
469 I40E_PTT_UNUSED_ENTRY(202),
470 I40E_PTT_UNUSED_ENTRY(203),
471 I40E_PTT_UNUSED_ENTRY(204),
472 I40E_PTT_UNUSED_ENTRY(205),
473 I40E_PTT_UNUSED_ENTRY(206),
474 I40E_PTT_UNUSED_ENTRY(207),
475 I40E_PTT_UNUSED_ENTRY(208),
476 I40E_PTT_UNUSED_ENTRY(209),
477
478 I40E_PTT_UNUSED_ENTRY(210),
479 I40E_PTT_UNUSED_ENTRY(211),
480 I40E_PTT_UNUSED_ENTRY(212),
481 I40E_PTT_UNUSED_ENTRY(213),
482 I40E_PTT_UNUSED_ENTRY(214),
483 I40E_PTT_UNUSED_ENTRY(215),
484 I40E_PTT_UNUSED_ENTRY(216),
485 I40E_PTT_UNUSED_ENTRY(217),
486 I40E_PTT_UNUSED_ENTRY(218),
487 I40E_PTT_UNUSED_ENTRY(219),
488
489 I40E_PTT_UNUSED_ENTRY(220),
490 I40E_PTT_UNUSED_ENTRY(221),
491 I40E_PTT_UNUSED_ENTRY(222),
492 I40E_PTT_UNUSED_ENTRY(223),
493 I40E_PTT_UNUSED_ENTRY(224),
494 I40E_PTT_UNUSED_ENTRY(225),
495 I40E_PTT_UNUSED_ENTRY(226),
496 I40E_PTT_UNUSED_ENTRY(227),
497 I40E_PTT_UNUSED_ENTRY(228),
498 I40E_PTT_UNUSED_ENTRY(229),
499
500 I40E_PTT_UNUSED_ENTRY(230),
501 I40E_PTT_UNUSED_ENTRY(231),
502 I40E_PTT_UNUSED_ENTRY(232),
503 I40E_PTT_UNUSED_ENTRY(233),
504 I40E_PTT_UNUSED_ENTRY(234),
505 I40E_PTT_UNUSED_ENTRY(235),
506 I40E_PTT_UNUSED_ENTRY(236),
507 I40E_PTT_UNUSED_ENTRY(237),
508 I40E_PTT_UNUSED_ENTRY(238),
509 I40E_PTT_UNUSED_ENTRY(239),
510
511 I40E_PTT_UNUSED_ENTRY(240),
512 I40E_PTT_UNUSED_ENTRY(241),
513 I40E_PTT_UNUSED_ENTRY(242),
514 I40E_PTT_UNUSED_ENTRY(243),
515 I40E_PTT_UNUSED_ENTRY(244),
516 I40E_PTT_UNUSED_ENTRY(245),
517 I40E_PTT_UNUSED_ENTRY(246),
518 I40E_PTT_UNUSED_ENTRY(247),
519 I40E_PTT_UNUSED_ENTRY(248),
520 I40E_PTT_UNUSED_ENTRY(249),
521
522 I40E_PTT_UNUSED_ENTRY(250),
523 I40E_PTT_UNUSED_ENTRY(251),
524 I40E_PTT_UNUSED_ENTRY(252),
525 I40E_PTT_UNUSED_ENTRY(253),
526 I40E_PTT_UNUSED_ENTRY(254),
527 I40E_PTT_UNUSED_ENTRY(255)
528};
529
530
165/** 531/**
166 * i40e_init_shared_code - Initialize the shared code 532 * i40e_init_shared_code - Initialize the shared code
167 * @hw: pointer to hardware structure 533 * @hw: pointer to hardware structure
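
The "typical work flow" described in the table's comment block translates directly into a decode routine: index by the hardware ptype, reject rows not marked known, then branch on whether the outer header is IP. A sketch using the fields the I40E_PTT() macro fills in (the surrounding function is hypothetical):

static void example_decode_ptype(u8 ptype)
{
	struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

	if (!decoded.known)
		return;			/* packet type is unknown */

	if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
		/* use tunnel type, inner protocol and payload layer */
	} else {
		/* decode via the L2 ptype enum instead */
	}
}
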
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 50730141bb7b..036570d76176 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -332,6 +332,7 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
332 u16 type; 332 u16 type;
333 u16 length; 333 u16 length;
334 u16 typelength; 334 u16 typelength;
335 u16 offset = 0;
335 336
336 if (!lldpmib || !dcbcfg) 337 if (!lldpmib || !dcbcfg)
337 return I40E_ERR_PARAM; 338 return I40E_ERR_PARAM;
@@ -339,15 +340,17 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
339 /* set to the start of LLDPDU */ 340 /* set to the start of LLDPDU */
340 lldpmib += ETH_HLEN; 341 lldpmib += ETH_HLEN;
341 tlv = (struct i40e_lldp_org_tlv *)lldpmib; 342 tlv = (struct i40e_lldp_org_tlv *)lldpmib;
342 while (tlv) { 343 while (1) {
343 typelength = ntohs(tlv->typelength); 344 typelength = ntohs(tlv->typelength);
344 type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >> 345 type = (u16)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
345 I40E_LLDP_TLV_TYPE_SHIFT); 346 I40E_LLDP_TLV_TYPE_SHIFT);
346 length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> 347 length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
347 I40E_LLDP_TLV_LEN_SHIFT); 348 I40E_LLDP_TLV_LEN_SHIFT);
349 offset += sizeof(typelength) + length;
348 350
349 if (type == I40E_TLV_TYPE_END) 351 /* END TLV or beyond LLDPDU size */
350 break;/* END TLV break out */ 352 if ((type == I40E_TLV_TYPE_END) || (offset > I40E_LLDPDU_SIZE))
353 break;
351 354
352 switch (type) { 355 switch (type) {
353 case I40E_TLV_TYPE_ORG: 356 case I40E_TLV_TYPE_ORG:
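
Consolidated, the bounded walk added above looks like the following: the consumed offset is advanced before the checks, so the loop stops on the END TLV or as soon as the accumulated length claims more than an LLDPDU can hold, keeping a malformed MIB from walking past the buffer (hypothetical mask/helper names):

while (1) {
	u16 typelength = ntohs(tlv->typelength);
	u16 type = (typelength & TLV_TYPE_MASK) >> TLV_TYPE_SHIFT;
	u16 length = (typelength & TLV_LEN_MASK) >> TLV_LEN_SHIFT;

	offset += sizeof(typelength) + length;
	if (type == TLV_TYPE_END || offset > LLDPDU_SIZE)
		break;		/* END TLV or past the PDU size */

	/* ... handle this TLV, then advance to the next one ... */
}
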
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index da22c3fa2c00..afd43d7973fa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1011,10 +1011,12 @@ static void i40e_dbg_dump_veb_all(struct i40e_pf *pf)
1011 **/ 1011 **/
1012static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) 1012static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable)
1013{ 1013{
1014 if (enable) 1014 if (enable) {
1015 pf->flags |= flag; 1015 pf->flags |= flag;
1016 else 1016 } else {
1017 pf->flags &= ~flag; 1017 pf->flags &= ~flag;
1018 pf->auto_disable_flags |= flag;
1019 }
1018 dev_info(&pf->pdev->dev, "requesting a pf reset\n"); 1020 dev_info(&pf->pdev->dev, "requesting a pf reset\n");
1019 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1021 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
1020} 1022}
@@ -1467,19 +1469,19 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1467 pf->msg_enable); 1469 pf->msg_enable);
1468 } 1470 }
1469 } else if (strncmp(cmd_buf, "pfr", 3) == 0) { 1471 } else if (strncmp(cmd_buf, "pfr", 3) == 0) {
1470 dev_info(&pf->pdev->dev, "forcing PFR\n"); 1472 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n");
1471 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); 1473 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED));
1472 1474
1473 } else if (strncmp(cmd_buf, "corer", 5) == 0) { 1475 } else if (strncmp(cmd_buf, "corer", 5) == 0) {
1474 dev_info(&pf->pdev->dev, "forcing CoreR\n"); 1476 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n");
1475 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); 1477 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED));
1476 1478
1477 } else if (strncmp(cmd_buf, "globr", 5) == 0) { 1479 } else if (strncmp(cmd_buf, "globr", 5) == 0) {
1478 dev_info(&pf->pdev->dev, "forcing GlobR\n"); 1480 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n");
1479 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); 1481 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED));
1480 1482
1481 } else if (strncmp(cmd_buf, "empr", 4) == 0) { 1483 } else if (strncmp(cmd_buf, "empr", 4) == 0) {
1482 dev_info(&pf->pdev->dev, "forcing EMPR\n"); 1484 dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n");
1483 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); 1485 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED));
1484 1486
1485 } else if (strncmp(cmd_buf, "read", 4) == 0) { 1487 } else if (strncmp(cmd_buf, "read", 4) == 0) {
@@ -1663,28 +1665,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1663 desc = NULL; 1665 desc = NULL;
1664 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || 1666 } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
1665 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { 1667 (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
1666 struct i40e_fdir_data fd_data; 1668 struct i40e_fdir_filter fd_data;
1667 u16 packet_len, i, j = 0; 1669 u16 packet_len, i, j = 0;
1668 char *asc_packet; 1670 char *asc_packet;
1671 u8 *raw_packet;
1669 bool add = false; 1672 bool add = false;
1670 int ret; 1673 int ret;
1671 1674
1672 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1675 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
1676 goto command_write_done;
1677
1678 if (strncmp(cmd_buf, "add", 3) == 0)
1679 add = true;
1680
1681 if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1682 goto command_write_done;
1683
1684 asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
1673 GFP_KERNEL); 1685 GFP_KERNEL);
1674 if (!asc_packet) 1686 if (!asc_packet)
1675 goto command_write_done; 1687 goto command_write_done;
1676 1688
1677 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 1689 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE,
1678 GFP_KERNEL); 1690 GFP_KERNEL);
1679 1691
1680 if (!fd_data.raw_packet) { 1692 if (!raw_packet) {
1681 kfree(asc_packet); 1693 kfree(asc_packet);
1682 asc_packet = NULL; 1694 asc_packet = NULL;
1683 goto command_write_done; 1695 goto command_write_done;
1684 } 1696 }
1685 1697
1686 if (strncmp(cmd_buf, "add", 3) == 0)
1687 add = true;
1688 cnt = sscanf(&cmd_buf[13], 1698 cnt = sscanf(&cmd_buf[13],
1689 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s", 1699 "%hx %2hhx %2hhx %hx %2hhx %2hhx %hx %x %hd %511s",
1690 &fd_data.q_index, 1700 &fd_data.q_index,
@@ -1698,36 +1708,36 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
1698 cnt); 1708 cnt);
1699 kfree(asc_packet); 1709 kfree(asc_packet);
1700 asc_packet = NULL; 1710 asc_packet = NULL;
1701 kfree(fd_data.raw_packet); 1711 kfree(raw_packet);
1702 goto command_write_done; 1712 goto command_write_done;
1703 } 1713 }
1704 1714
1705 /* fix packet length if user entered 0 */ 1715 /* fix packet length if user entered 0 */
1706 if (packet_len == 0) 1716 if (packet_len == 0)
1707 packet_len = I40E_FDIR_MAX_RAW_PACKET_LOOKUP; 1717 packet_len = I40E_FDIR_MAX_RAW_PACKET_SIZE;
1708 1718
1709 /* make sure to check the max as well */ 1719 /* make sure to check the max as well */
1710 packet_len = min_t(u16, 1720 packet_len = min_t(u16,
1711 packet_len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); 1721 packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
1712 1722
1713 for (i = 0; i < packet_len; i++) { 1723 for (i = 0; i < packet_len; i++) {
1714 sscanf(&asc_packet[j], "%2hhx ", 1724 sscanf(&asc_packet[j], "%2hhx ",
1715 &fd_data.raw_packet[i]); 1725 &raw_packet[i]);
1716 j += 3; 1726 j += 3;
1717 } 1727 }
1718 dev_info(&pf->pdev->dev, "FD raw packet dump\n"); 1728 dev_info(&pf->pdev->dev, "FD raw packet dump\n");
1719 print_hex_dump(KERN_INFO, "FD raw packet: ", 1729 print_hex_dump(KERN_INFO, "FD raw packet: ",
1720 DUMP_PREFIX_OFFSET, 16, 1, 1730 DUMP_PREFIX_OFFSET, 16, 1,
1721 fd_data.raw_packet, packet_len, true); 1731 raw_packet, packet_len, true);
1722 ret = i40e_program_fdir_filter(&fd_data, pf, add); 1732 ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
1723 if (!ret) { 1733 if (!ret) {
1724 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); 1734 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n");
1725 } else { 1735 } else {
1726 dev_info(&pf->pdev->dev, 1736 dev_info(&pf->pdev->dev,
1727 "Filter command send failed %d\n", ret); 1737 "Filter command send failed %d\n", ret);
1728 } 1738 }
1729 kfree(fd_data.raw_packet); 1739 kfree(raw_packet);
1730 fd_data.raw_packet = NULL; 1740 raw_packet = NULL;
1731 kfree(asc_packet); 1741 kfree(asc_packet);
1732 asc_packet = NULL; 1742 asc_packet = NULL;
1733 } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) { 1743 } else if (strncmp(cmd_buf, "fd-atr off", 10) == 0) {
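
The debugfs rework above follows from dropping raw_packet out of the filter struct: the caller now owns the buffer and passes it alongside the filter, per the i40e_program_fdir_filter() prototype added in i40e.h. A condensed sketch of the new calling convention (error handling elided):

struct i40e_fdir_filter fd_data = { /* input set ... */ };
u8 *raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);

if (raw_packet) {
	/* fill raw_packet with the flow's dummy bytes ... */
	ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add);
	kfree(raw_packet);
}
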
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index b1d7d8c5cb9b..28da4125c8c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -62,6 +62,9 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
62 I40E_NETDEV_STAT(rx_crc_errors), 62 I40E_NETDEV_STAT(rx_crc_errors),
63}; 63};
64 64
65static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
66 struct ethtool_rxnfc *cmd, bool add);
67
65/* These PF_STATs might look like duplicates of some NETDEV_STATs, 68/* These PF_STATs might look like duplicates of some NETDEV_STATs,
66 * but they are separate. This device supports Virtualization, and 69 * but they are separate. This device supports Virtualization, and
67 * as such might have several netdevs supporting VMDq and FCoE going 70 * as such might have several netdevs supporting VMDq and FCoE going
@@ -84,6 +87,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
84 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes), 87 I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
85 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults), 88 I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
86 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults), 89 I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
90 I40E_PF_STAT("tx_timeout", tx_timeout_count),
87 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors), 91 I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
88 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx), 92 I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
89 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx), 93 I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -110,6 +114,11 @@ static struct i40e_stats i40e_gstrings_stats[] = {
110 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), 114 I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
111 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), 115 I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
112 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), 116 I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
117 /* LPI stats */
118 I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
119 I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
120 I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
121 I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
113}; 122};
114 123
115#define I40E_QUEUE_STATS_LEN(n) \ 124#define I40E_QUEUE_STATS_LEN(n) \
@@ -649,18 +658,18 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
649 658
650 /* process Tx ring statistics */ 659 /* process Tx ring statistics */
651 do { 660 do {
652 start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 661 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
653 data[i] = tx_ring->stats.packets; 662 data[i] = tx_ring->stats.packets;
654 data[i + 1] = tx_ring->stats.bytes; 663 data[i + 1] = tx_ring->stats.bytes;
655 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 664 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
656 665
657 /* Rx ring is the 2nd half of the queue pair */ 666 /* Rx ring is the 2nd half of the queue pair */
658 rx_ring = &tx_ring[1]; 667 rx_ring = &tx_ring[1];
659 do { 668 do {
660 start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 669 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
661 data[i + 2] = rx_ring->stats.packets; 670 data[i + 2] = rx_ring->stats.packets;
662 data[i + 3] = rx_ring->stats.bytes; 671 data[i + 3] = rx_ring->stats.bytes;
663 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 672 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
664 } 673 }
665 rcu_read_unlock(); 674 rcu_read_unlock();
666 if (vsi == pf->vsi[pf->lan_vsi]) { 675 if (vsi == pf->vsi[pf->lan_vsi]) {
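
The _bh to _irq switch above is mechanical; the snapshot pattern itself is unchanged -- retry the reads until the writer-side seqcount is stable. A self-contained sketch with a hypothetical ring type (the u64_stats calls are the ones used in the hunk):

#include <linux/u64_stats_sync.h>

struct example_ring {
	struct u64_stats_sync syncp;
	struct { u64 packets, bytes; } stats;
};

static void example_read_stats(struct example_ring *ring,
			       u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
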
@@ -1112,6 +1121,84 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd)
1112} 1121}
1113 1122
1114/** 1123/**
1124 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
1125 * @pf: Pointer to the physical function struct
1126 * @cmd: The command to get or set Rx flow classification rules
1127 * @rule_locs: Array of used rule locations
1128 *
1129 * This function populates both the total and actual rule count of
1130 * the ethtool flow classification command
1131 *
 1132 * Returns 0 on success or -EMSGSIZE if the rule_locs array is too small
1133 **/
1134static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
1135 struct ethtool_rxnfc *cmd,
1136 u32 *rule_locs)
1137{
1138 struct i40e_fdir_filter *rule;
1139 struct hlist_node *node2;
1140 int cnt = 0;
1141
1142 /* report total rule count */
1143 cmd->data = pf->hw.fdir_shared_filter_count +
1144 pf->fdir_pf_filter_count;
1145
1146 hlist_for_each_entry_safe(rule, node2,
1147 &pf->fdir_filter_list, fdir_node) {
1148 if (cnt == cmd->rule_cnt)
1149 return -EMSGSIZE;
1150
1151 rule_locs[cnt] = rule->fd_id;
1152 cnt++;
1153 }
1154
1155 cmd->rule_cnt = cnt;
1156
1157 return 0;
1158}
1159
1160/**
1161 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
1162 * @pf: Pointer to the physical function struct
1163 * @cmd: The command to get or set Rx flow classification rules
1164 *
1165 * This function looks up a filter based on the Rx flow classification
1166 * command and fills the flow spec info for it if found
1167 *
1168 * Returns 0 on success or -EINVAL if filter not found
1169 **/
1170static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
1171 struct ethtool_rxnfc *cmd)
1172{
1173 struct ethtool_rx_flow_spec *fsp =
1174 (struct ethtool_rx_flow_spec *)&cmd->fs;
1175 struct i40e_fdir_filter *rule = NULL;
1176 struct hlist_node *node2;
1177
1178 /* report total rule count */
1179 cmd->data = pf->hw.fdir_shared_filter_count +
1180 pf->fdir_pf_filter_count;
1181
1182 hlist_for_each_entry_safe(rule, node2,
1183 &pf->fdir_filter_list, fdir_node) {
1184 if (fsp->location <= rule->fd_id)
1185 break;
1186 }
1187
1188 if (!rule || fsp->location != rule->fd_id)
1189 return -EINVAL;
1190
1191 fsp->flow_type = rule->flow_type;
1192 fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
1193 fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
1194 fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
1195 fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
1196 fsp->ring_cookie = rule->q_index;
1197
1198 return 0;
1199}
1200
1201/**
1115 * i40e_get_rxnfc - command to get RX flow classification rules 1202 * i40e_get_rxnfc - command to get RX flow classification rules
1116 * @netdev: network interface device structure 1203 * @netdev: network interface device structure
1117 * @cmd: ethtool rxnfc command 1204 * @cmd: ethtool rxnfc command
@@ -1135,15 +1222,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
1135 ret = i40e_get_rss_hash_opts(pf, cmd); 1222 ret = i40e_get_rss_hash_opts(pf, cmd);
1136 break; 1223 break;
1137 case ETHTOOL_GRXCLSRLCNT: 1224 case ETHTOOL_GRXCLSRLCNT:
1138 cmd->rule_cnt = 10; 1225 cmd->rule_cnt = pf->fdir_pf_active_filters;
1139 ret = 0; 1226 ret = 0;
1140 break; 1227 break;
1141 case ETHTOOL_GRXCLSRULE: 1228 case ETHTOOL_GRXCLSRULE:
1142 ret = 0; 1229 ret = i40e_get_ethtool_fdir_entry(pf, cmd);
1143 break; 1230 break;
1144 case ETHTOOL_GRXCLSRLALL: 1231 case ETHTOOL_GRXCLSRLALL:
1145 cmd->data = 500; 1232 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs);
1146 ret = 0; 1233 break;
1147 default: 1234 default:
1148 break; 1235 break;
1149 } 1236 }
@@ -1274,289 +1361,183 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
1274 return 0; 1361 return 0;
1275} 1362}
1276 1363
1277#define IP_HEADER_OFFSET 14
1278#define I40E_UDPIP_DUMMY_PACKET_LEN 42
1279/** 1364/**
1280 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 Flow Director filters for 1365 * i40e_match_fdir_input_set - Match a new filter against an existing one
1281 * a specific flow spec 1366 * @rule: The filter already added
1282 * @vsi: pointer to the targeted VSI 1367 * @input: The new filter to compare against
1283 * @fd_data: the flow director data required from the FDir descriptor
1284 * @ethtool_rx_flow_spec: the flow spec
1285 * @add: true adds a filter, false removes it
1286 * 1368 *
1287 * Returns 0 if the filters were successfully added or removed 1369 * Returns true if the two input sets match
1288 **/ 1370 **/
1289static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi, 1371static bool i40e_match_fdir_input_set(struct i40e_fdir_filter *rule,
1290 struct i40e_fdir_data *fd_data, 1372 struct i40e_fdir_filter *input)
1291 struct ethtool_rx_flow_spec *fsp, bool add)
1292{ 1373{
1293 struct i40e_pf *pf = vsi->back; 1374 if ((rule->dst_ip[0] != input->dst_ip[0]) ||
1294 struct udphdr *udp; 1375 (rule->src_ip[0] != input->src_ip[0]) ||
1295 struct iphdr *ip; 1376 (rule->dst_port != input->dst_port) ||
1296 bool err = false; 1377 (rule->src_port != input->src_port))
1297 int ret; 1378 return false;
1298 int i; 1379 return true;
1299 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1300 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11,
1301 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1302 0, 0, 0, 0, 0, 0, 0, 0};
1303
1304 memcpy(fd_data->raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
1305
1306 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1307 udp = (struct udphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1308 + sizeof(struct iphdr));
1309
1310 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1311 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1312 udp->source = fsp->h_u.tcp_ip4_spec.psrc;
1313 udp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1314
1315 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
1316 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
1317 fd_data->pctype = i;
1318 ret = i40e_program_fdir_filter(fd_data, pf, add);
1319
1320 if (ret) {
1321 dev_info(&pf->pdev->dev,
1322 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1323 fd_data->pctype, ret);
1324 err = true;
1325 } else {
1326 dev_info(&pf->pdev->dev,
1327 "Filter OK for PCTYPE %d (ret = %d)\n",
1328 fd_data->pctype, ret);
1329 }
1330 }
1331
1332 return err ? -EOPNOTSUPP : 0;
1333} 1380}
1334 1381
1335#define I40E_TCPIP_DUMMY_PACKET_LEN 54
1336/** 1382/**
1337 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 Flow Director filters for 1383 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
1338 * a specific flow spec 1384 * @vsi: Pointer to the targeted VSI
1339 * @vsi: pointer to the targeted VSI 1385 * @input: The filter to update or NULL to indicate deletion
1340 * @fd_data: the flow director data required from the FDir descriptor 1386 * @sw_idx: Software index to the filter
1341 * @ethtool_rx_flow_spec: the flow spec 1387 * @cmd: The command to get or set Rx flow classification rules
1342 * @add: true adds a filter, false removes it 1388 *
1389 * This function updates (or deletes) a Flow Director entry from
1390 * the hlist of the corresponding PF
1343 * 1391 *
1344 * Returns 0 if the filters were successfully added or removed 1392 * Returns 0 on success, -EINVAL if a rule to delete was not found
1345 **/ 1393 **/
1346static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi, 1394static int i40e_update_ethtool_fdir_entry(struct i40e_vsi *vsi,
1347 struct i40e_fdir_data *fd_data, 1395 struct i40e_fdir_filter *input,
1348 struct ethtool_rx_flow_spec *fsp, bool add) 1396 u16 sw_idx,
1397 struct ethtool_rxnfc *cmd)
1349{ 1398{
1399 struct i40e_fdir_filter *rule, *parent;
1350 struct i40e_pf *pf = vsi->back; 1400 struct i40e_pf *pf = vsi->back;
1351 struct tcphdr *tcp; 1401 struct hlist_node *node2;
1352 struct iphdr *ip; 1402 int err = -EINVAL;
1353 bool err = false;
1354 int ret;
1355 /* Dummy packet */
1356 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1357 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6,
1358 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1359 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1360 0x80, 0x11, 0x0, 0x72, 0, 0, 0, 0};
1361
1362 memcpy(fd_data->raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
1363
1364 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1365 tcp = (struct tcphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET
1366 + sizeof(struct iphdr));
1367
1368 ip->daddr = fsp->h_u.tcp_ip4_spec.ip4dst;
1369 tcp->dest = fsp->h_u.tcp_ip4_spec.pdst;
1370 ip->saddr = fsp->h_u.tcp_ip4_spec.ip4src;
1371 tcp->source = fsp->h_u.tcp_ip4_spec.psrc;
1372
1373 if (add) {
1374 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
1375 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
1376 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
1377 }
1378 }
1379 1403
1380 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN; 1404 parent = NULL;
1381 ret = i40e_program_fdir_filter(fd_data, pf, add); 1405 rule = NULL;
1382 1406
1383 if (ret) { 1407 hlist_for_each_entry_safe(rule, node2,
1384 dev_info(&pf->pdev->dev, 1408 &pf->fdir_filter_list, fdir_node) {
1385 "Filter command send failed for PCTYPE %d (ret = %d)\n", 1409 /* hash found, or no matching entry */
1386 fd_data->pctype, ret); 1410 if (rule->fd_id >= sw_idx)
1387 err = true; 1411 break;
1388 } else { 1412 parent = rule;
1389 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1390 fd_data->pctype, ret);
1391 } 1413 }
1392 1414
1393 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; 1415 /* if there is an old rule occupying our place remove it */
1394 1416 if (rule && (rule->fd_id == sw_idx)) {
1395 ret = i40e_program_fdir_filter(fd_data, pf, add); 1417 if (input && !i40e_match_fdir_input_set(rule, input))
1396 if (ret) { 1418 err = i40e_add_del_fdir(vsi, rule, false);
1397 dev_info(&pf->pdev->dev, 1419 else if (!input)
1398 "Filter command send failed for PCTYPE %d (ret = %d)\n", 1420 err = i40e_add_del_fdir(vsi, rule, false);
1399 fd_data->pctype, ret); 1421 hlist_del(&rule->fdir_node);
1400 err = true; 1422 kfree(rule);
1401 } else { 1423 pf->fdir_pf_active_filters--;
1402 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
1403 fd_data->pctype, ret);
1404 } 1424 }
1405 1425
1406 return err ? -EOPNOTSUPP : 0; 1426 /* If there was no input this was a delete; err should be 0 if a
1407} 1427 * rule was found and removed from the list, otherwise -EINVAL
1428 */
1429 if (!input)
1430 return err;
1408 1431
1409/** 1432 /* initialize node and set software index */
1410 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for 1433 INIT_HLIST_NODE(&input->fdir_node);
1411 * a specific flow spec 1434
1412 * @vsi: pointer to the targeted VSI 1435 /* add filter to the list */
1413 * @fd_data: the flow director data required from the FDir descriptor 1436 if (parent)
1414 * @ethtool_rx_flow_spec: the flow spec 1437 hlist_add_after(&parent->fdir_node, &input->fdir_node);
1415 * @add: true adds a filter, false removes it 1438 else
1416 * 1439 hlist_add_head(&input->fdir_node,
1417 * Returns 0 if the filters were successfully added or removed 1440 &pf->fdir_filter_list);
1418 **/ 1441
1419static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi, 1442 /* update counts */
1420 struct i40e_fdir_data *fd_data, 1443 pf->fdir_pf_active_filters++;
1421 struct ethtool_rx_flow_spec *fsp, bool add) 1444
1422{ 1445 return 0;
1423 return -EOPNOTSUPP;
1424} 1446}
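
i40e_update_ethtool_fdir_entry() keeps pf->fdir_filter_list sorted ascending by fd_id: the walk remembers the last node with a smaller id as parent, an exact match is unlinked (after being removed from hardware when the input set changed), and the new node is linked after parent or at the head. A self-contained sketch of that sorted insert-or-replace invariant on a plain singly linked list; the names are illustrative, not driver code, and the two link branches play the same roles as hlist_add_after()/hlist_add_head() above:

#include <stdio.h>
#include <stdlib.h>

struct rule {
	unsigned int id;
	struct rule *next;
};

/* Insert-or-replace keeping the list sorted ascending by id, mirroring
 * the parent/exact-match walk in the update function above. */
static void update_rule(struct rule **head, unsigned int id)
{
	struct rule *parent = NULL, *cur = *head, *node;

	while (cur && cur->id < id) {		/* stop at first id >= ours */
		parent = cur;
		cur = cur->next;
	}
	if (cur && cur->id == id) {		/* old rule in our slot */
		if (parent)
			parent->next = cur->next;
		else
			*head = cur->next;
		free(cur);
	}
	node = malloc(sizeof(*node));
	if (!node)
		return;
	node->id = id;
	if (parent) {				/* hlist_add_after() analogue */
		node->next = parent->next;
		parent->next = node;
	} else {				/* hlist_add_head() analogue */
		node->next = *head;
		*head = node;
	}
}

int main(void)
{
	struct rule *head = NULL;

	update_rule(&head, 7);
	update_rule(&head, 3);
	update_rule(&head, 7);			/* replaces the existing id 7 */
	for (struct rule *r = head; r; r = r->next)
		printf("%u\n", r->id);		/* prints 3, then 7 */
	return 0;
}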
1425 1447
1426#define I40E_IP_DUMMY_PACKET_LEN 34
1427/** 1448/**
1428 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for 1449 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
1429 * a specific flow spec 1450 * @vsi: Pointer to the targeted VSI
1430 * @vsi: pointer to the targeted VSI 1451 * @cmd: The command to get or set Rx flow classification rules
1431 * @fd_data: the flow director data required for the FDir descriptor
1432 * @fsp: the ethtool flow spec
1433 * @add: true adds a filter, false removes it
1434 * 1452 *
1435 * Returns 0 if the filters were successfully added or removed 1453 * The function removes a Flow Director filter entry from the
1436 **/ 1454 * hlist of the corresponding PF
1437static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi, 1455 *
1438 struct i40e_fdir_data *fd_data, 1456 * Returns 0 on success
1439 struct ethtool_rx_flow_spec *fsp, bool add) 1457 */
1458static int i40e_del_fdir_entry(struct i40e_vsi *vsi,
1459 struct ethtool_rxnfc *cmd)
1440{ 1460{
1461 struct ethtool_rx_flow_spec *fsp =
1462 (struct ethtool_rx_flow_spec *)&cmd->fs;
1441 struct i40e_pf *pf = vsi->back; 1463 struct i40e_pf *pf = vsi->back;
1442 struct iphdr *ip; 1464 int ret = 0;
1443 bool err = false;
1444 int ret;
1445 int i;
1446 char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
1447 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10,
1448 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1449
1450 memcpy(fd_data->raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
1451 ip = (struct iphdr *)(fd_data->raw_packet + IP_HEADER_OFFSET);
1452
1453 ip->saddr = fsp->h_u.usr_ip4_spec.ip4src;
1454 ip->daddr = fsp->h_u.usr_ip4_spec.ip4dst;
1455 ip->protocol = fsp->h_u.usr_ip4_spec.proto;
1456 1465
1457 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER; 1466 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd);
1458 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
1459 fd_data->pctype = i;
1460 ret = i40e_program_fdir_filter(fd_data, pf, add);
1461 1467
1462 if (ret) { 1468 i40e_fdir_check_and_reenable(pf);
1463 dev_info(&pf->pdev->dev, 1469 return ret;
1464 "Filter command send failed for PCTYPE %d (ret = %d)\n",
1465 fd_data->pctype, ret);
1466 err = true;
1467 } else {
1468 dev_info(&pf->pdev->dev,
1469 "Filter OK for PCTYPE %d (ret = %d)\n",
1470 fd_data->pctype, ret);
1471 }
1472 }
1473
1474 return err ? -EOPNOTSUPP : 0;
1475} 1470}
1476 1471
1477/** 1472/**
1478 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters for 1473 * i40e_add_del_fdir_ethtool - Add/Remove Flow Director filters
1479 * a specific flow spec based on their protocol
1480 * @vsi: pointer to the targeted VSI 1474 * @vsi: pointer to the targeted VSI
1481 * @cmd: command to get or set RX flow classification rules 1475 * @cmd: command to get or set RX flow classification rules
1482 * @add: true adds a filter, false removes it 1476 * @add: true adds a filter, false removes it
1483 * 1477 *
1484 * Returns 0 if the filters were successfully added or removed 1478 * Add/Remove Flow Director filters for a specific flow spec based on their
1479 * protocol. Returns 0 if the filters were successfully added or removed.
1485 **/ 1480 **/
1486static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi, 1481static int i40e_add_del_fdir_ethtool(struct i40e_vsi *vsi,
1487 struct ethtool_rxnfc *cmd, bool add) 1482 struct ethtool_rxnfc *cmd, bool add)
1488{ 1483{
1489 struct i40e_fdir_data fd_data; 1484 struct ethtool_rx_flow_spec *fsp;
1490 int ret = -EINVAL; 1485 struct i40e_fdir_filter *input;
1491 struct i40e_pf *pf; 1486 struct i40e_pf *pf;
1492 struct ethtool_rx_flow_spec *fsp = 1487 int ret = -EINVAL;
1493 (struct ethtool_rx_flow_spec *)&cmd->fs;
1494 1488
1495 if (!vsi) 1489 if (!vsi)
1496 return -EINVAL; 1490 return -EINVAL;
1497 1491
1498 pf = vsi->back; 1492 pf = vsi->back;
1499 1493
1500 if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) && 1494 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
1501 (fsp->ring_cookie >= vsi->num_queue_pairs)) 1495 return -EOPNOTSUPP;
1502 return -EINVAL;
1503 1496
1504 /* Populate the Flow Director that we have at the moment 1497 if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1505 * and allocate the raw packet buffer for the calling functions 1498 return -ENOSPC;
1506 */
1507 fd_data.raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
1508 GFP_KERNEL);
1509 1499
1510 if (!fd_data.raw_packet) { 1500 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
1511 dev_info(&pf->pdev->dev, "Could not allocate memory\n"); 1501
1512 return -ENOMEM; 1502 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort +
1503 pf->hw.func_caps.fd_filters_guaranteed)) {
1504 return -EINVAL;
1513 } 1505 }
1514 1506
1515 fd_data.q_index = fsp->ring_cookie; 1507 if ((fsp->ring_cookie >= vsi->num_queue_pairs) && add)
1516 fd_data.flex_off = 0; 1508 return -EINVAL;
1517 fd_data.pctype = 0;
1518 fd_data.dest_vsi = vsi->id;
1519 fd_data.dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1520 fd_data.fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1521 fd_data.cnt_index = 0;
1522 fd_data.fd_id = 0;
1523 1509
1524 switch (fsp->flow_type & ~FLOW_EXT) { 1510 input = kzalloc(sizeof(*input), GFP_KERNEL);
1525 case TCP_V4_FLOW: 1511
1526 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); 1512 if (!input)
1527 break; 1513 return -ENOMEM;
1528 case UDP_V4_FLOW: 1514
1529 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); 1515 input->fd_id = fsp->location;
1530 break; 1516
1531 case SCTP_V4_FLOW: 1517 input->q_index = fsp->ring_cookie;
1532 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); 1518 input->flex_off = 0;
1533 break; 1519 input->pctype = 0;
1534 case IPV4_FLOW: 1520 input->dest_vsi = vsi->id;
1535 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add); 1521 input->dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX;
1536 break; 1522 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
1537 case IP_USER_FLOW: 1523 input->cnt_index = 0;
1538 switch (fsp->h_u.usr_ip4_spec.proto) { 1524 input->flow_type = fsp->flow_type;
1539 case IPPROTO_TCP: 1525 input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
1540 ret = i40e_add_del_fdir_tcpv4(vsi, &fd_data, fsp, add); 1526 input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
1541 break; 1527 input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
1542 case IPPROTO_UDP: 1528 input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
1543 ret = i40e_add_del_fdir_udpv4(vsi, &fd_data, fsp, add); 1529 input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
1544 break; 1530
1545 case IPPROTO_SCTP: 1531 ret = i40e_add_del_fdir(vsi, input, add);
1546 ret = i40e_add_del_fdir_sctpv4(vsi, &fd_data, fsp, add); 1532 if (ret) {
1547 break; 1533 kfree(input);
1548 default: 1534 return ret;
1549 ret = i40e_add_del_fdir_ipv4(vsi, &fd_data, fsp, add);
1550 break;
1551 }
1552 break;
1553 default:
1554 dev_info(&pf->pdev->dev, "Could not specify spec type\n");
1555 ret = -EINVAL;
1556 } 1535 }
1557 1536
1558 kfree(fd_data.raw_packet); 1537 if (!ret && add)
1559 fd_data.raw_packet = NULL; 1538 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL);
1539 else
1540 kfree(input);
1560 1541
1561 return ret; 1542 return ret;
1562} 1543}
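
From userspace this function is reached through the ETHTOOL_SRXCLSRLINS and ETHTOOL_SRXCLSRLDEL ioctls, which is what `ethtool -N <dev> flow-type ... loc N` and `ethtool -N <dev> delete N` issue. A sketch of inserting a TCP/IPv4 rule by hand, assuming ntuple filtering is already enabled (`ethtool -K <dev> ntuple on`); the device name and field values are examples only:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;		/* lands in the add path above */
	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.1");
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.ring_cookie = 2;			/* deliver matches to queue 2 */
	nfc.fs.location = 8;			/* becomes the filter's fd_id */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example device */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");
	close(fd);
	return 0;
}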
@@ -1583,7 +1564,7 @@ static int i40e_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
1583 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true); 1564 ret = i40e_add_del_fdir_ethtool(vsi, cmd, true);
1584 break; 1565 break;
1585 case ETHTOOL_SRXCLSRLDEL: 1566 case ETHTOOL_SRXCLSRLDEL:
1586 ret = i40e_add_del_fdir_ethtool(vsi, cmd, false); 1567 ret = i40e_del_fdir_entry(vsi, cmd);
1587 break; 1568 break;
1588 default: 1569 default:
1589 break; 1570 break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371ca361..28df88ef3c8b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,7 +38,7 @@ static const char i40e_driver_string[] =
38 38
39#define DRV_VERSION_MAJOR 0 39#define DRV_VERSION_MAJOR 0
40#define DRV_VERSION_MINOR 3 40#define DRV_VERSION_MINOR 3
41#define DRV_VERSION_BUILD 30 41#define DRV_VERSION_BUILD 36
42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ 42#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
43 __stringify(DRV_VERSION_MINOR) "." \ 43 __stringify(DRV_VERSION_MINOR) "." \
44 __stringify(DRV_VERSION_BUILD) DRV_KERN 44 __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +305,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
305 break; 305 break;
306 default: 306 default:
307 netdev_err(netdev, "tx_timeout recovery unsuccessful\n"); 307 netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
308 set_bit(__I40E_DOWN, &vsi->state);
308 i40e_down(vsi); 309 i40e_down(vsi);
309 break; 310 break;
310 } 311 }
@@ -375,20 +376,20 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
375 continue; 376 continue;
376 377
377 do { 378 do {
378 start = u64_stats_fetch_begin_bh(&tx_ring->syncp); 379 start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
379 packets = tx_ring->stats.packets; 380 packets = tx_ring->stats.packets;
380 bytes = tx_ring->stats.bytes; 381 bytes = tx_ring->stats.bytes;
381 } while (u64_stats_fetch_retry_bh(&tx_ring->syncp, start)); 382 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
382 383
383 stats->tx_packets += packets; 384 stats->tx_packets += packets;
384 stats->tx_bytes += bytes; 385 stats->tx_bytes += bytes;
385 rx_ring = &tx_ring[1]; 386 rx_ring = &tx_ring[1];
386 387
387 do { 388 do {
388 start = u64_stats_fetch_begin_bh(&rx_ring->syncp); 389 start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
389 packets = rx_ring->stats.packets; 390 packets = rx_ring->stats.packets;
390 bytes = rx_ring->stats.bytes; 391 bytes = rx_ring->stats.bytes;
391 } while (u64_stats_fetch_retry_bh(&rx_ring->syncp, start)); 392 } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
392 393
393 stats->rx_packets += packets; 394 stats->rx_packets += packets;
394 stats->rx_bytes += bytes; 395 stats->rx_bytes += bytes;
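
The fetch_begin/fetch_retry pair is a sequence-counter read: 64-bit ring counters cannot be read atomically on 32-bit hosts, so the reader retries whenever a writer bumped the sequence mid-read; the _bh to _irq switch here tracks the kernel-wide rename of these helpers. A simplified C11 model of the retry loop (real seqcounts add stronger memory barriers; this is illustrative only):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct ring_stats {
	atomic_uint seq;		/* odd while an update is in flight */
	uint64_t packets, bytes;
};

/* Writer side: bump to odd, update the payload, bump back to even. */
static void stats_add(struct ring_stats *s, uint64_t p, uint64_t b)
{
	atomic_fetch_add(&s->seq, 1);
	s->packets += p;
	s->bytes += b;
	atomic_fetch_add(&s->seq, 1);
}

/* Reader side: retry until one read spans no update, the shape of the
 * u64_stats_fetch_begin_irq()/..._retry_irq() loop above. */
static void stats_read(struct ring_stats *s, uint64_t *p, uint64_t *b)
{
	unsigned int start;

	do {
		start = atomic_load(&s->seq);
		*p = s->packets;
		*b = s->bytes;
	} while ((start & 1) || start != atomic_load(&s->seq));
}

int main(void)
{
	struct ring_stats s = { .packets = 0 };
	uint64_t p, b;

	stats_add(&s, 3, 1500);
	stats_read(&s, &p, &b);
	printf("%llu packets, %llu bytes\n",
	       (unsigned long long)p, (unsigned long long)b);
	return 0;
}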
@@ -739,6 +740,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
739 u32 rx_page, rx_buf; 740 u32 rx_page, rx_buf;
740 u64 rx_p, rx_b; 741 u64 rx_p, rx_b;
741 u64 tx_p, tx_b; 742 u64 tx_p, tx_b;
743 u32 val;
742 int i; 744 int i;
743 u16 q; 745 u16 q;
744 746
@@ -769,10 +771,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
769 p = ACCESS_ONCE(vsi->tx_rings[q]); 771 p = ACCESS_ONCE(vsi->tx_rings[q]);
770 772
771 do { 773 do {
772 start = u64_stats_fetch_begin_bh(&p->syncp); 774 start = u64_stats_fetch_begin_irq(&p->syncp);
773 packets = p->stats.packets; 775 packets = p->stats.packets;
774 bytes = p->stats.bytes; 776 bytes = p->stats.bytes;
775 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 777 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
776 tx_b += bytes; 778 tx_b += bytes;
777 tx_p += packets; 779 tx_p += packets;
778 tx_restart += p->tx_stats.restart_queue; 780 tx_restart += p->tx_stats.restart_queue;
@@ -781,10 +783,10 @@ void i40e_update_stats(struct i40e_vsi *vsi)
781 /* Rx queue is part of the same block as Tx queue */ 783 /* Rx queue is part of the same block as Tx queue */
782 p = &p[1]; 784 p = &p[1];
783 do { 785 do {
784 start = u64_stats_fetch_begin_bh(&p->syncp); 786 start = u64_stats_fetch_begin_irq(&p->syncp);
785 packets = p->stats.packets; 787 packets = p->stats.packets;
786 bytes = p->stats.bytes; 788 bytes = p->stats.bytes;
787 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 789 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
788 rx_b += bytes; 790 rx_b += bytes;
789 rx_p += packets; 791 rx_p += packets;
790 rx_buf += p->rx_stats.alloc_buff_failed; 792 rx_buf += p->rx_stats.alloc_buff_failed;
@@ -971,6 +973,20 @@ void i40e_update_stats(struct i40e_vsi *vsi)
971 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port), 973 i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
972 pf->stat_offsets_loaded, 974 pf->stat_offsets_loaded,
973 &osd->rx_jabber, &nsd->rx_jabber); 975 &osd->rx_jabber, &nsd->rx_jabber);
976
977 val = rd32(hw, I40E_PRTPM_EEE_STAT);
978 nsd->tx_lpi_status =
979 (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
980 I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
981 nsd->rx_lpi_status =
982 (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
983 I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
984 i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
985 pf->stat_offsets_loaded,
986 &osd->tx_lpi_count, &nsd->tx_lpi_count);
987 i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
988 pf->stat_offsets_loaded,
989 &osd->rx_lpi_count, &nsd->rx_lpi_count);
974 } 990 }
975 991
976 pf->stat_offsets_loaded = true; 992 pf->stat_offsets_loaded = true;
@@ -1964,11 +1980,14 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1964 1980
1965 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid); 1981 netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
1966 1982
1967 /* If the network stack called us with vid = 0, we should 1983 /* If the network stack called us with vid = 0 then
1968 * indicate to i40e_vsi_add_vlan() that we want to receive 1984 * it is asking to receive priority tagged packets with
1969 * any traffic (i.e. with any vlan tag, or untagged) 1985 * vlan id 0. Our HW receives them by default when configured
1986 * to receive untagged packets so there is no need to add an
1987 * extra filter for vlan 0 tagged packets.
1970 */ 1988 */
1971 ret = i40e_vsi_add_vlan(vsi, vid ? vid : I40E_VLAN_ANY); 1989 if (vid)
1990 ret = i40e_vsi_add_vlan(vsi, vid);
1972 1991
1973 if (!ret && (vid < VLAN_N_VID)) 1992 if (!ret && (vid < VLAN_N_VID))
1974 set_bit(vid, vsi->active_vlans); 1993 set_bit(vid, vsi->active_vlans);
@@ -1981,7 +2000,7 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
1981 * @netdev: network interface to be adjusted 2000 * @netdev: network interface to be adjusted
1982 * @vid: vlan id to be removed 2001 * @vid: vlan id to be removed
1983 * 2002 *
1984 * net_device_ops implementation for adding vlan ids 2003 * net_device_ops implementation for removing vlan ids
1985 **/ 2004 **/
1986static int i40e_vlan_rx_kill_vid(struct net_device *netdev, 2005static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
1987 __always_unused __be16 proto, u16 vid) 2006 __always_unused __be16 proto, u16 vid)
@@ -2177,6 +2196,11 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
2177 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED | 2196 tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2178 I40E_FLAG_FD_ATR_ENABLED)); 2197 I40E_FLAG_FD_ATR_ENABLED));
2179 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP); 2198 tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2199 /* FDIR VSI tx ring can still use RS bit and writebacks */
2200 if (vsi->type != I40E_VSI_FDIR)
2201 tx_ctx.head_wb_ena = 1;
2202 tx_ctx.head_wb_addr = ring->dma +
2203 (ring->count * sizeof(struct i40e_tx_desc));
2180 2204
2181 /* As part of VSI creation/update, FW allocates certain 2205 /* As part of VSI creation/update, FW allocates certain
2182 * Tx arbitration queue sets for each TC enabled for 2206 * Tx arbitration queue sets for each TC enabled for
@@ -2420,6 +2444,28 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
2420} 2444}
2421 2445
2422/** 2446/**
2447 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
2448 * @vsi: Pointer to the targeted VSI
2449 *
 2450 * This function replays the hlist in which all the SB Flow Director
 2451 * filters were saved back onto the hardware.
2452 **/
2453static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
2454{
2455 struct i40e_fdir_filter *filter;
2456 struct i40e_pf *pf = vsi->back;
2457 struct hlist_node *node;
2458
2459 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
2460 return;
2461
2462 hlist_for_each_entry_safe(filter, node,
2463 &pf->fdir_filter_list, fdir_node) {
2464 i40e_add_del_fdir(vsi, filter, true);
2465 }
2466}
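
The restore path exists because a PF reset wipes the hardware filter table while the software hlist survives, making the hlist the single source of truth that i40e_up_complete() replays. A generic sketch of the replay-after-reset pattern; the names are illustrative:

#include <stdio.h>

struct filter {
	unsigned int id;
	struct filter *next;
};

/* Stand-in for i40e_add_del_fdir(vsi, filter, true). */
static void program_hw(const struct filter *f)
{
	printf("reprogramming filter %u\n", f->id);
}

/* After a reset the HW table is empty but the SW list survives, so
 * walking the list rebuilds the table exactly. */
static void filter_restore(struct filter *head)
{
	for (struct filter *f = head; f; f = f->next)
		program_hw(f);
}

int main(void)
{
	struct filter b = { 7, NULL }, a = { 3, &b };

	filter_restore(&a);
	return 0;
}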
2467
2468/**
2423 * i40e_vsi_configure - Set up the VSI for action 2469 * i40e_vsi_configure - Set up the VSI for action
2424 * @vsi: the VSI being configured 2470 * @vsi: the VSI being configured
2425 **/ 2471 **/
@@ -2557,7 +2603,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
2557 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */ 2603 /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
2558 wr32(hw, I40E_PFINT_LNKLST0, 0); 2604 wr32(hw, I40E_PFINT_LNKLST0, 0);
2559 2605
2560 /* Associate the queue pair to the vector and enable the q int */ 2606 /* Associate the queue pair to the vector and enable the queue int */
2561 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK | 2607 val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
2562 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | 2608 (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2563 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); 2609 (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
@@ -2866,8 +2912,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
2866 icr0_remaining); 2912 icr0_remaining);
2867 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) || 2913 if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
2868 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) || 2914 (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
2869 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK) || 2915 (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
2870 (icr0_remaining & I40E_PFINT_ICR0_MAL_DETECT_MASK)) {
2871 dev_info(&pf->pdev->dev, "device will be reset\n"); 2916 dev_info(&pf->pdev->dev, "device will be reset\n");
2872 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); 2917 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2873 i40e_service_event_schedule(pf); 2918 i40e_service_event_schedule(pf);
@@ -3107,13 +3152,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3107 3152
3108 pf_q = vsi->base_queue; 3153 pf_q = vsi->base_queue;
3109 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3154 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3110 j = 1000; 3155 for (j = 0; j < 50; j++) {
3111 do {
3112 usleep_range(1000, 2000);
3113 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q)); 3156 tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
3114 } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) 3157 if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
3115 ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1); 3158 ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
3116 3159 break;
3160 usleep_range(1000, 2000);
3161 }
3117 /* Skip if the queue is already in the requested state */ 3162 /* Skip if the queue is already in the requested state */
3118 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK)) 3163 if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3119 continue; 3164 continue;
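
The open-coded countdown (`j = 1000; do { ... } while (j--...)`) becomes a bounded for loop that polls until the QENA_REQ and QENA_STAT bits agree, sleeping between reads, which both caps the wait and reads more naturally. The same shape in a self-contained sketch with a stubbed register read; the bit positions and settle-after-three-polls behavior are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define REQ_BIT  (1u << 0)
#define STAT_BIT (1u << 2)

/* Stubbed register: pretend hardware catches up on the third poll. */
static unsigned int read_qena(void)
{
	static int polls;
	return ++polls < 3 ? REQ_BIT : REQ_BIT | STAT_BIT;
}

/* Poll until the enable request and status bits agree, with a bounded
 * number of attempts instead of an open-ended countdown. */
static bool wait_queue_settled(int max_polls)
{
	for (int j = 0; j < max_polls; j++) {
		unsigned int reg = read_qena();

		if (!!(reg & REQ_BIT) == !!(reg & STAT_BIT))
			return true;
		/* in the driver: usleep_range(1000, 2000); */
	}
	return false;
}

int main(void)
{
	printf("settled: %d\n", wait_queue_settled(50));
	return 0;
}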
@@ -3123,8 +3168,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
3123 /* turn on/off the queue */ 3168 /* turn on/off the queue */
3124 if (enable) { 3169 if (enable) {
3125 wr32(hw, I40E_QTX_HEAD(pf_q), 0); 3170 wr32(hw, I40E_QTX_HEAD(pf_q), 0);
3126 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK | 3171 tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
3127 I40E_QTX_ENA_QENA_STAT_MASK;
3128 } else { 3172 } else {
3129 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 3173 tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
3130 } 3174 }
@@ -3171,12 +3215,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3171 3215
3172 pf_q = vsi->base_queue; 3216 pf_q = vsi->base_queue;
3173 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { 3217 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
3174 j = 1000; 3218 for (j = 0; j < 50; j++) {
3175 do {
3176 usleep_range(1000, 2000);
3177 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q)); 3219 rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
3178 } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) 3220 if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
3179 ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1); 3221 ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
3222 break;
3223 usleep_range(1000, 2000);
3224 }
3180 3225
3181 if (enable) { 3226 if (enable) {
3182 /* is STAT set ? */ 3227 /* is STAT set ? */
@@ -3190,11 +3235,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
3190 3235
3191 /* turn on/off the queue */ 3236 /* turn on/off the queue */
3192 if (enable) 3237 if (enable)
3193 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK | 3238 rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
3194 I40E_QRX_ENA_QENA_STAT_MASK;
3195 else 3239 else
3196 rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK | 3240 rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
3197 I40E_QRX_ENA_QENA_STAT_MASK);
3198 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); 3241 wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
3199 3242
3200 /* wait for the change to finish */ 3243 /* wait for the change to finish */
@@ -3732,8 +3775,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
3732 NULL); 3775 NULL);
3733 if (aq_ret) { 3776 if (aq_ret) {
3734 dev_info(&vsi->back->pdev->dev, 3777 dev_info(&vsi->back->pdev->dev,
3735 "%s: AQ command Config VSI BW allocation per TC failed = %d\n", 3778 "AQ command Config VSI BW allocation per TC failed = %d\n",
3736 __func__, vsi->back->hw.aq.asq_last_status); 3779 vsi->back->hw.aq.asq_last_status);
3737 return -EINVAL; 3780 return -EINVAL;
3738 } 3781 }
3739 3782
@@ -4062,6 +4105,10 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
4062 } else if (vsi->netdev) { 4105 } else if (vsi->netdev) {
4063 netdev_info(vsi->netdev, "NIC Link is Down\n"); 4106 netdev_info(vsi->netdev, "NIC Link is Down\n");
4064 } 4107 }
4108
4109 /* replay FDIR SB filters */
4110 if (vsi->type == I40E_VSI_FDIR)
4111 i40e_fdir_filter_restore(vsi);
4065 i40e_service_event_schedule(pf); 4112 i40e_service_event_schedule(pf);
4066 4113
4067 return 0; 4114 return 0;
@@ -4208,7 +4255,6 @@ static int i40e_open(struct net_device *netdev)
4208 struct i40e_netdev_priv *np = netdev_priv(netdev); 4255 struct i40e_netdev_priv *np = netdev_priv(netdev);
4209 struct i40e_vsi *vsi = np->vsi; 4256 struct i40e_vsi *vsi = np->vsi;
4210 struct i40e_pf *pf = vsi->back; 4257 struct i40e_pf *pf = vsi->back;
4211 char int_name[IFNAMSIZ];
4212 int err; 4258 int err;
4213 4259
4214 /* disallow open during test */ 4260 /* disallow open during test */
@@ -4217,6 +4263,31 @@ static int i40e_open(struct net_device *netdev)
4217 4263
4218 netif_carrier_off(netdev); 4264 netif_carrier_off(netdev);
4219 4265
4266 err = i40e_vsi_open(vsi);
4267 if (err)
4268 return err;
4269
4270#ifdef CONFIG_I40E_VXLAN
4271 vxlan_get_rx_port(netdev);
4272#endif
4273
4274 return 0;
4275}
4276
4277/**
4278 * i40e_vsi_open - bring up a VSI
4279 * @vsi: the VSI to open
4280 *
4281 * Finish initialization of the VSI.
4282 *
4283 * Returns 0 on success, negative value on failure
4284 **/
4285int i40e_vsi_open(struct i40e_vsi *vsi)
4286{
4287 struct i40e_pf *pf = vsi->back;
4288 char int_name[IFNAMSIZ];
4289 int err;
4290
4220 /* allocate descriptors */ 4291 /* allocate descriptors */
4221 err = i40e_vsi_setup_tx_resources(vsi); 4292 err = i40e_vsi_setup_tx_resources(vsi);
4222 if (err) 4293 if (err)
@@ -4229,18 +4300,22 @@ static int i40e_open(struct net_device *netdev)
4229 if (err) 4300 if (err)
4230 goto err_setup_rx; 4301 goto err_setup_rx;
4231 4302
4303 if (!vsi->netdev) {
 4304 err = -EINVAL;
4305 goto err_setup_rx;
4306 }
4232 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", 4307 snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
4233 dev_driver_string(&pf->pdev->dev), netdev->name); 4308 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
4234 err = i40e_vsi_request_irq(vsi, int_name); 4309 err = i40e_vsi_request_irq(vsi, int_name);
4235 if (err) 4310 if (err)
4236 goto err_setup_rx; 4311 goto err_setup_rx;
4237 4312
4238 /* Notify the stack of the actual queue counts. */ 4313 /* Notify the stack of the actual queue counts. */
4239 err = netif_set_real_num_tx_queues(netdev, vsi->num_queue_pairs); 4314 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
4240 if (err) 4315 if (err)
4241 goto err_set_queues; 4316 goto err_set_queues;
4242 4317
4243 err = netif_set_real_num_rx_queues(netdev, vsi->num_queue_pairs); 4318 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
4244 if (err) 4319 if (err)
4245 goto err_set_queues; 4320 goto err_set_queues;
4246 4321
@@ -4248,10 +4323,6 @@ static int i40e_open(struct net_device *netdev)
4248 if (err) 4323 if (err)
4249 goto err_up_complete; 4324 goto err_up_complete;
4250 4325
4251#ifdef CONFIG_I40E_VXLAN
4252 vxlan_get_rx_port(netdev);
4253#endif
4254
4255 return 0; 4326 return 0;
4256 4327
4257err_up_complete: 4328err_up_complete:
@@ -4269,6 +4340,26 @@ err_setup_tx:
4269} 4340}
4270 4341
4271/** 4342/**
4343 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 4344 * @pf: board private structure
4345 *
4346 * This function destroys the hlist where all the Flow Director
4347 * filters were saved.
4348 **/
4349static void i40e_fdir_filter_exit(struct i40e_pf *pf)
4350{
4351 struct i40e_fdir_filter *filter;
4352 struct hlist_node *node2;
4353
4354 hlist_for_each_entry_safe(filter, node2,
4355 &pf->fdir_filter_list, fdir_node) {
4356 hlist_del(&filter->fdir_node);
4357 kfree(filter);
4358 }
4359 pf->fdir_pf_active_filters = 0;
4360}
4361
4362/**
4272 * i40e_close - Disables a network interface 4363 * i40e_close - Disables a network interface
4273 * @netdev: network interface device structure 4364 * @netdev: network interface device structure
4274 * 4365 *
@@ -4321,7 +4412,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4321 * for the warning interrupt will deal with the shutdown 4412 * for the warning interrupt will deal with the shutdown
4322 * and recovery of the switch setup. 4413 * and recovery of the switch setup.
4323 */ 4414 */
4324 dev_info(&pf->pdev->dev, "GlobalR requested\n"); 4415 dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
4325 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4416 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4326 val |= I40E_GLGEN_RTRIG_GLOBR_MASK; 4417 val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
4327 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4418 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4332,7 +4423,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4332 * 4423 *
4333 * Same as Global Reset, except does *not* include the MAC/PHY 4424 * Same as Global Reset, except does *not* include the MAC/PHY
4334 */ 4425 */
4335 dev_info(&pf->pdev->dev, "CoreR requested\n"); 4426 dev_dbg(&pf->pdev->dev, "CoreR requested\n");
4336 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); 4427 val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
4337 val |= I40E_GLGEN_RTRIG_CORER_MASK; 4428 val |= I40E_GLGEN_RTRIG_CORER_MASK;
4338 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); 4429 wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
@@ -4366,7 +4457,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
4366 * the switch, since we need to do all the recovery as 4457 * the switch, since we need to do all the recovery as
4367 * for the Core Reset. 4458 * for the Core Reset.
4368 */ 4459 */
4369 dev_info(&pf->pdev->dev, "PFR requested\n"); 4460 dev_dbg(&pf->pdev->dev, "PFR requested\n");
4370 i40e_handle_reset_warning(pf); 4461 i40e_handle_reset_warning(pf);
4371 4462
4372 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) { 4463 } else if (reset_flags & (1 << __I40E_REINIT_REQUESTED)) {
@@ -4415,18 +4506,18 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4415 &old_cfg->etscfg.prioritytable, 4506 &old_cfg->etscfg.prioritytable,
4416 sizeof(new_cfg->etscfg.prioritytable))) { 4507 sizeof(new_cfg->etscfg.prioritytable))) {
4417 need_reconfig = true; 4508 need_reconfig = true;
4418 dev_info(&pf->pdev->dev, "ETS UP2TC changed.\n"); 4509 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
4419 } 4510 }
4420 4511
4421 if (memcmp(&new_cfg->etscfg.tcbwtable, 4512 if (memcmp(&new_cfg->etscfg.tcbwtable,
4422 &old_cfg->etscfg.tcbwtable, 4513 &old_cfg->etscfg.tcbwtable,
4423 sizeof(new_cfg->etscfg.tcbwtable))) 4514 sizeof(new_cfg->etscfg.tcbwtable)))
4424 dev_info(&pf->pdev->dev, "ETS TC BW Table changed.\n"); 4515 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
4425 4516
4426 if (memcmp(&new_cfg->etscfg.tsatable, 4517 if (memcmp(&new_cfg->etscfg.tsatable,
4427 &old_cfg->etscfg.tsatable, 4518 &old_cfg->etscfg.tsatable,
4428 sizeof(new_cfg->etscfg.tsatable))) 4519 sizeof(new_cfg->etscfg.tsatable)))
4429 dev_info(&pf->pdev->dev, "ETS TSA Table changed.\n"); 4520 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
4430 } 4521 }
4431 4522
4432 /* Check if PFC configuration has changed */ 4523 /* Check if PFC configuration has changed */
@@ -4434,7 +4525,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4434 &old_cfg->pfc, 4525 &old_cfg->pfc,
4435 sizeof(new_cfg->pfc))) { 4526 sizeof(new_cfg->pfc))) {
4436 need_reconfig = true; 4527 need_reconfig = true;
4437 dev_info(&pf->pdev->dev, "PFC config change detected.\n"); 4528 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
4438 } 4529 }
4439 4530
4440 /* Check if APP Table has changed */ 4531 /* Check if APP Table has changed */
@@ -4442,7 +4533,7 @@ bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
4442 &old_cfg->app, 4533 &old_cfg->app,
4443 sizeof(new_cfg->app))) { 4534 sizeof(new_cfg->app))) {
4444 need_reconfig = true; 4535 need_reconfig = true;
4445 dev_info(&pf->pdev->dev, "APP Table change detected.\n"); 4536 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
4446 } 4537 }
4447 4538
4448 return need_reconfig; 4539 return need_reconfig;
@@ -4492,7 +4583,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
4492 4583
4493 /* No change detected in DCBX configs */ 4584 /* No change detected in DCBX configs */
4494 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) { 4585 if (!memcmp(&tmp_dcbx_cfg, dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
4495 dev_info(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); 4586 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
4496 goto exit; 4587 goto exit;
4497 } 4588 }
4498 4589
@@ -4550,8 +4641,8 @@ static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
4550 struct i40e_vf *vf; 4641 struct i40e_vf *vf;
4551 u16 vf_id; 4642 u16 vf_id;
4552 4643
4553 dev_info(&pf->pdev->dev, "%s: Rx Queue Number = %d QTX_CTL=0x%08x\n", 4644 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
4554 __func__, queue, qtx_ctl); 4645 queue, qtx_ctl);
4555 4646
4556 /* Queue belongs to VF, find the VF and issue VF reset */ 4647 /* Queue belongs to VF, find the VF and issue VF reset */
4557 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK) 4648 if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
@@ -4581,6 +4672,54 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
4581} 4672}
4582 4673
4583/** 4674/**
4675 * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
4676 * @pf: board private structure
4677 **/
4678int i40e_get_current_fd_count(struct i40e_pf *pf)
4679{
4680 int val, fcnt_prog;
4681 val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
4682 fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
4683 ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
4684 I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
4685 return fcnt_prog;
4686}
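
I40E_PFQF_FDSTAT packs the guaranteed and best-effort filter counts into one 32-bit register, extracted with the usual mask-then-shift idiom (the guaranteed field starts at bit 0, so only the best-effort field needs a shift). The same idiom with made-up field positions, not the hardware's real layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: low field in bits 0..12, high field in 16..28;
 * not the real I40E_PFQF_FDSTAT bit positions. */
#define GUARANT_CNT_MASK	0x00001FFFu
#define BEST_CNT_MASK		0x1FFF0000u
#define BEST_CNT_SHIFT		16

static unsigned int total_filters(uint32_t reg)
{
	return (reg & GUARANT_CNT_MASK) +
	       ((reg & BEST_CNT_MASK) >> BEST_CNT_SHIFT);
}

int main(void)
{
	uint32_t reg = (5u << BEST_CNT_SHIFT) | 12u;	/* 5 best + 12 guaranteed */

	printf("%u filters programmed\n", total_filters(reg));	/* 17 */
	return 0;
}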
4687
4688/**
 4689 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
4690 * @pf: board private structure
4691 **/
4692void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
4693{
4694 u32 fcnt_prog, fcnt_avail;
4695
 4696 /* Check if FD SB or ATR was auto-disabled and if there is enough room
4697 * to re-enable
4698 */
4699 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4700 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4701 return;
4702 fcnt_prog = i40e_get_current_fd_count(pf);
4703 fcnt_avail = pf->hw.fdir_shared_filter_count +
4704 pf->fdir_pf_filter_count;
4705 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
4706 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
4707 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
4708 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
4709 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
4710 }
4711 }
4712 /* Wait for some more space to be available to turn on ATR */
4713 if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
4714 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4715 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
4716 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
4717 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
4718 }
4719 }
4720}
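
Note the staggered headrooms: sideband is re-enabled once one I40E_FDIR_BUFFER_HEAD_ROOM of table space is free again, ATR only once twice that is free, so the automatic consumer returns last. Quick arithmetic with an example headroom value (not the driver's constant):

#include <stdio.h>

#define HEAD_ROOM 32			/* example value, not the driver's */

int main(void)
{
	int avail = 8192;		/* guaranteed + best-effort slots */
	int prog = 8140;		/* filters currently programmed */

	/* SB comes back first (one headroom free), ATR last (two). */
	printf("re-enable SB:  %s\n",
	       prog < avail - HEAD_ROOM ? "yes" : "no");	/* yes */
	printf("re-enable ATR: %s\n",
	       prog < avail - HEAD_ROOM * 2 ? "yes" : "no");	/* no */
	return 0;
}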
4721
4722/**
4584 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table 4723 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
4585 * @pf: board private structure 4724 * @pf: board private structure
4586 **/ 4725 **/
@@ -4589,11 +4728,14 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
4589 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT)) 4728 if (!(pf->flags & I40E_FLAG_FDIR_REQUIRES_REINIT))
4590 return; 4729 return;
4591 4730
4592 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4593
4594 /* if interface is down do nothing */ 4731 /* if interface is down do nothing */
4595 if (test_bit(__I40E_DOWN, &pf->state)) 4732 if (test_bit(__I40E_DOWN, &pf->state))
4596 return; 4733 return;
4734 i40e_fdir_check_and_reenable(pf);
4735
4736 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
4737 (pf->flags & I40E_FLAG_FD_SB_ENABLED))
4738 pf->flags &= ~I40E_FLAG_FDIR_REQUIRES_REINIT;
4597} 4739}
4598 4740
4599/** 4741/**
@@ -4903,7 +5045,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4903 event.msg_size); 5045 event.msg_size);
4904 break; 5046 break;
4905 case i40e_aqc_opc_lldp_update_mib: 5047 case i40e_aqc_opc_lldp_update_mib:
4906 dev_info(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); 5048 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
4907#ifdef CONFIG_I40E_DCB 5049#ifdef CONFIG_I40E_DCB
4908 rtnl_lock(); 5050 rtnl_lock();
4909 ret = i40e_handle_lldp_event(pf, &event); 5051 ret = i40e_handle_lldp_event(pf, &event);
@@ -4911,7 +5053,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
4911#endif /* CONFIG_I40E_DCB */ 5053#endif /* CONFIG_I40E_DCB */
4912 break; 5054 break;
4913 case i40e_aqc_opc_event_lan_overflow: 5055 case i40e_aqc_opc_event_lan_overflow:
4914 dev_info(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); 5056 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
4915 i40e_handle_lan_overflow_event(pf, &event); 5057 i40e_handle_lan_overflow_event(pf, &event);
4916 break; 5058 break;
4917 case i40e_aqc_opc_send_msg_to_peer: 5059 case i40e_aqc_opc_send_msg_to_peer:
@@ -5053,6 +5195,12 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
5053 /* increment MSI-X count because current FW skips one */ 5195 /* increment MSI-X count because current FW skips one */
5054 pf->hw.func_caps.num_msix_vectors++; 5196 pf->hw.func_caps.num_msix_vectors++;
5055 5197
5198 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
5199 (pf->hw.aq.fw_maj_ver < 2)) {
5200 pf->hw.func_caps.num_msix_vectors++;
5201 pf->hw.func_caps.num_msix_vectors_vf++;
5202 }
5203
5056 if (pf->hw.debug_mask & I40E_DEBUG_USER) 5204 if (pf->hw.debug_mask & I40E_DEBUG_USER)
5057 dev_info(&pf->pdev->dev, 5205 dev_info(&pf->pdev->dev,
5058 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", 5206 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@ -5132,9 +5280,9 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
5132 err = i40e_up_complete(vsi); 5280 err = i40e_up_complete(vsi);
5133 if (err) 5281 if (err)
5134 goto err_up_complete; 5282 goto err_up_complete;
5283 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5135 } 5284 }
5136 5285
5137 clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
5138 return; 5286 return;
5139 5287
5140err_up_complete: 5288err_up_complete:
@@ -5157,6 +5305,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
5157{ 5305{
5158 int i; 5306 int i;
5159 5307
5308 i40e_fdir_filter_exit(pf);
5160 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) { 5309 for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
5161 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { 5310 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
5162 i40e_vsi_release(pf->vsi[i]); 5311 i40e_vsi_release(pf->vsi[i]);
@@ -5181,7 +5330,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
5181 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) 5330 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
5182 return 0; 5331 return 0;
5183 5332
5184 dev_info(&pf->pdev->dev, "Tearing down internal switch for reset\n"); 5333 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
5185 5334
5186 if (i40e_check_asq_alive(hw)) 5335 if (i40e_check_asq_alive(hw))
5187 i40e_vc_notify_reset(pf); 5336 i40e_vc_notify_reset(pf);
@@ -5228,7 +5377,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5228 5377
5229 if (test_bit(__I40E_DOWN, &pf->state)) 5378 if (test_bit(__I40E_DOWN, &pf->state))
5230 goto end_core_reset; 5379 goto end_core_reset;
5231 dev_info(&pf->pdev->dev, "Rebuilding internal switch\n"); 5380 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
5232 5381
5233 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */ 5382 /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
5234 ret = i40e_init_adminq(&pf->hw); 5383 ret = i40e_init_adminq(&pf->hw);
@@ -5278,7 +5427,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5278 * try to recover minimal use by getting the basic PF VSI working. 5427 * try to recover minimal use by getting the basic PF VSI working.
5279 */ 5428 */
5280 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { 5429 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
5281 dev_info(&pf->pdev->dev, "attempting to rebuild switch\n"); 5430 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
5282 /* find the one VEB connected to the MAC, and find orphans */ 5431 /* find the one VEB connected to the MAC, and find orphans */
5283 for (v = 0; v < I40E_MAX_VEB; v++) { 5432 for (v = 0; v < I40E_MAX_VEB; v++) {
5284 if (!pf->veb[v]) 5433 if (!pf->veb[v])
@@ -5331,6 +5480,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5331 /* restart the VSIs that were rebuilt and running before the reset */ 5480 /* restart the VSIs that were rebuilt and running before the reset */
5332 i40e_pf_unquiesce_all_vsi(pf); 5481 i40e_pf_unquiesce_all_vsi(pf);
5333 5482
5483 if (pf->num_alloc_vfs) {
5484 for (v = 0; v < pf->num_alloc_vfs; v++)
5485 i40e_reset_vf(&pf->vf[v], true);
5486 }
5487
5334 /* tell the firmware that we're starting */ 5488 /* tell the firmware that we're starting */
5335 dv.major_version = DRV_VERSION_MAJOR; 5489 dv.major_version = DRV_VERSION_MAJOR;
5336 dv.minor_version = DRV_VERSION_MINOR; 5490 dv.minor_version = DRV_VERSION_MINOR;
@@ -5338,7 +5492,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
5338 dv.subbuild_version = 0; 5492 dv.subbuild_version = 0;
5339 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); 5493 i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
5340 5494
5341 dev_info(&pf->pdev->dev, "PF reset done\n"); 5495 dev_info(&pf->pdev->dev, "reset complete\n");
5342 5496
5343end_core_reset: 5497end_core_reset:
5344 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); 5498 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5387,7 +5541,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
5387 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) 5541 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
5388 >> I40E_GL_MDET_TX_QUEUE_SHIFT; 5542 >> I40E_GL_MDET_TX_QUEUE_SHIFT;
5389 dev_info(&pf->pdev->dev, 5543 dev_info(&pf->pdev->dev,
5390 "Malicious Driver Detection TX event 0x%02x on q %d of function 0x%02x\n", 5544 "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
5391 event, queue, func); 5545 event, queue, func);
5392 wr32(hw, I40E_GL_MDET_TX, 0xffffffff); 5546 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
5393 mdd_detected = true; 5547 mdd_detected = true;
@@ -5401,7 +5555,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
5401 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) 5555 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
5402 >> I40E_GL_MDET_RX_QUEUE_SHIFT; 5556 >> I40E_GL_MDET_RX_QUEUE_SHIFT;
5403 dev_info(&pf->pdev->dev, 5557 dev_info(&pf->pdev->dev,
5404 "Malicious Driver Detection RX event 0x%02x on q %d of function 0x%02x\n", 5558 "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
5405 event, queue, func); 5559 event, queue, func);
5406 wr32(hw, I40E_GL_MDET_RX, 0xffffffff); 5560 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
5407 mdd_detected = true; 5561 mdd_detected = true;
@@ -5850,37 +6004,16 @@ err_out:
5850 **/ 6004 **/
5851static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) 6005static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
5852{ 6006{
5853 int err = 0; 6007 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
5854 6008 I40E_MIN_MSIX, vectors);
5855 pf->num_msix_entries = 0; 6009 if (vectors < 0) {
5856 while (vectors >= I40E_MIN_MSIX) {
5857 err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
5858 if (err == 0) {
5859 /* good to go */
5860 pf->num_msix_entries = vectors;
5861 break;
5862 } else if (err < 0) {
5863 /* total failure */
5864 dev_info(&pf->pdev->dev,
5865 "MSI-X vector reservation failed: %d\n", err);
5866 vectors = 0;
5867 break;
5868 } else {
5869 /* err > 0 is the hint for retry */
5870 dev_info(&pf->pdev->dev,
5871 "MSI-X vectors wanted %d, retrying with %d\n",
5872 vectors, err);
5873 vectors = err;
5874 }
5875 }
5876
5877 if (vectors > 0 && vectors < I40E_MIN_MSIX) {
5878 dev_info(&pf->pdev->dev, 6010 dev_info(&pf->pdev->dev,
5879 "Couldn't get enough vectors, only %d available\n", 6011 "MSI-X vector reservation failed: %d\n", vectors);
5880 vectors);
5881 vectors = 0; 6012 vectors = 0;
5882 } 6013 }
5883 6014
6015 pf->num_msix_entries = vectors;
6016
5884 return vectors; 6017 return vectors;
5885} 6018}
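
pci_enable_msix_range() subsumes the old downward-retry loop: it requests anywhere between I40E_MIN_MSIX and `vectors` entries and returns either the count actually granted or a negative errno, so a positive return no longer has to be interpreted as a retry hint. A userspace model of the two shapes, with a mock try_enable() standing in for the PCI core:

#include <stdio.h>

/* Mock of the old pci_enable_msix(): 0 on success, a positive value as
 * a "retry with this many" hint, negative on hard failure. Pretend the
 * platform has 8 vectors available. */
static int try_enable(int want)
{
	return want <= 8 ? 0 : 8;
}

/* Old shape: loop on the retry hint until success or underflow. */
static int reserve_loop(int want, int min)
{
	while (want >= min) {
		int err = try_enable(want);

		if (err == 0)
			return want;
		if (err < 0)
			return 0;
		want = err;			/* retry hint */
	}
	return 0;
}

/* New shape, like pci_enable_msix_range(): one call, one answer,
 * success anywhere in [min, max]. */
static int reserve_range(int min, int max)
{
	int avail = 8;				/* what this mock grants */

	if (avail < min)
		return -28;			/* negative errno, like -ENOSPC */
	return avail < max ? avail : max;
}

int main(void)
{
	printf("loop: %d, range: %d\n",
	       reserve_loop(16, 2), reserve_range(2, 16));
	return 0;
}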
5886 6019
@@ -5942,7 +6075,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
5942 6075
5943 } else if (vec == I40E_MIN_MSIX) { 6076 } else if (vec == I40E_MIN_MSIX) {
5944 /* Adjust for minimal MSIX use */ 6077 /* Adjust for minimal MSIX use */
5945 dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n"); 6078 dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
5946 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; 6079 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
5947 pf->num_vmdq_vsis = 0; 6080 pf->num_vmdq_vsis = 0;
5948 pf->num_vmdq_qps = 0; 6081 pf->num_vmdq_qps = 0;
@@ -6071,7 +6204,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6071 6204
6072 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && 6205 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
6073 (pf->flags & I40E_FLAG_MSI_ENABLED)) { 6206 (pf->flags & I40E_FLAG_MSI_ENABLED)) {
6074 dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n"); 6207 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
6075 err = pci_enable_msi(pf->pdev); 6208 err = pci_enable_msi(pf->pdev);
6076 if (err) { 6209 if (err) {
6077 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err); 6210 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6213,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
6080 } 6213 }
6081 6214
6082 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) 6215 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
6083 dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n"); 6216 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
6084 6217
6085 /* track first vector for misc interrupts */ 6218 /* track first vector for misc interrupts */
6086 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); 6219 err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6240,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
6107 i40e_intr, 0, pf->misc_int_name, pf); 6240 i40e_intr, 0, pf->misc_int_name, pf);
6108 if (err) { 6241 if (err) {
6109 dev_info(&pf->pdev->dev, 6242 dev_info(&pf->pdev->dev,
6110 "request_irq for msix_misc failed: %d\n", err); 6243 "request_irq for %s failed: %d\n",
6244 pf->misc_int_name, err);
6111 return -EFAULT; 6245 return -EFAULT;
6112 } 6246 }
6113 } 6247 }
@@ -6258,15 +6392,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
6258 (pf->hw.func_caps.fd_filters_best_effort > 0)) { 6392 (pf->hw.func_caps.fd_filters_best_effort > 0)) {
6259 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; 6393 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6260 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; 6394 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
6261 dev_info(&pf->pdev->dev,
6262 "Flow Director ATR mode Enabled\n");
6263 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { 6395 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
6264 pf->flags |= I40E_FLAG_FD_SB_ENABLED; 6396 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6265 dev_info(&pf->pdev->dev,
6266 "Flow Director Side Band mode Enabled\n");
6267 } else { 6397 } else {
6268 dev_info(&pf->pdev->dev, 6398 dev_info(&pf->pdev->dev,
6269 "Flow Director Side Band mode Disabled in MFP mode\n"); 6399 "Flow Director Sideband mode Disabled in MFP mode\n");
6270 } 6400 }
6271 pf->fdir_pf_filter_count = 6401 pf->fdir_pf_filter_count =
6272 pf->hw.func_caps.fd_filters_guaranteed; 6402 pf->hw.func_caps.fd_filters_guaranteed;
@@ -6287,9 +6417,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
6287 pf->num_req_vfs = min_t(int, 6417 pf->num_req_vfs = min_t(int,
6288 pf->hw.func_caps.num_vfs, 6418 pf->hw.func_caps.num_vfs,
6289 I40E_MAX_VF_COUNT); 6419 I40E_MAX_VF_COUNT);
6290 dev_info(&pf->pdev->dev,
6291 "Number of VFs being requested for PF[%d] = %d\n",
6292 pf->hw.pf_id, pf->num_req_vfs);
6293 } 6420 }
6294#endif /* CONFIG_PCI_IOV */ 6421#endif /* CONFIG_PCI_IOV */
6295 pf->eeprom_version = 0xDEAD; 6422 pf->eeprom_version = 0xDEAD;
@@ -6326,6 +6453,39 @@ sw_init_done:
6326} 6453}
6327 6454
6328/** 6455/**
6456 * i40e_set_ntuple - set the ntuple feature flag and take action
 6457 * @pf: board private structure
6458 * @features: the feature set that the stack is suggesting
6459 *
6460 * returns a bool to indicate if reset needs to happen
6461 **/
6462bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
6463{
6464 bool need_reset = false;
6465
6466 /* Check if Flow Director n-tuple support was enabled or disabled. If
6467 * the state changed, we need to reset.
6468 */
6469 if (features & NETIF_F_NTUPLE) {
6470 /* Enable filters and mark for reset */
6471 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6472 need_reset = true;
6473 pf->flags |= I40E_FLAG_FD_SB_ENABLED;
6474 } else {
6475 /* turn off filters, mark for reset and clear SW filter list */
6476 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
6477 need_reset = true;
6478 i40e_fdir_filter_exit(pf);
6479 }
6480 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6481 /* if ATR was disabled it can be re-enabled. */
6482 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
6483 pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
6484 }
6485 return need_reset;
6486}
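
This is the hook behind `ethtool -K <dev> ntuple on|off`: enabling requests a reset only on a real off-to-on transition, while disabling also drops the software filter list via i40e_fdir_filter_exit() and lets ATR come back. A tiny model of that decision table; the flag values are illustrative stand-ins for the driver's pf->flags bits:

#include <assert.h>
#include <stdbool.h>

#define FLAG_FD_SB	(1u << 0)	/* illustrative stand-ins */
#define FLAG_FD_ATR	(1u << 1)

/* Mirrors the enable/disable transitions in i40e_set_ntuple(). */
static bool set_ntuple(unsigned int *flags, bool ntuple_on)
{
	bool need_reset = false;

	if (ntuple_on) {
		if (!(*flags & FLAG_FD_SB))
			need_reset = true;	/* off -> on transition */
		*flags |= FLAG_FD_SB;
	} else {
		if (*flags & FLAG_FD_SB)
			need_reset = true;	/* on -> off also drops filters */
		*flags &= ~FLAG_FD_SB;
		*flags |= FLAG_FD_ATR;		/* ATR may be re-enabled */
	}
	return need_reset;
}

int main(void)
{
	unsigned int flags = FLAG_FD_ATR;

	assert(set_ntuple(&flags, true));	/* first enable resets */
	assert(!set_ntuple(&flags, true));	/* no-op re-enable does not */
	assert(set_ntuple(&flags, false));	/* disable resets again */
	return 0;
}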
6487
6488/**
6329 * i40e_set_features - set the netdev feature flags 6489 * i40e_set_features - set the netdev feature flags
6330 * @netdev: ptr to the netdev being adjusted 6490 * @netdev: ptr to the netdev being adjusted
6331 * @features: the feature set that the stack is suggesting 6491 * @features: the feature set that the stack is suggesting
@@ -6335,12 +6495,19 @@ static int i40e_set_features(struct net_device *netdev,
6335{ 6495{
6336 struct i40e_netdev_priv *np = netdev_priv(netdev); 6496 struct i40e_netdev_priv *np = netdev_priv(netdev);
6337 struct i40e_vsi *vsi = np->vsi; 6497 struct i40e_vsi *vsi = np->vsi;
6498 struct i40e_pf *pf = vsi->back;
6499 bool need_reset;
6338 6500
6339 if (features & NETIF_F_HW_VLAN_CTAG_RX) 6501 if (features & NETIF_F_HW_VLAN_CTAG_RX)
6340 i40e_vlan_stripping_enable(vsi); 6502 i40e_vlan_stripping_enable(vsi);
6341 else 6503 else
6342 i40e_vlan_stripping_disable(vsi); 6504 i40e_vlan_stripping_disable(vsi);
6343 6505
6506 need_reset = i40e_set_ntuple(pf, features);
6507
6508 if (need_reset)
6509 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
6510
6344 return 0; 6511 return 0;
6345} 6512}
6346 6513
@@ -6464,6 +6631,7 @@ static const struct net_device_ops i40e_netdev_ops = {
6464 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan, 6631 .ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
6465 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw, 6632 .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
6466 .ndo_get_vf_config = i40e_ndo_get_vf_config, 6633 .ndo_get_vf_config = i40e_ndo_get_vf_config,
6634 .ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
6467#ifdef CONFIG_I40E_VXLAN 6635#ifdef CONFIG_I40E_VXLAN
6468 .ndo_add_vxlan_port = i40e_add_vxlan_port, 6636 .ndo_add_vxlan_port = i40e_add_vxlan_port,
6469 .ndo_del_vxlan_port = i40e_del_vxlan_port, 6637 .ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -6495,10 +6663,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6495 np = netdev_priv(netdev); 6663 np = netdev_priv(netdev);
6496 np->vsi = vsi; 6664 np->vsi = vsi;
6497 6665
6498 netdev->hw_enc_features = NETIF_F_IP_CSUM | 6666 netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6499 NETIF_F_GSO_UDP_TUNNEL | 6667 NETIF_F_GSO_UDP_TUNNEL |
6500 NETIF_F_TSO | 6668 NETIF_F_TSO;
6501 NETIF_F_SG;
6502 6669
6503 netdev->features = NETIF_F_SG | 6670 netdev->features = NETIF_F_SG |
6504 NETIF_F_IP_CSUM | 6671 NETIF_F_IP_CSUM |
@@ -6512,6 +6679,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
6512 NETIF_F_TSO | 6679 NETIF_F_TSO |
6513 NETIF_F_TSO6 | 6680 NETIF_F_TSO6 |
6514 NETIF_F_RXCSUM | 6681 NETIF_F_RXCSUM |
6682 NETIF_F_NTUPLE |
6515 NETIF_F_RXHASH | 6683 NETIF_F_RXHASH |
6516 0; 6684 0;
6517 6685
@@ -6771,8 +6939,6 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6771 if (vsi->netdev) { 6939 if (vsi->netdev) {
6772 /* results in a call to i40e_close() */ 6940 /* results in a call to i40e_close() */
6773 unregister_netdev(vsi->netdev); 6941 unregister_netdev(vsi->netdev);
6774 free_netdev(vsi->netdev);
6775 vsi->netdev = NULL;
6776 } 6942 }
6777 } else { 6943 } else {
6778 if (!test_and_set_bit(__I40E_DOWN, &vsi->state)) 6944 if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
@@ -6791,6 +6957,10 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
6791 6957
6792 i40e_vsi_delete(vsi); 6958 i40e_vsi_delete(vsi);
6793 i40e_vsi_free_q_vectors(vsi); 6959 i40e_vsi_free_q_vectors(vsi);
6960 if (vsi->netdev) {
6961 free_netdev(vsi->netdev);
6962 vsi->netdev = NULL;
6963 }
6794 i40e_vsi_clear_rings(vsi); 6964 i40e_vsi_clear_rings(vsi);
6795 i40e_vsi_clear(vsi); 6965 i40e_vsi_clear(vsi);
6796 6966
@@ -6845,8 +7015,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6845 } 7015 }
6846 7016
6847 if (vsi->base_vector) { 7017 if (vsi->base_vector) {
6848 dev_info(&pf->pdev->dev, 7018 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
6849 "VSI %d has non-zero base vector %d\n",
6850 vsi->seid, vsi->base_vector); 7019 vsi->seid, vsi->base_vector);
6851 return -EEXIST; 7020 return -EEXIST;
6852 } 7021 }
@@ -6865,7 +7034,7 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
6865 vsi->num_q_vectors, vsi->idx); 7034 vsi->num_q_vectors, vsi->idx);
6866 if (vsi->base_vector < 0) { 7035 if (vsi->base_vector < 0) {
6867 dev_info(&pf->pdev->dev, 7036 dev_info(&pf->pdev->dev,
6868 "failed to get q tracking for VSI %d, err=%d\n", 7037 "failed to get queue tracking for VSI %d, err=%d\n",
6869 vsi->seid, vsi->base_vector); 7038 vsi->seid, vsi->base_vector);
6870 i40e_vsi_free_q_vectors(vsi); 7039 i40e_vsi_free_q_vectors(vsi);
6871 ret = -ENOENT; 7040 ret = -ENOENT;
@@ -7822,6 +7991,44 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
7822 return 0; 7991 return 0;
7823} 7992}
7824 7993
7994#define INFO_STRING_LEN 255
7995static void i40e_print_features(struct i40e_pf *pf)
7996{
7997 struct i40e_hw *hw = &pf->hw;
7998 char *buf, *string;
7999
8000 string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
8001 if (!string) {
8002 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
8003 return;
8004 }
8005
8006 buf = string;
8007
8008 buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
8009#ifdef CONFIG_PCI_IOV
8010 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
8011#endif
8012 buf += sprintf(buf, "VSIs: %d QP: %d ", pf->hw.func_caps.num_vsis,
8013 pf->vsi[pf->lan_vsi]->num_queue_pairs);
8014
8015 if (pf->flags & I40E_FLAG_RSS_ENABLED)
8016 buf += sprintf(buf, "RSS ");
8017 buf += sprintf(buf, "FDir ");
8018 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
8019 buf += sprintf(buf, "ATR ");
8020 if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
8021 buf += sprintf(buf, "NTUPLE ");
8022 if (pf->flags & I40E_FLAG_DCB_ENABLED)
8023 buf += sprintf(buf, "DCB ");
8024 if (pf->flags & I40E_FLAG_PTP)
8025 buf += sprintf(buf, "PTP ");
8026
8027 BUG_ON(buf > (string + INFO_STRING_LEN));
8028 dev_info(&pf->pdev->dev, "%s\n", string);
8029 kfree(string);
8030}
8031
7825/** 8032/**
7826 * i40e_probe - Device initialization routine 8033 * i40e_probe - Device initialization routine
7827 * @pdev: PCI device information struct 8034 * @pdev: PCI device information struct
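i40e_print_features() above accumulates with bare sprintf() into a 255-byte buffer and only checks the bound afterwards with BUG_ON(), i.e. once any overrun has already happened. A bounded sketch of the same idea, assuming the i40e_pf fields used above; scnprintf() never writes past the remaining space and returns the number of characters actually stored:

/* Hedged alternative sketch, not the patch's code */
static void example_print_features(struct i40e_pf *pf)
{
	char buf[256];
	int len = 0;

	len += scnprintf(buf + len, sizeof(buf) - len,
			 "Features: PF-id[%d] ", pf->hw.pf_id);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		len += scnprintf(buf + len, sizeof(buf) - len, "RSS ");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		len += scnprintf(buf + len, sizeof(buf) - len, "ATR ");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
		len += scnprintf(buf + len, sizeof(buf) - len, "NTUPLE ");
	dev_info(&pf->pdev->dev, "%s\n", buf);
}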
@@ -7848,16 +8055,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7848 return err; 8055 return err;
7849 8056
7850 /* set up for high or low dma */ 8057 /* set up for high or low dma */
7851 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 8058 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
7852 /* coherent mask for the same size will always succeed if 8059 if (err)
7853 * dma_set_mask does 8060 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
7854 */ 8061 if (err) {
7855 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 8062 dev_err(&pdev->dev,
7856 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 8063 "DMA configuration failed: 0x%x\n", err);
7857 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7858 } else {
7859 dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
7860 err = -EIO;
7861 goto err_dma; 8064 goto err_dma;
7862 } 8065 }
7863 8066
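The DMA-mask block rewritten above is the standard probe-time pattern: dma_set_mask_and_coherent() sets the streaming and coherent masks together and returns 0 on success, so the 64-bit attempt can simply fall back to 32-bit. A self-contained sketch of the same pattern:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Generic probe-time DMA mask setup (sketch) */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	int err;

	/* prefer 64-bit addressing, fall back to 32-bit */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}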
@@ -7946,13 +8149,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7946 8149
7947 err = i40e_init_adminq(hw); 8150 err = i40e_init_adminq(hw);
7948 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw)); 8151 dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
7949 if (((hw->nvm.version & I40E_NVM_VERSION_HI_MASK)
7950 >> I40E_NVM_VERSION_HI_SHIFT) != I40E_CURRENT_NVM_VERSION_HI) {
7951 dev_info(&pdev->dev,
7952 "warning: NVM version not supported, supported version: %02x.%02x\n",
7953 I40E_CURRENT_NVM_VERSION_HI,
7954 I40E_CURRENT_NVM_VERSION_LO);
7955 }
7956 if (err) { 8152 if (err) {
7957 dev_info(&pdev->dev, 8153 dev_info(&pdev->dev,
7958 "init_adminq failed: %d expecting API %02x.%02x\n", 8154 "init_adminq failed: %d expecting API %02x.%02x\n",
@@ -8070,6 +8266,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8070 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; 8266 val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
8071 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val); 8267 wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
8072 i40e_flush(hw); 8268 i40e_flush(hw);
8269
8270 if (pci_num_vf(pdev)) {
8271 dev_info(&pdev->dev,
8272 "Active VFs found, allocating resources.\n");
8273 err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
8274 if (err)
8275 dev_info(&pdev->dev,
8276 "Error %d allocating resources for existing VFs\n",
8277 err);
8278 }
8073 } 8279 }
8074 8280
8075 pfs_found++; 8281 pfs_found++;
@@ -8092,7 +8298,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8092 8298
8093 i40e_set_pci_config_data(hw, link_status); 8299 i40e_set_pci_config_data(hw, link_status);
8094 8300
8095 dev_info(&pdev->dev, "PCI Express: %s %s\n", 8301 dev_info(&pdev->dev, "PCI-Express: %s %s\n",
8096 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" : 8302 (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
8097 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" : 8303 hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
8098 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" : 8304 hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
@@ -8109,6 +8315,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8109 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); 8315 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
8110 } 8316 }
8111 8317
8318 /* print a string summarizing features */
8319 i40e_print_features(pf);
8320
8112 return 0; 8321 return 0;
8113 8322
8114 /* Unwind what we've done if something failed in the setup */ 8323 /* Unwind what we've done if something failed in the setup */
@@ -8165,16 +8374,16 @@ static void i40e_remove(struct pci_dev *pdev)
8165 8374
8166 i40e_ptp_stop(pf); 8375 i40e_ptp_stop(pf);
8167 8376
8168 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8169 i40e_free_vfs(pf);
8170 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8171 }
8172
8173 /* no more scheduling of any task */ 8377 /* no more scheduling of any task */
8174 set_bit(__I40E_DOWN, &pf->state); 8378 set_bit(__I40E_DOWN, &pf->state);
8175 del_timer_sync(&pf->service_timer); 8379 del_timer_sync(&pf->service_timer);
8176 cancel_work_sync(&pf->service_task); 8380 cancel_work_sync(&pf->service_task);
8177 8381
8382 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
8383 i40e_free_vfs(pf);
8384 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
8385 }
8386
8178 i40e_fdir_teardown(pf); 8387 i40e_fdir_teardown(pf);
8179 8388
8180 /* If there is a switch structure or any orphans, remove them. 8389 /* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 73f95b081927..262bdf11d221 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -27,14 +27,14 @@
27#include "i40e_prototype.h" 27#include "i40e_prototype.h"
28 28
29/** 29/**
 30 * i40e_init_nvm_ops - Initialize NVM function pointers. 30 * i40e_init_nvm - Initialize NVM function pointers
31 * @hw: pointer to the HW structure. 31 * @hw: pointer to the HW structure
32 * 32 *
33 * Setups the function pointers and the NVM info structure. Should be called 33 * Setup the function pointers and the NVM info structure. Should be called
34 * once per NVM initialization, e.g. inside the i40e_init_shared_code(). 34 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
35 * Please notice that the NVM term is used here (& in all methods covered 35 * Please notice that the NVM term is used here (& in all methods covered
36 * in this file) as an equivalent of the FLASH part mapped into the SR. 36 * in this file) as an equivalent of the FLASH part mapped into the SR.
37 * We are accessing FLASH always thru the Shadow RAM. 37 * We are accessing FLASH always thru the Shadow RAM.
38 **/ 38 **/
39i40e_status i40e_init_nvm(struct i40e_hw *hw) 39i40e_status i40e_init_nvm(struct i40e_hw *hw)
40{ 40{
@@ -49,16 +49,16 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
49 gens = rd32(hw, I40E_GLNVM_GENS); 49 gens = rd32(hw, I40E_GLNVM_GENS);
50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >> 50 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
51 I40E_GLNVM_GENS_SR_SIZE_SHIFT); 51 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
52 /* Switching to words (sr_size contains power of 2KB). */ 52 /* Switching to words (sr_size contains power of 2KB) */
53 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB; 53 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
54 54
55 /* Check if we are in the normal or blank NVM programming mode. */ 55 /* Check if we are in the normal or blank NVM programming mode */
56 fla = rd32(hw, I40E_GLNVM_FLA); 56 fla = rd32(hw, I40E_GLNVM_FLA);
57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode. */ 57 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
58 /* Max NVM timeout. */ 58 /* Max NVM timeout */
59 nvm->timeout = I40E_MAX_NVM_TIMEOUT; 59 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
60 nvm->blank_nvm_mode = false; 60 nvm->blank_nvm_mode = false;
61 } else { /* Blank programming mode. */ 61 } else { /* Blank programming mode */
62 nvm->blank_nvm_mode = true; 62 nvm->blank_nvm_mode = true;
63 ret_code = I40E_ERR_NVM_BLANK_MODE; 63 ret_code = I40E_ERR_NVM_BLANK_MODE;
64 hw_dbg(hw, "NVM init error: unsupported blank mode.\n"); 64 hw_dbg(hw, "NVM init error: unsupported blank mode.\n");
@@ -68,12 +68,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
68} 68}
69 69
70/** 70/**
71 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership. 71 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
72 * @hw: pointer to the HW structure. 72 * @hw: pointer to the HW structure
73 * @access: NVM access type (read or write). 73 * @access: NVM access type (read or write)
74 * 74 *
75 * This function will request NVM ownership for reading 75 * This function will request NVM ownership for reading
76 * via the proper Admin Command. 76 * via the proper Admin Command.
77 **/ 77 **/
78i40e_status i40e_acquire_nvm(struct i40e_hw *hw, 78i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
79 enum i40e_aq_resource_access_type access) 79 enum i40e_aq_resource_access_type access)
@@ -87,20 +87,20 @@ i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
87 87
88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access, 88 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
89 0, &time, NULL); 89 0, &time, NULL);
90 /* Reading the Global Device Timer. */ 90 /* Reading the Global Device Timer */
91 gtime = rd32(hw, I40E_GLVFGEN_TIMER); 91 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
92 92
93 /* Store the timeout. */ 93 /* Store the timeout */
94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime; 94 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time) + gtime;
95 95
96 if (ret_code) { 96 if (ret_code) {
97 /* Set the polling timeout. */ 97 /* Set the polling timeout */
98 if (time > I40E_MAX_NVM_TIMEOUT) 98 if (time > I40E_MAX_NVM_TIMEOUT)
99 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) 99 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT)
100 + gtime; 100 + gtime;
101 else 101 else
102 timeout = hw->nvm.hw_semaphore_timeout; 102 timeout = hw->nvm.hw_semaphore_timeout;
 103 /* Poll until the current NVM owner timeouts. */ 103 /* Poll until the current NVM owner times out */
104 while (gtime < timeout) { 104 while (gtime < timeout) {
105 usleep_range(10000, 20000); 105 usleep_range(10000, 20000);
106 ret_code = i40e_aq_request_resource(hw, 106 ret_code = i40e_aq_request_resource(hw,
@@ -128,10 +128,10 @@ i40e_i40e_acquire_nvm_exit:
128} 128}
129 129
130/** 130/**
131 * i40e_release_nvm - Generic request for releasing the NVM ownership. 131 * i40e_release_nvm - Generic request for releasing the NVM ownership
132 * @hw: pointer to the HW structure. 132 * @hw: pointer to the HW structure
133 * 133 *
134 * This function will release NVM resource via the proper Admin Command. 134 * This function will release NVM resource via the proper Admin Command.
135 **/ 135 **/
136void i40e_release_nvm(struct i40e_hw *hw) 136void i40e_release_nvm(struct i40e_hw *hw)
137{ 137{
@@ -140,17 +140,17 @@ void i40e_release_nvm(struct i40e_hw *hw)
140} 140}
141 141
142/** 142/**
143 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit. 143 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
144 * @hw: pointer to the HW structure. 144 * @hw: pointer to the HW structure
145 * 145 *
146 * Polls the SRCTL Shadow RAM register done bit. 146 * Polls the SRCTL Shadow RAM register done bit.
147 **/ 147 **/
148static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) 148static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
149{ 149{
150 i40e_status ret_code = I40E_ERR_TIMEOUT; 150 i40e_status ret_code = I40E_ERR_TIMEOUT;
151 u32 srctl, wait_cnt; 151 u32 srctl, wait_cnt;
152 152
153 /* Poll the I40E_GLNVM_SRCTL until the done bit is set. */ 153 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
154 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) { 154 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
155 srctl = rd32(hw, I40E_GLNVM_SRCTL); 155 srctl = rd32(hw, I40E_GLNVM_SRCTL);
156 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) { 156 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
@@ -165,12 +165,12 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
165} 165}
166 166
167/** 167/**
168 * i40e_read_nvm_word - Reads Shadow RAM 168 * i40e_read_nvm_word - Reads Shadow RAM
169 * @hw: pointer to the HW structure. 169 * @hw: pointer to the HW structure
170 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 170 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
171 * @data: word read from the Shadow RAM. 171 * @data: word read from the Shadow RAM
172 * 172 *
173 * Reads 16 bit word from the Shadow RAM using the GLNVM_SRCTL register. 173 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
174 **/ 174 **/
175i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, 175i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
176 u16 *data) 176 u16 *data)
@@ -184,15 +184,15 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
184 goto read_nvm_exit; 184 goto read_nvm_exit;
185 } 185 }
186 186
187 /* Poll the done bit first. */ 187 /* Poll the done bit first */
188 ret_code = i40e_poll_sr_srctl_done_bit(hw); 188 ret_code = i40e_poll_sr_srctl_done_bit(hw);
189 if (!ret_code) { 189 if (!ret_code) {
190 /* Write the address and start reading. */ 190 /* Write the address and start reading */
191 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) | 191 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
192 (1 << I40E_GLNVM_SRCTL_START_SHIFT); 192 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
193 wr32(hw, I40E_GLNVM_SRCTL, sr_reg); 193 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
194 194
195 /* Poll I40E_GLNVM_SRCTL until the done bit is set. */ 195 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
196 ret_code = i40e_poll_sr_srctl_done_bit(hw); 196 ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 if (!ret_code) { 197 if (!ret_code) {
198 sr_reg = rd32(hw, I40E_GLNVM_SRDATA); 198 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
@@ -210,16 +210,15 @@ read_nvm_exit:
210} 210}
211 211
212/** 212/**
213 * i40e_read_nvm_buffer - Reads Shadow RAM buffer. 213 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
214 * @hw: pointer to the HW structure. 214 * @hw: pointer to the HW structure
215 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF). 215 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
216 * @words: number of words to read (in) & 216 * @words: (in) number of words to read; (out) number of words actually read
217 * number of words read before the NVM ownership timeout (out). 217 * @data: words read from the Shadow RAM
218 * @data: words read from the Shadow RAM.
219 * 218 *
220 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd() 219 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
221 * method. The buffer read is preceded by the NVM ownership take 220 * method. The buffer read is preceded by the NVM ownership take
222 * and followed by the release. 221 * and followed by the release.
223 **/ 222 **/
224i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset, 223i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
225 u16 *words, u16 *data) 224 u16 *words, u16 *data)
@@ -227,7 +226,7 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
227 i40e_status ret_code = 0; 226 i40e_status ret_code = 0;
228 u16 index, word; 227 u16 index, word;
229 228
230 /* Loop thru the selected region. */ 229 /* Loop thru the selected region */
231 for (word = 0; word < *words; word++) { 230 for (word = 0; word < *words; word++) {
232 index = offset + word; 231 index = offset + word;
233 ret_code = i40e_read_nvm_word(hw, index, &data[word]); 232 ret_code = i40e_read_nvm_word(hw, index, &data[word]);
@@ -235,21 +234,21 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
235 break; 234 break;
236 } 235 }
237 236
238 /* Update the number of words read from the Shadow RAM. */ 237 /* Update the number of words read from the Shadow RAM */
239 *words = word; 238 *words = word;
240 239
241 return ret_code; 240 return ret_code;
242} 241}
243 242
244/** 243/**
245 * i40e_calc_nvm_checksum - Calculates and returns the checksum 244 * i40e_calc_nvm_checksum - Calculates and returns the checksum
246 * @hw: pointer to hardware structure 245 * @hw: pointer to hardware structure
247 * @checksum: pointer to the checksum 246 * @checksum: pointer to the checksum
248 * 247 *
249 * This function calculate SW Checksum that covers the whole 64kB shadow RAM 248 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
250 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD 249 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
251 * is customer specific and unknown. Therefore, this function skips all maximum 250 * is customer specific and unknown. Therefore, this function skips all maximum
252 * possible size of VPD (1kB). 251 * possible size of VPD (1kB).
253 **/ 252 **/
254static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw, 253static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
255 u16 *checksum) 254 u16 *checksum)
@@ -311,12 +310,12 @@ i40e_calc_nvm_checksum_exit:
311} 310}
312 311
313/** 312/**
314 * i40e_validate_nvm_checksum - Validate EEPROM checksum 313 * i40e_validate_nvm_checksum - Validate EEPROM checksum
315 * @hw: pointer to hardware structure 314 * @hw: pointer to hardware structure
316 * @checksum: calculated checksum 315 * @checksum: calculated checksum
317 * 316 *
318 * Performs checksum calculation and validates the NVM SW checksum. If the 317 * Performs checksum calculation and validates the NVM SW checksum. If the
319 * caller does not need checksum, the value can be NULL. 318 * caller does not need checksum, the value can be NULL.
320 **/ 319 **/
321i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw, 320i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
322 u16 *checksum) 321 u16 *checksum)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ed91f93ede2b..9cd57e617959 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -231,6 +231,13 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
231 u16 *checksum); 231 u16 *checksum);
232void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); 232void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
233 233
234extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
235
236static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
237{
238 return i40e_ptype_lookup[ptype];
239}
240
234/* prototype for functions used for SW locks */ 241/* prototype for functions used for SW locks */
235 242
236/* i40e_common for VF drivers*/ 243/* i40e_common for VF drivers*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482b1a7f..851f6537a96a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -25,6 +25,7 @@
25 ******************************************************************************/ 25 ******************************************************************************/
26 26
27#include "i40e.h" 27#include "i40e.h"
28#include "i40e_prototype.h"
28 29
29static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 30static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
30 u32 td_tag) 31 u32 td_tag)
@@ -39,11 +40,12 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
39#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS) 40#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
40/** 41/**
41 * i40e_program_fdir_filter - Program a Flow Director filter 42 * i40e_program_fdir_filter - Program a Flow Director filter
 42 * @fdir_input: Packet data that will be filter parameters 43 * @fdir_data: the flow director filter parameters
44 * @raw_packet: the pre-allocated packet buffer for FDir
43 * @pf: The pf pointer 45 * @pf: The pf pointer
44 * @add: True for add/update, False for remove 46 * @add: True for add/update, False for remove
45 **/ 47 **/
46int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data, 48int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
47 struct i40e_pf *pf, bool add) 49 struct i40e_pf *pf, bool add)
48{ 50{
49 struct i40e_filter_program_desc *fdir_desc; 51 struct i40e_filter_program_desc *fdir_desc;
@@ -68,8 +70,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
68 tx_ring = vsi->tx_rings[0]; 70 tx_ring = vsi->tx_rings[0];
69 dev = tx_ring->dev; 71 dev = tx_ring->dev;
70 72
71 dma = dma_map_single(dev, fdir_data->raw_packet, 73 dma = dma_map_single(dev, raw_packet,
72 I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); 74 I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
73 if (dma_mapping_error(dev, dma)) 75 if (dma_mapping_error(dev, dma))
74 goto dma_fail; 76 goto dma_fail;
75 77
@@ -132,14 +134,14 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
132 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0; 134 tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
133 135
134 /* record length, and DMA address */ 136 /* record length, and DMA address */
135 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_LOOKUP); 137 dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
136 dma_unmap_addr_set(tx_buf, dma, dma); 138 dma_unmap_addr_set(tx_buf, dma, dma);
137 139
138 tx_desc->buffer_addr = cpu_to_le64(dma); 140 tx_desc->buffer_addr = cpu_to_le64(dma);
139 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY; 141 td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
140 142
141 tx_desc->cmd_type_offset_bsz = 143 tx_desc->cmd_type_offset_bsz =
142 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); 144 build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
143 145
144 /* set the timestamp */ 146 /* set the timestamp */
145 tx_buf->time_stamp = jiffies; 147 tx_buf->time_stamp = jiffies;
@@ -161,26 +163,328 @@ dma_fail:
161 return -1; 163 return -1;
162} 164}
163 165
166#define IP_HEADER_OFFSET 14
167#define I40E_UDPIP_DUMMY_PACKET_LEN 42
168/**
169 * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
170 * @vsi: pointer to the targeted VSI
171 * @fd_data: the flow director data required for the FDir descriptor
172 * @raw_packet: the pre-allocated packet buffer for FDir
173 * @add: true adds a filter, false removes it
174 *
175 * Returns 0 if the filters were successfully added or removed
176 **/
177static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
178 struct i40e_fdir_filter *fd_data,
179 u8 *raw_packet, bool add)
180{
181 struct i40e_pf *pf = vsi->back;
182 struct udphdr *udp;
183 struct iphdr *ip;
184 bool err = false;
185 int ret;
186 int i;
187 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
188 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
190
191 memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
192
193 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
194 udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
195 + sizeof(struct iphdr));
196
197 ip->daddr = fd_data->dst_ip[0];
198 udp->dest = fd_data->dst_port;
199 ip->saddr = fd_data->src_ip[0];
200 udp->source = fd_data->src_port;
201
202 for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
203 i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
204 fd_data->pctype = i;
205 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
206
207 if (ret) {
208 dev_info(&pf->pdev->dev,
209 "Filter command send failed for PCTYPE %d (ret = %d)\n",
210 fd_data->pctype, ret);
211 err = true;
212 } else {
213 dev_info(&pf->pdev->dev,
214 "Filter OK for PCTYPE %d (ret = %d)\n",
215 fd_data->pctype, ret);
216 }
217 }
218
219 return err ? -EOPNOTSUPP : 0;
220}
221
222#define I40E_TCPIP_DUMMY_PACKET_LEN 54
223/**
224 * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
225 * @vsi: pointer to the targeted VSI
226 * @fd_data: the flow director data required for the FDir descriptor
227 * @raw_packet: the pre-allocated packet buffer for FDir
228 * @add: true adds a filter, false removes it
229 *
230 * Returns 0 if the filters were successfully added or removed
231 **/
232static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
233 struct i40e_fdir_filter *fd_data,
234 u8 *raw_packet, bool add)
235{
236 struct i40e_pf *pf = vsi->back;
237 struct tcphdr *tcp;
238 struct iphdr *ip;
239 bool err = false;
240 int ret;
241 /* Dummy packet */
242 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
243 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
244 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
245 0x0, 0x72, 0, 0, 0, 0};
246
247 memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
248
249 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
250 tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
251 + sizeof(struct iphdr));
252
253 ip->daddr = fd_data->dst_ip[0];
254 tcp->dest = fd_data->dst_port;
255 ip->saddr = fd_data->src_ip[0];
256 tcp->source = fd_data->src_port;
257
258 if (add) {
259 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
260 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
261 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
262 }
263 }
264
265 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
266 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
267
268 if (ret) {
269 dev_info(&pf->pdev->dev,
270 "Filter command send failed for PCTYPE %d (ret = %d)\n",
271 fd_data->pctype, ret);
272 err = true;
273 } else {
274 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
275 fd_data->pctype, ret);
276 }
277
278 fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
279
280 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
281 if (ret) {
282 dev_info(&pf->pdev->dev,
283 "Filter command send failed for PCTYPE %d (ret = %d)\n",
284 fd_data->pctype, ret);
285 err = true;
286 } else {
287 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
288 fd_data->pctype, ret);
289 }
290
291 return err ? -EOPNOTSUPP : 0;
292}
293
294/**
295 * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
296 * a specific flow spec
297 * @vsi: pointer to the targeted VSI
298 * @fd_data: the flow director data required for the FDir descriptor
299 * @raw_packet: the pre-allocated packet buffer for FDir
300 * @add: true adds a filter, false removes it
301 *
302 * Returns 0 if the filters were successfully added or removed
303 **/
304static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
305 struct i40e_fdir_filter *fd_data,
306 u8 *raw_packet, bool add)
307{
308 return -EOPNOTSUPP;
309}
310
311#define I40E_IP_DUMMY_PACKET_LEN 34
312/**
313 * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
314 * a specific flow spec
315 * @vsi: pointer to the targeted VSI
316 * @fd_data: the flow director data required for the FDir descriptor
317 * @raw_packet: the pre-allocated packet buffer for FDir
318 * @add: true adds a filter, false removes it
319 *
320 * Returns 0 if the filters were successfully added or removed
321 **/
322static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
323 struct i40e_fdir_filter *fd_data,
324 u8 *raw_packet, bool add)
325{
326 struct i40e_pf *pf = vsi->back;
327 struct iphdr *ip;
328 bool err = false;
329 int ret;
330 int i;
331 static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
332 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
333 0, 0, 0, 0};
334
335 memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
336 ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
337
338 ip->saddr = fd_data->src_ip[0];
339 ip->daddr = fd_data->dst_ip[0];
340 ip->protocol = 0;
341
342 for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
343 i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
344 fd_data->pctype = i;
345 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
346
347 if (ret) {
348 dev_info(&pf->pdev->dev,
349 "Filter command send failed for PCTYPE %d (ret = %d)\n",
350 fd_data->pctype, ret);
351 err = true;
352 } else {
353 dev_info(&pf->pdev->dev,
354 "Filter OK for PCTYPE %d (ret = %d)\n",
355 fd_data->pctype, ret);
356 }
357 }
358
359 return err ? -EOPNOTSUPP : 0;
360}
361
362/**
363 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
364 * @vsi: pointer to the targeted VSI
 365 * @input: the flow director filter to add or delete
366 * @add: true adds a filter, false removes it
367 *
368 **/
369int i40e_add_del_fdir(struct i40e_vsi *vsi,
370 struct i40e_fdir_filter *input, bool add)
371{
372 struct i40e_pf *pf = vsi->back;
373 u8 *raw_packet;
374 int ret;
375
 376 /* Allocate the raw packet buffer that the per-flow helpers
 377 * will populate with a dummy frame for this filter
378 */
379 raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
380 if (!raw_packet)
381 return -ENOMEM;
382
383 switch (input->flow_type & ~FLOW_EXT) {
384 case TCP_V4_FLOW:
385 ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
386 add);
387 break;
388 case UDP_V4_FLOW:
389 ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
390 add);
391 break;
392 case SCTP_V4_FLOW:
393 ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
394 add);
395 break;
396 case IPV4_FLOW:
397 ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
398 add);
399 break;
400 case IP_USER_FLOW:
401 switch (input->ip4_proto) {
402 case IPPROTO_TCP:
403 ret = i40e_add_del_fdir_tcpv4(vsi, input,
404 raw_packet, add);
405 break;
406 case IPPROTO_UDP:
407 ret = i40e_add_del_fdir_udpv4(vsi, input,
408 raw_packet, add);
409 break;
410 case IPPROTO_SCTP:
411 ret = i40e_add_del_fdir_sctpv4(vsi, input,
412 raw_packet, add);
413 break;
414 default:
415 ret = i40e_add_del_fdir_ipv4(vsi, input,
416 raw_packet, add);
417 break;
418 }
419 break;
420 default:
 421 dev_info(&pf->pdev->dev, "Unsupported flow type %d\n",
422 input->flow_type);
423 ret = -EINVAL;
424 }
425
426 kfree(raw_packet);
427 return ret;
428}
429
164/** 430/**
165 * i40e_fd_handle_status - check the Programming Status for FD 431 * i40e_fd_handle_status - check the Programming Status for FD
166 * @rx_ring: the Rx ring for this descriptor 432 * @rx_ring: the Rx ring for this descriptor
 167 * @qw: the descriptor data 433 * @rx_desc: the Rx descriptor for the programming status, not a packet descriptor
168 * @prog_id: the id originally used for programming 434 * @prog_id: the id originally used for programming
169 * 435 *
170 * This is used to verify if the FD programming or invalidation 436 * This is used to verify if the FD programming or invalidation
171 * requested by SW to the HW is successful or not and take actions accordingly. 437 * requested by SW to the HW is successful or not and take actions accordingly.
172 **/ 438 **/
173static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u32 qw, u8 prog_id) 439static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
440 union i40e_rx_desc *rx_desc, u8 prog_id)
174{ 441{
175 struct pci_dev *pdev = rx_ring->vsi->back->pdev; 442 struct i40e_pf *pf = rx_ring->vsi->back;
443 struct pci_dev *pdev = pf->pdev;
444 u32 fcnt_prog, fcnt_avail;
176 u32 error; 445 u32 error;
446 u64 qw;
177 447
448 qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
178 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >> 449 error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
179 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT; 450 I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
180 451
181 /* for now just print the Status */ 452 if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
182 dev_info(&pdev->dev, "FD programming id %02x, Status %08x\n", 453 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
 183 prog_id, error); 454 le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id));
455
456 /* filter programming failed most likely due to table full */
457 fcnt_prog = i40e_get_current_fd_count(pf);
458 fcnt_avail = pf->hw.fdir_shared_filter_count +
459 pf->fdir_pf_filter_count;
460
461 /* If ATR is running fcnt_prog can quickly change,
462 * if we are very close to full, it makes sense to disable
463 * FD ATR/SB and then re-enable it when there is room.
464 */
465 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
466 /* Turn off ATR first */
 467 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
468 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
469 dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
470 pf->auto_disable_flags |=
471 I40E_FLAG_FD_ATR_ENABLED;
472 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
 473 } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
474 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
475 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
476 pf->auto_disable_flags |=
477 I40E_FLAG_FD_SB_ENABLED;
478 pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
479 }
480 } else {
 481 dev_info(&pdev->dev, "FD filter programming error\n");
482 }
483 } else if (error ==
484 (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {
485 netdev_info(rx_ring->vsi->netdev, "ntuple filter loc = %d, could not be removed\n",
486 rx_desc->wb.qword0.hi_dword.fd_id);
487 }
184} 488}
185 489
186/** 490/**
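The static dummy frames used by the helpers above are easier to follow with their byte offsets spelled out. An annotated reading of the UDPv4 one, derived from the array contents (MAC addresses and checksums are deliberately left zero):

/*
 * bytes  0-11: destination/source MAC, all zero
 * bytes 12-13: 0x08 0x00  EtherType IPv4
 * byte     14: 0x45       IP version 4, header length 5 words
 * bytes 16-17: 0x00 0x1c  IP total length 28 (20 IP + 8 UDP)
 * bytes 20-21: 0x40 0x00  fragment field, DF set
 * byte     22: 0x40       TTL 64
 * byte     23: 0x11       IP protocol 17 (UDP)
 * bytes 26-33: source/destination IP, patched in from fd_data
 * bytes 34-37: source/destination port, patched in from fd_data
 */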
@@ -315,6 +619,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
315} 619}
316 620
317/** 621/**
622 * i40e_get_head - Retrieve head from head writeback
623 * @tx_ring: tx ring to fetch head of
624 *
625 * Returns value of Tx ring head based on value stored
626 * in head write-back location
627 **/
628static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
629{
630 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
631
632 return le32_to_cpu(*(volatile __le32 *)head);
633}
634
635/**
318 * i40e_clean_tx_irq - Reclaim resources after transmit completes 636 * i40e_clean_tx_irq - Reclaim resources after transmit completes
319 * @tx_ring: tx ring to clean 637 * @tx_ring: tx ring to clean
320 * @budget: how many cleans we're allowed 638 * @budget: how many cleans we're allowed
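For orientation, the ring layout that i40e_get_head() relies on, and that i40e_config_vsi_tx_queue() programs via head_wb_addr later in this patch, looks like this:

/*
 * tx_ring->desc:  [ desc 0 | desc 1 | ... | desc count-1 | u32 head ]
 *
 * The hardware writes the ring head into the u32 slot just past the
 * last descriptor, so its bus address is:
 *
 *   head_wb_addr = tx_ring->dma +
 *                  tx_ring->count * sizeof(struct i40e_tx_desc);
 */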
@@ -325,6 +643,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
325{ 643{
326 u16 i = tx_ring->next_to_clean; 644 u16 i = tx_ring->next_to_clean;
327 struct i40e_tx_buffer *tx_buf; 645 struct i40e_tx_buffer *tx_buf;
646 struct i40e_tx_desc *tx_head;
328 struct i40e_tx_desc *tx_desc; 647 struct i40e_tx_desc *tx_desc;
329 unsigned int total_packets = 0; 648 unsigned int total_packets = 0;
330 unsigned int total_bytes = 0; 649 unsigned int total_bytes = 0;
@@ -333,6 +652,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
333 tx_desc = I40E_TX_DESC(tx_ring, i); 652 tx_desc = I40E_TX_DESC(tx_ring, i);
334 i -= tx_ring->count; 653 i -= tx_ring->count;
335 654
655 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
656
336 do { 657 do {
337 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 658 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
338 659
@@ -343,9 +664,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
343 /* prevent any other reads prior to eop_desc */ 664 /* prevent any other reads prior to eop_desc */
344 read_barrier_depends(); 665 read_barrier_depends();
345 666
346 /* if the descriptor isn't done, no work yet to do */ 667 /* we have caught up to head, no work left to do */
347 if (!(eop_desc->cmd_type_offset_bsz & 668 if (tx_head == tx_desc)
348 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
349 break; 669 break;
350 670
351 /* clear next_to_watch to prevent false hangs */ 671 /* clear next_to_watch to prevent false hangs */
@@ -577,7 +897,7 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
577 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; 897 I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT;
578 898
579 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS) 899 if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
580 i40e_fd_handle_status(rx_ring, qw, id); 900 i40e_fd_handle_status(rx_ring, rx_desc, id);
581} 901}
582 902
583/** 903/**
@@ -601,6 +921,10 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
601 921
602 /* round up to nearest 4K */ 922 /* round up to nearest 4K */
603 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 923 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
 924 /* add a u32 for head writeback; aligning to 4K below then
 925 * guarantees the writeback area is at least one cache line in size
926 */
927 tx_ring->size += sizeof(u32);
604 tx_ring->size = ALIGN(tx_ring->size, 4096); 928 tx_ring->size = ALIGN(tx_ring->size, 4096);
605 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 929 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
606 &tx_ring->dma, GFP_KERNEL); 930 &tx_ring->dma, GFP_KERNEL);
@@ -892,7 +1216,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
892 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 1216 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
893 return; 1217 return;
894 1218
895 /* likely incorrect csum if alternate IP extention headers found */ 1219 /* likely incorrect csum if alternate IP extension headers found */
896 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 1220 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
897 return; 1221 return;
898 1222
@@ -956,6 +1280,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
956} 1280}
957 1281
958/** 1282/**
1283 * i40e_ptype_to_hash - get a hash type
1284 * @ptype: the ptype value from the descriptor
1285 *
1286 * Returns a hash type to be used by skb_set_hash
1287 **/
1288static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
1289{
1290 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
1291
1292 if (!decoded.known)
1293 return PKT_HASH_TYPE_NONE;
1294
1295 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1296 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
1297 return PKT_HASH_TYPE_L4;
1298 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
1299 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
1300 return PKT_HASH_TYPE_L3;
1301 else
1302 return PKT_HASH_TYPE_L2;
1303}
1304
1305/**
959 * i40e_clean_rx_irq - Reclaim resources after receive completes 1306 * i40e_clean_rx_irq - Reclaim resources after receive completes
960 * @rx_ring: rx ring to clean 1307 * @rx_ring: rx ring to clean
961 * @budget: how many cleans we're allowed 1308 * @budget: how many cleans we're allowed
@@ -972,8 +1319,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
972 u16 i = rx_ring->next_to_clean; 1319 u16 i = rx_ring->next_to_clean;
973 union i40e_rx_desc *rx_desc; 1320 union i40e_rx_desc *rx_desc;
974 u32 rx_error, rx_status; 1321 u32 rx_error, rx_status;
1322 u8 rx_ptype;
975 u64 qword; 1323 u64 qword;
976 u16 rx_ptype; 1324
1325 if (budget <= 0)
1326 return 0;
977 1327
978 rx_desc = I40E_RX_DESC(rx_ring, i); 1328 rx_desc = I40E_RX_DESC(rx_ring, i);
979 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 1329 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -1087,7 +1437,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
1087 goto next_desc; 1437 goto next_desc;
1088 } 1438 }
1089 1439
1090 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); 1440 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
1441 i40e_ptype_to_hash(rx_ptype));
1091 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) { 1442 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
1092 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status & 1443 i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
1093 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >> 1444 I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1246,8 +1597,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1246 if (!tx_ring->atr_sample_rate) 1597 if (!tx_ring->atr_sample_rate)
1247 return; 1598 return;
1248 1599
1249 tx_ring->atr_count++;
1250
1251 /* snag network header to get L4 type and address */ 1600 /* snag network header to get L4 type and address */
1252 hdr.network = skb_network_header(skb); 1601 hdr.network = skb_network_header(skb);
1253 1602
@@ -1269,6 +1618,12 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
1269 1618
1270 th = (struct tcphdr *)(hdr.network + hlen); 1619 th = (struct tcphdr *)(hdr.network + hlen);
1271 1620
1621 /* Due to lack of space, no more new filters can be programmed */
1622 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1623 return;
1624
1625 tx_ring->atr_count++;
1626
1272 /* sample on all syn/fin packets or once every atr sample rate */ 1627 /* sample on all syn/fin packets or once every atr sample rate */
1273 if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate)) 1628 if (!th->fin && !th->syn && (tx_ring->atr_count < tx_ring->atr_sample_rate))
1274 return; 1629 return;
@@ -1596,7 +1951,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1596 struct i40e_tx_context_desc *context_desc; 1951 struct i40e_tx_context_desc *context_desc;
1597 int i = tx_ring->next_to_use; 1952 int i = tx_ring->next_to_use;
1598 1953
1599 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1954 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1955 !cd_tunneling && !cd_l2tag2)
1600 return; 1956 return;
1601 1957
1602 /* grab the next descriptor */ 1958 /* grab the next descriptor */
@@ -1707,9 +2063,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1707 tx_bi = &tx_ring->tx_bi[i]; 2063 tx_bi = &tx_ring->tx_bi[i];
1708 } 2064 }
1709 2065
1710 tx_desc->cmd_type_offset_bsz = 2066 /* Place RS bit on last descriptor of any packet that spans across the
1711 build_ctob(td_cmd, td_offset, size, td_tag) | 2067 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1712 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 2068 */
2069#define WB_STRIDE 0x3
2070 if (((i & WB_STRIDE) != WB_STRIDE) &&
2071 (first <= &tx_ring->tx_bi[i]) &&
2072 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
2073 tx_desc->cmd_type_offset_bsz =
2074 build_ctob(td_cmd, td_offset, size, td_tag) |
2075 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
2076 I40E_TXD_QW1_CMD_SHIFT);
2077 } else {
2078 tx_desc->cmd_type_offset_bsz =
2079 build_ctob(td_cmd, td_offset, size, td_tag) |
2080 cpu_to_le64((u64)I40E_TXD_CMD <<
2081 I40E_TXD_QW1_CMD_SHIFT);
2082 }
1713 2083
1714 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 2084 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1715 tx_ring->queue_index), 2085 tx_ring->queue_index),
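The WB_STRIDE predicate above is easier to check with concrete indices; a standalone illustration (the helper name is made up):

/* With WB_STRIDE == 0x3, (i & WB_STRIDE) == WB_STRIDE exactly when the
 * last descriptor index is 3 mod 4 (slots 3, 7, 11, ...).  Since each
 * 16-byte descriptor packs four to a 64-byte cache line, requesting RS
 * only there (or when a packet spans a 4-descriptor group) bounds the
 * descriptor write-backs to one per cache line.
 */
static bool example_ends_on_wb_stride(unsigned int i)
{
	return (i & 0x3) == 0x3;
}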
@@ -1812,7 +2182,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1812 2182
1813 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 2183 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1814 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 2184 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1815 * + 2 desc gap to keep tail from touching head, 2185 * + 4 desc gap to avoid the cache line where head is,
1816 * + 1 desc for context descriptor, 2186 * + 1 desc for context descriptor,
1817 * otherwise try next time 2187 * otherwise try next time
1818 */ 2188 */
@@ -1823,7 +2193,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1823 count += skb_shinfo(skb)->nr_frags; 2193 count += skb_shinfo(skb)->nr_frags;
1824#endif 2194#endif
1825 count += TXD_USE_COUNT(skb_headlen(skb)); 2195 count += TXD_USE_COUNT(skb_headlen(skb));
1826 if (i40e_maybe_stop_tx(tx_ring, count + 3)) { 2196 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1827 tx_ring->tx_stats.tx_busy++; 2197 tx_ring->tx_stats.tx_busy++;
1828 return 0; 2198 return 0;
1829 } 2199 }
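The "count + 4 + 1" above budgets, per transmit attempt: one data descriptor per I40E_MAX_DATA_PER_TXD chunk of the head and of each fragment, one context descriptor, and a four-descriptor gap so the tail never advances into the cache line the hardware writes head back into. A sketch of the same arithmetic, with the per-descriptor limit left as an assumed parameter:

/* Illustrative only; max_per_txd stands in for I40E_MAX_DATA_PER_TXD */
static unsigned int example_descs_needed(unsigned int headlen,
					 const unsigned int *frag_len,
					 unsigned int nr_frags,
					 unsigned int max_per_txd)
{
	unsigned int i, count;

	count = DIV_ROUND_UP(headlen, max_per_txd);
	for (i = 0; i < nr_frags; i++)
		count += DIV_ROUND_UP(frag_len[i], max_per_txd);
	return count + 4 + 1;	/* head-writeback gap + context desc */
}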
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 181a825d3160..d2f0b95fd0d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -458,6 +458,10 @@ union i40e_32byte_rx_desc {
458 union { 458 union {
459 __le32 rss; /* RSS Hash */ 459 __le32 rss; /* RSS Hash */
460 __le32 fcoe_param; /* FCoE DDP Context id */ 460 __le32 fcoe_param; /* FCoE DDP Context id */
461 /* Flow director filter id in case of
462 * Programming status desc WB
463 */
464 __le32 fd_id;
461 } hi_dword; 465 } hi_dword;
462 } qword0; 466 } qword0;
463 struct { 467 struct {
@@ -698,7 +702,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
698enum i40e_rx_prog_status_desc_error_bits { 702enum i40e_rx_prog_status_desc_error_bits {
699 /* Note: These are predefined bit offsets */ 703 /* Note: These are predefined bit offsets */
700 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 704 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
701 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 705 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
702 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 706 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
703 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 707 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
704}; 708};
@@ -1010,6 +1014,11 @@ struct i40e_hw_port_stats {
1010 u64 tx_size_big; /* ptc9522 */ 1014 u64 tx_size_big; /* ptc9522 */
1011 u64 mac_short_packet_dropped; /* mspdc */ 1015 u64 mac_short_packet_dropped; /* mspdc */
1012 u64 checksum_error; /* xec */ 1016 u64 checksum_error; /* xec */
1017 /* EEE LPI */
1018 bool tx_lpi_status;
1019 bool rx_lpi_status;
1020 u64 tx_lpi_count; /* etlpic */
1021 u64 rx_lpi_count; /* erlpic */
1013}; 1022};
1014 1023
1015/* Checksum and Shadow RAM pointers */ 1024/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c8ca5a..02c11a7f7d29 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -69,7 +69,7 @@ static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
69{ 69{
70 struct i40e_pf *pf = vf->pf; 70 struct i40e_pf *pf = vf->pf;
71 71
72 return vector_id <= pf->hw.func_caps.num_msix_vectors_vf; 72 return vector_id < pf->hw.func_caps.num_msix_vectors_vf;
73} 73}
74 74
75/***********************vf resource mgmt routines*****************/ 75/***********************vf resource mgmt routines*****************/
@@ -126,8 +126,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
126 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); 126 reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
127 else 127 else
128 reg_idx = I40E_VPINT_LNKLSTN( 128 reg_idx = I40E_VPINT_LNKLSTN(
129 (pf->hw.func_caps.num_msix_vectors_vf 129 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) +
130 * vf->vf_id) + (vector_id - 1)); 130 (vector_id - 1));
131 131
132 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { 132 if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
133 /* Special case - No queues mapped on this vector */ 133 /* Special case - No queues mapped on this vector */
@@ -230,6 +230,9 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
230 tx_ctx.qlen = info->ring_len; 230 tx_ctx.qlen = info->ring_len;
231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]); 231 tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
232 tx_ctx.rdylist_act = 0; 232 tx_ctx.rdylist_act = 0;
233 tx_ctx.head_wb_ena = 1;
234 tx_ctx.head_wb_addr = info->dma_ring_addr +
235 (info->ring_len * sizeof(struct i40e_tx_desc));
233 236
234 /* clear the context in the HMC */ 237 /* clear the context in the HMC */
235 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id); 238 ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -408,18 +411,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
408 "Could not allocate VF broadcast filter\n"); 411 "Could not allocate VF broadcast filter\n");
409 } 412 }
410 413
411 if (!f) {
412 dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
413 ret = -ENOMEM;
414 goto error_alloc_vsi_res;
415 }
416
417 /* program mac filter */ 414 /* program mac filter */
418 ret = i40e_sync_vsi_filters(vsi); 415 ret = i40e_sync_vsi_filters(vsi);
419 if (ret) { 416 if (ret)
420 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); 417 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
421 goto error_alloc_vsi_res;
422 }
423 418
424error_alloc_vsi_res: 419error_alloc_vsi_res:
425 return ret; 420 return ret;
@@ -514,7 +509,8 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
514 vf->lan_vsi_index = 0; 509 vf->lan_vsi_index = 0;
515 vf->lan_vsi_id = 0; 510 vf->lan_vsi_id = 0;
516 } 511 }
517 msix_vf = pf->hw.func_caps.num_msix_vectors_vf + 1; 512 msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
513
518 /* disable interrupts so the VF starts in a known state */ 514 /* disable interrupts so the VF starts in a known state */
519 for (i = 0; i < msix_vf; i++) { 515 for (i = 0; i < msix_vf; i++) {
520 /* format is same for both registers */ 516 /* format is same for both registers */
@@ -679,9 +675,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
679complete_reset: 675complete_reset:
680 /* reallocate vf resources to reset the VSI state */ 676 /* reallocate vf resources to reset the VSI state */
681 i40e_free_vf_res(vf); 677 i40e_free_vf_res(vf);
682 mdelay(10);
683 i40e_alloc_vf_res(vf); 678 i40e_alloc_vf_res(vf);
684 i40e_enable_vf_mappings(vf); 679 i40e_enable_vf_mappings(vf);
680 set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
685 681
686 /* tell the VF the reset is done */ 682 /* tell the VF the reset is done */
687 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE); 683 wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +843,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
847 * 843 *
848 * allocate vf resources 844 * allocate vf resources
849 **/ 845 **/
850static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) 846int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
851{ 847{
852 struct i40e_vf *vfs; 848 struct i40e_vf *vfs;
853 int i, ret = 0; 849 int i, ret = 0;
@@ -855,16 +851,18 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
855 /* Disable interrupt 0 so we don't try to handle the VFLR. */ 851 /* Disable interrupt 0 so we don't try to handle the VFLR. */
856 i40e_irq_dynamic_disable_icr0(pf); 852 i40e_irq_dynamic_disable_icr0(pf);
857 853
858 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); 854 /* Check to see if we're just allocating resources for extant VFs */
859 if (ret) { 855 if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
860 dev_err(&pf->pdev->dev, 856 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
861 "pci_enable_sriov failed with error %d!\n", ret); 857 if (ret) {
862 pf->num_alloc_vfs = 0; 858 dev_err(&pf->pdev->dev,
863 goto err_iov; 859 "Failed to enable SR-IOV, error %d.\n", ret);
860 pf->num_alloc_vfs = 0;
861 goto err_iov;
862 }
864 } 863 }
865
866 /* allocate memory */ 864 /* allocate memory */
867 vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL); 865 vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
868 if (!vfs) { 866 if (!vfs) {
869 ret = -ENOMEM; 867 ret = -ENOMEM;
870 goto err_alloc; 868 goto err_alloc;
@@ -1776,7 +1774,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
1776 u32 v_retval, u8 *msg, u16 msglen) 1774 u32 v_retval, u8 *msg, u16 msglen)
1777{ 1775{
1778 struct i40e_hw *hw = &pf->hw; 1776 struct i40e_hw *hw = &pf->hw;
1779 int local_vf_id = vf_id - hw->func_caps.vf_base_id; 1777 unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
1780 struct i40e_vf *vf; 1778 struct i40e_vf *vf;
1781 int ret; 1779 int ret;
1782 1780
@@ -1873,7 +1871,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
1873 /* clear the bit in GLGEN_VFLRSTAT */ 1871 /* clear the bit in GLGEN_VFLRSTAT */
1874 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx)); 1872 wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
1875 1873
1876 i40e_reset_vf(vf, true); 1874 if (!test_bit(__I40E_DOWN, &pf->state))
1875 i40e_reset_vf(vf, true);
1877 } 1876 }
1878 } 1877 }
1879 1878
@@ -1924,15 +1923,28 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1924void i40e_vc_notify_link_state(struct i40e_pf *pf) 1923void i40e_vc_notify_link_state(struct i40e_pf *pf)
1925{ 1924{
1926 struct i40e_virtchnl_pf_event pfe; 1925 struct i40e_virtchnl_pf_event pfe;
1926 struct i40e_hw *hw = &pf->hw;
1927 struct i40e_vf *vf = pf->vf;
1928 struct i40e_link_status *ls = &pf->hw.phy.link_info;
1929 int i;
1927 1930
1928 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1931 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1929 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1932 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1930 pfe.event_data.link_event.link_status = 1933 for (i = 0; i < pf->num_alloc_vfs; i++) {
1931 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; 1934 if (vf->link_forced) {
1932 pfe.event_data.link_event.link_speed = pf->hw.phy.link_info.link_speed; 1935 pfe.event_data.link_event.link_status = vf->link_up;
1933 1936 pfe.event_data.link_event.link_speed =
1934 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, I40E_SUCCESS, 1937 (vf->link_up ? I40E_LINK_SPEED_40GB : 0);
1935 (u8 *)&pfe, sizeof(struct i40e_virtchnl_pf_event)); 1938 } else {
1939 pfe.event_data.link_event.link_status =
1940 ls->link_info & I40E_AQ_LINK_UP;
1941 pfe.event_data.link_event.link_speed = ls->link_speed;
1942 }
1943 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
1944 0, (u8 *)&pfe, sizeof(pfe),
1945 NULL);
1946 vf++;
1947 }
1936} 1948}
1937 1949
1938/** 1950/**
@@ -2197,3 +2209,64 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
2197error_param: 2209error_param:
2198 return ret; 2210 return ret;
2199} 2211}
2212
2213/**
2214 * i40e_ndo_set_vf_link_state
2215 * @netdev: network interface device structure
2216 * @vf_id: vf identifier
2217 * @link: required link state
2218 *
2219 * Set the link state of a specified VF, regardless of physical link state
2220 **/
2221int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
2222{
2223 struct i40e_netdev_priv *np = netdev_priv(netdev);
2224 struct i40e_pf *pf = np->vsi->back;
2225 struct i40e_virtchnl_pf_event pfe;
2226 struct i40e_hw *hw = &pf->hw;
2227 struct i40e_vf *vf;
2228 int ret = 0;
2229
2230 /* validate the request */
2231 if (vf_id >= pf->num_alloc_vfs) {
2232 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
2233 ret = -EINVAL;
2234 goto error_out;
2235 }
2236
2237 vf = &pf->vf[vf_id];
2238
2239 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
2240 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
2241
2242 switch (link) {
2243 case IFLA_VF_LINK_STATE_AUTO:
2244 vf->link_forced = false;
2245 pfe.event_data.link_event.link_status =
2246 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP;
2247 pfe.event_data.link_event.link_speed =
2248 pf->hw.phy.link_info.link_speed;
2249 break;
2250 case IFLA_VF_LINK_STATE_ENABLE:
2251 vf->link_forced = true;
2252 vf->link_up = true;
2253 pfe.event_data.link_event.link_status = true;
2254 pfe.event_data.link_event.link_speed = I40E_LINK_SPEED_40GB;
2255 break;
2256 case IFLA_VF_LINK_STATE_DISABLE:
2257 vf->link_forced = true;
2258 vf->link_up = false;
2259 pfe.event_data.link_event.link_status = false;
2260 pfe.event_data.link_event.link_speed = 0;
2261 break;
2262 default:
2263 ret = -EINVAL;
2264 goto error_out;
2265 }
2266 /* Notify the VF of its new link state */
2267 i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
2268 0, (u8 *)&pfe, sizeof(pfe), NULL);
2269
2270error_out:
2271 return ret;
2272}
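The new handler only takes effect once it is reachable through the PF netdev's operations table; a sketch of the assumed wiring in i40e_main.c (the other callbacks are elided, and this hookup is inferred rather than shown in the patch):

static const struct net_device_ops i40e_netdev_ops = {
	/* ... existing callbacks ... */
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
};

From userspace the handler is then driven by iproute2, e.g. "ip link set dev <pf> vf 0 state disable", which rtnetlink delivers as IFLA_VF_LINK_STATE_DISABLE.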
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee36e12..389c47f396d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,10 +98,13 @@ struct i40e_vf {
98 98
99 unsigned long vf_caps; /* vf's adv. capabilities */ 99 unsigned long vf_caps; /* vf's adv. capabilities */
100 unsigned long vf_states; /* vf's runtime states */ 100 unsigned long vf_states; /* vf's runtime states */
101 bool link_forced;
102 bool link_up; /* only valid if vf link is forced */
101}; 103};
102 104
103void i40e_free_vfs(struct i40e_pf *pf); 105void i40e_free_vfs(struct i40e_pf *pf);
104int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs); 106int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
107int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
105int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, 108int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
106 u32 v_retval, u8 *msg, u16 msglen); 109 u32 v_retval, u8 *msg, u16 msglen);
107int i40e_vc_process_vflr_event(struct i40e_pf *pf); 110int i40e_vc_process_vflr_event(struct i40e_pf *pf);
@@ -115,6 +118,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
115int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate); 118int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
116int i40e_ndo_get_vf_config(struct net_device *netdev, 119int i40e_ndo_get_vf_config(struct net_device *netdev,
117 int vf_id, struct ifla_vf_info *ivi); 120 int vf_id, struct ifla_vf_info *ivi);
121int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
122
118void i40e_vc_notify_link_state(struct i40e_pf *pf); 123void i40e_vc_notify_link_state(struct i40e_pf *pf);
119void i40e_vc_notify_reset(struct i40e_pf *pf); 124void i40e_vc_notify_reset(struct i40e_pf *pf);
120 125
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1bca38d..97662b6bd98a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 1229#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 1230#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
1231 1231
1232 __le32 tenant_id ; 1232 __le32 tenant_id;
1233 u8 reserved[4]; 1233 u8 reserved[4];
1234 __le16 queue_number; 1234 __le16 queue_number;
1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 1235#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 7b13953b28c4..c688a0fc5c29 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -160,6 +160,372 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
160} 160}
161 161
162 162
163/* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the
164 * hardware to a bit-field that can be used by SW to more easily determine the
165 * packet type.
166 *
167 * Macros are used to shorten the table lines and make this table human
168 * readable.
169 *
170 * We store the PTYPE in the top byte of the bit field - this is just so that
171 * we can check that the table doesn't have a row missing, as the index into
172 * the table should be the PTYPE.
173 *
174 * Typical work flow:
175 *
176 * IF NOT i40evf_ptype_lookup[ptype].known
177 * THEN
178 * Packet is unknown
179 * ELSE IF i40evf_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
180 * Use the rest of the fields to look at the tunnels, inner protocols, etc
181 * ELSE
182 * Use the enum i40e_rx_l2_ptype to decode the packet type
183 * ENDIF
184 */
185
186/* macro to make the table lines short */
187#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
188 { PTYPE, \
189 1, \
190 I40E_RX_PTYPE_OUTER_##OUTER_IP, \
191 I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
192 I40E_RX_PTYPE_##OUTER_FRAG, \
193 I40E_RX_PTYPE_TUNNEL_##T, \
194 I40E_RX_PTYPE_TUNNEL_END_##TE, \
195 I40E_RX_PTYPE_##TEF, \
196 I40E_RX_PTYPE_INNER_PROT_##I, \
197 I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
198
199#define I40E_PTT_UNUSED_ENTRY(PTYPE) \
200 { PTYPE, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
201
202/* shorter macros make the table fit but are terse */
203#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
204#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
205#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
206
207/* Lookup table mapping the HW PTYPE to the bit field for decoding */
208struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
209 /* L2 Packet types */
210 I40E_PTT_UNUSED_ENTRY(0),
211 I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
212 I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
213 I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
214 I40E_PTT_UNUSED_ENTRY(4),
215 I40E_PTT_UNUSED_ENTRY(5),
216 I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
217 I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
218 I40E_PTT_UNUSED_ENTRY(8),
219 I40E_PTT_UNUSED_ENTRY(9),
220 I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
221 I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
222 I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
223 I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
224 I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
225 I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
226 I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
227 I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
228 I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
229 I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
230 I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
231 I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
232
233 /* Non Tunneled IPv4 */
234 I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
235 I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
236 I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
237 I40E_PTT_UNUSED_ENTRY(25),
238 I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
239 I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
240 I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
241
242 /* IPv4 --> IPv4 */
243 I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
244 I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
245 I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
246 I40E_PTT_UNUSED_ENTRY(32),
247 I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
248 I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
249 I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
250
251 /* IPv4 --> IPv6 */
252 I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
253 I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
254 I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
255 I40E_PTT_UNUSED_ENTRY(39),
256 I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
257 I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
258 I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
259
260 /* IPv4 --> GRE/NAT */
261 I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
262
263 /* IPv4 --> GRE/NAT --> IPv4 */
264 I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
265 I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
266 I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
267 I40E_PTT_UNUSED_ENTRY(47),
268 I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
269 I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
270 I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
271
272 /* IPv4 --> GRE/NAT --> IPv6 */
273 I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
274 I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
275 I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
276 I40E_PTT_UNUSED_ENTRY(54),
277 I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
278 I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
279 I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
280
281 /* IPv4 --> GRE/NAT --> MAC */
282 I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
283
284 /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
285 I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
286 I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
287 I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
288 I40E_PTT_UNUSED_ENTRY(62),
289 I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
290 I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
291 I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
292
293 /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
294 I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
295 I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
296 I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
297 I40E_PTT_UNUSED_ENTRY(69),
298 I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
299 I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
300 I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
301
302 /* IPv4 --> GRE/NAT --> MAC/VLAN */
303 I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
304
305 /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
306 I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
307 I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
308 I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
309 I40E_PTT_UNUSED_ENTRY(77),
310 I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
311 I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
312 I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
313
314 /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
315 I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
316 I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
317 I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
318 I40E_PTT_UNUSED_ENTRY(84),
319 I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
320 I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
321 I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
322
323 /* Non Tunneled IPv6 */
324 I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
325 I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
326 I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
327 I40E_PTT_UNUSED_ENTRY(91),
328 I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
329 I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
330 I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
331
332 /* IPv6 --> IPv4 */
333 I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
334 I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
335 I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
336 I40E_PTT_UNUSED_ENTRY(98),
337 I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
338 I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
339 I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
340
341 /* IPv6 --> IPv6 */
342 I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
343 I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
344 I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
345 I40E_PTT_UNUSED_ENTRY(105),
346 I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
347 I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
348 I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
349
350 /* IPv6 --> GRE/NAT */
351 I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
352
353 /* IPv6 --> GRE/NAT -> IPv4 */
354 I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
355 I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
356 I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
357 I40E_PTT_UNUSED_ENTRY(113),
358 I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
359 I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
360 I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
361
362 /* IPv6 --> GRE/NAT -> IPv6 */
363 I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
364 I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
365 I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
366 I40E_PTT_UNUSED_ENTRY(120),
367 I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
368 I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
369 I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
370
371 /* IPv6 --> GRE/NAT -> MAC */
372 I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
373
374 /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
375 I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
376 I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
377 I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
378 I40E_PTT_UNUSED_ENTRY(128),
379 I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
380 I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
381 I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
382
383 /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
384 I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
385 I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
386 I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
387 I40E_PTT_UNUSED_ENTRY(135),
388 I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
389 I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
390 I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
391
392 /* IPv6 --> GRE/NAT -> MAC/VLAN */
393 I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
394
395 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
396 I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
397 I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
398 I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
399 I40E_PTT_UNUSED_ENTRY(143),
400 I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
401 I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
402 I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
403
404 /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
405 I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
406 I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
407 I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
408 I40E_PTT_UNUSED_ENTRY(150),
409 I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
410 I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
411 I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
412
413 /* unused entries */
414 I40E_PTT_UNUSED_ENTRY(154),
415 I40E_PTT_UNUSED_ENTRY(155),
416 I40E_PTT_UNUSED_ENTRY(156),
417 I40E_PTT_UNUSED_ENTRY(157),
418 I40E_PTT_UNUSED_ENTRY(158),
419 I40E_PTT_UNUSED_ENTRY(159),
420
421 I40E_PTT_UNUSED_ENTRY(160),
422 I40E_PTT_UNUSED_ENTRY(161),
423 I40E_PTT_UNUSED_ENTRY(162),
424 I40E_PTT_UNUSED_ENTRY(163),
425 I40E_PTT_UNUSED_ENTRY(164),
426 I40E_PTT_UNUSED_ENTRY(165),
427 I40E_PTT_UNUSED_ENTRY(166),
428 I40E_PTT_UNUSED_ENTRY(167),
429 I40E_PTT_UNUSED_ENTRY(168),
430 I40E_PTT_UNUSED_ENTRY(169),
431
432 I40E_PTT_UNUSED_ENTRY(170),
433 I40E_PTT_UNUSED_ENTRY(171),
434 I40E_PTT_UNUSED_ENTRY(172),
435 I40E_PTT_UNUSED_ENTRY(173),
436 I40E_PTT_UNUSED_ENTRY(174),
437 I40E_PTT_UNUSED_ENTRY(175),
438 I40E_PTT_UNUSED_ENTRY(176),
439 I40E_PTT_UNUSED_ENTRY(177),
440 I40E_PTT_UNUSED_ENTRY(178),
441 I40E_PTT_UNUSED_ENTRY(179),
442
443 I40E_PTT_UNUSED_ENTRY(180),
444 I40E_PTT_UNUSED_ENTRY(181),
445 I40E_PTT_UNUSED_ENTRY(182),
446 I40E_PTT_UNUSED_ENTRY(183),
447 I40E_PTT_UNUSED_ENTRY(184),
448 I40E_PTT_UNUSED_ENTRY(185),
449 I40E_PTT_UNUSED_ENTRY(186),
450 I40E_PTT_UNUSED_ENTRY(187),
451 I40E_PTT_UNUSED_ENTRY(188),
452 I40E_PTT_UNUSED_ENTRY(189),
453
454 I40E_PTT_UNUSED_ENTRY(190),
455 I40E_PTT_UNUSED_ENTRY(191),
456 I40E_PTT_UNUSED_ENTRY(192),
457 I40E_PTT_UNUSED_ENTRY(193),
458 I40E_PTT_UNUSED_ENTRY(194),
459 I40E_PTT_UNUSED_ENTRY(195),
460 I40E_PTT_UNUSED_ENTRY(196),
461 I40E_PTT_UNUSED_ENTRY(197),
462 I40E_PTT_UNUSED_ENTRY(198),
463 I40E_PTT_UNUSED_ENTRY(199),
464
465 I40E_PTT_UNUSED_ENTRY(200),
466 I40E_PTT_UNUSED_ENTRY(201),
467 I40E_PTT_UNUSED_ENTRY(202),
468 I40E_PTT_UNUSED_ENTRY(203),
469 I40E_PTT_UNUSED_ENTRY(204),
470 I40E_PTT_UNUSED_ENTRY(205),
471 I40E_PTT_UNUSED_ENTRY(206),
472 I40E_PTT_UNUSED_ENTRY(207),
473 I40E_PTT_UNUSED_ENTRY(208),
474 I40E_PTT_UNUSED_ENTRY(209),
475
476 I40E_PTT_UNUSED_ENTRY(210),
477 I40E_PTT_UNUSED_ENTRY(211),
478 I40E_PTT_UNUSED_ENTRY(212),
479 I40E_PTT_UNUSED_ENTRY(213),
480 I40E_PTT_UNUSED_ENTRY(214),
481 I40E_PTT_UNUSED_ENTRY(215),
482 I40E_PTT_UNUSED_ENTRY(216),
483 I40E_PTT_UNUSED_ENTRY(217),
484 I40E_PTT_UNUSED_ENTRY(218),
485 I40E_PTT_UNUSED_ENTRY(219),
486
487 I40E_PTT_UNUSED_ENTRY(220),
488 I40E_PTT_UNUSED_ENTRY(221),
489 I40E_PTT_UNUSED_ENTRY(222),
490 I40E_PTT_UNUSED_ENTRY(223),
491 I40E_PTT_UNUSED_ENTRY(224),
492 I40E_PTT_UNUSED_ENTRY(225),
493 I40E_PTT_UNUSED_ENTRY(226),
494 I40E_PTT_UNUSED_ENTRY(227),
495 I40E_PTT_UNUSED_ENTRY(228),
496 I40E_PTT_UNUSED_ENTRY(229),
497
498 I40E_PTT_UNUSED_ENTRY(230),
499 I40E_PTT_UNUSED_ENTRY(231),
500 I40E_PTT_UNUSED_ENTRY(232),
501 I40E_PTT_UNUSED_ENTRY(233),
502 I40E_PTT_UNUSED_ENTRY(234),
503 I40E_PTT_UNUSED_ENTRY(235),
504 I40E_PTT_UNUSED_ENTRY(236),
505 I40E_PTT_UNUSED_ENTRY(237),
506 I40E_PTT_UNUSED_ENTRY(238),
507 I40E_PTT_UNUSED_ENTRY(239),
508
509 I40E_PTT_UNUSED_ENTRY(240),
510 I40E_PTT_UNUSED_ENTRY(241),
511 I40E_PTT_UNUSED_ENTRY(242),
512 I40E_PTT_UNUSED_ENTRY(243),
513 I40E_PTT_UNUSED_ENTRY(244),
514 I40E_PTT_UNUSED_ENTRY(245),
515 I40E_PTT_UNUSED_ENTRY(246),
516 I40E_PTT_UNUSED_ENTRY(247),
517 I40E_PTT_UNUSED_ENTRY(248),
518 I40E_PTT_UNUSED_ENTRY(249),
519
520 I40E_PTT_UNUSED_ENTRY(250),
521 I40E_PTT_UNUSED_ENTRY(251),
522 I40E_PTT_UNUSED_ENTRY(252),
523 I40E_PTT_UNUSED_ENTRY(253),
524 I40E_PTT_UNUSED_ENTRY(254),
525 I40E_PTT_UNUSED_ENTRY(255)
526};
527
528
163/** 529/**
164 * i40e_aq_send_msg_to_pf 530 * i40e_aq_send_msg_to_pf
165 * @hw: pointer to the hardware structure 531 * @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 7841573a58c9..862fcdf52675 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -63,6 +63,13 @@ i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw,
63 63
64i40e_status i40e_set_mac_type(struct i40e_hw *hw); 64i40e_status i40e_set_mac_type(struct i40e_hw *hw);
65 65
66extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[];
67
68static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
69{
70 return i40evf_ptype_lookup[ptype];
71}
72
66/* prototype for functions used for SW locks */ 73/* prototype for functions used for SW locks */
67 74
68/* i40e_common for VF drivers*/ 75/* i40e_common for VF drivers*/
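With decode_rx_desc_ptype() available, the workflow documented above the lookup table reduces to a few field checks. An illustrative (hypothetical) helper; outer_ip and payload_layer appear elsewhere in this patch, while inner_prot is assumed from the table macro's field order:

static bool i40evf_ptype_is_tcp(u8 ptype)
{
	struct i40e_rx_ptype_decoded d = decode_rx_desc_ptype(ptype);

	if (!d.known)			/* unused table rows: unknown packet */
		return false;
	if (d.outer_ip != I40E_RX_PTYPE_OUTER_IP)
		return false;		/* pure L2 ptypes carry no L4 protocol */
	return d.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP;
}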
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d853db..53be5f44d015 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
24#include <linux/prefetch.h> 24#include <linux/prefetch.h>
25 25
26#include "i40evf.h" 26#include "i40evf.h"
27#include "i40e_prototype.h"
27 28
28static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size, 29static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
29 u32 td_tag) 30 u32 td_tag)
@@ -169,6 +170,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
169} 170}
170 171
171/** 172/**
173 * i40e_get_head - Retrieve head from head writeback
174 * @tx_ring: tx ring to fetch head of
175 *
176 * Returns value of Tx ring head based on value stored
177 * in head write-back location
178 **/
179static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
180{
181 void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
182
183 return le32_to_cpu(*(volatile __le32 *)head);
184}
185
186/**
172 * i40e_clean_tx_irq - Reclaim resources after transmit completes 187 * i40e_clean_tx_irq - Reclaim resources after transmit completes
173 * @tx_ring: tx ring to clean 188 * @tx_ring: tx ring to clean
174 * @budget: how many cleans we're allowed 189 * @budget: how many cleans we're allowed
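i40e_get_head() depends on the head write-back slot living immediately past the last descriptor, which the Tx ring sizing change later in this patch arranges. A standalone sketch of that layout and the volatile load (byte-order conversion omitted; the 16-byte descriptor is a stand-in):

#include <stdint.h>

struct tx_desc { uint64_t buffer_addr; uint64_t cmd_type_offset_bsz; };

static uint32_t get_head(struct tx_desc *ring, uint32_t count)
{
	/* The u32 head slot sits right after descriptor count - 1; the
	 * volatile read forces a fresh load of the HW-written value. */
	void *head = ring + count;

	return *(volatile uint32_t *)head;
}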
@@ -179,6 +194,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
179{ 194{
180 u16 i = tx_ring->next_to_clean; 195 u16 i = tx_ring->next_to_clean;
181 struct i40e_tx_buffer *tx_buf; 196 struct i40e_tx_buffer *tx_buf;
197 struct i40e_tx_desc *tx_head;
182 struct i40e_tx_desc *tx_desc; 198 struct i40e_tx_desc *tx_desc;
183 unsigned int total_packets = 0; 199 unsigned int total_packets = 0;
184 unsigned int total_bytes = 0; 200 unsigned int total_bytes = 0;
@@ -187,6 +203,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
187 tx_desc = I40E_TX_DESC(tx_ring, i); 203 tx_desc = I40E_TX_DESC(tx_ring, i);
188 i -= tx_ring->count; 204 i -= tx_ring->count;
189 205
206 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
207
190 do { 208 do {
191 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; 209 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;
192 210
@@ -197,9 +215,8 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
197 /* prevent any other reads prior to eop_desc */ 215 /* prevent any other reads prior to eop_desc */
198 read_barrier_depends(); 216 read_barrier_depends();
199 217
200 /* if the descriptor isn't done, no work yet to do */ 218 /* we have caught up to head, no work left to do */
201 if (!(eop_desc->cmd_type_offset_bsz & 219 if (tx_head == tx_desc)
202 cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
203 break; 220 break;
204 221
205 /* clear next_to_watch to prevent false hangs */ 222 /* clear next_to_watch to prevent false hangs */
@@ -431,6 +448,10 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring)
431 448
432 /* round up to nearest 4K */ 449 /* round up to nearest 4K */
433 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); 450 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc);
451 /* add u32 for head writeback, align after this takes care of
452 * guaranteeing this is at least one cache line in size
453 */
454 tx_ring->size += sizeof(u32);
434 tx_ring->size = ALIGN(tx_ring->size, 4096); 455 tx_ring->size = ALIGN(tx_ring->size, 4096);
435 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, 456 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
436 &tx_ring->dma, GFP_KERNEL); 457 &tx_ring->dma, GFP_KERNEL);
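To make the sizing arithmetic concrete: assuming the 16-byte Tx descriptor and a 512-entry ring, the extra u32 pushes the allocation just past 8 KB and the 4 KB alignment rounds it up, so the head slot always has private space. A runnable sketch:

#include <stdio.h>
#include <stdint.h>

#define RING_ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long count = 512, desc_size = 16;	/* assumed values */
	unsigned long size = count * desc_size;		/* 8192 */

	size += sizeof(uint32_t);		/* head write-back slot: 8196 */
	size = RING_ALIGN(size, 4096);		/* rounds up to 12288 */
	printf("ring allocation: %lu bytes\n", size);
	return 0;
}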
@@ -722,7 +743,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
722 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT))) 743 rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
723 return; 744 return;
724 745
725 /* likely incorrect csum if alternate IP extention headers found */ 746 /* likely incorrect csum if alternate IP extension headers found */
726 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT)) 747 if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
727 return; 748 return;
728 749
@@ -786,6 +807,29 @@ static inline u32 i40e_rx_hash(struct i40e_ring *ring,
786} 807}
787 808
788/** 809/**
810 * i40e_ptype_to_hash - get a hash type
811 * @ptype: the ptype value from the descriptor
812 *
813 * Returns a hash type to be used by skb_set_hash
814 **/
815static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
816{
817 struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
818
819 if (!decoded.known)
820 return PKT_HASH_TYPE_NONE;
821
822 if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
823 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
824 return PKT_HASH_TYPE_L4;
825 else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
826 decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
827 return PKT_HASH_TYPE_L3;
828 else
829 return PKT_HASH_TYPE_L2;
830}
831
832/**
789 * i40e_clean_rx_irq - Reclaim resources after receive completes 833 * i40e_clean_rx_irq - Reclaim resources after receive completes
790 * @rx_ring: rx ring to clean 834 * @rx_ring: rx ring to clean
791 * @budget: how many cleans we're allowed 835 * @budget: how many cleans we're allowed
@@ -802,13 +846,13 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
802 u16 i = rx_ring->next_to_clean; 846 u16 i = rx_ring->next_to_clean;
803 union i40e_rx_desc *rx_desc; 847 union i40e_rx_desc *rx_desc;
804 u32 rx_error, rx_status; 848 u32 rx_error, rx_status;
849 u8 rx_ptype;
805 u64 qword; 850 u64 qword;
806 u16 rx_ptype;
807 851
808 rx_desc = I40E_RX_DESC(rx_ring, i); 852 rx_desc = I40E_RX_DESC(rx_ring, i);
809 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); 853 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
810 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) 854 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
811 >> I40E_RXD_QW1_STATUS_SHIFT; 855 I40E_RXD_QW1_STATUS_SHIFT;
812 856
813 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) { 857 while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
814 union i40e_rx_desc *next_rxd; 858 union i40e_rx_desc *next_rxd;
@@ -912,7 +956,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
912 goto next_desc; 956 goto next_desc;
913 } 957 }
914 958
915 skb->rxhash = i40e_rx_hash(rx_ring, rx_desc); 959 skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
960 i40e_ptype_to_hash(rx_ptype));
916 /* probably a little skewed due to removing CRC */ 961 /* probably a little skewed due to removing CRC */
917 total_rx_bytes += skb->len; 962 total_rx_bytes += skb->len;
918 total_rx_packets++; 963 total_rx_packets++;
@@ -1241,7 +1286,8 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
1241 struct i40e_tx_context_desc *context_desc; 1286 struct i40e_tx_context_desc *context_desc;
1242 int i = tx_ring->next_to_use; 1287 int i = tx_ring->next_to_use;
1243 1288
1244 if (!cd_type_cmd_tso_mss && !cd_tunneling && !cd_l2tag2) 1289 if ((cd_type_cmd_tso_mss == I40E_TX_DESC_DTYPE_CONTEXT) &&
1290 !cd_tunneling && !cd_l2tag2)
1245 return; 1291 return;
1246 1292
1247 /* grab the next descriptor */ 1293 /* grab the next descriptor */
@@ -1352,9 +1398,23 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
1352 tx_bi = &tx_ring->tx_bi[i]; 1398 tx_bi = &tx_ring->tx_bi[i];
1353 } 1399 }
1354 1400
1355 tx_desc->cmd_type_offset_bsz = 1401 /* Place RS bit on last descriptor of any packet that spans across the
1356 build_ctob(td_cmd, td_offset, size, td_tag) | 1402 * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
1357 cpu_to_le64((u64)I40E_TXD_CMD << I40E_TXD_QW1_CMD_SHIFT); 1403 */
1404#define WB_STRIDE 0x3
1405 if (((i & WB_STRIDE) != WB_STRIDE) &&
1406 (first <= &tx_ring->tx_bi[i]) &&
1407 (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
1408 tx_desc->cmd_type_offset_bsz =
1409 build_ctob(td_cmd, td_offset, size, td_tag) |
1410 cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
1411 I40E_TXD_QW1_CMD_SHIFT);
1412 } else {
1413 tx_desc->cmd_type_offset_bsz =
1414 build_ctob(td_cmd, td_offset, size, td_tag) |
1415 cpu_to_le64((u64)I40E_TXD_CMD <<
1416 I40E_TXD_QW1_CMD_SHIFT);
1417 }
1358 1418
1359 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, 1419 netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
1360 tx_ring->queue_index), 1420 tx_ring->queue_index),
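The WB_STRIDE test above limits descriptor write-backs to roughly one per 64-byte cache line of 16-byte descriptors: a packet whose last descriptor stays inside the current stride gets EOP only, while one ending on the stride boundary (or spanning it) also gets RS. A standalone demo of which slots land on the boundary for single-descriptor packets:

#include <stdio.h>

#define WB_STRIDE 0x3	/* 4 descriptors x 16 B = one 64 B cache line */

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("slot %d: %s\n", i,
		       (i & WB_STRIDE) == WB_STRIDE ?
		       "EOP|RS (request write-back)" : "EOP only");
	return 0;	/* slots 3 and 7 request write-back */
}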
@@ -1457,7 +1517,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1457 1517
1458 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, 1518 /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
1459 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, 1519 * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
1460 * + 2 desc gap to keep tail from touching head, 1520 * + 4 desc gap to avoid the cache line where head is,
1461 * + 1 desc for context descriptor, 1521 * + 1 desc for context descriptor,
1462 * otherwise try next time 1522 * otherwise try next time
1463 */ 1523 */
@@ -1468,7 +1528,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
1468 count += skb_shinfo(skb)->nr_frags; 1528 count += skb_shinfo(skb)->nr_frags;
1469#endif 1529#endif
1470 count += TXD_USE_COUNT(skb_headlen(skb)); 1530 count += TXD_USE_COUNT(skb_headlen(skb));
1471 if (i40e_maybe_stop_tx(tx_ring, count + 3)) { 1531 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
1472 tx_ring->tx_stats.tx_busy++; 1532 tx_ring->tx_stats.tx_busy++;
1473 return 0; 1533 return 0;
1474 } 1534 }
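The headroom change matches the head write-back scheme: the ring must keep a full 4-descriptor stride clear of the slot hardware reports as head, plus one slot for a possible context descriptor. A worked example, assuming an skb whose linear head and each of 3 page fragments fit one data descriptor apiece:

#include <stdio.h>

int main(void)
{
	unsigned int count = 1 + 3;		/* head + 3 frags = 4 data descs */
	unsigned int needed = count + 4 + 1;	/* + head gap + context desc */

	printf("free descriptors required: %u\n", needed);	/* 9 */
	return 0;
}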
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac06592f..efe73ad6fdb9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
64struct i40e_hw; 64struct i40e_hw;
65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); 65typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
66 66
67#define ETH_ALEN 6
68
69/* Data type manipulation macros. */ 67/* Data type manipulation macros. */
70 68
71#define I40E_DESC_UNUSED(R) \ 69#define I40E_DESC_UNUSED(R) \
@@ -466,6 +464,10 @@ union i40e_32byte_rx_desc {
466 union { 464 union {
467 __le32 rss; /* RSS Hash */ 465 __le32 rss; /* RSS Hash */
468 __le32 fcoe_param; /* FCoE DDP Context id */ 466 __le32 fcoe_param; /* FCoE DDP Context id */
467 /* Flow director filter id in case of
468 * Programming status desc WB
469 */
470 __le32 fd_id;
469 } hi_dword; 471 } hi_dword;
470 } qword0; 472 } qword0;
471 struct { 473 struct {
@@ -706,7 +708,7 @@ enum i40e_rx_prog_status_desc_prog_id_masks {
706enum i40e_rx_prog_status_desc_error_bits { 708enum i40e_rx_prog_status_desc_error_bits {
707 /* Note: These are predefined bit offsets */ 709 /* Note: These are predefined bit offsets */
708 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, 710 I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0,
709 I40E_RX_PROG_STATUS_DESC_NO_FD_QUOTA_SHIFT = 1, 711 I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1,
710 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, 712 I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2,
711 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 713 I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3
712}; 714};
@@ -1018,6 +1020,11 @@ struct i40e_hw_port_stats {
1018 u64 tx_size_big; /* ptc9522 */ 1020 u64 tx_size_big; /* ptc9522 */
1019 u64 mac_short_packet_dropped; /* mspdc */ 1021 u64 mac_short_packet_dropped; /* mspdc */
1020 u64 checksum_error; /* xec */ 1022 u64 checksum_error; /* xec */
1023 /* EEE LPI */
1024 bool tx_lpi_status;
1025 bool rx_lpi_status;
1026 u64 tx_lpi_count; /* etlpic */
1027 u64 rx_lpi_count; /* erlpic */
1021}; 1028};
1022 1029
1023/* Checksum and Shadow RAM pointers */ 1030/* Checksum and Shadow RAM pointers */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b288a1..ccb43d343543 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
38#include <linux/ipv6.h> 38#include <linux/ipv6.h>
39#include <net/ip6_checksum.h> 39#include <net/ip6_checksum.h>
40#include <net/udp.h> 40#include <net/udp.h>
41#include <linux/sctp.h>
42
43 41
44#include "i40e_type.h" 42#include "i40e_type.h"
45#include "i40e_virtchnl.h" 43#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
164/* Driver state. The order of these is important! */ 162/* Driver state. The order of these is important! */
165enum i40evf_state_t { 163enum i40evf_state_t {
166 __I40EVF_STARTUP, /* driver loaded, probe complete */ 164 __I40EVF_STARTUP, /* driver loaded, probe complete */
167 __I40EVF_FAILED, /* PF communication failed. Fatal. */
168 __I40EVF_REMOVE, /* driver is being unloaded */ 165 __I40EVF_REMOVE, /* driver is being unloaded */
169 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */ 166 __I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
170 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */ 167 __I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
171 __I40EVF_INIT_SW, /* got resources, setting up structs */ 168 __I40EVF_INIT_SW, /* got resources, setting up structs */
169 __I40EVF_RESETTING, /* in reset */
172 /* Below here, watchdog is running */ 170 /* Below here, watchdog is running */
173 __I40EVF_DOWN, /* ready, can be opened */ 171 __I40EVF_DOWN, /* ready, can be opened */
174 __I40EVF_TESTING, /* in ethtool self-test */ 172 __I40EVF_TESTING, /* in ethtool self-test */
175 __I40EVF_RESETTING, /* in reset */
176 __I40EVF_RUNNING, /* opened, working */ 173 __I40EVF_RUNNING, /* opened, working */
177}; 174};
178 175
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
185/* board specific private data structure */ 182/* board specific private data structure */
186struct i40evf_adapter { 183struct i40evf_adapter {
187 struct timer_list watchdog_timer; 184 struct timer_list watchdog_timer;
188 struct vlan_group *vlgrp;
189 struct work_struct reset_task; 185 struct work_struct reset_task;
190 struct work_struct adminq_task; 186 struct work_struct adminq_task;
191 struct delayed_work init_task; 187 struct delayed_work init_task;
192 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 188 struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
193 struct list_head vlan_filter_list; 189 struct list_head vlan_filter_list;
194 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9]; 190 char misc_vector_name[IFNAMSIZ + 9];
195
196 /* Interrupt Throttle Rate */
197 u32 itr_setting;
198 u16 eitr_low;
199 u16 eitr_high;
200 191
201 /* TX */ 192 /* TX */
202 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP]; 193 struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
203 u64 restart_queue;
204 u64 hw_csum_tx_good;
205 u64 lsc_int;
206 u64 hw_tso_ctxt;
207 u64 hw_tso6_ctxt;
208 u32 tx_timeout_count; 194 u32 tx_timeout_count;
209 struct list_head mac_filter_list; 195 struct list_head mac_filter_list;
210#ifdef DEBUG
211 bool detect_tx_hung;
212#endif /* DEBUG */
213 196
214 /* RX */ 197 /* RX */
215 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP]; 198 struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
216 int txd_count;
217 int rxd_count;
218 u64 hw_csum_rx_error; 199 u64 hw_csum_rx_error;
219 u64 hw_rx_no_dma_resources;
220 u64 hw_csum_rx_good;
221 u64 non_eop_descs;
222 int num_msix_vectors; 200 int num_msix_vectors;
223 struct msix_entry *msix_entries; 201 struct msix_entry *msix_entries;
224 202
225 u64 rx_hdr_split; 203 u32 flags;
226
227 u32 init_state;
228 volatile unsigned long flags;
229#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1) 204#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
230#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1) 205#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
231#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2) 206#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,8 @@ struct i40evf_adapter {
234#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5) 209#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
235#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6) 210#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
236#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7) 211#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
212#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
213#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
237/* duplicates for common code */ 214
238#define I40E_FLAG_FDIR_ATR_ENABLED 0 215#define I40E_FLAG_FDIR_ATR_ENABLED 0
239#define I40E_FLAG_DCB_ENABLED 0 216#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +228,19 @@ struct i40evf_adapter {
251#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) 228#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
252#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) 229#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
253#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) 230#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
231
254 /* OS defined structs */ 232 /* OS defined structs */
255 struct net_device *netdev; 233 struct net_device *netdev;
256 struct pci_dev *pdev; 234 struct pci_dev *pdev;
257 struct net_device_stats net_stats; 235 struct net_device_stats net_stats;
258 236
259 /* structs defined in i40e_vf.h */ 237 struct i40e_hw hw; /* defined in i40e_type.h */
260 struct i40e_hw hw;
261 238
262 enum i40evf_state_t state; 239 enum i40evf_state_t state;
263 volatile unsigned long crit_section; 240 volatile unsigned long crit_section;
264 u64 tx_busy;
265 241
266 struct work_struct watchdog_task; 242 struct work_struct watchdog_task;
267 bool netdev_registered; 243 bool netdev_registered;
268 bool dev_closed;
269 bool link_up; 244 bool link_up;
270 enum i40e_virtchnl_ops current_op; 245 enum i40e_virtchnl_ops current_op;
271 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */ 246 struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +251,6 @@ struct i40evf_adapter {
276 u32 aq_wait_count; 251 u32 aq_wait_count;
277}; 252};
278 253
279struct i40evf_info {
280 enum i40e_mac_type mac;
281 unsigned int flags;
282};
283
284 254
285/* needed by i40evf_ethtool.c */ 255/* needed by i40evf_ethtool.c */
286extern char i40evf_driver_name[]; 256extern char i40evf_driver_name[];
@@ -315,6 +285,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
315void i40evf_del_vlans(struct i40evf_adapter *adapter); 285void i40evf_del_vlans(struct i40evf_adapter *adapter);
316void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags); 286void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
317void i40evf_request_stats(struct i40evf_adapter *adapter); 287void i40evf_request_stats(struct i40evf_adapter *adapter);
288void i40evf_request_reset(struct i40evf_adapter *adapter);
318void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, 289void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
319 enum i40e_virtchnl_ops v_opcode, 290 enum i40e_virtchnl_ops v_opcode,
320 i40e_status v_retval, u8 *msg, u16 msglen); 291 i40e_status v_retval, u8 *msg, u16 msglen);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4bf5ac0..8b0db1ce179c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
241{ 241{
242 struct i40evf_adapter *adapter = netdev_priv(netdev); 242 struct i40evf_adapter *adapter = netdev_priv(netdev);
243 u32 new_rx_count, new_tx_count; 243 u32 new_rx_count, new_tx_count;
244 int i;
244 245
245 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 246 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
246 return -EINVAL; 247 return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
256 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE); 257 new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
257 258
258 /* if nothing to do return success */ 259 /* if nothing to do return success */
259 if ((new_tx_count == adapter->txd_count) && 260 if ((new_tx_count == adapter->tx_rings[0]->count) &&
260 (new_rx_count == adapter->rxd_count)) 261 (new_rx_count == adapter->rx_rings[0]->count))
261 return 0; 262 return 0;
262 263
263 adapter->txd_count = new_tx_count; 264 for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
264 adapter->rxd_count = new_rx_count; 265 adapter->tx_rings[i]->count = new_tx_count;
266 adapter->rx_rings[i]->count = new_rx_count;
267 }
265 268
266 if (netif_running(netdev)) 269 if (netif_running(netdev))
267 i40evf_reinit_locked(adapter); 270 i40evf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf4419243..d3eafa320ba9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 * 2 *
3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver 3 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
4 * Copyright(c) 2013 Intel Corporation. 4 * Copyright(c) 2013 - 2014 Intel Corporation.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License, 7 * under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
31static const char i40evf_driver_string[] = 31static const char i40evf_driver_string[] =
32 "Intel(R) XL710 X710 Virtual Function Network Driver"; 32 "Intel(R) XL710 X710 Virtual Function Network Driver";
33 33
34#define DRV_VERSION "0.9.11" 34#define DRV_VERSION "0.9.16"
35const char i40evf_driver_version[] = DRV_VERSION; 35const char i40evf_driver_version[] = DRV_VERSION;
36static const char i40evf_copyright[] = 36static const char i40evf_copyright[] =
37 "Copyright (c) 2013 Intel Corporation."; 37 "Copyright (c) 2013 - 2014 Intel Corporation.";
38 38
39/* i40evf_pci_tbl - PCI Device ID Table 39/* i40evf_pci_tbl - PCI Device ID Table
40 * 40 *
@@ -167,9 +167,13 @@ static void i40evf_tx_timeout(struct net_device *netdev)
167 struct i40evf_adapter *adapter = netdev_priv(netdev); 167 struct i40evf_adapter *adapter = netdev_priv(netdev);
168 168
169 adapter->tx_timeout_count++; 169 adapter->tx_timeout_count++;
170 170 dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
171 /* Do the reset outside of interrupt context */ 171 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
172 schedule_work(&adapter->reset_task); 172 dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
173 i40evf_request_reset(adapter);
174 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
175 schedule_work(&adapter->reset_task);
176 }
173} 177}
174 178
175/** 179/**
@@ -211,6 +215,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
211 int i; 215 int i;
212 struct i40e_hw *hw = &adapter->hw; 216 struct i40e_hw *hw = &adapter->hw;
213 217
218 if (!adapter->msix_entries)
219 return;
220
214 for (i = 1; i < adapter->num_msix_vectors; i++) { 221 for (i = 1; i < adapter->num_msix_vectors; i++) {
215 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0); 222 wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
216 synchronize_irq(adapter->msix_entries[i].vector); 223 synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +518,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
511 struct net_device *netdev = adapter->netdev; 518 struct net_device *netdev = adapter->netdev;
512 int err; 519 int err;
513 520
514 sprintf(adapter->name[0], "i40evf:mbx"); 521 sprintf(adapter->misc_vector_name, "i40evf:mbx");
515 err = request_irq(adapter->msix_entries[0].vector, 522 err = request_irq(adapter->msix_entries[0].vector,
516 &i40evf_msix_aq, 0, adapter->name[0], netdev); 523 &i40evf_msix_aq, 0,
524 adapter->misc_vector_name, netdev);
517 if (err) { 525 if (err) {
518 dev_err(&adapter->pdev->dev, 526 dev_err(&adapter->pdev->dev,
519 "request_irq for msix_aq failed: %d\n", err); 527 "request_irq for %s failed: %d\n",
528 adapter->misc_vector_name, err);
520 free_irq(adapter->msix_entries[0].vector, netdev); 529 free_irq(adapter->msix_entries[0].vector, netdev);
521 } 530 }
522 return err; 531 return err;
@@ -963,16 +972,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
963 struct net_device *netdev = adapter->netdev; 972 struct net_device *netdev = adapter->netdev;
964 struct i40evf_mac_filter *f; 973 struct i40evf_mac_filter *f;
965 974
966 /* remove all MAC filters from the VSI */ 975 /* remove all MAC filters */
967 list_for_each_entry(f, &adapter->mac_filter_list, list) { 976 list_for_each_entry(f, &adapter->mac_filter_list, list) {
968 f->remove = true; 977 f->remove = true;
969 } 978 }
970 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER; 979 /* remove all VLAN filters */
971 /* disable receives */ 980 list_for_each_entry(f, &adapter->vlan_filter_list, list) {
972 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES; 981 f->remove = true;
973 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1); 982 }
974 msleep(20); 983 if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
975 984 adapter->state != __I40EVF_RESETTING) {
985 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
986 adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
987 /* disable receives */
988 adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
989 mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
990 msleep(20);
991 }
976 netif_tx_disable(netdev); 992 netif_tx_disable(netdev);
977 993
978 netif_tx_stop_all_queues(netdev); 994 netif_tx_stop_all_queues(netdev);
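The filter teardown above is deliberately asynchronous: entries are only marked (f->remove = true) and the matching I40EVF_FLAG_AQ_* bits are set; the watchdog task later walks the lists and issues the actual admin-queue deletions. A compressed sketch of the consumer side (the first helper name is hypothetical; i40evf_del_vlans() is prototyped in i40evf.h):

	/* Watchdog side (sketch): act on whatever the down path marked. */
	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER)
		i40evf_del_ether_addrs(adapter);	/* hypothetical helper */
	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER)
		i40evf_del_vlans(adapter);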
@@ -1124,8 +1140,8 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
1124 * than CPU's. So let's be conservative and only ask for 1140 * than CPU's. So let's be conservative and only ask for
1125 * (roughly) twice the number of vectors as there are CPU's. 1141 * (roughly) twice the number of vectors as there are CPU's.
1126 */ 1142 */
1127 v_budget = min(pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS; 1143 v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
1128 v_budget = min(v_budget, (int)adapter->vf_res->max_vectors + 1); 1144 v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
1129 1145
1130 /* A failure in MSI-X entry allocation isn't fatal, but it does 1146 /* A failure in MSI-X entry allocation isn't fatal, but it does
1131 * mean we disable MSI-X capabilities of the adapter. 1147 * mean we disable MSI-X capabilities of the adapter.
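Concretely, and assuming NONQ_VECS is 1: with 8 queue pairs, 4 online CPUs, and max_vectors = 5 granted by the PF, the budget is min(8, 4 * 2) + 1 = 9, which the second min_t() now clamps to 5. Dropping the old "+ 1" means the VF can no longer ask for more vectors than the PF actually granted. A runnable check of the arithmetic:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int pairs = 8, cpus = 4, max_vectors = 5, nonq_vecs = 1; /* assumed */
	int v_budget = MIN(pairs, cpus * 2) + nonq_vecs;	/* 9 */

	v_budget = MIN(v_budget, max_vectors);			/* 5 */
	printf("v_budget = %d\n", v_budget);
	return 0;
}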
@@ -1291,19 +1307,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
1291 watchdog_task); 1307 watchdog_task);
1292 struct i40e_hw *hw = &adapter->hw; 1308 struct i40e_hw *hw = &adapter->hw;
1293 1309
1294 if (adapter->state < __I40EVF_DOWN) 1310 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
1311 goto restart_watchdog;
1312
1313 if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
1314 dev_info(&adapter->pdev->dev, "Checking for redemption\n");
1315 if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
1316 /* A chance for redemption! */
1317 dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
1318 adapter->state = __I40EVF_STARTUP;
1319 adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
1320 schedule_delayed_work(&adapter->init_task, 10);
1321 clear_bit(__I40EVF_IN_CRITICAL_TASK,
1322 &adapter->crit_section);
1323 /* Don't reschedule the watchdog, since we've restarted
1324 * the init task. When init_task contacts the PF and
1325 * gets everything set up again, it'll restart the
1326 * watchdog for us. Down, boy. Sit. Stay. Woof.
1327 */
1328 return;
1329 }
1330 adapter->aq_pending = 0;
1331 adapter->aq_required = 0;
1332 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1295 goto watchdog_done; 1333 goto watchdog_done;
1334 }
1296 1335
1297 if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section)) 1336 if ((adapter->state < __I40EVF_DOWN) ||
1337 (adapter->flags & I40EVF_FLAG_RESET_PENDING))
1298 goto watchdog_done; 1338 goto watchdog_done;
1299 1339
1300 /* check for unannounced reset */ 1340 /* check for reset */
1301 if ((adapter->state != __I40EVF_RESETTING) && 1341 if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
1302 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) { 1342 (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
1303 adapter->state = __I40EVF_RESETTING; 1343 adapter->state = __I40EVF_RESETTING;
1344 adapter->flags |= I40EVF_FLAG_RESET_PENDING;
1345 dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
1346 dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
1304 schedule_work(&adapter->reset_task); 1347 schedule_work(&adapter->reset_task);
1305 dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n", 1348 adapter->aq_pending = 0;
1306 __func__); 1349 adapter->aq_required = 0;
1350 adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
1307 goto watchdog_done; 1351 goto watchdog_done;
1308 } 1352 }
1309 1353
@@ -1358,16 +1402,25 @@ static void i40evf_watchdog_task(struct work_struct *work)
1358 1402
1359 i40evf_irq_enable(adapter, true); 1403 i40evf_irq_enable(adapter, true);
1360 i40evf_fire_sw_int(adapter, 0xFF); 1404 i40evf_fire_sw_int(adapter, 0xFF);
1405
1361watchdog_done: 1406watchdog_done:
1407 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1408restart_watchdog:
1362 if (adapter->aq_required) 1409 if (adapter->aq_required)
1363 mod_timer(&adapter->watchdog_timer, 1410 mod_timer(&adapter->watchdog_timer,
1364 jiffies + msecs_to_jiffies(20)); 1411 jiffies + msecs_to_jiffies(20));
1365 else 1412 else
1366 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2)); 1413 mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
1367 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
1368 schedule_work(&adapter->adminq_task); 1414 schedule_work(&adapter->adminq_task);
1369} 1415}
1370 1416
1417static int next_queue(struct i40evf_adapter *adapter, int j)
1418{
1419 j += 1;
1420
1421 return j >= adapter->vsi_res->num_queue_pairs ? 0 : j;
1422}
1423
1371/** 1424/**
1372 * i40evf_configure_rss - Prepare for RSS if used 1425 * i40evf_configure_rss - Prepare for RSS if used
1373 * @adapter: board private structure 1426 * @adapter: board private structure
@@ -1398,19 +1451,19 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
 	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
 	/* Populate the LUT with max no. of queues in round robin fashion */
-	for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
-		if (j == adapter->vsi_res->num_queue_pairs)
-			j = 0;
-		/* lut = 4-byte sliding window of 4 lut entries */
-		lut = (lut << 8) | (j &
-			((0x1 << 8) - 1));
-		/* On i = 3, we have 4 entries in lut; write to the register */
-		if ((i & 3) == 3)
-			wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
+	j = adapter->vsi_res->num_queue_pairs;
+	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+		lut = next_queue(adapter, j);
+		lut |= next_queue(adapter, j) << 8;
+		lut |= next_queue(adapter, j) << 16;
+		lut |= next_queue(adapter, j) << 24;
+		wr32(hw, I40E_VFQF_HLUT(i), lut);
 	}
 	i40e_flush(hw);
 }
 
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
 /**
  * i40evf_reset_task - Call-back task to handle hardware reset
  * @work: pointer to work_struct
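Each I40E_VFQF_HLUT register holds four 8-bit LUT entries, so the new loop packs four queue indices per 32-bit write instead of the old sliding-window accumulation. One caveat worth noting: next_queue() receives j by value and never stores it back, so as written the four calls in an iteration all return the same index. The standalone sketch below (plain C, invented names, printf standing in for wr32()) shows the round-robin intent with the advance made explicit:

#include <stdint.h>
#include <stdio.h>

/* Advance a queue index round-robin across num_queues queues. */
static int example_next_queue(int j, int num_queues)
{
	return ++j >= num_queues ? 0 : j;
}

int main(void)
{
	int num_queues = 3;	/* stands in for vsi_res->num_queue_pairs */
	int j = num_queues;	/* first advance wraps around to queue 0 */
	int i;

	for (i = 0; i < 16; i++) {	/* 16 registers as a stand-in */
		uint32_t lut;

		j = example_next_queue(j, num_queues); lut  = (uint32_t)j;
		j = example_next_queue(j, num_queues); lut |= (uint32_t)j << 8;
		j = example_next_queue(j, num_queues); lut |= (uint32_t)j << 16;
		j = example_next_queue(j, num_queues); lut |= (uint32_t)j << 24;
		printf("HLUT[%2d] = 0x%08x\n", i, lut);
	}
	return 0;
}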
@@ -1421,8 +1474,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
  **/
 static void i40evf_reset_task(struct work_struct *work)
 {
-	struct i40evf_adapter *adapter =
-		container_of(work, struct i40evf_adapter, reset_task);
+	struct i40evf_adapter *adapter = container_of(work,
+						      struct i40evf_adapter,
+						      reset_task);
 	struct i40e_hw *hw = &adapter->hw;
 	int i = 0, err;
 	uint32_t rstat_val;
@@ -1430,22 +1484,56 @@ static void i40evf_reset_task(struct work_struct *work)
 	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
 				&adapter->crit_section))
 		udelay(500);
+	/* poll until we see the reset actually happen */
+	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+		if (rstat_val != I40E_VFR_VFACTIVE) {
+			dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+			break;
+		} else {
+			msleep(I40EVF_RESET_WAIT_MS);
+		}
+	}
+	if (i == I40EVF_RESET_WAIT_COUNT) {
+		dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		goto continue_reset; /* act like the reset happened */
+	}
 
-	/* wait until the reset is complete */
-	for (i = 0; i < 20; i++) {
+	/* wait until the reset is complete and the PF is responding to us */
+	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
 		rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
 			    I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-		if (rstat_val == I40E_VFR_COMPLETED)
+		if (rstat_val == I40E_VFR_VFACTIVE) {
+			dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
 			break;
-		else
-			mdelay(100);
+		} else {
+			msleep(I40EVF_RESET_WAIT_MS);
+		}
 	}
-	if (i == 20) {
+	if (i == I40EVF_RESET_WAIT_COUNT) {
 		/* reset never finished */
-		dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
-			 __func__, rstat_val);
-		/* carry on anyway */
+		dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+			rstat_val);
+		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+		if (netif_running(adapter->netdev))
+			i40evf_close(adapter->netdev);
+
+		i40evf_free_misc_irq(adapter);
+		i40evf_reset_interrupt_capability(adapter);
+		i40evf_free_queues(adapter);
+		kfree(adapter->vf_res);
+		i40evf_shutdown_adminq(hw);
+		adapter->netdev->flags &= ~IFF_UP;
+		clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
+
+continue_reset:
+	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
 	i40evf_down(adapter);
 	adapter->state = __I40EVF_RESETTING;
 
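The reset task now polls VFGEN_RSTAT in two bounded phases: first until the state leaves VFACTIVE (the PF-issued reset has actually started), then until it returns to VFACTIVE (the PF is back and responding), giving up after I40EVF_RESET_WAIT_COUNT polls of I40EVF_RESET_WAIT_MS each. Both loops share the shape sketched below; the helper and its signature are invented for illustration, the driver open-codes the loops:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_WAIT_MS		100
#define EXAMPLE_WAIT_COUNT	200

/* Poll read_state() until (value == want) equals match: match = false
 * waits for the state to leave @want, match = true waits for it to come
 * back. Returns 0 on success, -ETIMEDOUT when the budget runs out.
 */
static int example_poll_state(u32 (*read_state)(void *ctx), void *ctx,
			      u32 want, bool match)
{
	int i;

	for (i = 0; i < EXAMPLE_WAIT_COUNT; i++) {
		if ((read_state(ctx) == want) == match)
			return 0;
		msleep(EXAMPLE_WAIT_MS);
	}
	return -ETIMEDOUT;
}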
@@ -1505,6 +1593,9 @@ static void i40evf_adminq_task(struct work_struct *work)
 	i40e_status ret;
 	u16 pending;
 
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+		return;
+
 	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
 	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
 	if (!event.msg_buf) {
@@ -1636,6 +1727,10 @@ static int i40evf_open(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 	int err;
 
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+		return -EIO;
+	}
 	if (adapter->state != __I40EVF_DOWN)
 		return -EBUSY;
 
@@ -1690,8 +1785,12 @@ static int i40evf_close(struct net_device *netdev)
 {
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
+	if (adapter->state <= __I40EVF_DOWN)
+		return 0;
+
 	/* signal that we are down to the interrupt handler */
 	adapter->state = __I40EVF_DOWN;
+
 	set_bit(__I40E_DOWN, &adapter->vsi.state);
 
 	i40evf_down(adapter);
@@ -1842,16 +1941,18 @@ static void i40evf_init_task(struct work_struct *work)
 	switch (adapter->state) {
 	case __I40EVF_STARTUP:
 		/* driver loaded, probe complete */
+		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
 		err = i40e_set_mac_type(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+				err);
 			goto err;
 		}
 		err = i40evf_check_reset_complete(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+				err);
 			goto err;
 		}
 		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1962,13 @@ static void i40evf_init_task(struct work_struct *work)
 
 		err = i40evf_init_adminq(hw);
 		if (err) {
-			dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+				err);
 			goto err;
 		}
 		err = i40evf_send_api_ver(adapter);
 		if (err) {
-			dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
 			i40evf_shutdown_adminq(hw);
 			goto err;
 		}
@@ -1876,19 +1976,21 @@ static void i40evf_init_task(struct work_struct *work)
 		goto restart;
 		break;
 	case __I40EVF_INIT_VERSION_CHECK:
-		if (!i40evf_asq_done(hw))
+		if (!i40evf_asq_done(hw)) {
+			dev_err(&pdev->dev, "Admin queue command never completed.\n");
 			goto err;
+		}
 
 		/* aq msg sent, awaiting reply */
 		err = i40evf_verify_api_ver(adapter);
 		if (err) {
-			dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+			dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
 				err);
 			goto err;
 		}
 		err = i40evf_send_vf_config_msg(adapter);
 		if (err) {
-			dev_err(&pdev->dev, "Unable send config request, error %d\n",
+			dev_err(&pdev->dev, "Unable send config request (%d)\n",
 				err);
 			goto err;
 		}
@@ -1902,18 +2004,15 @@ static void i40evf_init_task(struct work_struct *work)
 				(I40E_MAX_VF_VSI *
 				 sizeof(struct i40e_virtchnl_vsi_resource));
 			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
-			if (!adapter->vf_res) {
-				dev_err(&pdev->dev, "%s: unable to allocate memory\n",
-					__func__);
+			if (!adapter->vf_res)
 				goto err;
-			}
 		}
 		err = i40evf_get_vf_config(adapter);
 		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
 			goto restart;
 		if (err) {
-			dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
-				 __func__, err);
+			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+				err);
 			goto err_alloc;
 		}
 		adapter->state = __I40EVF_INIT_SW;
@@ -1927,25 +2026,23 @@ static void i40evf_init_task(struct work_struct *work)
 			adapter->vsi_res = &adapter->vf_res->vsi_res[i];
 		}
 		if (!adapter->vsi_res) {
-			dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+			dev_err(&pdev->dev, "No LAN VSI found\n");
 			goto err_alloc;
 		}
 
 		adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
 
-		adapter->txd_count = I40EVF_DEFAULT_TXD;
-		adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
 		netdev->netdev_ops = &i40evf_netdev_ops;
 		i40evf_set_ethtool_ops(netdev);
 		netdev->watchdog_timeo = 5 * HZ;
-
-		netdev->features |= NETIF_F_SG |
+		netdev->features |= NETIF_F_HIGHDMA |
+				    NETIF_F_SG |
 				    NETIF_F_IP_CSUM |
 				    NETIF_F_SCTP_CSUM |
 				    NETIF_F_IPV6_CSUM |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6 |
+				    NETIF_F_RXCSUM |
 				    NETIF_F_GRO;
 
 		if (adapter->vf_res->vf_offload_flags
@@ -1956,11 +2053,13 @@ static void i40evf_init_task(struct work_struct *work)
 				NETIF_F_HW_VLAN_CTAG_FILTER;
 	}
 
-	/* The HW MAC address was set and/or determined in sw_init */
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+	netdev->hw_features &= ~NETIF_F_RXCSUM;
+
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
-		dev_info(&pdev->dev,
-			 "Invalid MAC address %pMAC, using random\n",
-			 adapter->hw.mac.addr);
+		dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+			 adapter->hw.mac.addr);
 		random_ether_addr(adapter->hw.mac.addr);
 	}
 	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2093,6 @@ static void i40evf_init_task(struct work_struct *work)
 
 	netif_carrier_off(netdev);
 
-	strcpy(netdev->name, "eth%d");
-
 	adapter->vsi.id = adapter->vsi_res->vsi_id;
 	adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
 	adapter->vsi.back = adapter;
@@ -2005,9 +2102,11 @@ static void i40evf_init_task(struct work_struct *work)
 	adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
 	adapter->vsi.netdev = adapter->netdev;
 
-	err = register_netdev(netdev);
-	if (err)
-		goto err_register;
+	if (!adapter->netdev_registered) {
+		err = register_netdev(netdev);
+		if (err)
+			goto err_register;
+	}
 
 	adapter->netdev_registered = true;
 
@@ -2031,7 +2130,6 @@ err_register:
 	i40evf_free_misc_irq(adapter);
 err_sw_init:
 	i40evf_reset_interrupt_capability(adapter);
-	adapter->state = __I40EVF_FAILED;
 err_alloc:
 	kfree(adapter->vf_res);
 	adapter->vf_res = NULL;
@@ -2039,9 +2137,7 @@ err:
 	/* Things went into the weeds, so try again later */
 	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
 		dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
-		if (hw->aq.asq.count)
-			i40evf_shutdown_adminq(hw); /* ignore error */
-		adapter->state = __I40EVF_FAILED;
+		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
 		return; /* do not reschedule */
 	}
 	schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,25 +2180,18 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct net_device *netdev;
 	struct i40evf_adapter *adapter = NULL;
 	struct i40e_hw *hw = NULL;
-	int err, pci_using_dac;
+	int err;
 
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
 
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
-		pci_using_dac = true;
-		/* coherent mask for the same size will always succeed if
-		 * dma_set_mask does
-		 */
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-		pci_using_dac = false;
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-	} else {
-		dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
-			__func__, err);
-		err = -EIO;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (err)
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (err) {
+		dev_err(&pdev->dev,
+			"DMA configuration failed: 0x%x\n", err);
 		goto err_dma;
 	}
 
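dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, so the old four-branch mask dance and the pci_using_dac bookkeeping disappear; that is also why NETIF_F_HIGHDMA is now set unconditionally in the feature flags earlier in this patch. The usual probe-time shape, as a sketch rather than the driver's exact code:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Prefer 64-bit DMA addressing, fall back to 32-bit, and fail the probe
 * if neither mask is usable.
 */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return err;
}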
@@ -2128,8 +2217,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
-	if (pci_using_dac)
-		netdev->features |= NETIF_F_HIGHDMA;
 
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
@@ -2271,6 +2358,7 @@ static void i40evf_remove(struct pci_dev *pdev)
 	struct i40e_hw *hw = &adapter->hw;
 
 	cancel_delayed_work_sync(&adapter->init_task);
+	cancel_work_sync(&adapter->reset_task);
 
 	if (adapter->netdev_registered) {
 		unregister_netdev(netdev);
@@ -2278,17 +2366,15 @@ static void i40evf_remove(struct pci_dev *pdev)
 	}
 	adapter->state = __I40EVF_REMOVE;
 
-	if (adapter->num_msix_vectors) {
+	if (adapter->msix_entries) {
 		i40evf_misc_irq_disable(adapter);
-		del_timer_sync(&adapter->watchdog_timer);
-
-		flush_scheduled_work();
-
 		i40evf_free_misc_irq(adapter);
-
 		i40evf_reset_interrupt_capability(adapter);
 	}
 
+	del_timer_sync(&adapter->watchdog_timer);
+	flush_scheduled_work();
+
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
 
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d79e62b..e294f012647d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
 	struct i40e_hw *hw = &adapter->hw;
 	i40e_status err;
 
+	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+		return 0; /* nothing to see here, move along */
+
 	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
 	if (err)
 		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
 	/* if the request failed, don't lock out others */
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 }
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+	/* Don't check CURRENT_OP - this is always higher priority */
+	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
 
 /**
  * i40evf_virtchnl_completion
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
 		}
 		break;
 	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
-		adapter->state = __I40EVF_RESETTING;
-		schedule_work(&adapter->reset_task);
-		dev_info(&adapter->pdev->dev,
-			 "%s: hardware reset pending\n", __func__);
+		dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+		if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+			adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+			dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+			schedule_work(&adapter->reset_task);
+		}
 		break;
 	default:
 		dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e285bb..5bcb2de75933 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
 # more details.
 #
 # You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
 #
 # The full GNU General Public License is included in this distribution in
 # the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 06df6928f44c..fa36fe12e775 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -77,8 +76,6 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
 static const u16 e1000_82580_rxpbs_table[] =
 	{ 36, 72, 144, 1, 2, 4, 8, 16,
 	  35, 70, 140 };
-#define E1000_82580_RXPBS_TABLE_SIZE \
-	(sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
 
 /**
  * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
84 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO 81 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -2308,7 +2305,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
 {
 	u16 ret_val = 0;
 
-	if (data < E1000_82580_RXPBS_TABLE_SIZE)
+	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
 		ret_val = e1000_82580_rxpbs_table[data];
 
 	return ret_val;
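ARRAY_SIZE() replaces the hand-maintained E1000_82580_RXPBS_TABLE_SIZE macro, so the bounds check can never drift out of sync with the table. The kernel macro lives in <linux/kernel.h>; simplified (it additionally rejects pointer arguments at compile time), the idea is:

#include <stdint.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* Simplified; the kernel's version adds __must_be_array() so that a
 * pointer passed by mistake fails to compile.
 */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const u16 example_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16,
					   35, 70, 140 };

static u16 example_rxpbs_adjust(u32 data)
{
	return data < ARRAY_SIZE(example_rxpbs_table) ?
	       example_rxpbs_table[data] : 0;
}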
@@ -2714,13 +2711,14 @@ static const u8 e1000_emc_therm_limit[4] = {
 	E1000_EMC_DIODE3_THERM_LIMIT
 };
 
+#ifdef CONFIG_IGB_HWMON
 /**
  * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
  * @hw: pointer to hardware structure
  *
  * Updates the temperatures in mac.thermal_sensor_data
  **/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
 {
 	s32 status = E1000_SUCCESS;
 	u16 ets_offset;
@@ -2774,7 +2772,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
  * Sets the thermal sensor thresholds according to the NVM map
  * and save off the threshold and location values into mac.thermal_sensor_data
  **/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
 {
 	s32 status = E1000_SUCCESS;
 	u16 ets_offset;
@@ -2836,6 +2834,7 @@ s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
 	return status;
 }
 
+#endif
 static struct e1000_mac_operations e1000_mac_ops_82575 = {
 	.init_hw = igb_init_hw_82575,
 	.check_for_link = igb_check_for_link_82575,
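Together with the #ifdef CONFIG_IGB_HWMON added before the first thermal helper, this #endif fences both now-static functions so that builds without hwmon support compile neither the code nor an unused-function warning. The pattern in miniature, with an invented name:

#ifdef CONFIG_IGB_HWMON
/* Built, and referenced, only when hwmon support is configured in. */
static int example_read_thermal_data(struct e1000_hw *hw)
{
	/* ... query the EMC thermal sensors ... */
	return 0;
}
#endif /* CONFIG_IGB_HWMON */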
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 8c2437722aad..09d78be72416 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -231,6 +230,10 @@ struct e1000_adv_tx_context_desc {
 #define E1000_VMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
 #define E1000_VMOLR_STRCRC	0x80000000 /* CRC stripping enable */
 
+#define E1000_DVMOLR_HIDEVLAN	0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN	0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC	0x80000000 /* CRC stripping enable */
+
 #define E1000_VLVF_ARRAY_SIZE		32
 #define E1000_VLVF_VLANID_MASK		0x00000FFF
 #define E1000_VLVF_POOLSEL_SHIFT	12
@@ -266,8 +269,7 @@ u16 igb_rxpbs_adjust_82580(u32 data);
 s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
 s32 igb_set_eee_i350(struct e1000_hw *);
 s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
 
 #define E1000_I2C_THERMAL_SENSOR_ADDR	0xF8
 #define E1000_EMC_INTERNAL_DATA		0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 0571b973be80..b05bf925ac72 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -44,7 +43,11 @@
 #define E1000_WUFC_BC	0x00000010 /* Broadcast Wakeup Enable */
 
 /* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Defineable Pin 2 */
 #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Defineable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR	0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR	0x00000800 /* SDP3 Data direction */
+
 /* Physical Func Reset Done Indication */
 #define E1000_CTRL_EXT_PFRSTD	0x00004000
 #define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
@@ -191,7 +194,8 @@
 /* enable link status from external LINK_0 and LINK_1 pins */
 #define E1000_CTRL_SWDPIN0	0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1	0x00080000 /* SWDPIN 1 value */
-#define E1000_CTRL_SWDPIO0	0x00400000 /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SDP0_DIR	0x00400000 /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR	0x00800000 /* SDP1 Data direction */
 #define E1000_CTRL_RST	0x04000000 /* Global reset */
 #define E1000_CTRL_RFCE	0x08000000 /* Receive Flow Control enable */
 #define E1000_CTRL_TFCE	0x10000000 /* Transmit flow control enable */
@@ -529,8 +533,67 @@
 
 #define E1000_TIMINCA_16NS_SHIFT 24
 
-#define E1000_TSICR_TXTS 0x00000002
-#define E1000_TSIM_TXTS 0x00000002
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP	(1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS	(1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS	(1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0	(1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1	(1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0	(1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1	(1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ	(1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS	TSINTR_TXTS
+#define E1000_TSICR_TXTS	TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0	(1 << 0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1	(1 << 1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0	(1 << 2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0	(1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0	(1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1	(1 << 5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1	(1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1	(1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0	(1 << 8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0	(1 << 9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1	(1 << 10) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT1	(1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG	(1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE	(1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0	(0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1	(1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2	(2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3	(3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN	(1 << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0	(0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1	(1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2	(2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3	(3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN	(1 << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0	(0 << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1	(1 << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0	(2 << 6)  /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1	(3 << 6)  /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN	(1 << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0	(0 << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1	(1 << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0	(2 << 9)  /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1	(3 << 9)  /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN	(1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0	(0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1	(1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0	(2 << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1	(3 << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN	(1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0	(0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1	(1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0	(2 << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1	(3 << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN	(1 << 17) /* SDP3 is assigned to Tsync. */
 
 #define E1000_MDICNFG_EXT_MDIO	0x80000000 /* MDI ext/int destination */
 #define E1000_MDICNFG_COM_MDIO	0x40000000 /* MDI shared w/ lan 0 */
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ab99e2b582a8..10741d170f2d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c0393316a3a..db963397cc27 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -35,6 +34,8 @@
 #include "e1000_hw.h"
 #include "e1000_i210.h"
 
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
 /**
  * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
  * @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
  * Return successful if access grant bit set, else clear the request for
  * EEPROM access and return -E1000_ERR_NVM (-1).
  **/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
 {
 	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 }
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
  * Stop any current commands to the EEPROM and clear the EEPROM request bit,
  * then release the semaphores acquired.
  **/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
 {
 	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
 }
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  * Reads a 16 bit word from the Shadow Ram using the EERD register.
  * Uses necessary synchronization semaphores.
  **/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
-			   u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
 {
 	s32 status = E1000_SUCCESS;
 	u16 i, count;
@@ -306,8 +307,8 @@ out:
  * If error code is returned, data and Shadow RAM may be inconsistent - buffer
  * partially written.
  **/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
-			    u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
 {
 	s32 status = E1000_SUCCESS;
 	u16 i, count;
313 u16 i, count; 314 u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
555 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM 556 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
556 * and then verifies that the sum of the EEPROM is equal to 0xBABA. 557 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
557 **/ 558 **/
558s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw) 559static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
559{ 560{
560 s32 status = E1000_SUCCESS; 561 s32 status = E1000_SUCCESS;
561 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); 562 s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
  * up to the checksum. Then calculates the EEPROM checksum and writes the
  * value to the EEPROM. Next commit EEPROM data onto the Flash.
  **/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
 {
 	s32 ret_val = E1000_SUCCESS;
 	u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
  * @hw: pointer to the HW structure
  *
  **/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
 {
 	s32 ret_val = E1000_SUCCESS;
 	u32 flup;
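Because igb_update_flash_i210() is called by igb_update_nvm_checksum_i210() earlier in the file, making it static requires the forward declaration added near the includes above. The same pattern in miniature, with invented names:

static int example_commit_flash(void);	/* forward declaration */

static int example_update_checksum(void)
{
	/* ... recompute and write the checksum ... */
	return example_commit_flash();	/* used before its definition */
}

static int example_commit_flash(void)
{
	/* ... kick off the flash update cycle ... */
	return 0;
}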
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 2d913716573a..907fe99a9813 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
@@ -28,17 +27,8 @@
 #ifndef _E1000_I210_H_
 #define _E1000_I210_H_
 
-s32 igb_update_flash_i210(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
-			    u16 *data);
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
-			   u16 *data);
 s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
 void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-void igb_release_nvm_i210(struct e1000_hw *hw);
 s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
 s32 igb_read_invm_version(struct e1000_hw *hw,
 			  struct e1000_fw_version *invm_ver);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed50670..5910a932ea7c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e4cbe8ef67b3..99299ba8ee3a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447fabf7..d5b121771c31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba41acf3..f52f5515e5a8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3db914..9abf82919c65 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b7419cb98..5b101170b17e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
   more details.
 
   You should have received a copy of the GNU General Public License along with
-  this program; if not, write to the Free Software Foundation, Inc.,
-  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+  this program; if not, see <http://www.gnu.org/licenses/>.
 
   The full GNU General Public License is included in this distribution in
   the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2b74d95138..4009bbab7407 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2013 Intel Corporation.
+  Copyright(c) 2007-2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
394} 393}
395 394
396/** 395/**
397 * e1000_write_sfp_data_byte - Writes SFP module data.
398 * @hw: pointer to the HW structure
399 * @offset: byte location offset to write to
400 * @data: data to write
401 *
402 * Writes one byte to SFP module data stored
403 * in SFP resided EEPROM memory or SFP diagnostic area.
404 * Function should be called with
405 * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
406 * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
407 * access
408 **/
409s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
410{
411 u32 i = 0;
412 u32 i2ccmd = 0;
413 u32 data_local = 0;
414
415 if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
416 hw_dbg("I2CCMD command address exceeds upper limit\n");
417 return -E1000_ERR_PHY;
418 }
419 /* The programming interface is 16 bits wide
420 * so we need to read the whole word first
421 * then update appropriate byte lane and write
422 * the updated word back.
423 */
424 /* Set up Op-code, EEPROM Address,in the I2CCMD
425 * register. The MAC will take care of interfacing
426 * with an EEPROM to write the data given.
427 */
428 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
429 E1000_I2CCMD_OPCODE_READ);
430 /* Set a command to read single word */
431 wr32(E1000_I2CCMD, i2ccmd);
432 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
433 udelay(50);
434 /* Poll the ready bit to see if lastly
435 * launched I2C operation completed
436 */
437 i2ccmd = rd32(E1000_I2CCMD);
438 if (i2ccmd & E1000_I2CCMD_READY) {
439 /* Check if this is READ or WRITE phase */
440 if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
441 E1000_I2CCMD_OPCODE_READ) {
442 /* Write the selected byte
443 * lane and update whole word
444 */
445 data_local = i2ccmd & 0xFF00;
446 data_local |= data;
447 i2ccmd = ((offset <<
448 E1000_I2CCMD_REG_ADDR_SHIFT) |
449 E1000_I2CCMD_OPCODE_WRITE | data_local);
450 wr32(E1000_I2CCMD, i2ccmd);
451 } else {
452 break;
453 }
454 }
455 }
456 if (!(i2ccmd & E1000_I2CCMD_READY)) {
457 hw_dbg("I2CCMD Write did not complete\n");
458 return -E1000_ERR_PHY;
459 }
460 if (i2ccmd & E1000_I2CCMD_ERROR) {
461 hw_dbg("I2CCMD Error bit set\n");
462 return -E1000_ERR_PHY;
463 }
464 return 0;
465}
466
467/**
468 * igb_read_phy_reg_igp - Read igp PHY register 396 * igb_read_phy_reg_igp - Read igp PHY register
469 * @hw: pointer to the HW structure 397 * @hw: pointer to the HW structure
470 * @offset: register offset to be read 398 * @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f2095a..4c2c36c46a73 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
70s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); 69s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
71s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); 70s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
72s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); 71s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
73s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
74s32 igb_copper_link_setup_82580(struct e1000_hw *hw); 72s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
75s32 igb_get_phy_info_82580(struct e1000_hw *hw); 73s32 igb_get_phy_info_82580(struct e1000_hw *hw);
76s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw); 74s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6c53af..bdb246e848e1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -41,6 +40,7 @@
41#define E1000_FCT 0x00030 /* Flow Control Type - RW */ 40#define E1000_FCT 0x00030 /* Flow Control Type - RW */
42#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ 41#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
43#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ 42#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
43#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */
44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ 44#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */
45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ 45#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ 46#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */
@@ -102,6 +102,14 @@
102#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ 102#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
103#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ 103#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
104#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ 104#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */
105#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */
106#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */
107#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */
108#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */
109#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */
110#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
111#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */
112#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
105#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ 113#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */
106#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ 114#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */
107#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ 115#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */
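Note: the target-time and auxiliary-timestamp registers added above are the hardware side of SDP-pin time sync features (periodic outputs and external event timestamps). A userspace sketch of how such a feature is exercised once a driver wires it up; this assumes the igb PTP clock is /dev/ptp0 and that periodic-output support is present, which arrives with the later SDP patches, not this register-definition hunk:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;	/* backed by E1000_TRGTTIM{L,H}0 */
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.index = 0;		/* first periodic-output channel */
	req.period.sec = 1;	/* one pulse per second */
	if (ioctl(fd, PTP_PEROUT_REQUEST, &req))
		perror("PTP_PEROUT_REQUEST");
	close(fd);
	return 0;
}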
@@ -349,16 +357,30 @@
349#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 357#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
350#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) 358#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
351#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) 359#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
360#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
352#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine 361#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
353 * Filter - RW */ 362 * Filter - RW */
354#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) 363#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
355 364
356#define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 365struct e1000_hw;
357#define rd32(reg) (readl(hw->hw_addr + reg)) 366
367u32 igb_rd32(struct e1000_hw *hw, u32 reg);
368
369/* write operations, indexed using DWORDS */
370#define wr32(reg, val) \
371do { \
372 u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
373 if (!E1000_REMOVED(hw_addr)) \
374 writel((val), &hw_addr[(reg)]); \
375} while (0)
376
377#define rd32(reg) (igb_rd32(hw, reg))
378
358#define wrfl() ((void)rd32(E1000_STATUS)) 379#define wrfl() ((void)rd32(E1000_STATUS))
359 380
360#define array_wr32(reg, offset, value) \ 381#define array_wr32(reg, offset, value) \
361 (writel(value, hw->hw_addr + reg + ((offset) << 2))) 382 wr32((reg) + ((offset) << 2), (value))
383
362#define array_rd32(reg, offset) \ 384#define array_rd32(reg, offset) \
363 (readl(hw->hw_addr + reg + ((offset) << 2))) 385 (readl(hw->hw_addr + reg + ((offset) << 2)))
364 386
@@ -397,4 +419,6 @@
397#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) 419#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n))
398#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ 420#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */
399 421
422#define E1000_REMOVED(h) unlikely(!(h))
423
400#endif 424#endif
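Note: the macro rewrite above is a surprise-removal guard; every write snapshots hw->hw_addr once with ACCESS_ONCE() and silently drops the access when the mapping has been cleared. A minimal sketch of why the local snapshot matters (igb_enable_example_irq is an invented caller, not driver code):

/* Without the snapshot, hw->hw_addr could be cleared by igb_rd32()
 * (on PCIe link loss) between the NULL test and the writel(), and
 * the write would dereference a stale or NULL pointer. */
static void igb_enable_example_irq(struct e1000_hw *hw)
{
	u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);	/* read once */

	if (E1000_REMOVED(hw_addr))
		return;		/* device gone: drop the MMIO write */
	writel(E1000_IMS_TS, &hw_addr[E1000_IMS]);	/* uses the local copy */
}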
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ccf472f073dd..411b213c63be 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -42,6 +41,7 @@
42#include <linux/i2c.h> 41#include <linux/i2c.h>
43#include <linux/i2c-algo-bit.h> 42#include <linux/i2c-algo-bit.h>
44#include <linux/pci.h> 43#include <linux/pci.h>
44#include <linux/mdio.h>
45 45
46struct igb_adapter; 46struct igb_adapter;
47 47
@@ -434,6 +434,7 @@ struct igb_adapter {
434 struct delayed_work ptp_overflow_work; 434 struct delayed_work ptp_overflow_work;
435 struct work_struct ptp_tx_work; 435 struct work_struct ptp_tx_work;
436 struct sk_buff *ptp_tx_skb; 436 struct sk_buff *ptp_tx_skb;
437 struct hwtstamp_config tstamp_config;
437 unsigned long ptp_tx_start; 438 unsigned long ptp_tx_start;
438 unsigned long last_rx_ptp_check; 439 unsigned long last_rx_ptp_check;
439 spinlock_t tmreg_lock; 440 spinlock_t tmreg_lock;
@@ -456,6 +457,7 @@ struct igb_adapter {
456 unsigned long link_check_timeout; 457 unsigned long link_check_timeout;
457 int copper_tries; 458 int copper_tries;
458 struct e1000_info ei; 459 struct e1000_info ei;
460 u16 eee_advert;
459}; 461};
460 462
461#define IGB_FLAG_HAS_MSI (1 << 0) 463#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -472,6 +474,7 @@ struct igb_adapter {
472#define IGB_FLAG_MAS_CAPABLE (1 << 11) 474#define IGB_FLAG_MAS_CAPABLE (1 << 11)
473#define IGB_FLAG_MAS_ENABLE (1 << 12) 475#define IGB_FLAG_MAS_ENABLE (1 << 12)
474#define IGB_FLAG_HAS_MSIX (1 << 13) 476#define IGB_FLAG_HAS_MSIX (1 << 13)
477#define IGB_FLAG_EEE (1 << 14)
475 478
476/* Media Auto Sense */ 479/* Media Auto Sense */
477#define IGB_MAS_ENABLE_0 0X0001 480#define IGB_MAS_ENABLE_0 0X0001
@@ -525,9 +528,7 @@ void igb_set_fw_version(struct igb_adapter *);
525void igb_ptp_init(struct igb_adapter *adapter); 528void igb_ptp_init(struct igb_adapter *adapter);
526void igb_ptp_stop(struct igb_adapter *adapter); 529void igb_ptp_stop(struct igb_adapter *adapter);
527void igb_ptp_reset(struct igb_adapter *adapter); 530void igb_ptp_reset(struct igb_adapter *adapter);
528void igb_ptp_tx_work(struct work_struct *work);
529void igb_ptp_rx_hang(struct igb_adapter *adapter); 531void igb_ptp_rx_hang(struct igb_adapter *adapter);
530void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
531void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); 532void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
532void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, 533void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
533 struct sk_buff *skb); 534 struct sk_buff *skb);
@@ -545,8 +546,8 @@ static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
545 rx_ring->last_rx_timestamp = jiffies; 546 rx_ring->last_rx_timestamp = jiffies;
546} 547}
547 548
548int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, 549int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
549 int cmd); 550int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
550#ifdef CONFIG_IGB_HWMON 551#ifdef CONFIG_IGB_HWMON
551void igb_sysfs_exit(struct igb_adapter *adapter); 552void igb_sysfs_exit(struct igb_adapter *adapter);
552int igb_sysfs_init(struct igb_adapter *adapter); 553int igb_sysfs_init(struct igb_adapter *adapter);
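Note: the new eee_advert field (and the linux/mdio.h include) keep the EEE advertisement in MMD register 7.60 layout, converting to ethtool link-mode bits only for reporting. A minimal sketch of the conversion, using the in-kernel helper named in the ethtool hunks below (igb_example_eee_bits is invented):

#include <linux/mdio.h>

static u32 igb_example_eee_bits(void)
{
	/* MMD EEE advertisement word: 100BASE-TX + 1000BASE-T EEE */
	u16 eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	/* maps to ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full */
	return mmd_eee_adv_to_ethtool_adv_t(eee_advert);
}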
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1df02378de69..e5570acbeea8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -2274,15 +2273,15 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2274 2273
2275 ring = adapter->tx_ring[j]; 2274 ring = adapter->tx_ring[j];
2276 do { 2275 do {
2277 start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 2276 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
2278 data[i] = ring->tx_stats.packets; 2277 data[i] = ring->tx_stats.packets;
2279 data[i+1] = ring->tx_stats.bytes; 2278 data[i+1] = ring->tx_stats.bytes;
2280 data[i+2] = ring->tx_stats.restart_queue; 2279 data[i+2] = ring->tx_stats.restart_queue;
2281 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 2280 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
2282 do { 2281 do {
2283 start = u64_stats_fetch_begin_bh(&ring->tx_syncp2); 2282 start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
2284 restart2 = ring->tx_stats.restart_queue2; 2283 restart2 = ring->tx_stats.restart_queue2;
2285 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start)); 2284 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
2286 data[i+2] += restart2; 2285 data[i+2] += restart2;
2287 2286
2288 i += IGB_TX_QUEUE_STATS_LEN; 2287 i += IGB_TX_QUEUE_STATS_LEN;
@@ -2290,13 +2289,13 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
2290 for (j = 0; j < adapter->num_rx_queues; j++) { 2289 for (j = 0; j < adapter->num_rx_queues; j++) {
2291 ring = adapter->rx_ring[j]; 2290 ring = adapter->rx_ring[j];
2292 do { 2291 do {
2293 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 2292 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
2294 data[i] = ring->rx_stats.packets; 2293 data[i] = ring->rx_stats.packets;
2295 data[i+1] = ring->rx_stats.bytes; 2294 data[i+1] = ring->rx_stats.bytes;
2296 data[i+2] = ring->rx_stats.drops; 2295 data[i+2] = ring->rx_stats.drops;
2297 data[i+3] = ring->rx_stats.csum_err; 2296 data[i+3] = ring->rx_stats.csum_err;
2298 data[i+4] = ring->rx_stats.alloc_failed; 2297 data[i+4] = ring->rx_stats.alloc_failed;
2299 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 2298 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
2300 i += IGB_RX_QUEUE_STATS_LEN; 2299 i += IGB_RX_QUEUE_STATS_LEN;
2301 } 2300 }
2302 spin_unlock(&adapter->stats64_lock); 2301 spin_unlock(&adapter->stats64_lock);
@@ -2354,6 +2353,11 @@ static int igb_get_ts_info(struct net_device *dev,
2354{ 2353{
2355 struct igb_adapter *adapter = netdev_priv(dev); 2354 struct igb_adapter *adapter = netdev_priv(dev);
2356 2355
2356 if (adapter->ptp_clock)
2357 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2358 else
2359 info->phc_index = -1;
2360
2357 switch (adapter->hw.mac.type) { 2361 switch (adapter->hw.mac.type) {
2358 case e1000_82575: 2362 case e1000_82575:
2359 info->so_timestamping = 2363 info->so_timestamping =
@@ -2375,11 +2379,6 @@ static int igb_get_ts_info(struct net_device *dev,
2375 SOF_TIMESTAMPING_RX_HARDWARE | 2379 SOF_TIMESTAMPING_RX_HARDWARE |
2376 SOF_TIMESTAMPING_RAW_HARDWARE; 2380 SOF_TIMESTAMPING_RAW_HARDWARE;
2377 2381
2378 if (adapter->ptp_clock)
2379 info->phc_index = ptp_clock_index(adapter->ptp_clock);
2380 else
2381 info->phc_index = -1;
2382
2383 info->tx_types = 2382 info->tx_types =
2384 (1 << HWTSTAMP_TX_OFF) | 2383 (1 << HWTSTAMP_TX_OFF) |
2385 (1 << HWTSTAMP_TX_ON); 2384 (1 << HWTSTAMP_TX_ON);
@@ -2588,7 +2587,7 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2588{ 2587{
2589 struct igb_adapter *adapter = netdev_priv(netdev); 2588 struct igb_adapter *adapter = netdev_priv(netdev);
2590 struct e1000_hw *hw = &adapter->hw; 2589 struct e1000_hw *hw = &adapter->hw;
2591 u32 ipcnfg, eeer, ret_val; 2590 u32 ret_val;
2592 u16 phy_data; 2591 u16 phy_data;
2593 2592
2594 if ((hw->mac.type < e1000_i350) || 2593 if ((hw->mac.type < e1000_i350) ||
@@ -2597,16 +2596,25 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2597 2596
2598 edata->supported = (SUPPORTED_1000baseT_Full | 2597 edata->supported = (SUPPORTED_1000baseT_Full |
2599 SUPPORTED_100baseT_Full); 2598 SUPPORTED_100baseT_Full);
2599 if (!hw->dev_spec._82575.eee_disable)
2600 edata->advertised =
2601 mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
2602
2603 /* The IPCNFG and EEER registers are not supported on I354. */
2604 if (hw->mac.type == e1000_i354) {
2605 igb_get_eee_status_i354(hw, (bool *)&edata->eee_active);
2606 } else {
2607 u32 eeer;
2600 2608
2601 ipcnfg = rd32(E1000_IPCNFG); 2609 eeer = rd32(E1000_EEER);
2602 eeer = rd32(E1000_EEER);
2603 2610
2604 /* EEE status on negotiated link */ 2611 /* EEE status on negotiated link */
2605 if (ipcnfg & E1000_IPCNFG_EEE_1G_AN) 2612 if (eeer & E1000_EEER_EEE_NEG)
2606 edata->advertised = ADVERTISED_1000baseT_Full; 2613 edata->eee_active = true;
2607 2614
2608 if (ipcnfg & E1000_IPCNFG_EEE_100M_AN) 2615 if (eeer & E1000_EEER_TX_LPI_EN)
2609 edata->advertised |= ADVERTISED_100baseT_Full; 2616 edata->tx_lpi_enabled = true;
2617 }
2610 2618
2611 /* EEE Link Partner Advertised */ 2619 /* EEE Link Partner Advertised */
2612 switch (hw->mac.type) { 2620 switch (hw->mac.type) {
@@ -2617,8 +2625,8 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2617 return -ENODATA; 2625 return -ENODATA;
2618 2626
2619 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); 2627 edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
2620
2621 break; 2628 break;
2629 case e1000_i354:
2622 case e1000_i210: 2630 case e1000_i210:
2623 case e1000_i211: 2631 case e1000_i211:
2624 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210, 2632 ret_val = igb_read_xmdio_reg(hw, E1000_EEE_LP_ADV_ADDR_I210,
@@ -2634,12 +2642,10 @@ static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
2634 break; 2642 break;
2635 } 2643 }
2636 2644
2637 if (eeer & E1000_EEER_EEE_NEG)
2638 edata->eee_active = true;
2639
2640 edata->eee_enabled = !hw->dev_spec._82575.eee_disable; 2645 edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
2641 2646
2642 if (eeer & E1000_EEER_TX_LPI_EN) 2647 if ((hw->mac.type == e1000_i354) &&
2648 (edata->eee_enabled))
2643 edata->tx_lpi_enabled = true; 2649 edata->tx_lpi_enabled = true;
2644 2650
2645 /* Report correct negotiated EEE status for devices that 2651 /* Report correct negotiated EEE status for devices that
@@ -2687,9 +2693,10 @@ static int igb_set_eee(struct net_device *netdev,
2687 return -EINVAL; 2693 return -EINVAL;
2688 } 2694 }
2689 2695
2690 if (eee_curr.advertised != edata->advertised) { 2696 if (edata->advertised &
2697 ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
2691 dev_err(&adapter->pdev->dev, 2698 dev_err(&adapter->pdev->dev,
2692 "Setting EEE Advertisement is not supported\n"); 2699 "EEE Advertisement supports only 100Tx and or 100T full duplex\n");
2693 return -EINVAL; 2700 return -EINVAL;
2694 } 2701 }
2695 2702
@@ -2699,9 +2706,14 @@ static int igb_set_eee(struct net_device *netdev,
2699 return -EINVAL; 2706 return -EINVAL;
2700 } 2707 }
2701 2708
2709 adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
2702 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) { 2710 if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
2703 hw->dev_spec._82575.eee_disable = !edata->eee_enabled; 2711 hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
2704 igb_set_eee_i350(hw); 2712 adapter->flags |= IGB_FLAG_EEE;
2713 if (hw->mac.type == e1000_i350)
2714 igb_set_eee_i350(hw);
2715 else
2716 igb_set_eee_i354(hw);
2705 2717
2706 /* reset link */ 2718 /* reset link */
2707 if (netif_running(netdev)) 2719 if (netif_running(netdev))
@@ -2779,9 +2791,11 @@ static int igb_get_module_eeprom(struct net_device *netdev,
2779 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */ 2791 /* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
2780 for (i = 0; i < last_word - first_word + 1; i++) { 2792 for (i = 0; i < last_word - first_word + 1; i++) {
2781 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]); 2793 status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
2782 if (status != E1000_SUCCESS) 2794 if (status != E1000_SUCCESS) {
2783 /* Error occurred while reading module */ 2795 /* Error occurred while reading module */
2796 kfree(dataword);
2784 return -EIO; 2797 return -EIO;
2798 }
2785 2799
2786 be16_to_cpus(&dataword[i]); 2800 be16_to_cpus(&dataword[i]);
2787 } 2801 }
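Note: the _bh to _irq renames in this file track a kernel-wide u64_stats API change; the reader-side pattern itself is unchanged. A sketch of that pattern with the same field names as the hunks above; on 32-bit machines 64-bit counter loads can tear, so the reader retries until the seqcount shows no writer ran mid-read:

unsigned int start;
u64 packets, bytes;

do {
	start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
	packets = ring->rx_stats.packets;
	bytes   = ring->rx_stats.bytes;
} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));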
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index e0af5bc61613..8333f67acf96 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a49f5ea..cd20409858d1 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) Gigabit Ethernet Linux driver 3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007-2013 Intel Corporation. 4 Copyright(c) 2007-2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, see <http://www.gnu.org/licenses/>.
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 17
19 The full GNU General Public License is included in this distribution in 18 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 19 the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
70static const char igb_driver_string[] = 69static const char igb_driver_string[] =
71 "Intel(R) Gigabit Ethernet Network Driver"; 70 "Intel(R) Gigabit Ethernet Network Driver";
72static const char igb_copyright[] = 71static const char igb_copyright[] =
73 "Copyright (c) 2007-2013 Intel Corporation."; 72 "Copyright (c) 2007-2014 Intel Corporation.";
74 73
75static const struct e1000_info *igb_info_tbl[] = { 74static const struct e1000_info *igb_info_tbl[] = {
76 [board_82575] = &e1000_82575_info, 75 [board_82575] = &e1000_82575_info,
@@ -752,6 +751,28 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
752 } 751 }
753} 752}
754 753
754u32 igb_rd32(struct e1000_hw *hw, u32 reg)
755{
756 struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
757 u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
758 u32 value = 0;
759
760 if (E1000_REMOVED(hw_addr))
761 return ~value;
762
763 value = readl(&hw_addr[reg]);
764
765 /* reads should not return all F's */
766 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
767 struct net_device *netdev = igb->netdev;
768 hw->hw_addr = NULL;
769 netif_device_detach(netdev);
770 netdev_err(netdev, "PCIe link lost, device now detached\n");
771 }
772
773 return value;
774}
775
755/** 776/**
756 * igb_write_ivar - configure ivar for given MSI-X vector 777 * igb_write_ivar - configure ivar for given MSI-X vector
757 * @hw: pointer to the HW structure 778 * @hw: pointer to the HW structure
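Note: igb_rd32() above centralizes read-side detection of a dead PCIe link; reads from a removed device complete as all ones, so an all-ones value is only trusted as "device gone" after register 0 also reads all ones. A small standalone model of that heuristic (mmio_read() stands in for readl(); the register values are made up):

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio_read(const uint32_t *bar, unsigned int reg)
{
	return bar ? bar[reg / 4] : 0xFFFFFFFFu;	/* stand-in for readl() */
}

int main(void)
{
	/* reg 0 holds a plausible CTRL value; reg 4 really is all ones */
	uint32_t fake_bar[2] = { 0x00100248u, 0xFFFFFFFFu };
	uint32_t value = mmio_read(fake_bar, 4);

	if (value == 0xFFFFFFFFu && mmio_read(fake_bar, 0) == 0xFFFFFFFFu)
		puts("PCIe link lost: detach the device");
	else
		puts("all-ones value, but the device is still there");
	return 0;
}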
@@ -1014,6 +1035,12 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
1014{ 1035{
1015 struct igb_q_vector *q_vector = adapter->q_vector[v_idx]; 1036 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
1016 1037
1038 /* Coming from igb_set_interrupt_capability, the vectors are not yet
1039 * allocated. So, q_vector is NULL so we should stop here.
1040 */
1041 if (!q_vector)
1042 return;
1043
1017 if (q_vector->tx.ring) 1044 if (q_vector->tx.ring)
1018 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; 1045 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
1019 1046
@@ -1111,16 +1138,18 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1111 for (i = 0; i < numvecs; i++) 1138 for (i = 0; i < numvecs; i++)
1112 adapter->msix_entries[i].entry = i; 1139 adapter->msix_entries[i].entry = i;
1113 1140
1114 err = pci_enable_msix(adapter->pdev, 1141 err = pci_enable_msix_range(adapter->pdev,
1115 adapter->msix_entries, 1142 adapter->msix_entries,
1116 numvecs); 1143 numvecs,
1117 if (err == 0) 1144 numvecs);
1145 if (err > 0)
1118 return; 1146 return;
1119 1147
1120 igb_reset_interrupt_capability(adapter); 1148 igb_reset_interrupt_capability(adapter);
1121 1149
1122 /* If we can't do MSI-X, try MSI */ 1150 /* If we can't do MSI-X, try MSI */
1123msi_only: 1151msi_only:
1152 adapter->flags &= ~IGB_FLAG_HAS_MSIX;
1124#ifdef CONFIG_PCI_IOV 1153#ifdef CONFIG_PCI_IOV
1125 /* disable SR-IOV for non MSI-X configurations */ 1154 /* disable SR-IOV for non MSI-X configurations */
1126 if (adapter->vf_data) { 1155 if (adapter->vf_data) {
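Note: pci_enable_msix_range() folds the old "positive return means retry with fewer vectors" dance into the API; it returns the number of vectors granted (within [minvec, maxvec]) or a negative errno, never a retry hint. With min == max as here, err > 0 simply means "got exactly numvecs". A sketch of the contract (igb_try_msix is an invented wrapper, not driver code):

#include <linux/pci.h>

static int igb_try_msix(struct pci_dev *pdev, struct msix_entry *entries,
			int numvecs)
{
	int err = pci_enable_msix_range(pdev, entries, numvecs, numvecs);

	if (err < 0)
		return err;	/* caller falls back to MSI / legacy */
	return 0;		/* exactly numvecs vectors were granted */
}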
@@ -1726,6 +1755,10 @@ int igb_up(struct igb_adapter *adapter)
1726 hw->mac.get_link_status = 1; 1755 hw->mac.get_link_status = 1;
1727 schedule_work(&adapter->watchdog_task); 1756 schedule_work(&adapter->watchdog_task);
1728 1757
1758 if ((adapter->flags & IGB_FLAG_EEE) &&
1759 (!hw->dev_spec._82575.eee_disable))
1760 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
1761
1729 return 0; 1762 return 0;
1730} 1763}
1731 1764
@@ -1974,6 +2007,21 @@ void igb_reset(struct igb_adapter *adapter)
1974 } 2007 }
1975 } 2008 }
1976#endif 2009#endif
2010 /* Re-establish EEE setting */
2011 if (hw->phy.media_type == e1000_media_type_copper) {
2012 switch (mac->type) {
2013 case e1000_i350:
2014 case e1000_i210:
2015 case e1000_i211:
2016 igb_set_eee_i350(hw);
2017 break;
2018 case e1000_i354:
2019 igb_set_eee_i354(hw);
2020 break;
2021 default:
2022 break;
2023 }
2024 }
1977 if (!netif_running(adapter->netdev)) 2025 if (!netif_running(adapter->netdev))
1978 igb_power_down_link(adapter); 2026 igb_power_down_link(adapter);
1979 2027
@@ -2560,23 +2608,36 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2560 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" : 2608 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
2561 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy", 2609 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
2562 adapter->num_rx_queues, adapter->num_tx_queues); 2610 adapter->num_rx_queues, adapter->num_tx_queues);
2563 switch (hw->mac.type) { 2611 if (hw->phy.media_type == e1000_media_type_copper) {
2564 case e1000_i350: 2612 switch (hw->mac.type) {
2565 case e1000_i210: 2613 case e1000_i350:
2566 case e1000_i211: 2614 case e1000_i210:
2567 igb_set_eee_i350(hw); 2615 case e1000_i211:
2568 break; 2616 /* Enable EEE for internal copper PHY devices */
2569 case e1000_i354: 2617 err = igb_set_eee_i350(hw);
2570 if (hw->phy.media_type == e1000_media_type_copper) { 2618 if ((!err) &&
2619 (!hw->dev_spec._82575.eee_disable)) {
2620 adapter->eee_advert =
2621 MDIO_EEE_100TX | MDIO_EEE_1000T;
2622 adapter->flags |= IGB_FLAG_EEE;
2623 }
2624 break;
2625 case e1000_i354:
2571 if ((rd32(E1000_CTRL_EXT) & 2626 if ((rd32(E1000_CTRL_EXT) &
2572 E1000_CTRL_EXT_LINK_MODE_SGMII)) 2627 E1000_CTRL_EXT_LINK_MODE_SGMII)) {
2573 igb_set_eee_i354(hw); 2628 err = igb_set_eee_i354(hw);
2629 if ((!err) &&
2630 (!hw->dev_spec._82575.eee_disable)) {
2631 adapter->eee_advert =
2632 MDIO_EEE_100TX | MDIO_EEE_1000T;
2633 adapter->flags |= IGB_FLAG_EEE;
2634 }
2635 }
2636 break;
2637 default:
2638 break;
2574 } 2639 }
2575 break;
2576 default:
2577 break;
2578 } 2640 }
2579
2580 pm_runtime_put_noidle(&pdev->dev); 2641 pm_runtime_put_noidle(&pdev->dev);
2581 return 0; 2642 return 0;
2582 2643
@@ -3510,6 +3571,13 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3510 3571
3511 vmolr = rd32(E1000_VMOLR(vfn)); 3572 vmolr = rd32(E1000_VMOLR(vfn));
3512 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 3573 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3574 if (hw->mac.type == e1000_i350) {
3575 u32 dvmolr;
3576
3577 dvmolr = rd32(E1000_DVMOLR(vfn));
3578 dvmolr |= E1000_DVMOLR_STRVLAN;
3579 wr32(E1000_DVMOLR(vfn), dvmolr);
3580 }
3513 if (aupe) 3581 if (aupe)
3514 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 3582 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3515 else 3583 else
@@ -4158,6 +4226,15 @@ static void igb_watchdog_task(struct work_struct *work)
4158 (ctrl & E1000_CTRL_RFCE) ? "RX" : 4226 (ctrl & E1000_CTRL_RFCE) ? "RX" :
4159 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None"); 4227 (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
4160 4228
4229 /* disable EEE if enabled */
4230 if ((adapter->flags & IGB_FLAG_EEE) &&
4231 (adapter->link_duplex == HALF_DUPLEX)) {
4232 dev_info(&adapter->pdev->dev,
4233 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
4234 adapter->hw.dev_spec._82575.eee_disable = true;
4235 adapter->flags &= ~IGB_FLAG_EEE;
4236 }
4237
4161 /* check if SmartSpeed worked */ 4238 /* check if SmartSpeed worked */
4162 igb_check_downshift(hw); 4239 igb_check_downshift(hw);
4163 if (phy->speed_downgraded) 4240 if (phy->speed_downgraded)
@@ -4306,8 +4383,7 @@ enum latency_range {
4306 * were determined based on theoretical maximum wire speed and testing 4383 * were determined based on theoretical maximum wire speed and testing
4307 * data, in order to minimize response time while increasing bulk 4384 * data, in order to minimize response time while increasing bulk
4308 * throughput. 4385 * throughput.
4309 * This functionality is controlled by the InterruptThrottleRate module 4386 * This functionality is controlled by ethtool's coalescing settings.
4310 * parameter (see igb_param.c)
4311 * NOTE: This function is called only when operating in a multiqueue 4387 * NOTE: This function is called only when operating in a multiqueue
4312 * receive environment. 4388 * receive environment.
4313 **/ 4389 **/
@@ -4381,8 +4457,7 @@ clear_counts:
4381 * based on theoretical maximum wire speed and thresholds were set based 4457 * based on theoretical maximum wire speed and thresholds were set based
4382 * on testing data as well as attempting to minimize response time 4458 * on testing data as well as attempting to minimize response time
4383 * while increasing bulk throughput. 4459 * while increasing bulk throughput.
4384 * this functionality is controlled by the InterruptThrottleRate module 4460 * This functionality is controlled by ethtool's coalescing settings.
4385 * parameter (see igb_param.c)
4386 * NOTE: These calculations are only valid when operating in a single- 4461 * NOTE: These calculations are only valid when operating in a single-
4387 * queue environment. 4462 * queue environment.
4388 **/ 4463 **/
@@ -4546,7 +4621,7 @@ static int igb_tso(struct igb_ring *tx_ring,
4546 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 4621 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4547 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; 4622 type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
4548 4623
4549 if (first->protocol == __constant_htons(ETH_P_IP)) { 4624 if (first->protocol == htons(ETH_P_IP)) {
4550 struct iphdr *iph = ip_hdr(skb); 4625 struct iphdr *iph = ip_hdr(skb);
4551 iph->tot_len = 0; 4626 iph->tot_len = 0;
4552 iph->check = 0; 4627 iph->check = 0;
@@ -4602,12 +4677,12 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4602 } else { 4677 } else {
4603 u8 l4_hdr = 0; 4678 u8 l4_hdr = 0;
4604 switch (first->protocol) { 4679 switch (first->protocol) {
4605 case __constant_htons(ETH_P_IP): 4680 case htons(ETH_P_IP):
4606 vlan_macip_lens |= skb_network_header_len(skb); 4681 vlan_macip_lens |= skb_network_header_len(skb);
4607 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; 4682 type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
4608 l4_hdr = ip_hdr(skb)->protocol; 4683 l4_hdr = ip_hdr(skb)->protocol;
4609 break; 4684 break;
4610 case __constant_htons(ETH_P_IPV6): 4685 case htons(ETH_P_IPV6):
4611 vlan_macip_lens |= skb_network_header_len(skb); 4686 vlan_macip_lens |= skb_network_header_len(skb);
4612 l4_hdr = ipv6_hdr(skb)->nexthdr; 4687 l4_hdr = ipv6_hdr(skb)->nexthdr;
4613 break; 4688 break;
@@ -5127,10 +5202,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5127 } 5202 }
5128 5203
5129 do { 5204 do {
5130 start = u64_stats_fetch_begin_bh(&ring->rx_syncp); 5205 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
5131 _bytes = ring->rx_stats.bytes; 5206 _bytes = ring->rx_stats.bytes;
5132 _packets = ring->rx_stats.packets; 5207 _packets = ring->rx_stats.packets;
5133 } while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start)); 5208 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
5134 bytes += _bytes; 5209 bytes += _bytes;
5135 packets += _packets; 5210 packets += _packets;
5136 } 5211 }
@@ -5143,10 +5218,10 @@ void igb_update_stats(struct igb_adapter *adapter,
5143 for (i = 0; i < adapter->num_tx_queues; i++) { 5218 for (i = 0; i < adapter->num_tx_queues; i++) {
5144 struct igb_ring *ring = adapter->tx_ring[i]; 5219 struct igb_ring *ring = adapter->tx_ring[i];
5145 do { 5220 do {
5146 start = u64_stats_fetch_begin_bh(&ring->tx_syncp); 5221 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
5147 _bytes = ring->tx_stats.bytes; 5222 _bytes = ring->tx_stats.bytes;
5148 _packets = ring->tx_stats.packets; 5223 _packets = ring->tx_stats.packets;
5149 } while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start)); 5224 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
5150 bytes += _bytes; 5225 bytes += _bytes;
5151 packets += _packets; 5226 packets += _packets;
5152 } 5227 }
@@ -6620,7 +6695,9 @@ static inline void igb_rx_hash(struct igb_ring *ring,
6620 struct sk_buff *skb) 6695 struct sk_buff *skb)
6621{ 6696{
6622 if (ring->netdev->features & NETIF_F_RXHASH) 6697 if (ring->netdev->features & NETIF_F_RXHASH)
6623 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 6698 skb_set_hash(skb,
6699 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
6700 PKT_HASH_TYPE_L3);
6624} 6701}
6625 6702
6626/** 6703/**
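Note: skb_set_hash() records the hash type alongside the value, replacing the raw skb->rxhash store; PKT_HASH_TYPE_L3 tells the stack the hash covers only network-layer addresses, so it will not be reused where a full 4-tuple (L4) flow hash is required. A sketch with the same names as the hunk above (example_rx_hash is invented):

#include <linux/skbuff.h>

static void example_rx_hash(struct sk_buff *skb, __le32 rss)
{
	/* hash value from the RX descriptor, declared as an L3-only hash */
	skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}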
@@ -6690,7 +6767,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6690 hdr.network += ETH_HLEN; 6767 hdr.network += ETH_HLEN;
6691 6768
6692 /* handle any vlan tag if present */ 6769 /* handle any vlan tag if present */
6693 if (protocol == __constant_htons(ETH_P_8021Q)) { 6770 if (protocol == htons(ETH_P_8021Q)) {
6694 if ((hdr.network - data) > (max_len - VLAN_HLEN)) 6771 if ((hdr.network - data) > (max_len - VLAN_HLEN))
6695 return max_len; 6772 return max_len;
6696 6773
@@ -6699,7 +6776,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6699 } 6776 }
6700 6777
6701 /* handle L3 protocols */ 6778 /* handle L3 protocols */
6702 if (protocol == __constant_htons(ETH_P_IP)) { 6779 if (protocol == htons(ETH_P_IP)) {
6703 if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) 6780 if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
6704 return max_len; 6781 return max_len;
6705 6782
@@ -6713,7 +6790,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6713 /* record next protocol if header is present */ 6790 /* record next protocol if header is present */
6714 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) 6791 if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
6715 nexthdr = hdr.ipv4->protocol; 6792 nexthdr = hdr.ipv4->protocol;
6716 } else if (protocol == __constant_htons(ETH_P_IPV6)) { 6793 } else if (protocol == htons(ETH_P_IPV6)) {
6717 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) 6794 if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
6718 return max_len; 6795 return max_len;
6719 6796
@@ -6903,7 +6980,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6903 unsigned int total_bytes = 0, total_packets = 0; 6980 unsigned int total_bytes = 0, total_packets = 0;
6904 u16 cleaned_count = igb_desc_unused(rx_ring); 6981 u16 cleaned_count = igb_desc_unused(rx_ring);
6905 6982
6906 do { 6983 while (likely(total_packets < budget)) {
6907 union e1000_adv_rx_desc *rx_desc; 6984 union e1000_adv_rx_desc *rx_desc;
6908 6985
6909 /* return some buffers to hardware, one at a time is too slow */ 6986 /* return some buffers to hardware, one at a time is too slow */
@@ -6955,7 +7032,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
6955 7032
6956 /* update budget accounting */ 7033 /* update budget accounting */
6957 total_packets++; 7034 total_packets++;
6958 } while (likely(total_packets < budget)); 7035 }
6959 7036
6960 /* place incomplete frames back on ring for completion */ 7037 /* place incomplete frames back on ring for completion */
6961 rx_ring->skb = skb; 7038 rx_ring->skb = skb;
@@ -7114,8 +7191,10 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7114 case SIOCGMIIREG: 7191 case SIOCGMIIREG:
7115 case SIOCSMIIREG: 7192 case SIOCSMIIREG:
7116 return igb_mii_ioctl(netdev, ifr, cmd); 7193 return igb_mii_ioctl(netdev, ifr, cmd);
7194 case SIOCGHWTSTAMP:
7195 return igb_ptp_get_ts_config(netdev, ifr);
7117 case SIOCSHWTSTAMP: 7196 case SIOCSHWTSTAMP:
7118 return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd); 7197 return igb_ptp_set_ts_config(netdev, ifr);
7119 default: 7198 default:
7120 return -EOPNOTSUPP; 7199 return -EOPNOTSUPP;
7121 } 7200 }
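Note: with SIOCGHWTSTAMP wired up above, userspace can read back the current timestamping configuration without the set-ioctl's side effects. A runnable sketch of the caller side; it assumes a kernel with SIOCGHWTSTAMP support (3.14+) and an igb interface named eth0:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	memset(&cfg, 0, sizeof(cfg));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)	/* read-only query */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	close(fd);
	return 0;
}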
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3dc535d..da55fbb090b2 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along 15 * You should have received a copy of the GNU General Public License along with
16 * with this program; if not, write to the Free Software Foundation, Inc., 16 * this program; if not, see <http://www.gnu.org/licenses/>.
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 */ 17 */
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/device.h> 19#include <linux/device.h>
@@ -75,6 +74,8 @@
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) 74#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40 75#define IGB_NBITS_82580 40
77 76
77static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
78
78/* SYSTIM read access for the 82576 */ 79/* SYSTIM read access for the 82576 */
79static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc) 80static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
80{ 81{
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
372 * This work function polls the TSYNCTXCTL valid bit to determine when a 373 * This work function polls the TSYNCTXCTL valid bit to determine when a
373 * timestamp has been taken for the current stored skb. 374 * timestamp has been taken for the current stored skb.
374 **/ 375 **/
375void igb_ptp_tx_work(struct work_struct *work) 376static void igb_ptp_tx_work(struct work_struct *work)
376{ 377{
377 struct igb_adapter *adapter = container_of(work, struct igb_adapter, 378 struct igb_adapter *adapter = container_of(work, struct igb_adapter,
378 ptp_tx_work); 379 ptp_tx_work);
@@ -466,7 +467,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
466 * available, then it must have been for this skb here because we only 467 * available, then it must have been for this skb here because we only
467 * allow only one such packet into the queue. 468 * allow only one such packet into the queue.
468 **/ 469 **/
469void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) 470static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
470{ 471{
471 struct e1000_hw *hw = &adapter->hw; 472 struct e1000_hw *hw = &adapter->hw;
472 struct skb_shared_hwtstamps shhwtstamps; 473 struct skb_shared_hwtstamps shhwtstamps;
@@ -540,10 +541,26 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
540} 541}
541 542
542/** 543/**
543 * igb_ptp_hwtstamp_ioctl - control hardware time stamping 544 * igb_ptp_get_ts_config - get hardware time stamping config
 545 * @netdev: pointer to the net device structure
 546 * @ifreq: interface request data
547 *
548 * Get the hwtstamp_config settings to return to the user. Rather than attempt
549 * to deconstruct the settings from the registers, just return a shadow copy
550 * of the last known settings.
551 **/
552int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
553{
554 struct igb_adapter *adapter = netdev_priv(netdev);
555 struct hwtstamp_config *config = &adapter->tstamp_config;
556
557 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
558 -EFAULT : 0;
559}
560/**
561 * igb_ptp_set_ts_config - control hardware time stamping
544 * @netdev: 562 * @netdev:
545 * @ifreq: 563 * @ifreq:
546 * @cmd:
547 * 564 *
548 * Outgoing time stamping can be enabled and disabled. Play nice and 565 * Outgoing time stamping can be enabled and disabled. Play nice and
 549 * disable it when requested, although it shouldn't cause any overhead 566
@@ -557,12 +574,11 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
557 * not supported, with the exception of "all V2 events regardless of 574 * not supported, with the exception of "all V2 events regardless of
558 * level 2 or 4". 575 * level 2 or 4".
559 **/ 576 **/
560int igb_ptp_hwtstamp_ioctl(struct net_device *netdev, 577int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
561 struct ifreq *ifr, int cmd)
562{ 578{
563 struct igb_adapter *adapter = netdev_priv(netdev); 579 struct igb_adapter *adapter = netdev_priv(netdev);
564 struct e1000_hw *hw = &adapter->hw; 580 struct e1000_hw *hw = &adapter->hw;
565 struct hwtstamp_config config; 581 struct hwtstamp_config *config = &adapter->tstamp_config;
566 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; 582 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
567 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 583 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
568 u32 tsync_rx_cfg = 0; 584 u32 tsync_rx_cfg = 0;
@@ -570,14 +586,14 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
570 bool is_l2 = false; 586 bool is_l2 = false;
571 u32 regval; 587 u32 regval;
572 588
573 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 589 if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
574 return -EFAULT; 590 return -EFAULT;
575 591
576 /* reserved for future extensions */ 592 /* reserved for future extensions */
577 if (config.flags) 593 if (config->flags)
578 return -EINVAL; 594 return -EINVAL;
579 595
580 switch (config.tx_type) { 596 switch (config->tx_type) {
581 case HWTSTAMP_TX_OFF: 597 case HWTSTAMP_TX_OFF:
582 tsync_tx_ctl = 0; 598 tsync_tx_ctl = 0;
583 case HWTSTAMP_TX_ON: 599 case HWTSTAMP_TX_ON:
@@ -586,7 +602,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
586 return -ERANGE; 602 return -ERANGE;
587 } 603 }
588 604
589 switch (config.rx_filter) { 605 switch (config->rx_filter) {
590 case HWTSTAMP_FILTER_NONE: 606 case HWTSTAMP_FILTER_NONE:
591 tsync_rx_ctl = 0; 607 tsync_rx_ctl = 0;
592 break; 608 break;
@@ -610,7 +626,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
610 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 626 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
611 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 627 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
612 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; 628 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
613 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 629 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
614 is_l2 = true; 630 is_l2 = true;
615 is_l4 = true; 631 is_l4 = true;
616 break; 632 break;
@@ -621,12 +637,12 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
621 */ 637 */
622 if (hw->mac.type != e1000_82576) { 638 if (hw->mac.type != e1000_82576) {
623 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 639 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
624 config.rx_filter = HWTSTAMP_FILTER_ALL; 640 config->rx_filter = HWTSTAMP_FILTER_ALL;
625 break; 641 break;
626 } 642 }
627 /* fall through */ 643 /* fall through */
628 default: 644 default:
629 config.rx_filter = HWTSTAMP_FILTER_NONE; 645 config->rx_filter = HWTSTAMP_FILTER_NONE;
630 return -ERANGE; 646 return -ERANGE;
631 } 647 }
632 648
@@ -643,7 +659,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
643 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) { 659 if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
644 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; 660 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
645 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; 661 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
646 config.rx_filter = HWTSTAMP_FILTER_ALL; 662 config->rx_filter = HWTSTAMP_FILTER_ALL;
647 is_l2 = true; 663 is_l2 = true;
648 is_l4 = true; 664 is_l4 = true;
649 665
@@ -707,7 +723,7 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
707 regval = rd32(E1000_RXSTMPL); 723 regval = rd32(E1000_RXSTMPL);
708 regval = rd32(E1000_RXSTMPH); 724 regval = rd32(E1000_RXSTMPH);
709 725
710 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 726 return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
711 -EFAULT : 0; 727 -EFAULT : 0;
712} 728}
713 729
@@ -798,7 +814,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
798 814
799 /* Initialize the time sync interrupts for devices that support it. */ 815 /* Initialize the time sync interrupts for devices that support it. */
800 if (hw->mac.type >= e1000_82580) { 816 if (hw->mac.type >= e1000_82580) {
801 wr32(E1000_TSIM, E1000_TSIM_TXTS); 817 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
802 wr32(E1000_IMS, E1000_IMS_TS); 818 wr32(E1000_IMS, E1000_IMS_TS);
803 } 819 }
804 820
@@ -864,6 +880,9 @@ void igb_ptp_reset(struct igb_adapter *adapter)
864 if (!(adapter->flags & IGB_FLAG_PTP)) 880 if (!(adapter->flags & IGB_FLAG_PTP))
865 return; 881 return;
866 882
883 /* reset the tstamp_config */
884 memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
885
867 switch (adapter->hw.mac.type) { 886 switch (adapter->hw.mac.type) {
868 case e1000_82576: 887 case e1000_82576:
869 /* Dial the nominal frequency. */ 888 /* Dial the nominal frequency. */
@@ -876,7 +895,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
876 case e1000_i211: 895 case e1000_i211:
877 /* Enable the timer functions and interrupts. */ 896 /* Enable the timer functions and interrupts. */
878 wr32(E1000_TSAUXC, 0x0); 897 wr32(E1000_TSAUXC, 0x0);
879 wr32(E1000_TSIM, E1000_TSIM_TXTS); 898 wr32(E1000_TSIM, TSYNC_INTERRUPTS);
880 wr32(E1000_IMS, E1000_IMS_TS); 899 wr32(E1000_IMS, E1000_IMS_TS);
881 break; 900 break;
882 default: 901 default:
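Note: one subtlety of the shadow-copy approach above is that igb_ptp_set_ts_config() does copy_from_user() straight into adapter->tstamp_config, so a request that is then rejected with -ERANGE still overwrites the last known-good shadow. A hedged sketch of a more defensive ordering, validating into a local copy first (example_set_ts_config and igb_ptp_apply_ts_config are hypothetical, not driver code):

static int example_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = igb_ptp_apply_ts_config(adapter, &config);	/* hypothetical */
	if (err)
		return err;	/* shadow still holds the last good config */

	adapter->tstamp_config = config;	/* commit only on success */
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}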
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435fc2e53..b7ab03a2f28f 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
1043 for (i = 0; i < 3; i++) 1043 for (i = 0; i < 3; i++)
1044 adapter->msix_entries[i].entry = i; 1044 adapter->msix_entries[i].entry = i;
1045 1045
1046 err = pci_enable_msix(adapter->pdev, 1046 err = pci_enable_msix_range(adapter->pdev,
1047 adapter->msix_entries, 3); 1047 adapter->msix_entries, 3, 3);
1048 } 1048 }
1049 1049
1050 if (err) { 1050 if (err < 0) {
1051 /* MSI-X failed */ 1051 /* MSI-X failed */
1052 dev_err(&adapter->pdev->dev, 1052 dev_err(&adapter->pdev->dev,
1053 "Failed to initialize MSI-X interrupts.\n"); 1053 "Failed to initialize MSI-X interrupts.\n");
@@ -2014,12 +2014,12 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
2014 2014
2015 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2015 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2016 switch (skb->protocol) { 2016 switch (skb->protocol) {
2017 case __constant_htons(ETH_P_IP): 2017 case htons(ETH_P_IP):
2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 2018 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2019 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2020 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2020 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2021 break; 2021 break;
2022 case __constant_htons(ETH_P_IPV6): 2022 case htons(ETH_P_IPV6):
2023 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2023 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2024 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 2024 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2025 break; 2025 break;
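Note: __constant_htons() predates byteorder headers that dispatch on __builtin_constant_p(); with those in place, plain htons() on a constant folds at compile time, so it remains valid even as a case label, which is what these hunks rely on. A kernel-style sketch (example_is_ip is invented):

#include <linux/types.h>
#include <linux/if_ether.h>

static bool example_is_ip(__be16 proto)
{
	switch (proto) {
	case htons(ETH_P_IP):	/* folds to a compile-time constant */
	case htons(ETH_P_IPV6):
		return true;
	default:
		return false;
	}
}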
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 57e390cbe6d0..f42c201f727f 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1521,12 +1521,12 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1521 int tso; 1521 int tso;
1522 1522
1523 if (test_bit(__IXGB_DOWN, &adapter->flags)) { 1523 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1524 dev_kfree_skb(skb); 1524 dev_kfree_skb_any(skb);
1525 return NETDEV_TX_OK; 1525 return NETDEV_TX_OK;
1526 } 1526 }
1527 1527
1528 if (skb->len <= 0) { 1528 if (skb->len <= 0) {
1529 dev_kfree_skb(skb); 1529 dev_kfree_skb_any(skb);
1530 return NETDEV_TX_OK; 1530 return NETDEV_TX_OK;
1531 } 1531 }
1532 1532
@@ -1543,7 +1543,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1543 1543
1544 tso = ixgb_tso(adapter, skb); 1544 tso = ixgb_tso(adapter, skb);
1545 if (tso < 0) { 1545 if (tso < 0) {
1546 dev_kfree_skb(skb); 1546 dev_kfree_skb_any(skb);
1547 return NETDEV_TX_OK; 1547 return NETDEV_TX_OK;
1548 } 1548 }
1549 1549
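
dev_kfree_skb_any() is the context-agnostic free: it routes to dev_kfree_skb_irq() when called in hardirq context or with IRQs disabled, and to the ordinary free otherwise, which matters because netpoll can invoke ndo_start_xmit with IRQs off. A minimal sketch of the drop pattern the hunks above converge on:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: drop a frame from a Tx path that may run in any context. */
static netdev_tx_t example_xmit_drop(struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* safe in process, softirq or hardirq context */
	return NETDEV_TX_OK;	/* frame consumed; the stack must not requeue it */
}
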
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0186ea2969fe..2fff0fc4e6e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -765,6 +766,7 @@ struct ixgbe_adapter {
765 struct ptp_clock_info ptp_caps; 766 struct ptp_clock_info ptp_caps;
766 struct work_struct ptp_tx_work; 767 struct work_struct ptp_tx_work;
767 struct sk_buff *ptp_tx_skb; 768 struct sk_buff *ptp_tx_skb;
769 struct hwtstamp_config tstamp_config;
768 unsigned long ptp_tx_start; 770 unsigned long ptp_tx_start;
769 unsigned long last_overflow_check; 771 unsigned long last_overflow_check;
770 unsigned long last_rx_ptp_check; 772 unsigned long last_rx_ptp_check;
@@ -884,7 +886,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
884 u16 soft_id); 886 u16 soft_id);
885void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, 887void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
886 union ixgbe_atr_input *mask); 888 union ixgbe_atr_input *mask);
887bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
888void ixgbe_set_rx_mode(struct net_device *netdev); 889void ixgbe_set_rx_mode(struct net_device *netdev);
889#ifdef CONFIG_IXGBE_DCB 890#ifdef CONFIG_IXGBE_DCB
890void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); 891void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +959,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
958 rx_ring->last_rx_timestamp = jiffies; 959 rx_ring->last_rx_timestamp = jiffies;
959} 960}
960 961
961int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr, 962int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
962 int cmd); 963int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
963void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); 964void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
964void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); 965void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
965void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); 966void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
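
Splitting the old hwtstamp ioctl into set/get entry points pairs with the SIOCGHWTSTAMP ioctl that complements SIOCSHWTSTAMP; caching the accepted settings in the new adapter->tstamp_config member is what lets the get path answer without touching hardware. A hedged sketch of the dispatch this enables, using the two functions declared above:

/* Sketch: ndo_do_ioctl dispatch once set and get are separate. */
static int example_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return ixgbe_ptp_get_ts_config(adapter, ifr); /* report cached config */
	case SIOCSHWTSTAMP:
		return ixgbe_ptp_set_ts_config(adapter, ifr); /* validate, program, cache */
	default:
		return -EOPNOTSUPP;
	}
}
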
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fee4f35..4c78ea8946c1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -57,10 +58,12 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
57 **/ 58 **/
58static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) 59static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
59{ 60{
60 struct ixgbe_adapter *adapter = hw->back;
61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); 61 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
62 u16 pcie_devctl2; 62 u16 pcie_devctl2;
63 63
64 if (ixgbe_removed(hw->hw_addr))
65 return;
66
64 /* only take action if timeout value is defaulted to 0 */ 67 /* only take action if timeout value is defaulted to 0 */
65 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) 68 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
66 goto out; 69 goto out;
@@ -79,11 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
79 * directly in order to set the completion timeout value for 82 * directly in order to set the completion timeout value for
80 * 16ms to 55ms 83 * 16ms to 55ms
81 */ 84 */
82 pci_read_config_word(adapter->pdev, 85 pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
83 IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
84 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; 86 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
85 pci_write_config_word(adapter->pdev, 87 ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
86 IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
87out: 88out:
88 /* disable completion timeout resend */ 89 /* disable completion timeout resend */
89 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; 90 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
@@ -100,6 +101,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
100 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; 101 mac->mcft_size = IXGBE_82598_MC_TBL_SIZE;
101 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; 102 mac->vft_size = IXGBE_82598_VFT_TBL_SIZE;
102 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
104 mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE;
103 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 105 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
104 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 106 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
105 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 107 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -201,8 +203,6 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
201 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); 203 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
202 } 204 }
203 205
204 hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
205
206 /* set the completion timeout for interface */ 206 /* set the completion timeout for interface */
207 if (ret_val == 0) 207 if (ret_val == 0)
208 ixgbe_set_pcie_completion_timeout(hw); 208 ixgbe_set_pcie_completion_timeout(hw);
@@ -1237,14 +1237,14 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1237} 1237}
1238 1238
1239/** 1239/**
1240 * ixgbe_set_rxpba_82598 - Configure packet buffers 1240 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1241 * @hw: pointer to hardware structure 1241 * @hw: pointer to hardware structure
1242 * @dcb_config: pointer to ixgbe_dcb_config structure 1242 * @num_pb: number of packet buffers to allocate
1243 * 1243 * @headroom: reserve n KB of headroom
1244 * Configure packet buffers. 1244 * @strategy: packet buffer allocation strategy
1245 */ 1245 **/
1246static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom, 1246static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1247 int strategy) 1247 u32 headroom, int strategy)
1248{ 1248{
1249 u32 rxpktsize = IXGBE_RXPBSIZE_64KB; 1249 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1250 u8 i = 0; 1250 u8 i = 0;
@@ -1315,7 +1315,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1315 .release_swfw_sync = &ixgbe_release_swfw_sync, 1315 .release_swfw_sync = &ixgbe_release_swfw_sync,
1316 .get_thermal_sensor_data = NULL, 1316 .get_thermal_sensor_data = NULL,
1317 .init_thermal_sensor_thresh = NULL, 1317 .init_thermal_sensor_thresh = NULL,
1318 .mng_fw_enabled = NULL, 1318 .prot_autoc_read = &prot_autoc_read_generic,
1319 .prot_autoc_write = &prot_autoc_write_generic,
1319}; 1320};
1320 1321
1321static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1322static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
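
Wiring 82598 to the generic prot_autoc handlers gives every MAC the same three-step AUTOC read-modify-write shape, with the backend deciding internally whether the SW/FW semaphore is needed. A minimal sketch of a caller under that contract (the helper name is invented for illustration):

/* Sketch: AUTOC RMW through the MAC ops. On 82599 the read takes the
 * SW/FW semaphore when LESM is on and the write releases it; the
 * generic backend used by 82598 is a plain register access. */
static s32 example_autoc_rmw(struct ixgbe_hw *hw, u32 set_bits)
{
	bool locked = false;
	u32 autoc;
	s32 ret;

	ret = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
	if (ret)
		return ret;

	autoc |= set_bits;

	/* Passing 'locked' back transfers semaphore ownership to the write. */
	return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
}
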
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index edda6814108c..f32b3dd1ba8e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -63,8 +64,10 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
63 u8 dev_addr, u8 *data); 64 u8 dev_addr, u8 *data);
64static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, 65static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
65 u8 dev_addr, u8 data); 66 u8 dev_addr, u8 data);
67static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
68static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
66 69
67static bool ixgbe_mng_enabled(struct ixgbe_hw *hw) 70bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
68{ 71{
69 u32 fwsm, manc, factps; 72 u32 fwsm, manc, factps;
70 73
@@ -91,7 +94,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
91 * and MNG not enabled 94 * and MNG not enabled
92 */ 95 */
93 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && 96 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
94 !hw->mng_fw_enabled) { 97 !ixgbe_mng_enabled(hw)) {
95 mac->ops.disable_tx_laser = 98 mac->ops.disable_tx_laser =
96 &ixgbe_disable_tx_laser_multispeed_fiber; 99 &ixgbe_disable_tx_laser_multispeed_fiber;
97 mac->ops.enable_tx_laser = 100 mac->ops.enable_tx_laser =
@@ -122,7 +125,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
122{ 125{
123 s32 ret_val = 0; 126 s32 ret_val = 0;
124 u16 list_offset, data_offset, data_value; 127 u16 list_offset, data_offset, data_value;
125 bool got_lock = false;
126 128
127 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 129 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
128 ixgbe_init_mac_link_ops_82599(hw); 130 ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +162,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
160 usleep_range(hw->eeprom.semaphore_delay * 1000, 162 usleep_range(hw->eeprom.semaphore_delay * 1000,
161 hw->eeprom.semaphore_delay * 2000); 163 hw->eeprom.semaphore_delay * 2000);
162 164
163 /* Need SW/FW semaphore around AUTOC writes if LESM on,
164 * likewise reset_pipeline requires lock as it also writes
165 * AUTOC.
166 */
167 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
168 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
169 IXGBE_GSSR_MAC_CSR_SM);
170 if (ret_val)
171 goto setup_sfp_out;
172
173 got_lock = true;
174 }
175
176 /* Restart DSP and set SFI mode */ 165 /* Restart DSP and set SFI mode */
177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) | 166 ret_val = hw->mac.ops.prot_autoc_write(hw,
178 IXGBE_AUTOC_LMS_10G_SERIAL)); 167 hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
179 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 168 false);
180 ret_val = ixgbe_reset_pipeline_82599(hw);
181
182 if (got_lock) {
183 hw->mac.ops.release_swfw_sync(hw,
184 IXGBE_GSSR_MAC_CSR_SM);
185 got_lock = false;
186 }
187 169
188 if (ret_val) { 170 if (ret_val) {
189 hw_dbg(hw, " sfp module setup not complete\n"); 171 hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +189,81 @@ setup_sfp_err:
207 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; 189 return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
208} 190}
209 191
192/**
193 * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
194 * @hw: pointer to hardware structure
 195 * @locked: Return whether we locked for this read.
196 * @reg_val: Value we read from AUTOC
197 *
198 * For this part (82599) we need to wrap read-modify-writes with a possible
199 * FW/SW lock. It is assumed this lock will be freed with the next
 200 * prot_autoc_write_82599(). Note that locked can only be true in cases
201 * where this function doesn't return an error.
202 **/
203static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
204 u32 *reg_val)
205{
206 s32 ret_val;
207
208 *locked = false;
209 /* If LESM is on then we need to hold the SW/FW semaphore. */
210 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
211 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
212 IXGBE_GSSR_MAC_CSR_SM);
213 if (ret_val)
214 return IXGBE_ERR_SWFW_SYNC;
215
216 *locked = true;
217 }
218
219 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
220 return 0;
221}
222
223/**
224 * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
225 * @hw: pointer to hardware structure
226 * @reg_val: value to write to AUTOC
227 * @locked: bool to indicate whether the SW/FW lock was already taken by
228 * previous proc_autoc_read_82599.
229 *
 230 * This part (82599) may need to hold the SW/FW lock around all writes to
231 * AUTOC. Likewise after a write we need to do a pipeline reset.
232 **/
233static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
234{
235 s32 ret_val = 0;
236
237 /* Blocked by MNG FW so bail */
238 if (ixgbe_check_reset_blocked(hw))
239 goto out;
240
241 /* We only need to get the lock if:
242 * - We didn't do it already (in the read part of a read-modify-write)
243 * - LESM is enabled.
244 */
245 if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
246 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
247 IXGBE_GSSR_MAC_CSR_SM);
248 if (ret_val)
249 return IXGBE_ERR_SWFW_SYNC;
250
251 locked = true;
252 }
253
254 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
255 ret_val = ixgbe_reset_pipeline_82599(hw);
256
257out:
258 /* Free the SW/FW semaphore as we either grabbed it here or
259 * already had it when this function was called.
260 */
261 if (locked)
262 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
263
264 return ret_val;
265}
266
210static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 267static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
211{ 268{
212 struct ixgbe_mac_info *mac = &hw->mac; 269 struct ixgbe_mac_info *mac = &hw->mac;
@@ -216,6 +273,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
216 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; 273 mac->mcft_size = IXGBE_82599_MC_TBL_SIZE;
217 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; 274 mac->vft_size = IXGBE_82599_VFT_TBL_SIZE;
218 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; 275 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
276 mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE;
219 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; 277 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
220 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; 278 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
221 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 279 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -456,12 +514,20 @@ out:
456 * 514 *
457 * Disables link, should be called during D3 power down sequence. 515 * Disables link, should be called during D3 power down sequence.
458 * 516 *
459 */ 517 **/
460static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) 518static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
461{ 519{
462 u32 autoc2_reg; 520 u32 autoc2_reg, fwsm;
521 u16 ee_ctrl_2 = 0;
522
523 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2);
463 524
464 if (!hw->mng_fw_enabled && !hw->wol_enabled) { 525 /* Check to see if MNG FW could be enabled */
526 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
527
528 if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) &&
529 !hw->wol_enabled &&
530 ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) {
465 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 531 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
466 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; 532 autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
467 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg); 533 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
@@ -542,6 +608,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
542{ 608{
543 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); 609 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
544 610
611 /* Blocked by MNG FW so bail */
612 if (ixgbe_check_reset_blocked(hw))
613 return;
614
545 /* Disable tx laser; allow 100us to go dark per spec */ 615 /* Disable tx laser; allow 100us to go dark per spec */
546 esdp_reg |= IXGBE_ESDP_SDP3; 616 esdp_reg |= IXGBE_ESDP_SDP3;
547 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); 617 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +652,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
582 **/ 652 **/
583static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 653static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
584{ 654{
655 /* Blocked by MNG FW so bail */
656 if (ixgbe_check_reset_blocked(hw))
657 return;
658
585 if (hw->mac.autotry_restart) { 659 if (hw->mac.autotry_restart) {
586 ixgbe_disable_tx_laser_multispeed_fiber(hw); 660 ixgbe_disable_tx_laser_multispeed_fiber(hw);
587 ixgbe_enable_tx_laser_multispeed_fiber(hw); 661 ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -590,75 +664,6 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
590} 664}
591 665
592/** 666/**
593 * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
594 * @hw: pointer to hardware structure
595 * @speed: link speed to set
596 *
597 * We set the module speed differently for fixed fiber. For other
598 * multi-speed devices we don't have an error value so here if we
599 * detect an error we just log it and exit.
600 */
601static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
602 ixgbe_link_speed speed)
603{
604 s32 status;
605 u8 rs, eeprom_data;
606
607 switch (speed) {
608 case IXGBE_LINK_SPEED_10GB_FULL:
609 /* one bit mask same as setting on */
610 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
611 break;
612 case IXGBE_LINK_SPEED_1GB_FULL:
613 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
614 break;
615 default:
616 hw_dbg(hw, "Invalid fixed module speed\n");
617 return;
618 }
619
620 /* Set RS0 */
621 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
622 IXGBE_I2C_EEPROM_DEV_ADDR2,
623 &eeprom_data);
624 if (status) {
625 hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
626 goto out;
627 }
628
629 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs;
630
631 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
632 IXGBE_I2C_EEPROM_DEV_ADDR2,
633 eeprom_data);
634 if (status) {
635 hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
636 goto out;
637 }
638
639 /* Set RS1 */
640 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
641 IXGBE_I2C_EEPROM_DEV_ADDR2,
642 &eeprom_data);
643 if (status) {
644 hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
645 goto out;
646 }
647
648 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
649
650 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
651 IXGBE_I2C_EEPROM_DEV_ADDR2,
652 eeprom_data);
653 if (status) {
654 hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
655 goto out;
656 }
657out:
658 return;
659}
660
661/**
662 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed 667 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
663 * @hw: pointer to hardware structure 668 * @hw: pointer to hardware structure
664 * @speed: new link speed 669 * @speed: new link speed
@@ -768,10 +773,6 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
768 773
769 /* Set the module link speed */ 774 /* Set the module link speed */
770 switch (hw->phy.media_type) { 775 switch (hw->phy.media_type) {
771 case ixgbe_media_type_fiber_fixed:
772 ixgbe_set_fiber_fixed_speed(hw,
773 IXGBE_LINK_SPEED_1GB_FULL);
774 break;
775 case ixgbe_media_type_fiber: 776 case ixgbe_media_type_fiber:
776 esdp_reg &= ~IXGBE_ESDP_SDP5; 777 esdp_reg &= ~IXGBE_ESDP_SDP5;
777 esdp_reg |= IXGBE_ESDP_SDP5_DIR; 778 esdp_reg |= IXGBE_ESDP_SDP5_DIR;
@@ -941,8 +942,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
941 942
942out: 943out:
943 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) 944 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
944 hw_dbg(hw, "Smartspeed has downgraded the link speed from " 945 hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n");
945 "the maximum advertised\n");
946 return status; 946 return status;
947} 947}
948 948
@@ -958,16 +958,19 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
958 ixgbe_link_speed speed, 958 ixgbe_link_speed speed,
959 bool autoneg_wait_to_complete) 959 bool autoneg_wait_to_complete)
960{ 960{
961 bool autoneg = false;
961 s32 status = 0; 962 s32 status = 0;
962 u32 autoc, pma_pmd_1g, link_mode, start_autoc; 963 u32 pma_pmd_1g, link_mode, links_reg, i;
963 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 964 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
964 u32 orig_autoc = 0;
965 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 965 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
966 u32 links_reg;
967 u32 i;
968 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 966 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
969 bool got_lock = false; 967
970 bool autoneg = false; 968 /* holds the value of AUTOC register at this current point in time */
969 u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
970 /* holds the cached value of AUTOC register */
971 u32 orig_autoc = 0;
972 /* temporary variable used for comparison purposes */
973 u32 autoc = current_autoc;
971 974
972 /* Check to see if speed passed in is supported. */ 975 /* Check to see if speed passed in is supported. */
973 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, 976 status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -984,12 +987,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
984 987
985 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 988 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
986 if (hw->mac.orig_link_settings_stored) 989 if (hw->mac.orig_link_settings_stored)
987 autoc = hw->mac.orig_autoc; 990 orig_autoc = hw->mac.orig_autoc;
988 else 991 else
989 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 992 orig_autoc = autoc;
990 993
991 orig_autoc = autoc;
992 start_autoc = hw->mac.cached_autoc;
993 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 994 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
994 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 995 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
995 996
@@ -1029,28 +1030,11 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1029 } 1030 }
1030 } 1031 }
1031 1032
1032 if (autoc != start_autoc) { 1033 if (autoc != current_autoc) {
1033 /* Need SW/FW semaphore around AUTOC writes if LESM is on,
1034 * likewise reset_pipeline requires us to hold this lock as
1035 * it also writes to AUTOC.
1036 */
1037 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1038 status = hw->mac.ops.acquire_swfw_sync(hw,
1039 IXGBE_GSSR_MAC_CSR_SM);
1040 if (status != 0)
1041 goto out;
1042
1043 got_lock = true;
1044 }
1045
1046 /* Restart link */ 1034 /* Restart link */
1047 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 1035 status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
1048 hw->mac.cached_autoc = autoc; 1036 if (status)
1049 ixgbe_reset_pipeline_82599(hw); 1037 goto out;
1050
1051 if (got_lock)
1052 hw->mac.ops.release_swfw_sync(hw,
1053 IXGBE_GSSR_MAC_CSR_SM);
1054 1038
1055 /* Only poll for autoneg to complete if specified to do so */ 1039 /* Only poll for autoneg to complete if specified to do so */
1056 if (autoneg_wait_to_complete) { 1040 if (autoneg_wait_to_complete) {
@@ -1068,8 +1052,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
1068 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 1052 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
1069 status = 1053 status =
1070 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 1054 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
1071 hw_dbg(hw, "Autoneg did not " 1055 hw_dbg(hw, "Autoneg did not complete.\n");
1072 "complete.\n");
1073 } 1056 }
1074 } 1057 }
1075 } 1058 }
@@ -1117,7 +1100,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1117{ 1100{
1118 ixgbe_link_speed link_speed; 1101 ixgbe_link_speed link_speed;
1119 s32 status; 1102 s32 status;
1120 u32 ctrl, i, autoc2; 1103 u32 ctrl, i, autoc, autoc2;
1121 u32 curr_lms; 1104 u32 curr_lms;
1122 bool link_up = false; 1105 bool link_up = false;
1123 1106
@@ -1151,11 +1134,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
1151 hw->phy.ops.reset(hw); 1134 hw->phy.ops.reset(hw);
1152 1135
1153 /* remember AUTOC from before we reset */ 1136 /* remember AUTOC from before we reset */
1154 if (hw->mac.cached_autoc) 1137 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
1155 curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
1156 else
1157 curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
1158 IXGBE_AUTOC_LMS_MASK;
1159 1138
1160mac_reset_top: 1139mac_reset_top:
1161 /* 1140 /*
@@ -1205,7 +1184,7 @@ mac_reset_top:
1205 * stored off yet. Otherwise restore the stored original 1184 * stored off yet. Otherwise restore the stored original
1206 * values since the reset operation sets back to defaults. 1185 * values since the reset operation sets back to defaults.
1207 */ 1186 */
1208 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 1187 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1209 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 1188 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1210 1189
1211 /* Enable link if disabled in NVM */ 1190 /* Enable link if disabled in NVM */
@@ -1216,7 +1195,7 @@ mac_reset_top:
1216 } 1195 }
1217 1196
1218 if (hw->mac.orig_link_settings_stored == false) { 1197 if (hw->mac.orig_link_settings_stored == false) {
1219 hw->mac.orig_autoc = hw->mac.cached_autoc; 1198 hw->mac.orig_autoc = autoc;
1220 hw->mac.orig_autoc2 = autoc2; 1199 hw->mac.orig_autoc2 = autoc2;
1221 hw->mac.orig_link_settings_stored = true; 1200 hw->mac.orig_link_settings_stored = true;
1222 } else { 1201 } else {
@@ -1227,34 +1206,18 @@ mac_reset_top:
1227 * Likewise if we support WoL we don't want change the 1206 * Likewise if we support WoL we don't want change the
1228 * LMS state either. 1207 * LMS state either.
1229 */ 1208 */
1230 if ((hw->phy.multispeed_fiber && hw->mng_fw_enabled) || 1209 if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) ||
1231 hw->wol_enabled) 1210 hw->wol_enabled)
1232 hw->mac.orig_autoc = 1211 hw->mac.orig_autoc =
1233 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) | 1212 (hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
1234 curr_lms; 1213 curr_lms;
1235 1214
1236 if (hw->mac.cached_autoc != hw->mac.orig_autoc) { 1215 if (autoc != hw->mac.orig_autoc) {
1237 /* Need SW/FW semaphore around AUTOC writes if LESM is 1216 status = hw->mac.ops.prot_autoc_write(hw,
1238 * on, likewise reset_pipeline requires us to hold 1217 hw->mac.orig_autoc,
1239 * this lock as it also writes to AUTOC. 1218 false);
1240 */ 1219 if (status)
1241 bool got_lock = false; 1220 goto reset_hw_out;
1242 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1243 status = hw->mac.ops.acquire_swfw_sync(hw,
1244 IXGBE_GSSR_MAC_CSR_SM);
1245 if (status)
1246 goto reset_hw_out;
1247
1248 got_lock = true;
1249 }
1250
1251 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1252 hw->mac.cached_autoc = hw->mac.orig_autoc;
1253 ixgbe_reset_pipeline_82599(hw);
1254
1255 if (got_lock)
1256 hw->mac.ops.release_swfw_sync(hw,
1257 IXGBE_GSSR_MAC_CSR_SM);
1258 } 1221 }
1259 1222
1260 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) != 1223 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1634,35 +1597,20 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1634{ 1597{
1635 1598
1636 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; 1599 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1637 u32 bucket_hash = 0; 1600 u32 bucket_hash = 0, hi_dword = 0;
1601 int i;
1638 1602
1639 /* Apply masks to input data */ 1603 /* Apply masks to input data */
1640 input->dword_stream[0] &= input_mask->dword_stream[0]; 1604 for (i = 0; i <= 10; i++)
1641 input->dword_stream[1] &= input_mask->dword_stream[1]; 1605 input->dword_stream[i] &= input_mask->dword_stream[i];
1642 input->dword_stream[2] &= input_mask->dword_stream[2];
1643 input->dword_stream[3] &= input_mask->dword_stream[3];
1644 input->dword_stream[4] &= input_mask->dword_stream[4];
1645 input->dword_stream[5] &= input_mask->dword_stream[5];
1646 input->dword_stream[6] &= input_mask->dword_stream[6];
1647 input->dword_stream[7] &= input_mask->dword_stream[7];
1648 input->dword_stream[8] &= input_mask->dword_stream[8];
1649 input->dword_stream[9] &= input_mask->dword_stream[9];
1650 input->dword_stream[10] &= input_mask->dword_stream[10];
1651 1606
 1652 /* record the flow_vm_vlan bits as they are a key part of the hash */ 1607
1653 flow_vm_vlan = ntohl(input->dword_stream[0]); 1608 flow_vm_vlan = ntohl(input->dword_stream[0]);
1654 1609
1655 /* generate common hash dword */ 1610 /* generate common hash dword */
1656 hi_hash_dword = ntohl(input->dword_stream[1] ^ 1611 for (i = 1; i <= 10; i++)
1657 input->dword_stream[2] ^ 1612 hi_dword ^= input->dword_stream[i];
1658 input->dword_stream[3] ^ 1613 hi_hash_dword = ntohl(hi_dword);
1659 input->dword_stream[4] ^
1660 input->dword_stream[5] ^
1661 input->dword_stream[6] ^
1662 input->dword_stream[7] ^
1663 input->dword_stream[8] ^
1664 input->dword_stream[9] ^
1665 input->dword_stream[10]);
1666 1614
1667 /* low dword is word swapped version of common */ 1615 /* low dword is word swapped version of common */
1668 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); 1616 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
@@ -1681,21 +1629,8 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1681 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); 1629 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1682 1630
 1683 /* Process remaining 30 bits of the key */ 1631
1684 IXGBE_COMPUTE_BKT_HASH_ITERATION(1); 1632 for (i = 1; i <= 15; i++)
1685 IXGBE_COMPUTE_BKT_HASH_ITERATION(2); 1633 IXGBE_COMPUTE_BKT_HASH_ITERATION(i);
1686 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1687 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1688 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1689 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1690 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1691 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1692 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1693 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1694 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1695 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1696 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1697 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1698 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1699 1634
1700 /* 1635 /*
1701 * Limit hash to 13 bits since max bucket count is 8K. 1636 * Limit hash to 13 bits since max bucket count is 8K.
@@ -2001,7 +1936,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2001 1936
2002 /* We need to run link autotry after the driver loads */ 1937 /* We need to run link autotry after the driver loads */
2003 hw->mac.autotry_restart = true; 1938 hw->mac.autotry_restart = true;
2004 hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
2005 1939
2006 if (ret_val == 0) 1940 if (ret_val == 0)
2007 ret_val = ixgbe_verify_fw_version_82599(hw); 1941 ret_val = ixgbe_verify_fw_version_82599(hw);
@@ -2260,7 +2194,7 @@ fw_version_err:
2260 * Returns true if the LESM FW module is present and enabled. Otherwise 2194 * Returns true if the LESM FW module is present and enabled. Otherwise
2261 * returns false. Smart Speed must be disabled if LESM FW module is enabled. 2195 * returns false. Smart Speed must be disabled if LESM FW module is enabled.
2262 **/ 2196 **/
2263bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) 2197static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2264{ 2198{
2265 bool lesm_enabled = false; 2199 bool lesm_enabled = false;
2266 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; 2200 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2300,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2366 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing 2300 * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
2367 * to AUTOC, so this function assumes the semaphore is held. 2301 * to AUTOC, so this function assumes the semaphore is held.
2368 **/ 2302 **/
2369s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) 2303static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2370{ 2304{
2371 s32 ret_val; 2305 s32 ret_val;
2372 u32 anlp1_reg = 0; 2306 u32 anlp1_reg = 0;
@@ -2380,11 +2314,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2380 IXGBE_WRITE_FLUSH(hw); 2314 IXGBE_WRITE_FLUSH(hw);
2381 } 2315 }
2382 2316
2383 autoc_reg = hw->mac.cached_autoc; 2317 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2384 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2318 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2385 2319
2386 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ 2320 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2387 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN); 2321 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
2322 autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
2388 2323
2389 /* Wait for AN to leave state 0 */ 2324 /* Wait for AN to leave state 0 */
2390 for (i = 0; i < 10; i++) { 2325 for (i = 0; i < 10; i++) {
@@ -2565,7 +2500,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2565 .release_swfw_sync = &ixgbe_release_swfw_sync, 2500 .release_swfw_sync = &ixgbe_release_swfw_sync,
2566 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, 2501 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2567 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, 2502 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2568 .mng_fw_enabled = &ixgbe_mng_enabled, 2503 .prot_autoc_read = &prot_autoc_read_82599,
2504 .prot_autoc_write = &prot_autoc_write_82599,
2569}; 2505};
2570 2506
2571static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2507static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
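
The hash rework above collapses eleven unrolled mask statements and a ten-way XOR into two loops, which works because the operations are uniform across dword_stream[0..10]. A standalone sketch of the same fold (byte-order conversion elided; the kernel applies ntohl to the folded value):

#include <stdint.h>

/* Sketch: mask each word, then XOR-fold words 1..10 into one dword.
 * Word 0 (flow/VM/VLAN) is masked but kept out of the fold, exactly
 * as in the loop form the patch introduces. */
static uint32_t fold_masked_stream(uint32_t stream[11], const uint32_t mask[11])
{
	uint32_t acc = 0;
	int i;

	for (i = 0; i <= 10; i++)
		stream[i] &= mask[i];

	for (i = 1; i <= 10; i++)
		acc ^= stream[i];

	return acc;
}
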
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b617b1..24fba39e194e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -72,7 +73,6 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
72 bool link_up; 73 bool link_up;
73 74
74 switch (hw->phy.media_type) { 75 switch (hw->phy.media_type) {
75 case ixgbe_media_type_fiber_fixed:
76 case ixgbe_media_type_fiber: 76 case ixgbe_media_type_fiber:
77 hw->mac.ops.check_link(hw, &speed, &link_up, false); 77 hw->mac.ops.check_link(hw, &speed, &link_up, false);
78 /* if link is down, assume supported */ 78 /* if link is down, assume supported */
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
114 s32 ret_val = 0; 114 s32 ret_val = 0;
115 u32 reg = 0, reg_bp = 0; 115 u32 reg = 0, reg_bp = 0;
116 u16 reg_cu = 0; 116 u16 reg_cu = 0;
117 bool got_lock = false; 117 bool locked = false;
118 118
119 /* 119 /*
120 * Validate the requested mode. Strict IEEE mode does not allow 120 * Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,16 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
139 * we link at 10G, the 1G advertisement is harmless and vice versa. 139 * we link at 10G, the 1G advertisement is harmless and vice versa.
140 */ 140 */
141 switch (hw->phy.media_type) { 141 switch (hw->phy.media_type) {
142 case ixgbe_media_type_fiber_fixed:
143 case ixgbe_media_type_fiber:
144 case ixgbe_media_type_backplane: 142 case ixgbe_media_type_backplane:
143 /* some MAC's need RMW protection on AUTOC */
144 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
145 if (ret_val)
146 goto out;
147
 148 /* only backplane uses autoc so fall through */
149 case ixgbe_media_type_fiber:
145 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 150 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
146 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC); 151
147 break; 152 break;
148 case ixgbe_media_type_copper: 153 case ixgbe_media_type_copper:
149 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 154 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +245,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
 240 * LESM is on, likewise reset_pipeline requires the lock as 245
241 * it also writes AUTOC. 246 * it also writes AUTOC.
242 */ 247 */
243 if ((hw->mac.type == ixgbe_mac_82599EB) && 248 ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
244 ixgbe_verify_lesm_fw_enabled_82599(hw)) { 249 if (ret_val)
245 ret_val = hw->mac.ops.acquire_swfw_sync(hw, 250 goto out;
246 IXGBE_GSSR_MAC_CSR_SM);
247 if (ret_val)
248 goto out;
249
250 got_lock = true;
251 }
252
253 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
254
255 if (hw->mac.type == ixgbe_mac_82599EB)
256 ixgbe_reset_pipeline_82599(hw);
257
258 if (got_lock)
259 hw->mac.ops.release_swfw_sync(hw,
260 IXGBE_GSSR_MAC_CSR_SM);
261 251
262 } else if ((hw->phy.media_type == ixgbe_media_type_copper) && 252 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
263 ixgbe_device_supports_autoneg_fc(hw)) { 253 ixgbe_device_supports_autoneg_fc(hw)) {
264 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, 254 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
265 MDIO_MMD_AN, reg_cu); 255 MDIO_MMD_AN, reg_cu);
266 } 256 }
@@ -656,20 +646,17 @@ enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status)
656 **/ 646 **/
657s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) 647s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
658{ 648{
659 struct ixgbe_adapter *adapter = hw->back;
660 struct ixgbe_mac_info *mac = &hw->mac;
661 u16 link_status; 649 u16 link_status;
662 650
663 hw->bus.type = ixgbe_bus_type_pci_express; 651 hw->bus.type = ixgbe_bus_type_pci_express;
664 652
665 /* Get the negotiated link width and speed from PCI config space */ 653 /* Get the negotiated link width and speed from PCI config space */
666 pci_read_config_word(adapter->pdev, IXGBE_PCI_LINK_STATUS, 654 link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS);
667 &link_status);
668 655
669 hw->bus.width = ixgbe_convert_bus_width(link_status); 656 hw->bus.width = ixgbe_convert_bus_width(link_status);
670 hw->bus.speed = ixgbe_convert_bus_speed(link_status); 657 hw->bus.speed = ixgbe_convert_bus_speed(link_status);
671 658
672 mac->ops.set_lan_id(hw); 659 hw->mac.ops.set_lan_id(hw);
673 660
674 return 0; 661 return 0;
675} 662}
@@ -2406,7 +2393,6 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2406 2393
2407 switch (hw->phy.media_type) { 2394 switch (hw->phy.media_type) {
2408 /* Autoneg flow control on fiber adapters */ 2395 /* Autoneg flow control on fiber adapters */
2409 case ixgbe_media_type_fiber_fixed:
2410 case ixgbe_media_type_fiber: 2396 case ixgbe_media_type_fiber:
2411 if (speed == IXGBE_LINK_SPEED_1GB_FULL) 2397 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2412 ret_val = ixgbe_fc_autoneg_fiber(hw); 2398 ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2437,6 +2423,53 @@ out:
2437} 2423}
2438 2424
2439/** 2425/**
2426 * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
2427 * @hw: pointer to hardware structure
2428 *
 2429 * System-wide timeout range is encoded in the PCIe Device Control2 register.
2430 *
2431 * Add 10% to specified maximum and return the number of times to poll for
2432 * completion timeout, in units of 100 microsec. Never return less than
2433 * 800 = 80 millisec.
2434 **/
2435static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
2436{
2437 s16 devctl2;
2438 u32 pollcnt;
2439
2440 devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
2441 devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
2442
2443 switch (devctl2) {
2444 case IXGBE_PCIDEVCTRL2_65_130ms:
2445 pollcnt = 1300; /* 130 millisec */
2446 break;
2447 case IXGBE_PCIDEVCTRL2_260_520ms:
2448 pollcnt = 5200; /* 520 millisec */
2449 break;
2450 case IXGBE_PCIDEVCTRL2_1_2s:
2451 pollcnt = 20000; /* 2 sec */
2452 break;
2453 case IXGBE_PCIDEVCTRL2_4_8s:
2454 pollcnt = 80000; /* 8 sec */
2455 break;
2456 case IXGBE_PCIDEVCTRL2_17_34s:
 2457 pollcnt = 340000; /* 34 sec */
2458 break;
2459 case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
2460 case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
2461 case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
2462 case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
2463 default:
2464 pollcnt = 800; /* 80 millisec minimum */
2465 break;
2466 }
2467
2468 /* add 10% to spec maximum */
2469 return (pollcnt * 11) / 10;
2470}
2471
2472/**
2440 * ixgbe_disable_pcie_master - Disable PCI-express master access 2473 * ixgbe_disable_pcie_master - Disable PCI-express master access
2441 * @hw: pointer to hardware structure 2474 * @hw: pointer to hardware structure
2442 * 2475 *
@@ -2447,16 +2480,16 @@ out:
2447 **/ 2480 **/
2448static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 2481static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2449{ 2482{
2450 struct ixgbe_adapter *adapter = hw->back;
2451 s32 status = 0; 2483 s32 status = 0;
2452 u32 i; 2484 u32 i, poll;
2453 u16 value; 2485 u16 value;
2454 2486
2455 /* Always set this bit to ensure any future transactions are blocked */ 2487 /* Always set this bit to ensure any future transactions are blocked */
2456 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); 2488 IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
2457 2489
2458 /* Exit if master requests are blocked */ 2490 /* Exit if master requests are blocked */
2459 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) 2491 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
2492 ixgbe_removed(hw->hw_addr))
2460 goto out; 2493 goto out;
2461 2494
2462 /* Poll for master request bit to clear */ 2495 /* Poll for master request bit to clear */
@@ -2481,10 +2514,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
2481 * Before proceeding, make sure that the PCIe block does not have 2514 * Before proceeding, make sure that the PCIe block does not have
2482 * transactions pending. 2515 * transactions pending.
2483 */ 2516 */
2484 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 2517 poll = ixgbe_pcie_timeout_poll(hw);
2518 for (i = 0; i < poll; i++) {
2485 udelay(100); 2519 udelay(100);
2486 pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS, 2520 value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
2487 &value); 2521 if (ixgbe_removed(hw->hw_addr))
2522 goto out;
2488 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) 2523 if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
2489 goto out; 2524 goto out;
2490 } 2525 }
@@ -2564,6 +2599,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
2564} 2599}
2565 2600
2566/** 2601/**
2602 * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
2603 * @hw: pointer to hardware structure
2604 * @reg_val: Value we read from AUTOC
2605 * @locked: bool to indicate whether the SW/FW lock should be taken. Never
 2606 * true in the generic case.
2607 *
 2608 * The default case requires no protection, so just do the register read.
2609 **/
2610s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
2611{
2612 *locked = false;
2613 *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2614 return 0;
2615}
2616
2617/**
2618 * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
2619 * @hw: pointer to hardware structure
2620 * @reg_val: value to write to AUTOC
2621 * @locked: bool to indicate whether the SW/FW lock was already taken by
2622 * previous read.
2623 **/
2624s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
2625{
2626 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
2627 return 0;
2628}
2629
2630/**
2567 * ixgbe_disable_rx_buff_generic - Stops the receive data path 2631 * ixgbe_disable_rx_buff_generic - Stops the receive data path
2568 * @hw: pointer to hardware structure 2632 * @hw: pointer to hardware structure
2569 * 2633 *
@@ -2641,6 +2705,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2641 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2705 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2642 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2706 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2643 s32 ret_val = 0; 2707 s32 ret_val = 0;
2708 bool locked = false;
2644 2709
2645 /* 2710 /*
2646 * Link must be up to auto-blink the LEDs; 2711 * Link must be up to auto-blink the LEDs;
@@ -2649,28 +2714,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
2649 hw->mac.ops.check_link(hw, &speed, &link_up, false); 2714 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2650 2715
2651 if (!link_up) { 2716 if (!link_up) {
2652 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2717 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2653 * LESM is on. 2718 if (ret_val)
2654 */ 2719 goto out;
2655 bool got_lock = false;
2656
2657 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2658 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2659 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2660 IXGBE_GSSR_MAC_CSR_SM);
2661 if (ret_val)
2662 goto out;
2663 2720
2664 got_lock = true;
2665 }
2666 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2721 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2667 autoc_reg |= IXGBE_AUTOC_FLU; 2722 autoc_reg |= IXGBE_AUTOC_FLU;
2668 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 2723
2724 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2725 if (ret_val)
2726 goto out;
2727
2669 IXGBE_WRITE_FLUSH(hw); 2728 IXGBE_WRITE_FLUSH(hw);
2670 2729
2671 if (got_lock)
2672 hw->mac.ops.release_swfw_sync(hw,
2673 IXGBE_GSSR_MAC_CSR_SM);
2674 usleep_range(10000, 20000); 2730 usleep_range(10000, 20000);
2675 } 2731 }
2676 2732
@@ -2690,33 +2746,21 @@ out:
2690 **/ 2746 **/
2691s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) 2747s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2692{ 2748{
2693 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 2749 u32 autoc_reg = 0;
2694 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 2750 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
2695 s32 ret_val = 0; 2751 s32 ret_val = 0;
2696 bool got_lock = false; 2752 bool locked = false;
2697 2753
2698 /* Need the SW/FW semaphore around AUTOC writes if 82599 and 2754 ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
2699 * LESM is on. 2755 if (ret_val)
2700 */ 2756 goto out;
2701 if ((hw->mac.type == ixgbe_mac_82599EB) &&
2702 ixgbe_verify_lesm_fw_enabled_82599(hw)) {
2703 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
2704 IXGBE_GSSR_MAC_CSR_SM);
2705 if (ret_val)
2706 goto out;
2707
2708 got_lock = true;
2709 }
2710 2757
2711 autoc_reg &= ~IXGBE_AUTOC_FLU; 2758 autoc_reg &= ~IXGBE_AUTOC_FLU;
2712 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 2759 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2713 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2714
2715 if (hw->mac.type == ixgbe_mac_82599EB)
2716 ixgbe_reset_pipeline_82599(hw);
2717 2760
2718 if (got_lock) 2761 ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
2719 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 2762 if (ret_val)
2763 goto out;
2720 2764
2721 led_reg &= ~IXGBE_LED_MODE_MASK(index); 2765 led_reg &= ~IXGBE_LED_MODE_MASK(index);
2722 led_reg &= ~IXGBE_LED_BLINK(index); 2766 led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2861,6 @@ san_mac_addr_clr:
2817 **/ 2861 **/
2818u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2862u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2819{ 2863{
2820 struct ixgbe_adapter *adapter = hw->back;
2821 u16 msix_count = 1; 2864 u16 msix_count = 1;
2822 u16 max_msix_count; 2865 u16 max_msix_count;
2823 u16 pcie_offset; 2866 u16 pcie_offset;
@@ -2836,7 +2879,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2836 return msix_count; 2879 return msix_count;
2837 } 2880 }
2838 2881
2839 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count); 2882 msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
2883 if (ixgbe_removed(hw->hw_addr))
2884 msix_count = 0;
2840 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2885 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2841 2886
2842 /* MSI-X count is zero-based in HW */ 2887 /* MSI-X count is zero-based in HW */
@@ -2868,6 +2913,9 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2868 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); 2913 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2869 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar)); 2914 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2870 2915
2916 if (ixgbe_removed(hw->hw_addr))
2917 goto done;
2918
2871 if (!mpsar_lo && !mpsar_hi) 2919 if (!mpsar_lo && !mpsar_hi)
2872 goto done; 2920 goto done;
2873 2921
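
ixgbe_pcie_timeout_poll()'s mapping is mechanical: take the upper bound of the completion-timeout range encoded in Device Control 2, express it in 100-microsecond poll units with a floor of 800 (80 ms), then add 10% headroom. A standalone sketch of that arithmetic, parameterized by the range maximum in milliseconds:

#include <stdint.h>

/* Sketch: derive a poll count (units of 100 us) from a completion
 * timeout upper bound given in milliseconds. */
static uint32_t timeout_to_pollcnt(uint32_t range_max_ms)
{
	uint32_t pollcnt = range_max_ms * 10;	/* ms -> 100 us units */

	if (pollcnt < 800)	/* never poll for less than 80 ms */
		pollcnt = 800;

	return (pollcnt * 11) / 10;	/* spec maximum plus 10% */
}

/* e.g. the 65..130 ms range gives timeout_to_pollcnt(130) == 1430 polls */
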
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f2e3919750ec..f12c40fb5537 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 10 Gigabit PCI Express Linux driver 3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2013 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -98,6 +99,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
98 bool *link_up, bool link_up_wait_to_complete); 99 bool *link_up, bool link_up_wait_to_complete);
99s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, 100s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
100 u16 *wwpn_prefix); 101 u16 *wwpn_prefix);
102
 103s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val);
104s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
105
101s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 106s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
102s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 107s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
103void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); 108void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -106,10 +111,10 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
106s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, 111s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
107 u8 build, u8 ver); 112 u8 build, u8 ver);
108void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); 113void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
114bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
109 115
110void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 116void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
111 u32 headroom, int strategy); 117 u32 headroom, int strategy);
112s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
113 118
114#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 119#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
115#define IXGBE_EMC_INTERNAL_DATA 0x00 120#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -125,6 +130,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
125s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); 130s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
126 131
127#define IXGBE_FAILED_READ_REG 0xffffffffU 132#define IXGBE_FAILED_READ_REG 0xffffffffU
133#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
134#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
135
136u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
137void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value);
128 138
129static inline bool ixgbe_removed(void __iomem *addr) 139static inline bool ixgbe_removed(void __iomem *addr)
130{ 140{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 05e23b80b5e3..bdb99b3b0f30 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d71d9ce3e394..d5a1e3db0774 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index c5933f6dceee..472b0f450bf9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 043307024c4a..6c55c14d082a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -1127,10 +1128,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 #ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
@@ -1155,10 +1156,10 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			data[i] = ring->stats.packets;
 			data[i+1] = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 		i += 2;
 #ifdef BP_EXTENDED_STATS
 		data[i] = ring->stats.yields;
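
The two hunks above move the ethtool stats readers from the _bh to the _irq flavor of the u64_stats helpers, so the retry loop stays correct when the writer can run from (soft)irq context. A hedged sketch of the seqcount-style writer/reader pairing these helpers implement, using the u64_stats_sync API of this kernel generation; the my_ring type and both functions are hypothetical stand-ins, not ixgbe code:

#include <linux/u64_stats_sync.h>

struct my_ring {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* writer side, e.g. the NAPI poll loop */
static void my_ring_account(struct my_ring *ring, u64 pkts, u64 len)
{
	u64_stats_update_begin(&ring->syncp);
	ring->packets += pkts;
	ring->bytes += len;
	u64_stats_update_end(&ring->syncp);
}

/* reader side: retry until a snapshot unbroken by a writer is seen */
static void my_ring_snapshot(struct my_ring *ring, u64 *pkts, u64 *len)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		*pkts = ring->packets;
		*len = ring->bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
}
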
@@ -1247,6 +1248,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
 	struct ixgbe_hw *hw = &adapter->hw;
 	bool link_up;
 	u32 link_speed = 0;
+
+	if (ixgbe_removed(hw->hw_addr)) {
+		*data = 1;
+		return 1;
+	}
 	*data = 0;
 
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1975,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 		data[1] = 1;
 		data[2] = 1;
 		data[3] = 1;
+		data[4] = 1;
 		eth_test->flags |= ETH_TEST_FL_FAILED;
 		return;
 	}
@@ -1988,6 +1995,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
 			data[1] = 1;
 			data[2] = 1;
 			data[3] = 1;
+			data[4] = 1;
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 			clear_bit(__IXGBE_TESTING,
 				  &adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index f58db453a97e..25a3dfef33e8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -407,13 +408,13 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 
 	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
 	/* return 0 to bypass going to ULD for DDPed data */
-	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
+	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
 		/* update length of DDPed data */
 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 		rc = 0;
 		break;
 	/* unmap the sg list when FCPRSP is received */
-	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
+	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
 		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
 			     ddp->sgc, DMA_FROM_DEVICE);
 		ddp->err = ddp_err;
@@ -421,14 +422,14 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 		ddp->sgc = 0;
 		/* fall through */
 	/* if DDP length is present pass it through to ULD */
-	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
+	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
 		/* update length of DDPed data */
 		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 		if (ddp->len)
 			rc = ddp->len;
 		break;
 	/* no match will return as an error */
-	case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
+	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
 	default:
 		break;
 	}
@@ -585,7 +586,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
 	struct dma_pool *pool;
 	char pool_name[32];
 
-	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);
 
 	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
 			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 3a02759b5e95..b16cc786750d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaaa160a..2067d392cc3d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -698,7 +699,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 				       int vectors)
 {
-	int err, vector_threshold;
+	int vector_threshold;
 
 	/* We'll want at least 2 (vector_threshold):
 	 * 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +713,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 	 * Right now, we simply care about how many we'll get; we'll
 	 * set them up later while requesting irq's.
 	 */
-	while (vectors >= vector_threshold) {
-		err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
-				      vectors);
-		if (!err) /* Success in acquiring all requested vectors. */
-			break;
-		else if (err < 0)
-			vectors = 0; /* Nasty failure, quit now */
-		else /* err == number of vectors we should try again with */
-			vectors = err;
-	}
+	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+					vector_threshold, vectors);
 
-	if (vectors < vector_threshold) {
+	if (vectors < 0) {
 		/* Can't allocate enough MSI-X interrupts? Oh well.
 		 * This just means we'll go with either a single MSI
 		 * vector or fall back to legacy interrupts.
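
The hunk above collapses the open-coded pci_enable_msix() retry loop into a single pci_enable_msix_range() call, which takes an explicit [minvec, maxvec] window and either returns the vector count actually allocated or a negative errno. A hedged sketch of that contract in isolation; my_acquire_msix and its parameters are placeholders, not driver code:

#include <linux/pci.h>

static int my_acquire_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int min_vecs, int max_vecs)
{
	int nvecs;

	/* One call: success returns a count in [min_vecs, max_vecs],
	 * failure returns a negative errno -- no retry loop needed.
	 */
	nvecs = pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
	if (nvecs < 0)
		return nvecs;	/* caller falls back to MSI or INTx */

	return nvecs;		/* number of vectors actually granted */
}
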
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18076c4178b4..9e5a36612432 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -67,7 +68,7 @@ static char ixgbe_default_device_descr[] =
 #define DRV_VERSION "3.19.1-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
-				"Copyright (c) 1999-2013 Intel Corporation.";
+				"Copyright (c) 1999-2014 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
 	[board_82598] = &ixgbe_82598_info,
@@ -151,6 +152,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
 					  u32 reg, u16 *value)
 {
@@ -169,6 +172,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
 		return -1;
 
 	pcie_capability_read_word(parent_dev, reg, value);
+	if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+	    ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+		return -1;
 	return 0;
 }
 
@@ -313,6 +319,57 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
 	ixgbe_remove_adapter(hw);
 }
 
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+	u16 value;
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+	if (value == IXGBE_FAILED_READ_CFG_WORD) {
+		ixgbe_remove_adapter(hw);
+		return true;
+	}
+	return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	u16 value;
+
+	if (ixgbe_removed(hw->hw_addr))
+		return IXGBE_FAILED_READ_CFG_WORD;
+	pci_read_config_word(adapter->pdev, reg, &value);
+	if (value == IXGBE_FAILED_READ_CFG_WORD &&
+	    ixgbe_check_cfg_remove(hw, adapter->pdev))
+		return IXGBE_FAILED_READ_CFG_WORD;
+	return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+	u32 value;
+
+	if (ixgbe_removed(hw->hw_addr))
+		return IXGBE_FAILED_READ_CFG_DWORD;
+	pci_read_config_dword(adapter->pdev, reg, &value);
+	if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+	    ixgbe_check_cfg_remove(hw, adapter->pdev))
+		return IXGBE_FAILED_READ_CFG_DWORD;
+	return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
+void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+	struct ixgbe_adapter *adapter = hw->back;
+
+	if (ixgbe_removed(hw->hw_addr))
+		return;
+	pci_write_config_word(adapter->pdev, reg, value);
+}
+
 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
 {
 	BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
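
The helpers added above all lean on one property of PCIe: reads from a surprise-removed device return all ones, and no valid vendor ID is 0xffff. A compact sketch of that detection idiom, assuming only core PCI accessors; my_device_gone is a hypothetical name, not part of this driver:

#include <linux/pci.h>

static bool my_device_gone(struct pci_dev *pdev)
{
	u16 vendor;

	/* Config reads from a removed device come back as all ones,
	 * so a vendor ID of 0xffff is an unambiguous "device gone" probe.
	 */
	pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor);
	return vendor == 0xffff;
}
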
@@ -1264,7 +1321,9 @@ static inline void ixgbe_rx_hash(struct ixgbe_ring *ring,
 				 struct sk_buff *skb)
 {
 	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		skb_set_hash(skb,
+			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
+			     PKT_HASH_TYPE_L3);
 }
 
 #ifdef IXGBE_FCOE
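
skb_set_hash() replaces the open-coded skb->rxhash assignment and additionally records how trustworthy the hash is; L3 here says the hash covers only network-layer addresses, not the full 4-tuple. A minimal sketch of the call; my_set_rx_hash and hw_hash are placeholders:

#include <linux/skbuff.h>

static void my_set_rx_hash(struct sk_buff *skb, u32 hw_hash)
{
	/* PKT_HASH_TYPE_L3 tells the stack not to treat this as an
	 * L4 (4-tuple) hash when deciding whether it can reuse it.
	 */
	skb_set_hash(skb, hw_hash, PKT_HASH_TYPE_L3);
}
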
@@ -1480,7 +1539,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 	hdr.network += ETH_HLEN;
 
 	/* handle any vlan tag if present */
-	if (protocol == __constant_htons(ETH_P_8021Q)) {
+	if (protocol == htons(ETH_P_8021Q)) {
 		if ((hdr.network - data) > (max_len - VLAN_HLEN))
 			return max_len;
 
@@ -1489,7 +1548,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 	}
 
 	/* handle L3 protocols */
-	if (protocol == __constant_htons(ETH_P_IP)) {
+	if (protocol == htons(ETH_P_IP)) {
 		if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
 			return max_len;
 
@@ -1503,7 +1562,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 		/* record next protocol if header is present */
 		if (!(hdr.ipv4->frag_off & htons(IP_OFFSET)))
 			nexthdr = hdr.ipv4->protocol;
-	} else if (protocol == __constant_htons(ETH_P_IPV6)) {
+	} else if (protocol == htons(ETH_P_IPV6)) {
 		if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
 			return max_len;
 
@@ -1511,7 +1570,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
 		nexthdr = hdr.ipv6->nexthdr;
 		hlen = sizeof(struct ipv6hdr);
 #ifdef IXGBE_FCOE
-	} else if (protocol == __constant_htons(ETH_P_FCOE)) {
+	} else if (protocol == htons(ETH_P_FCOE)) {
 		if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
 			return max_len;
 		hlen = FCOE_HEADER_LEN;
@@ -2026,7 +2085,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
-	do {
+	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct sk_buff *skb;
 
@@ -2101,7 +2160,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		/* update budget accounting */
 		total_rx_packets++;
-	} while (likely(total_rx_packets < budget));
+	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
@@ -2630,9 +2689,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
 	switch (hw->mac.type) {
 	case ixgbe_mac_82599EB:
 	case ixgbe_mac_X540:
-		if (eicr & IXGBE_EICR_ECC)
-			e_info(link, "Received unrecoverable ECC Err, please "
-			       "reboot\n");
+		if (eicr & IXGBE_EICR_ECC) {
+			e_info(link, "Received ECC Err, initiating reset\n");
+			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+			ixgbe_service_event_schedule(adapter);
+			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+		}
 		/* Handle Flow Director Full threshold interrupt */
 		if (eicr & IXGBE_EICR_FLOW_DIR) {
 			int reinit_count = 0;
@@ -2846,9 +2908,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 		ixgbe_check_sfp_event(adapter, eicr);
 		/* Fall through */
 	case ixgbe_mac_X540:
-		if (eicr & IXGBE_EICR_ECC)
-			e_info(link, "Received unrecoverable ECC err, please "
-			       "reboot\n");
+		if (eicr & IXGBE_EICR_ECC) {
+			e_info(link, "Received ECC Err, initiating reset\n");
+			adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+			ixgbe_service_event_schedule(adapter);
+			IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+		}
 		ixgbe_check_overtemp_event(adapter, eicr);
 		break;
 	default:
@@ -4590,8 +4655,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
 static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct net_device *upper;
-	struct list_head *iter;
 	int err;
 	u32 ctrl_ext;
 
@@ -4633,19 +4696,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
 		e_crit(drv, "Fan has stopped, replace the adapter\n");
 	}
 
-	/* enable transmits */
-	netif_tx_start_all_queues(adapter->netdev);
-
-	/* enable any upper devices */
-	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
-		if (netif_is_macvlan(upper)) {
-			struct macvlan_dev *vlan = netdev_priv(upper);
-
-			if (vlan->fwd_priv)
-				netif_tx_start_all_queues(upper);
-		}
-	}
-
 	/* bring the link up in the watchdog, this could race with our first
 	 * link up interrupt but shouldn't be a problem */
 	adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
@@ -5502,6 +5552,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
 	struct net_device *netdev = adapter->netdev;
 	u32 err;
 
+	adapter->hw.hw_addr = adapter->io_addr;
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	/*
@@ -6016,6 +6067,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct net_device *upper;
+	struct list_head *iter;
 	u32 link_speed = adapter->link_speed;
 	bool flow_rx, flow_tx;
 
@@ -6067,6 +6120,21 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
 	netif_carrier_on(netdev);
 	ixgbe_check_vf_rate_limit(adapter);
 
+	/* enable transmits */
+	netif_tx_wake_all_queues(adapter->netdev);
+
+	/* enable any upper devices */
+	rtnl_lock();
+	netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+		if (netif_is_macvlan(upper)) {
+			struct macvlan_dev *vlan = netdev_priv(upper);
+
+			if (vlan->fwd_priv)
+				netif_tx_wake_all_queues(upper);
+		}
+	}
+	rtnl_unlock();
+
 	/* update the default user priority for VFs */
 	ixgbe_update_default_up(adapter);
 
@@ -6454,7 +6522,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-	if (first->protocol == __constant_htons(ETH_P_IP)) {
+	if (first->protocol == htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
@@ -6514,12 +6582,12 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
 	} else {
 		u8 l4_hdr = 0;
 		switch (first->protocol) {
-		case __constant_htons(ETH_P_IP):
+		case htons(ETH_P_IP):
 			vlan_macip_lens |= skb_network_header_len(skb);
 			type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 			l4_hdr = ip_hdr(skb)->protocol;
 			break;
-		case __constant_htons(ETH_P_IPV6):
+		case htons(ETH_P_IPV6):
 			vlan_macip_lens |= skb_network_header_len(skb);
 			l4_hdr = ipv6_hdr(skb)->nexthdr;
 			break;
@@ -6794,9 +6862,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 	hdr.network = skb_network_header(first->skb);
 
 	/* Currently only IPv4/IPv6 with TCP is supported */
-	if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
+	if ((first->protocol != htons(ETH_P_IPV6) ||
 	     hdr.ipv6->nexthdr != IPPROTO_TCP) &&
-	    (first->protocol != __constant_htons(ETH_P_IP) ||
+	    (first->protocol != htons(ETH_P_IP) ||
 	     hdr.ipv4->protocol != IPPROTO_TCP))
 		return;
 
@@ -6829,12 +6897,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
 	 * and write the value to source port portion of compressed dword
 	 */
 	if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
-		common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
+		common.port.src ^= th->dest ^ htons(ETH_P_8021Q);
 	else
 		common.port.src ^= th->dest ^ first->protocol;
 	common.port.dst ^= th->source;
 
-	if (first->protocol == __constant_htons(ETH_P_IP)) {
+	if (first->protocol == htons(ETH_P_IP)) {
 		input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
 	} else {
@@ -6900,8 +6968,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
 	 * or FIP and we have FCoE enabled on the adapter
 	 */
 	switch (vlan_get_protocol(skb)) {
-	case __constant_htons(ETH_P_FCOE):
-	case __constant_htons(ETH_P_FIP):
+	case htons(ETH_P_FCOE):
+	case htons(ETH_P_FIP):
 		adapter = netdev_priv(dev);
 
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
@@ -6962,7 +7030,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_HW_VLAN;
 	/* else if it is a SW VLAN check the next protocol and store the tag */
-	} else if (protocol == __constant_htons(ETH_P_8021Q)) {
+	} else if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_hdr *vhdr, _vhdr;
 		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
 		if (!vhdr)
@@ -7021,7 +7089,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 
 #ifdef IXGBE_FCOE
 	/* setup tx offload for FCoE */
-	if ((protocol == __constant_htons(ETH_P_FCOE)) &&
+	if ((protocol == htons(ETH_P_FCOE)) &&
 	    (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
 		tso = ixgbe_fso(tx_ring, first, &hdr_len);
 		if (tso < 0)
@@ -7143,7 +7211,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
 
 	switch (cmd) {
 	case SIOCSHWTSTAMP:
-		return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+		return ixgbe_ptp_set_ts_config(adapter, req);
+	case SIOCGHWTSTAMP:
+		return ixgbe_ptp_get_ts_config(adapter, req);
 	default:
 		return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
 	}
@@ -7234,10 +7304,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->rx_packets += packets;
 			stats->rx_bytes += bytes;
 		}
@@ -7250,10 +7320,10 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
 		if (ring) {
 			do {
-				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				start = u64_stats_fetch_begin_irq(&ring->syncp);
 				packets = ring->stats.packets;
 				bytes = ring->stats.bytes;
-			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 			stats->tx_packets += packets;
 			stats->tx_bytes += bytes;
 		}
@@ -7792,6 +7862,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
 	case IXGBE_DEV_ID_82599_SFP:
 		/* Only these subdevices could supports WOL */
 		switch (subdevice_id) {
+		case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
 		case IXGBE_SUBDEV_ID_82599_560FLR:
 			/* only support first port */
 			if (hw->bus.func != 0)
@@ -7969,10 +8040,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_sw_init;
 
-	/* Cache if MNG FW is up so we don't have to read the REG later */
-	if (hw->mac.ops.mng_fw_enabled)
-		hw->mng_fw_enabled = hw->mac.ops.mng_fw_enabled(hw);
-
 	/* Make it possible the adapter to be woken up via WOL */
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_82599EB:
@@ -8223,7 +8290,7 @@ skip_sriov:
 	ixgbe_dbg_adapter_init(adapter);
 
 	/* Need link setup for MNG FW, else wait for IXGBE_UP */
-	if (hw->mng_fw_enabled && hw->mac.ops.setup_link)
+	if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
 		hw->mac.ops.setup_link(hw,
 			IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
 			true);
@@ -8331,6 +8398,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 	struct net_device *netdev = adapter->netdev;
 
 #ifdef CONFIG_PCI_IOV
+	struct ixgbe_hw *hw = &adapter->hw;
 	struct pci_dev *bdev, *vfdev;
 	u32 dw0, dw1, dw2, dw3;
 	int vf, pos;
@@ -8351,10 +8419,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 	if (!pos)
 		goto skip_bad_vf_detection;
 
-	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
-	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
-	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
-	pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+	dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+	dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+	dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+	dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+	if (ixgbe_removed(hw->hw_addr))
+		goto skip_bad_vf_detection;
 
 	req_id = dw1 >> 16;
 	/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8446,6 +8516,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 		e_err(probe, "Cannot re-enable PCI device after reset.\n");
 		result = PCI_ERS_RESULT_DISCONNECT;
 	} else {
+		adapter->hw.hw_addr = adapter->io_addr;
 		pci_set_master(pdev);
 		pci_restore_state(pdev);
 		pci_save_state(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index cc3101afd29f..f5c6af2b891b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index e44ff47659b5..a9b9ad69ed0e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 132557c318f8..ad51c12cb26a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -98,6 +99,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 }
 
 /**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MAC's that don't
+ * have this bit just return false since the link can not be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+	u32 mmngc;
+
+	/* If we don't have this bit, it can't be blocking */
+	if (hw->mac.type == ixgbe_mac_82598EB)
+		return false;
+
+	mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+	if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+		hw_dbg(hw, "MNG_VETO bit detected.\n");
+		return true;
+	}
+
+	return false;
+}
+
+/**
  * ixgbe_get_phy_id - Get the phy type
  * @hw: pointer to hardware structure
  *
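
The later hunks in this file show the intended call pattern for the new helper: every PHY reset or autonegotiation-restart path bails out early when manageability firmware holds the veto. A hedged sketch of that guard in isolation; my_reset_phy is a hypothetical caller, not a function from this patch:

static s32 my_reset_phy(struct ixgbe_hw *hw)
{
	/* MNG FW may depend on the current PHY link; honor its veto
	 * instead of yanking the link out from under it.
	 */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	return hw->phy.ops.reset(hw);
}
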
@@ -172,6 +199,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
 	    (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
 		goto out;
 
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
 	/*
 	 * Perform soft PHY reset to the PHY_XS.
 	 * This will cause a soft reset to the PHY
@@ -476,6 +507,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 				      autoneg_reg);
 	}
 
+	/* Blocked by MNG FW so don't reset PHY */
+	if (ixgbe_check_reset_blocked(hw))
+		return status;
+
 	/* Restart PHY autonegotiation and wait for completion */
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
 			     MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +717,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 				      autoneg_reg);
 	}
 
+	/* Blocked by MNG FW so don't reset PHY */
+	if (ixgbe_check_reset_blocked(hw))
+		return status;
+
 	/* Restart PHY autonegotiation and wait for completion */
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1,
 			     MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +798,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 	s32 ret_val = 0;
 	u32 i;
 
+	/* Blocked by MNG FW so bail */
+	if (ixgbe_check_reset_blocked(hw))
+		goto out;
+
 	hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
 
 	/* reset the PHY and poll for completion */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index fffcbdd2bf0e..4a456c974ef2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -65,9 +66,6 @@
 #define IXGBE_SFF_1GBASET_CAPABLE		0x8
 #define IXGBE_SFF_10GBASESR_CAPABLE		0x10
 #define IXGBE_SFF_10GBASELR_CAPABLE		0x20
-#define IXGBE_SFF_SOFT_RS_SELECT_MASK		0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_10G		0x8
-#define IXGBE_SFF_SOFT_RS_SELECT_1G		0x0
 #define IXGBE_SFF_ADDRESSING_MODE		0x4
 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE		0x1
 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE		0x8
@@ -79,7 +77,6 @@
 #define IXGBE_I2C_EEPROM_STATUS_PASS		0x1
 #define IXGBE_I2C_EEPROM_STATUS_FAIL		0x2
 #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS	0x3
-
 /* Flow control defines */
 #define IXGBE_TAF_SYM_PAUSE			0x400
 #define IXGBE_TAF_ASM_PAUSE			0x800
@@ -131,6 +128,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 					       ixgbe_link_speed *speed,
 					       bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a1a7d8..44ac9aef6a8d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -576,14 +577,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
 	shhwtstamps->hwtstamp = ns_to_ktime(ns);
 }
 
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+	struct hwtstamp_config *config = &adapter->tstamp_config;
+
+	return copy_to_user(ifr->ifr_data, config,
+			    sizeof(*config)) ? -EFAULT : 0;
+}
+
 /**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * ixgbe_ptp_set_ts_config - control hardware time stamping
  * @adapter: pointer to adapter struct
  * @ifreq: ioctl data
- * @cmd: particular ioctl requested
  *
  * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
  * when no packet needs it. At most one packet in the queue may be
  * marked for time stamping, otherwise it would be impossible to tell
  * for sure to which packet the hardware time stamp belongs.
@@ -599,8 +607,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
  * Event mode. This more accurately tells the user what the hardware is going
  * to do anyways.
  */
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
-			     struct ifreq *ifr, int cmd)
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct hwtstamp_config config;
@@ -702,6 +709,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
 	regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
 	regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
 
+	/* save these settings for future reference */
+	memcpy(&adapter->tstamp_config, &config,
+	       sizeof(adapter->tstamp_config));
+
 	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
 		-EFAULT : 0;
 }
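
With ixgbe_ptp_get_ts_config() wired up to SIOCGHWTSTAMP in ixgbe_ioctl(), userspace can read back the timestamping config that ixgbe_ptp_set_ts_config() saved in adapter->tstamp_config instead of blindly re-setting it. A hedged userspace sketch of that query, assuming the standard hwtstamp_config ioctl ABI; get_hwtstamp is a placeholder name and error handling is elided:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int get_hwtstamp(int sock, const char *ifname,
			struct hwtstamp_config *cfg)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	memset(cfg, 0, sizeof(*cfg));
	ifr.ifr_data = (char *)cfg;	/* kernel copies the config back */

	return ioctl(sock, SIOCGHWTSTAMP, &ifr);
}
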
@@ -809,6 +820,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
 	IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
 	IXGBE_WRITE_FLUSH(hw);
 
+	/* Reset the saved tstamp_config */
+	memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
 	ixgbe_ptp_start_cyclecounter(adapter);
 
 	spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +854,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 
 	switch (adapter->hw.mac.type) {
 	case ixgbe_mac_X540:
-		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+		snprintf(adapter->ptp_caps.name,
+			 sizeof(adapter->ptp_caps.name),
+			 "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +870,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
 		adapter->ptp_caps.enable = ixgbe_ptp_enable;
 		break;
 	case ixgbe_mac_82599EB:
-		snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+		snprintf(adapter->ptp_caps.name,
+			 sizeof(adapter->ptp_caps.name),
+			 "%s", netdev->name);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 250000000;
 		adapter->ptp_caps.n_alarm = 0;
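
The two snprintf() fixes above swap a hard-coded 16 for sizeof of the destination array, so the bound stays correct if the field is ever resized. The idiom, sketched on a hypothetical struct (my_caps/my_set_name are illustrative names only):

#include <stdio.h>

struct my_caps {
	char name[16];
};

static void my_set_name(struct my_caps *caps, const char *src)
{
	/* sizeof(caps->name) tracks the array; snprintf truncates as
	 * needed and always NUL-terminates the result.
	 */
	snprintf(caps->name, sizeof(caps->name), "%s", src);
}
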
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dff0977876f7..e6c68d396c99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 8bd29190514e..139eaddfb2ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index e74ae3682733..ef6df3d6437e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0d39cfc4a3bf..8a6ff2423f07 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -20,6 +20,7 @@
   the file called "COPYING".
 
   Contact Information:
+  Linux NICS <linux.nics@intel.com>
   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
@@ -54,6 +55,7 @@
 #define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152a
 #define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
 #define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0   0x1071
 #define IXGBE_SUBDEV_ID_82599_RNDC       0x1F72
 #define IXGBE_SUBDEV_ID_82599_560FLR     0x17D0
 #define IXGBE_SUBDEV_ID_82599_SP_560FLR  0x211B
@@ -1609,6 +1611,9 @@ enum {
 #define IXGBE_MACC_FS        0x00040000
 #define IXGBE_MAC_RX2TX_LPBK 0x00000002
 
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO  0x00000001
+
 /* LINKS Bit Masks */
 #define IXGBE_LINKS_KX_AN_COMP  0x80000000
 #define IXGBE_LINKS_UP          0x40000000
@@ -1788,6 +1793,9 @@ enum {
 #define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
 #define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
 
+#define IXGBE_EEPROM_CTRL_2	1 /* EEPROM CTRL word 2 */
+#define IXGBE_EEPROM_CCD_BIT	2 /* EEPROM Core Clock Disable bit */
+
 #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
 #define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
 #endif
@@ -1853,8 +1861,19 @@ enum {
 #define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
 #define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
 
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK	0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def	0x0
+#define IXGBE_PCIDEVCTRL2_50_100us	0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms		0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms	0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms	0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms	0x9
+#define IXGBE_PCIDEVCTRL2_1_2s		0xa
+#define IXGBE_PCIDEVCTRL2_4_8s		0xd
+#define IXGBE_PCIDEVCTRL2_17_34s	0xe
+
 /* Number of 100 microseconds we wait for PCI Express master disable */
 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT	800
 
 /* RAH */
 #define IXGBE_RAH_VIND_MASK     0x003C0000
@@ -2645,7 +2664,6 @@ enum ixgbe_sfp_type {
 enum ixgbe_media_type {
 	ixgbe_media_type_unknown = 0,
 	ixgbe_media_type_fiber,
-	ixgbe_media_type_fiber_fixed,
 	ixgbe_media_type_fiber_qsfp,
 	ixgbe_media_type_fiber_lco,
 	ixgbe_media_type_copper,
@@ -2858,6 +2876,8 @@ struct ixgbe_mac_operations {
 	s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
 	s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
 	void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+	s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+	s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
 
 	/* Link */
 	void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2901,7 +2921,6 @@ struct ixgbe_mac_operations {
 	s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
 	s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
 	s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
-	bool (*mng_fw_enabled)(struct ixgbe_hw *hw);
 };
 
 struct ixgbe_phy_operations {
@@ -2957,7 +2976,6 @@ struct ixgbe_mac_info {
2957 u32 max_tx_queues; 2976 u32 max_tx_queues;
2958 u32 max_rx_queues; 2977 u32 max_rx_queues;
2959 u32 orig_autoc; 2978 u32 orig_autoc;
2960 u32 cached_autoc;
2961 u32 orig_autoc2; 2979 u32 orig_autoc2;
2962 bool orig_link_settings_stored; 2980 bool orig_link_settings_stored;
2963 bool autotry_restart; 2981 bool autotry_restart;
@@ -3033,7 +3051,6 @@ struct ixgbe_hw {
3033 bool adapter_stopped; 3051 bool adapter_stopped;
3034 bool force_full_reset; 3052 bool force_full_reset;
3035 bool allow_unsupported_sfp; 3053 bool allow_unsupported_sfp;
3036 bool mng_fw_enabled;
3037 bool wol_enabled; 3054 bool wol_enabled;
3038}; 3055};
3039 3056
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 24b80a6cfca4..188a5974b85c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -20,6 +20,7 @@
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 Linux NICS <linux.nics@intel.com>
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 26
@@ -61,6 +62,7 @@ static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; 62 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; 63 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; 64 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
65 mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; 66 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; 67 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); 68 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
@@ -187,7 +189,6 @@ static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
187 goto out; 189 goto out;
188 190
189 ret_val = ixgbe_start_hw_gen2(hw); 191 ret_val = ixgbe_start_hw_gen2(hw);
190 hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
191out: 192out:
192 return ret_val; 193 return ret_val;
193} 194}
@@ -854,7 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
854 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 855 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
855 .get_thermal_sensor_data = NULL, 856 .get_thermal_sensor_data = NULL,
856 .init_thermal_sensor_thresh = NULL, 857 .init_thermal_sensor_thresh = NULL,
857 .mng_fw_enabled = NULL, 858 .prot_autoc_read = &prot_autoc_read_generic,
859 .prot_autoc_write = &prot_autoc_write_generic,
858}; 860};
859 861
860static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 862static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f68b78c732a8..b2d002394e5d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -530,41 +530,55 @@ static const u32 register_test_patterns[] = {
530 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF 530 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
531}; 531};
532 532
533#define REG_PATTERN_TEST(R, M, W) \ 533static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
534{ \ 534 int reg, u32 mask, u32 write)
535 u32 pat, val, before; \ 535{
536 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \ 536 u32 pat, val, before;
537 before = readl(adapter->hw.hw_addr + R); \ 537
538 writel((register_test_patterns[pat] & W), \ 538 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
539 (adapter->hw.hw_addr + R)); \ 539 *data = 1;
540 val = readl(adapter->hw.hw_addr + R); \ 540 return true;
541 if (val != (register_test_patterns[pat] & W & M)) { \ 541 }
542 hw_dbg(&adapter->hw, \ 542 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
543 "pattern test reg %04X failed: got " \ 543 before = ixgbe_read_reg(&adapter->hw, reg);
544 "0x%08X expected 0x%08X\n", \ 544 ixgbe_write_reg(&adapter->hw, reg,
545 R, val, (register_test_patterns[pat] & W & M)); \ 545 register_test_patterns[pat] & write);
546 *data = R; \ 546 val = ixgbe_read_reg(&adapter->hw, reg);
547 writel(before, adapter->hw.hw_addr + R); \ 547 if (val != (register_test_patterns[pat] & write & mask)) {
548 return 1; \ 548 hw_dbg(&adapter->hw,
549 } \ 549 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
550 writel(before, adapter->hw.hw_addr + R); \ 550 reg, val,
551 } \ 551 register_test_patterns[pat] & write & mask);
552 *data = reg;
553 ixgbe_write_reg(&adapter->hw, reg, before);
554 return true;
555 }
556 ixgbe_write_reg(&adapter->hw, reg, before);
557 }
558 return false;
552} 559}
553 560
554#define REG_SET_AND_CHECK(R, M, W) \ 561static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
555{ \ 562 int reg, u32 mask, u32 write)
556 u32 val, before; \ 563{
557 before = readl(adapter->hw.hw_addr + R); \ 564 u32 val, before;
558 writel((W & M), (adapter->hw.hw_addr + R)); \ 565
559 val = readl(adapter->hw.hw_addr + R); \ 566 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
560 if ((W & M) != (val & M)) { \ 567 *data = 1;
561 pr_err("set/check reg %04X test failed: got 0x%08X expected " \ 568 return true;
562 "0x%08X\n", R, (val & M), (W & M)); \ 569 }
563 *data = R; \ 570 before = ixgbe_read_reg(&adapter->hw, reg);
564 writel(before, (adapter->hw.hw_addr + R)); \ 571 ixgbe_write_reg(&adapter->hw, reg, write & mask);
565 return 1; \ 572 val = ixgbe_read_reg(&adapter->hw, reg);
566 } \ 573 if ((write & mask) != (val & mask)) {
567 writel(before, (adapter->hw.hw_addr + R)); \ 574 pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
575 reg, (val & mask), write & mask);
576 *data = reg;
577 ixgbe_write_reg(&adapter->hw, reg, before);
578 return true;
579 }
580 ixgbe_write_reg(&adapter->hw, reg, before);
581 return false;
568} 582}
569 583
570static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) 584static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
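Converting the macros to real functions gives them a testable contract; a minimal calling sketch (the register offset and masks are made up for illustration):

    u64 failed_reg = 0;

    /* true on mismatch or surprise removal; the failing offset is
     * left in failed_reg and the original value is restored. */
    if (reg_pattern_test(adapter, &failed_reg,
                         0x0100 /* hypothetical offset */,
                         0xFFFFFFFF, 0xFFFFFFFF))
            hw_dbg(&adapter->hw, "pattern test failed at 0x%llx\n",
                   failed_reg);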
@@ -572,6 +586,12 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
572 const struct ixgbevf_reg_test *test; 586 const struct ixgbevf_reg_test *test;
573 u32 i; 587 u32 i;
574 588
589 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
590 dev_err(&adapter->pdev->dev,
591 "Adapter removed - register test blocked\n");
592 *data = 1;
593 return 1;
594 }
575 test = reg_test_vf; 595 test = reg_test_vf;
576 596
577 /* 597 /*
@@ -580,38 +600,47 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
580 */ 600 */
581 while (test->reg) { 601 while (test->reg) {
582 for (i = 0; i < test->array_len; i++) { 602 for (i = 0; i < test->array_len; i++) {
603 bool b = false;
604
583 switch (test->test_type) { 605 switch (test->test_type) {
584 case PATTERN_TEST: 606 case PATTERN_TEST:
585 REG_PATTERN_TEST(test->reg + (i * 0x40), 607 b = reg_pattern_test(adapter, data,
586 test->mask, 608 test->reg + (i * 0x40),
587 test->write); 609 test->mask,
610 test->write);
588 break; 611 break;
589 case SET_READ_TEST: 612 case SET_READ_TEST:
590 REG_SET_AND_CHECK(test->reg + (i * 0x40), 613 b = reg_set_and_check(adapter, data,
591 test->mask, 614 test->reg + (i * 0x40),
592 test->write); 615 test->mask,
616 test->write);
593 break; 617 break;
594 case WRITE_NO_TEST: 618 case WRITE_NO_TEST:
595 writel(test->write, 619 ixgbe_write_reg(&adapter->hw,
596 (adapter->hw.hw_addr + test->reg) 620 test->reg + (i * 0x40),
597 + (i * 0x40)); 621 test->write);
598 break; 622 break;
599 case TABLE32_TEST: 623 case TABLE32_TEST:
600 REG_PATTERN_TEST(test->reg + (i * 4), 624 b = reg_pattern_test(adapter, data,
601 test->mask, 625 test->reg + (i * 4),
602 test->write); 626 test->mask,
627 test->write);
603 break; 628 break;
604 case TABLE64_TEST_LO: 629 case TABLE64_TEST_LO:
605 REG_PATTERN_TEST(test->reg + (i * 8), 630 b = reg_pattern_test(adapter, data,
606 test->mask, 631 test->reg + (i * 8),
607 test->write); 632 test->mask,
633 test->write);
608 break; 634 break;
609 case TABLE64_TEST_HI: 635 case TABLE64_TEST_HI:
610 REG_PATTERN_TEST((test->reg + 4) + (i * 8), 636 b = reg_pattern_test(adapter, data,
611 test->mask, 637 test->reg + 4 + (i * 8),
612 test->write); 638 test->mask,
639 test->write);
613 break; 640 break;
614 } 641 }
642 if (b)
643 return 1;
615 } 644 }
616 test++; 645 test++;
617 } 646 }
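For reference, a hedged sketch of the table this loop walks; the field order is inferred from the call sites above and the entry values are invented:

    /* { reg, array_len, test_type, mask, write }, terminated by a
     * zero reg, which is what the 'while (test->reg)' loop keys on. */
    static const struct ixgbevf_reg_test reg_test_example[] = {
            { 0x0C00 /* hypothetical */, 2, PATTERN_TEST,
              0xFFFFFFFF, 0xFFFFFFFF },
            { 0, 0, 0, 0, 0 }
    };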
@@ -626,6 +655,14 @@ static void ixgbevf_diag_test(struct net_device *netdev,
626 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 655 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
627 bool if_running = netif_running(netdev); 656 bool if_running = netif_running(netdev);
628 657
658 if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
659 dev_err(&adapter->pdev->dev,
660 "Adapter removed - test blocked\n");
661 data[0] = 1;
662 data[1] = 1;
663 eth_test->flags |= ETH_TEST_FL_FAILED;
664 return;
665 }
629 set_bit(__IXGBEVF_TESTING, &adapter->state); 666 set_bit(__IXGBEVF_TESTING, &adapter->state);
630 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 667 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
631 /* Offline tests */ 668 /* Offline tests */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 54829326bb09..a08bd7c46766 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -315,6 +315,11 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
315 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; 315 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
316} 316}
317 317
318static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
319{
320 writel(value, ring->tail);
321}
322
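The helper gives every tail bump a single definition instead of open-coded writel() calls; callers remain responsible for ordering, as in this usage sketch mirroring the RX path in the main driver:

    /* descriptor writes must be visible before the doorbell */
    wmb();
    ixgbevf_write_tail(rx_ring, rx_ring->next_to_use);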
318#define IXGBEVF_RX_DESC(R, i) \ 323#define IXGBEVF_RX_DESC(R, i) \
319 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) 324 (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
320#define IXGBEVF_TX_DESC(R, i) \ 325#define IXGBEVF_TX_DESC(R, i) \
@@ -401,6 +406,7 @@ struct ixgbevf_adapter {
401 u64 bp_tx_missed; 406 u64 bp_tx_missed;
402#endif 407#endif
403 408
409 u8 __iomem *io_addr; /* Mainly for iounmap use */
404 u32 link_speed; 410 u32 link_speed;
405 bool link_up; 411 bool link_up;
406 412
@@ -412,7 +418,8 @@ struct ixgbevf_adapter {
412enum ixbgevf_state_t { 418enum ixbgevf_state_t {
413 __IXGBEVF_TESTING, 419 __IXGBEVF_TESTING,
414 __IXGBEVF_RESETTING, 420 __IXGBEVF_RESETTING,
415 __IXGBEVF_DOWN 421 __IXGBEVF_DOWN,
422 __IXGBEVF_REMOVING,
416}; 423};
417 424
418struct ixgbevf_cb { 425struct ixgbevf_cb {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df28985eba7..a50e892a5d21 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -99,6 +99,49 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector); 99static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter); 100static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
101 101
102static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
103{
104 struct ixgbevf_adapter *adapter = hw->back;
105
106 if (!hw->hw_addr)
107 return;
108 hw->hw_addr = NULL;
109 dev_err(&adapter->pdev->dev, "Adapter removed\n");
110 schedule_work(&adapter->watchdog_task);
111}
112
113static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg)
114{
115 u32 value;
116
117 /* The following check serves two purposes: it skips a redundant
118 * read of the status register when the register just read was
119 * itself the status register and came back as
120 * IXGBE_FAILED_READ_REG, and it blocks any potential
121 * recursion through the status-register read below.
122 */
123 if (reg == IXGBE_VFSTATUS) {
124 ixgbevf_remove_adapter(hw);
125 return;
126 }
127 value = ixgbe_read_reg(hw, IXGBE_VFSTATUS);
128 if (value == IXGBE_FAILED_READ_REG)
129 ixgbevf_remove_adapter(hw);
130}
131
132u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
133{
134 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
135 u32 value;
136
137 if (IXGBE_REMOVED(reg_addr))
138 return IXGBE_FAILED_READ_REG;
139 value = readl(reg_addr + reg);
140 if (unlikely(value == IXGBE_FAILED_READ_REG))
141 ixgbevf_check_remove(hw, reg);
142 return value;
143}
144
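This is the standard surprise-removal defense: a removed PCIe function returns all-ones on reads, so a 0xFFFFFFFF value triggers a one-shot probe of the status register, and a confirmed removal NULLs hw_addr so later accesses short-circuit. Caller-side sketch (the register name is a stand-in):

    u32 v = ixgbe_read_reg(hw, some_reg /* any MMIO offset */);

    if (v == IXGBE_FAILED_READ_REG) {
            /* device gone: hw_addr is cleared and the watchdog has
             * been scheduled; do not trust all-ones register data */
            return;
    }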
102static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring, 145static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
103 u32 val) 146 u32 val)
104{ 147{
@@ -111,7 +154,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbevf_ring *rx_ring,
111 * such as IA-64). 154 * such as IA-64).
112 */ 155 */
113 wmb(); 156 wmb();
114 writel(val, rx_ring->tail); 157 ixgbevf_write_tail(rx_ring, val);
115} 158}
116 159
117/** 160/**
@@ -516,7 +559,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
516 /* Workaround hardware that can't do proper VEPA multicast 559 /* Workaround hardware that can't do proper VEPA multicast
517 * source pruning. 560 * source pruning.
518 */ 561 */
519 if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) && 562 if ((skb->pkt_type == PACKET_BROADCAST ||
563 skb->pkt_type == PACKET_MULTICAST) &&
520 ether_addr_equal(rx_ring->netdev->dev_addr, 564 ether_addr_equal(rx_ring->netdev->dev_addr,
521 eth_hdr(skb)->h_source)) { 565 eth_hdr(skb)->h_source)) {
522 dev_kfree_skb_irq(skb); 566 dev_kfree_skb_irq(skb);
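The old test was a real bug, not a cleanup: skb->pkt_type holds small enum values rather than flag bits, so the bitwise AND matched too much. Concretely:

    /* From linux/if_packet.h: PACKET_HOST == 0, PACKET_BROADCAST == 1,
     * PACKET_MULTICAST == 2, PACKET_OTHERHOST == 3. Therefore
     *   pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST) == pkt_type & 3
     * is also true for PACKET_OTHERHOST, wrongly pruning those frames;
     * the explicit equality tests match only broadcast and multicast. */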
@@ -607,7 +651,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
607 napi_complete(napi); 651 napi_complete(napi);
608 if (adapter->rx_itr_setting & 1) 652 if (adapter->rx_itr_setting & 1)
609 ixgbevf_set_itr(q_vector); 653 ixgbevf_set_itr(q_vector);
610 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 654 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
655 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
611 ixgbevf_irq_enable_queues(adapter, 656 ixgbevf_irq_enable_queues(adapter,
612 1 << q_vector->v_idx); 657 1 << q_vector->v_idx);
613 658
@@ -832,7 +877,8 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
832 877
833 hw->mac.get_link_status = 1; 878 hw->mac.get_link_status = 1;
834 879
835 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 880 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
881 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
836 mod_timer(&adapter->watchdog_timer, jiffies); 882 mod_timer(&adapter->watchdog_timer, jiffies);
837 883
838 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); 884 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
@@ -1136,7 +1182,7 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
1136 /* reset head and tail pointers */ 1182 /* reset head and tail pointers */
1137 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0); 1183 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(reg_idx), 0);
1138 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0); 1184 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(reg_idx), 0);
1139 ring->tail = hw->hw_addr + IXGBE_VFTDT(reg_idx); 1185 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx);
1140 1186
1141 /* reset ntu and ntc to place SW in sync with hardware */ 1187 /* reset ntu and ntc to place SW in sync with hardware */
1142 ring->next_to_clean = 0; 1188 ring->next_to_clean = 0;
@@ -1256,6 +1302,8 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
1256 u32 rxdctl; 1302 u32 rxdctl;
1257 u8 reg_idx = ring->reg_idx; 1303 u8 reg_idx = ring->reg_idx;
1258 1304
1305 if (IXGBE_REMOVED(hw->hw_addr))
1306 return;
1259 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1307 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
1260 rxdctl &= ~IXGBE_RXDCTL_ENABLE; 1308 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
1261 1309
@@ -1281,6 +1329,8 @@ static void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1281 u32 rxdctl; 1329 u32 rxdctl;
1282 u8 reg_idx = ring->reg_idx; 1330 u8 reg_idx = ring->reg_idx;
1283 1331
1332 if (IXGBE_REMOVED(hw->hw_addr))
1333 return;
1284 do { 1334 do {
1285 usleep_range(1000, 2000); 1335 usleep_range(1000, 2000);
1286 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); 1336 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
@@ -1315,7 +1365,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
1315 /* reset head and tail pointers */ 1365 /* reset head and tail pointers */
1316 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0); 1366 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
1317 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0); 1367 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(reg_idx), 0);
1318 ring->tail = hw->hw_addr + IXGBE_VFRDT(reg_idx); 1368 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx);
1319 1369
1320 /* reset ntu and ntc to place SW in sync with hardware */ 1370 /* reset ntu and ntc to place SW in sync with hardware */
1321 ring->next_to_clean = 0; 1371 ring->next_to_clean = 0;
@@ -1617,6 +1667,7 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1617 1667
1618 spin_unlock_bh(&adapter->mbx_lock); 1668 spin_unlock_bh(&adapter->mbx_lock);
1619 1669
1670 smp_mb__before_clear_bit();
1620 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1671 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1621 ixgbevf_napi_enable_all(adapter); 1672 ixgbevf_napi_enable_all(adapter);
1622 1673
@@ -1741,7 +1792,8 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
1741 int i; 1792 int i;
1742 1793
1743 /* signal that we are down to the interrupt handler */ 1794 /* signal that we are down to the interrupt handler */
1744 set_bit(__IXGBEVF_DOWN, &adapter->state); 1795 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
1796 return; /* do nothing if already down */
1745 1797
1746 /* disable all enabled rx queues */ 1798 /* disable all enabled rx queues */
1747 for (i = 0; i < adapter->num_rx_queues; i++) 1799 for (i = 0; i < adapter->num_rx_queues; i++)
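test_and_set_bit() makes the teardown idempotent: the watchdog path (on surprise removal) and the normal stop path can now race into ixgbevf_down() and only the first proceeds. The idiom in isolation:

    /* atomically set the bit and learn whether it was already set */
    if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
            return; /* someone else is already tearing down */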
@@ -1817,7 +1869,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1817static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1869static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1818 int vectors) 1870 int vectors)
1819{ 1871{
1820 int err = 0;
1821 int vector_threshold; 1872 int vector_threshold;
1822 1873
1823 /* We'll want at least 2 (vector_threshold): 1874 /* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1882,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1831 * Right now, we simply care about how many we'll get; we'll 1882 * Right now, we simply care about how many we'll get; we'll
1832 * set them up later while requesting IRQs. 1883 * set them up later while requesting IRQs.
1833 */ 1884 */
1834 while (vectors >= vector_threshold) { 1885 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
1835 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1886 vector_threshold, vectors);
1836 vectors);
1837 if (!err || err < 0) /* Success or a nasty failure. */
1838 break;
1839 else /* err == number of vectors we should try again with */
1840 vectors = err;
1841 }
1842
1843 if (vectors < vector_threshold)
1844 err = -ENOMEM;
1845 1887
1846 if (err) { 1888 if (vectors < 0) {
1847 dev_err(&adapter->pdev->dev, 1889 dev_err(&adapter->pdev->dev,
1848 "Unable to allocate MSI-X interrupts\n"); 1890 "Unable to allocate MSI-X interrupts\n");
1849 kfree(adapter->msix_entries); 1891 kfree(adapter->msix_entries);
1850 adapter->msix_entries = NULL; 1892 adapter->msix_entries = NULL;
1851 } else { 1893 return vectors;
1852 /*
1853 * Adjust for only the vectors we'll use, which is minimum
1854 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1855 * vectors we were allocated.
1856 */
1857 adapter->num_msix_vectors = vectors;
1858 } 1894 }
1859 1895
1860 return err; 1896 /* Adjust for only the vectors we'll use, which is minimum
1897 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1898 * vectors we were allocated.
1899 */
1900 adapter->num_msix_vectors = vectors;
1901
1902 return 0;
1861} 1903}
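pci_enable_msix_range() absorbs the old retry loop: it allocates anywhere in [minvec, maxvec] and returns the count actually granted, or a negative errno when even minvec cannot be met, so there is no zero-vector case left to handle. Contract sketch:

    int nvec = pci_enable_msix_range(adapter->pdev,
                                     adapter->msix_entries,
                                     vector_threshold, vectors);
    if (nvec < 0)
            return nvec;                    /* couldn't get the minimum */
    adapter->num_msix_vectors = nvec;       /* in [threshold, vectors] */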
1862 1904
1863/** 1905/**
@@ -2338,6 +2380,7 @@ static void ixgbevf_reset_task(struct work_struct *work)
2338 2380
2339 /* If we're already down or resetting, just bail */ 2381 /* If we're already down or resetting, just bail */
2340 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2382 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2383 test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
2341 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2384 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2342 return; 2385 return;
2343 2386
@@ -2361,6 +2404,14 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2361 bool link_up = adapter->link_up; 2404 bool link_up = adapter->link_up;
2362 s32 need_reset; 2405 s32 need_reset;
2363 2406
2407 if (IXGBE_REMOVED(hw->hw_addr)) {
2408 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
2409 rtnl_lock();
2410 ixgbevf_down(adapter);
2411 rtnl_unlock();
2412 }
2413 return;
2414 }
2364 ixgbevf_queue_reset_subtask(adapter); 2415 ixgbevf_queue_reset_subtask(adapter);
2365 2416
2366 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2417 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2422,7 +2473,8 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2422 2473
2423pf_has_reset: 2474pf_has_reset:
2424 /* Reset the timer */ 2475 /* Reset the timer */
2425 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2476 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
2477 !test_bit(__IXGBEVF_REMOVING, &adapter->state))
2426 mod_timer(&adapter->watchdog_timer, 2478 mod_timer(&adapter->watchdog_timer,
2427 round_jiffies(jiffies + (2 * HZ))); 2479 round_jiffies(jiffies + (2 * HZ)));
2428 2480
@@ -2787,6 +2839,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
2787 u32 vlan_macip_lens, type_tucmd; 2839 u32 vlan_macip_lens, type_tucmd;
2788 u32 mss_l4len_idx, l4len; 2840 u32 mss_l4len_idx, l4len;
2789 2841
2842 if (skb->ip_summed != CHECKSUM_PARTIAL)
2843 return 0;
2844
2790 if (!skb_is_gso(skb)) 2845 if (!skb_is_gso(skb))
2791 return 0; 2846 return 0;
2792 2847
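The early CHECKSUM_PARTIAL test pins down the tri-state contract of ixgbevf_tso(), which the transmit-path change further down ('else if (!tso)') relies on:

    /* ixgbevf_tso() as used by ixgbevf_xmit_frame():
     *   < 0  - error, drop the skb
     *   == 0 - no TSO (including !CHECKSUM_PARTIAL); fall back to
     *          ixgbevf_tx_csum() for a plain checksum context
     *   > 0  - TSO context written; checksum setup must be skipped */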
@@ -2857,12 +2912,12 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2857 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2912 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2858 u8 l4_hdr = 0; 2913 u8 l4_hdr = 0;
2859 switch (skb->protocol) { 2914 switch (skb->protocol) {
2860 case __constant_htons(ETH_P_IP): 2915 case htons(ETH_P_IP):
2861 vlan_macip_lens |= skb_network_header_len(skb); 2916 vlan_macip_lens |= skb_network_header_len(skb);
2862 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; 2917 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
2863 l4_hdr = ip_hdr(skb)->protocol; 2918 l4_hdr = ip_hdr(skb)->protocol;
2864 break; 2919 break;
2865 case __constant_htons(ETH_P_IPV6): 2920 case htons(ETH_P_IPV6):
2866 vlan_macip_lens |= skb_network_header_len(skb); 2921 vlan_macip_lens |= skb_network_header_len(skb);
2867 l4_hdr = ipv6_hdr(skb)->nexthdr; 2922 l4_hdr = ipv6_hdr(skb)->nexthdr;
2868 break; 2923 break;
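Plain htons() is enough in case labels: for compile-time-constant arguments it folds to a constant expression, so the __constant_ variant adds nothing. Equivalent form:

    switch (skb->protocol) {
    case htons(ETH_P_IP):           /* constant-folded case label */
    case htons(ETH_P_IPV6):
            break;
    }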
@@ -3060,7 +3115,7 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
3060 tx_ring->next_to_use = i; 3115 tx_ring->next_to_use = i;
3061 3116
3062 /* notify HW of packet */ 3117 /* notify HW of packet */
3063 writel(i, tx_ring->tail); 3118 ixgbevf_write_tail(tx_ring, i);
3064 3119
3065 return; 3120 return;
3066dma_error: 3121dma_error:
@@ -3165,7 +3220,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3165 tso = ixgbevf_tso(tx_ring, first, &hdr_len); 3220 tso = ixgbevf_tso(tx_ring, first, &hdr_len);
3166 if (tso < 0) 3221 if (tso < 0)
3167 goto out_drop; 3222 goto out_drop;
3168 else 3223 else if (!tso)
3169 ixgbevf_tx_csum(tx_ring, first); 3224 ixgbevf_tx_csum(tx_ring, first);
3170 3225
3171 ixgbevf_tx_map(tx_ring, first, hdr_len); 3226 ixgbevf_tx_map(tx_ring, first, hdr_len);
@@ -3286,7 +3341,6 @@ static int ixgbevf_resume(struct pci_dev *pdev)
3286 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3341 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3287 u32 err; 3342 u32 err;
3288 3343
3289 pci_set_power_state(pdev, PCI_D0);
3290 pci_restore_state(pdev); 3344 pci_restore_state(pdev);
3291 /* 3345 /*
3292 * pci_restore_state clears dev->state_saved so call 3346 * pci_restore_state clears dev->state_saved so call
@@ -3344,10 +3398,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3344 for (i = 0; i < adapter->num_rx_queues; i++) { 3398 for (i = 0; i < adapter->num_rx_queues; i++) {
3345 ring = adapter->rx_ring[i]; 3399 ring = adapter->rx_ring[i];
3346 do { 3400 do {
3347 start = u64_stats_fetch_begin_bh(&ring->syncp); 3401 start = u64_stats_fetch_begin_irq(&ring->syncp);
3348 bytes = ring->stats.bytes; 3402 bytes = ring->stats.bytes;
3349 packets = ring->stats.packets; 3403 packets = ring->stats.packets;
3350 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3404 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3351 stats->rx_bytes += bytes; 3405 stats->rx_bytes += bytes;
3352 stats->rx_packets += packets; 3406 stats->rx_packets += packets;
3353 } 3407 }
@@ -3355,10 +3409,10 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3355 for (i = 0; i < adapter->num_tx_queues; i++) { 3409 for (i = 0; i < adapter->num_tx_queues; i++) {
3356 ring = adapter->tx_ring[i]; 3410 ring = adapter->tx_ring[i];
3357 do { 3411 do {
3358 start = u64_stats_fetch_begin_bh(&ring->syncp); 3412 start = u64_stats_fetch_begin_irq(&ring->syncp);
3359 bytes = ring->stats.bytes; 3413 bytes = ring->stats.bytes;
3360 packets = ring->stats.packets; 3414 packets = ring->stats.packets;
3361 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3415 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
3362 stats->tx_bytes += bytes; 3416 stats->tx_bytes += bytes;
3363 stats->tx_packets += packets; 3417 stats->tx_packets += packets;
3364 } 3418 }
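The _irq variants match how these counters are now updated (from interrupt-driven paths); the reader side simply retries until it sees a consistent snapshot:

    unsigned int start;
    u64 bytes, packets;

    do {
            start = u64_stats_fetch_begin_irq(&ring->syncp);
            bytes = ring->stats.bytes;
            packets = ring->stats.packets;
    } while (u64_stats_fetch_retry_irq(&ring->syncp, start));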
@@ -3460,6 +3514,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3460 3514
3461 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3515 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3462 pci_resource_len(pdev, 0)); 3516 pci_resource_len(pdev, 0));
3517 adapter->io_addr = hw->hw_addr;
3463 if (!hw->hw_addr) { 3518 if (!hw->hw_addr) {
3464 err = -EIO; 3519 err = -EIO;
3465 goto err_ioremap; 3520 goto err_ioremap;
@@ -3545,7 +3600,7 @@ err_register:
3545 ixgbevf_clear_interrupt_scheme(adapter); 3600 ixgbevf_clear_interrupt_scheme(adapter);
3546err_sw_init: 3601err_sw_init:
3547 ixgbevf_reset_interrupt_capability(adapter); 3602 ixgbevf_reset_interrupt_capability(adapter);
3548 iounmap(hw->hw_addr); 3603 iounmap(adapter->io_addr);
3549err_ioremap: 3604err_ioremap:
3550 free_netdev(netdev); 3605 free_netdev(netdev);
3551err_alloc_etherdev: 3606err_alloc_etherdev:
@@ -3570,7 +3625,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3570 struct net_device *netdev = pci_get_drvdata(pdev); 3625 struct net_device *netdev = pci_get_drvdata(pdev);
3571 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3626 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3572 3627
3573 set_bit(__IXGBEVF_DOWN, &adapter->state); 3628 set_bit(__IXGBEVF_REMOVING, &adapter->state);
3574 3629
3575 del_timer_sync(&adapter->watchdog_timer); 3630 del_timer_sync(&adapter->watchdog_timer);
3576 3631
@@ -3583,7 +3638,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
3583 ixgbevf_clear_interrupt_scheme(adapter); 3638 ixgbevf_clear_interrupt_scheme(adapter);
3584 ixgbevf_reset_interrupt_capability(adapter); 3639 ixgbevf_reset_interrupt_capability(adapter);
3585 3640
3586 iounmap(adapter->hw.hw_addr); 3641 iounmap(adapter->io_addr);
3587 pci_release_regions(pdev); 3642 pci_release_regions(pdev);
3588 3643
3589 hw_dbg(&adapter->hw, "Remove complete\n"); 3644 hw_dbg(&adapter->hw, "Remove complete\n");
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index debd8c0e1f28..09dd8f698bea 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -70,16 +70,6 @@
70#define IXGBE_VFGOTC_MSB 0x02024 70#define IXGBE_VFGOTC_MSB 0x02024
71#define IXGBE_VFMPRC 0x01034 71#define IXGBE_VFMPRC 0x01034
72 72
73#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
74
75#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
76
77#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
78 writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
79
80#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
81 readl((a)->hw_addr + (reg) + ((offset) << 2)))
82
83#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS)) 73#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
84 74
85#endif /* _IXGBEVF_REGS_H_ */ 75#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 7b1f502d1716..096d33a59def 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2014 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -172,6 +172,37 @@ struct ixgbevf_info {
172 const struct ixgbe_mac_operations *mac_ops; 172 const struct ixgbe_mac_operations *mac_ops;
173}; 173};
174 174
175#define IXGBE_FAILED_READ_REG 0xffffffffU
176
177#define IXGBE_REMOVED(a) unlikely(!(a))
178
179static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
180{
181 u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
182
183 if (IXGBE_REMOVED(reg_addr))
184 return;
185 writel(value, reg_addr + reg);
186}
187#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
188
189u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
190#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r)
191
192static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
193 u32 offset, u32 value)
194{
195 ixgbe_write_reg(hw, reg + (offset << 2), value);
196}
197#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
198
199static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
200 u32 offset)
201{
202 return ixgbe_read_reg(hw, reg + (offset << 2));
203}
204#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
205
175void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); 206void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
176int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); 207int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
177int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, 208int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f5685c0d0579..14ff8d64257d 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2059,7 +2059,7 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
2059 if (unlikely(skb_shinfo(skb)->gso_size && 2059 if (unlikely(skb_shinfo(skb)->gso_size &&
2060 skb_header_cloned(skb) && 2060 skb_header_cloned(skb) &&
2061 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) { 2061 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
2062 dev_kfree_skb(skb); 2062 dev_kfree_skb_any(skb);
2063 return -1; 2063 return -1;
2064 } 2064 }
2065 2065
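This and the similar conversions below matter because transmit and completion paths may run in hard-irq or softirq context, where plain kfree_skb()/dev_kfree_skb() is not safe. The two any-context helpers split by intent:

    /* dev_kfree_skb_any(skb)   - drop; frees directly when allowed,
     *                            otherwise defers via softirq
     * dev_consume_skb_any(skb) - same safety, but flags a successful
     *                            consume (tx completion) so it is not
     *                            counted as a drop by tracing */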
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a2565ce22b7c..b7b8d74c22d9 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -730,7 +730,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
730 unlikely(tag_bytes & ~12)) { 730 unlikely(tag_bytes & ~12)) {
731 if (skb_checksum_help(skb) == 0) 731 if (skb_checksum_help(skb) == 0)
732 goto no_csum; 732 goto no_csum;
733 kfree_skb(skb); 733 dev_kfree_skb_any(skb);
734 return 1; 734 return 1;
735 } 735 }
736 736
@@ -819,7 +819,7 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) { 819 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
820 if (net_ratelimit()) 820 if (net_ratelimit())
821 netdev_err(dev, "tx queue full?!\n"); 821 netdev_err(dev, "tx queue full?!\n");
822 kfree_skb(skb); 822 dev_kfree_skb_any(skb);
823 return NETDEV_TX_OK; 823 return NETDEV_TX_OK;
824 } 824 }
825 825
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f20f94..f3afcbdbb725 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -508,12 +508,12 @@ struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
508 508
509 cpu_stats = per_cpu_ptr(pp->stats, cpu); 509 cpu_stats = per_cpu_ptr(pp->stats, cpu);
510 do { 510 do {
511 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 511 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
512 rx_packets = cpu_stats->rx_packets; 512 rx_packets = cpu_stats->rx_packets;
513 rx_bytes = cpu_stats->rx_bytes; 513 rx_bytes = cpu_stats->rx_bytes;
514 tx_packets = cpu_stats->tx_packets; 514 tx_packets = cpu_stats->tx_packets;
515 tx_bytes = cpu_stats->tx_bytes; 515 tx_bytes = cpu_stats->tx_bytes;
516 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 516 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
517 517
518 stats->rx_packets += rx_packets; 518 stats->rx_packets += rx_packets;
519 stats->rx_bytes += rx_bytes; 519 stats->rx_bytes += rx_bytes;
@@ -2784,7 +2784,6 @@ static int mvneta_probe(struct platform_device *pdev)
2784 const char *mac_from; 2784 const char *mac_from;
2785 int phy_mode; 2785 int phy_mode;
2786 int err; 2786 int err;
2787 int cpu;
2788 2787
2789 /* Our multiqueue support is not complete, so for now, only 2788 /* Our multiqueue support is not complete, so for now, only
2790 * allow the usage of the first RX queue 2789 * allow the usage of the first RX queue
@@ -2845,18 +2844,12 @@ static int mvneta_probe(struct platform_device *pdev)
2845 } 2844 }
2846 2845
2847 /* Alloc per-cpu stats */ 2846 /* Alloc per-cpu stats */
2848 pp->stats = alloc_percpu(struct mvneta_pcpu_stats); 2847 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
2849 if (!pp->stats) { 2848 if (!pp->stats) {
2850 err = -ENOMEM; 2849 err = -ENOMEM;
2851 goto err_unmap; 2850 goto err_unmap;
2852 } 2851 }
2853 2852
2854 for_each_possible_cpu(cpu) {
2855 struct mvneta_pcpu_stats *stats;
2856 stats = per_cpu_ptr(pp->stats, cpu);
2857 u64_stats_init(&stats->syncp);
2858 }
2859
2860 dt_mac_addr = of_get_mac_address(dn); 2853 dt_mac_addr = of_get_mac_address(dn);
2861 if (dt_mac_addr) { 2854 if (dt_mac_addr) {
2862 mac_from = "device tree"; 2855 mac_from = "device tree";
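netdev_alloc_pcpu_stats() folds the deleted loop into the allocation itself; the open-coded equivalent it replaces (per the removed lines) was:

    pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
    if (pp->stats) {
            int cpu;

            for_each_possible_cpu(cpu)
                    u64_stats_init(&per_cpu_ptr(pp->stats, cpu)->syncp);
    }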
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 597846193869..7f81ae66cc89 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2845,7 +2845,7 @@ mapping_unwind:
2845mapping_error: 2845mapping_error:
2846 if (net_ratelimit()) 2846 if (net_ratelimit())
2847 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2847 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2848 dev_kfree_skb(skb); 2848 dev_kfree_skb_any(skb);
2849 return NETDEV_TX_OK; 2849 return NETDEV_TX_OK;
2850} 2850}
2851 2851
@@ -3172,7 +3172,7 @@ static void skge_tx_done(struct net_device *dev)
3172 pkts_compl++; 3172 pkts_compl++;
3173 bytes_compl += e->skb->len; 3173 bytes_compl += e->skb->len;
3174 3174
3175 dev_kfree_skb(e->skb); 3175 dev_consume_skb_any(e->skb);
3176 } 3176 }
3177 } 3177 }
3178 netdev_completed_queue(dev, pkts_compl, bytes_compl); 3178 netdev_completed_queue(dev, pkts_compl, bytes_compl);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 55a37ae11440..b81106451a0a 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -44,6 +44,8 @@
44#include <linux/prefetch.h> 44#include <linux/prefetch.h>
45#include <linux/debugfs.h> 45#include <linux/debugfs.h>
46#include <linux/mii.h> 46#include <linux/mii.h>
47#include <linux/of_device.h>
48#include <linux/of_net.h>
47 49
48#include <asm/irq.h> 50#include <asm/irq.h>
49 51
@@ -2000,7 +2002,7 @@ mapping_unwind:
2000mapping_error: 2002mapping_error:
2001 if (net_ratelimit()) 2003 if (net_ratelimit())
2002 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); 2004 dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
2003 dev_kfree_skb(skb); 2005 dev_kfree_skb_any(skb);
2004 return NETDEV_TX_OK; 2006 return NETDEV_TX_OK;
2005} 2007}
2006 2008
@@ -2733,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2733 unsigned int total_bytes[2] = { 0 }; 2735 unsigned int total_bytes[2] = { 0 };
2734 unsigned int total_packets[2] = { 0 }; 2736 unsigned int total_packets[2] = { 0 };
2735 2737
2738 if (to_do <= 0)
2739 return work_done;
2740
2736 rmb(); 2741 rmb();
2737 do { 2742 do {
2738 struct sky2_port *sky2; 2743 struct sky2_port *sky2;
@@ -3906,19 +3911,19 @@ static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
3906 u64 _bytes, _packets; 3911 u64 _bytes, _packets;
3907 3912
3908 do { 3913 do {
3909 start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp); 3914 start = u64_stats_fetch_begin_irq(&sky2->rx_stats.syncp);
3910 _bytes = sky2->rx_stats.bytes; 3915 _bytes = sky2->rx_stats.bytes;
3911 _packets = sky2->rx_stats.packets; 3916 _packets = sky2->rx_stats.packets;
3912 } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start)); 3917 } while (u64_stats_fetch_retry_irq(&sky2->rx_stats.syncp, start));
3913 3918
3914 stats->rx_packets = _packets; 3919 stats->rx_packets = _packets;
3915 stats->rx_bytes = _bytes; 3920 stats->rx_bytes = _bytes;
3916 3921
3917 do { 3922 do {
3918 start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp); 3923 start = u64_stats_fetch_begin_irq(&sky2->tx_stats.syncp);
3919 _bytes = sky2->tx_stats.bytes; 3924 _bytes = sky2->tx_stats.bytes;
3920 _packets = sky2->tx_stats.packets; 3925 _packets = sky2->tx_stats.packets;
3921 } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start)); 3926 } while (u64_stats_fetch_retry_irq(&sky2->tx_stats.syncp, start));
3922 3927
3923 stats->tx_packets = _packets; 3928 stats->tx_packets = _packets;
3924 stats->tx_bytes = _bytes; 3929 stats->tx_bytes = _bytes;
@@ -4748,6 +4753,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4748{ 4753{
4749 struct sky2_port *sky2; 4754 struct sky2_port *sky2;
4750 struct net_device *dev = alloc_etherdev(sizeof(*sky2)); 4755 struct net_device *dev = alloc_etherdev(sizeof(*sky2));
4756 const void *iap;
4751 4757
4752 if (!dev) 4758 if (!dev)
4753 return NULL; 4759 return NULL;
@@ -4805,8 +4811,16 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
4805 4811
4806 dev->features |= dev->hw_features; 4812 dev->features |= dev->hw_features;
4807 4813
4808 /* read the mac address */ 4814 /* try to get mac address in the following order:
4809 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4815 * 1) from device tree data
4816 * 2) from internal registers set by bootloader
4817 */
4818 iap = of_get_mac_address(hw->pdev->dev.of_node);
4819 if (iap)
4820 memcpy(dev->dev_addr, iap, ETH_ALEN);
4821 else
4822 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
4823 ETH_ALEN);
4810 4824
4811 return dev; 4825 return dev;
4812} 4826}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 563495d8975a..1a6e1887a171 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config MLX4_EN 5config MLX4_EN
6 tristate "Mellanox Technologies 10Gbit Ethernet support" 6 tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
7 depends on PCI 7 depends on PCI
8 select MLX4_CORE 8 select MLX4_CORE
9 select PTP_1588_CLOCK 9 select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 0d02fba94536..516c1dd4963b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1643,8 +1643,16 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1643 int port, err; 1643 int port, err;
1644 struct mlx4_vport_state *vp_admin; 1644 struct mlx4_vport_state *vp_admin;
1645 struct mlx4_vport_oper_state *vp_oper; 1645 struct mlx4_vport_oper_state *vp_oper;
1646 1646 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1647 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1647 &priv->dev, slave);
1648 int min_port = find_first_bit(actv_ports.ports,
1649 priv->dev.caps.num_ports) + 1;
1650 int max_port = min_port - 1 +
1651 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1652
1653 for (port = min_port; port <= max_port; port++) {
1654 if (!test_bit(port - 1, actv_ports.ports))
1655 continue;
1648 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1656 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1649 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port]; 1657 vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1650 vp_oper->state = *vp_admin; 1658 vp_oper->state = *vp_admin;
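A worked example of the new loop bounds, assuming a dual-port HCA with a VF assigned only to physical port 2:

    /* actv_ports.ports = 0b10, caps.num_ports = 2
     * min_port = find_first_bit() + 1           = 2
     * max_port = min_port - 1 + bitmap_weight() = 2
     * so only port 2 is visited; the test_bit() guard additionally
     * covers sparse maps where active ports are not contiguous. */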
@@ -1685,8 +1693,17 @@ static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave
1685{ 1693{
1686 int port; 1694 int port;
1687 struct mlx4_vport_oper_state *vp_oper; 1695 struct mlx4_vport_oper_state *vp_oper;
1696 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1697 &priv->dev, slave);
1698 int min_port = find_first_bit(actv_ports.ports,
1699 priv->dev.caps.num_ports) + 1;
1700 int max_port = min_port - 1 +
1701 bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1702
1688 1703
1689 for (port = 1; port <= MLX4_MAX_PORTS; port++) { 1704 for (port = min_port; port <= max_port; port++) {
1705 if (!test_bit(port - 1, actv_ports.ports))
1706 continue;
1690 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1707 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1691 if (NO_INDX != vp_oper->vlan_idx) { 1708 if (NO_INDX != vp_oper->vlan_idx) {
1692 __mlx4_unregister_vlan(&priv->dev, 1709 __mlx4_unregister_vlan(&priv->dev,
@@ -2234,6 +2251,112 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2234 return vf+1; 2251 return vf+1;
2235} 2252}
2236 2253
2254int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2255{
2256 if (slave < 1 || slave > dev->num_vfs) {
2257 mlx4_err(dev,
2258 "Bad slave number: %d (number of activated slaves: %lu)\n",
2259 slave, dev->num_slaves);
2260 return -EINVAL;
2261 }
2262 return slave - 1;
2263}
2264
2265struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2266{
2267 struct mlx4_active_ports actv_ports;
2268 int vf;
2269
2270 bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2271
2272 if (slave == 0) {
2273 bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2274 return actv_ports;
2275 }
2276
2277 vf = mlx4_get_vf_indx(dev, slave);
2278 if (vf < 0)
2279 return actv_ports;
2280
2281 bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2282 min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2283 dev->caps.num_ports));
2284
2285 return actv_ports;
2286}
2287EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2288
2289int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2290{
2291 unsigned n;
2292 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2293 unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2294
2295 if (port <= 0 || port > m)
2296 return -EINVAL;
2297
2298 n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2299 if (port <= n)
2300 port = n + 1;
2301
2302 return port;
2303}
2304EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2305
2306int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2307{
2308 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2309 if (test_bit(port - 1, actv_ports.ports))
2310 return port -
2311 find_first_bit(actv_ports.ports, dev->caps.num_ports);
2312
2313 return -1;
2314}
2315EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2316
2317struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2318 int port)
2319{
2320 unsigned i;
2321 struct mlx4_slaves_pport slaves_pport;
2322
2323 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2324
2325 if (port <= 0 || port > dev->caps.num_ports)
2326 return slaves_pport;
2327
2328 for (i = 0; i < dev->num_vfs + 1; i++) {
2329 struct mlx4_active_ports actv_ports =
2330 mlx4_get_active_ports(dev, i);
2331 if (test_bit(port - 1, actv_ports.ports))
2332 set_bit(i, slaves_pport.slaves);
2333 }
2334
2335 return slaves_pport;
2336}
2337EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2338
2339struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2340 struct mlx4_dev *dev,
2341 const struct mlx4_active_ports *crit_ports)
2342{
2343 unsigned i;
2344 struct mlx4_slaves_pport slaves_pport;
2345
2346 bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2347
2348 for (i = 0; i < dev->num_vfs + 1; i++) {
2349 struct mlx4_active_ports actv_ports =
2350 mlx4_get_active_ports(dev, i);
2351 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2352 dev->caps.num_ports))
2353 set_bit(i, slaves_pport.slaves);
2354 }
2355
2356 return slaves_pport;
2357}
2358EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2359
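Putting the mapping helpers together, a worked example under the same assumed single-port VF (active only on physical port 2 of 2):

    /* actv_ports.ports = 0b10:
     *   mlx4_slave_convert_port(dev, slave, 1) ==  2  (slave -> phys)
     *   mlx4_phys_to_slave_port(dev, slave, 2) ==  1  (phys  -> slave)
     *   mlx4_phys_to_slave_port(dev, slave, 1) == -1  (port not owned) */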
2237int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac) 2360int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2238{ 2361{
2239 struct mlx4_priv *priv = mlx4_priv(dev); 2362 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2289,6 +2412,30 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2289} 2412}
2290EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan); 2413EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2291 2414
2415 /* mlx4_get_slave_default_vlan -
2416 * returns true if the slave is in VST mode (has a default vlan);
2417 * if so, also reports vlan & qos through the non-NULL pointers
2418 */
2419bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
2420 u16 *vlan, u8 *qos)
2421{
2422 struct mlx4_vport_oper_state *vp_oper;
2423 struct mlx4_priv *priv;
2424
2425 priv = mlx4_priv(dev);
2426 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2427
2428 if (MLX4_VGT != vp_oper->state.default_vlan) {
2429 if (vlan)
2430 *vlan = vp_oper->state.default_vlan;
2431 if (qos)
2432 *qos = vp_oper->state.default_qos;
2433 return true;
2434 }
2435 return false;
2436}
2437EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
2438
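Usage sketch for the new export; the variables are illustrative:

    u16 vlan;
    u8 qos;

    if (mlx4_get_slave_default_vlan(dev, port, slave, &vlan, &qos)) {
            /* VST: the PF enforces this default VLAN/QoS on the VF */
    } else {
            /* VGT: the guest tags its own traffic */
    }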
2292int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting) 2439int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2293{ 2440{
2294 struct mlx4_priv *priv = mlx4_priv(dev); 2441 struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index abaf6bb22416..57dda95b67d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -276,6 +276,7 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
276 .n_alarm = 0, 276 .n_alarm = 0,
277 .n_ext_ts = 0, 277 .n_ext_ts = 0,
278 .n_per_out = 0, 278 .n_per_out = 0,
279 .n_pins = 0,
279 .pps = 0, 280 .pps = 0,
280 .adjfreq = mlx4_en_phc_adjfreq, 281 .adjfreq = mlx4_en_phc_adjfreq,
281 .adjtime = mlx4_en_phc_adjtime, 282 .adjtime = mlx4_en_phc_adjtime,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b4881b686159..c95ca252187c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
62 int has_ets_tc = 0; 62 int has_ets_tc = 0;
63 63
64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { 64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
65 if (ets->prio_tc[i] > MLX4_EN_NUM_UP) { 65 if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n", 66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
67 i, ets->prio_tc[i]); 67 i, ets->prio_tc[i]);
68 return -EINVAL; 68 return -EINVAL;
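This is an off-by-one rather than style: prio_tc[] holds a TC index, valid range 0 .. MLX4_EN_NUM_UP - 1, so '>' let the boundary value through:

    /* assuming MLX4_EN_NUM_UP == 8: the old check accepted
     * prio_tc[i] == 8, an out-of-range TC; '>=' rejects it. */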
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d357bf5a4686..0c59d4fe7e3a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
72MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]." 72MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
73 " Per priority bit mask"); 73 " Per priority bit mask");
74 74
75MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
76 "Threshold for using inline data (range: 17-104, default: 104)");
77
78#define MAX_PFC_TX 0xff
79#define MAX_PFC_RX 0xff
80
75int en_print(const char *level, const struct mlx4_en_priv *priv, 81int en_print(const char *level, const struct mlx4_en_priv *priv,
76 const char *format, ...) 82 const char *format, ...)
77{ 83{
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
140 params->prof[i].tx_ring_num = params->num_tx_rings_p_up * 146 params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
141 MLX4_EN_NUM_UP; 147 MLX4_EN_NUM_UP;
142 params->prof[i].rss_rings = 0; 148 params->prof[i].rss_rings = 0;
149 params->prof[i].inline_thold = inline_thold;
143 } 150 }
144 151
145 return 0; 152 return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
274 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 281 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
275 mlx4_en_init_timestamp(mdev); 282 mlx4_en_init_timestamp(mdev);
276 283
277 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { 284 /* Set default number of RX rings */
278 if (!dev->caps.comp_pool) { 285 mlx4_en_set_num_rx_rings(mdev);
279 mdev->profile.prof[i].rx_ring_num =
280 rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
281 min_t(int,
282 dev->caps.num_comp_vectors,
283 DEF_RX_RINGS)));
284 } else {
285 mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
286 min_t(int, dev->caps.comp_pool/
287 dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
288 }
289 }
290 286
291 /* Create our own workqueue for reset/multicast tasks 287 /* Create our own workqueue for reset/multicast tasks
292 * Note: we cannot use the shared workqueue because of deadlocks caused 288 * Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
336 .protocol = MLX4_PROT_ETH, 332 .protocol = MLX4_PROT_ETH,
337}; 333};
338 334
335static void mlx4_en_verify_params(void)
336{
337 if (pfctx > MAX_PFC_TX) {
338 pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
339 pfctx, MAX_PFC_TX);
340 pfctx = 0;
341 }
342
343 if (pfcrx > MAX_PFC_RX) {
344 pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
345 pfcrx, MAX_PFC_RX);
346 pfcrx = 0;
347 }
348
349 if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
350 pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
351 inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
352 inline_thold = MAX_INLINE;
353 }
354}
355
339static int __init mlx4_en_init(void) 356static int __init mlx4_en_init(void)
340{ 357{
358 mlx4_en_verify_params();
359
341 return mlx4_register_interface(&mlx4_en_interface); 360 return mlx4_register_interface(&mlx4_en_interface);
342} 361}
343 362
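The checks run once at module load, before the interface registers, so bad values are clamped before any netdev exists; e.g. 'modprobe mlx4_en inline_thold=16' (below MIN_PKT_LEN) logs the warning and falls back to MAX_INLINE. The shared pattern, condensed (schematic names; see the three concrete checks above):

    if (param < MIN || param > MAX) {       /* range-or-default */
            pr_warn("illegal value %d, using default %d\n", param, DEF);
            param = DEF;
    }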
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 84a96f70dfb5..fa5ee719e04b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -603,7 +603,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
603 int err = 0; 603 int err = 0;
604 u64 reg_id; 604 u64 reg_id;
605 int *qpn = &priv->base_qpn; 605 int *qpn = &priv->base_qpn;
606 u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 606 u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
607 607
608 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 608 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
609 priv->dev->dev_addr); 609 priv->dev->dev_addr);
@@ -672,7 +672,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
672 u64 mac; 672 u64 mac;
673 673
674 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 674 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
675 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr); 675 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
676 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 676 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
677 priv->dev->dev_addr); 677 priv->dev->dev_addr);
678 mlx4_unregister_mac(dev, priv->port, mac); 678 mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +685,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
685 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 685 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
686 bucket = &priv->mac_hash[i]; 686 bucket = &priv->mac_hash[i];
687 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 687 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
688 mac = mlx4_en_mac_to_u64(entry->mac); 688 mac = mlx4_mac_to_u64(entry->mac);
689 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 689 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
690 entry->mac); 690 entry->mac);
691 mlx4_en_uc_steer_release(priv, entry->mac, 691 mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +715,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
715 struct mlx4_en_dev *mdev = priv->mdev; 715 struct mlx4_en_dev *mdev = priv->mdev;
716 struct mlx4_dev *dev = mdev->dev; 716 struct mlx4_dev *dev = mdev->dev;
717 int err = 0; 717 int err = 0;
718 u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac); 718 u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
719 719
720 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { 720 if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
721 struct hlist_head *bucket; 721 struct hlist_head *bucket;
722 unsigned int mac_hash; 722 unsigned int mac_hash;
723 struct mlx4_mac_entry *entry; 723 struct mlx4_mac_entry *entry;
724 struct hlist_node *tmp; 724 struct hlist_node *tmp;
725 u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac); 725 u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
726 726
727 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]]; 727 bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
728 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) { 728 hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -759,18 +759,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
759 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64); 759 return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
760} 760}
761 761
762u64 mlx4_en_mac_to_u64(u8 *addr)
763{
764 u64 mac = 0;
765 int i;
766
767 for (i = 0; i < ETH_ALEN; i++) {
768 mac <<= 8;
769 mac |= addr[i];
770 }
771 return mac;
772}
773
774static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv) 762static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
775{ 763{
776 int err = 0; 764 int err = 0;
@@ -1089,7 +1077,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
1089 mlx4_en_cache_mclist(dev); 1077 mlx4_en_cache_mclist(dev);
1090 netif_addr_unlock_bh(dev); 1078 netif_addr_unlock_bh(dev);
1091 list_for_each_entry(mclist, &priv->mc_list, list) { 1079 list_for_each_entry(mclist, &priv->mc_list, list) {
1092 mcast_addr = mlx4_en_mac_to_u64(mclist->addr); 1080 mcast_addr = mlx4_mac_to_u64(mclist->addr);
1093 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 1081 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
1094 mcast_addr, 0, MLX4_MCAST_CONFIG); 1082 mcast_addr, 0, MLX4_MCAST_CONFIG);
1095 } 1083 }
@@ -1181,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1181 found = true; 1169 found = true;
1182 1170
1183 if (!found) { 1171 if (!found) {
1184 mac = mlx4_en_mac_to_u64(entry->mac); 1172 mac = mlx4_mac_to_u64(entry->mac);
1185 mlx4_en_uc_steer_release(priv, entry->mac, 1173 mlx4_en_uc_steer_release(priv, entry->mac,
1186 priv->base_qpn, 1174 priv->base_qpn,
1187 entry->reg_id); 1175 entry->reg_id);
@@ -1224,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
1224 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC; 1212 priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
1225 break; 1213 break;
1226 } 1214 }
1227 mac = mlx4_en_mac_to_u64(ha->addr); 1215 mac = mlx4_mac_to_u64(ha->addr);
1228 memcpy(entry->mac, ha->addr, ETH_ALEN); 1216 memcpy(entry->mac, ha->addr, ETH_ALEN);
1229 err = mlx4_register_mac(mdev->dev, priv->port, mac); 1217 err = mlx4_register_mac(mdev->dev, priv->port, mac);
1230 if (err < 0) { 1218 if (err < 0) {
@@ -2216,7 +2204,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2216{ 2204{
2217 struct mlx4_en_priv *en_priv = netdev_priv(dev); 2205 struct mlx4_en_priv *en_priv = netdev_priv(dev);
2218 struct mlx4_en_dev *mdev = en_priv->mdev; 2206 struct mlx4_en_dev *mdev = en_priv->mdev;
2219 u64 mac_u64 = mlx4_en_mac_to_u64(mac); 2207 u64 mac_u64 = mlx4_mac_to_u64(mac);
2220 2208
2221 if (!is_valid_ether_addr(mac)) 2209 if (!is_valid_ether_addr(mac))
2222 return -EINVAL; 2210 return -EINVAL;
@@ -2351,7 +2339,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2351 netif_set_real_num_rx_queues(dev, prof->rx_ring_num); 2339 netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
2352 2340
2353 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); 2341 SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
2354 dev->dev_id = port - 1; 2342 dev->dev_port = port - 1;
2355 2343
2356 /* 2344 /*
2357 * Initialize driver private data 2345 * Initialize driver private data
@@ -2417,7 +2405,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2417 if (mlx4_is_slave(priv->mdev->dev)) { 2405 if (mlx4_is_slave(priv->mdev->dev)) {
2418 eth_hw_addr_random(dev); 2406 eth_hw_addr_random(dev);
2419 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr); 2407 en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
2420 mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr); 2408 mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
2421 mdev->dev->caps.def_mac[priv->port] = mac_u64; 2409 mdev->dev->caps.def_mac[priv->port] = mac_u64;
2422 } else { 2410 } else {
2423 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", 2411 en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
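Every mlx4_en_mac_to_u64() call site above now uses the core library's mlx4_mac_to_u64(), and the driver-local copy is deleted. A minimal sketch of the conversion both helpers perform, folding the six address bytes MSB-first so that 00:11:22:33:44:55 becomes 0x001122334455:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint64_t mac_to_u64(const uint8_t *addr)
{
	uint64_t mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;        /* shift previous bytes up */
		mac |= addr[i];   /* append the next byte */
	}
	return mac;
}

int main(void)
{
	uint8_t addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("0x%012llx\n", (unsigned long long)mac_to_u64(addr));
	return 0;
}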
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index dae1a1f4ae55..c2cfb05e7290 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
148 stats->tx_packets = 0; 148 stats->tx_packets = 0;
149 stats->tx_bytes = 0; 149 stats->tx_bytes = 0;
150 priv->port_stats.tx_chksum_offload = 0; 150 priv->port_stats.tx_chksum_offload = 0;
151 priv->port_stats.queue_stopped = 0;
152 priv->port_stats.wake_queue = 0;
153
151 for (i = 0; i < priv->tx_ring_num; i++) { 154 for (i = 0; i < priv->tx_ring_num; i++) {
152 stats->tx_packets += priv->tx_ring[i]->packets; 155 stats->tx_packets += priv->tx_ring[i]->packets;
153 stats->tx_bytes += priv->tx_ring[i]->bytes; 156 stats->tx_bytes += priv->tx_ring[i]->bytes;
154 priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum; 157 priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
158 priv->port_stats.queue_stopped +=
159 priv->tx_ring[i]->queue_stopped;
160 priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
155 } 161 }
156 162
157 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) + 163 stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
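queue_stopped and wake_queue move from the shared port_stats into per-ring counters (see the mlx4_en_tx_ring changes below), so the stats dump now zeroes the port totals and re-sums them from the rings. A simplified sketch of that aggregation, with stand-in structures:

#include <stdio.h>

struct tx_ring { unsigned long queue_stopped, wake_queue; };
struct port_stats { unsigned long queue_stopped, wake_queue; };

/* re-sum per-ring counters into the port totals, as the patch does */
static void dump_stats(struct port_stats *ps, struct tx_ring *rings, int n)
{
	int i;

	ps->queue_stopped = 0;
	ps->wake_queue = 0;
	for (i = 0; i < n; i++) {
		ps->queue_stopped += rings[i].queue_stopped;
		ps->wake_queue += rings[i].wake_queue;
	}
}

int main(void)
{
	struct tx_ring rings[2] = { { 3, 2 }, { 1, 1 } };
	struct port_stats ps;

	dump_stats(&ps, rings, 2);
	printf("stopped=%lu woken=%lu\n", ps.queue_stopped, ps.wake_queue);
	return 0;
}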
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 890922c1c8ee..ba049ae88749 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
318 } 318 }
319} 319}
320 320
321void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
322{
323 int i;
324 int num_of_eqs;
325 int num_rx_rings;
326 struct mlx4_dev *dev = mdev->dev;
327
328 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
329 if (!dev->caps.comp_pool)
330 num_of_eqs = max_t(int, MIN_RX_RINGS,
331 min_t(int,
332 dev->caps.num_comp_vectors,
333 DEF_RX_RINGS));
334 else
335 num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
336 dev->caps.comp_pool/
337 dev->caps.num_ports) - 1;
338
339 num_rx_rings = min_t(int, num_of_eqs,
340 netif_get_num_default_rss_queues());
341 mdev->profile.prof[i].rx_ring_num =
342 rounddown_pow_of_two(num_rx_rings);
343 }
344}
345
321int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 346int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
322 struct mlx4_en_rx_ring **pring, 347 struct mlx4_en_rx_ring **pring,
323 u32 size, u16 stride, int node) 348 u32 size, u16 stride, int node)
@@ -636,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
636 if (!priv->port_up) 661 if (!priv->port_up)
637 return 0; 662 return 0;
638 663
664 if (budget <= 0)
665 return polled;
666
639 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx 667 /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
640 * descriptor offset can be deduced from the CQE index instead of 668 * descriptor offset can be deduced from the CQE index instead of
641 * reading 'cqe->index' */ 669 * reading 'cqe->index' */
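mlx4_en_set_num_rx_rings() replaces the open-coded loop removed from mlx4_en_add(): the per-port EQ budget is now additionally capped by netif_get_num_default_rss_queues() before rounding down to a power of two. A userspace sketch of the policy (the MIN_RX_RINGS/DEF_RX_RINGS/MAX_MSIX_P_PORT values are assumptions for illustration):

#include <stdio.h>

#define MIN_RX_RINGS	4	/* assumed */
#define DEF_RX_RINGS	8	/* assumed */
#define MAX_MSIX_P_PORT	17	/* assumed */

static int min_i(int a, int b) { return a < b ? a : b; }
static int max_i(int a, int b) { return a > b ? a : b; }

static int rounddown_pow_of_two(int n)
{
	int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

static int num_rx_rings(int comp_pool, int num_comp_vectors,
			int num_ports, int default_rss)
{
	int num_of_eqs;

	if (!comp_pool)
		num_of_eqs = max_i(MIN_RX_RINGS,
				   min_i(num_comp_vectors, DEF_RX_RINGS));
	else
		num_of_eqs = min_i(MAX_MSIX_P_PORT,
				   comp_pool / num_ports) - 1;

	return rounddown_pow_of_two(min_i(num_of_eqs, default_rss));
}

int main(void)
{
	/* 36 pooled vectors across 2 ports, 8 default RSS queues -> 8 */
	printf("%d\n", num_rx_rings(36, 16, 2, 8));
	return 0;
}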
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c11d063473e5..03e5f6ac67e7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
129 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) 129 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
130 return -ENOMEM; 130 return -ENOMEM;
131 131
132 /* The device currently only supports 10G speed */ 132 /* The device supports 1G, 10G and 40G speeds */
133 if (priv->port_state.link_speed != SPEED_10000) 133 if (priv->port_state.link_speed != 1000 &&
134 priv->port_state.link_speed != 10000 &&
135 priv->port_state.link_speed != 40000)
134 return priv->port_state.link_speed; 136 return priv->port_state.link_speed;
135 return 0; 137 return 0;
136} 138}
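A compact sketch of the widened check: the self-test now accepts any of the three supported speeds and otherwise returns the unexpected speed value as the failure code (speeds are the usual ethtool Mb/s numbers):

#include <stdio.h>

static int test_speed(unsigned int link_speed)
{
	if (link_speed != 1000 && link_speed != 10000 &&
	    link_speed != 40000)
		return (int)link_speed;	/* fail: report the odd speed */
	return 0;			/* pass */
}

int main(void)
{
	printf("40G: %d, 2.5G: %d\n", test_speed(40000), test_speed(2500));
	return 0;
}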
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 13457032d15f..dd1f6d346459 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
44 44
45#include "mlx4_en.h" 45#include "mlx4_en.h"
46 46
47enum {
48 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
49 MAX_BF = 256,
50};
51
52static int inline_thold __read_mostly = MAX_INLINE;
53
54module_param_named(inline_thold, inline_thold, int, 0444);
55MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
56
57int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, 47int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
58 struct mlx4_en_tx_ring **pring, int qpn, u32 size, 48 struct mlx4_en_tx_ring **pring, int qpn, u32 size,
59 u16 stride, int node, int queue_index) 49 u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
75 ring->size = size; 65 ring->size = size;
76 ring->size_mask = size - 1; 66 ring->size_mask = size - 1;
77 ring->stride = stride; 67 ring->stride = stride;
78 68 ring->inline_thold = priv->prof->inline_thold;
79 inline_thold = min(inline_thold, MAX_INLINE);
80 69
81 tmp = size * sizeof(struct mlx4_en_tx_info); 70 tmp = size * sizeof(struct mlx4_en_tx_info);
82 ring->tx_info = vmalloc_node(tmp, node); 71 ring->tx_info = vmalloc_node(tmp, node);
@@ -325,7 +314,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
325 } 314 }
326 } 315 }
327 } 316 }
328 dev_kfree_skb(skb); 317 dev_kfree_skb_any(skb);
329 return tx_info->nr_txbb; 318 return tx_info->nr_txbb;
330} 319}
331 320
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
456 */ 445 */
457 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { 446 if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
458 netif_tx_wake_queue(ring->tx_queue); 447 netif_tx_wake_queue(ring->tx_queue);
459 priv->port_stats.wake_queue++; 448 ring->wake_queue++;
460 } 449 }
461 return done; 450 return done;
462} 451}
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
520 return ring->buf + index * TXBB_SIZE; 509 return ring->buf + index * TXBB_SIZE;
521} 510}
522 511
523static int is_inline(struct sk_buff *skb, void **pfrag) 512static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
524{ 513{
525 void *ptr; 514 void *ptr;
526 515
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
580 } 569 }
581 } else { 570 } else {
582 *lso_header_size = 0; 571 *lso_header_size = 0;
583 if (!is_inline(skb, NULL)) 572 if (!is_inline(priv->prof->inline_thold, skb, NULL))
584 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE; 573 real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
585 else 574 else
586 real_size = inline_size(skb); 575 real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
596 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl; 585 int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
597 586
598 if (skb->len <= spc) { 587 if (skb->len <= spc) {
599 inl->byte_count = cpu_to_be32(1 << 31 | skb->len); 588 if (likely(skb->len >= MIN_PKT_LEN)) {
589 inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
590 } else {
591 inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
592 memset(((void *)(inl + 1)) + skb->len, 0,
593 MIN_PKT_LEN - skb->len);
594 }
600 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); 595 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
601 if (skb_shinfo(skb)->nr_frags) 596 if (skb_shinfo(skb)->nr_frags)
602 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, 597 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
696 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 691 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
697 /* every full Tx ring stops queue */ 692 /* every full Tx ring stops queue */
698 netif_tx_stop_queue(ring->tx_queue); 693 netif_tx_stop_queue(ring->tx_queue);
699 priv->port_stats.queue_stopped++; 694 ring->queue_stopped++;
700 695
701 /* If queue was emptied after the if, and before the 696 /* If queue was emptied after the if, and before the
702 * stop_queue - need to wake the queue, or else it will remain 697 * stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
709 if (unlikely(((int)(ring->prod - ring->cons)) <= 704 if (unlikely(((int)(ring->prod - ring->cons)) <=
710 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 705 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
711 netif_tx_wake_queue(ring->tx_queue); 706 netif_tx_wake_queue(ring->tx_queue);
712 priv->port_stats.wake_queue++; 707 ring->wake_queue++;
713 } else { 708 } else {
714 return NETDEV_TX_BUSY; 709 return NETDEV_TX_BUSY;
715 } 710 }
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
747 tx_info->data_offset = (void *)data - (void *)tx_desc; 742 tx_info->data_offset = (void *)data - (void *)tx_desc;
748 743
749 tx_info->linear = (lso_header_size < skb_headlen(skb) && 744 tx_info->linear = (lso_header_size < skb_headlen(skb) &&
750 !is_inline(skb, NULL)) ? 1 : 0; 745 !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
751 746
752 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1; 747 data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
753 748
754 if (is_inline(skb, &fragptr)) { 749 if (is_inline(ring->inline_thold, skb, &fragptr)) {
755 tx_info->inl = 1; 750 tx_info->inl = 1;
756 } else { 751 } else {
757 /* Map fragments */ 752 /* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
881 skb_tx_timestamp(skb); 876 skb_tx_timestamp(skb);
882 877
883 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) { 878 if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
884 *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); 879 tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
880
885 op_own |= htonl((bf_index & 0xffff) << 8); 881 op_own |= htonl((bf_index & 0xffff) << 8);
886 /* Ensure new descriptor hits memory 882 /* Ensure new descriptor hits memory
887 * before setting ownership of this descriptor to HW */ 883 * before setting ownership of this descriptor to HW */
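The build_inline_wqe() change guarantees the hardware's minimum inline length: frames shorter than MIN_PKT_LEN are advertised as MIN_PKT_LEN bytes and the tail is zero-padded. An illustrative userspace sketch of that byte_count/padding logic (the 1<<31 flag mirrors the inline-segment bit set in the patch):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIN_PKT_LEN 17

static uint32_t inline_byte_count(uint8_t *seg, const uint8_t *pkt, size_t len)
{
	memcpy(seg, pkt, len);
	if (len >= MIN_PKT_LEN)
		return (uint32_t)(1u << 31 | len);
	/* short frame: pad to the minimum and advertise MIN_PKT_LEN */
	memset(seg + len, 0, MIN_PKT_LEN - len);
	return 1u << 31 | MIN_PKT_LEN;
}

int main(void)
{
	uint8_t seg[64], pkt[10] = { 0 };

	/* 10-byte frame -> byte_count 0x80000011 (17 bytes) */
	printf("byte_count=0x%08x\n",
	       inline_byte_count(seg, pkt, sizeof(pkt)));
	return 0;
}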
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 8992b38578d5..d501a2b0fb79 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -271,7 +271,10 @@ enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave,
271{ 271{
272 struct mlx4_priv *priv = mlx4_priv(dev); 272 struct mlx4_priv *priv = mlx4_priv(dev);
273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 273 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
274 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) { 274 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
275
276 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
277 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
275 pr_err("%s: Error: asking for slave:%d, port:%d\n", 278 pr_err("%s: Error: asking for slave:%d, port:%d\n",
276 __func__, slave, port); 279 __func__, slave, port);
277 return SLAVE_PORT_DOWN; 280 return SLAVE_PORT_DOWN;
@@ -285,8 +288,10 @@ static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
285{ 288{
286 struct mlx4_priv *priv = mlx4_priv(dev); 289 struct mlx4_priv *priv = mlx4_priv(dev);
287 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 290 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
291 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
288 292
289 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 293 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
294 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
290 pr_err("%s: Error: asking for slave:%d, port:%d\n", 295 pr_err("%s: Error: asking for slave:%d, port:%d\n",
291 __func__, slave, port); 296 __func__, slave, port);
292 return -1; 297 return -1;
@@ -300,9 +305,13 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
300{ 305{
301 int i; 306 int i;
302 enum slave_port_gen_event gen_event; 307 enum slave_port_gen_event gen_event;
308 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
309 port);
303 310
304 for (i = 0; i < dev->num_slaves; i++) 311 for (i = 0; i < dev->num_vfs + 1; i++)
305 set_and_calc_slave_port_state(dev, i, port, event, &gen_event); 312 if (test_bit(i, slaves_pport.slaves))
313 set_and_calc_slave_port_state(dev, i, port,
314 event, &gen_event);
306} 315}
307/************************************************************************** 316/**************************************************************************
308 The function get as input the new event to that port, 317 The function get as input the new event to that port,
@@ -321,12 +330,14 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
321 struct mlx4_slave_state *ctx = NULL; 330 struct mlx4_slave_state *ctx = NULL;
322 unsigned long flags; 331 unsigned long flags;
323 int ret = -1; 332 int ret = -1;
333 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
324 enum slave_port_state cur_state = 334 enum slave_port_state cur_state =
325 mlx4_get_slave_port_state(dev, slave, port); 335 mlx4_get_slave_port_state(dev, slave, port);
326 336
327 *gen_event = SLAVE_PORT_GEN_EVENT_NONE; 337 *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
328 338
329 if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) { 339 if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
340 port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
330 pr_err("%s: Error: asking for slave:%d, port:%d\n", 341 pr_err("%s: Error: asking for slave:%d, port:%d\n",
331 __func__, slave, port); 342 __func__, slave, port);
332 return ret; 343 return ret;
@@ -542,15 +553,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
542 be64_to_cpu(eqe->event.cmd.out_param)); 553 be64_to_cpu(eqe->event.cmd.out_param));
543 break; 554 break;
544 555
545 case MLX4_EVENT_TYPE_PORT_CHANGE: 556 case MLX4_EVENT_TYPE_PORT_CHANGE: {
557 struct mlx4_slaves_pport slaves_port;
546 port = be32_to_cpu(eqe->event.port_change.port) >> 28; 558 port = be32_to_cpu(eqe->event.port_change.port) >> 28;
559 slaves_port = mlx4_phys_to_slaves_pport(dev, port);
547 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { 560 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
548 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, 561 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
549 port); 562 port);
550 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 563 mlx4_priv(dev)->sense.do_sense_port[port] = 1;
551 if (!mlx4_is_master(dev)) 564 if (!mlx4_is_master(dev))
552 break; 565 break;
553 for (i = 0; i < dev->num_slaves; i++) { 566 for (i = 0; i < dev->num_vfs + 1; i++) {
567 if (!test_bit(i, slaves_port.slaves))
568 continue;
554 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 569 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
555 if (i == mlx4_master_func_num(dev)) 570 if (i == mlx4_master_func_num(dev))
556 continue; 571 continue;
@@ -558,8 +573,13 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
558 " to slave: %d, port:%d\n", 573 " to slave: %d, port:%d\n",
559 __func__, i, port); 574 __func__, i, port);
560 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 575 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
561 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 576 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
577 eqe->event.port_change.port =
578 cpu_to_be32(
579 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
580 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
562 mlx4_slave_event(dev, i, eqe); 581 mlx4_slave_event(dev, i, eqe);
582 }
563 } else { /* IB port */ 583 } else { /* IB port */
564 set_and_calc_slave_port_state(dev, i, port, 584 set_and_calc_slave_port_state(dev, i, port,
565 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 585 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
@@ -580,12 +600,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
580 if (!mlx4_is_master(dev)) 600 if (!mlx4_is_master(dev))
581 break; 601 break;
582 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 602 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
583 for (i = 0; i < dev->num_slaves; i++) { 603 for (i = 0; i < dev->num_vfs + 1; i++) {
604 if (!test_bit(i, slaves_port.slaves))
605 continue;
584 if (i == mlx4_master_func_num(dev)) 606 if (i == mlx4_master_func_num(dev))
585 continue; 607 continue;
586 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 608 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
587 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) 609 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
610 eqe->event.port_change.port =
611 cpu_to_be32(
612 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
613 | (mlx4_phys_to_slave_port(dev, i, port) << 28));
588 mlx4_slave_event(dev, i, eqe); 614 mlx4_slave_event(dev, i, eqe);
615 }
589 } 616 }
590 else /* IB port */ 617 else /* IB port */
591 /* port-up event will be sent to a slave when the 618 /* port-up event will be sent to a slave when the
@@ -594,6 +621,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
594 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); 621 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
595 } 622 }
596 break; 623 break;
624 }
597 625
598 case MLX4_EVENT_TYPE_CQ_ERROR: 626 case MLX4_EVENT_TYPE_CQ_ERROR:
599 mlx4_warn(dev, "CQ %s on CQN %06x\n", 627 mlx4_warn(dev, "CQ %s on CQN %06x\n",
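Before a PORT_CHANGE event is forwarded to a VF, the patch rewrites the EQE so the VF sees its own port numbering: the physical port sits in the top 4 bits of the big-endian port field, so it is masked out and replaced with the slave's view of the port. A host-order sketch of that rewrite (the driver does the same around be32_to_cpu()/cpu_to_be32(), with mlx4_phys_to_slave_port() providing the mapping stubbed out here):

#include <stdint.h>
#include <stdio.h>

/* keep the low 28 bits, install the slave's port in the top nibble */
static uint32_t remap_port_field(uint32_t field, unsigned slave_port)
{
	return (field & 0x0FFFFFFF) | (slave_port << 28);
}

int main(void)
{
	uint32_t field = 2u << 28;	/* event on physical port 2 */

	/* a single-port VF always sees its only port as port 1 */
	printf("0x%08x\n", remap_port_field(field, 1));
	return 0;
}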
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 7e2995ecea6f..6bd33e2fc17c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -225,13 +225,25 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 225#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80
226 226
227 if (vhcr->op_modifier == 1) { 227 if (vhcr->op_modifier == 1) {
228 struct mlx4_active_ports actv_ports =
229 mlx4_get_active_ports(dev, slave);
230 int converted_port = mlx4_slave_convert_port(
231 dev, slave, vhcr->in_modifier);
232
233 if (converted_port < 0)
234 return -EINVAL;
235
236 vhcr->in_modifier = converted_port;
228 /* Set nic_info bit to mark new fields support */ 237 /* Set nic_info bit to mark new fields support */
229 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO; 238 field = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;
230 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET); 239 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);
231 240
232 field = vhcr->in_modifier; /* phys-port = logical-port */ 241 /* phys-port = logical-port */
242 field = vhcr->in_modifier -
243 find_first_bit(actv_ports.ports, dev->caps.num_ports);
233 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET); 244 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
234 245
246 field = vhcr->in_modifier;
235 /* size is now the QP number */ 247 /* size is now the QP number */
236 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1; 248 size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + field - 1;
237 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL); 249 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);
@@ -249,12 +261,16 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
249 QUERY_FUNC_CAP_PHYS_PORT_ID); 261 QUERY_FUNC_CAP_PHYS_PORT_ID);
250 262
251 } else if (vhcr->op_modifier == 0) { 263 } else if (vhcr->op_modifier == 0) {
264 struct mlx4_active_ports actv_ports =
265 mlx4_get_active_ports(dev, slave);
252 /* enable rdma and ethernet interfaces, and new quota locations */ 266 /* enable rdma and ethernet interfaces, and new quota locations */
253 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 267 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
254 QUERY_FUNC_CAP_FLAG_QUOTAS); 268 QUERY_FUNC_CAP_FLAG_QUOTAS);
255 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 269 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
256 270
257 field = dev->caps.num_ports; 271 field = min(
272 bitmap_weight(actv_ports.ports, dev->caps.num_ports),
273 dev->caps.num_ports);
258 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 274 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
259 275
260 size = dev->caps.function_caps; /* set PF behaviours */ 276 size = dev->caps.function_caps; /* set PF behaviours */
@@ -840,6 +856,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
840 int err = 0; 856 int err = 0;
841 u8 field; 857 u8 field;
842 u32 bmme_flags; 858 u32 bmme_flags;
859 int real_port;
860 int slave_port;
861 int first_port;
862 struct mlx4_active_ports actv_ports;
843 863
844 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 864 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
845 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 865 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
@@ -852,8 +872,26 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
852 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 872 MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
853 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV; 873 flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
854 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW; 874 flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
875 actv_ports = mlx4_get_active_ports(dev, slave);
876 first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
877 for (slave_port = 0, real_port = first_port;
878 real_port < first_port +
879 bitmap_weight(actv_ports.ports, dev->caps.num_ports);
880 ++real_port, ++slave_port) {
881 if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
882 flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
883 else
884 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
885 }
886 for (; slave_port < dev->caps.num_ports; ++slave_port)
887 flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
855 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); 888 MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
856 889
890 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
891 field &= ~0x0F;
892 field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
893 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);
894
857 /* For guests, disable timestamp */ 895 /* For guests, disable timestamp */
858 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); 896 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
859 field &= 0x7f; 897 field &= 0x7f;
@@ -903,12 +941,20 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
903 u16 short_field; 941 u16 short_field;
904 int err; 942 int err;
905 int admin_link_state; 943 int admin_link_state;
944 int port = mlx4_slave_convert_port(dev, slave,
945 vhcr->in_modifier & 0xFF);
906 946
907#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0 947#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
908#define MLX4_PORT_LINK_UP_MASK 0x80 948#define MLX4_PORT_LINK_UP_MASK 0x80
909#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c 949#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
910#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e 950#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
911 951
952 if (port < 0)
953 return -EINVAL;
954
955 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
956 (port & 0xFF);
957
912 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0, 958 err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
913 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B, 959 MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
914 MLX4_CMD_NATIVE); 960 MLX4_CMD_NATIVE);
@@ -935,7 +981,10 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
935 MLX4_PUT(outbox->buf, port_type, 981 MLX4_PUT(outbox->buf, port_type,
936 QUERY_PORT_SUPPORTED_TYPE_OFFSET); 982 QUERY_PORT_SUPPORTED_TYPE_OFFSET);
937 983
938 short_field = 1; /* slave max gids */ 984 if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
985 short_field = mlx4_get_slave_num_gids(dev, slave, port);
986 else
987 short_field = 1; /* slave max gids */
939 MLX4_PUT(outbox->buf, short_field, 988 MLX4_PUT(outbox->buf, short_field,
940 QUERY_PORT_CUR_MAX_GID_OFFSET); 989 QUERY_PORT_CUR_MAX_GID_OFFSET);
941 990
@@ -1585,9 +1634,12 @@ int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
1585 struct mlx4_cmd_info *cmd) 1634 struct mlx4_cmd_info *cmd)
1586{ 1635{
1587 struct mlx4_priv *priv = mlx4_priv(dev); 1636 struct mlx4_priv *priv = mlx4_priv(dev);
1588 int port = vhcr->in_modifier; 1637 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1589 int err; 1638 int err;
1590 1639
1640 if (port < 0)
1641 return -EINVAL;
1642
1591 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port)) 1643 if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
1592 return 0; 1644 return 0;
1593 1645
@@ -1677,9 +1729,12 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1677 struct mlx4_cmd_info *cmd) 1729 struct mlx4_cmd_info *cmd)
1678{ 1730{
1679 struct mlx4_priv *priv = mlx4_priv(dev); 1731 struct mlx4_priv *priv = mlx4_priv(dev);
1680 int port = vhcr->in_modifier; 1732 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
1681 int err; 1733 int err;
1682 1734
1735 if (port < 0)
1736 return -EINVAL;
1737
1683 if (!(priv->mfunc.master.slave_state[slave].init_port_mask & 1738 if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
1684 (1 << port))) 1739 (1 << port)))
1685 return 0; 1740 return 0;
@@ -1891,7 +1946,8 @@ void mlx4_opreq_action(struct work_struct *work)
1891 err = EINVAL; 1946 err = EINVAL;
1892 break; 1947 break;
1893 } 1948 }
1894 err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16), 1949 err = mlx4_cmd(dev, 0, ((u32) err |
1950 (__force u32)cpu_to_be32(token) << 16),
1895 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A, 1951 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
1896 MLX4_CMD_NATIVE); 1952 MLX4_CMD_NATIVE);
1897 if (err) { 1953 if (err) {
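The QUERY_DEV_CAP wrapper now renumbers per-port capability bits for single-port VFs: the slave's active physical ports are found via mlx4_get_active_ports(), and each physical port's WoL flag is copied down to the slave-relative port position, clearing the rest. An illustrative sketch of that flag compaction (WOL_PORT1_BIT is an assumed stand-in for MLX4_DEV_CAP_FLAG_WOL_PORT1's bit position):

#include <stdint.h>
#include <stdio.h>

#define WOL_PORT1_BIT (1ull << 37)	/* assumed bit position */

static uint64_t remap_wol(uint64_t flags, int first_port, int n_active,
			  int num_ports)
{
	int s, p;

	/* copy each active physical port's bit to its slave position */
	for (s = 0, p = first_port; p < first_port + n_active; p++, s++) {
		if (flags & (WOL_PORT1_BIT << p))
			flags |= WOL_PORT1_BIT << s;
		else
			flags &= ~(WOL_PORT1_BIT << s);
	}
	/* ports the slave does not own report no WoL capability */
	for (; s < num_ports; s++)
		flags &= ~(WOL_PORT1_BIT << s);
	return flags;
}

int main(void)
{
	/* WoL set on physical port 2 only; slave owns just that port */
	uint64_t f = remap_wol(WOL_PORT1_BIT << 1, 1, 1, 2);

	printf("slave port1 WoL: %s\n", (f & WOL_PORT1_BIT) ? "set" : "clear");
	return 0;
}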
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 936c15364739..61d7bcff4533 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,7 +41,6 @@
41#include <linux/slab.h> 41#include <linux/slab.h>
42#include <linux/io-mapping.h> 42#include <linux/io-mapping.h>
43#include <linux/delay.h> 43#include <linux/delay.h>
44#include <linux/netdevice.h>
45#include <linux/kmod.h> 44#include <linux/kmod.h>
46 45
47#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
@@ -78,13 +77,17 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
78 77
79#endif /* CONFIG_PCI_MSI */ 78#endif /* CONFIG_PCI_MSI */
80 79
81static int num_vfs; 80static uint8_t num_vfs[3] = {0, 0, 0};
82module_param(num_vfs, int, 0444); 81static int num_vfs_argc = 3;
83 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 82 module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
83MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
84 "num_vfs=port1,port2,port1+2");
84 85
85static int probe_vf; 86static uint8_t probe_vf[3] = {0, 0, 0};
86module_param(probe_vf, int, 0644); 87static int probe_vfs_argc = 3;
87MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 88module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
89MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
90 "probe_vf=port1,port2,port1+2");
88 91
89int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 92int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
90module_param_named(log_num_mgm_entry_size, 93module_param_named(log_num_mgm_entry_size,
@@ -1470,7 +1473,11 @@ static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
1470 int i; 1473 int i;
1471 1474
1472 for (i = 1; i <= dev->caps.num_ports; i++) { 1475 for (i = 1; i <= dev->caps.num_ports; i++) {
1473 dev->caps.gid_table_len[i] = 1; 1476 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
1477 dev->caps.gid_table_len[i] =
1478 mlx4_get_slave_num_gids(dev, 0, i);
1479 else
1480 dev->caps.gid_table_len[i] = 1;
1474 dev->caps.pkey_table_len[i] = 1481 dev->caps.pkey_table_len[i] =
1475 dev->phys_caps.pkey_phys_table_len[i] - 1; 1482 dev->phys_caps.pkey_phys_table_len[i] - 1;
1476 } 1483 }
@@ -1495,7 +1502,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
1495 if (mlx4_log_num_mgm_entry_size == -1 && 1502 if (mlx4_log_num_mgm_entry_size == -1 &&
1496 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 1503 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
1497 (!mlx4_is_mfunc(dev) || 1504 (!mlx4_is_mfunc(dev) ||
1498 (dev_cap->fs_max_num_qp_per_entry >= (num_vfs + 1))) && 1505 (dev_cap->fs_max_num_qp_per_entry >= (dev->num_vfs + 1))) &&
1499 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 1506 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
1500 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 1507 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
1501 dev->oper_log_mgm_entry_size = 1508 dev->oper_log_mgm_entry_size =
@@ -1981,9 +1988,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1981 struct mlx4_priv *priv = mlx4_priv(dev); 1988 struct mlx4_priv *priv = mlx4_priv(dev);
1982 struct msix_entry *entries; 1989 struct msix_entry *entries;
1983 int nreq = min_t(int, dev->caps.num_ports * 1990 int nreq = min_t(int, dev->caps.num_ports *
1984 min_t(int, netif_get_num_default_rss_queues() + 1, 1991 min_t(int, num_online_cpus() + 1,
1985 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); 1992 MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
1986 int err;
1987 int i; 1993 int i;
1988 1994
1989 if (msi_x) { 1995 if (msi_x) {
@@ -1997,23 +2003,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
1997 for (i = 0; i < nreq; ++i) 2003 for (i = 0; i < nreq; ++i)
1998 entries[i].entry = i; 2004 entries[i].entry = i;
1999 2005
2000 retry: 2006 nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
2001 err = pci_enable_msix(dev->pdev, entries, nreq); 2007
2002 if (err) { 2008 if (nreq < 0) {
2003 /* Try again if at least 2 vectors are available */
2004 if (err > 1) {
2005 mlx4_info(dev, "Requested %d vectors, "
2006 "but only %d MSI-X vectors available, "
2007 "trying again\n", nreq, err);
2008 nreq = err;
2009 goto retry;
2010 }
2011 kfree(entries); 2009 kfree(entries);
2012 goto no_msi; 2010 goto no_msi;
2013 } 2011 } else if (nreq < MSIX_LEGACY_SZ +
2014 2012 dev->caps.num_ports * MIN_MSIX_P_PORT) {
2015 if (nreq <
2016 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
2017 /* Working in legacy mode, all EQs shared */ 2013
2018 dev->caps.comp_pool = 0; 2014 dev->caps.comp_pool = 0;
2019 dev->caps.num_comp_vectors = nreq - 1; 2015 dev->caps.num_comp_vectors = nreq - 1;
@@ -2201,6 +2197,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2201 struct mlx4_dev *dev; 2197 struct mlx4_dev *dev;
2202 int err; 2198 int err;
2203 int port; 2199 int port;
2200 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2201 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
2202 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
2203 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
2204 unsigned total_vfs = 0;
2205 int sriov_initialized = 0;
2206 unsigned int i;
2204 2207
2205 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 2208 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));
2206 2209
@@ -2215,17 +2218,40 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2215 * per port, we must limit the number of VFs to 63 (since there are 2218
2216 * 128 MACs) 2219 * 128 MACs)
2217 */ 2220 */
2218 if (num_vfs >= MLX4_MAX_NUM_VF) { 2221 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
2222 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
2223 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
2224 if (nvfs[i] < 0) {
2225 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
2226 return -EINVAL;
2227 }
2228 }
2229 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
2230 i++) {
2231 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
2232 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
2233 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
2234 return -EINVAL;
2235 }
2236 }
2237 if (total_vfs >= MLX4_MAX_NUM_VF) {
2219 dev_err(&pdev->dev, 2238 dev_err(&pdev->dev,
2220 "Requested more VF's (%d) than allowed (%d)\n", 2239 "Requested more VF's (%d) than allowed (%d)\n",
2221 num_vfs, MLX4_MAX_NUM_VF - 1); 2240 total_vfs, MLX4_MAX_NUM_VF - 1);
2222 return -EINVAL; 2241 return -EINVAL;
2223 } 2242 }
2224 2243
2225 if (num_vfs < 0) { 2244 for (i = 0; i < MLX4_MAX_PORTS; i++) {
2226 pr_err("num_vfs module parameter cannot be negative\n"); 2245 if (nvfs[i] + nvfs[2] >= MLX4_MAX_NUM_VF_P_PORT) {
2227 return -EINVAL; 2246 dev_err(&pdev->dev,
2247 "Requested more VF's (%d) for port (%d) than allowed (%d)\n",
2248 nvfs[i] + nvfs[2], i + 1,
2249 MLX4_MAX_NUM_VF_P_PORT - 1);
2250 return -EINVAL;
2251 }
2228 } 2252 }
2253
2254
2229 /* 2255 /*
2230 * Check for BARs. 2256 * Check for BARs.
2231 */ 2257 */
@@ -2300,11 +2326,23 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2300 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 2326 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
2301 /* When acting as pf, we normally skip vfs unless explicitly 2327 /* When acting as pf, we normally skip vfs unless explicitly
2302 * requested to probe them. */ 2328 * requested to probe them. */
2303 if (num_vfs && extended_func_num(pdev) > probe_vf) { 2329 if (total_vfs) {
2304 mlx4_warn(dev, "Skipping virtual function:%d\n", 2330 unsigned vfs_offset = 0;
2305 extended_func_num(pdev)); 2331 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
2306 err = -ENODEV; 2332 vfs_offset + nvfs[i] < extended_func_num(pdev);
2307 goto err_free_dev; 2333 vfs_offset += nvfs[i], i++)
2334 ;
2335 if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
2336 err = -ENODEV;
2337 goto err_free_dev;
2338 }
2339 if ((extended_func_num(pdev) - vfs_offset)
2340 > prb_vf[i]) {
2341 mlx4_warn(dev, "Skipping virtual function:%d\n",
2342 extended_func_num(pdev));
2343 err = -ENODEV;
2344 goto err_free_dev;
2345 }
2308 } 2346 }
2309 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 2347 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
2310 dev->flags |= MLX4_FLAG_SLAVE; 2348 dev->flags |= MLX4_FLAG_SLAVE;
@@ -2324,22 +2362,30 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
2324 } 2362 }
2325 } 2363 }
2326 2364
2327 if (num_vfs) { 2365 if (total_vfs) {
2328 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); 2366 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
2329 2367 total_vfs);
2330 atomic_inc(&pf_loading); 2368 dev->dev_vfs = kzalloc(
2331 err = pci_enable_sriov(pdev, num_vfs); 2369 total_vfs * sizeof(*dev->dev_vfs),
2332 atomic_dec(&pf_loading); 2370 GFP_KERNEL);
2333 2371 if (NULL == dev->dev_vfs) {
2334 if (err) { 2372 mlx4_err(dev, "Failed to allocate memory for VFs\n");
2335 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2336 err);
2337 err = 0; 2373 err = 0;
2338 } else { 2374 } else {
2339 mlx4_warn(dev, "Running in master mode\n"); 2375 atomic_inc(&pf_loading);
2340 dev->flags |= MLX4_FLAG_SRIOV | 2376 err = pci_enable_sriov(pdev, total_vfs);
2341 MLX4_FLAG_MASTER; 2377 atomic_dec(&pf_loading);
2342 dev->num_vfs = num_vfs; 2378 if (err) {
2379 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
2380 err);
2381 err = 0;
2382 } else {
2383 mlx4_warn(dev, "Running in master mode\n");
2384 dev->flags |= MLX4_FLAG_SRIOV |
2385 MLX4_FLAG_MASTER;
2386 dev->num_vfs = total_vfs;
2387 sriov_initialized = 1;
2388 }
2343 } 2389 }
2344 } 2390 }
2345 2391
@@ -2404,12 +2450,37 @@ slave_start:
2404 /* In master functions, the communication channel must be initialized 2450 /* In master functions, the communication channel must be initialized
2405 * after obtaining its address from fw */ 2451 * after obtaining its address from fw */
2406 if (mlx4_is_master(dev)) { 2452 if (mlx4_is_master(dev)) {
2453 unsigned sum = 0;
2407 err = mlx4_multi_func_init(dev); 2454 err = mlx4_multi_func_init(dev);
2408 if (err) { 2455 if (err) {
2409 mlx4_err(dev, "Failed to init master mfunc" 2456 mlx4_err(dev, "Failed to init master mfunc"
2410 "interface, aborting.\n"); 2457 "interface, aborting.\n");
2411 goto err_close; 2458 goto err_close;
2412 } 2459 }
2460 if (sriov_initialized) {
2461 int ib_ports = 0;
2462 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
2463 ib_ports++;
2464
2465 if (ib_ports &&
2466 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2467 mlx4_err(dev,
2468 "Invalid syntax of num_vfs/probe_vfs "
2469 "with IB port. Single port VFs syntax"
2470 " is only supported when all ports "
2471 "are configured as ethernet\n");
2472 goto err_close;
2473 }
2474 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2475 unsigned j;
2476 for (j = 0; j < nvfs[i]; ++sum, ++j) {
2477 dev->dev_vfs[sum].min_port =
2478 i < 2 ? i + 1 : 1;
2479 dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
2480 dev->caps.num_ports;
2481 }
2482 }
2483 }
2413 } 2484 }
2414 2485
2415 err = mlx4_alloc_eq_table(dev); 2486 err = mlx4_alloc_eq_table(dev);
@@ -2517,6 +2588,8 @@ err_rel_own:
2517 if (!mlx4_is_slave(dev)) 2588 if (!mlx4_is_slave(dev))
2518 mlx4_free_ownership(dev); 2589 mlx4_free_ownership(dev);
2519 2590
2591 kfree(priv->dev.dev_vfs);
2592
2520err_free_dev: 2593err_free_dev:
2521 kfree(priv); 2594 kfree(priv);
2522 2595
@@ -2603,6 +2676,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
2603 kfree(dev->caps.qp0_proxy); 2676 kfree(dev->caps.qp0_proxy);
2604 kfree(dev->caps.qp1_tunnel); 2677 kfree(dev->caps.qp1_tunnel);
2605 kfree(dev->caps.qp1_proxy); 2678 kfree(dev->caps.qp1_proxy);
2679 kfree(dev->dev_vfs);
2606 2680
2607 kfree(priv); 2681 kfree(priv);
2608 pci_release_regions(pdev); 2682 pci_release_regions(pdev);
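num_vfs and probe_vf become three-element arrays: a single value keeps the old meaning (dual-port VFs), while two or three values give per-port and dual-port counts. The param_map table maps argument position to the {port1, port2, both} slot depending on how many values were passed. A userspace sketch of the parsing:

#include <stdio.h>

#define MAX_PORTS 2

static const int param_map[MAX_PORTS + 1][MAX_PORTS + 1] = {
	{2, 0, 0},	/* 1 value: it is the dual-port count */
	{0, 1, 2},	/* 2 values: port1, port2 */
	{0, 1, 2},	/* 3 values: port1, port2, dual-port */
};

int main(void)
{
	unsigned char num_vfs[3] = { 5, 0, 0 };	/* e.g. num_vfs=5 */
	int argc = 1;				/* values actually passed */
	int nvfs[MAX_PORTS + 1] = { 0, 0, 0 };
	unsigned total_vfs = 0;
	int i;

	for (i = 0; i < 3 && i < argc; i++) {
		nvfs[param_map[argc - 1][i]] = num_vfs[i];
		total_vfs += num_vfs[i];
	}
	printf("port1=%d port2=%d dual=%d total=%u\n",
	       nvfs[0], nvfs[1], nvfs[2], total_vfs);
	return 0;
}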
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index db7dc0b6667d..80ccb4edf825 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1387,9 +1387,12 @@ int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
1387 struct mlx4_cmd_info *cmd) 1387 struct mlx4_cmd_info *cmd)
1388{ 1388{
1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff; 1389 u32 qpn = (u32) vhcr->in_param & 0xffffffff;
1390 u8 port = vhcr->in_param >> 62; 1390 int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
1391 enum mlx4_steer_type steer = vhcr->in_modifier; 1391 enum mlx4_steer_type steer = vhcr->in_modifier;
1392 1392
1393 if (port < 0)
1394 return -EINVAL;
1395
1393 /* Promiscuous unicast is not allowed in mfunc */ 1396 /* Promiscuous unicast is not allowed in mfunc */
1394 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER) 1397 if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
1395 return 0; 1398 return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7aec6c833973..9fca6c150de3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -788,6 +788,10 @@ enum {
788 MLX4_USE_RR = 1, 788 MLX4_USE_RR = 1,
789}; 789};
790 790
791struct mlx4_roce_gid_entry {
792 u8 raw[16];
793};
794
791struct mlx4_priv { 795struct mlx4_priv {
792 struct mlx4_dev dev; 796 struct mlx4_dev dev;
793 797
@@ -834,6 +838,7 @@ struct mlx4_priv {
834 int fs_hash_mode; 838 int fs_hash_mode;
835 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; 839 u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
836 __be64 slave_node_guids[MLX4_MFUNC_MAX]; 840 __be64 slave_node_guids[MLX4_MFUNC_MAX];
841 struct mlx4_roce_gid_entry roce_gids[MLX4_MAX_PORTS][MLX4_ROCE_MAX_GIDS];
837 842
838 atomic_t opreq_count; 843 atomic_t opreq_count;
839 struct work_struct opreq_task; 844 struct work_struct opreq_task;
@@ -1282,4 +1287,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work);
1282 1287
1283void mlx4_init_quotas(struct mlx4_dev *dev); 1288void mlx4_init_quotas(struct mlx4_dev *dev);
1284 1289
1290int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port);
1291/* Returns the VF index of slave */
1292int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave);
1293
1285#endif /* MLX4_H */ 1294#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c87a34e..69e1f36858e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
187#define GET_AVG_PERF_COUNTER(cnt) (0) 187#define GET_AVG_PERF_COUNTER(cnt) (0)
188#endif /* MLX4_EN_PERF_STAT */ 188#endif /* MLX4_EN_PERF_STAT */
189 189
190/* Constants for TX flow */
191enum {
192 MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
193 MAX_BF = 256,
194 MIN_PKT_LEN = 17,
195};
196
190/* 197/*
191 * Configurables 198 * Configurables
192 */ 199 */
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
267 unsigned long bytes; 274 unsigned long bytes;
268 unsigned long packets; 275 unsigned long packets;
269 unsigned long tx_csum; 276 unsigned long tx_csum;
277 unsigned long queue_stopped;
278 unsigned long wake_queue;
270 struct mlx4_bf bf; 279 struct mlx4_bf bf;
271 bool bf_enabled; 280 bool bf_enabled;
272 struct netdev_queue *tx_queue; 281 struct netdev_queue *tx_queue;
273 int hwtstamp_tx_type; 282 int hwtstamp_tx_type;
283 int inline_thold;
274}; 284};
275 285
276struct mlx4_en_rx_desc { 286struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
346 u8 tx_pause; 356 u8 tx_pause;
347 u8 tx_ppp; 357 u8 tx_ppp;
348 int rss_rings; 358 int rss_rings;
359 int inline_thold;
349}; 360};
350 361
351struct mlx4_en_profile { 362struct mlx4_en_profile {
@@ -737,7 +748,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
737 int cq, int user_prio); 748 int cq, int user_prio);
738void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 749void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
739 struct mlx4_en_tx_ring *ring); 750 struct mlx4_en_tx_ring *ring);
740 751void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
741int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, 752int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
742 struct mlx4_en_rx_ring **pring, 753 struct mlx4_en_rx_ring **pring,
743 u32 size, u16 stride, int node); 754 u32 size, u16 stride, int node);
@@ -786,7 +797,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
786 797
787#define MLX4_EN_NUM_SELF_TEST 5 798#define MLX4_EN_NUM_SELF_TEST 5
788void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 799void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
789u64 mlx4_en_mac_to_u64(u8 *addr);
790void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev); 800void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
791 801
792/* 802/*
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a58bcbf1b806..2705b9ab9463 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -505,6 +505,84 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
505 mlx4_free_cmd_mailbox(dev, outmailbox); 505 mlx4_free_cmd_mailbox(dev, outmailbox);
506 return err; 506 return err;
507} 507}
508static struct mlx4_roce_gid_entry zgid_entry;
509
510int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
511{
512 int vfs;
513 int slave_gid = slave;
514 unsigned i;
515 struct mlx4_slaves_pport slaves_pport;
516 struct mlx4_active_ports actv_ports;
517 unsigned max_port_p_one;
518
519 if (slave == 0)
520 return MLX4_ROCE_PF_GIDS;
521
522 /* Slave is a VF */
523 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
524 actv_ports = mlx4_get_active_ports(dev, slave);
525 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
526 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
527
528 for (i = 1; i < max_port_p_one; i++) {
529 struct mlx4_active_ports exclusive_ports;
530 struct mlx4_slaves_pport slaves_pport_actv;
531 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
532 set_bit(i - 1, exclusive_ports.ports);
533 if (i == port)
534 continue;
535 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
536 dev, &exclusive_ports);
537 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
538 dev->num_vfs + 1);
539 }
540 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
541 if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
542 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
543 return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
544}
545
546int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
547{
548 int gids;
549 unsigned i;
550 int slave_gid = slave;
551 int vfs;
552
553 struct mlx4_slaves_pport slaves_pport;
554 struct mlx4_active_ports actv_ports;
555 unsigned max_port_p_one;
556
557 if (slave == 0)
558 return 0;
559
560 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
561 actv_ports = mlx4_get_active_ports(dev, slave);
562 max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
563 bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
564
565 for (i = 1; i < max_port_p_one; i++) {
566 struct mlx4_active_ports exclusive_ports;
567 struct mlx4_slaves_pport slaves_pport_actv;
568 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
569 set_bit(i - 1, exclusive_ports.ports);
570 if (i == port)
571 continue;
572 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
573 dev, &exclusive_ports);
574 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
575 dev->num_vfs + 1);
576 }
577 gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
578 vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
579 if (slave_gid <= gids % vfs)
580 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
581
582 return MLX4_ROCE_PF_GIDS + (gids % vfs) +
583 ((gids / vfs) * (slave_gid - 1));
584}
585EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
508 586
509static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod, 587static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
510 u8 op_mod, struct mlx4_cmd_mailbox *inbox) 588 u8 op_mod, struct mlx4_cmd_mailbox *inbox)
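mlx4_get_slave_num_gids() and mlx4_get_base_gid_ix() implement one quota scheme: MLX4_ROCE_PF_GIDS entries are reserved for the PF, and the remaining table is split evenly among the port's VFs, with the first (gids % vfs) VFs taking one extra entry. A userspace sketch of the split once slave_gid (the VF's 1-based index on the port) and vfs are known (the table-size constants are assumed here):

#include <stdio.h>

#define MLX4_ROCE_MAX_GIDS 128	/* assumed table size */
#define MLX4_ROCE_PF_GIDS  16	/* assumed PF reservation */

static int slave_num_gids(int slave_gid, int vfs)
{
	int gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;

	return gids / vfs + (slave_gid <= gids % vfs ? 1 : 0);
}

static int base_gid_ix(int slave_gid, int vfs)
{
	int gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;

	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + (gids / vfs + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + gids % vfs +
	       (gids / vfs) * (slave_gid - 1);
}

int main(void)
{
	int vfs = 5, s;

	/* per-VF ranges tile the table exactly: 16 + 23+23+22+22+22 = 128 */
	for (s = 1; s <= vfs; s++)
		printf("vf%d: base=%d count=%d\n", s,
		       base_gid_ix(s, vfs), slave_num_gids(s, vfs));
	return 0;
}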
@@ -515,14 +593,18 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
515 struct mlx4_slave_state *slave_st = &master->slave_state[slave]; 593 struct mlx4_slave_state *slave_st = &master->slave_state[slave];
516 struct mlx4_set_port_rqp_calc_context *qpn_context; 594 struct mlx4_set_port_rqp_calc_context *qpn_context;
517 struct mlx4_set_port_general_context *gen_context; 595 struct mlx4_set_port_general_context *gen_context;
596 struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
518 int reset_qkey_viols; 597 int reset_qkey_viols;
519 int port; 598 int port;
520 int is_eth; 599 int is_eth;
600 int num_gids;
601 int base;
521 u32 in_modifier; 602 u32 in_modifier;
522 u32 promisc; 603 u32 promisc;
523 u16 mtu, prev_mtu; 604 u16 mtu, prev_mtu;
524 int err; 605 int err;
525 int i; 606 int i, j;
607 int offset;
526 __be32 agg_cap_mask; 608 __be32 agg_cap_mask;
527 __be32 slave_cap_mask; 609 __be32 slave_cap_mask;
528 __be32 new_cap_mask; 610 __be32 new_cap_mask;
@@ -535,7 +617,8 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
535 /* Slaves cannot perform SET_PORT operations except changing MTU */ 617 /* Slaves cannot perform SET_PORT operations except changing MTU */
536 if (is_eth) { 618 if (is_eth) {
537 if (slave != dev->caps.function && 619 if (slave != dev->caps.function &&
538 in_modifier != MLX4_SET_PORT_GENERAL) { 620 in_modifier != MLX4_SET_PORT_GENERAL &&
621 in_modifier != MLX4_SET_PORT_GID_TABLE) {
539 mlx4_warn(dev, "denying SET_PORT for slave:%d\n", 622 mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
540 slave); 623 slave);
541 return -EINVAL; 624 return -EINVAL;
@@ -581,6 +664,67 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
581 664
582 gen_context->mtu = cpu_to_be16(master->max_mtu[port]); 665 gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
583 break; 666 break;
667 case MLX4_SET_PORT_GID_TABLE:
668 /* change to MULTIPLE entries: number of guest's gids
669 * need a FOR-loop here over number of gids the guest has.
670 * 1. Check no duplicates in gids passed by slave
671 */
672 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
673 base = mlx4_get_base_gid_ix(dev, slave, port);
674 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
675 for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
676 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
677 sizeof(zgid_entry)))
678 continue;
679 gid_entry_mb1 = gid_entry_mbox + 1;
680 for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
681 if (!memcmp(gid_entry_mb1->raw,
682 zgid_entry.raw, sizeof(zgid_entry)))
683 continue;
684 if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
685 sizeof(gid_entry_mbox->raw))) {
686 /* found duplicate */
687 return -EINVAL;
688 }
689 }
690 }
691
692 /* 2. Check that do not have duplicates in OTHER
693 * entries in the port GID table
694 */
695 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
696 if (i >= base && i < base + num_gids)
697 continue; /* don't compare to slave's current gids */
698 gid_entry_tbl = &priv->roce_gids[port - 1][i];
699 if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
700 continue;
701 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
702 for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
703 if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
704 sizeof(zgid_entry)))
705 continue;
706 if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
707 sizeof(gid_entry_tbl->raw))) {
708 /* found duplicate */
709 mlx4_warn(dev, "requested gid entry for slave:%d "
710 "is a duplicate of gid at index %d\n",
711 slave, i);
712 return -EINVAL;
713 }
714 }
715 }
716
717 /* insert slave GIDs with memcpy, starting at slave's base index */
718 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
719 for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
720 memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);
721
 722	 	/* Now copy the port's RoCE GID table into the current mailbox for passing to FW */
723 gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
724 for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
725 memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);
726
727 break;
584 } 728 }
585 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod, 729 return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
586 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B, 730 MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -646,6 +790,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
646 struct mlx4_cmd_mailbox *outbox, 790 struct mlx4_cmd_mailbox *outbox,
647 struct mlx4_cmd_info *cmd) 791 struct mlx4_cmd_info *cmd)
648{ 792{
793 int port = mlx4_slave_convert_port(
794 dev, slave, vhcr->in_modifier & 0xFF);
795
796 if (port < 0)
797 return -EINVAL;
798
799 vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
800 (port & 0xFF);
801
649 return mlx4_common_set_port(dev, slave, vhcr->in_modifier, 802 return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
650 vhcr->op_modifier, inbox); 803 vhcr->op_modifier, inbox);
651} 804}
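mlx4_SET_PORT_wrapper now translates the slave's logical port and splices it back into the low byte of in_modifier, leaving the other bits untouched. The masking in isolation (a sketch; the translation itself comes from mlx4_slave_convert_port()):

/* Replace the port byte (bits 7:0) of a SET_PORT in_modifier. */
static unsigned int set_in_mod_port(unsigned int in_mod, int phys_port)
{
	return (in_mod & ~0xFFu) | ((unsigned int)phys_port & 0xFF);
}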
@@ -927,3 +1080,108 @@ void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
927 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK; 1080 *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
928} 1081}
929EXPORT_SYMBOL(mlx4_set_stats_bitmap); 1082EXPORT_SYMBOL(mlx4_set_stats_bitmap);
1083
1084int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1085 int *slave_id)
1086{
1087 struct mlx4_priv *priv = mlx4_priv(dev);
1088 int i, found_ix = -1;
1089 int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1090 struct mlx4_slaves_pport slaves_pport;
1091 unsigned num_vfs;
1092 int slave_gid;
1093
1094 if (!mlx4_is_mfunc(dev))
1095 return -EINVAL;
1096
1097 slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1098 num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
1099
1100 for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1101 if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
1102 found_ix = i;
1103 break;
1104 }
1105 }
1106
1107 if (found_ix >= 0) {
1108 if (found_ix < MLX4_ROCE_PF_GIDS)
1109 slave_gid = 0;
1110 else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1111 (vf_gids / num_vfs + 1))
1112 slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1113 (vf_gids / num_vfs + 1)) + 1;
1114 else
1115 slave_gid =
1116 ((found_ix - MLX4_ROCE_PF_GIDS -
1117 ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1118 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
1119
1120 if (slave_gid) {
1121 struct mlx4_active_ports exclusive_ports;
1122 struct mlx4_active_ports actv_ports;
1123 struct mlx4_slaves_pport slaves_pport_actv;
1124 unsigned max_port_p_one;
1125 int num_slaves_before = 1;
1126
1127 for (i = 1; i < port; i++) {
1128 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1129 set_bit(i, exclusive_ports.ports);
1130 slaves_pport_actv =
1131 mlx4_phys_to_slaves_pport_actv(
1132 dev, &exclusive_ports);
1133 num_slaves_before += bitmap_weight(
1134 slaves_pport_actv.slaves,
1135 dev->num_vfs + 1);
1136 }
1137
1138 if (slave_gid < num_slaves_before) {
1139 bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1140 set_bit(port - 1, exclusive_ports.ports);
1141 slaves_pport_actv =
1142 mlx4_phys_to_slaves_pport_actv(
1143 dev, &exclusive_ports);
1144 slave_gid += bitmap_weight(
1145 slaves_pport_actv.slaves,
1146 dev->num_vfs + 1) -
1147 num_slaves_before;
1148 }
1149 actv_ports = mlx4_get_active_ports(dev, slave_gid);
1150 max_port_p_one = find_first_bit(
1151 actv_ports.ports, dev->caps.num_ports) +
1152 bitmap_weight(actv_ports.ports,
1153 dev->caps.num_ports) + 1;
1154
1155 for (i = 1; i < max_port_p_one; i++) {
1156 if (i == port)
1157 continue;
1158 bitmap_zero(exclusive_ports.ports,
1159 dev->caps.num_ports);
1160 set_bit(i - 1, exclusive_ports.ports);
1161 slaves_pport_actv =
1162 mlx4_phys_to_slaves_pport_actv(
1163 dev, &exclusive_ports);
1164 slave_gid += bitmap_weight(
1165 slaves_pport_actv.slaves,
1166 dev->num_vfs + 1);
1167 }
1168 }
1169 *slave_id = slave_gid;
1170 }
1171
1172 return (found_ix >= 0) ? 0 : -EINVAL;
1173}
1174EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
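mlx4_get_slave_from_roce_gid() inverts the GID allocation: the PF owns the first MLX4_ROCE_PF_GIDS slots, and the remaining vf_gids slots are split across num_vfs VFs with the first vf_gids % num_vfs VFs getting one extra. A sketch of just that bucketing arithmetic, under illustrative constants (the real values come from the device caps; num_vfs > 0 assumed):

#define ROCE_MAX_GIDS	128	/* illustrative */
#define ROCE_PF_GIDS	16	/* illustrative */

/* Map a GID table index to its owning function: 0 for the PF,
 * 1..num_vfs for VFs.  Mirrors the three-way split above. */
static int gid_ix_to_slave(int ix, int num_vfs)
{
	int vf_gids = ROCE_MAX_GIDS - ROCE_PF_GIDS;
	int big = vf_gids / num_vfs + 1;	/* bucket size for the first VFs */
	int small = vf_gids / num_vfs;		/* bucket size for the rest */
	int nbig = vf_gids % num_vfs;		/* number of big buckets */

	if (ix < ROCE_PF_GIDS)
		return 0;
	ix -= ROCE_PF_GIDS;
	if (ix < nbig * big)
		return ix / big + 1;
	return (ix - nbig * big) / small + nbig + 1;
}

With 128 total slots, 16 for the PF and 5 VFs, vf_gids = 112 splits as 2 buckets of 23 and 3 of 22, so index 16 maps to VF 1 and index 127 to VF 5.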
1175
1176int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1177 u8 *gid)
1178{
1179 struct mlx4_priv *priv = mlx4_priv(dev);
1180
1181 if (!mlx4_is_master(dev))
1182 return -EINVAL;
1183
1184 memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
1185 return 0;
1186}
1187EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 57428a0cb9dd..2a33513a0e31 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -52,6 +52,8 @@
52struct mac_res { 52struct mac_res {
53 struct list_head list; 53 struct list_head list;
54 u64 mac; 54 u64 mac;
55 int ref_count;
56 u8 smac_index;
55 u8 port; 57 u8 port;
56}; 58};
57 59
@@ -219,6 +221,11 @@ struct res_fs_rule {
219 int qpn; 221 int qpn;
220}; 222};
221 223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226 return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
222static void *res_tracker_lookup(struct rb_root *root, u64 res_id) 229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
223{ 230{
224 struct rb_node *node = root->rb_node; 231 struct rb_node *node = root->rb_node;
@@ -461,6 +468,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
461 468
462 spin_lock_init(&res_alloc->alloc_lock); 469 spin_lock_init(&res_alloc->alloc_lock);
463 for (t = 0; t < dev->num_vfs + 1; t++) { 470 for (t = 0; t < dev->num_vfs + 1; t++) {
471 struct mlx4_active_ports actv_ports =
472 mlx4_get_active_ports(dev, t);
464 switch (i) { 473 switch (i) {
465 case RES_QP: 474 case RES_QP:
466 initialize_res_quotas(dev, res_alloc, RES_QP, 475 initialize_res_quotas(dev, res_alloc, RES_QP,
@@ -490,10 +499,27 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
490 break; 499 break;
491 case RES_MAC: 500 case RES_MAC:
492 if (t == mlx4_master_func_num(dev)) { 501 if (t == mlx4_master_func_num(dev)) {
493 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 502 int max_vfs_pport = 0;
 503	 			/* Calculate the maximum number of VFs
 504	 			 * per port, across both ports. */
505 for (j = 0; j < dev->caps.num_ports;
506 j++) {
507 struct mlx4_slaves_pport slaves_pport =
508 mlx4_phys_to_slaves_pport(dev, j + 1);
509 unsigned current_slaves =
510 bitmap_weight(slaves_pport.slaves,
511 dev->caps.num_ports) - 1;
512 if (max_vfs_pport < current_slaves)
513 max_vfs_pport =
514 current_slaves;
515 }
516 res_alloc->quota[t] =
517 MLX4_MAX_MAC_NUM -
518 2 * max_vfs_pport;
494 res_alloc->guaranteed[t] = 2; 519 res_alloc->guaranteed[t] = 2;
495 for (j = 0; j < MLX4_MAX_PORTS; j++) 520 for (j = 0; j < MLX4_MAX_PORTS; j++)
496 res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; 521 res_alloc->res_port_free[j] =
522 MLX4_MAX_MAC_NUM;
497 } else { 523 } else {
498 res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 524 res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
499 res_alloc->guaranteed[t] = 2; 525 res_alloc->guaranteed[t] = 2;
@@ -521,9 +547,10 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
521 break; 547 break;
522 } 548 }
523 if (i == RES_MAC || i == RES_VLAN) { 549 if (i == RES_MAC || i == RES_VLAN) {
524 for (j = 0; j < MLX4_MAX_PORTS; j++) 550 for (j = 0; j < dev->caps.num_ports; j++)
525 res_alloc->res_port_rsvd[j] += 551 if (test_bit(j, actv_ports.ports))
526 res_alloc->guaranteed[t]; 552 res_alloc->res_port_rsvd[j] +=
553 res_alloc->guaranteed[t];
527 } else { 554 } else {
528 res_alloc->res_reserved += res_alloc->guaranteed[t]; 555 res_alloc->res_reserved += res_alloc->guaranteed[t];
529 } 556 }
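The PF's MAC quota above is trimmed by two entries per potential VF on the most populated port, so the guaranteed[t] = 2 reservations of every VF always fit in the port's MAC table. A toy computation under an assumed topology (MLX4_MAX_MAC_NUM matching the driver's 128):

#include <stdio.h>

#define MLX4_MAX_MAC_NUM 128	/* per-port MAC table size */

int main(void)
{
	int vfs_per_port[2] = { 5, 7 };	/* assumed: 5 VFs on port 1, 7 on port 2 */
	int max_vfs_pport = 0, i;

	for (i = 0; i < 2; i++)
		if (vfs_per_port[i] > max_vfs_pport)
			max_vfs_pport = vfs_per_port[i];

	/* PF keeps what the guaranteed VF MACs cannot claim */
	printf("PF MAC quota: %d\n", MLX4_MAX_MAC_NUM - 2 * max_vfs_pport);	/* 114 */
	return 0;
}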
@@ -600,15 +627,37 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
600 struct mlx4_qp_context *qp_ctx = inbox->buf + 8; 627 struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
601 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf); 628 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
602 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 629 u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
630 int port;
603 631
604 if (MLX4_QP_ST_UD == ts) 632 if (MLX4_QP_ST_UD == ts) {
605 qp_ctx->pri_path.mgid_index = 0x80 | slave; 633 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
606 634 if (mlx4_is_eth(dev, port))
607 if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) { 635 qp_ctx->pri_path.mgid_index =
608 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) 636 mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
609 qp_ctx->pri_path.mgid_index = slave & 0x7F; 637 else
610 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) 638 qp_ctx->pri_path.mgid_index = slave | 0x80;
611 qp_ctx->alt_path.mgid_index = slave & 0x7F; 639
640 } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
641 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
642 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
643 if (mlx4_is_eth(dev, port)) {
644 qp_ctx->pri_path.mgid_index +=
645 mlx4_get_base_gid_ix(dev, slave, port);
646 qp_ctx->pri_path.mgid_index &= 0x7f;
647 } else {
648 qp_ctx->pri_path.mgid_index = slave & 0x7F;
649 }
650 }
651 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
652 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
653 if (mlx4_is_eth(dev, port)) {
654 qp_ctx->alt_path.mgid_index +=
655 mlx4_get_base_gid_ix(dev, slave, port);
656 qp_ctx->alt_path.mgid_index &= 0x7f;
657 } else {
658 qp_ctx->alt_path.mgid_index = slave & 0x7F;
659 }
660 }
612 } 661 }
613} 662}
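update_gid() now rebases a guest-relative mgid_index into the slave's slice of the physical GID table, keying the base off the port carried in bit 6 of sched_queue. The rebasing alone, with the base lookup stubbed out (the real value comes from mlx4_get_base_gid_ix()):

/* Stub for mlx4_get_base_gid_ix(dev, slave, port): assume each
 * slave owns two consecutive entries per port. */
static int base_gid_ix(int slave, int port)
{
	(void)port;	/* one shared layout in this sketch */
	return slave * 2;
}

/* Rebase a guest-relative GID index for RC/UC/XRC QPs.  Bit 6 of
 * sched_queue selects the port (0 -> port 1, 1 -> port 2); the
 * result is kept within the 7-bit mgid_index field. */
static unsigned char remap_mgid(unsigned char sched_queue,
				unsigned char guest_ix, int slave)
{
	int port = ((sched_queue >> 6) & 1) + 1;

	return (unsigned char)((guest_ix + base_gid_ix(slave, port)) & 0x7f);
}

For UD QPs the driver instead overwrites the index outright and sets the 0x80 bit, as in the hunk above.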
614 663
@@ -619,7 +668,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
619 struct mlx4_qp_context *qpc = inbox->buf + 8; 668 struct mlx4_qp_context *qpc = inbox->buf + 8;
620 struct mlx4_vport_oper_state *vp_oper; 669 struct mlx4_vport_oper_state *vp_oper;
621 struct mlx4_priv *priv; 670 struct mlx4_priv *priv;
622 u32 qp_type;
623 int port; 671 int port;
624 672
625 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; 673 port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
@@ -627,12 +675,6 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
627 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 675 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
628 676
629 if (MLX4_VGT != vp_oper->state.default_vlan) { 677 if (MLX4_VGT != vp_oper->state.default_vlan) {
630 qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
631 if (MLX4_QP_ST_RC == qp_type ||
632 (MLX4_QP_ST_UD == qp_type &&
633 !mlx4_is_qp_reserved(dev, qpn)))
634 return -EINVAL;
635
636 /* the reserved QPs (special, proxy, tunnel) 678 /* the reserved QPs (special, proxy, tunnel)
637 * do not operate over vlans 679 * do not operate over vlans
638 */ 680 */
@@ -1659,11 +1701,39 @@ static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1659 return err; 1701 return err;
1660} 1702}
1661 1703
1662static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port) 1704static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1705 u8 smac_index, u64 *mac)
1706{
1707 struct mlx4_priv *priv = mlx4_priv(dev);
1708 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1709 struct list_head *mac_list =
1710 &tracker->slave_list[slave].res_list[RES_MAC];
1711 struct mac_res *res, *tmp;
1712
1713 list_for_each_entry_safe(res, tmp, mac_list, list) {
1714 if (res->smac_index == smac_index && res->port == (u8) port) {
1715 *mac = res->mac;
1716 return 0;
1717 }
1718 }
1719 return -ENOENT;
1720}
1721
1722static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1663{ 1723{
1664 struct mlx4_priv *priv = mlx4_priv(dev); 1724 struct mlx4_priv *priv = mlx4_priv(dev);
1665 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1725 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1666 struct mac_res *res; 1726 struct list_head *mac_list =
1727 &tracker->slave_list[slave].res_list[RES_MAC];
1728 struct mac_res *res, *tmp;
1729
1730 list_for_each_entry_safe(res, tmp, mac_list, list) {
1731 if (res->mac == mac && res->port == (u8) port) {
 1732	 		/* MAC already registered by this slave: just bump the ref count */
1733 ++res->ref_count;
1734 return 0;
1735 }
1736 }
1667 1737
1668 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1738 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1669 return -EINVAL; 1739 return -EINVAL;
@@ -1674,6 +1744,8 @@ static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
1674 } 1744 }
1675 res->mac = mac; 1745 res->mac = mac;
1676 res->port = (u8) port; 1746 res->port = (u8) port;
1747 res->smac_index = smac_index;
1748 res->ref_count = 1;
1677 list_add_tail(&res->list, 1749 list_add_tail(&res->list,
1678 &tracker->slave_list[slave].res_list[RES_MAC]); 1750 &tracker->slave_list[slave].res_list[RES_MAC]);
1679 return 0; 1751 return 0;
@@ -1690,9 +1762,11 @@ static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1690 1762
1691 list_for_each_entry_safe(res, tmp, mac_list, list) { 1763 list_for_each_entry_safe(res, tmp, mac_list, list) {
1692 if (res->mac == mac && res->port == (u8) port) { 1764 if (res->mac == mac && res->port == (u8) port) {
1693 list_del(&res->list); 1765 if (!--res->ref_count) {
1694 mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1766 list_del(&res->list);
1695 kfree(res); 1767 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1768 kfree(res);
1769 }
1696 break; 1770 break;
1697 } 1771 }
1698 } 1772 }
@@ -1705,10 +1779,13 @@ static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1705 struct list_head *mac_list = 1779 struct list_head *mac_list =
1706 &tracker->slave_list[slave].res_list[RES_MAC]; 1780 &tracker->slave_list[slave].res_list[RES_MAC];
1707 struct mac_res *res, *tmp; 1781 struct mac_res *res, *tmp;
1782 int i;
1708 1783
1709 list_for_each_entry_safe(res, tmp, mac_list, list) { 1784 list_for_each_entry_safe(res, tmp, mac_list, list) {
1710 list_del(&res->list); 1785 list_del(&res->list);
1711 		__mlx4_unregister_mac(dev, res->port, res->mac); 1786 		/* unregister the mac once for each reference the slave took */
1787 for (i = 0; i < res->ref_count; i++)
1788 __mlx4_unregister_mac(dev, res->port, res->mac);
1712 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 1789 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1713 kfree(res); 1790 kfree(res);
1714 } 1791 }
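mac_res entries are now reference-counted: a second registration of the same MAC on the same port bumps ref_count instead of adding a node, deletion frees only on the last reference, and rem_slave_macs() unregisters once per reference. The same pattern over a plain singly linked list (userspace stand-ins, not the kernel's list_head API):

#include <stdint.h>
#include <stdlib.h>

struct mac_node {
	struct mac_node *next;
	uint64_t mac;
	int ref_count;
};

/* Take a reference on mac, allocating a node on first use.
 * Returns 0 on success, -1 on allocation failure. */
static int mac_ref(struct mac_node **head, uint64_t mac)
{
	struct mac_node *n;

	for (n = *head; n; n = n->next) {
		if (n->mac == mac) {
			n->ref_count++;
			return 0;
		}
	}
	n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->mac = mac;
	n->ref_count = 1;
	n->next = *head;
	*head = n;
	return 0;
}

/* Drop one reference; unlink and free on the last one. */
static void mac_unref(struct mac_node **head, uint64_t mac)
{
	struct mac_node **pp, *n;

	for (pp = head; (n = *pp) != NULL; pp = &n->next) {
		if (n->mac == mac) {
			if (--n->ref_count == 0) {
				*pp = n->next;
				free(n);
			}
			return;
		}
	}
}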
@@ -1720,21 +1797,28 @@ static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1720 int err = -EINVAL; 1797 int err = -EINVAL;
1721 int port; 1798 int port;
1722 u64 mac; 1799 u64 mac;
1800 u8 smac_index;
1723 1801
1724 if (op != RES_OP_RESERVE_AND_MAP) 1802 if (op != RES_OP_RESERVE_AND_MAP)
1725 return err; 1803 return err;
1726 1804
1727 port = !in_port ? get_param_l(out_param) : in_port; 1805 port = !in_port ? get_param_l(out_param) : in_port;
1806 port = mlx4_slave_convert_port(
1807 dev, slave, port);
1808
1809 if (port < 0)
1810 return -EINVAL;
1728 mac = in_param; 1811 mac = in_param;
1729 1812
1730 err = __mlx4_register_mac(dev, port, mac); 1813 err = __mlx4_register_mac(dev, port, mac);
1731 if (err >= 0) { 1814 if (err >= 0) {
1815 smac_index = err;
1732 set_param_l(out_param, err); 1816 set_param_l(out_param, err);
1733 err = 0; 1817 err = 0;
1734 } 1818 }
1735 1819
1736 if (!err) { 1820 if (!err) {
1737 err = mac_add_to_slave(dev, slave, mac, port); 1821 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1738 if (err) 1822 if (err)
1739 __mlx4_unregister_mac(dev, port, mac); 1823 __mlx4_unregister_mac(dev, port, mac);
1740 } 1824 }
@@ -1831,6 +1915,11 @@ static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1831 if (!port || op != RES_OP_RESERVE_AND_MAP) 1915 if (!port || op != RES_OP_RESERVE_AND_MAP)
1832 return -EINVAL; 1916 return -EINVAL;
1833 1917
1918 port = mlx4_slave_convert_port(
1919 dev, slave, port);
1920
1921 if (port < 0)
1922 return -EINVAL;
1834 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */ 1923 /* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1835 if (!in_port && port > 0 && port <= dev->caps.num_ports) { 1924 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1836 slave_state[slave].old_vlan_api = true; 1925 slave_state[slave].old_vlan_api = true;
@@ -2128,6 +2217,11 @@ static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2128 switch (op) { 2217 switch (op) {
2129 case RES_OP_RESERVE_AND_MAP: 2218 case RES_OP_RESERVE_AND_MAP:
2130 port = !in_port ? get_param_l(out_param) : in_port; 2219 port = !in_port ? get_param_l(out_param) : in_port;
2220 port = mlx4_slave_convert_port(
2221 dev, slave, port);
2222
2223 if (port < 0)
2224 return -EINVAL;
2131 mac_del_from_slave(dev, slave, in_param, port); 2225 mac_del_from_slave(dev, slave, in_param, port);
2132 __mlx4_unregister_mac(dev, port, in_param); 2226 __mlx4_unregister_mac(dev, port, in_param);
2133 break; 2227 break;
@@ -2147,6 +2241,11 @@ static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2147 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 2241 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2148 int err = 0; 2242 int err = 0;
2149 2243
2244 port = mlx4_slave_convert_port(
2245 dev, slave, port);
2246
2247 if (port < 0)
2248 return -EINVAL;
2150 switch (op) { 2249 switch (op) {
2151 case RES_OP_RESERVE_AND_MAP: 2250 case RES_OP_RESERVE_AND_MAP:
2152 if (slave_state[slave].old_vlan_api) 2251 if (slave_state[slave].old_vlan_api)
@@ -2734,6 +2833,8 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2734 u32 qp_type; 2833 u32 qp_type;
2735 struct mlx4_qp_context *qp_ctx; 2834 struct mlx4_qp_context *qp_ctx;
2736 enum mlx4_qp_optpar optpar; 2835 enum mlx4_qp_optpar optpar;
2836 int port;
2837 int num_gids;
2737 2838
2738 qp_ctx = inbox->buf + 8; 2839 qp_ctx = inbox->buf + 8;
2739 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; 2840 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
@@ -2741,6 +2842,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2741 2842
2742 switch (qp_type) { 2843 switch (qp_type) {
2743 case MLX4_QP_ST_RC: 2844 case MLX4_QP_ST_RC:
2845 case MLX4_QP_ST_XRC:
2744 case MLX4_QP_ST_UC: 2846 case MLX4_QP_ST_UC:
2745 switch (transition) { 2847 switch (transition) {
2746 case QP_TRANS_INIT2RTR: 2848 case QP_TRANS_INIT2RTR:
@@ -2749,13 +2851,24 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
2749 case QP_TRANS_SQD2SQD: 2851 case QP_TRANS_SQD2SQD:
2750 case QP_TRANS_SQD2RTS: 2852 case QP_TRANS_SQD2RTS:
2751 if (slave != mlx4_master_func_num(dev)) 2853 if (slave != mlx4_master_func_num(dev))
2752 /* slaves have only gid index 0 */ 2854 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2753 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) 2855 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2754 if (qp_ctx->pri_path.mgid_index) 2856 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2857 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2858 else
2859 num_gids = 1;
2860 if (qp_ctx->pri_path.mgid_index >= num_gids)
2755 return -EINVAL; 2861 return -EINVAL;
2756 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) 2862 }
2757 if (qp_ctx->alt_path.mgid_index) 2863 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2864 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2865 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2866 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
2867 else
2868 num_gids = 1;
2869 if (qp_ctx->alt_path.mgid_index >= num_gids)
2758 return -EINVAL; 2870 return -EINVAL;
2871 }
2759 break; 2872 break;
2760 default: 2873 default:
2761 break; 2874 break;
@@ -3268,6 +3381,58 @@ int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3268 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3381 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3269} 3382}
3270 3383
3384static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3385 struct mlx4_qp_context *qpc,
3386 struct mlx4_cmd_mailbox *inbox)
3387{
3388 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3389 u8 pri_sched_queue;
3390 int port = mlx4_slave_convert_port(
3391 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3392
3393 if (port < 0)
3394 return -EINVAL;
3395
3396 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3397 ((port & 1) << 6);
3398
3399 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH ||
3400 mlx4_is_eth(dev, port + 1)) {
3401 qpc->pri_path.sched_queue = pri_sched_queue;
3402 }
3403
3404 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3405 port = mlx4_slave_convert_port(
3406 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3407 + 1) - 1;
3408 if (port < 0)
3409 return -EINVAL;
3410 qpc->alt_path.sched_queue =
3411 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3412 (port & 1) << 6;
3413 }
3414 return 0;
3415}
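adjust_qp_sched_queue() touches only bit 6 of sched_queue, which encodes the physical port. The bit surgery in isolation (a sketch):

/* Replace the port bit (bit 6) of a sched_queue byte; phys_port
 * is 1-based and everything else is preserved. */
static unsigned char set_sched_port(unsigned char sched_queue, int phys_port)
{
	return (unsigned char)((sched_queue & ~(1 << 6)) |
			       (((phys_port - 1) & 1) << 6));
}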
3416
3417static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3418 struct mlx4_qp_context *qpc,
3419 struct mlx4_cmd_mailbox *inbox)
3420{
3421 u64 mac;
3422 int port;
3423 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3424 u8 sched = *(u8 *)(inbox->buf + 64);
3425 u8 smac_ix;
3426
3427 port = (sched >> 6 & 1) + 1;
3428 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3429 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3430 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3431 return -ENOENT;
3432 }
3433 return 0;
3434}
3435
3271int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave, 3436int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3272 struct mlx4_vhcr *vhcr, 3437 struct mlx4_vhcr *vhcr,
3273 struct mlx4_cmd_mailbox *inbox, 3438 struct mlx4_cmd_mailbox *inbox,
@@ -3286,10 +3451,16 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3286 u8 orig_vlan_index = qpc->pri_path.vlan_index; 3451 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3287 u8 orig_feup = qpc->pri_path.feup; 3452 u8 orig_feup = qpc->pri_path.feup;
3288 3453
3454 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3455 if (err)
3456 return err;
3289 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave); 3457 err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3290 if (err) 3458 if (err)
3291 return err; 3459 return err;
3292 3460
3461 if (roce_verify_mac(dev, slave, qpc, inbox))
3462 return -EINVAL;
3463
3293 update_pkey_index(dev, slave, inbox); 3464 update_pkey_index(dev, slave, inbox);
3294 update_gid(dev, inbox, (u8)slave); 3465 update_gid(dev, inbox, (u8)slave);
3295 adjust_proxy_tun_qkey(dev, vhcr, qpc); 3466 adjust_proxy_tun_qkey(dev, vhcr, qpc);
@@ -3334,6 +3505,9 @@ int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3334 int err; 3505 int err;
3335 struct mlx4_qp_context *context = inbox->buf + 8; 3506 struct mlx4_qp_context *context = inbox->buf + 8;
3336 3507
3508 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3509 if (err)
3510 return err;
3337 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave); 3511 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3338 if (err) 3512 if (err)
3339 return err; 3513 return err;
@@ -3353,6 +3527,9 @@ int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3353 int err; 3527 int err;
3354 struct mlx4_qp_context *context = inbox->buf + 8; 3528 struct mlx4_qp_context *context = inbox->buf + 8;
3355 3529
3530 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3531 if (err)
3532 return err;
3356 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave); 3533 err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3357 if (err) 3534 if (err)
3358 return err; 3535 return err;
@@ -3371,6 +3548,9 @@ int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3371 struct mlx4_cmd_info *cmd) 3548 struct mlx4_cmd_info *cmd)
3372{ 3549{
3373 struct mlx4_qp_context *context = inbox->buf + 8; 3550 struct mlx4_qp_context *context = inbox->buf + 8;
3551 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3552 if (err)
3553 return err;
3374 adjust_proxy_tun_qkey(dev, vhcr, context); 3554 adjust_proxy_tun_qkey(dev, vhcr, context);
3375 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd); 3555 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3376} 3556}
@@ -3384,6 +3564,9 @@ int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3384 int err; 3564 int err;
3385 struct mlx4_qp_context *context = inbox->buf + 8; 3565 struct mlx4_qp_context *context = inbox->buf + 8;
3386 3566
3567 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3568 if (err)
3569 return err;
3387 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave); 3570 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3388 if (err) 3571 if (err)
3389 return err; 3572 return err;
@@ -3403,6 +3586,9 @@ int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3403 int err; 3586 int err;
3404 struct mlx4_qp_context *context = inbox->buf + 8; 3587 struct mlx4_qp_context *context = inbox->buf + 8;
3405 3588
3589 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3590 if (err)
3591 return err;
3406 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave); 3592 err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3407 if (err) 3593 if (err)
3408 return err; 3594 return err;
@@ -3506,16 +3692,26 @@ static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3506 return err; 3692 return err;
3507} 3693}
3508 3694
3509static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3695static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3510 int block_loopback, enum mlx4_protocol prot, 3696 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
3511 enum mlx4_steer_type type, u64 *reg_id) 3697 enum mlx4_steer_type type, u64 *reg_id)
3512{ 3698{
3513 switch (dev->caps.steering_mode) { 3699 switch (dev->caps.steering_mode) {
3514 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3700 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3515 return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5], 3701 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3702 if (port < 0)
3703 return port;
3704 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
3516 block_loopback, prot, 3705 block_loopback, prot,
3517 reg_id); 3706 reg_id);
3707 }
3518 case MLX4_STEERING_MODE_B0: 3708 case MLX4_STEERING_MODE_B0:
3709 if (prot == MLX4_PROT_ETH) {
3710 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3711 if (port < 0)
3712 return port;
3713 gid[5] = port;
3714 }
3519 return mlx4_qp_attach_common(dev, qp, gid, 3715 return mlx4_qp_attach_common(dev, qp, gid,
3520 block_loopback, prot, type); 3716 block_loopback, prot, type);
3521 default: 3717 default:
@@ -3523,9 +3719,9 @@ static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3523 } 3719 }
3524} 3720}
3525 3721
3526static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], 3722static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3527 enum mlx4_protocol prot, enum mlx4_steer_type type, 3723 u8 gid[16], enum mlx4_protocol prot,
3528 u64 reg_id) 3724 enum mlx4_steer_type type, u64 reg_id)
3529{ 3725{
3530 switch (dev->caps.steering_mode) { 3726 switch (dev->caps.steering_mode) {
3531 case MLX4_STEERING_MODE_DEVICE_MANAGED: 3727 case MLX4_STEERING_MODE_DEVICE_MANAGED:
@@ -3562,7 +3758,7 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3562 3758
3563 qp.qpn = qpn; 3759 qp.qpn = qpn;
3564 if (attach) { 3760 if (attach) {
3565 err = qp_attach(dev, &qp, gid, block_loopback, prot, 3761 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
3566 type, &reg_id); 3762 type, &reg_id);
3567 if (err) { 3763 if (err) {
3568 pr_err("Fail to attach rule to qp 0x%x\n", qpn); 3764 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
@@ -3698,6 +3894,9 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3698 return -EOPNOTSUPP; 3894 return -EOPNOTSUPP;
3699 3895
3700 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; 3896 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3897 ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
3898 if (ctrl->port <= 0)
3899 return -EINVAL;
3701 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; 3900 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3702 err = get_res(dev, slave, qpn, RES_QP, &rqp); 3901 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3703 if (err) { 3902 if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23b7e2d35a93..77ac95f052da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
116 struct mlx5_eq_table *table = &dev->priv.eq_table; 116 struct mlx5_eq_table *table = &dev->priv.eq_table;
117 int num_eqs = 1 << dev->caps.log_max_eq; 117 int num_eqs = 1 << dev->caps.log_max_eq;
118 int nvec; 118 int nvec;
119 int err;
120 int i; 119 int i;
121 120
122 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE; 121 nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
131 for (i = 0; i < nvec; i++) 130 for (i = 0; i < nvec; i++)
132 table->msix_arr[i].entry = i; 131 table->msix_arr[i].entry = i;
133 132
134retry: 133 nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
135 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; 134 MLX5_EQ_VEC_COMP_BASE, nvec);
136 err = pci_enable_msix(dev->pdev, table->msix_arr, nvec); 135 if (nvec < 0)
137 if (err <= 0) { 136 return nvec;
138 return err;
139 } else if (err > 2) {
140 nvec = err;
141 goto retry;
142 }
143 137
144 mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec); 138 table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
145 139
146 return 0; 140 return 0;
147} 141}
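The mlx5 change above drops the old pci_enable_msix() retry loop: pci_enable_msix_range() negotiates a vector count in [minvec, maxvec] in a single call, returning the count granted or a negative errno. A sketch of the calling convention (toy wrapper; the mlx5 caller passes MLX5_EQ_VEC_COMP_BASE as minvec, per the hunk):

#include <linux/pci.h>

/* Enable between minvec and maxvec MSI-X vectors in one call.
 * Returns the number granted (minvec <= n <= maxvec) or a
 * negative errno if even minvec cannot be met. */
static int toy_enable_msix(struct pci_dev *pdev,
			   struct msix_entry *entries, int minvec, int maxvec)
{
	int i, nvec;

	for (i = 0; i < maxvec; i++)
		entries[i].entry = i;

	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* nothing enabled */

	return nvec;	/* caller sizes per-vector resources to nvec */
}

The same conversion is applied to myri10ge, s2io, vxge, forcedeth and netxen below, each picking minvec per its own fallback policy.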
@@ -537,7 +531,6 @@ static int __init init(void)
537 531
538 return 0; 532 return 0;
539 533
540 mlx5_health_cleanup();
541err_debug: 534err_debug:
542 mlx5_unregister_debugfs(); 535 mlx5_unregister_debugfs();
543 return err; 536 return err;
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index ce84dc289c8f..14ac0e2bc09f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4832,7 +4832,7 @@ static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
4832 skb->csum = old->csum; 4832 skb->csum = old->csum;
4833 skb_set_network_header(skb, ETH_HLEN); 4833 skb_set_network_header(skb, ETH_HLEN);
4834 4834
4835 dev_kfree_skb(old); 4835 dev_consume_skb_any(old);
4836} 4836}
4837 4837
4838/** 4838/**
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7e8ba3..130f6b204efa 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
2329 status = 0; 2329 status = 0;
2330 if (myri10ge_msi) { 2330 if (myri10ge_msi) {
2331 if (mgp->num_slices > 1) { 2331 if (mgp->num_slices > 1) {
2332 status = 2332 status = pci_enable_msix_range(pdev, mgp->msix_vectors,
2333 pci_enable_msix(pdev, mgp->msix_vectors, 2333 mgp->num_slices, mgp->num_slices);
2334 mgp->num_slices); 2334 if (status < 0) {
2335 if (status == 0) {
2336 mgp->msix_enabled = 1;
2337 } else {
2338 dev_err(&pdev->dev, 2335 dev_err(&pdev->dev,
2339 "Error %d setting up MSI-X\n", status); 2336 "Error %d setting up MSI-X\n", status);
2340 return status; 2337 return status;
2341 } 2338 }
2339 mgp->msix_enabled = 1;
2342 } 2340 }
2343 if (mgp->msix_enabled == 0) { 2341 if (mgp->msix_enabled == 0) {
2344 status = pci_enable_msi(pdev); 2342 status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3895 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors), 3893 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3896 GFP_KERNEL); 3894 GFP_KERNEL);
3897 if (mgp->msix_vectors == NULL) 3895 if (mgp->msix_vectors == NULL)
3898 goto disable_msix; 3896 goto no_msix;
3899 for (i = 0; i < mgp->num_slices; i++) { 3897 for (i = 0; i < mgp->num_slices; i++) {
3900 mgp->msix_vectors[i].entry = i; 3898 mgp->msix_vectors[i].entry = i;
3901 } 3899 }
3902 3900
3903 while (mgp->num_slices > 1) { 3901 while (mgp->num_slices > 1) {
3904 /* make sure it is a power of two */ 3902 mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
3905 while (!is_power_of_2(mgp->num_slices))
3906 mgp->num_slices--;
3907 if (mgp->num_slices == 1) 3903 if (mgp->num_slices == 1)
3908 goto disable_msix; 3904 goto no_msix;
3909 status = pci_enable_msix(pdev, mgp->msix_vectors, 3905 status = pci_enable_msix_range(pdev,
3910 mgp->num_slices); 3906 mgp->msix_vectors,
3911 if (status == 0) { 3907 mgp->num_slices,
3912 pci_disable_msix(pdev); 3908 mgp->num_slices);
3909 if (status < 0)
3910 goto no_msix;
3911
3912 pci_disable_msix(pdev);
3913
3914 if (status == mgp->num_slices) {
3913 if (old_allocated) 3915 if (old_allocated)
3914 kfree(old_fw); 3916 kfree(old_fw);
3915 return; 3917 return;
3916 } 3918 } else {
3917 if (status > 0)
3918 mgp->num_slices = status; 3919 mgp->num_slices = status;
3919 else 3920 }
3920 goto disable_msix;
3921 } 3921 }
3922 3922
3923disable_msix: 3923no_msix:
3924 if (mgp->msix_vectors != NULL) { 3924 if (mgp->msix_vectors != NULL) {
3925 kfree(mgp->msix_vectors); 3925 kfree(mgp->msix_vectors);
3926 mgp->msix_vectors = NULL; 3926 mgp->msix_vectors = NULL;
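rounddown_pow_of_two() replaces myri10ge's open-coded decrement loop for snapping the slice count to a power of two. Equivalent bit-twiddling without the kernel helper, for nonzero 32-bit values (a sketch):

/* Largest power of two <= n, n nonzero: smear the top bit down,
 * then keep only it. */
static unsigned int rounddown_pow2(unsigned int n)
{
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	return n - (n >> 1);
}

rounddown_pow2(12) is 8, matching what the removed while-loop converged to, but in constant time.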
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd0b2c7..a2844ff322c4 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
2914 struct RxD1 *rxdp1; 2914 struct RxD1 *rxdp1;
2915 struct RxD3 *rxdp3; 2915 struct RxD3 *rxdp3;
2916 2916
2917 if (budget <= 0)
2918 return napi_pkts;
2919
2917 get_info = ring_data->rx_curr_get_info; 2920 get_info = ring_data->rx_curr_get_info;
2918 get_block = get_info.block_index; 2921 get_block = get_info.block_index;
2919 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info)); 2922 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
@@ -3792,9 +3795,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
3792 writeq(rx_mat, &bar0->rx_mat); 3795 writeq(rx_mat, &bar0->rx_mat);
3793 readq(&bar0->rx_mat); 3796 readq(&bar0->rx_mat);
3794 3797
3795 ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); 3798 ret = pci_enable_msix_range(nic->pdev, nic->entries,
3799 nic->num_entries, nic->num_entries);
3796 	/* We fail init on error or if we get fewer vectors than the minimum required */ 3800 	/* We fail init on error or if we get fewer vectors than the minimum required */
3797 if (ret) { 3801 if (ret < 0) {
3798 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n"); 3802 DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
3799 kfree(nic->entries); 3803 kfree(nic->entries);
3800 swstats->mem_freed += nic->num_entries * 3804 swstats->mem_freed += nic->num_entries *
@@ -4045,7 +4049,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4045 if (!is_s2io_card_up(sp)) { 4049 if (!is_s2io_card_up(sp)) {
4046 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n", 4050 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
4047 dev->name); 4051 dev->name);
4048 dev_kfree_skb(skb); 4052 dev_kfree_skb_any(skb);
4049 return NETDEV_TX_OK; 4053 return NETDEV_TX_OK;
4050 } 4054 }
4051 4055
@@ -4118,7 +4122,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4118 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { 4122 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4119 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); 4123 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4120 s2io_stop_tx_queue(sp, fifo->fifo_no); 4124 s2io_stop_tx_queue(sp, fifo->fifo_no);
4121 dev_kfree_skb(skb); 4125 dev_kfree_skb_any(skb);
4122 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4126 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4123 return NETDEV_TX_OK; 4127 return NETDEV_TX_OK;
4124 } 4128 }
@@ -4240,7 +4244,7 @@ pci_map_failed:
4240 swstats->pci_map_fail_cnt++; 4244 swstats->pci_map_fail_cnt++;
4241 s2io_stop_tx_queue(sp, fifo->fifo_no); 4245 s2io_stop_tx_queue(sp, fifo->fifo_no);
4242 swstats->mem_freed += skb->truesize; 4246 swstats->mem_freed += skb->truesize;
4243 dev_kfree_skb(skb); 4247 dev_kfree_skb_any(skb);
4244 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4248 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4245 return NETDEV_TX_OK; 4249 return NETDEV_TX_OK;
4246} 4250}
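The s2io drop paths switch to dev_kfree_skb_any(), which defers to dev_kfree_skb_irq() when called in hard-irq context or with interrupts disabled and frees directly otherwise; that matters because ndo_start_xmit can be entered with IRQs off (e.g. under netpoll). A toy drop path showing the pattern (illustrative driver, not s2io):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Drop path usable from any context. */
static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (!netif_carrier_ok(dev)) {
		dev_kfree_skb_any(skb);	/* safe in irq or process context */
		return NETDEV_TX_OK;	/* skb consumed, never requeued */
	}
	/* ... queue the frame to hardware here ... */
	dev_kfree_skb_any(skb);		/* toy: pretend it was sent */
	return NETDEV_TX_OK;
}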
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e8698e630..d107bcbb8543 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
368 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 368 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
369 ring->ndev->name, __func__, __LINE__); 369 ring->ndev->name, __func__, __LINE__);
370 370
371 if (ring->budget <= 0)
372 goto out;
373
371 do { 374 do {
372 prefetch((char *)dtr + L1_CACHE_BYTES); 375 prefetch((char *)dtr + L1_CACHE_BYTES);
373 rx_priv = vxge_hw_ring_rxd_private_get(dtr); 376 rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
525 if (first_dtr) 528 if (first_dtr)
526 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr); 529 vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
527 530
531out:
528 vxge_debug_entryexit(VXGE_TRACE, 532 vxge_debug_entryexit(VXGE_TRACE,
529 "%s:%d Exiting...", 533 "%s:%d Exiting...",
530 __func__, __LINE__); 534 __func__, __LINE__);
@@ -820,7 +824,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
820 if (unlikely(skb->len <= 0)) { 824 if (unlikely(skb->len <= 0)) {
821 vxge_debug_tx(VXGE_ERR, 825 vxge_debug_tx(VXGE_ERR,
822 "%s: Buffer has no data..", dev->name); 826 "%s: Buffer has no data..", dev->name);
823 dev_kfree_skb(skb); 827 dev_kfree_skb_any(skb);
824 return NETDEV_TX_OK; 828 return NETDEV_TX_OK;
825 } 829 }
826 830
@@ -829,7 +833,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
829 if (unlikely(!is_vxge_card_up(vdev))) { 833 if (unlikely(!is_vxge_card_up(vdev))) {
830 vxge_debug_tx(VXGE_ERR, 834 vxge_debug_tx(VXGE_ERR,
831 "%s: vdev not initialized", dev->name); 835 "%s: vdev not initialized", dev->name);
832 dev_kfree_skb(skb); 836 dev_kfree_skb_any(skb);
833 return NETDEV_TX_OK; 837 return NETDEV_TX_OK;
834 } 838 }
835 839
@@ -839,7 +843,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
839 vxge_debug_tx(VXGE_ERR, 843 vxge_debug_tx(VXGE_ERR,
840 "%s: Failed to store the mac address", 844 "%s: Failed to store the mac address",
841 dev->name); 845 dev->name);
842 dev_kfree_skb(skb); 846 dev_kfree_skb_any(skb);
843 return NETDEV_TX_OK; 847 return NETDEV_TX_OK;
844 } 848 }
845 } 849 }
@@ -986,7 +990,7 @@ _exit1:
986 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 990 vxge_hw_fifo_txdl_free(fifo_hw, dtr);
987_exit0: 991_exit0:
988 netif_tx_stop_queue(fifo->txq); 992 netif_tx_stop_queue(fifo->txq);
989 dev_kfree_skb(skb); 993 dev_kfree_skb_any(skb);
990 994
991 return NETDEV_TX_OK; 995 return NETDEV_TX_OK;
992} 996}
@@ -2349,12 +2353,18 @@ start:
2349 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID; 2353 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2350 vdev->vxge_entries[j].in_use = 0; 2354 vdev->vxge_entries[j].in_use = 0;
2351 2355
2352 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt); 2356 ret = pci_enable_msix_range(vdev->pdev,
2353 if (ret > 0) { 2357 vdev->entries, 3, vdev->intr_cnt);
2358 if (ret < 0) {
2359 ret = -ENODEV;
2360 goto enable_msix_failed;
2361 } else if (ret < vdev->intr_cnt) {
2362 pci_disable_msix(vdev->pdev);
2363
2354 vxge_debug_init(VXGE_ERR, 2364 vxge_debug_init(VXGE_ERR,
2355 "%s: MSI-X enable failed for %d vectors, ret: %d", 2365 "%s: MSI-X enable failed for %d vectors, ret: %d",
2356 VXGE_DRIVER_NAME, vdev->intr_cnt, ret); 2366 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2357 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) { 2367 if (max_config_vpath != VXGE_USE_DEFAULT) {
2358 ret = -ENODEV; 2368 ret = -ENODEV;
2359 goto enable_msix_failed; 2369 goto enable_msix_failed;
2360 } 2370 }
@@ -2368,9 +2378,6 @@ start:
2368 vxge_close_vpaths(vdev, temp); 2378 vxge_close_vpaths(vdev, temp);
2369 vdev->no_of_vpath = temp; 2379 vdev->no_of_vpath = temp;
2370 goto start; 2380 goto start;
2371 } else if (ret < 0) {
2372 ret = -ENODEV;
2373 goto enable_msix_failed;
2374 } 2381 }
2375 return 0; 2382 return 0;
2376 2383
@@ -3131,12 +3138,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3131 u64 packets, bytes, multicast; 3138 u64 packets, bytes, multicast;
3132 3139
3133 do { 3140 do {
3134 start = u64_stats_fetch_begin_bh(&rxstats->syncp); 3141 start = u64_stats_fetch_begin_irq(&rxstats->syncp);
3135 3142
3136 packets = rxstats->rx_frms; 3143 packets = rxstats->rx_frms;
3137 multicast = rxstats->rx_mcast; 3144 multicast = rxstats->rx_mcast;
3138 bytes = rxstats->rx_bytes; 3145 bytes = rxstats->rx_bytes;
3139 } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start)); 3146 } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
3140 3147
3141 net_stats->rx_packets += packets; 3148 net_stats->rx_packets += packets;
3142 net_stats->rx_bytes += bytes; 3149 net_stats->rx_bytes += bytes;
@@ -3146,11 +3153,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
3146 net_stats->rx_dropped += rxstats->rx_dropped; 3153 net_stats->rx_dropped += rxstats->rx_dropped;
3147 3154
3148 do { 3155 do {
3149 start = u64_stats_fetch_begin_bh(&txstats->syncp); 3156 start = u64_stats_fetch_begin_irq(&txstats->syncp);
3150 3157
3151 packets = txstats->tx_frms; 3158 packets = txstats->tx_frms;
3152 bytes = txstats->tx_bytes; 3159 bytes = txstats->tx_bytes;
3153 } while (u64_stats_fetch_retry_bh(&txstats->syncp, start)); 3160 } while (u64_stats_fetch_retry_irq(&txstats->syncp, start));
3154 3161
3155 net_stats->tx_packets += packets; 3162 net_stats->tx_packets += packets;
3156 net_stats->tx_bytes += bytes; 3163 net_stats->tx_bytes += bytes;
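vxge's stats snapshot moves to the _irq flavor of the u64_stats helpers, since the writers run from interrupt handlers while the reader sits in process context. The canonical retry loop with the real API (toy struct for illustration):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct toy_stats {
	struct u64_stats_sync syncp;
	u64 packets;
	u64 bytes;
};

/* Snapshot a consistent (packets, bytes) pair: loop while a
 * writer in irq context is mid-update. */
static void toy_stats_read(struct toy_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}

forcedeth below gets the identical conversion for its software stats.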
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97fe67f2..fddb464aeab3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1753,19 +1753,19 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1753 1753
1754 /* software stats */ 1754 /* software stats */
1755 do { 1755 do {
1756 syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp); 1756 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1757 storage->rx_packets = np->stat_rx_packets; 1757 storage->rx_packets = np->stat_rx_packets;
1758 storage->rx_bytes = np->stat_rx_bytes; 1758 storage->rx_bytes = np->stat_rx_bytes;
1759 storage->rx_dropped = np->stat_rx_dropped; 1759 storage->rx_dropped = np->stat_rx_dropped;
1760 storage->rx_missed_errors = np->stat_rx_missed_errors; 1760 storage->rx_missed_errors = np->stat_rx_missed_errors;
1761 } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start)); 1761 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1762 1762
1763 do { 1763 do {
1764 syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp); 1764 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1765 storage->tx_packets = np->stat_tx_packets; 1765 storage->tx_packets = np->stat_tx_packets;
1766 storage->tx_bytes = np->stat_tx_bytes; 1766 storage->tx_bytes = np->stat_tx_bytes;
1767 storage->tx_dropped = np->stat_tx_dropped; 1767 storage->tx_dropped = np->stat_tx_dropped;
1768 } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start)); 1768 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1769 1769
1770 /* If the nic supports hw counters then retrieve latest values */ 1770 /* If the nic supports hw counters then retrieve latest values */
1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) { 1771 if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -2231,7 +2231,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2231 if (pci_dma_mapping_error(np->pci_dev, 2231 if (pci_dma_mapping_error(np->pci_dev,
2232 np->put_tx_ctx->dma)) { 2232 np->put_tx_ctx->dma)) {
2233 /* on DMA mapping error - drop the packet */ 2233 /* on DMA mapping error - drop the packet */
2234 kfree_skb(skb); 2234 dev_kfree_skb_any(skb);
2235 u64_stats_update_begin(&np->swstats_tx_syncp); 2235 u64_stats_update_begin(&np->swstats_tx_syncp);
2236 np->stat_tx_dropped++; 2236 np->stat_tx_dropped++;
2237 u64_stats_update_end(&np->swstats_tx_syncp); 2237 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2277,7 +2277,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2277 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2278 tmp_tx_ctx = np->first_tx_ctx; 2278 tmp_tx_ctx = np->first_tx_ctx;
2279 } while (tmp_tx_ctx != np->put_tx_ctx); 2279 } while (tmp_tx_ctx != np->put_tx_ctx);
2280 kfree_skb(skb); 2280 dev_kfree_skb_any(skb);
2281 np->put_tx_ctx = start_tx_ctx; 2281 np->put_tx_ctx = start_tx_ctx;
2282 u64_stats_update_begin(&np->swstats_tx_syncp); 2282 u64_stats_update_begin(&np->swstats_tx_syncp);
2283 np->stat_tx_dropped++; 2283 np->stat_tx_dropped++;
@@ -2380,7 +2380,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2380 if (pci_dma_mapping_error(np->pci_dev, 2380 if (pci_dma_mapping_error(np->pci_dev,
2381 np->put_tx_ctx->dma)) { 2381 np->put_tx_ctx->dma)) {
2382 /* on DMA mapping error - drop the packet */ 2382 /* on DMA mapping error - drop the packet */
2383 kfree_skb(skb); 2383 dev_kfree_skb_any(skb);
2384 u64_stats_update_begin(&np->swstats_tx_syncp); 2384 u64_stats_update_begin(&np->swstats_tx_syncp);
2385 np->stat_tx_dropped++; 2385 np->stat_tx_dropped++;
2386 u64_stats_update_end(&np->swstats_tx_syncp); 2386 u64_stats_update_end(&np->swstats_tx_syncp);
@@ -2427,7 +2427,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx)) 2427 if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
2428 tmp_tx_ctx = np->first_tx_ctx; 2428 tmp_tx_ctx = np->first_tx_ctx;
2429 } while (tmp_tx_ctx != np->put_tx_ctx); 2429 } while (tmp_tx_ctx != np->put_tx_ctx);
2430 kfree_skb(skb); 2430 dev_kfree_skb_any(skb);
2431 np->put_tx_ctx = start_tx_ctx; 2431 np->put_tx_ctx = start_tx_ctx;
2432 u64_stats_update_begin(&np->swstats_tx_syncp); 2432 u64_stats_update_begin(&np->swstats_tx_syncp);
2433 np->stat_tx_dropped++; 2433 np->stat_tx_dropped++;
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3930{ 3930{
3931 struct fe_priv *np = get_nvpriv(dev); 3931 struct fe_priv *np = get_nvpriv(dev);
3932 u8 __iomem *base = get_hwbase(dev); 3932 u8 __iomem *base = get_hwbase(dev);
3933 int ret = 1; 3933 int ret;
3934 int i; 3934 int i;
3935 irqreturn_t (*handler)(int foo, void *data); 3935 irqreturn_t (*handler)(int foo, void *data);
3936 3936
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3946 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3946 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) 3947 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3948 np->msi_x_entry[i].entry = i; 3948 np->msi_x_entry[i].entry = i;
3949 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK)); 3949 ret = pci_enable_msix_range(np->pci_dev,
3950 if (ret == 0) { 3950 np->msi_x_entry,
3951 np->msi_flags & NV_MSI_X_VECTORS_MASK,
3952 np->msi_flags & NV_MSI_X_VECTORS_MASK);
3953 if (ret > 0) {
3951 np->msi_flags |= NV_MSI_X_ENABLED; 3954 np->msi_flags |= NV_MSI_X_ENABLED;
3952 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3955 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3953 /* Request irq for rx handling */ 3956 /* Request irq for rx handling */
3954 sprintf(np->name_rx, "%s-rx", dev->name); 3957 sprintf(np->name_rx, "%s-rx", dev->name);
3955 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3958 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3956 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3959 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
3960 if (ret) {
3957 netdev_info(dev, 3961 netdev_info(dev,
3958 "request_irq failed for rx %d\n", 3962 "request_irq failed for rx %d\n",
3959 ret); 3963 ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3963 } 3967 }
3964 /* Request irq for tx handling */ 3968 /* Request irq for tx handling */
3965 sprintf(np->name_tx, "%s-tx", dev->name); 3969 sprintf(np->name_tx, "%s-tx", dev->name);
3966 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3970 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3967 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3971 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
3972 if (ret) {
3968 netdev_info(dev, 3973 netdev_info(dev,
3969 "request_irq failed for tx %d\n", 3974 "request_irq failed for tx %d\n",
3970 ret); 3975 ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3974 } 3979 }
3975 /* Request irq for link and timer handling */ 3980 /* Request irq for link and timer handling */
3976 sprintf(np->name_other, "%s-other", dev->name); 3981 sprintf(np->name_other, "%s-other", dev->name);
3977 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3982 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3978 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3983 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
3984 if (ret) {
3979 netdev_info(dev, 3985 netdev_info(dev,
3980 "request_irq failed for link %d\n", 3986 "request_irq failed for link %d\n",
3981 ret); 3987 ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3991 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); 3997 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3992 } else { 3998 } else {
3993 /* Request irq for all interrupts */ 3999 /* Request irq for all interrupts */
3994 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 4000 ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
4001 handler, IRQF_SHARED, dev->name, dev);
4002 if (ret) {
3995 netdev_info(dev, 4003 netdev_info(dev,
3996 "request_irq failed %d\n", 4004 "request_irq failed %d\n",
3997 ret); 4005 ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4005 writel(0, base + NvRegMSIXMap1); 4013 writel(0, base + NvRegMSIXMap1);
4006 } 4014 }
4007 netdev_info(dev, "MSI-X enabled\n"); 4015 netdev_info(dev, "MSI-X enabled\n");
4016 return 0;
4008 } 4017 }
4009 } 4018 }
4010 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 4019 if (np->msi_flags & NV_MSI_CAPABLE) {
4011 ret = pci_enable_msi(np->pci_dev); 4020 ret = pci_enable_msi(np->pci_dev);
4012 if (ret == 0) { 4021 if (ret == 0) {
4013 np->msi_flags |= NV_MSI_ENABLED; 4022 np->msi_flags |= NV_MSI_ENABLED;
4014 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 4023 ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
4024 if (ret) {
4015 netdev_info(dev, "request_irq failed %d\n", 4025 netdev_info(dev, "request_irq failed %d\n",
4016 ret); 4026 ret);
4017 pci_disable_msi(np->pci_dev); 4027 pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
4025 /* enable msi vector 0 */ 4035 /* enable msi vector 0 */
4026 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); 4036 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
4027 netdev_info(dev, "MSI enabled\n"); 4037 netdev_info(dev, "MSI enabled\n");
4038 return 0;
4028 } 4039 }
4029 } 4040 }
4030 if (ret != 0) {
4031 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4032 goto out_err;
4033 4041
4034 } 4042 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
4043 goto out_err;
4035 4044
4036 return 0; 4045 return 0;
4037out_free_tx: 4046out_free_tx:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849dea32b1..f09c35d669b3 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
643 643
644 if (adapter->msix_supported) { 644 if (adapter->msix_supported) {
645 netxen_init_msix_entries(adapter, num_msix); 645 netxen_init_msix_entries(adapter, num_msix);
646 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix); 646 err = pci_enable_msix_range(pdev, adapter->msix_entries,
647 if (err == 0) { 647 num_msix, num_msix);
648 if (err > 0) {
648 adapter->flags |= NETXEN_NIC_MSIX_ENABLED; 649 adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
649 netxen_set_msix_bit(pdev, 1); 650 netxen_set_msix_bit(pdev, 1);
650 651
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f19f81cde134..b9039b569beb 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 3 40#define _QLCNIC_LINUX_MINOR 3
41#define _QLCNIC_LINUX_SUBVERSION 55 41#define _QLCNIC_LINUX_SUBVERSION 57
42#define QLCNIC_LINUX_VERSIONID "5.3.55" 42#define QLCNIC_LINUX_VERSIONID "5.3.57"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -169,11 +169,20 @@ struct cmd_desc_type0 {
169 169
170 __le64 addr_buffer2; 170 __le64 addr_buffer2;
171 171
172 __le16 reference_handle; 172 __le16 encap_descr; /* 15:10 offset of outer L3 header,
173 * 9:6 number of 32bit words in outer L3 header,
174 * 5 offload outer L4 checksum,
175 * 4 offload outer L3 checksum,
176 * 3 Inner L4 type, TCP=0, UDP=1,
177 * 2 Inner L3 type, IPv4=0, IPv6=1,
178 * 1 Outer L3 type,IPv4=0, IPv6=1,
179 * 0 type of encapsulation, GRE=0, VXLAN=1
180 */
173 __le16 mss; 181 __le16 mss;
174 u8 port_ctxid; /* 7:4 ctxid 3:0 port */ 182 u8 port_ctxid; /* 7:4 ctxid 3:0 port */
175 u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */ 183 u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
176 __le16 conn_id; /* IPSec offoad only */ 184 u8 outer_hdr_length; /* Encapsulation only */
185 u8 rsvd1;
177 186
178 __le64 addr_buffer3; 187 __le64 addr_buffer3;
179 __le64 addr_buffer1; 188 __le64 addr_buffer1;
@@ -183,7 +192,9 @@ struct cmd_desc_type0 {
183 __le64 addr_buffer4; 192 __le64 addr_buffer4;
184 193
185 u8 eth_addr[ETH_ALEN]; 194 u8 eth_addr[ETH_ALEN];
186 __le16 vlan_TCI; 195 __le16 vlan_TCI; /* In case of encapsulation,
196 * this is for outer VLAN
197 */
187 198
188} __attribute__ ((aligned(64))); 199} __attribute__ ((aligned(64)));
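encap_descr reuses the old reference_handle slot to describe the tunnel frame; the bit layout is spelled out in the comment above. A hedged helper assembling the field for one case (the helper and its parameter names are illustrative, not the driver's):

/* Pack the 16-bit encap descriptor: 15:10 outer L3 offset, 9:6
 * outer L3 length in dwords, 5/4 outer L4/L3 csum offload, 3 inner
 * L4 UDP, 2 inner L3 IPv6, 1 outer L3 IPv6, 0 VXLAN(1)/GRE(0).
 * Caller stores the result with cpu_to_le16(). */
static unsigned short build_encap_descr(unsigned l3_off, unsigned l3_dwords,
					int cko_l4, int cko_l3, int inner_udp,
					int inner_v6, int outer_v6, int vxlan)
{
	return (unsigned short)(((l3_off & 0x3f) << 10) |
				((l3_dwords & 0xf) << 6) |
				((cko_l4 & 1) << 5) | ((cko_l3 & 1) << 4) |
				((inner_udp & 1) << 3) | ((inner_v6 & 1) << 2) |
				((outer_v6 & 1) << 1) | (vxlan & 1));
}

For a VXLAN-in-IPv4 frame with both outer checksums offloaded and a UDP inner L4, that would be build_encap_descr(14, 5, 1, 1, 1, 0, 0, 1).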
189 200
@@ -394,7 +405,7 @@ struct qlcnic_nic_intr_coalesce {
394 u32 timer_out; 405 u32 timer_out;
395}; 406};
396 407
397struct qlcnic_dump_template_hdr { 408struct qlcnic_83xx_dump_template_hdr {
398 u32 type; 409 u32 type;
399 u32 offset; 410 u32 offset;
400 u32 size; 411 u32 size;
@@ -411,15 +422,42 @@ struct qlcnic_dump_template_hdr {
 	u32 rsvd[0];
 };
 
+struct qlcnic_82xx_dump_template_hdr {
+	u32 type;
+	u32 offset;
+	u32 size;
+	u32 cap_mask;
+	u32 num_entries;
+	u32 version;
+	u32 timestamp;
+	u32 checksum;
+	u32 drv_cap_mask;
+	u32 sys_info[3];
+	u32 saved_state[16];
+	u32 cap_sizes[8];
+	u32 rsvd[7];
+	u32 capabilities;
+	u32 rsvd1[0];
+};
+
 struct qlcnic_fw_dump {
 	u8 clr;	/* flag to indicate if dump is cleared */
 	bool enable; /* enable/disable dump */
 	u32 size; /* total size of the dump */
+	u32 cap_mask; /* Current capture mask */
 	void *data; /* dump data area */
-	struct qlcnic_dump_template_hdr *tmpl_hdr;
+	void *tmpl_hdr;
 	dma_addr_t phys_addr;
 	void *dma_buffer;
 	bool use_pex_dma;
+	/* Read only elements which are common between 82xx and 83xx
+	 * template header. Update these values immediately after we read
+	 * template header from Firmware
+	 */
+	u32 tmpl_hdr_size;
+	u32 version;
+	u32 num_entries;
+	u32 offset;
 };
 
 /*
@@ -497,6 +535,7 @@ struct qlcnic_hardware_context {
 	u8 extend_lb_time;
 	u8 phys_port_id[ETH_ALEN];
 	u8 lb_mode;
+	u16 vxlan_port;
 };
 
 struct qlcnic_adapter_stats {
@@ -511,6 +550,9 @@ struct qlcnic_adapter_stats {
 	u64  txbytes;
 	u64  lrobytes;
 	u64  lso_frames;
+	u64  encap_lso_frames;
+	u64  encap_tx_csummed;
+	u64  encap_rx_csummed;
 	u64  xmit_on;
 	u64  xmit_off;
 	u64  skb_alloc_failure;
@@ -872,6 +914,10 @@ struct qlcnic_mac_vlan_list {
 #define QLCNIC_FW_CAPABILITY_2_BEACON		BIT_7
 #define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG	BIT_9
 
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD	BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD	BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD	BIT_4
+
 /* module types */
 #define LINKEVENT_MODULE_NOT_PRESENT		1
 #define LINKEVENT_MODULE_OPTICAL_UNKNOWN	2
@@ -965,6 +1011,8 @@ struct qlcnic_ipaddr {
 #define QLCNIC_APP_CHANGED_FLAGS	0x20000
 #define QLCNIC_HAS_PHYS_PORT_ID		0x40000
 #define QLCNIC_TSS_RSS			0x80000
+#define QLCNIC_ADD_VXLAN_PORT		0x100000
+#define QLCNIC_DEL_VXLAN_PORT		0x200000
 
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
@@ -1769,10 +1817,28 @@ struct qlcnic_hardware_ops {
 				  struct qlcnic_host_tx_ring *);
 	void (*disable_tx_intr) (struct qlcnic_adapter *,
 				 struct qlcnic_host_tx_ring *);
+	u32 (*get_saved_state)(void *, u32);
+	void (*set_saved_state)(void *, u32, u32);
+	void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+	u32 (*get_cap_size)(void *, int);
+	void (*set_sys_info)(void *, int, u32);
+	void (*store_cap_mask)(void *, u32);
 };
 
 extern struct qlcnic_nic_template qlcnic_vf_ops;
 
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+	return adapter->ahw->extra_capability[0] &
+	       QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+	return adapter->ahw->extra_capability[0] &
+	       QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
 static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
 {
 	return adapter->nic_ops->start_firmware(adapter);
@@ -2007,6 +2073,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
 	adapter->ahw->hw_ops->read_phys_port_id(adapter);
 }
 
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+					 void *t_hdr, u32 index)
+{
+	return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+					  void *t_hdr, u32 index, u32 value)
+{
+	adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+						struct qlcnic_fw_dump *fw_dump)
+{
+	adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+				      void *tmpl_hdr, int index)
+{
+	return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+				       void *tmpl_hdr, int idx, u32 value)
+{
+	adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+					 void *tmpl_hdr, u32 mask)
+{
+	adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
 static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
 					    u32 key)
 {
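
The encap_descr layout documented in cmd_desc_type0 above can be packed as follows. This is a sketch only; the local variables outer_l3_words and outer_l3_offset are hypothetical stand-ins for values the driver derives from the skb:

	u16 encap_descr = 0;

	encap_descr |= BIT_0;				/* bit 0: VXLAN (GRE leaves it clear) */
	/* bits 1..3 stay 0 for outer IPv4, inner IPv4, inner TCP */
	encap_descr |= BIT_4 | BIT_5;			/* offload outer L3 and L4 checksums */
	encap_descr |= (outer_l3_words & 0xf) << 6;	/* 9:6, outer L3 header in 32-bit words */
	encap_descr |= (outer_l3_offset & 0x3f) << 10;	/* 15:10, byte offset of outer L3 header */
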
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 27c4f131863b..b7cffb46a75d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -77,7 +77,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
 	{QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
 	{QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
 	{QLCNIC_CMD_IDC_ACK, 5, 1},
-	{QLCNIC_CMD_INIT_NIC_FUNC, 2, 1},
+	{QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
 	{QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
 	{QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
 	{QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
@@ -87,6 +87,7 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
 	{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
 	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
 	{QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+	{QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
 };
 
 const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -203,7 +204,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
 	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
 	.enable_tx_intr			= qlcnic_83xx_enable_tx_intr,
 	.disable_tx_intr		= qlcnic_83xx_disable_tx_intr,
-
+	.get_saved_state		= qlcnic_83xx_get_saved_state,
+	.set_saved_state		= qlcnic_83xx_set_saved_state,
+	.cache_tmpl_hdr_values		= qlcnic_83xx_cache_tmpl_hdr_values,
+	.get_cap_size			= qlcnic_83xx_get_cap_size,
+	.set_sys_info			= qlcnic_83xx_set_sys_info,
+	.store_cap_mask			= qlcnic_83xx_store_cap_mask,
 };
 
 static struct qlcnic_nic_template qlcnic_83xx_ops = {
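
The mailbox table change above (QLCNIC_CMD_INIT_NIC_FUNC growing from 2 to 3 request words) makes room for the multi-tenancy payload that the VXLAN path writes into the request; see qlcnic_set_vxlan_port() in qlcnic_83xx_init.c later in this patch, which fills the two payload words roughly as:

	cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
	cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
			 QLC_83XX_SET_VXLAN_UDP_DPORT |
			 QLC_83XX_VXLAN_UDP_DPORT(port);
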
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f92485ca21d1..88d809c35633 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
 #define QLC_83XX_IDC_FLASH_PARAM_ADDR		0x3e8020
 
 struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
 struct qlc_83xx_idc {
 	int (*state_entry) (struct qlcnic_adapter *);
 	u64 sec_counter;
@@ -526,8 +528,9 @@ enum qlc_83xx_ext_regs {
 };
 
 /* Initialize/Stop NIC command bit definitions */
-#define QLC_REGISTER_DCB_AEN		BIT_1
 #define QLC_REGISTER_LB_IDC		BIT_0
+#define QLC_REGISTER_DCB_AEN		BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO	BIT_29
 #define QLC_INIT_FW_RESOURCES		BIT_31
 
 /* 83xx funcitons */
@@ -650,4 +653,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
 int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
 void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
 #endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 90a2dda351ec..ec399b7f5bd7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,10 +1020,97 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
 	return 0;
 }
 
+#define QLC_83XX_ENCAP_TYPE_VXLAN	BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID		BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT	BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT)	((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+static int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter)
+{
+	u16 port = adapter->ahw->vxlan_port;
+	struct qlcnic_cmd_args cmd;
+	int ret = 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+				    QLCNIC_CMD_INIT_NIC_FUNC);
+	if (ret)
+		return ret;
+
+	cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+	cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+			 QLC_83XX_SET_VXLAN_UDP_DPORT |
+			 QLC_83XX_VXLAN_UDP_DPORT(port);
+
+	ret = qlcnic_issue_cmd(adapter, &cmd);
+	if (ret)
+		netdev_err(adapter->netdev,
+			   "Failed to set VXLAN port %d in adapter\n",
+			   port);
+
+	qlcnic_free_mbx_args(&cmd);
+
+	return ret;
+}
+
+static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
+				    bool state)
+{
+	u16 vxlan_port = adapter->ahw->vxlan_port;
+	struct qlcnic_cmd_args cmd;
+	int ret = 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+				    QLCNIC_CMD_SET_INGRESS_ENCAP);
+	if (ret)
+		return ret;
+
+	cmd.req.arg[1] = state ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+				 QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+	ret = qlcnic_issue_cmd(adapter, &cmd);
+	if (ret)
+		netdev_err(adapter->netdev,
+			   "Failed to %s VXLAN parsing for port %d\n",
+			   state ? "enable" : "disable", vxlan_port);
+	else
+		netdev_info(adapter->netdev,
+			    "%s VXLAN parsing for port %d\n",
+			    state ? "Enabled" : "Disabled", vxlan_port);
+
+	qlcnic_free_mbx_args(&cmd);
+
+	return ret;
+}
+
 static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
 	if (adapter->fhash.fnum)
 		qlcnic_prune_lb_filters(adapter);
+
+	if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
+		if (qlcnic_set_vxlan_port(adapter))
+			return;
+
+		if (qlcnic_set_vxlan_parsing(adapter, true))
+			return;
+
+		adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
+	} else if (adapter->flags & QLCNIC_DEL_VXLAN_PORT) {
+		if (qlcnic_set_vxlan_parsing(adapter, false))
+			return;
+
+		ahw->vxlan_port = 0;
+		adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
+	}
 }
 
 /**
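
The periodic-task plumbing above exists presumably because the VXLAN add/del notifications arrive via ndo callbacks, where the driver cannot assume it may sleep for mailbox I/O; it therefore only latches a flag at notification time and lets qlcnic_83xx_periodic_tasks() issue the firmware commands. The flag is cleared only after the commands succeed, so a failed pass is retried on the next tick. Condensed shape of the handshake (a restatement of the hunk, not new driver code):

	/* notification context: record the request only */
	ahw->vxlan_port = ntohs(port);
	adapter->flags |= QLCNIC_ADD_VXLAN_PORT;

	/* periodic task: do the sleeping work, keep the flag set on failure */
	if (!qlcnic_set_vxlan_port(adapter) &&
	    !qlcnic_set_vxlan_parsing(adapter, true))
		adapter->flags &= ~QLCNIC_ADD_VXLAN_PORT;
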
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index acee1a5d80c6..5bacf5210aed 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -47,6 +47,12 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
 	{"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
 	{"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
 	{"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+	{"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+	 QLC_OFF(stats.encap_lso_frames)},
+	{"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+	 QLC_OFF(stats.encap_tx_csummed)},
+	{"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+	 QLC_OFF(stats.encap_rx_csummed)},
 	{"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
 	 QLC_OFF(stats.skb_alloc_failure)},
 	{"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
@@ -1639,14 +1645,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
 	}
 
 	if (fw_dump->clr)
-		dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+		dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
 	else
 		dump->len = 0;
 
 	if (!qlcnic_check_fw_dump_state(adapter))
 		dump->flag = ETH_FW_DUMP_DISABLE;
 	else
-		dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+		dump->flag = fw_dump->cap_mask;
 
 	dump->version = adapter->fw_version;
 	return 0;
@@ -1671,9 +1677,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 		netdev_info(netdev, "Dump not available\n");
 		return -EINVAL;
 	}
+
 	/* Copy template header first */
-	copy_sz = fw_dump->tmpl_hdr->size;
-	hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+	copy_sz = fw_dump->tmpl_hdr_size;
+	hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
 	data = buffer;
 	for (i = 0; i < copy_sz/sizeof(u32); i++)
 		*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1688,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
 	/* Copy captured dump data */
 	memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
 	dump->len = copy_sz + fw_dump->size;
-	dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+	dump->flag = fw_dump->cap_mask;
 
 	/* Free dump area once data has been captured */
 	vfree(fw_dump->data);
@@ -1703,7 +1710,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
 		return -EOPNOTSUPP;
 	}
 
-	fw_dump->tmpl_hdr->drv_cap_mask = mask;
+	fw_dump->cap_mask = mask;
+
+	/* Store new capture mask in template header as well */
+	qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
 	netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
 	return 0;
 }
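
With the template header size and capture mask now cached in struct qlcnic_fw_dump, the ethtool paths above no longer dereference a format-specific header. The reported length is simply the cached header size plus the captured data; with hypothetical sizes of a 512-byte template header and 64 KiB of capture data:

	dump->len = fw_dump->tmpl_hdr_size + fw_dump->size
	          = 512 + 65536
	          = 66048 bytes
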
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 03d18a0be6ce..9f3adf4e70b5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
 int
 qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 {
-	int timeout = 0;
-	int err = 0;
-	u32 done = 0;
+	int timeout = 0, err = 0, done = 0;
 
 	while (!done) {
 		done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
 		if (done == 1)
 			break;
 		if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
-			dev_err(&adapter->pdev->dev,
-				"Failed to acquire sem=%d lock; holdby=%d\n",
-				sem,
-				id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
+			if (id_reg) {
+				done = QLCRD32(adapter, id_reg, &err);
+				if (done != -1)
+					dev_err(&adapter->pdev->dev,
+						"Failed to acquire sem=%d lock held by=%d\n",
+						sem, done);
+				else
+					dev_err(&adapter->pdev->dev,
+						"Failed to acquire sem=%d lock",
+						sem);
+			} else {
+				dev_err(&adapter->pdev->dev,
+					"Failed to acquire sem=%d lock", sem);
+			}
 			return -EIO;
 		}
 		msleep(1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 63d75617d445..cbe2399c30a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -98,6 +98,7 @@ enum qlcnic_regs {
 #define QLCNIC_CMD_GET_LINK_EVENT		0x48
 #define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE	0x49
 #define QLCNIC_CMD_CONFIGURE_HW_LRO		0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP		0x4E
 #define QLCNIC_CMD_INIT_NIC_FUNC		0x60
 #define QLCNIC_CMD_STOP_NIC_FUNC		0x61
 #define QLCNIC_CMD_IDC_ACK			0x63
@@ -161,6 +162,7 @@ struct qlcnic_host_sds_ring;
 struct qlcnic_host_tx_ring;
 struct qlcnic_hardware_context;
 struct qlcnic_adapter;
+struct qlcnic_fw_dump;
 
 int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
 int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +215,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
 int qlcnic_82xx_resume(struct qlcnic_adapter *);
 void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
 void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
 #endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 54ebf300332a..173b3d12991f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -13,16 +13,19 @@
 
 #include "qlcnic.h"
 
-#define TX_ETHER_PKT		0x01
-#define TX_TCP_PKT		0x02
-#define TX_UDP_PKT		0x03
-#define TX_IP_PKT		0x04
-#define TX_TCP_LSO		0x05
-#define TX_TCP_LSO6		0x06
-#define TX_TCPV6_PKT		0x0b
-#define TX_UDPV6_PKT		0x0c
-#define FLAGS_VLAN_TAGGED	0x10
-#define FLAGS_VLAN_OOB		0x40
+#define QLCNIC_TX_ETHER_PKT	0x01
+#define QLCNIC_TX_TCP_PKT	0x02
+#define QLCNIC_TX_UDP_PKT	0x03
+#define QLCNIC_TX_IP_PKT	0x04
+#define QLCNIC_TX_TCP_LSO	0x05
+#define QLCNIC_TX_TCP_LSO6	0x06
+#define QLCNIC_TX_ENCAP_PKT	0x07
+#define QLCNIC_TX_ENCAP_LSO	0x08
+#define QLCNIC_TX_TCPV6_PKT	0x0b
+#define QLCNIC_TX_UDPV6_PKT	0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED	0x10
+#define QLCNIC_FLAGS_VLAN_OOB		0x40
 
 #define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
 	(cmd_desc)->vlan_TCI = cpu_to_le16(v);
@@ -364,6 +367,101 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 	spin_unlock(&adapter->mac_learn_lock);
 }
 
+#define QLCNIC_ENCAP_VXLAN_PKT		BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6	BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6	BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP	BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM		BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM		BIT_5
+
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+			       struct cmd_desc_type0 *first_desc,
+			       struct sk_buff *skb,
+			       struct qlcnic_host_tx_ring *tx_ring)
+{
+	u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+	int copied, copy_len, descr_size;
+	u32 producer = tx_ring->producer;
+	struct cmd_desc_type0 *hwdesc;
+	u16 flags = 0, encap_descr = 0;
+
+	opcode = QLCNIC_TX_ETHER_PKT;
+	encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+	if (skb_is_gso(skb)) {
+		inner_hdr_len = skb_inner_transport_header(skb) +
+				inner_tcp_hdrlen(skb) -
+				skb_inner_mac_header(skb);
+
+		/* VXLAN header size = 8 */
+		outer_hdr_len = skb_transport_offset(skb) + 8 +
+				sizeof(struct udphdr);
+		first_desc->outer_hdr_length = outer_hdr_len;
+		total_hdr_len = inner_hdr_len + outer_hdr_len;
+		encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+			       QLCNIC_ENCAP_DO_L4_CSUM;
+		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+		first_desc->hdr_length = inner_hdr_len;
+
+		/* Copy inner and outer headers in Tx descriptor(s)
+		 * If total_hdr_len > cmd_desc_type0, use multiple
+		 * descriptors
+		 */
+		copied = 0;
+		descr_size = (int)sizeof(struct cmd_desc_type0);
+		while (copied < total_hdr_len) {
+			copy_len = min(descr_size, (total_hdr_len - copied));
+			hwdesc = &tx_ring->desc_head[producer];
+			tx_ring->cmd_buf_arr[producer].skb = NULL;
+			skb_copy_from_linear_data_offset(skb, copied,
+							 (char *)hwdesc,
+							 copy_len);
+			copied += copy_len;
+			producer = get_next_index(producer, tx_ring->num_desc);
+		}
+
+		tx_ring->producer = producer;
+
+		/* Make sure updated tx_ring->producer is visible
+		 * for qlcnic_tx_avail()
+		 */
+		smp_mb();
+		adapter->stats.encap_lso_frames++;
+
+		opcode = QLCNIC_TX_ENCAP_LSO;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		if (inner_ip_hdr(skb)->version == 6) {
+			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+		} else {
+			if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+				encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+		}
+
+		adapter->stats.encap_tx_csummed++;
+		opcode = QLCNIC_TX_ENCAP_PKT;
+	}
+
+	/* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+	if (ip_hdr(skb)->version == 6)
+		encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+	/* outer IP header's size in 32-bit words */
+	encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+	/* outer IP header offset */
+	encap_descr |= skb_network_offset(skb) << 10;
+	first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+	first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+				     skb->data;
+	first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+	qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+	return 0;
+}
+
 static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
 			 struct qlcnic_host_tx_ring *tx_ring)
@@ -378,11 +476,11 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 
 	if (protocol == ETH_P_8021Q) {
 		vh = (struct vlan_ethhdr *)skb->data;
-		flags = FLAGS_VLAN_TAGGED;
+		flags = QLCNIC_FLAGS_VLAN_TAGGED;
 		vlan_tci = ntohs(vh->h_vlan_TCI);
 		protocol = ntohs(vh->h_vlan_encapsulated_proto);
 	} else if (vlan_tx_tag_present(skb)) {
-		flags = FLAGS_VLAN_OOB;
+		flags = QLCNIC_FLAGS_VLAN_OOB;
 		vlan_tci = vlan_tx_tag_get(skb);
 	}
 	if (unlikely(adapter->tx_pvid)) {
@@ -391,7 +489,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
 		if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
 			goto set_flags;
 
-		flags = FLAGS_VLAN_OOB;
+		flags = QLCNIC_FLAGS_VLAN_OOB;
 		vlan_tci = adapter->tx_pvid;
 	}
 set_flags:
@@ -402,25 +500,26 @@ set_flags:
 		flags |= BIT_0;
 		memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
 	}
-	opcode = TX_ETHER_PKT;
+	opcode = QLCNIC_TX_ETHER_PKT;
 	if (skb_is_gso(skb)) {
 		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 		first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
-		first_desc->total_hdr_length = hdr_len;
-		opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+		first_desc->hdr_length = hdr_len;
+		opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+						    QLCNIC_TX_TCP_LSO;
 
 		/* For LSO, we need to copy the MAC/IP/TCP headers into
 		 * the descriptor ring */
 		copied = 0;
 		offset = 2;
 
-		if (flags & FLAGS_VLAN_OOB) {
-			first_desc->total_hdr_length += VLAN_HLEN;
+		if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+			first_desc->hdr_length += VLAN_HLEN;
 			first_desc->tcp_hdr_offset = VLAN_HLEN;
 			first_desc->ip_hdr_offset = VLAN_HLEN;
 
 			/* Only in case of TSO on vlan device */
-			flags |= FLAGS_VLAN_TAGGED;
+			flags |= QLCNIC_FLAGS_VLAN_TAGGED;
 
 			/* Create a TSO vlan header template for firmware */
 			hwdesc = &tx_ring->desc_head[producer];
@@ -464,16 +563,16 @@ set_flags:
 			l4proto = ip_hdr(skb)->protocol;
 
 			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCP_PKT;
+				opcode = QLCNIC_TX_TCP_PKT;
 			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDP_PKT;
+				opcode = QLCNIC_TX_UDP_PKT;
 		} else if (protocol == ETH_P_IPV6) {
 			l4proto = ipv6_hdr(skb)->nexthdr;
 
 			if (l4proto == IPPROTO_TCP)
-				opcode = TX_TCPV6_PKT;
+				opcode = QLCNIC_TX_TCPV6_PKT;
 			else if (l4proto == IPPROTO_UDP)
-				opcode = TX_UDPV6_PKT;
+				opcode = QLCNIC_TX_UDPV6_PKT;
 		}
 	}
 	first_desc->tcp_hdr_offset += skb_transport_offset(skb);
@@ -563,6 +662,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct ethhdr *phdr;
 	int i, k, frag_count, delta = 0;
 	u32 producer, num_txd;
+	u16 protocol;
+	bool l4_is_udp = false;
 
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 		netif_tx_stop_all_queues(netdev);
@@ -653,8 +754,23 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	tx_ring->producer = get_next_index(producer, num_txd);
 	smp_mb();
 
-	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
-		goto unwind_buff;
+	protocol = ntohs(skb->protocol);
+	if (protocol == ETH_P_IP)
+		l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+	else if (protocol == ETH_P_IPV6)
+		l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+	/* Check if it is a VXLAN packet */
+	if (!skb->encapsulation || !l4_is_udp ||
+	    !qlcnic_encap_tx_offload(adapter)) {
+		if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+					   tx_ring)))
+			goto unwind_buff;
+	} else {
+		if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+						 skb, tx_ring)))
+			goto unwind_buff;
+	}
 
 	if (adapter->drv_mac_learn)
 		qlcnic_send_filter(adapter, first_desc, skb);
@@ -1587,6 +1703,13 @@ static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
 	return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
 }
 
+#define QLCNIC_ENCAP_LENGTH_MASK	0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+	return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
 static struct qlcnic_rx_buffer *
 qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 			struct qlcnic_host_sds_ring *sds_ring,
@@ -1637,6 +1760,12 @@ qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
+	if (qlcnic_encap_length(sts_data[1]) &&
+	    skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		skb->encapsulation = 1;
+		adapter->stats.encap_rx_csummed++;
+	}
+
 	if (vid != 0xffff)
 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
 
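
A worked example for the header lengths computed in qlcnic_tx_encap_pkt() above, assuming an untagged IPv4 outer frame without options (hypothetical sizes; IPv6 or VLAN tags change them):

	/* outer headers, through the VXLAN header:
	 *   skb_transport_offset() = 14 (Ethernet) + 20 (IPv4) = 34
	 *   outer_hdr_len = 34 + 8 (VXLAN) + 8 (UDP) = 50 bytes
	 * inner headers, inner MAC through end of inner TCP:
	 *   inner_hdr_len = skb_inner_transport_header(skb)
	 *                   + inner_tcp_hdrlen(skb)
	 *                   - skb_inner_mac_header(skb)
	 *   e.g. 14 + 20 + 20 = 54 bytes for inner IPv4/TCP
	 */
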
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1222865cfb73..79be451a3ffc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -21,6 +21,7 @@
 #include <linux/aer.h>
 #include <linux/log2.h>
 #include <linux/pci.h>
+#include <net/vxlan.h>
 
 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
 MODULE_LICENSE("GPL");
@@ -90,7 +91,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
 static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
 static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
 						      pci_channel_state_t);
-
 static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -462,6 +462,35 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
 	return 0;
 }
 
+static void qlcnic_add_vxlan_port(struct net_device *netdev,
+				  sa_family_t sa_family, __be16 port)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	/* Adapter supports only one VXLAN port. Use very first port
+	 * for enabling offload
+	 */
+	if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+		return;
+
+	ahw->vxlan_port = ntohs(port);
+	adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+}
+
+static void qlcnic_del_vxlan_port(struct net_device *netdev,
+				  sa_family_t sa_family, __be16 port)
+{
+	struct qlcnic_adapter *adapter = netdev_priv(netdev);
+	struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+	if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+	    (ahw->vxlan_port != ntohs(port)))
+		return;
+
+	adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+}
+
 static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_open	   = qlcnic_open,
 	.ndo_stop	   = qlcnic_close,
@@ -480,6 +509,8 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 	.ndo_fdb_del		= qlcnic_fdb_del,
 	.ndo_fdb_dump		= qlcnic_fdb_dump,
 	.ndo_get_phys_port_id	= qlcnic_get_phys_port_id,
+	.ndo_add_vxlan_port	= qlcnic_add_vxlan_port,
+	.ndo_del_vxlan_port	= qlcnic_del_vxlan_port,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = qlcnic_poll_controller,
 #endif
@@ -561,6 +592,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
 	.disable_sds_intr		= qlcnic_82xx_disable_sds_intr,
 	.enable_tx_intr			= qlcnic_82xx_enable_tx_intr,
 	.disable_tx_intr		= qlcnic_82xx_disable_tx_intr,
+	.get_saved_state		= qlcnic_82xx_get_saved_state,
+	.set_saved_state		= qlcnic_82xx_set_saved_state,
+	.cache_tmpl_hdr_values		= qlcnic_82xx_cache_tmpl_hdr_values,
+	.get_cap_size			= qlcnic_82xx_get_cap_size,
+	.set_sys_info			= qlcnic_82xx_set_sys_info,
+	.store_cap_mask			= qlcnic_82xx_store_cap_mask,
 };
 
 static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +721,7 @@ restore:
 int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	int err = -1, vector;
+	int err, vector;
 
 	if (!adapter->msix_entries) {
 		adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +738,17 @@ enable_msix:
 	for (vector = 0; vector < num_msix; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
-	err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-	if (err == 0) {
+	err = pci_enable_msix_range(pdev,
+				    adapter->msix_entries, 1, num_msix);
+
+	if (err == num_msix) {
 		adapter->flags |= QLCNIC_MSIX_ENABLED;
 		adapter->ahw->num_msix = num_msix;
 		dev_info(&pdev->dev, "using msi-x interrupts\n");
-		return err;
+		return 0;
 	} else if (err > 0) {
+		pci_disable_msix(pdev);
+
 		dev_info(&pdev->dev,
 			 "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
 			 num_msix, err);
@@ -715,12 +756,12 @@ enable_msix:
 		if (qlcnic_82xx_check(adapter)) {
 			num_msix = rounddown_pow_of_two(err);
 			if (err < QLCNIC_82XX_MINIMUM_VECTOR)
-				return -EIO;
+				return -ENOSPC;
 		} else {
 			num_msix = rounddown_pow_of_two(err - 1);
 			num_msix += 1;
 			if (err < QLCNIC_83XX_MINIMUM_VECTOR)
-				return -EIO;
+				return -ENOSPC;
 		}
 
 		if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +788,7 @@ enable_msix:
 		}
 	}
 
-	return err;
+	return -EIO;
 }
 
 static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
@@ -1934,6 +1975,9 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
 
 	qlcnic_create_sysfs_entries(adapter);
 
+	if (qlcnic_encap_rx_offload(adapter))
+		vxlan_get_rx_port(netdev);
+
 	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
 	return 0;
 
@@ -2196,6 +2240,19 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
 		netdev->features |= NETIF_F_LRO;
 
+	if (qlcnic_encap_tx_offload(adapter)) {
+		netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+		/* encapsulation Tx offload supported by Adapter */
+		netdev->hw_enc_features = NETIF_F_IP_CSUM |
+					  NETIF_F_GSO_UDP_TUNNEL |
+					  NETIF_F_TSO |
+					  NETIF_F_TSO6;
+	}
+
+	if (qlcnic_encap_rx_offload(adapter))
+		netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
 	netdev->hw_features = netdev->features;
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
@@ -2442,8 +2499,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err) {
 		switch (err) {
 		case -ENOTRECOVERABLE:
-			dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
-			dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+			dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+			dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
 			goto err_out_free_hw;
 		case -ENOMEM:
 			dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
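
Unlike the netxen conversion earlier in this patch, qlcnic_enable_msix() calls pci_enable_msix_range() with minvec = 1, so a positive return smaller than num_msix is possible. The driver treats that as a hint: it releases the partial grant, rounds the hint down (to a power of two on 82xx), and retries. Condensed, approximate control flow (a sketch restating the hunks above; the driver also enforces per-ASIC minimum vector counts and returns -ENOSPC below them):

	err = pci_enable_msix_range(pdev, adapter->msix_entries, 1, num_msix);
	if (err == num_msix)
		return 0;			/* full allocation */
	if (err > 0) {
		pci_disable_msix(pdev);		/* drop the partial grant */
		num_msix = rounddown_pow_of_two(err);
		goto enable_msix;		/* retry with the smaller count */
	}
	return err;				/* hard failure */
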
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962e2ec4..37b979b1266b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
 	QLCNIC_DUMP_RDEND	= 255
 };
 
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+	return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+					u32 value)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+	hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr;
+
+	hdr = fw_dump->tmpl_hdr;
+	fw_dump->tmpl_hdr_size = hdr->size;
+	fw_dump->version = hdr->version;
+	fw_dump->num_entries = hdr->num_entries;
+	fw_dump->offset = hdr->offset;
+
+	hdr->drv_cap_mask = hdr->cap_mask;
+	fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+	return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+	hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+	struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+	hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+	return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+					u32 value)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+	hdr->saved_state[index] = value;
+}
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr;
+
+	hdr = fw_dump->tmpl_hdr;
+	fw_dump->tmpl_hdr_size = hdr->size;
+	fw_dump->version = hdr->version;
+	fw_dump->num_entries = hdr->num_entries;
+	fw_dump->offset = hdr->offset;
+
+	hdr->drv_cap_mask = hdr->cap_mask;
+	fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+	return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+	hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+	struct qlcnic_83xx_dump_template_hdr *hdr;
+
+	hdr = tmpl_hdr;
+	hdr->drv_cap_mask = mask;
+}
+
 struct qlcnic_dump_operations {
 	enum qlcnic_minidump_opcode opcode;
 	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
 			    struct qlcnic_dump_entry *entry, __le32 *buffer)
 {
+	void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+	struct __ctrl *ctr = &entry->region.ctrl;
 	int i, k, timeout = 0;
-	u32 addr, data;
+	u32 addr, data, temp;
 	u8 no_ops;
-	struct __ctrl *ctr = &entry->region.ctrl;
-	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
 
 	addr = ctr->addr;
 	no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
 		}
 		break;
 	case QLCNIC_DUMP_RD_SAVE:
-		if (ctr->index_a)
-			addr = t_hdr->saved_state[ctr->index_a];
+		temp = ctr->index_a;
+		if (temp)
+			addr = qlcnic_get_saved_state(adapter,
+						      hdr,
+						      temp);
 		data = qlcnic_ind_rd(adapter, addr);
-		t_hdr->saved_state[ctr->index_v] = data;
+		qlcnic_set_saved_state(adapter, hdr,
+				       ctr->index_v, data);
 		break;
 	case QLCNIC_DUMP_WRT_SAVED:
-		if (ctr->index_v)
-			data = t_hdr->saved_state[ctr->index_v];
+		temp = ctr->index_v;
+		if (temp)
+			data = qlcnic_get_saved_state(adapter,
+						      hdr,
+						      temp);
 		else
 			data = ctr->val1;
-		if (ctr->index_a)
-			addr = t_hdr->saved_state[ctr->index_a];
+
+		temp = ctr->index_a;
+		if (temp)
+			addr = qlcnic_get_saved_state(adapter,
+						      hdr,
+						      temp);
 		qlcnic_ind_wr(adapter, addr, data);
 		break;
 	case QLCNIC_DUMP_MOD_SAVE_ST:
-		data = t_hdr->saved_state[ctr->index_v];
+		data = qlcnic_get_saved_state(adapter, hdr,
+					      ctr->index_v);
 		data <<= ctr->shl_val;
 		data >>= ctr->shr_val;
 		if (ctr->val2)
 			data &= ctr->val2;
 		data |= ctr->val3;
 		data += ctr->val1;
-		t_hdr->saved_state[ctr->index_v] = data;
+		qlcnic_set_saved_state(adapter, hdr,
+				       ctr->index_v, data);
 		break;
 	default:
 		dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
 static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
 				struct __mem *mem)
 {
-	struct qlcnic_dump_template_hdr *tmpl_hdr;
+	struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
 	struct device *dev = &adapter->pdev->dev;
 	u32 dma_no, dma_base_addr, temp_addr;
 	int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
 	u32 temp, dma_base_addr, size = 0, read_size = 0;
 	struct qlcnic_pex_dma_descriptor *dma_descr;
-	struct qlcnic_dump_template_hdr *tmpl_hdr;
+	struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
 	struct device *dev = &adapter->pdev->dev;
 	dma_addr_t dma_phys_addr;
 	void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
 qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
 				       struct qlcnic_cmd_args *cmd)
 {
-	struct qlcnic_dump_template_hdr tmp_hdr;
-	u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+	struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+	u32 size = sizeof(tmp_hdr) / sizeof(u32);
 	int ret = 0;
 
 	if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
 	return err;
 }
 
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
 int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 {
-	int err;
-	u32 temp_size = 0;
-	u32 version, csum, *tmp_buf;
 	struct qlcnic_hardware_context *ahw;
-	struct qlcnic_dump_template_hdr *tmpl_hdr;
+	struct qlcnic_fw_dump *fw_dump;
+	u32 version, csum, *tmp_buf;
 	u8 use_flash_temp = 0;
+	u32 temp_size = 0;
+	int err;
 
 	ahw = adapter->ahw;
-
+	fw_dump = &ahw->fw_dump;
 	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
 					       &use_flash_temp);
 	if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
 		return -EIO;
 	}
 
-	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
-	if (!ahw->fw_dump.tmpl_hdr)
+	fw_dump->tmpl_hdr = vzalloc(temp_size);
+	if (!fw_dump->tmpl_hdr)
 		return -ENOMEM;
 
-	tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+	tmp_buf = (u32 *)fw_dump->tmpl_hdr;
 	if (use_flash_temp)
 		goto flash_temp;
 
@@ -1065,8 +1181,8 @@ flash_temp:
 			dev_err(&adapter->pdev->dev,
 				"Failed to get minidump template header %d\n",
 				err);
-			vfree(ahw->fw_dump.tmpl_hdr);
-			ahw->fw_dump.tmpl_hdr = NULL;
+			vfree(fw_dump->tmpl_hdr);
+			fw_dump->tmpl_hdr = NULL;
 			return -EIO;
 		}
 	}
@@ -1076,21 +1192,22 @@ flash_temp:
 	if (csum) {
 		dev_err(&adapter->pdev->dev,
 			"Template header checksum validation failed\n");
-		vfree(ahw->fw_dump.tmpl_hdr);
-		ahw->fw_dump.tmpl_hdr = NULL;
+		vfree(fw_dump->tmpl_hdr);
+		fw_dump->tmpl_hdr = NULL;
 		return -EIO;
 	}
 
-	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
-	tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+	qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
 	dev_info(&adapter->pdev->dev,
 		 "Default minidump capture mask 0x%x\n",
-		 tmpl_hdr->cap_mask);
+		 fw_dump->cap_mask);
 
-	if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
-		ahw->fw_dump.use_pex_dma = true;
+	if (qlcnic_83xx_check(adapter) &&
+	    (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
+		fw_dump->use_pex_dma = true;
 	else
-		ahw->fw_dump.use_pex_dma = false;
+		fw_dump->use_pex_dma = false;
 
 	qlcnic_enable_fw_dump_state(adapter);
 
@@ -1099,21 +1216,22 @@ flash_temp:
 
 int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 {
-	__le32 *buffer;
-	u32 ocm_window;
-	char mesg[64];
-	char *msg[] = {mesg, NULL};
-	int i, k, ops_cnt, ops_index, dump_size = 0;
-	u32 entry_offset, dump, no_entries, buf_offset = 0;
-	struct qlcnic_dump_entry *entry;
 	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
-	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
 	static const struct qlcnic_dump_operations *fw_dump_ops;
+	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+	u32 entry_offset, dump, no_entries, buf_offset = 0;
+	int i, k, ops_cnt, ops_index, dump_size = 0;
 	struct device *dev = &adapter->pdev->dev;
 	struct qlcnic_hardware_context *ahw;
-	void *temp_buffer;
+	struct qlcnic_dump_entry *entry;
+	void *temp_buffer, *tmpl_hdr;
+	u32 ocm_window;
+	__le32 *buffer;
+	char mesg[64];
+	char *msg[] = {mesg, NULL};
 
 	ahw = adapter->ahw;
+	tmpl_hdr = fw_dump->tmpl_hdr;
 
 	/* Return if we don't have firmware dump template header */
 	if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
 	/* Calculate the size for dump data area only */
 	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
-		if (i & tmpl_hdr->drv_cap_mask)
-			dump_size += tmpl_hdr->cap_sizes[k];
+		if (i & fw_dump->cap_mask)
+			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
 	if (!dump_size)
 		return -EIO;
 
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 
 	buffer = fw_dump->data;
 	fw_dump->size = dump_size;
-	no_entries = tmpl_hdr->num_entries;
-	entry_offset = tmpl_hdr->offset;
-	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
-	tmpl_hdr->sys_info[1] = adapter->fw_version;
+	no_entries = fw_dump->num_entries;
+	entry_offset = fw_dump->offset;
+	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
 
 	if (fw_dump->use_pex_dma) {
 		temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
 		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
 		fw_dump_ops = qlcnic_fw_dump_ops;
 	} else {
+		hdr_83xx = tmpl_hdr;
 		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
 		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
-		ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
-		tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
-		tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
 	}
 
 	for (i = 0; i < no_entries; i++) {
-		entry = (void *)tmpl_hdr + entry_offset;
-		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+		entry = tmpl_hdr + entry_offset;
+		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
 			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
 			entry_offset += entry->hdr.offset;
 			continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
1209 1329
1210 fw_dump->clr = 1; 1330 fw_dump->clr = 1;
1211 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name); 1331 snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
1212 dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n", 1332 netdev_info(adapter->netdev,
1213 adapter->netdev->name, fw_dump->size, tmpl_hdr->size); 1333 "Dump data %d bytes captured, template header size %d bytes\n",
1334 fw_dump->size, fw_dump->tmpl_hdr_size);
1214 /* Send a udev event to notify availability of FW dump */ 1335 /* Send a udev event to notify availability of FW dump */
1215 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg); 1336 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
1216 1337
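
The minidump rework above stops dereferencing an 82xx-shaped template header directly and routes every field access (cap_mask, num_entries, offset, cap sizes, sys_info) through fw_dump state and per-family helpers, so one dump loop serves both the 82xx and 83xx header layouts. A minimal sketch of how such an accessor can dispatch, assuming the two header structs seen above; the field layout is illustrative only:

    static u32 qlcnic_get_cap_size_sketch(struct qlcnic_adapter *adapter,
                                          void *t_hdr, int index)
    {
            if (qlcnic_82xx_check(adapter)) {
                    struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;

                    return hdr->cap_sizes[index];   /* illustrative field */
            } else {
                    struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;

                    return hdr->cap_sizes[index];   /* illustrative field */
            }
    }
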
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e5277a632671..14f748cbf0de 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -15,6 +15,7 @@
15#define QLC_MAC_OPCODE_MASK 0x7 15#define QLC_MAC_OPCODE_MASK 0x7
16#define QLC_VF_FLOOD_BIT BIT_16 16#define QLC_VF_FLOOD_BIT BIT_16
17#define QLC_FLOOD_MODE 0x5 17#define QLC_FLOOD_MODE 0x5
18#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
18 19
19static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8); 20static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
20 21
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
335 return err; 336 return err;
336 337
337 cmd.req.arg[1] = 0x4; 338 cmd.req.arg[1] = 0x4;
338 if (enable) 339 if (enable) {
339 cmd.req.arg[1] |= BIT_16; 340 cmd.req.arg[1] |= BIT_16;
341 if (qlcnic_84xx_check(adapter))
342 cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
343 }
340 344
341 err = qlcnic_issue_cmd(adapter, &cmd); 345 err = qlcnic_issue_cmd(adapter, &cmd);
342 if (err) 346 if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 3d64113a35af..448d156c3d08 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
350 return size; 350 return size;
351} 351}
352 352
353static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
354{
355 struct qlcnic_hardware_context *ahw = adapter->ahw;
356 u32 count = 0;
357
358 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
359 return ahw->total_nic_func;
360
361 if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
362 count = QLC_DEFAULT_VNIC_COUNT;
363 else
364 count = ahw->max_vnic_func;
365
366 return count;
367}
368
369int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func) 353int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
370{ 354{
371 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
372 int i; 355 int i;
373 356
374 for (i = 0; i < pci_func_count; i++) { 357 for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
375 if (adapter->npars[i].pci_func == pci_func) 358 if (adapter->npars[i].pci_func == pci_func)
376 return i; 359 return i;
377 } 360 }
378 361 return -EINVAL;
379 return -1;
380} 362}
381 363
382static int validate_pm_config(struct qlcnic_adapter *adapter, 364static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
464{ 446{
465 struct device *dev = container_of(kobj, struct device, kobj); 447 struct device *dev = container_of(kobj, struct device, kobj);
466 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 448 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
467 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
468 struct qlcnic_pm_func_cfg *pm_cfg; 449 struct qlcnic_pm_func_cfg *pm_cfg;
469 int i, pm_cfg_size;
470 u8 pci_func; 450 u8 pci_func;
451 u32 count;
452 int i;
471 453
472 pm_cfg_size = pci_func_count * sizeof(*pm_cfg); 454 memset(buf, 0, size);
473 if (size != pm_cfg_size)
474 return QL_STATUS_INVALID_PARAM;
475
476 memset(buf, 0, pm_cfg_size);
477 pm_cfg = (struct qlcnic_pm_func_cfg *)buf; 455 pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
478 456 count = size / sizeof(struct qlcnic_pm_func_cfg);
479 for (i = 0; i < pci_func_count; i++) { 457 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
480 pci_func = adapter->npars[i].pci_func; 458 pci_func = adapter->npars[i].pci_func;
481 if (!adapter->npars[i].active) 459 if (pci_func >= count) {
460 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
461 __func__, adapter->ahw->total_nic_func, count);
482 continue; 462 continue;
483 463 }
484 if (!adapter->npars[i].eswitch_status) 464 if (!adapter->npars[i].eswitch_status)
485 continue; 465 continue;
486 466
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
494static int validate_esw_config(struct qlcnic_adapter *adapter, 474static int validate_esw_config(struct qlcnic_adapter *adapter,
495 struct qlcnic_esw_func_cfg *esw_cfg, int count) 475 struct qlcnic_esw_func_cfg *esw_cfg, int count)
496{ 476{
497 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
498 struct qlcnic_hardware_context *ahw = adapter->ahw; 477 struct qlcnic_hardware_context *ahw = adapter->ahw;
499 int i, ret; 478 int i, ret;
500 u32 op_mode; 479 u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
507 486
508 for (i = 0; i < count; i++) { 487 for (i = 0; i < count; i++) {
509 pci_func = esw_cfg[i].pci_func; 488 pci_func = esw_cfg[i].pci_func;
510 if (pci_func >= pci_func_count) 489 if (pci_func >= ahw->max_vnic_func)
511 return QL_STATUS_INVALID_PARAM; 490 return QL_STATUS_INVALID_PARAM;
512 491
513 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) 492 if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
642{ 621{
643 struct device *dev = container_of(kobj, struct device, kobj); 622 struct device *dev = container_of(kobj, struct device, kobj);
644 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 623 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
645 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
646 struct qlcnic_esw_func_cfg *esw_cfg; 624 struct qlcnic_esw_func_cfg *esw_cfg;
647 size_t esw_cfg_size; 625 u8 pci_func;
648 u8 i, pci_func; 626 u32 count;
649 627 int i;
650 esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
651 if (size != esw_cfg_size)
652 return QL_STATUS_INVALID_PARAM;
653 628
654 memset(buf, 0, esw_cfg_size); 629 memset(buf, 0, size);
655 esw_cfg = (struct qlcnic_esw_func_cfg *)buf; 630 esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
656 631 count = size / sizeof(struct qlcnic_esw_func_cfg);
657 for (i = 0; i < pci_func_count; i++) { 632 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
658 pci_func = adapter->npars[i].pci_func; 633 pci_func = adapter->npars[i].pci_func;
659 if (!adapter->npars[i].active) 634 if (pci_func >= count) {
635 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
636 __func__, adapter->ahw->total_nic_func, count);
660 continue; 637 continue;
661 638 }
662 if (!adapter->npars[i].eswitch_status) 639 if (!adapter->npars[i].eswitch_status)
663 continue; 640 continue;
664 641
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
741{ 718{
742 struct device *dev = container_of(kobj, struct device, kobj); 719 struct device *dev = container_of(kobj, struct device, kobj);
743 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 720 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
744 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
745 struct qlcnic_npar_func_cfg *np_cfg; 721 struct qlcnic_npar_func_cfg *np_cfg;
746 struct qlcnic_info nic_info; 722 struct qlcnic_info nic_info;
747 size_t np_cfg_size;
748 int i, ret; 723 int i, ret;
749 724 u32 count;
750 np_cfg_size = pci_func_count * sizeof(*np_cfg);
751 if (size != np_cfg_size)
752 return QL_STATUS_INVALID_PARAM;
753 725
754 memset(&nic_info, 0, sizeof(struct qlcnic_info)); 726 memset(&nic_info, 0, sizeof(struct qlcnic_info));
755 memset(buf, 0, np_cfg_size); 727 memset(buf, 0, size);
756 np_cfg = (struct qlcnic_npar_func_cfg *)buf; 728 np_cfg = (struct qlcnic_npar_func_cfg *)buf;
757 729
758 for (i = 0; i < pci_func_count; i++) { 730 count = size / sizeof(struct qlcnic_npar_func_cfg);
731 for (i = 0; i < adapter->ahw->total_nic_func; i++) {
759 if (qlcnic_is_valid_nic_func(adapter, i) < 0) 732 if (qlcnic_is_valid_nic_func(adapter, i) < 0)
760 continue; 733 continue;
734 if (adapter->npars[i].pci_func >= count) {
735 dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
736 __func__, adapter->ahw->total_nic_func, count);
737 continue;
738 }
761 ret = qlcnic_get_nic_info(adapter, &nic_info, i); 739 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
762 if (ret) 740 if (ret)
763 return ret; 741 return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
783{ 761{
784 struct device *dev = container_of(kobj, struct device, kobj); 762 struct device *dev = container_of(kobj, struct device, kobj);
785 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 763 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
786 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
787 struct qlcnic_esw_statistics port_stats; 764 struct qlcnic_esw_statistics port_stats;
788 int ret; 765 int ret;
789 766
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
793 if (size != sizeof(struct qlcnic_esw_statistics)) 770 if (size != sizeof(struct qlcnic_esw_statistics))
794 return QL_STATUS_INVALID_PARAM; 771 return QL_STATUS_INVALID_PARAM;
795 772
796 if (offset >= pci_func_count) 773 if (offset >= adapter->ahw->max_vnic_func)
797 return QL_STATUS_INVALID_PARAM; 774 return QL_STATUS_INVALID_PARAM;
798 775
799 memset(&port_stats, 0, size); 776 memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
884 861
885 struct device *dev = container_of(kobj, struct device, kobj); 862 struct device *dev = container_of(kobj, struct device, kobj);
886 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 863 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
887 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
888 int ret; 864 int ret;
889 865
890 if (qlcnic_83xx_check(adapter)) 866 if (qlcnic_83xx_check(adapter))
891 return QLC_STATUS_UNSUPPORTED_CMD; 867 return QLC_STATUS_UNSUPPORTED_CMD;
892 868
893 if (offset >= pci_func_count) 869 if (offset >= adapter->ahw->max_vnic_func)
894 return QL_STATUS_INVALID_PARAM; 870 return QL_STATUS_INVALID_PARAM;
895 871
896 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset, 872 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
914{ 890{
915 struct device *dev = container_of(kobj, struct device, kobj); 891 struct device *dev = container_of(kobj, struct device, kobj);
916 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 892 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
917 u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
918 struct qlcnic_pci_func_cfg *pci_cfg; 893 struct qlcnic_pci_func_cfg *pci_cfg;
919 struct qlcnic_pci_info *pci_info; 894 struct qlcnic_pci_info *pci_info;
920 size_t pci_cfg_sz;
921 int i, ret; 895 int i, ret;
896 u32 count;
922 897
923 pci_cfg_sz = pci_func_count * sizeof(*pci_cfg); 898 pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
924 if (size != pci_cfg_sz)
925 return QL_STATUS_INVALID_PARAM;
926
927 pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
928 if (!pci_info) 899 if (!pci_info)
929 return -ENOMEM; 900 return -ENOMEM;
930 901
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
935 } 906 }
936 907
937 pci_cfg = (struct qlcnic_pci_func_cfg *)buf; 908 pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
938 for (i = 0; i < pci_func_count; i++) { 909 count = size / sizeof(struct qlcnic_pci_func_cfg);
910 for (i = 0; i < count; i++) {
939 pci_cfg[i].pci_func = pci_info[i].id; 911 pci_cfg[i].pci_func = pci_info[i].id;
940 pci_cfg[i].func_type = pci_info[i].type; 912 pci_cfg[i].func_type = pci_info[i].type;
941 pci_cfg[i].func_state = 0; 913 pci_cfg[i].func_state = 0;
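
The sysfs rework above replaces the hard "size != expected" check with a count derived from the buffer the reader actually supplied (count = size / sizeof(entry)), skipping any function whose index does not fit instead of failing the whole read. A condensed sketch of the convention, using only identifiers visible in the hunks; the fill step is elided:

    static ssize_t read_func_cfg_sketch(struct qlcnic_adapter *adapter,
                                        char *buf, size_t size)
    {
            struct qlcnic_pm_func_cfg *pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
            u32 count = size / sizeof(*pm_cfg);
            u8 pci_func;
            int i;

            memset(buf, 0, size);
            for (i = 0; i < adapter->ahw->total_nic_func; i++) {
                    pci_func = adapter->npars[i].pci_func;
                    if (pci_func >= count)
                            continue;  /* entry would overrun the reader's buffer */
                    /* ... fill pm_cfg[pci_func] from adapter->npars[i] ... */
            }
            return size;
    }
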
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfddbed50..adf87d26e68f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3331,24 +3331,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
3331 for (i = 0; i < qdev->intr_count; i++) 3331 for (i = 0; i < qdev->intr_count; i++)
3332 qdev->msi_x_entry[i].entry = i; 3332 qdev->msi_x_entry[i].entry = i;
3333 3333
3334 /* Loop to get our vectors. We start with 3334 err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
3335 * what we want and settle for what we get. 3335 1, qdev->intr_count);
3336 */
3337 do {
3338 err = pci_enable_msix(qdev->pdev,
3339 qdev->msi_x_entry, qdev->intr_count);
3340 if (err > 0)
3341 qdev->intr_count = err;
3342 } while (err > 0);
3343
3344 if (err < 0) { 3336 if (err < 0) {
3345 kfree(qdev->msi_x_entry); 3337 kfree(qdev->msi_x_entry);
3346 qdev->msi_x_entry = NULL; 3338 qdev->msi_x_entry = NULL;
3347 netif_warn(qdev, ifup, qdev->ndev, 3339 netif_warn(qdev, ifup, qdev->ndev,
3348 "MSI-X Enable failed, trying MSI.\n"); 3340 "MSI-X Enable failed, trying MSI.\n");
3349 qdev->intr_count = 1;
3350 qlge_irq_type = MSI_IRQ; 3341 qlge_irq_type = MSI_IRQ;
3351 } else if (err == 0) { 3342 } else {
3343 qdev->intr_count = err;
3352 set_bit(QL_MSIX_ENABLED, &qdev->flags); 3344 set_bit(QL_MSIX_ENABLED, &qdev->flags);
3353 netif_info(qdev, ifup, qdev->ndev, 3345 netif_info(qdev, ifup, qdev->ndev,
3354 "MSI-X Enabled, got %d vectors.\n", 3346 "MSI-X Enabled, got %d vectors.\n",
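
The qlge change above is the standard migration off the old pci_enable_msix() probe-and-retry loop: pci_enable_msix_range() negotiates internally and returns the number of vectors actually granted (at least minvec), or a negative errno, so the driver-side do/while disappears. A minimal sketch, with hypothetical fallback helpers:

    int nvec = pci_enable_msix_range(pdev, entries, 1, want);

    if (nvec < 0)
            fall_back_to_msi();     /* hypothetical: nothing was enabled */
    else
            use_vectors(nvec);      /* hypothetical: nvec may be < want */
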
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 737c1a881f78..2bc728e65e24 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -476,7 +476,7 @@ rx_status_loop:
476 rx = 0; 476 rx = 0;
477 cpw16(IntrStatus, cp_rx_intr_mask); 477 cpw16(IntrStatus, cp_rx_intr_mask);
478 478
479 while (1) { 479 while (rx < budget) {
480 u32 status, len; 480 u32 status, len;
481 dma_addr_t mapping, new_mapping; 481 dma_addr_t mapping, new_mapping;
482 struct sk_buff *skb, *new_skb; 482 struct sk_buff *skb, *new_skb;
@@ -554,9 +554,6 @@ rx_next:
554 else 554 else
555 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz); 555 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
556 rx_tail = NEXT_RX(rx_tail); 556 rx_tail = NEXT_RX(rx_tail);
557
558 if (rx >= budget)
559 break;
560 } 557 }
561 558
562 cp->rx_tail = rx_tail; 559 cp->rx_tail = rx_tail;
@@ -899,7 +896,7 @@ out_unlock:
899 896
900 return NETDEV_TX_OK; 897 return NETDEV_TX_OK;
901out_dma_error: 898out_dma_error:
902 kfree_skb(skb); 899 dev_kfree_skb_any(skb);
903 cp->dev->stats.tx_dropped++; 900 cp->dev->stats.tx_dropped++;
904 goto out_unlock; 901 goto out_unlock;
905} 902}
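
Moving the bound into the loop condition above restores the NAPI contract: a poll routine may consume at most budget packets, and netpoll calls poll with a budget of zero expecting no RX work at all, which the old bottom-of-loop break did not honour (it processed one packet before checking). A sketch of the shape, with rx_one_packet() as a hypothetical per-descriptor handler:

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int rx = 0;

            /* a zero budget now falls straight through, doing no RX */
            while (rx < budget && rx_one_packet(napi))
                    rx++;

            if (rx < budget)
                    napi_complete(napi);  /* ring drained; re-arm interrupts */

            return rx;
    }
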
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index da5972eefdd2..2e5df148af4c 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1717,9 +1717,9 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
1717 if (len < ETH_ZLEN) 1717 if (len < ETH_ZLEN)
1718 memset(tp->tx_buf[entry], 0, ETH_ZLEN); 1718 memset(tp->tx_buf[entry], 0, ETH_ZLEN);
1719 skb_copy_and_csum_dev(skb, tp->tx_buf[entry]); 1719 skb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
1720 dev_kfree_skb(skb); 1720 dev_kfree_skb_any(skb);
1721 } else { 1721 } else {
1722 dev_kfree_skb(skb); 1722 dev_kfree_skb_any(skb);
1723 dev->stats.tx_dropped++; 1723 dev->stats.tx_dropped++;
1724 return NETDEV_TX_OK; 1724 return NETDEV_TX_OK;
1725 } 1725 }
@@ -2522,16 +2522,16 @@ rtl8139_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2522 netdev_stats_to_stats64(stats, &dev->stats); 2522 netdev_stats_to_stats64(stats, &dev->stats);
2523 2523
2524 do { 2524 do {
2525 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 2525 start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
2526 stats->rx_packets = tp->rx_stats.packets; 2526 stats->rx_packets = tp->rx_stats.packets;
2527 stats->rx_bytes = tp->rx_stats.bytes; 2527 stats->rx_bytes = tp->rx_stats.bytes;
2528 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 2528 } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
2529 2529
2530 do { 2530 do {
2531 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 2531 start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
2532 stats->tx_packets = tp->tx_stats.packets; 2532 stats->tx_packets = tp->tx_stats.packets;
2533 stats->tx_bytes = tp->tx_stats.bytes; 2533 stats->tx_bytes = tp->tx_stats.bytes;
2534 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 2534 } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
2535 2535
2536 return stats; 2536 return stats;
2537} 2537}
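
The _bh to _irq conversion above tracks a tree-wide rename of the u64_stats reader helpers: on 32-bit systems the reader must retry until the writer-side sequence count is stable, and the _irq naming reflects that writers may now run in (hard)irq context; on 64-bit both variants compile away entirely. The reader pattern, as a self-contained sketch with stand-ins for the per-ring counters:

    struct u64_stats_sync sync;     /* stands in for tp->rx_stats.syncp */
    u64 ring_packets, ring_bytes;   /* writer-side counters */
    unsigned int start;
    u64 packets, bytes;

    do {
            start = u64_stats_fetch_begin_irq(&sync);
            packets = ring_packets;
            bytes = ring_bytes;
    } while (u64_stats_fetch_retry_irq(&sync, start));
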
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3ff7bc3e7a23..aa1c079f231d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5834,7 +5834,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5834 tp->TxDescArray + entry); 5834 tp->TxDescArray + entry);
5835 if (skb) { 5835 if (skb) {
5836 tp->dev->stats.tx_dropped++; 5836 tp->dev->stats.tx_dropped++;
5837 dev_kfree_skb(skb); 5837 dev_kfree_skb_any(skb);
5838 tx_skb->skb = NULL; 5838 tx_skb->skb = NULL;
5839 } 5839 }
5840 } 5840 }
@@ -6059,7 +6059,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
6059err_dma_1: 6059err_dma_1:
6060 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); 6060 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
6061err_dma_0: 6061err_dma_0:
6062 dev_kfree_skb(skb); 6062 dev_kfree_skb_any(skb);
6063err_update_stats: 6063err_update_stats:
6064 dev->stats.tx_dropped++; 6064 dev->stats.tx_dropped++;
6065 return NETDEV_TX_OK; 6065 return NETDEV_TX_OK;
@@ -6142,7 +6142,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
6142 tp->tx_stats.packets++; 6142 tp->tx_stats.packets++;
6143 tp->tx_stats.bytes += tx_skb->skb->len; 6143 tp->tx_stats.bytes += tx_skb->skb->len;
6144 u64_stats_update_end(&tp->tx_stats.syncp); 6144 u64_stats_update_end(&tp->tx_stats.syncp);
6145 dev_kfree_skb(tx_skb->skb); 6145 dev_kfree_skb_any(tx_skb->skb);
6146 tx_skb->skb = NULL; 6146 tx_skb->skb = NULL;
6147 } 6147 }
6148 dirty_tx++; 6148 dirty_tx++;
@@ -6590,17 +6590,17 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6590 rtl8169_rx_missed(dev, ioaddr); 6590 rtl8169_rx_missed(dev, ioaddr);
6591 6591
6592 do { 6592 do {
6593 start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); 6593 start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
6594 stats->rx_packets = tp->rx_stats.packets; 6594 stats->rx_packets = tp->rx_stats.packets;
6595 stats->rx_bytes = tp->rx_stats.bytes; 6595 stats->rx_bytes = tp->rx_stats.bytes;
6596 } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); 6596 } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
6597 6597
6598 6598
6599 do { 6599 do {
6600 start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); 6600 start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
6601 stats->tx_packets = tp->tx_stats.packets; 6601 stats->tx_packets = tp->tx_stats.packets;
6602 stats->tx_bytes = tp->tx_stats.bytes; 6602 stats->tx_bytes = tp->tx_stats.bytes;
6603 } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); 6603 } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
6604 6604
6605 stats->rx_dropped = dev->stats.rx_dropped; 6605 stats->rx_dropped = dev->stats.rx_dropped;
6606 stats->tx_dropped = dev->stats.tx_dropped; 6606 stats->tx_dropped = dev->stats.tx_dropped;
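
The dev_kfree_skb() to dev_kfree_skb_any() swaps above (here and in 8139cp/8139too) harden the TX paths for netpoll: ndo_start_xmit and TX completion normally run in process or softirq context, but netconsole can drive them with interrupts disabled, where a plain dev_kfree_skb() is not allowed. The _any variant checks the calling context and defers the free when necessary; its rough shape:

    static inline void kfree_skb_any_sketch(struct sk_buff *skb)
    {
            if (in_irq() || irqs_disabled())
                    dev_kfree_skb_irq(skb);  /* queue for softirq-time free */
            else
                    dev_kfree_skb(skb);
    }
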
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94e8219..e4bff181c910 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,9 @@
1/* SuperH Ethernet device driver 1/* SuperH Ethernet device driver
2 * 2 *
3 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 3 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
4 * Copyright (C) 2008-2013 Renesas Solutions Corp. 4 * Copyright (C) 2008-2014 Renesas Solutions Corp.
5 * Copyright (C) 2013 Cogent Embedded, Inc. 5 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
6 * Copyright (C) 2014 Codethink Limited
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -27,6 +28,10 @@
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/mdio-bitbang.h> 29#include <linux/mdio-bitbang.h>
29#include <linux/netdevice.h> 30#include <linux/netdevice.h>
31#include <linux/of.h>
32#include <linux/of_device.h>
33#include <linux/of_irq.h>
34#include <linux/of_net.h>
30#include <linux/phy.h> 35#include <linux/phy.h>
31#include <linux/cache.h> 36#include <linux/cache.h>
32#include <linux/io.h> 37#include <linux/io.h>
@@ -36,6 +41,7 @@
36#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
37#include <linux/clk.h> 42#include <linux/clk.h>
38#include <linux/sh_eth.h> 43#include <linux/sh_eth.h>
44#include <linux/of_mdio.h>
39 45
40#include "sh_eth.h" 46#include "sh_eth.h"
41 47
@@ -394,7 +400,8 @@ static void sh_eth_select_mii(struct net_device *ndev)
394 value = 0x0; 400 value = 0x0;
395 break; 401 break;
396 default: 402 default:
397 pr_warn("PHY interface mode was not setup. Set to MII.\n"); 403 netdev_warn(ndev,
404 "PHY interface mode was not setup. Set to MII.\n");
398 value = 0x1; 405 value = 0x1;
399 break; 406 break;
400 } 407 }
@@ -848,7 +855,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
848 cnt--; 855 cnt--;
849 } 856 }
850 if (cnt <= 0) { 857 if (cnt <= 0) {
851 pr_err("Device reset failed\n"); 858 netdev_err(ndev, "Device reset failed\n");
852 ret = -ETIMEDOUT; 859 ret = -ETIMEDOUT;
853 } 860 }
854 return ret; 861 return ret;
@@ -866,7 +873,7 @@ static int sh_eth_reset(struct net_device *ndev)
866 873
867 ret = sh_eth_check_reset(ndev); 874 ret = sh_eth_check_reset(ndev);
868 if (ret) 875 if (ret)
869 goto out; 876 return ret;
870 877
871 /* Table Init */ 878 /* Table Init */
872 sh_eth_write(ndev, 0x0, TDLAR); 879 sh_eth_write(ndev, 0x0, TDLAR);
@@ -893,7 +900,6 @@ static int sh_eth_reset(struct net_device *ndev)
893 EDMR); 900 EDMR);
894 } 901 }
895 902
896out:
897 return ret; 903 return ret;
898} 904}
899 905
@@ -1257,7 +1263,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1257 /* Soft Reset */ 1263 /* Soft Reset */
1258 ret = sh_eth_reset(ndev); 1264 ret = sh_eth_reset(ndev);
1259 if (ret) 1265 if (ret)
1260 goto out; 1266 return ret;
1261 1267
1262 if (mdp->cd->rmiimode) 1268 if (mdp->cd->rmiimode)
1263 sh_eth_write(ndev, 0x1, RMIIMODE); 1269 sh_eth_write(ndev, 0x1, RMIIMODE);
@@ -1336,7 +1342,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
1336 netif_start_queue(ndev); 1342 netif_start_queue(ndev);
1337 } 1343 }
1338 1344
1339out:
1340 return ret; 1345 return ret;
1341} 1346}
1342 1347
@@ -1550,8 +1555,7 @@ ignore_link:
1550 /* Unused write back interrupt */ 1555 /* Unused write back interrupt */
1551 if (intr_status & EESR_TABT) { /* Transmit Abort int */ 1556 if (intr_status & EESR_TABT) { /* Transmit Abort int */
1552 ndev->stats.tx_aborted_errors++; 1557 ndev->stats.tx_aborted_errors++;
1553 if (netif_msg_tx_err(mdp)) 1558 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1554 dev_err(&ndev->dev, "Transmit Abort\n");
1555 } 1559 }
1556 } 1560 }
1557 1561
@@ -1560,45 +1564,38 @@ ignore_link:
1560 if (intr_status & EESR_RFRMER) { 1564 if (intr_status & EESR_RFRMER) {
1561 /* Receive Frame Overflow int */ 1565 /* Receive Frame Overflow int */
1562 ndev->stats.rx_frame_errors++; 1566 ndev->stats.rx_frame_errors++;
1563 if (netif_msg_rx_err(mdp)) 1567 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
1564 dev_err(&ndev->dev, "Receive Abort\n");
1565 } 1568 }
1566 } 1569 }
1567 1570
1568 if (intr_status & EESR_TDE) { 1571 if (intr_status & EESR_TDE) {
1569 /* Transmit Descriptor Empty int */ 1572 /* Transmit Descriptor Empty int */
1570 ndev->stats.tx_fifo_errors++; 1573 ndev->stats.tx_fifo_errors++;
1571 if (netif_msg_tx_err(mdp)) 1574 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1572 dev_err(&ndev->dev, "Transmit Descriptor Empty\n");
1573 } 1575 }
1574 1576
1575 if (intr_status & EESR_TFE) { 1577 if (intr_status & EESR_TFE) {
1576 /* FIFO under flow */ 1578 /* FIFO under flow */
1577 ndev->stats.tx_fifo_errors++; 1579 ndev->stats.tx_fifo_errors++;
1578 if (netif_msg_tx_err(mdp)) 1580 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1579 dev_err(&ndev->dev, "Transmit FIFO Under flow\n");
1580 } 1581 }
1581 1582
1582 if (intr_status & EESR_RDE) { 1583 if (intr_status & EESR_RDE) {
1583 /* Receive Descriptor Empty int */ 1584 /* Receive Descriptor Empty int */
1584 ndev->stats.rx_over_errors++; 1585 ndev->stats.rx_over_errors++;
1585 1586 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
1586 if (netif_msg_rx_err(mdp))
1587 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1588 } 1587 }
1589 1588
1590 if (intr_status & EESR_RFE) { 1589 if (intr_status & EESR_RFE) {
1591 /* Receive FIFO Overflow int */ 1590 /* Receive FIFO Overflow int */
1592 ndev->stats.rx_fifo_errors++; 1591 ndev->stats.rx_fifo_errors++;
1593 if (netif_msg_rx_err(mdp)) 1592 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
1594 dev_err(&ndev->dev, "Receive FIFO Overflow\n");
1595 } 1593 }
1596 1594
1597 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 1595 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1598 /* Address Error */ 1596 /* Address Error */
1599 ndev->stats.tx_fifo_errors++; 1597 ndev->stats.tx_fifo_errors++;
1600 if (netif_msg_tx_err(mdp)) 1598 netif_err(mdp, tx_err, ndev, "Address Error\n");
1601 dev_err(&ndev->dev, "Address Error\n");
1602 } 1599 }
1603 1600
1604 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; 1601 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
@@ -1609,9 +1606,9 @@ ignore_link:
1609 u32 edtrr = sh_eth_read(ndev, EDTRR); 1606 u32 edtrr = sh_eth_read(ndev, EDTRR);
1610 1607
1611 /* dmesg */ 1608 /* dmesg */
1612 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n", 1609 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1613 intr_status, mdp->cur_tx, mdp->dirty_tx, 1610 intr_status, mdp->cur_tx, mdp->dirty_tx,
1614 (u32)ndev->state, edtrr); 1611 (u32)ndev->state, edtrr);
1615 /* dirty buffer free */ 1612 /* dirty buffer free */
1616 sh_eth_txfree(ndev); 1613 sh_eth_txfree(ndev);
1617 1614
@@ -1656,9 +1653,9 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1656 EESIPR); 1653 EESIPR);
1657 __napi_schedule(&mdp->napi); 1654 __napi_schedule(&mdp->napi);
1658 } else { 1655 } else {
1659 dev_warn(&ndev->dev, 1656 netdev_warn(ndev,
1660 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n", 1657 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1661 intr_status, intr_enable); 1658 intr_status, intr_enable);
1662 } 1659 }
1663 } 1660 }
1664 1661
@@ -1757,27 +1754,42 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1757/* PHY init function */ 1754/* PHY init function */
1758static int sh_eth_phy_init(struct net_device *ndev) 1755static int sh_eth_phy_init(struct net_device *ndev)
1759{ 1756{
1757 struct device_node *np = ndev->dev.parent->of_node;
1760 struct sh_eth_private *mdp = netdev_priv(ndev); 1758 struct sh_eth_private *mdp = netdev_priv(ndev);
1761 char phy_id[MII_BUS_ID_SIZE + 3];
1762 struct phy_device *phydev = NULL; 1759 struct phy_device *phydev = NULL;
1763 1760
1764 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1765 mdp->mii_bus->id, mdp->phy_id);
1766
1767 mdp->link = 0; 1761 mdp->link = 0;
1768 mdp->speed = 0; 1762 mdp->speed = 0;
1769 mdp->duplex = -1; 1763 mdp->duplex = -1;
1770 1764
1771 /* Try connect to PHY */ 1765 /* Try connect to PHY */
1772 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link, 1766 if (np) {
1773 mdp->phy_interface); 1767 struct device_node *pn;
1768
1769 pn = of_parse_phandle(np, "phy-handle", 0);
1770 phydev = of_phy_connect(ndev, pn,
1771 sh_eth_adjust_link, 0,
1772 mdp->phy_interface);
1773
1774 if (!phydev)
1775 phydev = ERR_PTR(-ENOENT);
1776 } else {
1777 char phy_id[MII_BUS_ID_SIZE + 3];
1778
1779 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1780 mdp->mii_bus->id, mdp->phy_id);
1781
1782 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1783 mdp->phy_interface);
1784 }
1785
1774 if (IS_ERR(phydev)) { 1786 if (IS_ERR(phydev)) {
1775 dev_err(&ndev->dev, "phy_connect failed\n"); 1787 netdev_err(ndev, "failed to connect PHY\n");
1776 return PTR_ERR(phydev); 1788 return PTR_ERR(phydev);
1777 } 1789 }
1778 1790
1779 dev_info(&ndev->dev, "attached PHY %d (IRQ %d) to driver %s\n", 1791 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1780 phydev->addr, phydev->irq, phydev->drv->name); 1792 phydev->addr, phydev->irq, phydev->drv->name);
1781 1793
1782 mdp->phydev = phydev; 1794 mdp->phydev = phydev;
1783 1795
@@ -1958,12 +1970,12 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
1958 1970
1959 ret = sh_eth_ring_init(ndev); 1971 ret = sh_eth_ring_init(ndev);
1960 if (ret < 0) { 1972 if (ret < 0) {
1961 dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__); 1973 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
1962 return ret; 1974 return ret;
1963 } 1975 }
1964 ret = sh_eth_dev_init(ndev, false); 1976 ret = sh_eth_dev_init(ndev, false);
1965 if (ret < 0) { 1977 if (ret < 0) {
1966 dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__); 1978 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
1967 return ret; 1979 return ret;
1968 } 1980 }
1969 1981
@@ -2004,7 +2016,7 @@ static int sh_eth_open(struct net_device *ndev)
2004 ret = request_irq(ndev->irq, sh_eth_interrupt, 2016 ret = request_irq(ndev->irq, sh_eth_interrupt,
2005 mdp->cd->irq_flags, ndev->name, ndev); 2017 mdp->cd->irq_flags, ndev->name, ndev);
2006 if (ret) { 2018 if (ret) {
2007 dev_err(&ndev->dev, "Can not assign IRQ number\n"); 2019 netdev_err(ndev, "Can not assign IRQ number\n");
2008 goto out_napi_off; 2020 goto out_napi_off;
2009 } 2021 }
2010 2022
@@ -2042,10 +2054,9 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
2042 2054
2043 netif_stop_queue(ndev); 2055 netif_stop_queue(ndev);
2044 2056
2045 if (netif_msg_timer(mdp)) { 2057 netif_err(mdp, timer, ndev,
2046 dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x, resetting...\n", 2058 "transmit timed out, status %8.8x, resetting...\n",
2047 ndev->name, (int)sh_eth_read(ndev, EESR)); 2059 (int)sh_eth_read(ndev, EESR));
2048 }
2049 2060
2050 /* tx_errors count up */ 2061 /* tx_errors count up */
2051 ndev->stats.tx_errors++; 2062 ndev->stats.tx_errors++;
@@ -2080,8 +2091,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2080 spin_lock_irqsave(&mdp->lock, flags); 2091 spin_lock_irqsave(&mdp->lock, flags);
2081 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { 2092 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2082 if (!sh_eth_txfree(ndev)) { 2093 if (!sh_eth_txfree(ndev)) {
2083 if (netif_msg_tx_queued(mdp)) 2094 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2084 dev_warn(&ndev->dev, "TxFD exhausted.\n");
2085 netif_stop_queue(ndev); 2095 netif_stop_queue(ndev);
2086 spin_unlock_irqrestore(&mdp->lock, flags); 2096 spin_unlock_irqrestore(&mdp->lock, flags);
2087 return NETDEV_TX_BUSY; 2097 return NETDEV_TX_BUSY;
@@ -2098,8 +2108,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2098 skb->len + 2); 2108 skb->len + 2);
2099 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, 2109 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2100 DMA_TO_DEVICE); 2110 DMA_TO_DEVICE);
2101 if (skb->len < ETHERSMALL) 2111 if (skb->len < ETH_ZLEN)
2102 txdesc->buffer_length = ETHERSMALL; 2112 txdesc->buffer_length = ETH_ZLEN;
2103 else 2113 else
2104 txdesc->buffer_length = skb->len; 2114 txdesc->buffer_length = skb->len;
2105 2115
@@ -2251,7 +2261,7 @@ static int sh_eth_tsu_busy(struct net_device *ndev)
2251 udelay(10); 2261 udelay(10);
2252 timeout--; 2262 timeout--;
2253 if (timeout <= 0) { 2263 if (timeout <= 0) {
2254 dev_err(&ndev->dev, "%s: timeout\n", __func__); 2264 netdev_err(ndev, "%s: timeout\n", __func__);
2255 return -ETIMEDOUT; 2265 return -ETIMEDOUT;
2256 } 2266 }
2257 } 2267 }
@@ -2571,37 +2581,30 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2571} 2581}
2572 2582
2573/* MDIO bus release function */ 2583/* MDIO bus release function */
2574static int sh_mdio_release(struct net_device *ndev) 2584static int sh_mdio_release(struct sh_eth_private *mdp)
2575{ 2585{
2576 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2577
2578 /* unregister mdio bus */ 2586 /* unregister mdio bus */
2579 mdiobus_unregister(bus); 2587 mdiobus_unregister(mdp->mii_bus);
2580
2581 /* remove mdio bus info from net_device */
2582 dev_set_drvdata(&ndev->dev, NULL);
2583 2588
2584 /* free bitbang info */ 2589 /* free bitbang info */
2585 free_mdio_bitbang(bus); 2590 free_mdio_bitbang(mdp->mii_bus);
2586 2591
2587 return 0; 2592 return 0;
2588} 2593}
2589 2594
2590/* MDIO bus init function */ 2595/* MDIO bus init function */
2591static int sh_mdio_init(struct net_device *ndev, int id, 2596static int sh_mdio_init(struct sh_eth_private *mdp,
2592 struct sh_eth_plat_data *pd) 2597 struct sh_eth_plat_data *pd)
2593{ 2598{
2594 int ret, i; 2599 int ret, i;
2595 struct bb_info *bitbang; 2600 struct bb_info *bitbang;
2596 struct sh_eth_private *mdp = netdev_priv(ndev); 2601 struct platform_device *pdev = mdp->pdev;
2602 struct device *dev = &mdp->pdev->dev;
2597 2603
2598 /* create bit control struct for PHY */ 2604 /* create bit control struct for PHY */
2599 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info), 2605 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
2600 GFP_KERNEL); 2606 if (!bitbang)
2601 if (!bitbang) { 2607 return -ENOMEM;
2602 ret = -ENOMEM;
2603 goto out;
2604 }
2605 2608
2606 /* bitbang init */ 2609 /* bitbang init */
2607 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2610 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
@@ -2614,44 +2617,42 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2614 2617
2615 /* MII controller setting */ 2618 /* MII controller setting */
2616 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2619 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2617 if (!mdp->mii_bus) { 2620 if (!mdp->mii_bus)
2618 ret = -ENOMEM; 2621 return -ENOMEM;
2619 goto out;
2620 }
2621 2622
2622 /* Hook up MII support for ethtool */ 2623 /* Hook up MII support for ethtool */
2623 mdp->mii_bus->name = "sh_mii"; 2624 mdp->mii_bus->name = "sh_mii";
2624 mdp->mii_bus->parent = &ndev->dev; 2625 mdp->mii_bus->parent = dev;
2625 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2626 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2626 mdp->pdev->name, id); 2627 pdev->name, pdev->id);
2627 2628
2628 /* PHY IRQ */ 2629 /* PHY IRQ */
2629 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev, 2630 mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
2630 sizeof(int) * PHY_MAX_ADDR,
2631 GFP_KERNEL); 2631 GFP_KERNEL);
2632 if (!mdp->mii_bus->irq) { 2632 if (!mdp->mii_bus->irq) {
2633 ret = -ENOMEM; 2633 ret = -ENOMEM;
2634 goto out_free_bus; 2634 goto out_free_bus;
2635 } 2635 }
2636 2636
2637 for (i = 0; i < PHY_MAX_ADDR; i++) 2637 /* register MDIO bus */
2638 mdp->mii_bus->irq[i] = PHY_POLL; 2638 if (dev->of_node) {
2639 if (pd->phy_irq > 0) 2639 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
2640 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; 2640 } else {
2641 for (i = 0; i < PHY_MAX_ADDR; i++)
2642 mdp->mii_bus->irq[i] = PHY_POLL;
2643 if (pd->phy_irq > 0)
2644 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2645
2646 ret = mdiobus_register(mdp->mii_bus);
2647 }
2641 2648
2642 /* register mdio bus */
2643 ret = mdiobus_register(mdp->mii_bus);
2644 if (ret) 2649 if (ret)
2645 goto out_free_bus; 2650 goto out_free_bus;
2646 2651
2647 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2648
2649 return 0; 2652 return 0;
2650 2653
2651out_free_bus: 2654out_free_bus:
2652 free_mdio_bitbang(mdp->mii_bus); 2655 free_mdio_bitbang(mdp->mii_bus);
2653
2654out:
2655 return ret; 2656 return ret;
2656} 2657}
2657 2658
@@ -2676,7 +2677,6 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2676 reg_offset = sh_eth_offset_fast_sh3_sh2; 2677 reg_offset = sh_eth_offset_fast_sh3_sh2;
2677 break; 2678 break;
2678 default: 2679 default:
2679 pr_err("Unknown register type (%d)\n", register_type);
2680 break; 2680 break;
2681 } 2681 }
2682 2682
@@ -2710,6 +2710,48 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2710 .ndo_change_mtu = eth_change_mtu, 2710 .ndo_change_mtu = eth_change_mtu,
2711}; 2711};
2712 2712
2713#ifdef CONFIG_OF
2714static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2715{
2716 struct device_node *np = dev->of_node;
2717 struct sh_eth_plat_data *pdata;
2718 const char *mac_addr;
2719
2720 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
2721 if (!pdata)
2722 return NULL;
2723
2724 pdata->phy_interface = of_get_phy_mode(np);
2725
2726 mac_addr = of_get_mac_address(np);
2727 if (mac_addr)
2728 memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
2729
2730 pdata->no_ether_link =
2731 of_property_read_bool(np, "renesas,no-ether-link");
2732 pdata->ether_link_active_low =
2733 of_property_read_bool(np, "renesas,ether-link-active-low");
2734
2735 return pdata;
2736}
2737
2738static const struct of_device_id sh_eth_match_table[] = {
2739 { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
2740 { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
2741 { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
2742 { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
2743 { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
2744 { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
2745 { }
2746};
2747MODULE_DEVICE_TABLE(of, sh_eth_match_table);
2748#else
2749static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
2750{
2751 return NULL;
2752}
2753#endif
2754
2713static int sh_eth_drv_probe(struct platform_device *pdev) 2755static int sh_eth_drv_probe(struct platform_device *pdev)
2714{ 2756{
2715 int ret, devno = 0; 2757 int ret, devno = 0;
@@ -2723,15 +2765,12 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2723 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2765 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2724 if (unlikely(res == NULL)) { 2766 if (unlikely(res == NULL)) {
2725 dev_err(&pdev->dev, "invalid resource\n"); 2767 dev_err(&pdev->dev, "invalid resource\n");
2726 ret = -EINVAL; 2768 return -EINVAL;
2727 goto out;
2728 } 2769 }
2729 2770
2730 ndev = alloc_etherdev(sizeof(struct sh_eth_private)); 2771 ndev = alloc_etherdev(sizeof(struct sh_eth_private));
2731 if (!ndev) { 2772 if (!ndev)
2732 ret = -ENOMEM; 2773 return -ENOMEM;
2733 goto out;
2734 }
2735 2774
2736 /* The sh Ether-specific entries in the device structure. */ 2775 /* The sh Ether-specific entries in the device structure. */
2737 ndev->base_addr = res->start; 2776 ndev->base_addr = res->start;
@@ -2763,6 +2802,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2763 pm_runtime_enable(&pdev->dev); 2802 pm_runtime_enable(&pdev->dev);
2764 pm_runtime_resume(&pdev->dev); 2803 pm_runtime_resume(&pdev->dev);
2765 2804
2805 if (pdev->dev.of_node)
2806 pd = sh_eth_parse_dt(&pdev->dev);
2766 if (!pd) { 2807 if (!pd) {
2767 dev_err(&pdev->dev, "no platform data\n"); 2808 dev_err(&pdev->dev, "no platform data\n");
2768 ret = -EINVAL; 2809 ret = -EINVAL;
@@ -2778,8 +2819,22 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2778 mdp->ether_link_active_low = pd->ether_link_active_low; 2819 mdp->ether_link_active_low = pd->ether_link_active_low;
2779 2820
2780 /* set cpu data */ 2821 /* set cpu data */
2781 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 2822 if (id) {
2823 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
2824 } else {
2825 const struct of_device_id *match;
2826
2827 match = of_match_device(of_match_ptr(sh_eth_match_table),
2828 &pdev->dev);
2829 mdp->cd = (struct sh_eth_cpu_data *)match->data;
2830 }
2782 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); 2831 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
2832 if (!mdp->reg_offset) {
2833 dev_err(&pdev->dev, "Unknown register type (%d)\n",
2834 mdp->cd->register_type);
2835 ret = -EINVAL;
2836 goto out_release;
2837 }
2783 sh_eth_set_default_cpu_data(mdp->cd); 2838 sh_eth_set_default_cpu_data(mdp->cd);
2784 2839
2785 /* set function */ 2840 /* set function */
@@ -2825,6 +2880,13 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2825 } 2880 }
2826 } 2881 }
2827 2882
2883 /* MDIO bus init */
2884 ret = sh_mdio_init(mdp, pd);
2885 if (ret) {
2886 dev_err(&ndev->dev, "failed to initialise MDIO\n");
2887 goto out_release;
2888 }
2889
2828 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64); 2890 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
2829 2891
2830 /* network device register */ 2892 /* network device register */
@@ -2832,31 +2894,23 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2832 if (ret) 2894 if (ret)
2833 goto out_napi_del; 2895 goto out_napi_del;
2834 2896
2835 /* mdio bus init */
2836 ret = sh_mdio_init(ndev, pdev->id, pd);
2837 if (ret)
2838 goto out_unregister;
2839
2840 /* print device information */ 2897 /* print device information */
2841 pr_info("Base address at 0x%x, %pM, IRQ %d.\n", 2898 netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
2842 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); 2899 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
2843 2900
2844 platform_set_drvdata(pdev, ndev); 2901 platform_set_drvdata(pdev, ndev);
2845 2902
2846 return ret; 2903 return ret;
2847 2904
2848out_unregister:
2849 unregister_netdev(ndev);
2850
2851out_napi_del: 2905out_napi_del:
2852 netif_napi_del(&mdp->napi); 2906 netif_napi_del(&mdp->napi);
2907 sh_mdio_release(mdp);
2853 2908
2854out_release: 2909out_release:
2855 /* net_dev free */ 2910 /* net_dev free */
2856 if (ndev) 2911 if (ndev)
2857 free_netdev(ndev); 2912 free_netdev(ndev);
2858 2913
2859out:
2860 return ret; 2914 return ret;
2861} 2915}
2862 2916
@@ -2865,9 +2919,9 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
2865 struct net_device *ndev = platform_get_drvdata(pdev); 2919 struct net_device *ndev = platform_get_drvdata(pdev);
2866 struct sh_eth_private *mdp = netdev_priv(ndev); 2920 struct sh_eth_private *mdp = netdev_priv(ndev);
2867 2921
2868 sh_mdio_release(ndev);
2869 unregister_netdev(ndev); 2922 unregister_netdev(ndev);
2870 netif_napi_del(&mdp->napi); 2923 netif_napi_del(&mdp->napi);
2924 sh_mdio_release(mdp);
2871 pm_runtime_disable(&pdev->dev); 2925 pm_runtime_disable(&pdev->dev);
2872 free_netdev(ndev); 2926 free_netdev(ndev);
2873 2927
@@ -2920,6 +2974,7 @@ static struct platform_driver sh_eth_driver = {
2920 .driver = { 2974 .driver = {
2921 .name = CARDNAME, 2975 .name = CARDNAME,
2922 .pm = SH_ETH_PM_OPS, 2976 .pm = SH_ETH_PM_OPS,
2977 .of_match_table = of_match_ptr(sh_eth_match_table),
2923 }, 2978 },
2924}; 2979};
2925 2980
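
The sh_eth probe now accepts either legacy platform data or a device-tree node: sh_eth_parse_dt() builds platform data from the node, cpu data comes from the OF match table when there is no platform_device_id, the MDIO bus is registered through of_mdiobus_register(), and the PHY is resolved through the standard "phy-handle" property. A minimal sketch of that last step, assuming names from the hunks above; note that of_parse_phandle() returns a referenced node which a complete caller drops with of_node_put() once the connect has been attempted:

    struct device_node *pn = of_parse_phandle(np, "phy-handle", 0);
    struct phy_device *phydev;

    phydev = of_phy_connect(ndev, pn, sh_eth_adjust_link, 0,
                            mdp->phy_interface);
    of_node_put(pn);
    if (!phydev)
            return -ENOENT;
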
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915b88ec..d55e37cd5fec 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
27#define RX_RING_MIN 64 27#define RX_RING_MIN 64
28#define TX_RING_MAX 1024 28#define TX_RING_MAX 1024
29#define RX_RING_MAX 1024 29#define RX_RING_MAX 1024
30#define ETHERSMALL 60 30#define PKT_BUF_SZ 1538
31#define PKT_BUF_SZ 1538
32#define SH_ETH_TSU_TIMEOUT_MS 500 31#define SH_ETH_TSU_TIMEOUT_MS 500
33#define SH_ETH_TSU_CAM_ENTRIES 32 32#define SH_ETH_TSU_CAM_ENTRIES 32
34 33
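
Dropping ETHERSMALL in favour of ETH_ZLEN removes a private copy of a standard constant: ETH_ZLEN (60, from <linux/if_ether.h>) is the minimum Ethernet frame length excluding the FCS, which is what the transmit path above pads short frames to. Where a driver must supply the pad bytes itself rather than tell the MAC a longer length, the usual alternative is skb_padto(), shown as a sketch; it zero-extends the buffer and frees the skb on allocation failure:

    if (skb_padto(skb, ETH_ZLEN))
            return NETDEV_TX_OK;    /* skb already freed on failure */
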
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f5fe51..651626e133f9 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -162,8 +162,8 @@ static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
162 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) 162 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
163 return -EIO; 163 return -EIO;
164 164
165 memcpy(mac_address, 165 ether_addr_copy(mac_address,
166 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN); 166 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
167 return 0; 167 return 0;
168} 168}
169 169
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
172 struct efx_ef10_nic_data *nic_data; 172 struct efx_ef10_nic_data *nic_data;
173 int i, rc; 173 int i, rc;
174 174
175 /* We can have one VI for each 8K region. However we need 175 /* We can have one VI for each 8K region. However, until we
176 * multiple TX queues per channel. 176 * use TX option descriptors we need two TX queues per channel.
177 */ 177 */
178 efx->max_channels = 178 efx->max_channels =
179 min_t(unsigned int, 179 min_t(unsigned int,
@@ -1955,6 +1955,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1955 int tx_descs = 0; 1955 int tx_descs = 0;
1956 int spent = 0; 1956 int spent = 0;
1957 1957
1958 if (quota <= 0)
1959 return spent;
1960
1958 read_ptr = channel->eventq_read_ptr; 1961 read_ptr = channel->eventq_read_ptr;
1959 1962
1960 for (;;) { 1963 for (;;) {
@@ -3145,12 +3148,10 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3145 table->dev_uc_count = -1; 3148 table->dev_uc_count = -1;
3146 } else { 3149 } else {
3147 table->dev_uc_count = 1 + netdev_uc_count(net_dev); 3150 table->dev_uc_count = 1 + netdev_uc_count(net_dev);
3148 memcpy(table->dev_uc_list[0].addr, net_dev->dev_addr, 3151 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3149 ETH_ALEN);
3150 i = 1; 3152 i = 1;
3151 netdev_for_each_uc_addr(uc, net_dev) { 3153 netdev_for_each_uc_addr(uc, net_dev) {
3152 memcpy(table->dev_uc_list[i].addr, 3154 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
3153 uc->addr, ETH_ALEN);
3154 i++; 3155 i++;
3155 } 3156 }
3156 } 3157 }
@@ -3162,8 +3163,7 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
3162 eth_broadcast_addr(table->dev_mc_list[0].addr); 3163 eth_broadcast_addr(table->dev_mc_list[0].addr);
3163 i = 1; 3164 i = 1;
3164 netdev_for_each_mc_addr(mc, net_dev) { 3165 netdev_for_each_mc_addr(mc, net_dev) {
3165 memcpy(table->dev_mc_list[i].addr, 3166 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
3166 mc->addr, ETH_ALEN);
3167 i++; 3167 i++;
3168 } 3168 }
3169 } 3169 }
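
Two independent clean-ups meet in this file: the ether_addr_copy() conversions replace open-coded memcpy(..., ETH_ALEN) with the dedicated 6-byte helper, and the quota <= 0 early return in efx_ef10_ev_process() is the sfc instance of the zero-budget netpoll rule seen in 8139cp above. ether_addr_copy() requires both pointers to be 2-byte aligned, which the containing structures here satisfy; its unaligned-unsafe fallback is roughly:

    static inline void ether_addr_copy_sketch(u8 *dst, const u8 *src)
    {
            u16 *a = (u16 *)dst;
            const u16 *b = (const u16 *)src;

            a[0] = b[0];    /* three halfword moves instead of a memcpy call */
            a[1] = b[1];
            a[2] = b[2];
    }
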
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a1e3de..62a55dde61d5 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0 227#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48 228#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
229 229
230/* RX_USER_DESC */
231#define ESF_DZ_RX_USR_RESERVED_LBN 62
232#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
233#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
234#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
235#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
236#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
237#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
238#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
239#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
240#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
241#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
242#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
243#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
244#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
245#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
246#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
247#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
248#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
249#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
250#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
251#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
252#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
253#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
254#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
255#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
256#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
257#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
258#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
259
260/* TX_CSUM_TSTAMP_DESC */ 230/* TX_CSUM_TSTAMP_DESC */
261#define ESF_DZ_TX_DESC_IS_OPT_LBN 63 231#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
262#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1 232#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
338#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0 308#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
339#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32 309#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
340 310
341/* TX_USER_DESC */
342#define ESF_DZ_TX_USR_TYPE_LBN 63
343#define ESF_DZ_TX_USR_TYPE_WIDTH 1
344#define ESF_DZ_TX_USR_CONT_LBN 62
345#define ESF_DZ_TX_USR_CONT_WIDTH 1
346#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
347#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
348#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
349#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
350#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
351#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
352#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
353#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
354#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
355#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
356#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
357#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
358#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
359#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
360#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
361#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
362#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
363#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
364#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
365#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
366#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
367#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
368#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
369#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
370#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
371#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
372/*************************************************************************/ 311/*************************************************************************/
373 312
374/* TX_DESC_UPD_REG: Transmit descriptor update register. 313/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d464347021..52589f6a8beb 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
503 goto fail; 503 goto fail;
504 } 504 }
505 505
506 channel->n_rx_frm_trunc = 0;
507
508 return 0; 506 return 0;
509 507
510fail: 508fail:
@@ -1014,7 +1012,7 @@ static int efx_probe_port(struct efx_nic *efx)
1014 return rc; 1012 return rc;
1015 1013
1016 /* Initialise MAC address to permanent address */ 1014 /* Initialise MAC address to permanent address */
1017 memcpy(efx->net_dev->dev_addr, efx->net_dev->perm_addr, ETH_ALEN); 1015 ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
1018 1016
1019 return 0; 1017 return 0;
1020} 1018}
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1346 1344
1347 for (i = 0; i < n_channels; i++) 1345 for (i = 0; i < n_channels; i++)
1348 xentries[i].entry = i; 1346 xentries[i].entry = i;
1349 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); 1347 rc = pci_enable_msix_range(efx->pci_dev,
1350 if (rc > 0) { 1348 xentries, 1, n_channels);
1349 if (rc < 0) {
1350 /* Fall back to single channel MSI */
1351 efx->interrupt_mode = EFX_INT_MODE_MSI;
1352 netif_err(efx, drv, efx->net_dev,
1353 "could not enable MSI-X\n");
1354 } else if (rc < n_channels) {
1351 netif_err(efx, drv, efx->net_dev, 1355 netif_err(efx, drv, efx->net_dev,
1352 "WARNING: Insufficient MSI-X vectors" 1356 "WARNING: Insufficient MSI-X vectors"
1353 " available (%d < %u).\n", rc, n_channels); 1357 " available (%d < %u).\n", rc, n_channels);
1354 netif_err(efx, drv, efx->net_dev, 1358 netif_err(efx, drv, efx->net_dev,
1355 "WARNING: Performance may be reduced.\n"); 1359 "WARNING: Performance may be reduced.\n");
1356 EFX_BUG_ON_PARANOID(rc >= n_channels);
1357 n_channels = rc; 1360 n_channels = rc;
1358 rc = pci_enable_msix(efx->pci_dev, xentries,
1359 n_channels);
1360 } 1361 }
1361 1362
1362 if (rc == 0) { 1363 if (rc > 0) {
1363 efx->n_channels = n_channels; 1364 efx->n_channels = n_channels;
1364 if (n_channels > extra_channels) 1365 if (n_channels > extra_channels)
1365 n_channels -= extra_channels; 1366 n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
1375 for (i = 0; i < efx->n_channels; i++) 1376 for (i = 0; i < efx->n_channels; i++)
1376 efx_get_channel(efx, i)->irq = 1377 efx_get_channel(efx, i)->irq =
1377 xentries[i].vector; 1378 xentries[i].vector;
1378 } else {
1379 /* Fall back to single channel MSI */
1380 efx->interrupt_mode = EFX_INT_MODE_MSI;
1381 netif_err(efx, drv, efx->net_dev,
1382 "could not enable MSI-X\n");
1383 } 1379 }
1384 } 1380 }
1385 1381
@@ -2115,7 +2111,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2115{ 2111{
2116 struct efx_nic *efx = netdev_priv(net_dev); 2112 struct efx_nic *efx = netdev_priv(net_dev);
2117 struct sockaddr *addr = data; 2113 struct sockaddr *addr = data;
2118 char *new_addr = addr->sa_data; 2114 u8 *new_addr = addr->sa_data;
2119 2115
2120 if (!is_valid_ether_addr(new_addr)) { 2116 if (!is_valid_ether_addr(new_addr)) {
2121 netif_err(efx, drv, efx->net_dev, 2117 netif_err(efx, drv, efx->net_dev,
@@ -2124,7 +2120,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
2124 return -EADDRNOTAVAIL; 2120 return -EADDRNOTAVAIL;
2125 } 2121 }
2126 2122
2127 memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); 2123 ether_addr_copy(net_dev->dev_addr, new_addr);
2128 efx_sriov_mac_address_changed(efx); 2124 efx_sriov_mac_address_changed(efx);
2129 2125
2130 /* Reconfigure the MAC */ 2126 /* Reconfigure the MAC */
@@ -3273,6 +3269,6 @@ module_exit(efx_exit_module);
3273 3269
3274MODULE_AUTHOR("Solarflare Communications and " 3270MODULE_AUTHOR("Solarflare Communications and "
3275 "Michael Brown <mbrown@fensystems.co.uk>"); 3271 "Michael Brown <mbrown@fensystems.co.uk>");
3276MODULE_DESCRIPTION("Solarflare Communications network driver"); 3272MODULE_DESCRIPTION("Solarflare network driver");
3277MODULE_LICENSE("GPL"); 3273MODULE_LICENSE("GPL");
3278MODULE_DEVICE_TABLE(pci, efx_pci_table); 3274MODULE_DEVICE_TABLE(pci, efx_pci_table);
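
The efx conversion shows the part of the pci_enable_msix_range() contract the qlge hunk did not: when minvec < maxvec, a positive return smaller than maxvec is a success that must still be handled by shrinking the channel count, distinct from the negative-errno fallback to MSI. Condensed:

    rc = pci_enable_msix_range(pci_dev, xentries, 1, n_channels);
    if (rc < 0) {
            /* nothing enabled: fall back to single-vector MSI */
    } else if (rc < n_channels) {
            /* partial grant: warn and reduce n_channels to rc */
    } else {
            /* full grant */
    }
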
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78fe01c..99032581336f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
14#include "net_driver.h" 14#include "net_driver.h"
15#include "filter.h" 15#include "filter.h"
16 16
17/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ 17/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
18#define EFX_MEM_BAR 2 18#define EFX_MEM_BAR 2
19 19
20/* TX */ 20/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 229428915aa8..0de8b07c24c2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
251 * @test_index: Starting index of the test 251 * @test_index: Starting index of the test
252 * @strings: Ethtool strings, or %NULL 252 * @strings: Ethtool strings, or %NULL
253 * @data: Ethtool test results, or %NULL 253 * @data: Ethtool test results, or %NULL
254 *
255 * Fill in a block of loopback self-test entries. Return new test
256 * index.
254 */ 257 */
255static int efx_fill_loopback_test(struct efx_nic *efx, 258static int efx_fill_loopback_test(struct efx_nic *efx,
256 struct efx_loopback_self_tests *lb_tests, 259 struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
290 * @tests: Efx self-test results structure, or %NULL 293 * @tests: Efx self-test results structure, or %NULL
291 * @strings: Ethtool strings, or %NULL 294 * @strings: Ethtool strings, or %NULL
292 * @data: Ethtool test results, or %NULL 295 * @data: Ethtool test results, or %NULL
296 *
297 * Get self-test number of strings, strings, and/or test results.
298 * Return number of strings (== number of test results).
299 *
300 * The reason for merging these three functions is to make sure that
301 * they can never be inconsistent.
293 */ 302 */
294static int efx_ethtool_fill_self_tests(struct efx_nic *efx, 303static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
295 struct efx_self_tests *tests, 304 struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
444{ 453{
445 struct efx_nic *efx = netdev_priv(net_dev); 454 struct efx_nic *efx = netdev_priv(net_dev);
446 struct efx_self_tests *efx_tests; 455 struct efx_self_tests *efx_tests;
447 int already_up; 456 bool already_up;
448 int rc = -ENOMEM; 457 int rc = -ENOMEM;
449 458
450 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); 459 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
452 goto fail; 461 goto fail;
453 462
454 if (efx->state != STATE_READY) { 463 if (efx->state != STATE_READY) {
455 rc = -EIO; 464 rc = -EBUSY;
456 goto fail1; 465 goto out;
457 } 466 }
458 467
459 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n", 468 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
466 if (rc) { 475 if (rc) {
467 netif_err(efx, drv, efx->net_dev, 476 netif_err(efx, drv, efx->net_dev,
468 "failed opening device.\n"); 477 "failed opening device.\n");
469 goto fail1; 478 goto out;
470 } 479 }
471 } 480 }
472 481
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
479 rc == 0 ? "passed" : "failed", 488 rc == 0 ? "passed" : "failed",
480 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 489 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
481 490
482fail1: 491out:
483 /* Fill ethtool results structures */
484 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); 492 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
485 kfree(efx_tests); 493 kfree(efx_tests);
486fail: 494fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
691 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); 699 pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
692} 700}
693 701
694
695static void efx_ethtool_get_wol(struct net_device *net_dev, 702static void efx_ethtool_get_wol(struct net_device *net_dev,
696 struct ethtool_wolinfo *wol) 703 struct ethtool_wolinfo *wol)
697{ 704{
@@ -720,7 +727,7 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
720} 727}
721 728
722/* MAC address mask including only I/G bit */ 729/* MAC address mask including only I/G bit */
723static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 730static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
724 731
725#define IP4_ADDR_FULL_MASK ((__force __be32)~0) 732#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
726#define PORT_FULL_MASK ((__force __be16)~0) 733#define PORT_FULL_MASK ((__force __be16)~0)
@@ -780,16 +787,16 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
780 rule->flow_type = ETHER_FLOW; 787 rule->flow_type = ETHER_FLOW;
781 if (spec.match_flags & 788 if (spec.match_flags &
782 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) { 789 (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
783 memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN); 790 ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
784 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC) 791 if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
785 memset(mac_mask->h_dest, ~0, ETH_ALEN); 792 eth_broadcast_addr(mac_mask->h_dest);
786 else 793 else
787 memcpy(mac_mask->h_dest, mac_addr_ig_mask, 794 ether_addr_copy(mac_mask->h_dest,
788 ETH_ALEN); 795 mac_addr_ig_mask);
789 } 796 }
790 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) { 797 if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
791 memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN); 798 ether_addr_copy(mac_entry->h_source, spec.rem_mac);
792 memset(mac_mask->h_source, ~0, ETH_ALEN); 799 eth_broadcast_addr(mac_mask->h_source);
793 } 800 }
794 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) { 801 if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
795 mac_entry->h_proto = spec.ether_type; 802 mac_entry->h_proto = spec.ether_type;
@@ -961,13 +968,13 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
961 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC; 968 spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
962 else 969 else
963 return -EINVAL; 970 return -EINVAL;
964 memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN); 971 ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
965 } 972 }
966 if (!is_zero_ether_addr(mac_mask->h_source)) { 973 if (!is_zero_ether_addr(mac_mask->h_source)) {
967 if (!is_broadcast_ether_addr(mac_mask->h_source)) 974 if (!is_broadcast_ether_addr(mac_mask->h_source))
968 return -EINVAL; 975 return -EINVAL;
969 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC; 976 spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
970 memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN); 977 ether_addr_copy(spec.rem_mac, mac_entry->h_source);
971 } 978 }
972 if (mac_mask->h_proto) { 979 if (mac_mask->h_proto) {
973 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK) 980 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
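
Several hunks in this file replace open-coded memcpy()/memset() on MAC addresses with the etherdevice.h helpers. ether_addr_copy() copies the six bytes as 16-bit words on architectures without cheap unaligned access, so both pointers must be 2-byte aligned; that is why the static mask above gains __aligned(2). A hedged sketch of the pattern, with illustrative names and contents:

    #include <linux/etherdevice.h>

    /* Static MAC constants passed to ether_addr_copy() need 2-byte
     * alignment; the array contents here are illustrative.
     */
    static const u8 example_ig_mask[ETH_ALEN] __aligned(2) = {
            0x01, 0, 0, 0, 0, 0,
    };

    static void example_fill(u8 dest[ETH_ALEN], u8 source[ETH_ALEN])
    {
            ether_addr_copy(dest, example_ig_mask); /* 6-byte copy */
            eth_broadcast_addr(source);             /* ff:ff:ff:ff:ff:ff */
    }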
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f761f4d0..8ec20b713cc6 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
422 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS); 422 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
423} 423}
424 424
425
426static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 425static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
427{ 426{
428 struct efx_nic *efx = dev_id; 427 struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
467 efx_schedule_channel_irq(efx_get_channel(efx, 1)); 466 efx_schedule_channel_irq(efx_get_channel(efx, 1));
468 return IRQ_HANDLED; 467 return IRQ_HANDLED;
469} 468}
469
470/************************************************************************** 470/**************************************************************************
471 * 471 *
472 * RSS 472 * RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1358 case 100: link_speed = 1; break; 1358 case 100: link_speed = 1; break;
1359 default: link_speed = 0; break; 1359 default: link_speed = 0; break;
1360 } 1360 }
1361
1361 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work 1362 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1362 * as advertised. Disable to ensure packets are not 1363 * as advertised. Disable to ensure packets are not
1363 * indefinitely held and TX queue can be flushed at any point 1364 * indefinitely held and TX queue can be flushed at any point
@@ -2182,7 +2183,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2182 } 2183 }
2183 2184
2184 /* Read the MAC addresses */ 2185 /* Read the MAC addresses */
2185 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN); 2186 ether_addr_copy(efx->net_dev->perm_addr, nvconfig->mac_address[0]);
2186 2187
2187 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 2188 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
2188 efx->phy_type, efx->mdio.prtad); 2189 efx->phy_type, efx->mdio.prtad);
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
2868 .mcdi_max_ver = -1, 2869 .mcdi_max_ver = -1,
2869 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS, 2870 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
2870}; 2871};
2871
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a105ca..a08761360cdf 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
311 */ 311 */
312void efx_farch_tx_write(struct efx_tx_queue *tx_queue) 312void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
313{ 313{
314
315 struct efx_tx_buffer *buffer; 314 struct efx_tx_buffer *buffer;
316 efx_qword_t *txd; 315 efx_qword_t *txd;
317 unsigned write_ptr; 316 unsigned write_ptr;
@@ -1249,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
1249 int tx_packets = 0; 1248 int tx_packets = 0;
1250 int spent = 0; 1249 int spent = 0;
1251 1250
1251 if (budget <= 0)
1252 return spent;
1253
1252 read_ptr = channel->eventq_read_ptr; 1254 read_ptr = channel->eventq_read_ptr;
1253 1255
1254 for (;;) { 1256 for (;;) {
@@ -1609,7 +1611,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
1609 return IRQ_HANDLED; 1611 return IRQ_HANDLED;
1610} 1612}
1611 1613
1612
1613/* Setup RSS indirection table. 1614/* Setup RSS indirection table.
1614 * This maps from the hash value of the packet to RXQ 1615 * This maps from the hash value of the packet to RXQ
1615 */ 1616 */
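
The budget guard added to efx_farch_ev_process() above (and to the tile and tc35815 poll routines later in this series) covers callers such as netpoll that may invoke a poll handler with a zero budget and expect no packets to be processed. A sketch of the pattern, with illustrative names:

    /* Sketch: guard an event/poll loop against budget <= 0. */
    static int example_ev_process(void *channel, int budget)
    {
            int spent = 0;

            if (budget <= 0)        /* e.g. netpoll passes budget == 0 */
                    return spent;

            /* ... process at most 'budget' events, counting them in
             * 'spent', then report spent back to the caller ...
             */
            return spent;
    }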
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 3ef298d3c47e..d0ed7f71ea7e 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -243,7 +243,7 @@ static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
243 } 243 }
244 if (addr != NULL) { 244 if (addr != NULL) {
245 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC; 245 spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
246 memcpy(spec->loc_mac, addr, ETH_ALEN); 246 ether_addr_copy(spec->loc_mac, addr);
247 } 247 }
248 return 0; 248 return 0;
249} 249}
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index eb59abb57e85..7bd4b14bf3b3 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1187,6 +1187,9 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1187 int rc; 1187 int rc;
1188 1188
1189 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); 1189 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
1190 /* we need __aligned(2) for ether_addr_copy */
1191 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
1192 BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
1190 1193
1191 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, 1194 rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
1192 outbuf, sizeof(outbuf), &outlen); 1195 outbuf, sizeof(outbuf), &outlen);
@@ -1199,11 +1202,10 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
1199 } 1202 }
1200 1203
1201 if (mac_address) 1204 if (mac_address)
1202 memcpy(mac_address, 1205 ether_addr_copy(mac_address,
1203 port_num ? 1206 port_num ?
1204 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) : 1207 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
1205 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0), 1208 MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
1206 ETH_ALEN);
1207 if (fw_subtype_list) { 1209 if (fw_subtype_list) {
1208 for (i = 0; 1210 for (i = 0;
1209 i < MCDI_VAR_ARRAY_LEN(outlen, 1211 i < MCDI_VAR_ARRAY_LEN(outlen,
@@ -1532,7 +1534,7 @@ static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1532 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); 1534 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
1533 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, 1535 MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
1534 MC_CMD_FILTER_MODE_SIMPLE); 1536 MC_CMD_FILTER_MODE_SIMPLE);
1535 memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); 1537 ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
1536 1538
1537 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), 1539 rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
1538 outbuf, sizeof(outbuf), &outlen); 1540 outbuf, sizeof(outbuf), &outlen);
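
Because ether_addr_copy() assumes 2-byte alignment, the mcdi.c hunk adds compile-time checks that the MAC-address offsets inside the dword-based MCDI response buffer are even. BUILD_BUG_ON() turns a future violation into a build failure rather than a runtime fault. A sketch with an illustrative stand-in offset:

    #include <linux/bug.h>

    #define EXAMPLE_MAC_ADDR_OFST 36    /* stand-in for the MCDI offset */

    static inline void example_alignment_check(void)
    {
            /* Compiles to nothing when the offset is even; breaks the
             * build if it is odd and ether_addr_copy() would misbehave.
             */
            BUILD_BUG_ON(EXAMPLE_MAC_ADDR_OFST & 1);
    }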
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 91d23252f8fa..e5fc4e1574b5 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -854,8 +854,8 @@ int efx_mcdi_set_mac(struct efx_nic *efx)
854 854
855 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0); 855 BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
856 856
857 memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR), 857 ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
858 efx->net_dev->dev_addr, ETH_ALEN); 858 efx->net_dev->dev_addr);
859 859
860 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, 860 MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
861 EFX_MAX_FRAME_LEN(efx->net_dev->mtu)); 861 EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c59a903..8a400a0595eb 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
1323 return &rx_queue->buffer[index]; 1323 return &rx_queue->buffer[index];
1324} 1324}
1325 1325
1326
1327/** 1326/**
1328 * EFX_MAX_FRAME_LEN - calculate maximum frame length 1327 * EFX_MAX_FRAME_LEN - calculate maximum frame length
1329 * 1328 *
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b19e3c4..32d969e857f7 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
530 efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP); 530 efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
531 *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down; 531 *rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
532} 532}
533
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index d7a36829649a..6b861e3de4b0 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
223 * @evt_list: List of MC receive events awaiting packets 223 * @evt_list: List of MC receive events awaiting packets
224 * @evt_free_list: List of free events 224 * @evt_free_list: List of free events
225 * @evt_lock: Lock for manipulating evt_list and evt_free_list 225 * @evt_lock: Lock for manipulating evt_list and evt_free_list
226 * @evt_overflow: Boolean indicating that event list has overflowed
227 * @rx_evts: Instantiated events (on evt_list and evt_free_list) 226 * @rx_evts: Instantiated events (on evt_list and evt_free_list)
228 * @workwq: Work queue for processing pending PTP operations 227 * @workwq: Work queue for processing pending PTP operations
229 * @work: Work task 228 * @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
275 struct list_head evt_list; 274 struct list_head evt_list;
276 struct list_head evt_free_list; 275 struct list_head evt_free_list;
277 spinlock_t evt_lock; 276 spinlock_t evt_lock;
278 bool evt_overflow;
279 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS]; 277 struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
280 struct workqueue_struct *workwq; 278 struct workqueue_struct *workwq;
281 struct work_struct work; 279 struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
768 return -EAGAIN; 766 return -EAGAIN;
769 } 767 }
770 768
771 /* Convert the NIC time into kernel time. No correction is required- 769 /* Calculate delay from last good sync (host time) to last_time.
772 * this time is the output of a firmware process. 770 * It is possible that the seconds rolled over between taking
773 */
774 mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
775 ptp->timeset[last_good].minor, 0);
776
777 /* Calculate delay from actual PPS to last_time */
778 delta = ktime_to_timespec(mc_time);
779 delta.tv_nsec +=
780 last_time->ts_real.tv_nsec -
781 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
782
783 /* It is possible that the seconds rolled over between taking
784 * the start reading and the last value written by the host. The 771 * the start reading and the last value written by the host. The
785 * timescales are such that a gap of more than one second is never 772 * timescales are such that a gap of more than one second is never
786 * expected. 773 * expected. delta is *not* normalised.
787 */ 774 */
788 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS; 775 start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
789 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK; 776 last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
790 if (start_sec != last_sec) { 777 if (start_sec != last_sec &&
791 if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) { 778 ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
792 netif_warn(efx, hw, efx->net_dev, 779 netif_warn(efx, hw, efx->net_dev,
793 "PTP bad synchronisation seconds\n"); 780 "PTP bad synchronisation seconds\n");
794 return -EAGAIN; 781 return -EAGAIN;
795 } else {
796 delta.tv_sec = 1;
797 }
798 } else {
799 delta.tv_sec = 0;
800 } 782 }
783 delta.tv_sec = (last_sec - start_sec) & 1;
784 delta.tv_nsec =
785 last_time->ts_real.tv_nsec -
786 (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
787
788 /* Convert the NIC time at last good sync into kernel time.
789 * No correction is required - this time is the output of a
790 * firmware process.
791 */
792 mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
793 ptp->timeset[last_good].minor, 0);
794
795 /* Calculate delay from NIC top of second to last_time */
796 delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
801 797
798 /* Set PPS timestamp to match NIC top of second */
802 ptp->host_time_pps = *last_time; 799 ptp->host_time_pps = *last_time;
803 pps_sub_ts(&ptp->host_time_pps, delta); 800 pps_sub_ts(&ptp->host_time_pps, delta);
804 801
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
941 } 938 }
942 } 939 }
943 } 940 }
944 /* If the event overflow flag is set and the event list is now empty
945 * clear the flag to re-enable the overflow warning message.
946 */
947 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
948 ptp->evt_overflow = false;
949 spin_unlock_bh(&ptp->evt_lock); 941 spin_unlock_bh(&ptp->evt_lock);
950} 942}
951 943
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
989 break; 981 break;
990 } 982 }
991 } 983 }
992 /* If the event overflow flag is set and the event list is now empty
993 * clear the flag to re-enable the overflow warning message.
994 */
995 if (ptp->evt_overflow && list_empty(&ptp->evt_list))
996 ptp->evt_overflow = false;
997 spin_unlock_bh(&ptp->evt_lock); 984 spin_unlock_bh(&ptp->evt_lock);
998 985
999 return rc; 986 return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
1147 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) { 1134 list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
1148 list_move(cursor, &efx->ptp_data->evt_free_list); 1135 list_move(cursor, &efx->ptp_data->evt_free_list);
1149 } 1136 }
1150 ptp->evt_overflow = false;
1151 spin_unlock_bh(&efx->ptp_data->evt_lock); 1137 spin_unlock_bh(&efx->ptp_data->evt_lock);
1152 1138
1153 return rc; 1139 return rc;
@@ -1208,6 +1194,7 @@ static const struct ptp_clock_info efx_phc_clock_info = {
1208 .n_alarm = 0, 1194 .n_alarm = 0,
1209 .n_ext_ts = 0, 1195 .n_ext_ts = 0,
1210 .n_per_out = 0, 1196 .n_per_out = 0,
1197 .n_pins = 0,
1211 .pps = 1, 1198 .pps = 1,
1212 .adjfreq = efx_phc_adjfreq, 1199 .adjfreq = efx_phc_adjfreq,
1213 .adjtime = efx_phc_adjtime, 1200 .adjtime = efx_phc_adjtime,
@@ -1253,7 +1240,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
1253 spin_lock_init(&ptp->evt_lock); 1240 spin_lock_init(&ptp->evt_lock);
1254 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++) 1241 for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
1255 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list); 1242 list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
1256 ptp->evt_overflow = false;
1257 1243
1258 /* Get the NIC PTP attributes and set up time conversions */ 1244 /* Get the NIC PTP attributes and set up time conversions */
1259 rc = efx_ptp_get_attributes(efx); 1245 rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1366,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1380 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; 1366 struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
1381 u8 *match_data_012, *match_data_345; 1367 u8 *match_data_012, *match_data_345;
1382 unsigned int version; 1368 unsigned int version;
1369 u8 *data;
1383 1370
1384 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); 1371 match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
1385 1372
@@ -1388,7 +1375,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1388 if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) { 1375 if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
1389 return false; 1376 return false;
1390 } 1377 }
1391 version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); 1378 data = skb->data;
1379 version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
1392 if (version != PTP_VERSION_V1) { 1380 if (version != PTP_VERSION_V1) {
1393 return false; 1381 return false;
1394 } 1382 }
@@ -1396,13 +1384,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1396 /* PTP V1 uses all six bytes of the UUID to match the packet 1384 /* PTP V1 uses all six bytes of the UUID to match the packet
1397 * to the timestamp 1385 * to the timestamp
1398 */ 1386 */
1399 match_data_012 = skb->data + PTP_V1_UUID_OFFSET; 1387 match_data_012 = data + PTP_V1_UUID_OFFSET;
1400 match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3; 1388 match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
1401 } else { 1389 } else {
1402 if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) { 1390 if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
1403 return false; 1391 return false;
1404 } 1392 }
1405 version = skb->data[PTP_V2_VERSION_OFFSET]; 1393 data = skb->data;
1394 version = data[PTP_V2_VERSION_OFFSET];
1406 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { 1395 if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
1407 return false; 1396 return false;
1408 } 1397 }
@@ -1414,17 +1403,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1414 * enhanced mode fixes this issue and uses bytes 0-2 1403 * enhanced mode fixes this issue and uses bytes 0-2
1415 * and byte 5-7 of the UUID. 1404 * and byte 5-7 of the UUID.
1416 */ 1405 */
1417 match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5; 1406 match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
1418 if (ptp->mode == MC_CMD_PTP_MODE_V2) { 1407 if (ptp->mode == MC_CMD_PTP_MODE_V2) {
1419 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2; 1408 match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
1420 } else { 1409 } else {
1421 match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0; 1410 match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
1422 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED); 1411 BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
1423 } 1412 }
1424 } 1413 }
1425 1414
1426 /* Does this packet require timestamping? */ 1415 /* Does this packet require timestamping? */
1427 if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) { 1416 if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
1428 match->state = PTP_PACKET_STATE_UNMATCHED; 1417 match->state = PTP_PACKET_STATE_UNMATCHED;
1429 1418
1430 /* We expect the sequence number to be in the same position in 1419 /* We expect the sequence number to be in the same position in
@@ -1440,8 +1429,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
1440 (match_data_345[0] << 24)); 1429 (match_data_345[0] << 24));
1441 match->words[1] = (match_data_345[1] | 1430 match->words[1] = (match_data_345[1] |
1442 (match_data_345[2] << 8) | 1431 (match_data_345[2] << 8) |
1443 (skb->data[PTP_V1_SEQUENCE_OFFSET + 1432 (data[PTP_V1_SEQUENCE_OFFSET +
1444 PTP_V1_SEQUENCE_LENGTH - 1] << 1433 PTP_V1_SEQUENCE_LENGTH - 1] <<
1445 16)); 1434 16));
1446 } else { 1435 } else {
1447 match->state = PTP_PACKET_STATE_MATCH_UNWANTED; 1436 match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1624,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
1635 list_add_tail(&evt->link, &ptp->evt_list); 1624 list_add_tail(&evt->link, &ptp->evt_list);
1636 1625
1637 queue_work(ptp->workwq, &ptp->work); 1626 queue_work(ptp->workwq, &ptp->work);
1638 } else if (!ptp->evt_overflow) { 1627 } else if (net_ratelimit()) {
1639 /* Log a warning message and set the event overflow flag. 1628 /* Log a rate-limited warning message. */
1640 * The message won't be logged again until the event queue
1641 * becomes empty.
1642 */
1643 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n"); 1629 netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
1644 ptp->evt_overflow = true;
1645 } 1630 }
1646 spin_unlock_bh(&ptp->evt_lock); 1631 spin_unlock_bh(&ptp->evt_lock);
1647} 1632}
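
Two patterns are worth noting in the ptp.c hunks above. First, the per-device evt_overflow flag is replaced by net_ratelimit(), which throttles the warning without any extra state to reset. Second, skb->data is copied into a local only after pskb_may_pull(), because a successful pull can reallocate the header and move skb->data, so re-reading it at that point is required and caching it in a local avoids repeated loads. A sketch of the rate-limited warning:

    #include <linux/net.h>          /* net_ratelimit() */
    #include <linux/printk.h>

    /* Sketch: stateless, rate-limited overflow warning. */
    static void example_overflow_warn(void)
    {
            if (net_ratelimit())    /* bounded bursts per interval */
                    pr_err("PTP event queue overflow\n");
    }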
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 26641817a9c7..0fc5baef45b1 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -50,7 +50,7 @@ struct efx_loopback_payload {
50} __packed; 50} __packed;
51 51
52/* Loopback test source MAC address */ 52/* Loopback test source MAC address */
53static const unsigned char payload_source[ETH_ALEN] = { 53static const u8 payload_source[ETH_ALEN] __aligned(2) = {
54 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, 54 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
55}; 55};
56 56
@@ -366,8 +366,8 @@ static void efx_iterate_state(struct efx_nic *efx)
366 struct efx_loopback_payload *payload = &state->payload; 366 struct efx_loopback_payload *payload = &state->payload;
367 367
368 /* Initialise the layerII header */ 368 /* Initialise the layerII header */
369 memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); 369 ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
370 memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); 370 ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
371 payload->header.h_proto = htons(ETH_P_IP); 371 payload->header.h_proto = htons(ETH_P_IP);
372 372
373 /* saddr set later and used as incrementing count */ 373 /* saddr set later and used as incrementing count */
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 0c38f926871e..9a9205e77896 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1095,7 +1095,7 @@ static void efx_sriov_peer_work(struct work_struct *data)
1095 1095
1096 /* Fill the remaining addresses */ 1096 /* Fill the remaining addresses */
1097 list_for_each_entry(local_addr, &efx->local_addr_list, link) { 1097 list_for_each_entry(local_addr, &efx->local_addr_list, link) {
1098 memcpy(peer->mac_addr, local_addr->addr, ETH_ALEN); 1098 ether_addr_copy(peer->mac_addr, local_addr->addr);
1099 peer->tci = 0; 1099 peer->tci = 0;
1100 ++peer; 1100 ++peer;
1101 ++peer_count; 1101 ++peer_count;
@@ -1303,8 +1303,7 @@ int efx_sriov_init(struct efx_nic *efx)
1303 goto fail_vfs; 1303 goto fail_vfs;
1304 1304
1305 rtnl_lock(); 1305 rtnl_lock();
1306 memcpy(vfdi_status->peers[0].mac_addr, 1306 ether_addr_copy(vfdi_status->peers[0].mac_addr, net_dev->dev_addr);
1307 net_dev->dev_addr, ETH_ALEN);
1308 efx->vf_init_count = efx->vf_count; 1307 efx->vf_init_count = efx->vf_count;
1309 rtnl_unlock(); 1308 rtnl_unlock();
1310 1309
@@ -1452,8 +1451,8 @@ void efx_sriov_mac_address_changed(struct efx_nic *efx)
1452 1451
1453 if (!efx->vf_init_count) 1452 if (!efx->vf_init_count)
1454 return; 1453 return;
1455 memcpy(vfdi_status->peers[0].mac_addr, 1454 ether_addr_copy(vfdi_status->peers[0].mac_addr,
1456 efx->net_dev->dev_addr, ETH_ALEN); 1455 efx->net_dev->dev_addr);
1457 queue_work(vfdi_workqueue, &efx->peer_work); 1456 queue_work(vfdi_workqueue, &efx->peer_work);
1458} 1457}
1459 1458
@@ -1570,7 +1569,7 @@ int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
1570 vf = efx->vf + vf_i; 1569 vf = efx->vf + vf_i;
1571 1570
1572 mutex_lock(&vf->status_lock); 1571 mutex_lock(&vf->status_lock);
1573 memcpy(vf->addr.mac_addr, mac, ETH_ALEN); 1572 ether_addr_copy(vf->addr.mac_addr, mac);
1574 __efx_sriov_update_vf_addr(vf); 1573 __efx_sriov_update_vf_addr(vf);
1575 mutex_unlock(&vf->status_lock); 1574 mutex_unlock(&vf->status_lock);
1576 1575
@@ -1633,7 +1632,7 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
1633 vf = efx->vf + vf_i; 1632 vf = efx->vf + vf_i;
1634 1633
1635 ivi->vf = vf_i; 1634 ivi->vf = vf_i;
1636 memcpy(ivi->mac, vf->addr.mac_addr, ETH_ALEN); 1635 ether_addr_copy(ivi->mac, vf->addr.mac_addr);
1637 ivi->tx_rate = 0; 1636 ivi->tx_rate = 0;
1638 tci = ntohs(vf->addr.tci); 1637 tci = ntohs(vf->addr.tci);
1639 ivi->vlan = tci & VLAN_VID_MASK; 1638 ivi->vlan = tci & VLAN_VID_MASK;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa4eb0a..fa9475300411 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
787 * Requires TX checksum offload support. 787 * Requires TX checksum offload support.
788 */ 788 */
789 789
790/* Number of bytes inserted at the start of a TSO header buffer,
791 * similar to NET_IP_ALIGN.
792 */
793#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
794#define TSOH_OFFSET 0
795#else
796#define TSOH_OFFSET NET_IP_ALIGN
797#endif
798
799#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) 790#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
800 791
801/** 792/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
882 EFX_BUG_ON_PARANOID(buffer->flags); 873 EFX_BUG_ON_PARANOID(buffer->flags);
883 EFX_BUG_ON_PARANOID(buffer->unmap_len); 874 EFX_BUG_ON_PARANOID(buffer->unmap_len);
884 875
885 if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) { 876 if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
886 unsigned index = 877 unsigned index =
887 (tx_queue->insert_count & tx_queue->ptr_mask) / 2; 878 (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
888 struct efx_buffer *page_buf = 879 struct efx_buffer *page_buf =
889 &tx_queue->tsoh_page[index / TSOH_PER_PAGE]; 880 &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
890 unsigned offset = 881 unsigned offset =
891 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; 882 TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
892 883
893 if (unlikely(!page_buf->addr) && 884 if (unlikely(!page_buf->addr) &&
894 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, 885 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
901 } else { 892 } else {
902 tx_queue->tso_long_headers++; 893 tx_queue->tso_long_headers++;
903 894
904 buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC); 895 buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
905 if (unlikely(!buffer->heap_buf)) 896 if (unlikely(!buffer->heap_buf))
906 return NULL; 897 return NULL;
907 result = (u8 *)buffer->heap_buf + TSOH_OFFSET; 898 result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
908 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP; 899 buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
909 } 900 }
910 901
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
1011static int tso_start(struct tso_state *st, struct efx_nic *efx, 1002static int tso_start(struct tso_state *st, struct efx_nic *efx,
1012 const struct sk_buff *skb) 1003 const struct sk_buff *skb)
1013{ 1004{
1014 bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0; 1005 bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
1015 struct device *dma_dev = &efx->pci_dev->dev; 1006 struct device *dma_dev = &efx->pci_dev->dev;
1016 unsigned int header_len, in_len; 1007 unsigned int header_len, in_len;
1017 dma_addr_t dma_addr; 1008 dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
1037 1028
1038 st->out_len = skb->len - header_len; 1029 st->out_len = skb->len - header_len;
1039 1030
1040 if (!use_options) { 1031 if (!use_opt_desc) {
1041 st->header_unmap_len = 0; 1032 st->header_unmap_len = 0;
1042 1033
1043 if (likely(in_len == 0)) { 1034 if (likely(in_len == 0)) {
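
Dropping TSOH_OFFSET appears to rely on NET_IP_ALIGN already being defined as 0 on architectures that have efficient unaligned access (x86, for example), so the driver-local #ifdef duplicated what the core headers express. A sketch of the simplified sizing, with an illustrative buffer size:

    #include <linux/skbuff.h>       /* NET_IP_ALIGN */

    #define EXAMPLE_TSOH_STD_SIZE 128   /* illustrative */

    static unsigned int example_tsoh_room(void)
    {
            /* NET_IP_ALIGN is 0 where unaligned loads are cheap and
             * typically 2 elsewhere, so no driver-local TSOH_OFFSET
             * wrapper is needed.
             */
            return EXAMPLE_TSOH_STD_SIZE - NET_IP_ALIGN;
    }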
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 5eb933c97bba..7daa7d433099 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -987,7 +987,7 @@ out_unlock:
987 spin_unlock(&priv->lock); 987 spin_unlock(&priv->lock);
988 988
989out: 989out:
990 dev_kfree_skb(skb); 990 dev_consume_skb_any(skb);
991 991
992 return NETDEV_TX_OK; 992 return NETDEV_TX_OK;
993} 993}
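
The skb-free conversions running through the rest of this series follow one rule: in paths that may execute in hard-IRQ context, use the *_any() variants, and distinguish a successfully transmitted skb (dev_consume_skb_any) from a dropped one (dev_kfree_skb_any) so drop-monitoring tools only see real losses. Sketch:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch: freeing an skb correctly from any context. */
    static void example_tx_done(struct sk_buff *skb, bool sent_ok)
    {
            if (sent_ok)
                    dev_consume_skb_any(skb);   /* not a drop */
            else
                    dev_kfree_skb_any(skb);     /* counted as a drop */
    }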
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ff57a46388ee..6072f093e6b4 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1614,7 +1614,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1614 skb->data, skb->len, PCI_DMA_TODEVICE); 1614 skb->data, skb->len, PCI_DMA_TODEVICE);
1615 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev, 1615 if (unlikely(pci_dma_mapping_error(sis_priv->pci_dev,
1616 sis_priv->tx_ring[entry].bufptr))) { 1616 sis_priv->tx_ring[entry].bufptr))) {
1617 dev_kfree_skb(skb); 1617 dev_kfree_skb_any(skb);
1618 sis_priv->tx_skbuff[entry] = NULL; 1618 sis_priv->tx_skbuff[entry] = NULL;
1619 net_dev->stats.tx_dropped++; 1619 net_dev->stats.tx_dropped++;
1620 spin_unlock_irqrestore(&sis_priv->lock, flags); 1620 spin_unlock_irqrestore(&sis_priv->lock, flags);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index c50fb08c9905..66b05e62f70a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -551,7 +551,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
551 dev->stats.tx_errors++; 551 dev->stats.tx_errors++;
552 dev->stats.tx_dropped++; 552 dev->stats.tx_dropped++;
553 spin_unlock_irqrestore(&lp->lock, flags); 553 spin_unlock_irqrestore(&lp->lock, flags);
554 dev_kfree_skb(skb); 554 dev_kfree_skb_any(skb);
555 return NETDEV_TX_OK; 555 return NETDEV_TX_OK;
556 } 556 }
557 557
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 839c0e6cca01..d1b4dca53a9d 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -621,7 +621,7 @@ static void smc_hardware_send_pkt(unsigned long data)
621done: if (!THROTTLE_TX_PKTS) 621done: if (!THROTTLE_TX_PKTS)
622 netif_wake_queue(dev); 622 netif_wake_queue(dev);
623 623
624 dev_kfree_skb(skb); 624 dev_consume_skb_any(skb);
625} 625}
626 626
627/* 627/*
@@ -657,7 +657,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
657 netdev_warn(dev, "Far too big packet error.\n"); 657 netdev_warn(dev, "Far too big packet error.\n");
658 dev->stats.tx_errors++; 658 dev->stats.tx_errors++;
659 dev->stats.tx_dropped++; 659 dev->stats.tx_dropped++;
660 dev_kfree_skb(skb); 660 dev_kfree_skb_any(skb);
661 return NETDEV_TX_OK; 661 return NETDEV_TX_OK;
662 } 662 }
663 663
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 6382b7c416f4..ed36ff48af57 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -439,7 +439,7 @@ static int smsc911x_request_resources(struct platform_device *pdev)
439 /* Request clock */ 439 /* Request clock */
440 pdata->clk = clk_get(&pdev->dev, NULL); 440 pdata->clk = clk_get(&pdev->dev, NULL);
441 if (IS_ERR(pdata->clk)) 441 if (IS_ERR(pdata->clk))
442 netdev_warn(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk)); 442 netdev_dbg(ndev, "couldn't get clock %li\n", PTR_ERR(pdata->clk));
443 443
444 return ret; 444 return ret;
445} 445}
@@ -1672,7 +1672,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1672 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz); 1672 pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
1673 freespace -= (skb->len + 32); 1673 freespace -= (skb->len + 32);
1674 skb_tx_timestamp(skb); 1674 skb_tx_timestamp(skb);
1675 dev_kfree_skb(skb); 1675 dev_consume_skb_any(skb);
1676 1676
1677 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) 1677 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
1678 smsc911x_tx_update_txcounters(dev); 1678 smsc911x_tx_update_txcounters(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8543e1cfd55e..d940034acdd4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1303,7 +1303,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1303 priv->hw->mode->clean_desc3(priv, p); 1303 priv->hw->mode->clean_desc3(priv, p);
1304 1304
1305 if (likely(skb != NULL)) { 1305 if (likely(skb != NULL)) {
1306 dev_kfree_skb(skb); 1306 dev_consume_skb_any(skb);
1307 priv->tx_skbuff[entry] = NULL; 1307 priv->tx_skbuff[entry] = NULL;
1308 } 1308 }
1309 1309
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 7680581ebe12..b7ad3565566c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -164,6 +164,7 @@ static struct ptp_clock_info stmmac_ptp_clock_ops = {
164 .n_alarm = 0, 164 .n_alarm = 0,
165 .n_ext_ts = 0, 165 .n_ext_ts = 0,
166 .n_per_out = 0, 166 .n_per_out = 0,
167 .n_pins = 0,
167 .pps = 0, 168 .pps = 0,
168 .adjfreq = stmmac_adjust_freq, 169 .adjfreq = stmmac_adjust_freq,
169 .adjtime = stmmac_adjust_time, 170 .adjtime = stmmac_adjust_time,
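
The .n_pins = 0 initialisers added here and in the other PTP drivers in this series exist because struct ptp_clock_info grew an n_pins field along with the programmable-pin API; clocks with no pins now state that explicitly. A sketch of the capability block, callbacks omitted and values illustrative:

    #include <linux/module.h>
    #include <linux/ptp_clock_kernel.h>

    /* Sketch: capability block for a clock with no programmable pins. */
    static struct ptp_clock_info example_caps = {
            .owner          = THIS_MODULE,
            .name           = "example clock",
            .max_adj        = 1000000,
            .n_alarm        = 0,
            .n_ext_ts       = 0,
            .n_per_out      = 0,
            .n_pins         = 0,    /* no pin-function programming */
            .pps            = 0,
            /* .adjfreq/.adjtime/.gettime/.settime/.enable omitted */
    };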
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e1f260..79606f47a08e 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9041 struct msix_entry msi_vec[NIU_NUM_LDG]; 9041 struct msix_entry msi_vec[NIU_NUM_LDG];
9042 struct niu_parent *parent = np->parent; 9042 struct niu_parent *parent = np->parent;
9043 struct pci_dev *pdev = np->pdev; 9043 struct pci_dev *pdev = np->pdev;
9044 int i, num_irqs, err; 9044 int i, num_irqs;
9045 u8 first_ldg; 9045 u8 first_ldg;
9046 9046
9047 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port; 9047 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
9053 (np->port == 0 ? 3 : 1)); 9053 (np->port == 0 ? 3 : 1));
9054 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports)); 9054 BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
9055 9055
9056retry:
9057 for (i = 0; i < num_irqs; i++) { 9056 for (i = 0; i < num_irqs; i++) {
9058 msi_vec[i].vector = 0; 9057 msi_vec[i].vector = 0;
9059 msi_vec[i].entry = i; 9058 msi_vec[i].entry = i;
9060 } 9059 }
9061 9060
9062 err = pci_enable_msix(pdev, msi_vec, num_irqs); 9061 num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
9063 if (err < 0) { 9062 if (num_irqs < 0) {
9064 np->flags &= ~NIU_FLAGS_MSIX; 9063 np->flags &= ~NIU_FLAGS_MSIX;
9065 return; 9064 return;
9066 } 9065 }
9067 if (err > 0) {
9068 num_irqs = err;
9069 goto retry;
9070 }
9071 9066
9072 np->flags |= NIU_FLAGS_MSIX; 9067 np->flags |= NIU_FLAGS_MSIX;
9073 for (i = 0; i < num_irqs; i++) 9068 for (i = 0; i < num_irqs; i++)
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index c2799dc46325..102a66fc54a2 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -688,7 +688,7 @@ static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_st
688 } 688 }
689 689
690 dev->stats.tx_packets++; 690 dev->stats.tx_packets++;
691 dev_kfree_skb(skb); 691 dev_consume_skb_any(skb);
692 } 692 }
693 gp->tx_old = entry; 693 gp->tx_old = entry;
694 694
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7d6d8ec676c8..5d5fec6c4eb0 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -378,7 +378,6 @@ struct cpsw_priv {
378 u32 version; 378 u32 version;
379 u32 coal_intvl; 379 u32 coal_intvl;
380 u32 bus_freq_mhz; 380 u32 bus_freq_mhz;
381 struct net_device_stats stats;
382 int rx_packet_max; 381 int rx_packet_max;
383 int host_port; 382 int host_port;
384 struct clk *clk; 383 struct clk *clk;
@@ -673,8 +672,8 @@ static void cpsw_tx_handler(void *token, int len, int status)
673 if (unlikely(netif_queue_stopped(ndev))) 672 if (unlikely(netif_queue_stopped(ndev)))
674 netif_wake_queue(ndev); 673 netif_wake_queue(ndev);
675 cpts_tx_timestamp(priv->cpts, skb); 674 cpts_tx_timestamp(priv->cpts, skb);
676 priv->stats.tx_packets++; 675 ndev->stats.tx_packets++;
677 priv->stats.tx_bytes += len; 676 ndev->stats.tx_bytes += len;
678 dev_kfree_skb_any(skb); 677 dev_kfree_skb_any(skb);
679} 678}
680 679
@@ -700,10 +699,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
700 cpts_rx_timestamp(priv->cpts, skb); 699 cpts_rx_timestamp(priv->cpts, skb);
701 skb->protocol = eth_type_trans(skb, ndev); 700 skb->protocol = eth_type_trans(skb, ndev);
702 netif_receive_skb(skb); 701 netif_receive_skb(skb);
703 priv->stats.rx_bytes += len; 702 ndev->stats.rx_bytes += len;
704 priv->stats.rx_packets++; 703 ndev->stats.rx_packets++;
705 } else { 704 } else {
706 priv->stats.rx_dropped++; 705 ndev->stats.rx_dropped++;
707 new_skb = skb; 706 new_skb = skb;
708 } 707 }
709 708
@@ -1313,7 +1312,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1313 1312
1314 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { 1313 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
1315 cpsw_err(priv, tx_err, "packet pad failed\n"); 1314 cpsw_err(priv, tx_err, "packet pad failed\n");
1316 priv->stats.tx_dropped++; 1315 ndev->stats.tx_dropped++;
1317 return NETDEV_TX_OK; 1316 return NETDEV_TX_OK;
1318 } 1317 }
1319 1318
@@ -1337,7 +1336,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1337 1336
1338 return NETDEV_TX_OK; 1337 return NETDEV_TX_OK;
1339fail: 1338fail:
1340 priv->stats.tx_dropped++; 1339 ndev->stats.tx_dropped++;
1341 netif_stop_queue(ndev); 1340 netif_stop_queue(ndev);
1342 return NETDEV_TX_BUSY; 1341 return NETDEV_TX_BUSY;
1343} 1342}
@@ -1477,7 +1476,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
1477static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 1476static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1478{ 1477{
1479 struct cpsw_priv *priv = netdev_priv(dev); 1478 struct cpsw_priv *priv = netdev_priv(dev);
1480 struct mii_ioctl_data *data = if_mii(req);
1481 int slave_no = cpsw_slave_index(priv); 1479 int slave_no = cpsw_slave_index(priv);
1482 1480
1483 if (!netif_running(dev)) 1481 if (!netif_running(dev))
@@ -1490,14 +1488,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1490 case SIOCGHWTSTAMP: 1488 case SIOCGHWTSTAMP:
1491 return cpsw_hwtstamp_get(dev, req); 1489 return cpsw_hwtstamp_get(dev, req);
1492#endif 1490#endif
1493 case SIOCGMIIPHY:
1494 data->phy_id = priv->slaves[slave_no].phy->addr;
1495 break;
1496 default:
1497 return -ENOTSUPP;
1498 } 1491 }
1499 1492
1500 return 0; 1493 if (!priv->slaves[slave_no].phy)
1494 return -EOPNOTSUPP;
1495 return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
1501} 1496}
1502 1497
1503static void cpsw_ndo_tx_timeout(struct net_device *ndev) 1498static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1505,7 +1500,7 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
1505 struct cpsw_priv *priv = netdev_priv(ndev); 1500 struct cpsw_priv *priv = netdev_priv(ndev);
1506 1501
1507 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n"); 1502 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
1508 priv->stats.tx_errors++; 1503 ndev->stats.tx_errors++;
1509 cpsw_intr_disable(priv); 1504 cpsw_intr_disable(priv);
1510 cpdma_ctlr_int_ctrl(priv->dma, false); 1505 cpdma_ctlr_int_ctrl(priv->dma, false);
1511 cpdma_chan_stop(priv->txch); 1506 cpdma_chan_stop(priv->txch);
@@ -1544,12 +1539,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
1544 return 0; 1539 return 0;
1545} 1540}
1546 1541
1547static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
1548{
1549 struct cpsw_priv *priv = netdev_priv(ndev);
1550 return &priv->stats;
1551}
1552
1553#ifdef CONFIG_NET_POLL_CONTROLLER 1542#ifdef CONFIG_NET_POLL_CONTROLLER
1554static void cpsw_ndo_poll_controller(struct net_device *ndev) 1543static void cpsw_ndo_poll_controller(struct net_device *ndev)
1555{ 1544{
@@ -1642,7 +1631,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
1642 .ndo_validate_addr = eth_validate_addr, 1631 .ndo_validate_addr = eth_validate_addr,
1643 .ndo_change_mtu = eth_change_mtu, 1632 .ndo_change_mtu = eth_change_mtu,
1644 .ndo_tx_timeout = cpsw_ndo_tx_timeout, 1633 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
1645 .ndo_get_stats = cpsw_ndo_get_stats,
1646 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, 1634 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
1647#ifdef CONFIG_NET_POLL_CONTROLLER 1635#ifdef CONFIG_NET_POLL_CONTROLLER
1648 .ndo_poll_controller = cpsw_ndo_poll_controller, 1636 .ndo_poll_controller = cpsw_ndo_poll_controller,
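
Two cleanups in the cpsw hunks: struct net_device already embeds a net_device_stats that dev_get_stats() falls back to when no ndo_get_stats hook is installed, so the private copy and the hook both go away; and MII ioctls are delegated to phylib instead of being partially reimplemented in the driver. A sketch, where the phydev lookup is illustrative (cpsw actually fetches it from its slave array):

    #include <linux/netdevice.h>
    #include <linux/phy.h>

    static void example_tx_timeout(struct net_device *ndev)
    {
            /* The core returns &ndev->stats when no ndo_get_stats
             * hook is set, so drivers can count here directly.
             */
            ndev->stats.tx_errors++;
    }

    static int example_ioctl(struct net_device *ndev, struct ifreq *req,
                             int cmd)
    {
            struct phy_device *phydev = ndev->phydev;   /* illustrative */

            if (!phydev)
                    return -EOPNOTSUPP;
            /* Covers SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG. */
            return phy_mii_ioctl(phydev, req, cmd);
    }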
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 8c351f100aca..372cb192c5aa 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -217,6 +217,7 @@ static struct ptp_clock_info cpts_info = {
217 .name = "CTPS timer", 217 .name = "CTPS timer",
218 .max_adj = 1000000, 218 .max_adj = 1000000,
219 .n_ext_ts = 0, 219 .n_ext_ts = 0,
220 .n_pins = 0,
220 .pps = 0, 221 .pps = 0,
221 .adjfreq = cpts_ptp_adjfreq, 222 .adjfreq = cpts_ptp_adjfreq,
222 .adjtime = cpts_ptp_adjtime, 223 .adjtime = cpts_ptp_adjtime,
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 17503da9f7a5..7e1c91d41a87 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
659 struct info_mpipe *info_mpipe = 659 struct info_mpipe *info_mpipe =
660 container_of(napi, struct info_mpipe, napi); 660 container_of(napi, struct info_mpipe, napi);
661 661
662 if (budget <= 0)
663 goto done;
664
662 instance = info_mpipe->instance; 665 instance = info_mpipe->instance;
663 while ((n = gxio_mpipe_iqueue_try_peek( 666 while ((n = gxio_mpipe_iqueue_try_peek(
664 &info_mpipe->iqueue, 667 &info_mpipe->iqueue,
@@ -870,6 +873,7 @@ static struct ptp_clock_info ptp_mpipe_caps = {
870 .name = "mPIPE clock", 873 .name = "mPIPE clock",
871 .max_adj = 999999999, 874 .max_adj = 999999999,
872 .n_ext_ts = 0, 875 .n_ext_ts = 0,
876 .n_pins = 0,
873 .pps = 0, 877 .pps = 0,
874 .adjfreq = ptp_mpipe_adjfreq, 878 .adjfreq = ptp_mpipe_adjfreq,
875 .adjtime = ptp_mpipe_adjtime, 879 .adjtime = ptp_mpipe_adjtime,
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index edb2e12a0fe2..e5a5c5d4ce0c 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
831 831
832 unsigned int work = 0; 832 unsigned int work = 0;
833 833
834 if (budget <= 0)
835 goto done;
836
834 while (priv->active) { 837 while (priv->active) {
835 int index = qup->__packet_receive_read; 838 int index = qup->__packet_receive_read;
836 if (index == qsp->__packet_receive_queue.__packet_write) 839 if (index == qsp->__packet_receive_queue.__packet_write)
@@ -1821,7 +1824,7 @@ busy:
1821 1824
1822 /* Handle completions. */ 1825 /* Handle completions. */
1823 for (i = 0; i < nolds; i++) 1826 for (i = 0; i < nolds; i++)
1824 kfree_skb(olds[i]); 1827 dev_consume_skb_any(olds[i]);
1825 1828
1826 /* Update stats. */ 1829 /* Update stats. */
1827 u64_stats_update_begin(&stats->syncp); 1830 u64_stats_update_begin(&stats->syncp);
@@ -2005,7 +2008,7 @@ busy:
2005 2008
2006 /* Handle completions. */ 2009 /* Handle completions. */
2007 for (i = 0; i < nolds; i++) 2010 for (i = 0; i < nolds; i++)
2008 kfree_skb(olds[i]); 2011 dev_consume_skb_any(olds[i]);
2009 2012
2010 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ 2013 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2011 u64_stats_update_begin(&stats->syncp); 2014 u64_stats_update_begin(&stats->syncp);
@@ -2068,14 +2071,14 @@ static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
2068 cpu_stats = &priv->cpu[i]->stats; 2071 cpu_stats = &priv->cpu[i]->stats;
2069 2072
2070 do { 2073 do {
2071 start = u64_stats_fetch_begin_bh(&cpu_stats->syncp); 2074 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2072 trx_packets = cpu_stats->rx_packets; 2075 trx_packets = cpu_stats->rx_packets;
2073 ttx_packets = cpu_stats->tx_packets; 2076 ttx_packets = cpu_stats->tx_packets;
2074 trx_bytes = cpu_stats->rx_bytes; 2077 trx_bytes = cpu_stats->rx_bytes;
2075 ttx_bytes = cpu_stats->tx_bytes; 2078 ttx_bytes = cpu_stats->tx_bytes;
2076 trx_errors = cpu_stats->rx_errors; 2079 trx_errors = cpu_stats->rx_errors;
2077 trx_dropped = cpu_stats->rx_dropped; 2080 trx_dropped = cpu_stats->rx_dropped;
2078 } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start)); 2081 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2079 2082
2080 rx_packets += trx_packets; 2083 rx_packets += trx_packets;
2081 tx_packets += ttx_packets; 2084 tx_packets += ttx_packets;
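
The u64_stats_fetch_begin_bh()/retry_bh() readers here and in via-rhine below become the _irq variants. The semantics are the same seqcount-style retry loop: free on 64-bit, and on 32-bit the read repeats if a writer updated the counters in between. Sketch with an illustrative stats struct:

    #include <linux/u64_stats_sync.h>

    struct example_pcpu_stats {
            u64 rx_packets;
            u64 rx_bytes;
            struct u64_stats_sync syncp;
    };

    /* Sketch: tear-free read of writer-updated 64-bit counters. */
    static void example_read(struct example_pcpu_stats *s,
                             u64 *packets, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *packets = s->rx_packets;
                    *bytes   = s->rx_bytes;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }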
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 3f4a32e39d27..0282d0161859 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -860,7 +860,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
860 if (skb) { 860 if (skb) {
861 pci_unmap_single(card->pdev, buf_addr, skb->len, 861 pci_unmap_single(card->pdev, buf_addr, skb->len,
862 PCI_DMA_TODEVICE); 862 PCI_DMA_TODEVICE);
863 dev_kfree_skb(skb); 863 dev_consume_skb_any(skb);
864 } 864 }
865 } 865 }
866 return 0; 866 return 0;
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 88e9c73cebc0..fef5573dbfca 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1645 int received = 0, handled; 1645 int received = 0, handled;
1646 u32 status; 1646 u32 status;
1647 1647
1648 if (budget <= 0)
1649 return received;
1650
1648 spin_lock(&lp->rx_lock); 1651 spin_lock(&lp->rx_lock);
1649 status = tc_readl(&tr->Int_Src); 1652 status = tc_readl(&tr->Int_Src);
1650 do { 1653 do {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 6ac20a6738f4..f61dc2b72bb2 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1022,7 +1022,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1022 1022
1023 /* The chip-specific entries in the device structure. */ 1023 /* The chip-specific entries in the device structure. */
1024 dev->netdev_ops = &rhine_netdev_ops; 1024 dev->netdev_ops = &rhine_netdev_ops;
1025 dev->ethtool_ops = &netdev_ethtool_ops, 1025 dev->ethtool_ops = &netdev_ethtool_ops;
1026 dev->watchdog_timeo = TX_TIMEOUT; 1026 dev->watchdog_timeo = TX_TIMEOUT;
1027 1027
1028 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64); 1028 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
@@ -1678,7 +1678,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1678 /* Must use alignment buffer. */ 1678 /* Must use alignment buffer. */
1679 if (skb->len > PKT_BUF_SZ) { 1679 if (skb->len > PKT_BUF_SZ) {
1680 /* packet too long, drop it */ 1680 /* packet too long, drop it */
1681 dev_kfree_skb(skb); 1681 dev_kfree_skb_any(skb);
1682 rp->tx_skbuff[entry] = NULL; 1682 rp->tx_skbuff[entry] = NULL;
1683 dev->stats.tx_dropped++; 1683 dev->stats.tx_dropped++;
1684 return NETDEV_TX_OK; 1684 return NETDEV_TX_OK;
@@ -1698,7 +1698,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1698 pci_map_single(rp->pdev, skb->data, skb->len, 1698 pci_map_single(rp->pdev, skb->data, skb->len,
1699 PCI_DMA_TODEVICE); 1699 PCI_DMA_TODEVICE);
1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) { 1700 if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
1701 dev_kfree_skb(skb); 1701 dev_kfree_skb_any(skb);
1702 rp->tx_skbuff_dma[entry] = 0; 1702 rp->tx_skbuff_dma[entry] = 0;
1703 dev->stats.tx_dropped++; 1703 dev->stats.tx_dropped++;
1704 return NETDEV_TX_OK; 1704 return NETDEV_TX_OK;
@@ -1836,7 +1836,7 @@ static void rhine_tx(struct net_device *dev)
1836 rp->tx_skbuff[entry]->len, 1836 rp->tx_skbuff[entry]->len,
1837 PCI_DMA_TODEVICE); 1837 PCI_DMA_TODEVICE);
1838 } 1838 }
1839 dev_kfree_skb(rp->tx_skbuff[entry]); 1839 dev_consume_skb_any(rp->tx_skbuff[entry]);
1840 rp->tx_skbuff[entry] = NULL; 1840 rp->tx_skbuff[entry] = NULL;
1841 entry = (++rp->dirty_tx) % TX_RING_SIZE; 1841 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1842 } 1842 }
@@ -2072,16 +2072,16 @@ rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2072 netdev_stats_to_stats64(stats, &dev->stats); 2072 netdev_stats_to_stats64(stats, &dev->stats);
2073 2073
2074 do { 2074 do {
2075 start = u64_stats_fetch_begin_bh(&rp->rx_stats.syncp); 2075 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2076 stats->rx_packets = rp->rx_stats.packets; 2076 stats->rx_packets = rp->rx_stats.packets;
2077 stats->rx_bytes = rp->rx_stats.bytes; 2077 stats->rx_bytes = rp->rx_stats.bytes;
2078 } while (u64_stats_fetch_retry_bh(&rp->rx_stats.syncp, start)); 2078 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2079 2079
2080 do { 2080 do {
2081 start = u64_stats_fetch_begin_bh(&rp->tx_stats.syncp); 2081 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2082 stats->tx_packets = rp->tx_stats.packets; 2082 stats->tx_packets = rp->tx_stats.packets;
2083 stats->tx_bytes = rp->tx_stats.bytes; 2083 stats->tx_bytes = rp->tx_stats.bytes;
2084 } while (u64_stats_fetch_retry_bh(&rp->tx_stats.syncp, start)); 2084 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2085 2085
2086 return stats; 2086 return stats;
2087} 2087}
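
The one-character via-rhine fix above deserves a note: the original line ended in a comma, which the comma operator turns into legal C, so the typo compiled silently. A tiny illustration:

    /* Both statements compile; the first hides a typo. */
    int example(void)
    {
            int a, b;

            a = 1,          /* comma operator: evaluate a = 1, then... */
            b = 2;          /* ...b = 2; the pair is one expression */
            return a + b;   /* 3 either way, but the intent is unclear */
    }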
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ad61d26a44f3..de08e86db209 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2565,7 +2565,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2565 /* The hardware can handle at most 7 memory segments, so merge 2565 /* The hardware can handle at most 7 memory segments, so merge
2566 * the skb if there are more */ 2566 * the skb if there are more */
2567 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) { 2567 if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2568 kfree_skb(skb); 2568 dev_kfree_skb_any(skb);
2569 return NETDEV_TX_OK; 2569 return NETDEV_TX_OK;
2570 } 2570 }
2571 2571
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0df36c6ec7f4..104d46f37969 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
 	if (!mem)
 		return -ENXIO;
 	mem_size = resource_size(mem);
-	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
-		return -EBUSY;
-	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
-	if (!priv->base)
-		return -EBUSY;
+
+	priv->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
 
 	spin_lock_init(&priv->reg_lock);
 	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
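
devm_ioremap_resource() collapses the devm_request_mem_region()/devm_ioremap() pair above into a single call that also validates the resource (including a NULL one) and reports the specific failure via ERR_PTR rather than a blanket -EBUSY. A probe-time sketch of the idiom (foo_probe is hypothetical):

#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);	/* NULL mem handled */
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EINVAL, -EBUSY or -ENOMEM */

	/* ... use base; the region and mapping are device-managed ... */
	return 0;
}
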
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 71c27b3292f1..1f33c4c86c20 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
 	if (!mem)
 		return -ENXIO;
 	mem_size = resource_size(mem);
-	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
-		return -EBUSY;
-	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
-	if (!priv->base)
-		return -EBUSY;
+
+	priv->base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
 
 	spin_lock_init(&priv->reg_lock);
 	priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index a4347508031c..fa193c4688da 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -771,8 +771,8 @@ static void ll_temac_recv(struct net_device *ndev)
 
 	/* if we're doing rx csum offload, set it up */
 	if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
-	    (skb->protocol == __constant_htons(ETH_P_IP)) &&
+	    (skb->protocol == htons(ETH_P_IP)) &&
 	    (skb->len > 64)) {
 
 		skb->csum = cur_p->app3 & 0xFFFF;
 		skb->ip_summed = CHECKSUM_COMPLETE;
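
__constant_htons() is an internal byteorder helper; htons() already folds to a compile-time constant when given one (checkpatch flags the old spelling), so the plain form is equivalent and preferred. A sketch of the comparison idiom (foo_is_ipv4 is hypothetical):

#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* htons() on the constant ETH_P_IP is evaluated at compile time, so
 * this generates the same code as the old __constant_htons() spelling.
 */
static bool foo_is_ipv4(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP);
}
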
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4bfdf8c7ada0..7b0a73556264 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -756,7 +756,7 @@ static void axienet_recv(struct net_device *ndev)
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		}
 	} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
-		   skb->protocol == __constant_htons(ETH_P_IP) &&
+		   skb->protocol == htons(ETH_P_IP) &&
 		   skb->len > 64) {
 		skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
 		skb->ip_summed = CHECKSUM_COMPLETE;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 36052b98b3fc..58756617644f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1037,7 +1037,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 	skb_tx_timestamp(new_skb);
 
 	dev->stats.tx_bytes += len;
-	dev_kfree_skb(new_skb);
+	dev_consume_skb_any(new_skb);
 
 	return 0;
 }