author    Mark Brown <broonie@kernel.org>    2014-10-20 12:55:07 -0400
committer Mark Brown <broonie@kernel.org>    2014-10-20 13:27:32 -0400
commit    b7a40242c82cd73cfcea305f23e67d068dd8401a (patch)
tree      251b49d19cd7c371847ae1f951e1b537ca0e1c15 /drivers/net
parent    d26833bfce5e56017bea9f1f50838f20e18e7b7e (diff)
parent    9c6de47d53a3ce8df1642ae67823688eb98a190a (diff)
Merge branch 'fix/dw' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into spi-dw
Conflicts: drivers/spi/spi-dw-mid.c
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 19
-rw-r--r--  drivers/net/can/at91_can.c | 8
-rw-r--r--  drivers/net/can/c_can/c_can_platform.c | 6
-rw-r--r--  drivers/net/can/flexcan.c | 62
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 6
-rw-r--r--  drivers/net/can/sja1000/sja1000.c | 62
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 50
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 86
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.h | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 209
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 7
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-main.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h | 4
-rw-r--r--  drivers/net/ethernet/apm/xgene/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 20
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 53
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 74
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | 5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 149
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 143
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 36
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 1
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 7
-rw-r--r--  drivers/net/ethernet/calxeda/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 93
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 79
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 3
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 33
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 18
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 19
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 21
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 44
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 38
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 34
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 3
-rw-r--r--  drivers/net/ethernet/octeon/octeon_mgmt.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 35
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 57
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 67
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 142
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet.c | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 52
-rw-r--r--  drivers/net/fddi/skfp/h/skfbi.h | 5
-rw-r--r--  drivers/net/macvlan.c | 14
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 42
-rw-r--r--  drivers/net/phy/micrel.c | 3
-rw-r--r--  drivers/net/phy/phy.c | 18
-rw-r--r--  drivers/net/phy/smsc.c | 33
-rw-r--r--  drivers/net/usb/r8152.c | 62
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 15
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r--  drivers/net/vxlan.c | 8
-rw-r--r--  drivers/net/wireless/at76c50x-usb.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath9k/common-beacon.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/spectral.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/Kconfig | 10
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/Makefile | 10
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/bcdc.h | 7
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.c | 12
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/fweh.h | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h | 11
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 11
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/power.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/rxon.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-config.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/coex.c | 9
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mac80211.c | 25
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/power.c | 5
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sf.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c | 8
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/drv.c | 7
-rw-r--r--  drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/net/xen-netback/interface.c | 6
129 files changed, 1662 insertions(+), 890 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index f0f5eab0fab1..798ae69fb63c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -175,7 +175,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
175 "the same MAC; 0 for none (default), " 175 "the same MAC; 0 for none (default), "
176 "1 for active, 2 for follow"); 176 "1 for active, 2 for follow");
177module_param(all_slaves_active, int, 0); 177module_param(all_slaves_active, int, 0);
178MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" 178MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
179 "by setting active flag for all slaves; " 179 "by setting active flag for all slaves; "
180 "0 for never (default), 1 for always."); 180 "0 for never (default), 1 for always.");
181module_param(resend_igmp, int, 0); 181module_param(resend_igmp, int, 0);
@@ -3659,8 +3659,14 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
 	else
 		bond_xmit_slave_id(bond, skb, 0);
 	} else {
-		slave_id = bond_rr_gen_slave_id(bond);
-		bond_xmit_slave_id(bond, skb, slave_id % bond->slave_cnt);
+		int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
+
+		if (likely(slave_cnt)) {
+			slave_id = bond_rr_gen_slave_id(bond);
+			bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
+		} else {
+			dev_kfree_skb_any(skb);
+		}
 	}
 
 	return NETDEV_TX_OK;
@@ -3691,8 +3697,13 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
+	int slave_cnt = ACCESS_ONCE(bond->slave_cnt);
 
-	bond_xmit_slave_id(bond, skb, bond_xmit_hash(bond, skb) % bond->slave_cnt);
+	if (likely(slave_cnt))
+		bond_xmit_slave_id(bond, skb,
+				   bond_xmit_hash(bond, skb) % slave_cnt);
+	else
+		dev_kfree_skb_any(skb);
 
 	return NETDEV_TX_OK;
 }
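
[Editor's note] Both bonding hunks above apply the same fix: bond->slave_cnt can drop to zero while slaves are being removed, so using it directly as a modulo divisor races with unenslavement and can divide by zero. The patch snapshots the count once with ACCESS_ONCE() and drops the packet when the snapshot is zero. A minimal sketch of the pattern, with hypothetical names, not the bonding code itself:

	/* Snapshot a concurrently-updated counter once, then act on the copy. */
	int cnt = ACCESS_ONCE(obj->count);	/* single read, not re-fetched */

	if (likely(cnt))
		transmit_on(hash % cnt);	/* cnt is stable from here on */
	else
		dev_kfree_skb_any(skb);		/* nothing to send to; avoids % 0 */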
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f07fa89b5fd5..05e1aa090add 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1123,7 +1123,9 @@ static int at91_open(struct net_device *dev)
 	struct at91_priv *priv = netdev_priv(dev);
 	int err;
 
-	clk_enable(priv->clk);
+	err = clk_prepare_enable(priv->clk);
+	if (err)
+		return err;
 
 	/* check or determine and set bittime */
 	err = open_candev(dev);
@@ -1149,7 +1151,7 @@ static int at91_open(struct net_device *dev)
 out_close:
 	close_candev(dev);
 out:
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	return err;
 }
@@ -1166,7 +1168,7 @@ static int at91_close(struct net_device *dev)
 	at91_chip_stop(dev, CAN_STATE_STOPPED);
 
 	free_irq(dev->irq, dev);
-	clk_disable(priv->clk);
+	clk_disable_unprepare(priv->clk);
 
 	close_candev(dev);
 
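
[Editor's note] at91_can previously called clk_enable() without preparing the clock or checking for failure. With the common clock framework, clk_prepare_enable() performs both steps and can fail, and clk_disable_unprepare() is the matching teardown. A hedged sketch of the usual pairing (example_open() and do_setup() are illustrative names):

	static int example_open(struct example_priv *priv)
	{
		int err;

		err = clk_prepare_enable(priv->clk);	/* prepare + enable, may fail */
		if (err)
			return err;			/* propagate the failure */

		err = do_setup(priv);			/* hypothetical setup step */
		if (err)
			clk_disable_unprepare(priv->clk); /* undo both on error */
		return err;
	}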
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5dede6e64376..fb279d6ae484 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -97,14 +97,14 @@ static void c_can_hw_raminit_ti(const struct c_can_priv *priv, bool enable)
 	ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
 	writel(ctrl, priv->raminit_ctrlreg);
 	ctrl &= ~CAN_RAMINIT_DONE_MASK(priv->instance);
-	c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+	c_can_hw_raminit_wait_ti(priv, mask, ctrl);
 
 	if (enable) {
 		/* Set start bit and wait for the done bit. */
 		ctrl |= CAN_RAMINIT_START_MASK(priv->instance);
 		writel(ctrl, priv->raminit_ctrlreg);
 		ctrl |= CAN_RAMINIT_DONE_MASK(priv->instance);
-		c_can_hw_raminit_wait_ti(priv, ctrl, mask);
+		c_can_hw_raminit_wait_ti(priv, mask, ctrl);
 	}
 	spin_unlock(&raminit_lock);
 }
@@ -280,7 +280,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
 	priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
 					     resource_size(res));
-	if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+	if (!priv->raminit_ctrlreg || priv->instance < 0)
 		dev_info(&pdev->dev, "control memory is not used for raminit\n");
 	else
 		priv->raminit = c_can_hw_raminit_ti;
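
[Editor's note] The second c_can hunk fixes an error-check mismatch: devm_ioremap() returns NULL on failure, not an ERR_PTR() value, so the old IS_ERR() test could never fire. A sketch of the correct check:

	void __iomem *base;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)		/* devm_ioremap() yields NULL, not ERR_PTR */
		return -ENOMEM;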
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index f425ec2c7839..6586309329e6 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -62,7 +62,7 @@
 #define FLEXCAN_MCR_BCC		BIT(16)
 #define FLEXCAN_MCR_LPRIO_EN	BIT(13)
 #define FLEXCAN_MCR_AEN		BIT(12)
-#define FLEXCAN_MCR_MAXMB(x)	((x) & 0x1f)
+#define FLEXCAN_MCR_MAXMB(x)	((x) & 0x7f)
 #define FLEXCAN_MCR_IDAM_A	(0 << 8)
 #define FLEXCAN_MCR_IDAM_B	(1 << 8)
 #define FLEXCAN_MCR_IDAM_C	(2 << 8)
@@ -125,7 +125,9 @@
 	 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
-#define FLEXCAN_TX_BUF_ID		8
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_BUF_RESERVED	8
+#define FLEXCAN_TX_BUF_ID		9
 #define FLEXCAN_IFLAG_BUF(x)		BIT(x)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
@@ -136,6 +138,17 @@
 
 /* FLEXCAN message buffers */
 #define FLEXCAN_MB_CNT_CODE(x)		(((x) & 0xf) << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE	(0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY	(0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL		(0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRRUN	(0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER	(0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE	(0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT	(0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA		(0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER	(0xe << 24)
+
 #define FLEXCAN_MB_CNT_SRR		BIT(22)
 #define FLEXCAN_MB_CNT_IDE		BIT(21)
 #define FLEXCAN_MB_CNT_RTR		BIT(20)
@@ -298,7 +311,7 @@ static int flexcan_chip_enable(struct flexcan_priv *priv)
 	flexcan_write(reg, &regs->mcr);
 
 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-		usleep_range(10, 20);
+		udelay(10);
 
 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
 		return -ETIMEDOUT;
@@ -317,7 +330,7 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 	flexcan_write(reg, &regs->mcr);
 
 	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
-		usleep_range(10, 20);
+		udelay(10);
 
 	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
 		return -ETIMEDOUT;
@@ -336,7 +349,7 @@ static int flexcan_chip_freeze(struct flexcan_priv *priv)
 	flexcan_write(reg, &regs->mcr);
 
 	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-		usleep_range(100, 200);
+		udelay(100);
 
 	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
 		return -ETIMEDOUT;
@@ -355,7 +368,7 @@ static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
 	flexcan_write(reg, &regs->mcr);
 
 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
-		usleep_range(10, 20);
+		udelay(10);
 
 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
 		return -ETIMEDOUT;
@@ -370,7 +383,7 @@ static int flexcan_chip_softreset(struct flexcan_priv *priv)
 
 	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
 	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
-		usleep_range(10, 20);
+		udelay(10);
 
 	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
 		return -ETIMEDOUT;
@@ -428,6 +441,14 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	flexcan_write(can_id, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_id);
 	flexcan_write(ctrl, &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
+	/* Errata ERR005829 step8:
+	 * Write twice INACTIVE(0x8) code to first MB.
+	 */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
 	return NETDEV_TX_OK;
 }
 
@@ -549,6 +570,13 @@ static void do_state(struct net_device *dev,
 
 	/* process state changes depending on the new state */
 	switch (new_state) {
+	case CAN_STATE_ERROR_WARNING:
+		netdev_dbg(dev, "Error Warning\n");
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (bec.txerr > bec.rxerr) ?
+			CAN_ERR_CRTL_TX_WARNING :
+			CAN_ERR_CRTL_RX_WARNING;
+		break;
 	case CAN_STATE_ERROR_ACTIVE:
 		netdev_dbg(dev, "Error Active\n");
 		cf->can_id |= CAN_ERR_PROT;
@@ -737,6 +765,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 		stats->tx_bytes += can_get_echo_skb(dev, 0);
 		stats->tx_packets++;
 		can_led_event(dev, CAN_LED_EVENT_TX);
+		/* after sending a RTR frame mailbox is in RX mode */
+		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 		flexcan_write((1 << FLEXCAN_TX_BUF_ID), &regs->iflag1);
 		netif_wake_queue(dev);
 	}
@@ -794,6 +825,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	struct flexcan_regs __iomem *regs = priv->base;
 	int err;
 	u32 reg_mcr, reg_ctrl;
+	int i;
 
 	/* enable module */
 	err = flexcan_chip_enable(priv);
@@ -852,14 +884,26 @@ static int flexcan_chip_start(struct net_device *dev)
 	if (priv->devtype_data->features & FLEXCAN_HAS_BROKEN_ERR_STATE ||
 	    priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
 		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+	else
+		reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
 
 	/* save for later use */
 	priv->reg_ctrl_default = reg_ctrl;
 	netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
 	flexcan_write(reg_ctrl, &regs->ctrl);
 
-	/* Abort any pending TX, mark Mailbox as INACTIVE */
-	flexcan_write(FLEXCAN_MB_CNT_CODE(0x4),
+	/* clear and invalidate all mailboxes first */
+	for (i = FLEXCAN_TX_BUF_ID; i < ARRAY_SIZE(regs->cantxfg); i++) {
+		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+			      &regs->cantxfg[i].can_ctrl);
+	}
+
+	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &regs->cantxfg[FLEXCAN_TX_BUF_RESERVED].can_ctrl);
+
+	/* mark TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
 		      &regs->cantxfg[FLEXCAN_TX_BUF_ID].can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
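
[Editor's note] The flexcan changes implement the Freescale ERR005829 workaround: mailbox 8 becomes FLEXCAN_TX_BUF_RESERVED, the real TX mailbox moves to 9, and after every transmit the TX INACTIVE code (0x8) is written twice to the reserved mailbox. The new FLEXCAN_MB_CODE_* macros name the 4-bit CODE field in bits 27:24 of a mailbox control word; a short illustrative use, where mb is a hypothetical mailbox index:

	#define MB_CNT_CODE(x)		(((x) & 0xf) << 24)
	#define MB_CODE_TX_INACTIVE	(0x8 << 24)	/* == MB_CNT_CODE(0x8) */

	/* e.g. returning a mailbox to the inactive state after a transmit: */
	flexcan_write(MB_CODE_TX_INACTIVE, &regs->cantxfg[mb].can_ctrl);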
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 7a85590fefb9..e5fac368068a 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -70,6 +70,8 @@ struct peak_pci_chan {
 #define PEAK_PC_104P_DEVICE_ID	0x0006	/* PCAN-PC/104+ cards */
 #define PEAK_PCI_104E_DEVICE_ID	0x0007	/* PCAN-PCI/104 Express cards */
 #define PEAK_MPCIE_DEVICE_ID	0x0008	/* The miniPCIe slot cards */
+#define PEAK_PCIE_OEM_ID	0x0009	/* PCAN-PCI Express OEM */
+#define PEAK_PCIEC34_DEVICE_ID	0x000A	/* PCAN-PCI Express 34 (one channel) */
 
 #define PEAK_PCI_CHAN_MAX	4
 
@@ -87,6 +89,7 @@ static const struct pci_device_id peak_pci_tbl[] = {
 	{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #ifdef CONFIG_CAN_PEAK_PCIEC
 	{PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
 #endif
 	{0,}
 };
@@ -653,7 +656,8 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * This must be done *before* register_sja1000dev() but
 	 * *after* devices linkage
 	 */
-	if (pdev->device == PEAK_PCIEC_DEVICE_ID) {
+	if (pdev->device == PEAK_PCIEC_DEVICE_ID ||
+	    pdev->device == PEAK_PCIEC34_DEVICE_ID) {
 		err = peak_pciec_probe(pdev, dev);
 		if (err) {
 			dev_err(&pdev->dev,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index d1692154ed1b..b27ac6074afb 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -172,6 +172,35 @@ static void set_normal_mode(struct net_device *dev)
 		netdev_err(dev, "setting SJA1000 into normal mode failed!\n");
 }
 
+/*
+ * initialize SJA1000 chip:
+ *   - reset chip
+ *   - set output mode
+ *   - set baudrate
+ *   - enable interrupts
+ *   - start operating mode
+ */
+static void chipset_init(struct net_device *dev)
+{
+	struct sja1000_priv *priv = netdev_priv(dev);
+
+	/* set clock divider and output control register */
+	priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
+
+	/* set acceptance filter (accept all) */
+	priv->write_reg(priv, SJA1000_ACCC0, 0x00);
+	priv->write_reg(priv, SJA1000_ACCC1, 0x00);
+	priv->write_reg(priv, SJA1000_ACCC2, 0x00);
+	priv->write_reg(priv, SJA1000_ACCC3, 0x00);
+
+	priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
+	priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
+	priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
+	priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
+
+	priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
+}
+
 static void sja1000_start(struct net_device *dev)
 {
 	struct sja1000_priv *priv = netdev_priv(dev);
@@ -180,6 +209,10 @@ static void sja1000_start(struct net_device *dev)
 	if (priv->can.state != CAN_STATE_STOPPED)
 		set_reset_mode(dev);
 
+	/* Initialize chip if uninitialized at this stage */
+	if (!(priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN))
+		chipset_init(dev);
+
 	/* Clear error counters and error code capture */
 	priv->write_reg(priv, SJA1000_TXERR, 0x0);
 	priv->write_reg(priv, SJA1000_RXERR, 0x0);
@@ -237,35 +270,6 @@ static int sja1000_get_berr_counter(const struct net_device *dev,
 }
 
 /*
- * initialize SJA1000 chip:
- *   - reset chip
- *   - set output mode
- *   - set baudrate
- *   - enable interrupts
- *   - start operating mode
- */
-static void chipset_init(struct net_device *dev)
-{
-	struct sja1000_priv *priv = netdev_priv(dev);
-
-	/* set clock divider and output control register */
-	priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN);
-
-	/* set acceptance filter (accept all) */
-	priv->write_reg(priv, SJA1000_ACCC0, 0x00);
-	priv->write_reg(priv, SJA1000_ACCC1, 0x00);
-	priv->write_reg(priv, SJA1000_ACCC2, 0x00);
-	priv->write_reg(priv, SJA1000_ACCC3, 0x00);
-
-	priv->write_reg(priv, SJA1000_ACCM0, 0xFF);
-	priv->write_reg(priv, SJA1000_ACCM1, 0xFF);
-	priv->write_reg(priv, SJA1000_ACCM2, 0xFF);
-	priv->write_reg(priv, SJA1000_ACCM3, 0xFF);
-
-	priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL);
-}
-
-/*
  * transmit a CAN message
  * message layout in the sk_buff should be like this:
  * xx xx xx xx ff ll 00 11 22 33 44 55 66 77
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 059c7414e303..8ca49f04acec 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2129,6 +2129,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry = vp->cur_tx % TX_RING_SIZE;
 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
 	unsigned long flags;
+	dma_addr_t dma_addr;
 
 	if (vortex_debug > 6) {
 		pr_debug("boomerang_start_xmit()\n");
@@ -2163,24 +2164,48 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
 
 	if (!skb_shinfo(skb)->nr_frags) {
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-							      skb->len, PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
+					  PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
 	} else {
 		int i;
 
-		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-							      skb_headlen(skb), PCI_DMA_TODEVICE));
+		dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data,
+					  skb_headlen(skb), PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+			goto out_dma_err;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
 
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+			dma_addr = skb_frag_dma_map(&VORTEX_PCI(vp)->dev, frag,
+						    0,
+						    frag->size,
+						    DMA_TO_DEVICE);
+			if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr)) {
+				for(i = i-1; i >= 0; i--)
+					dma_unmap_page(&VORTEX_PCI(vp)->dev,
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
+						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
+						       DMA_TO_DEVICE);
+
+				pci_unmap_single(VORTEX_PCI(vp),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
+						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+						 PCI_DMA_TODEVICE);
+
+				goto out_dma_err;
+			}
+
 			vp->tx_ring[entry].frag[i+1].addr =
-					cpu_to_le32(pci_map_single(
-						VORTEX_PCI(vp),
-						(void *)skb_frag_address(frag),
-						skb_frag_size(frag), PCI_DMA_TODEVICE));
+					cpu_to_le32(dma_addr);
 
 			if (i == skb_shinfo(skb)->nr_frags-1)
 				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
@@ -2189,7 +2214,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	}
 #else
-	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
+	if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+		goto out_dma_err;
+	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
 #endif
@@ -2217,7 +2245,11 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_tx_timestamp(skb);
 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
 	spin_unlock_irqrestore(&vp->lock, flags);
+out:
 	return NETDEV_TX_OK;
+out_dma_err:
+	dev_err(&VORTEX_PCI(vp)->dev, "Error mapping dma buffer\n");
+	goto out;
 }
 
 /* The interrupt handler does all of the Rx thread work and cleans up
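
[Editor's note] The 3c59x hunks apply standard DMA-API discipline: every mapping returned by pci_map_single() or skb_frag_dma_map() must be tested with dma_mapping_error() before the descriptor is handed to hardware, and a failure mid-frame must unwind the fragments already mapped, in reverse order. A condensed sketch of that unwind, with illustrative buffer arrays:

	/* Map nr buffers; on failure unmap what was already mapped. */
	for (i = 0; i < nr; i++) {
		addr[i] = dma_map_single(dev, buf[i], len[i], DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr[i])) {
			while (--i >= 0)	/* reverse unwind */
				dma_unmap_single(dev, addr[i], len[i],
						 DMA_TO_DEVICE);
			return -ENOMEM;
		}
	}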
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 23578dfee249..3005155e412b 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth)
 	GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
 }
 
+static inline void greth_enable_tx_and_irq(struct greth_private *greth)
+{
+	wmb(); /* BDs must been written to memory before enabling TX */
+	GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI);
+}
+
 static inline void greth_disable_tx(struct greth_private *greth)
 {
 	GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
@@ -447,29 +453,30 @@ out:
 	return err;
 }
 
+static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next)
+{
+	if (tx_next < tx_last)
+		return (tx_last - tx_next) - 1;
+	else
+		return GRETH_TXBD_NUM - (tx_next - tx_last) - 1;
+}
 
 static netdev_tx_t
 greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct greth_private *greth = netdev_priv(dev);
 	struct greth_bd *bdp;
-	u32 status = 0, dma_addr, ctrl;
+	u32 status, dma_addr;
 	int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
 	unsigned long flags;
+	u16 tx_last;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
+	tx_last = greth->tx_last;
+	rmb(); /* tx_last is updated by the poll task */
 
-	/* Clean TX Ring */
-	greth_clean_tx_gbit(dev);
-
-	if (greth->tx_free < nr_frags + 1) {
-		spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
-		ctrl = GRETH_REGLOAD(greth->regs->control);
-		/* Enable TX IRQ only if not already in poll() routine */
-		if (ctrl & GRETH_RXI)
-			GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
+	if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irqrestore(&greth->devlock, flags);
 		err = NETDEV_TX_BUSY;
 		goto out;
 	}
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 	/* Linear buf */
 	if (nr_frags != 0)
 		status = GRETH_TXBD_MORE;
+	else
+		status = GRETH_BD_IE;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		status |= GRETH_TXBD_CSALL;
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Enable the descriptor chain by enabling the first descriptor */
 	bdp = greth->tx_bd_base + greth->tx_next;
-	greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
-	greth->tx_next = curr_tx;
-	greth->tx_free -= nr_frags + 1;
-
-	wmb();
+	greth_write_bd(&bdp->stat,
+		       greth_read_bd(&bdp->stat) | GRETH_BD_EN);
 
 	spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
-	greth_enable_tx(greth);
+	greth->tx_next = curr_tx;
+	greth_enable_tx_and_irq(greth);
 	spin_unlock_irqrestore(&greth->devlock, flags);
 
 	return NETDEV_TX_OK;
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev)
 	if (greth->tx_free > 0) {
 		netif_wake_queue(dev);
 	}
-
 }
 
 static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 {
 	struct greth_private *greth;
 	struct greth_bd *bdp, *bdp_last_frag;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
 	u32 stat;
 	int nr_frags, i;
+	u16 tx_last;
 
 	greth = netdev_priv(dev);
+	tx_last = greth->tx_last;
 
-	while (greth->tx_free < GRETH_TXBD_NUM) {
+	while (tx_last != greth->tx_next) {
 
-		skb = greth->tx_skbuff[greth->tx_last];
+		skb = greth->tx_skbuff[tx_last];
 
 		nr_frags = skb_shinfo(skb)->nr_frags;
 
 		/* We only clean fully completed SKBs */
-		bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);
 
 		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
 		mb();
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 		if (stat & GRETH_BD_EN)
 			break;
 
-		greth->tx_skbuff[greth->tx_last] = NULL;
+		greth->tx_skbuff[tx_last] = NULL;
 
 		greth_update_tx_stats(dev, stat);
 		dev->stats.tx_bytes += skb->len;
 
-		bdp = greth->tx_bd_base + greth->tx_last;
+		bdp = greth->tx_bd_base + tx_last;
 
-		greth->tx_last = NEXT_TX(greth->tx_last);
+		tx_last = NEXT_TX(tx_last);
 
 		dma_unmap_single(greth->dev,
 				 greth_read_bd(&bdp->addr),
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev)
 
 		for (i = 0; i < nr_frags; i++) {
 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-			bdp = greth->tx_bd_base + greth->tx_last;
+			bdp = greth->tx_bd_base + tx_last;
 
 			dma_unmap_page(greth->dev,
 				       greth_read_bd(&bdp->addr),
 				       skb_frag_size(frag),
 				       DMA_TO_DEVICE);
 
-			greth->tx_last = NEXT_TX(greth->tx_last);
+			tx_last = NEXT_TX(tx_last);
 		}
-		greth->tx_free += nr_frags+1;
 		dev_kfree_skb(skb);
 	}
+	if (skb) { /* skb is set only if the above while loop was entered */
+		wmb();
+		greth->tx_last = tx_last;
 
-	if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
-		netif_wake_queue(dev);
+		if (netif_queue_stopped(dev) &&
+		    (greth_num_free_bds(tx_last, greth->tx_next) >
+		    (MAX_SKB_FRAGS+1)))
+			netif_wake_queue(dev);
+	}
 }
 
 static int greth_rx(struct net_device *dev, int limit)
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget)
 	greth = container_of(napi, struct greth_private, napi);
 
 restart_txrx_poll:
-	if (netif_queue_stopped(greth->netdev)) {
-		if (greth->gbit_mac)
-			greth_clean_tx_gbit(greth->netdev);
-		else
-			greth_clean_tx(greth->netdev);
-	}
-
 	if (greth->gbit_mac) {
+		greth_clean_tx_gbit(greth->netdev);
 		work_done += greth_rx_gbit(greth->netdev, budget - work_done);
 	} else {
+		if (netif_queue_stopped(greth->netdev))
+			greth_clean_tx(greth->netdev);
 		work_done += greth_rx(greth->netdev, budget - work_done);
 	}
 
@@ -983,7 +992,8 @@ restart_txrx_poll:
 	spin_lock_irqsave(&greth->devlock, flags);
 
 	ctrl = GRETH_REGLOAD(greth->regs->control);
-	if (netif_queue_stopped(greth->netdev)) {
+	if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) ||
+	    (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) {
 		GRETH_REGSAVE(greth->regs->control,
 			      ctrl | GRETH_TXI | GRETH_RXI);
 		mask = GRETH_INT_RX | GRETH_INT_RE |
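
[Editor's note] greth_num_free_bds() above is classic circular-buffer arithmetic: with producer index tx_next and consumer index tx_last in a GRETH_TXBD_NUM-entry ring, one slot is deliberately kept unused so that a full ring and an empty ring stay distinguishable. An equivalent single-expression form of the same computation, as a sketch:

	/* free slots = (last - next - 1) mod size, one slot reserved */
	static unsigned int ring_free(unsigned int last, unsigned int next,
				      unsigned int size)
	{
		return (last - next - 1 + size) % size;
	}

	/* size 16: last == next     -> 15 free (empty ring)
	 * size 16: next == last - 1 ->  0 free (ring full)  */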
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h
index 232a622a85b7..ae16ac94daf8 100644
--- a/drivers/net/ethernet/aeroflex/greth.h
+++ b/drivers/net/ethernet/aeroflex/greth.h
@@ -107,7 +107,7 @@ struct greth_private {
 
 	u16 tx_next;
 	u16 tx_last;
-	u16 tx_free;
+	u16 tx_free;	/* only used on 10/100Mbit */
 	u16 rx_cur;
 
 	struct greth_regs *regs;	/* Address of controller registers. */
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
index 346592dca33c..a3c11355a34d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
 	struct xgbe_prv_data *pdata = filp->private_data;
 	unsigned int value;
 
-	value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-					   pdata->debugfs_xpcs_reg);
+	value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
+			   pdata->debugfs_xpcs_reg);
 
 	return xgbe_common_read(buffer, count, ppos, value);
 }
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp,
 	if (len < 0)
 		return len;
 
-	pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
-				    pdata->debugfs_xpcs_reg, value);
+	XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
+		    value);
 
 	return len;
 }
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index edaca4496264..ea273836d999 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 
 	/* Clear MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 
 	/* Set MAC flow control */
 	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-	q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
+	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
 	reg = MAC_Q0TFCR;
 	for (i = 0; i < q_count; i++) {
 		reg_val = XGMAC_IOREAD(pdata, reg);
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
 	/* Enable all counter interrupts */
-	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
-	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
 }
 
 static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
 	unsigned int i, count;
 
+	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+		return 0;
+
 	for (i = 0; i < pdata->tx_q_count; i++)
 		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
 	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
 }
 
-static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
-						  unsigned char queue_count)
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+						  unsigned int queue_count)
 {
 	unsigned int q_fifo_size = 0;
 	enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
 		q_fifo_size = XGBE_FIFO_SIZE_KB(256);
 		break;
 	}
+
+	/* The configured value is not the actual amount of fifo RAM */
+	q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
+
 	q_fifo_size = q_fifo_size / queue_count;
 
 	/* Set the queue fifo size programmable value */
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
 		xgbe_disable_rx_vlan_stripping(pdata);
 }
 
+static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+{
+	bool read_hi;
+	u64 val;
+
+	switch (reg_lo) {
+	/* These registers are always 64 bit */
+	case MMC_TXOCTETCOUNT_GB_LO:
+	case MMC_TXOCTETCOUNT_G_LO:
+	case MMC_RXOCTETCOUNT_GB_LO:
+	case MMC_RXOCTETCOUNT_G_LO:
+		read_hi = true;
+		break;
+
+	default:
+		read_hi = false;
+	};
+
+	val = XGMAC_IOREAD(pdata, reg_lo);
+
+	if (read_hi)
+		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
+
+	return val;
+}
+
 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
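
[Editor's note] xgbe_mmc_read() above widens only the four octet counters, which the driver's comment says are always 64 bit in this MMC block: the low 32-bit register is read first and the high word at reg_lo + 4 is folded into the upper half. A minimal sketch of just the combine step, where read32() is a hypothetical 32-bit accessor:

	static u64 read_counter64(void __iomem *base, unsigned int reg_lo)
	{
		u64 val = read32(base, reg_lo);		    /* low word first */

		val |= (u64)read32(base, reg_lo + 4) << 32; /* high word at +4 */
		return val;
	}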
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
 		stats->txoctetcount_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
 		stats->txframecount_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
 		stats->txbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
 		stats->txmulticastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
 		stats->tx64octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
 		stats->tx65to127octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
 		stats->tx128to255octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
 		stats->tx256to511octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
 		stats->tx512to1023octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
 		stats->tx1024tomaxoctets_gb +=
-			XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
 		stats->txunicastframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
 		stats->txmulticastframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
 		stats->txbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
 		stats->txunderflowerror +=
-			XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+			xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
 		stats->txoctetcount_g +=
-			XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
 		stats->txframecount_g +=
-			XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
 		stats->txpauseframes +=
-			XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+			xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
 		stats->txvlanframes_g +=
-			XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
 }
 
 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
 		stats->rxframecount_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
 		stats->rxoctetcount_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
 		stats->rxoctetcount_g +=
-			XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
 		stats->rxbroadcastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
 		stats->rxmulticastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
 		stats->rxcrcerror +=
-			XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+			xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
 		stats->rxrunterror +=
-			XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+			xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
 		stats->rxjabbererror +=
-			XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+			xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
 		stats->rxundersize_g +=
-			XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+			xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
 		stats->rxoversize_g +=
-			XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+			xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
 		stats->rx64octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
 		stats->rx65to127octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
 		stats->rx128to255octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
 		stats->rx256to511octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
 		stats->rx512to1023octets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
 		stats->rx1024tomaxoctets_gb +=
-			XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
 		stats->rxunicastframes_g +=
-			XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+			xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
 		stats->rxlengtherror +=
-			XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+			xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
 		stats->rxoutofrangetype +=
-			XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+			xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
 		stats->rxpauseframes +=
-			XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+			xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
 		stats->rxfifooverflow +=
-			XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+			xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
 
 	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
 		stats->rxvlanframes_gb +=
-			XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+			xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2120 2153
2121 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) 2154 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
2122 stats->rxwatchdogerror += 2155 stats->rxwatchdogerror +=
2123 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 2156 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2124} 2157}
2125 2158
2126static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) 2159static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
2131 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); 2164 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
2132 2165
2133 stats->txoctetcount_gb += 2166 stats->txoctetcount_gb +=
2134 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); 2167 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2135 2168
2136 stats->txframecount_gb += 2169 stats->txframecount_gb +=
2137 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); 2170 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2138 2171
2139 stats->txbroadcastframes_g += 2172 stats->txbroadcastframes_g +=
2140 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); 2173 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2141 2174
2142 stats->txmulticastframes_g += 2175 stats->txmulticastframes_g +=
2143 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); 2176 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2144 2177
2145 stats->tx64octets_gb += 2178 stats->tx64octets_gb +=
2146 XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); 2179 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2147 2180
2148 stats->tx65to127octets_gb += 2181 stats->tx65to127octets_gb +=
2149 XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); 2182 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2150 2183
2151 stats->tx128to255octets_gb += 2184 stats->tx128to255octets_gb +=
2152 XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); 2185 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2153 2186
2154 stats->tx256to511octets_gb += 2187 stats->tx256to511octets_gb +=
2155 XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); 2188 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2156 2189
2157 stats->tx512to1023octets_gb += 2190 stats->tx512to1023octets_gb +=
2158 XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); 2191 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2159 2192
2160 stats->tx1024tomaxoctets_gb += 2193 stats->tx1024tomaxoctets_gb +=
2161 XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); 2194 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2162 2195
2163 stats->txunicastframes_gb += 2196 stats->txunicastframes_gb +=
2164 XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); 2197 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2165 2198
2166 stats->txmulticastframes_gb += 2199 stats->txmulticastframes_gb +=
2167 XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); 2200 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2168 2201
2169 stats->txbroadcastframes_g += 2202 stats->txbroadcastframes_g +=
2170 XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); 2203 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2171 2204
2172 stats->txunderflowerror += 2205 stats->txunderflowerror +=
2173 XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); 2206 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2174 2207
2175 stats->txoctetcount_g += 2208 stats->txoctetcount_g +=
2176 XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); 2209 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2177 2210
2178 stats->txframecount_g += 2211 stats->txframecount_g +=
2179 XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); 2212 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2180 2213
2181 stats->txpauseframes += 2214 stats->txpauseframes +=
2182 XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); 2215 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2183 2216
2184 stats->txvlanframes_g += 2217 stats->txvlanframes_g +=
2185 XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); 2218 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2186 2219
2187 stats->rxframecount_gb += 2220 stats->rxframecount_gb +=
2188 XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); 2221 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2189 2222
2190 stats->rxoctetcount_gb += 2223 stats->rxoctetcount_gb +=
2191 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); 2224 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2192 2225
2193 stats->rxoctetcount_g += 2226 stats->rxoctetcount_g +=
2194 XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); 2227 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2195 2228
2196 stats->rxbroadcastframes_g += 2229 stats->rxbroadcastframes_g +=
2197 XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); 2230 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2198 2231
2199 stats->rxmulticastframes_g += 2232 stats->rxmulticastframes_g +=
2200 XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); 2233 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2201 2234
2202 stats->rxcrcerror += 2235 stats->rxcrcerror +=
2203 XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); 2236 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2204 2237
2205 stats->rxrunterror += 2238 stats->rxrunterror +=
2206 XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); 2239 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2207 2240
2208 stats->rxjabbererror += 2241 stats->rxjabbererror +=
2209 XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); 2242 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2210 2243
2211 stats->rxundersize_g += 2244 stats->rxundersize_g +=
2212 XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); 2245 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2213 2246
2214 stats->rxoversize_g += 2247 stats->rxoversize_g +=
2215 XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); 2248 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2216 2249
2217 stats->rx64octets_gb += 2250 stats->rx64octets_gb +=
2218 XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); 2251 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2219 2252
2220 stats->rx65to127octets_gb += 2253 stats->rx65to127octets_gb +=
2221 XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); 2254 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2222 2255
2223 stats->rx128to255octets_gb += 2256 stats->rx128to255octets_gb +=
2224 XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); 2257 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2225 2258
2226 stats->rx256to511octets_gb += 2259 stats->rx256to511octets_gb +=
2227 XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); 2260 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2228 2261
2229 stats->rx512to1023octets_gb += 2262 stats->rx512to1023octets_gb +=
2230 XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); 2263 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2231 2264
2232 stats->rx1024tomaxoctets_gb += 2265 stats->rx1024tomaxoctets_gb +=
2233 XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); 2266 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2234 2267
2235 stats->rxunicastframes_g += 2268 stats->rxunicastframes_g +=
2236 XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); 2269 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2237 2270
2238 stats->rxlengtherror += 2271 stats->rxlengtherror +=
2239 XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); 2272 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2240 2273
2241 stats->rxoutofrangetype += 2274 stats->rxoutofrangetype +=
2242 XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); 2275 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2243 2276
2244 stats->rxpauseframes += 2277 stats->rxpauseframes +=
2245 XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); 2278 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2246 2279
2247 stats->rxfifooverflow += 2280 stats->rxfifooverflow +=
2248 XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); 2281 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2249 2282
2250 stats->rxvlanframes_gb += 2283 stats->rxvlanframes_gb +=
2251 XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); 2284 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2252 2285
2253 stats->rxwatchdogerror += 2286 stats->rxwatchdogerror +=
2254 XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); 2287 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
2255 2288
2256 /* Un-freeze counters */ 2289 /* Un-freeze counters */
2257 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); 2290 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
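The hunks above replace direct XGMAC_IOREAD() calls on the counter registers with a xgbe_mmc_read() helper. A plausible motivation is that some MMC counters are wider than 32 bits, so a helper can fold in the matching *_HI register when the hardware provides one. A minimal sketch of such a helper — the list of 64-bit counters here is illustrative, not the driver's actual table:

static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	u64 val;

	switch (reg_lo) {
	/* assumed: these counters span a _LO/_HI register pair */
	case MMC_TXOCTETCOUNT_GB_LO:
	case MMC_TXOCTETCOUNT_G_LO:
	case MMC_RXOCTETCOUNT_GB_LO:
	case MMC_RXOCTETCOUNT_G_LO:
		read_hi = true;
		break;
	default:
		read_hi = false;
	}

	val = XGMAC_IOREAD(pdata, reg_lo);
	if (read_hi)
		val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return val;
}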
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index dc84f7193c2d..b26d75856553 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
 
 	memset(hw_feat, 0, sizeof(*hw_feat));
 
+	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
+
 	/* Hardware feature register 0 */
 	hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
 	hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index a076aca138a1..46f613028e9c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
 			       struct ethtool_drvinfo *drvinfo)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
 
 	strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
 	strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
 		sizeof(drvinfo->bus_info));
 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
-		 XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
+		 XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
 	drvinfo->n_stats = XGBE_STATS_COUNT;
 }
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index 8aa6a9353f7b..bdf9cfa70e88 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
 	}
 
 	if (i < pdata->rx_ring_count) {
-		spin_lock_init(&tx_ring->lock);
+		spin_lock_init(&rx_ring->lock);
 		channel->rx_ring = rx_ring++;
 	}
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 07bf70a82908..e9fe6e6ddcc3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -183,6 +183,7 @@
 #define XGMAC_DRIVER_CONTEXT	1
 #define XGMAC_IOCTL_CONTEXT	2
 
+#define XGBE_FIFO_MAX		81920
 #define XGBE_FIFO_SIZE_B(x)	(x)
 #define XGBE_FIFO_SIZE_KB(x)	(x * 1024)
 
@@ -526,6 +527,9 @@ struct xgbe_desc_if {
  * or configurations are present in the device.
  */
 struct xgbe_hw_features {
+	/* HW Version */
+	unsigned int version;
+
 	/* HW Feature Register0 */
 	unsigned int gmii;		/* 1000 Mbps support */
 	unsigned int vlhash;		/* VLAN Hash Filter */
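These hunks cache the MAC version register once in hw_feat->version so that xgbe_get_drvinfo() can decode it with XGMAC_GET_BITS() instead of re-reading the hardware on every ethtool query. The decode itself is plain mask-and-shift; roughly as below, with the field positions supplied by the driver's own MAC_VR index/width macros (not repeated here):

/* extract WIDTH bits starting at INDEX from a cached register value */
static inline unsigned int get_bits(unsigned int reg, unsigned int index,
				    unsigned int width)
{
	return (reg >> index) & ((1U << width) - 1);
}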
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 616dff6d3f5f..f4054d242f3c 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,5 +1,6 @@
 config NET_XGENE
 	tristate "APM X-Gene SoC Ethernet Driver"
+	depends on HAS_DMA
 	select PHYLIB
 	help
 	  This is the Ethernet driver for the on-chip ethernet interface on the
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index e1a8f4e19983..e4222af2baa6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -563,15 +563,21 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
 	struct xgene_enet_desc_ring *ring;
 
 	ring = pdata->tx_ring;
-	if (ring && ring->cp_ring && ring->cp_ring->cp_skb)
-		devm_kfree(dev, ring->cp_ring->cp_skb);
-	xgene_enet_free_desc_ring(ring);
+	if (ring) {
+		if (ring->cp_ring && ring->cp_ring->cp_skb)
+			devm_kfree(dev, ring->cp_ring->cp_skb);
+		xgene_enet_free_desc_ring(ring);
+	}
 
 	ring = pdata->rx_ring;
-	if (ring && ring->buf_pool && ring->buf_pool->rx_skb)
-		devm_kfree(dev, ring->buf_pool->rx_skb);
-	xgene_enet_free_desc_ring(ring->buf_pool);
-	xgene_enet_free_desc_ring(ring);
+	if (ring) {
+		if (ring->buf_pool) {
+			if (ring->buf_pool->rx_skb)
+				devm_kfree(dev, ring->buf_pool->rx_skb);
+			xgene_enet_free_desc_ring(ring->buf_pool);
+		}
+		xgene_enet_free_desc_ring(ring);
+	}
 }
 
 static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index fe5cfeace6e3..5919394d9f58 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -30,6 +30,17 @@
 #define DRV_VERSION	"1.0"
 
 /**
+ * arc_emac_tx_avail - Return the number of available slots in the tx ring.
+ * @priv: Pointer to ARC EMAC private data structure.
+ *
+ * returns: the number of slots available for transmission in tx the ring.
+ */
+static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
+{
+	return (priv->txbd_dirty + TX_BD_NUM - priv->txbd_curr - 1) % TX_BD_NUM;
+}
+
+/**
  * arc_emac_adjust_link - Adjust the PHY link duplex.
  * @ndev:	Pointer to the net_device structure.
  *
@@ -180,10 +191,15 @@ static void arc_emac_tx_clean(struct net_device *ndev)
 		txbd->info = 0;
 
 		*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
-		if (netif_queue_stopped(ndev))
-			netif_wake_queue(ndev);
 	}
+
+	/* Ensure that txbd_dirty is visible to tx() before checking
+	 * for queue stopped.
+	 */
+	smp_mb();
+
+	if (netif_queue_stopped(ndev) && arc_emac_tx_avail(priv))
+		netif_wake_queue(ndev);
 }
 
 /**
@@ -298,7 +314,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
 	work_done = arc_emac_rx(ndev, budget);
 	if (work_done < budget) {
 		napi_complete(napi);
-		arc_reg_or(priv, R_ENABLE, RXINT_MASK);
+		arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 	}
 
 	return work_done;
@@ -327,9 +343,9 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 	/* Reset all flags except "MDIO complete" */
 	arc_reg_set(priv, R_STATUS, status);
 
-	if (status & RXINT_MASK) {
+	if (status & (RXINT_MASK | TXINT_MASK)) {
 		if (likely(napi_schedule_prep(&priv->napi))) {
-			arc_reg_clr(priv, R_ENABLE, RXINT_MASK);
+			arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
 			__napi_schedule(&priv->napi);
 		}
 	}
@@ -440,7 +456,7 @@ static int arc_emac_open(struct net_device *ndev)
 	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);
 
 	/* Enable interrupts */
-	arc_reg_set(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Set CONTROL */
 	arc_reg_set(priv, R_CTRL,
@@ -511,7 +527,7 @@ static int arc_emac_stop(struct net_device *ndev)
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts */
-	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | ERR_MASK);
+	arc_reg_clr(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);
 
 	/* Disable EMAC */
 	arc_reg_clr(priv, R_CTRL, EN_MASK);
@@ -574,11 +590,9 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 
 	len = max_t(unsigned int, ETH_ZLEN, skb->len);
 
-	/* EMAC still holds this buffer in its possession.
-	 * CPU must not modify this buffer descriptor
-	 */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
+	if (unlikely(!arc_emac_tx_avail(priv))) {
 		netif_stop_queue(ndev);
+		netdev_err(ndev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -607,12 +621,19 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	/* Increment index to point to the next BD */
 	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
 
-	/* Get "info" of the next BD */
-	info = &priv->txbd[*txbd_curr].info;
+	/* Ensure that tx_clean() sees the new txbd_curr before
+	 * checking the queue status. This prevents an unneeded wake
+	 * of the queue in tx_clean().
+	 */
+	smp_mb();
 
-	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
-	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
+	if (!arc_emac_tx_avail(priv)) {
 		netif_stop_queue(ndev);
+		/* Refresh tx_dirty */
+		smp_mb();
+		if (arc_emac_tx_avail(priv))
+			netif_start_queue(ndev);
+	}
 
 	arc_reg_set(priv, R_STATUS, TXPL_MASK);
 
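The new arc_emac_tx_avail() treats the descriptor ring as a circular buffer with txbd_curr as the producer index and txbd_dirty as the consumer index; the "- 1" keeps one slot permanently unused so a full ring is distinguishable from an empty one, and the smp_mb() in tx() pairs with the one in tx_clean() so each side observes the other's index update before testing the queue state. A self-contained worked example of the arithmetic, assuming TX_BD_NUM is 128 (the macro's real value lives elsewhere in the driver):

#include <stdio.h>

#define TX_BD_NUM 128

static int tx_avail(int txbd_dirty, int txbd_curr)
{
	return (txbd_dirty + TX_BD_NUM - txbd_curr - 1) % TX_BD_NUM;
}

int main(void)
{
	printf("%d\n", tx_avail(0, 0));   /* empty ring: 127 usable slots */
	printf("%d\n", tx_avail(5, 10));  /* 5 descriptors in flight: 122 left */
	printf("%d\n", tx_avail(11, 10)); /* producer caught up: 0, ring full */
	return 0;
}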
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7dcfb19a31c8..d8d07a818b89 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -84,7 +84,7 @@ config BNX2
 
 config CNIC
 	tristate "QLogic CNIC support"
-	depends on PCI
+	depends on PCI && (IPV6 || IPV6=n)
 	select BNX2
 	select UIO
 	---help---
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 4a7028d65912..d588136b23b9 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1697,7 +1697,7 @@ static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
 			   hwstat->tx_underruns +
 			   hwstat->tx_excessive_cols +
 			   hwstat->tx_late_cols);
-	nstat->multicast = hwstat->tx_multicast_pkts;
+	nstat->multicast = hwstat->rx_multicast_pkts;
 	nstat->collisions = hwstat->tx_total_cols;
 
 	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 6f4e18644bd4..d9b9170ed2fc 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -534,6 +534,25 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 	while ((processed < to_process) && (processed < budget)) {
 		cb = &priv->rx_cbs[priv->rx_read_ptr];
 		skb = cb->skb;
+
+		processed++;
+		priv->rx_read_ptr++;
+
+		if (priv->rx_read_ptr == priv->num_rx_bds)
+			priv->rx_read_ptr = 0;
+
+		/* We do not have a backing SKB, so we do not a corresponding
+		 * DMA mapping for this incoming packet since
+		 * bcm_sysport_rx_refill always either has both skb and mapping
+		 * or none.
+		 */
+		if (unlikely(!skb)) {
+			netif_err(priv, rx_err, ndev, "out of memory!\n");
+			ndev->stats.rx_dropped++;
+			ndev->stats.rx_errors++;
+			goto refill;
+		}
+
 		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
 				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
@@ -543,23 +562,11 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
 		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
 			  DESC_STATUS_MASK;
 
-		processed++;
-		priv->rx_read_ptr++;
-		if (priv->rx_read_ptr == priv->num_rx_bds)
-			priv->rx_read_ptr = 0;
-
 		netif_dbg(priv, rx_status, ndev,
 			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
 			  p_index, priv->rx_c_index, priv->rx_read_ptr,
 			  len, status);
 
-		if (unlikely(!skb)) {
-			netif_err(priv, rx_err, ndev, "out of memory!\n");
-			ndev->stats.rx_dropped++;
-			ndev->stats.rx_errors++;
-			goto refill;
-		}
-
 		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
 			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
 			ndev->stats.rx_dropped++;
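The reordering above makes the bookkeeping unconditional: the consumer index and the processed count advance once per loop iteration even when the slot has no backing skb, so the refill path never goes out of step with the hardware. A standalone simulation of that loop shape (names and the skb_present[] contents are illustrative):

#include <stdio.h>

#define NUM_RX_BDS 8

int main(void)
{
	int skb_present[NUM_RX_BDS] = { 1, 1, 0, 1, 0, 0, 1, 1 };
	int read_ptr = 0, processed = 0, dropped = 0;

	while (processed < NUM_RX_BDS) {
		int slot = read_ptr;

		/* bookkeeping first: exactly one slot consumed per pass */
		processed++;
		read_ptr++;
		if (read_ptr == NUM_RX_BDS)
			read_ptr = 0;

		if (!skb_present[slot]) {	/* earlier allocation failed */
			dropped++;
			continue;		/* the slot still gets refilled */
		}
		/* ... unmap and pass the packet up ... */
	}
	printf("processed=%d dropped=%d\n", processed, dropped);
	return 0;
}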
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4e6c82e20224..4ccc806b1150 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -483,11 +483,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
-#ifdef _ASM_GENERIC_INT_L64_H
-	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
-#else
 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
-#endif
 	   fp->tpa_queue_used);
 #endif
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5ba8af50c84f..c4daa068f1db 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2233,7 +2233,12 @@ struct shmem2_region {
 	u32 reserved3;				/* Offset 0x14C */
 	u32 reserved4;				/* Offset 0x150 */
 	u32 link_attr_sync[PORT_MAX];		/* Offset 0x154 */
-	#define LINK_ATTR_SYNC_KR2_ENABLE	(1<<0)
+	#define LINK_ATTR_SYNC_KR2_ENABLE	0x00000001
+	#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
+	#define LINK_SFP_EEPROM_COMP_CODE_SHIFT	8
+	#define LINK_SFP_EEPROM_COMP_CODE_SR	0x00001000
+	#define LINK_SFP_EEPROM_COMP_CODE_LR	0x00002000
+	#define LINK_SFP_EEPROM_COMP_CODE_LRM	0x00004000
 
 	u32 reserved5[2];
 	u32 reserved6[PORT_MAX];
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 53fb4fa61b40..549549eaf580 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
 			LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
+	#define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN	0x0
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
 	#define SFP_EEPROM_CON_TYPE_VAL_RJ45	0x22
 
 
-#define SFP_EEPROM_COMP_CODE_ADDR		0x3
-	#define SFP_EEPROM_COMP_CODE_SR_MASK	(1<<4)
-	#define SFP_EEPROM_COMP_CODE_LR_MASK	(1<<5)
-	#define SFP_EEPROM_COMP_CODE_LRM_MASK	(1<<6)
+#define SFP_EEPROM_10G_COMP_CODE_ADDR		0x3
+	#define SFP_EEPROM_10G_COMP_CODE_SR_MASK	(1<<4)
+	#define SFP_EEPROM_10G_COMP_CODE_LR_MASK	(1<<5)
+	#define SFP_EEPROM_10G_COMP_CODE_LRM_MASK	(1<<6)
+
+#define SFP_EEPROM_1G_COMP_CODE_ADDR		0x6
+	#define SFP_EEPROM_1G_COMP_CODE_SX	(1<<0)
+	#define SFP_EEPROM_1G_COMP_CODE_LX	(1<<1)
+	#define SFP_EEPROM_1G_COMP_CODE_CX	(1<<2)
+	#define SFP_EEPROM_1G_COMP_CODE_BASE_T	(1<<3)
 
 #define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
 	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
 				 reg_set[i].val);
 
 	/* Start KR2 work-around timer which handles BCM8073 link-parner */
-	vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
-	bnx2x_update_link_attr(params, vars->link_attr_sync);
+	params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
 }
 
 static void bnx2x_disable_kr2(struct link_params *params,
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params,
 	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
-	vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
-	bnx2x_update_link_attr(params, vars->link_attr_sync);
+	params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
 
 	vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
 }
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params,
 					~FEATURE_CONFIG_PFC_ENABLED;
 
 	if (SHMEM2_HAS(bp, link_attr_sync))
-		vars->link_attr_sync = SHMEM2_RD(bp,
+		params->link_attr_sync = SHMEM2_RD(bp,
 						 link_attr_sync[params->port]);
 
 	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u32 sync_offset = 0, phy_idx, media_types;
-	u8 gport, val[2], check_limiting_mode = 0;
+	u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0;
 	*edc_mode = EDC_MODE_LIMITING;
 	phy->media_type = ETH_PHY_UNSPECIFIED;
 	/* First check for copper cable */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
 					 I2C_DEV_ADDR_A0,
-					 SFP_EEPROM_CON_TYPE_ADDR,
-					 2,
+					 0,
+					 SFP_EEPROM_FC_TX_TECH_ADDR + 1,
 					 (u8 *)val) != 0) {
 		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
 		return -EINVAL;
 	}
-
-	switch (val[0]) {
+	params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
+	params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] <<
+		LINK_SFP_EEPROM_COMP_CODE_SHIFT;
+	bnx2x_update_link_attr(params, params->link_attr_sync);
+	switch (val[SFP_EEPROM_CON_TYPE_ADDR]) {
 	case SFP_EEPROM_CON_TYPE_VAL_COPPER:
 	{
 		u8 copper_module_type;
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 		/* Check if its active cable (includes SFP+ module)
 		 * of passive cable
 		 */
-		if (bnx2x_read_sfp_module_eeprom(phy,
-						 params,
-						 I2C_DEV_ADDR_A0,
-						 SFP_EEPROM_FC_TX_TECH_ADDR,
-						 1,
-						 &copper_module_type) != 0) {
-			DP(NETIF_MSG_LINK,
-			   "Failed to read copper-cable-type"
-			   " from SFP+ EEPROM\n");
-			return -EINVAL;
-		}
+		copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR];
 
 		if (copper_module_type &
 		    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 		}
 		break;
 	}
+	case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
 	case SFP_EEPROM_CON_TYPE_VAL_LC:
 	case SFP_EEPROM_CON_TYPE_VAL_RJ45:
 		check_limiting_mode = 1;
-		if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
-			       SFP_EEPROM_COMP_CODE_LR_MASK |
-			       SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+		if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] &
+		     (SFP_EEPROM_10G_COMP_CODE_SR_MASK |
+		      SFP_EEPROM_10G_COMP_CODE_LR_MASK |
+		      SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) {
 			DP(NETIF_MSG_LINK, "1G SFP module detected\n");
-			gport = params->port;
 			phy->media_type = ETH_PHY_SFP_1G_FIBER;
 			if (phy->req_line_speed != SPEED_1000) {
+				u8 gport = params->port;
 				phy->req_line_speed = SPEED_1000;
 				if (!CHIP_IS_E1x(bp)) {
 					gport = BP_PATH(bp) +
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 					  "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
 					  gport);
 			}
+			if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] &
+			    SFP_EEPROM_1G_COMP_CODE_BASE_T) {
+				bnx2x_sfp_set_transmitter(params, phy, 0);
+				msleep(40);
+				bnx2x_sfp_set_transmitter(params, phy, 1);
+			}
 		} else {
 			int idx, cfg_idx = 0;
 			DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 		break;
 	default:
 		DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
-		   val[0]);
+		   val[SFP_EEPROM_CON_TYPE_ADDR]);
 		return -EINVAL;
 	}
 	sync_offset = params->shmem_base +
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
 	sigdet = bnx2x_warpcore_get_sigdet(phy, params);
 	if (!sigdet) {
-		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
 			bnx2x_kr2_recovery(params, vars, phy);
 			DP(NETIF_MSG_LINK, "No sigdet\n");
 		}
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
 	/* CL73 has not begun yet */
 	if (base_page == 0) {
-		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+		if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
 			bnx2x_kr2_recovery(params, vars, phy);
 			DP(NETIF_MSG_LINK, "No BP\n");
 		}
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 		     ((next_page & 0xe0) == 0x20))));
 
 	/* In case KR2 is already disabled, check if we need to re-enable it */
-	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
+	if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
 		if (!not_kr2_device) {
 			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
 			   next_page);
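bnx2x_get_edc_mode() now issues a single EEPROM read covering bytes 0 through SFP_EEPROM_FC_TX_TECH_ADDR and indexes the result buffer by the per-field offsets, instead of one I2C transaction per field. A compilable sketch of the bulk-read-then-index pattern; the offsets mirror the constants above, while the byte values are invented for illustration:

#include <stdio.h>

#define CON_TYPE_ADDR		0x2
#define COMP_CODE_10G_ADDR	0x3
#define COMP_CODE_1G_ADDR	0x6
#define FC_TX_TECH_ADDR		0x8

int main(void)
{
	/* one transaction fetches offsets 0 .. FC_TX_TECH_ADDR */
	unsigned char val[FC_TX_TECH_ADDR + 1] = {
		0x03, 0x04, 0x07, 0x10, 0x00, 0x00, 0x01, 0x00, 0x04
	};

	printf("connector type 0x%02x\n", val[CON_TYPE_ADDR]);
	printf("10G comp code  0x%02x\n", val[COMP_CODE_10G_ADDR]);
	printf("1G comp code   0x%02x\n", val[COMP_CODE_1G_ADDR]);
	printf("fc tx tech     0x%02x\n", val[FC_TX_TECH_ADDR]);
	return 0;
}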
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 389f5f8cb0a3..d9cce4c3899b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -323,6 +323,9 @@ struct link_params {
 #define LINK_FLAGS_INT_DISABLED		(1<<0)
 #define PHY_INITIALIZED		(1<<1)
 	u32 lfa_base;
+
+	/* The same definitions as the shmem2 parameter */
+	u32 link_attr_sync;
 };
 
 /* Output parameters */
@@ -364,8 +367,6 @@ struct link_vars {
 	u8 rx_tx_asic_rst;
 	u8 turn_to_run_wc_rt;
 	u16 rsrv2;
-	/* The same definitions as the shmem2 parameter */
-	u32 link_attr_sync;
 };
 
 /***********************************************************/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c13364b6cc19..d1c093dcb054 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp)
 	bnx2x_release_phy_lock(bp);
 }
 
+static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
+{
+	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
+	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
+
+	/* make sure this value is 0 */
+	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
+	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
+}
+
+static void bnx2x_set_endianity(struct bnx2x *bp)
+{
+#ifdef __BIG_ENDIAN
+	bnx2x_config_endianity(bp, 1);
+#else
+	bnx2x_config_endianity(bp, 0);
+#endif
+}
+
+static void bnx2x_reset_endianity(struct bnx2x *bp)
+{
+	bnx2x_config_endianity(bp, 0);
+}
+
 /**
  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
  *
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
 	bnx2x_init_pxp(bp);
-
-#ifdef __BIG_ENDIAN
-	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
-	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
-	/* make sure this value is 0 */
-	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
-
-/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
-	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
-	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
-#endif
-
+	bnx2x_set_endianity(bp);
 	bnx2x_ilt_init_page_size(bp, INITOP_SET);
 
 	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
@@ -10052,6 +10067,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 }
 
 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
+#define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
+					0x1848 + ((f) << 4))
 #define BNX2X_PREV_UNDI_RCQ(val)	((val) & 0xffff)
 #define BNX2X_PREV_UNDI_BD(val)		((val) >> 16 & 0xffff)
 #define BNX2X_PREV_UNDI_PROD(rcq, bd)	((bd) << 16 | (rcq))
@@ -10059,8 +10076,6 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
 #define BCM_5710_UNDI_FW_MF_MAJOR	(0x07)
 #define BCM_5710_UNDI_FW_MF_MINOR	(0x08)
 #define BCM_5710_UNDI_FW_MF_VERS	(0x05)
-#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
-#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
 
 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
 {
@@ -10079,72 +10094,25 @@ static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
 	return false;
 }
 
-static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
-{
-	u8 major, minor, version;
-	u32 fw;
-
-	/* Must check that FW is loaded */
-	if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
-	      MISC_REGISTERS_RESET_REG_1_RST_XSEM)) {
-		BNX2X_DEV_INFO("XSEM is reset - UNDI MF FW is not loaded\n");
-		return false;
-	}
-
-	/* Read Currently loaded FW version */
-	fw = REG_RD(bp, XSEM_REG_PRAM);
-	major = fw & 0xff;
-	minor = (fw >> 0x8) & 0xff;
-	version = (fw >> 0x10) & 0xff;
-	BNX2X_DEV_INFO("Loaded FW: 0x%08x: Major 0x%02x Minor 0x%02x Version 0x%02x\n",
-		       fw, major, minor, version);
-
-	if (major > BCM_5710_UNDI_FW_MF_MAJOR)
-		return true;
-
-	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-	    (minor > BCM_5710_UNDI_FW_MF_MINOR))
-		return true;
-
-	if ((major == BCM_5710_UNDI_FW_MF_MAJOR) &&
-	    (minor == BCM_5710_UNDI_FW_MF_MINOR) &&
-	    (version >= BCM_5710_UNDI_FW_MF_VERS))
-		return true;
-
-	return false;
-}
-
-static void bnx2x_prev_unload_undi_mf(struct bnx2x *bp)
-{
-	int i;
-
-	/* Due to legacy (FW) code, the first function on each engine has a
-	 * different offset macro from the rest of the functions.
-	 * Setting this for all 8 functions is harmless regardless of whether
-	 * this is actually a multi-function device.
-	 */
-	for (i = 0; i < 2; i++)
-		REG_WR(bp, BNX2X_PREV_UNDI_MF_PORT(i), 1);
-
-	for (i = 2; i < 8; i++)
-		REG_WR(bp, BNX2X_PREV_UNDI_MF_FUNC(i - 2), 1);
-
-	BNX2X_DEV_INFO("UNDI FW (MF) set to discard\n");
-}
-
-static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
+static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
 {
 	u16 rcq, bd;
-	u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
+	u32 addr, tmp_reg;
+
+	if (BP_FUNC(bp) < 2)
+		addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
+	else
+		addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
 
+	tmp_reg = REG_RD(bp, addr);
 	rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
 	bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
 
 	tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
-	REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
+	REG_WR(bp, addr, tmp_reg);
 
-	BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
-		       port, bd, rcq);
+	BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
+		       BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
 }
 
 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
@@ -10383,7 +10351,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 	/* Reset should be performed after BRB is emptied */
 	if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
 		u32 timer_count = 1000;
-		bool need_write = true;
 
 		/* Close the MAC Rx to prevent BRB from filling up */
 		bnx2x_prev_unload_close_mac(bp, &mac_vals);
@@ -10420,20 +10387,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
 			else
 				timer_count--;
 
-			/* New UNDI FW supports MF and contains better
-			 * cleaning methods - might be redundant but harmless.
-			 */
-			if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) {
-				if (need_write) {
-					bnx2x_prev_unload_undi_mf(bp);
-					need_write = false;
-				}
-			} else if (prev_undi) {
-				/* If UNDI resides in memory,
-				 * manually increment it
-				 */
-				bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
-			}
+			/* If UNDI resides in memory, manually increment it */
+			if (prev_undi)
+				bnx2x_prev_unload_undi_inc(bp, 1);
+
 			udelay(10);
 		}
 
@@ -13227,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 	bnx2x_iov_remove_one(bp);
 
 	/* Power on: we can't let PCI layer write to us while we are in D3 */
-	if (IS_PF(bp))
+	if (IS_PF(bp)) {
 		bnx2x_set_power_state(bp, PCI_D0);
 
+		/* Set endianity registers to reset values in case next driver
+		 * boots in different endianty environment.
+		 */
+		bnx2x_reset_endianity(bp);
+	}
+
 	/* Disable MSI/MSI-X */
 	bnx2x_disable_msi(bp);
 
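The endianness hunks fold the open-coded register writes into one parameterised helper: big-endian hosts program the byte-swap registers to 1, little-endian hosts to 0, and __bnx2x_remove() now always restores 0 so a later driver instance started in a different endianness environment finds the chip in its reset state. A minimal sketch of the compile-time selection, with a print standing in for the REG_WR() sequence:

#include <stdio.h>

static void config_endianity(unsigned int val)
{
	printf("swap mode = %u\n", val);	/* stands in for the REG_WR() calls */
}

int main(void)
{
#ifdef __BIG_ENDIAN
	config_endianity(1);	/* probe path on big-endian builds */
#else
	config_endianity(0);	/* probe path on little-endian builds */
#endif
	config_endianity(0);	/* remove path: always back to the reset value */
	return 0;
}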
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 27861a6c7ca5..a6a9f284c8dd 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -31,7 +31,7 @@
 #include <linux/if_vlan.h>
 #include <linux/prefetch.h>
 #include <linux/random.h>
-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
 #define BCM_VLAN 1
 #endif
 #include <net/ip.h>
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
 			     struct dst_entry **dst)
 {
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_IPV6)
 	struct flowi6 fl6;
 
 	memset(&fl6, 0, sizeof(fl6));
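IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is built in (=y) or modular (=m), which is exactly what the two open-coded defined() tests were checking. A rough userspace re-creation of the mechanism for illustration — the real macros live in include/linux/kconfig.h, and Kconfig defines enabled options to 1:

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define IS_ENABLED(option) \
	(__is_defined(option) || __is_defined(option##_MODULE))

#define CONFIG_VLAN_8021Q_MODULE 1	/* pretend the VLAN code is a module */

int main(void)
{
	printf("%d\n", IS_ENABLED(CONFIG_VLAN_8021Q));	/* prints 1 (=m) */
	printf("%d\n", IS_ENABLED(CONFIG_IPV6));	/* prints 0 here */
	return 0;
}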
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 3f9d4de8173c..5cc9cae21ed5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -875,6 +875,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
875 int last_tx_cn, last_c_index, num_tx_bds; 875 int last_tx_cn, last_c_index, num_tx_bds;
876 struct enet_cb *tx_cb_ptr; 876 struct enet_cb *tx_cb_ptr;
877 struct netdev_queue *txq; 877 struct netdev_queue *txq;
878 unsigned int bds_compl;
878 unsigned int c_index; 879 unsigned int c_index;
879 880
880 /* Compute how many buffers are transmitted since last xmit call */ 881 /* Compute how many buffers are transmitted since last xmit call */
@@ -899,7 +900,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
899 /* Reclaim transmitted buffers */ 900 /* Reclaim transmitted buffers */
900 while (last_tx_cn-- > 0) { 901 while (last_tx_cn-- > 0) {
901 tx_cb_ptr = ring->cbs + last_c_index; 902 tx_cb_ptr = ring->cbs + last_c_index;
903 bds_compl = 0;
902 if (tx_cb_ptr->skb) { 904 if (tx_cb_ptr->skb) {
905 bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
903 dev->stats.tx_bytes += tx_cb_ptr->skb->len; 906 dev->stats.tx_bytes += tx_cb_ptr->skb->len;
904 dma_unmap_single(&dev->dev, 907 dma_unmap_single(&dev->dev,
905 dma_unmap_addr(tx_cb_ptr, dma_addr), 908 dma_unmap_addr(tx_cb_ptr, dma_addr),
@@ -916,7 +919,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
916 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); 919 dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
917 } 920 }
918 dev->stats.tx_packets++; 921 dev->stats.tx_packets++;
919 ring->free_bds += 1; 922 ring->free_bds += bds_compl;
920 923
921 last_c_index++; 924 last_c_index++;
922 last_c_index &= (num_tx_bds - 1); 925 last_c_index &= (num_tx_bds - 1);
@@ -1274,12 +1277,29 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1274 1277
1275 while ((rxpktprocessed < rxpkttoprocess) && 1278 while ((rxpktprocessed < rxpkttoprocess) &&
1276 (rxpktprocessed < budget)) { 1279 (rxpktprocessed < budget)) {
1280 cb = &priv->rx_cbs[priv->rx_read_ptr];
1281 skb = cb->skb;
1282
1283 rxpktprocessed++;
1284
1285 priv->rx_read_ptr++;
1286 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
1287
1288 /* We do not have a backing SKB, so we do not have a
1289 * corresponding DMA mapping for this incoming packet since
1290 * bcmgenet_rx_refill always either has both skb and mapping or
1291 * none.
1292 */
1293 if (unlikely(!skb)) {
1294 dev->stats.rx_dropped++;
1295 dev->stats.rx_errors++;
1296 goto refill;
1297 }
1298
1277 /* Unmap the packet contents such that we can use the 1299 /* Unmap the packet contents such that we can use the
1278 * RSV from the 64 bytes descriptor when enabled and save 1300 * RSV from the 64 bytes descriptor when enabled and save
1279 * a 32-bits register read 1301 * a 32-bits register read
1280 */ 1302 */
1281 cb = &priv->rx_cbs[priv->rx_read_ptr];
1282 skb = cb->skb;
1283 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr), 1303 dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
1284 priv->rx_buf_len, DMA_FROM_DEVICE); 1304 priv->rx_buf_len, DMA_FROM_DEVICE);
1285 1305
@@ -1307,18 +1327,6 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
1307 __func__, p_index, priv->rx_c_index, 1327 __func__, p_index, priv->rx_c_index,
1308 priv->rx_read_ptr, dma_length_status); 1328 priv->rx_read_ptr, dma_length_status);
1309 1329
1310 rxpktprocessed++;
1311
1312 priv->rx_read_ptr++;
1313 priv->rx_read_ptr &= (priv->num_rx_bds - 1);
1314
1315 /* out of memory, just drop packets at the hardware level */
1316 if (unlikely(!skb)) {
1317 dev->stats.rx_dropped++;
1318 dev->stats.rx_errors++;
1319 goto refill;
1320 }
1321
1322 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) { 1330 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1323 netif_err(priv, rx_status, dev, 1331 netif_err(priv, rx_status, dev,
1324 "dropping fragmented packet!\n"); 1332 "dropping fragmented packet!\n");
@@ -1736,13 +1744,63 @@ static void bcmgenet_init_multiq(struct net_device *dev)
1736 bcmgenet_tdma_writel(priv, reg, DMA_CTRL); 1744 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1737} 1745}
1738 1746
1747static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
1748{
1749 int ret = 0;
1750 int timeout = 0;
1751 u32 reg;
1752
1753 /* Disable TDMA to stop add more frames in TX DMA */
1754 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
1755 reg &= ~DMA_EN;
1756 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
1757
1758 /* Check TDMA status register to confirm TDMA is disabled */
1759 while (timeout++ < DMA_TIMEOUT_VAL) {
1760 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
1761 if (reg & DMA_DISABLED)
1762 break;
1763
1764 udelay(1);
1765 }
1766
1767 if (timeout == DMA_TIMEOUT_VAL) {
1768 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
1769 ret = -ETIMEDOUT;
1770 }
1771
1772 /* Wait 10ms for packet drain in both tx and rx dma */
1773 usleep_range(10000, 20000);
1774
1775 /* Disable RDMA */
1776 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
1777 reg &= ~DMA_EN;
1778 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
1779
1780 timeout = 0;
1781 /* Check RDMA status register to confirm RDMA is disabled */
1782 while (timeout++ < DMA_TIMEOUT_VAL) {
1783 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
1784 if (reg & DMA_DISABLED)
1785 break;
1786
1787 udelay(1);
1788 }
1789
1790 if (timeout == DMA_TIMEOUT_VAL) {
1791 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
1792 ret = -ETIMEDOUT;
1793 }
1794
1795 return ret;
1796}
1797
1739static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) 1798static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
1740{ 1799{
1741 int i; 1800 int i;
1742 1801
1743 /* disable DMA */ 1802 /* disable DMA */
1744 bcmgenet_rdma_writel(priv, 0, DMA_CTRL); 1803 bcmgenet_dma_teardown(priv);
1745 bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
1746 1804
1747 for (i = 0; i < priv->num_tx_bds; i++) { 1805 for (i = 0; i < priv->num_tx_bds; i++) {
1748 if (priv->tx_cbs[i].skb != NULL) { 1806 if (priv->tx_cbs[i].skb != NULL) {
@@ -2101,57 +2159,6 @@ err_clk_disable:
2101 return ret; 2159 return ret;
2102} 2160}
2103 2161
2104static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2105{
2106 int ret = 0;
2107 int timeout = 0;
2108 u32 reg;
2109
2110 /* Disable TDMA to stop add more frames in TX DMA */
2111 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2112 reg &= ~DMA_EN;
2113 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2114
2115 /* Check TDMA status register to confirm TDMA is disabled */
2116 while (timeout++ < DMA_TIMEOUT_VAL) {
2117 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2118 if (reg & DMA_DISABLED)
2119 break;
2120
2121 udelay(1);
2122 }
2123
2124 if (timeout == DMA_TIMEOUT_VAL) {
2125 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2126 ret = -ETIMEDOUT;
2127 }
2128
2129 /* Wait 10ms for packet drain in both tx and rx dma */
2130 usleep_range(10000, 20000);
2131
2132 /* Disable RDMA */
2133 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2134 reg &= ~DMA_EN;
2135 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2136
2137 timeout = 0;
2138 /* Check RDMA status register to confirm RDMA is disabled */
2139 while (timeout++ < DMA_TIMEOUT_VAL) {
2140 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2141 if (reg & DMA_DISABLED)
2142 break;
2143
2144 udelay(1);
2145 }
2146
2147 if (timeout == DMA_TIMEOUT_VAL) {
2148 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2149 ret = -ETIMEDOUT;
2150 }
2151
2152 return ret;
2153}
2154
2155static void bcmgenet_netif_stop(struct net_device *dev)
2156{
2157 struct bcmgenet_priv *priv = netdev_priv(dev);
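The bcmgenet_dma_teardown() helper introduced above (and now called from bcmgenet_fini_dma()) follows the usual MMIO quiesce idiom: clear the enable bit, then poll a status register until the hardware confirms the stop or a bounded timeout expires. A minimal sketch of that idiom; quiesce_block() and QUIESCE_TIMEOUT_US are illustrative stand-ins, while DMA_EN/DMA_DISABLED are the driver's own bits:

/* Sketch only: the register pointers and timeout name are hypothetical,
 * but the disable-then-poll shape matches bcmgenet_dma_teardown().
 */
static int quiesce_block(void __iomem *ctrl, void __iomem *status)
{
	int timeout = 0;
	u32 reg;

	reg = readl(ctrl);
	reg &= ~DMA_EN;			/* stop accepting new descriptors */
	writel(reg, ctrl);

	while (timeout++ < QUIESCE_TIMEOUT_US) {
		if (readl(status) & DMA_DISABLED)
			return 0;	/* hardware acknowledged the stop */
		udelay(1);
	}
	return -ETIMEDOUT;		/* caller logs and decides how to proceed */
}

Note that the teardown above warns but still proceeds through the RX half after a TX timeout, so both engines always get a chance to stop.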
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3ac5d23454a8..e7d3a620d96a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7914,8 +7914,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7914
7915 entry = tnapi->tx_prod;
7916 base_flags = 0;
7917 if (skb->ip_summed == CHECKSUM_PARTIAL)
7918 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7919
7920 mss = skb_shinfo(skb)->gso_size;
7921 if (mss) {
@@ -7929,6 +7927,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7929
7930 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7931
7930 /* HW/FW can not correctly segment packets that have been
7931 * vlan encapsulated.
7932 */
7933 if (skb->protocol == htons(ETH_P_8021Q) ||
7934 skb->protocol == htons(ETH_P_8021AD))
7935 return tg3_tso_bug(tp, tnapi, txq, skb);
7936
7932 if (!skb_is_gso_v6(skb)) {
7933 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7934 tg3_flag(tp, TSO_BUG))
@@ -7979,6 +7984,17 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7979 base_flags |= tsflags << 12;
7980 }
7981 }
7987 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7988 /* HW/FW can not correctly checksum packets that have been
7989 * vlan encapsulated.
7990 */
7991 if (skb->protocol == htons(ETH_P_8021Q) ||
7992 skb->protocol == htons(ETH_P_8021AD)) {
7993 if (skb_checksum_help(skb))
7994 goto drop;
7995 } else {
7996 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7997 }
7982 }
7983
7984 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
@@ -11617,6 +11633,12 @@ static int tg3_open(struct net_device *dev)
11617 struct tg3 *tp = netdev_priv(dev);
11618 int err;
11619
11636 if (tp->pcierr_recovery) {
11637 netdev_err(dev, "Failed to open device. PCI error recovery "
11638 "in progress\n");
11639 return -EAGAIN;
11640 }
11641
11620 if (tp->fw_needed) {
11621 err = tg3_request_firmware(tp);
11622 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
@@ -11674,6 +11696,12 @@ static int tg3_close(struct net_device *dev)
11674{
11675 struct tg3 *tp = netdev_priv(dev);
11676
11699 if (tp->pcierr_recovery) {
11700 netdev_err(dev, "Failed to close device. PCI error recovery "
11701 "in progress\n");
11702 return -EAGAIN;
11703 }
11704
11677 tg3_ptp_fini(tp);
11678
11679 tg3_stop(tp);
@@ -17561,6 +17589,7 @@ static int tg3_init_one(struct pci_dev *pdev,
17561 tp->rx_mode = TG3_DEF_RX_MODE;
17562 tp->tx_mode = TG3_DEF_TX_MODE;
17563 tp->irq_sync = 1;
17592 tp->pcierr_recovery = false;
17564
17565 if (tg3_debug > 0)
17566 tp->msg_enable = tg3_debug;
@@ -18071,6 +18100,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18071
18072 rtnl_lock();
18073
18103 tp->pcierr_recovery = true;
18104
18074 /* We probably don't have netdev yet */
18075 if (!netdev || !netif_running(netdev))
18076 goto done;
@@ -18195,6 +18226,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
18195 tg3_phy_start(tp);
18196
18197done:
18229 tp->pcierr_recovery = false;
18198 rtnl_unlock();
18199}
18200
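The tg3 hunks above add a single pcierr_recovery flag that serializes the AER callbacks against ndo_open/ndo_close: tg3_io_error_detected() sets it under rtnl_lock(), tg3_io_resume() clears it, and open/close refuse with -EAGAIN in between. Condensed to its essentials (a sketch with a hypothetical function name, not the full driver paths):

/* Guard sketch: tp->pcierr_recovery is only ever flipped under rtnl_lock(),
 * and open/close also run under RTNL, so a plain bool suffices here.
 */
static int tg3_open_guarded(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->pcierr_recovery)
		return -EAGAIN;	/* retry once tg3_io_resume() has run */

	/* ... normal open path ... */
	return 0;
}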
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 461accaf0aa4..31c9f8295953 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3407,6 +3407,7 @@ struct tg3 {
3407
3408 struct device *hwmon_dev;
3409 bool link_up;
3410 bool pcierr_recovery;
3410};
3411
3412/* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff8cae5e2535..ffc92a41d75b 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2506 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2507 * excluding the length field.
2508 */
2509 if (skb->protocol == htons(ETH_P_IP)) {
2509 if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2510 struct iphdr *iph = ip_hdr(skb);
2511
2512 /* Do we really need these? */
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2870 }
2871
2872 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2873 __be16 net_proto = vlan_get_protocol(skb);
2873 u8 proto = 0;
2874
2875 if (skb->protocol == htons(ETH_P_IP))
2876 if (net_proto == htons(ETH_P_IP))
2876 proto = ip_hdr(skb)->protocol;
2877#ifdef NETIF_F_IPV6_CSUM
2878 else if (skb->protocol == htons(ETH_P_IPV6)) {
2879 else if (net_proto == htons(ETH_P_IPV6)) {
2879 /* nexthdr may not be TCP immediately. */
2880 proto = ipv6_hdr(skb)->nexthdr;
2881 }
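For a VLAN-tagged frame, skb->protocol holds the tag EtherType (ETH_P_8021Q/ETH_P_8021AD) rather than the encapsulated L3 protocol, so the old tests silently skipped checksum/TSO setup for tagged IPv4 and IPv6 traffic. vlan_get_protocol() from <linux/if_vlan.h> returns the inner EtherType instead, which is why this and several later hunks (ehea, e1000, i40e, i40evf) switch to it. A small sketch of the distinction, with an illustrative helper name:

#include <linux/if_vlan.h>

/* Sketch: true for plain and VLAN-tagged IPv4 frames alike, which a
 * direct skb->protocol comparison would miss for tagged ones.
 */
static bool frame_carries_ipv4(const struct sk_buff *skb)
{
	return vlan_get_protocol(skb) == htons(ETH_P_IP);
}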
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 184a063bed5f..07d2201530d2 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,7 @@
1config NET_CALXEDA_XGMAC
2 tristate "Calxeda 1G/10G XGMAC Ethernet driver"
3 depends on HAS_IOMEM && HAS_DMA
4 depends on ARCH_HIGHBANK || COMPILE_TEST
4 select CRC32
5 help
6 This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index d57282172ea5..c067b7888ac4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -652,6 +652,7 @@ struct adapter {
652 struct tid_info tids;
653 void **tid_release_head;
654 spinlock_t tid_release_lock;
655 struct workqueue_struct *workq;
655 struct work_struct tid_release_task;
656 struct work_struct db_full_task;
657 struct work_struct db_drop_task;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 1afee70ce856..e5be511a3c38 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -643,8 +643,6 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
643 return ret;
644}
645
646static struct workqueue_struct *workq;
647
648/**
649 * link_start - enable a port
650 * @dev: the port to enable
@@ -1255,7 +1253,9 @@ freeout: t4_free_sge_resources(adap);
1255 goto freeout;
1256 }
1257
1258 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1256 t4_write_reg(adap, is_t4(adap->params.chip) ?
1257 MPS_TRC_RSS_CONTROL :
1258 MPS_T5_TRC_RSS_CONTROL,
1259 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1260 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1261 return 0;
@@ -1763,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1763 0xd004, 0xd03c,
1764 0xdfc0, 0xdfe0,
1765 0xe000, 0xea7c,
1766 0xf000, 0x11190,
1766 0xf000, 0x11110,
1767 0x11118, 0x11190,
1767 0x19040, 0x1906c,
1768 0x19078, 0x19080,
1769 0x1908c, 0x19124,
@@ -1970,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1970 0xd004, 0xd03c,
1971 0xdfc0, 0xdfe0,
1972 0xe000, 0x11088,
1973 0x1109c, 0x1117c,
1974 0x1109c, 0x11110,
1975 0x11118, 0x1117c,
1974 0x11190, 0x11204,
1975 0x19040, 0x1906c,
1976 0x19078, 0x19080,
@@ -3340,7 +3342,7 @@ static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3340 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3341 if (!adap->tid_release_task_busy) {
3342 adap->tid_release_task_busy = true;
3343 queue_work(workq, &adap->tid_release_task);
3345 queue_work(adap->workq, &adap->tid_release_task);
3344 }
3345 spin_unlock_bh(&adap->tid_release_lock);
3346}
@@ -4140,7 +4142,7 @@ void t4_db_full(struct adapter *adap)
4140 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4141 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4142 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4143 queue_work(workq, &adap->db_full_task);
4145 queue_work(adap->workq, &adap->db_full_task);
4144 }
4145}
4146
@@ -4150,7 +4152,7 @@ void t4_db_dropped(struct adapter *adap)
4150 disable_dbs(adap);
4151 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4152 }
4153 queue_work(workq, &adap->db_drop_task);
4155 queue_work(adap->workq, &adap->db_drop_task);
4154}
4155
4156static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -5957,7 +5959,8 @@ static int adap_init0(struct adapter *adap)
5957 params[3] = FW_PARAM_PFVF(CQ_END);
5958 params[4] = FW_PARAM_PFVF(OCQ_START);
5959 params[5] = FW_PARAM_PFVF(OCQ_END);
5960 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
5962 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5963 val);
5961 if (ret < 0)
5962 goto bye;
5963 adap->vres.qp.start = val[0];
@@ -5969,7 +5972,8 @@ static int adap_init0(struct adapter *adap)
5969
5970 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5971 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5972 ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
5975 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
5976 val);
5973 if (ret < 0) {
5974 adap->params.max_ordird_qp = 8;
5975 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
@@ -6474,6 +6478,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6474 struct port_info *pi;
6475 bool highdma = false;
6476 struct adapter *adapter = NULL;
6481 void __iomem *regs;
6477
6478 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6479
@@ -6490,19 +6495,35 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6490 goto out_release_regions;
6491 }
6492
6498 regs = pci_ioremap_bar(pdev, 0);
6499 if (!regs) {
6500 dev_err(&pdev->dev, "cannot map device registers\n");
6501 err = -ENOMEM;
6502 goto out_disable_device;
6503 }
6504
6505 /* We control everything through one PF */
6506 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6507 if (func != ent->driver_data) {
6508 iounmap(regs);
6509 pci_disable_device(pdev);
6510 pci_save_state(pdev); /* to restore SR-IOV later */
6511 goto sriov;
6512 }
6513
6493 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6494 highdma = true;
6495 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6496 if (err) {
6497 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6498 "coherent allocations\n");
6499 goto out_disable_device;
6520 goto out_unmap_bar0;
6500 }
6501 } else {
6502 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6503 if (err) {
6504 dev_err(&pdev->dev, "no usable DMA configuration\n");
6505 goto out_disable_device;
6526 goto out_unmap_bar0;
6506 }
6507 }
6508
@@ -6514,26 +6535,19 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6514 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6515 if (!adapter) {
6516 err = -ENOMEM;
6517 goto out_disable_device;
6538 goto out_unmap_bar0;
6518 }
6519
6520 /* PCI device has been enabled */
6521 adapter->flags |= DEV_ENABLED;
6541 adapter->workq = create_singlethread_workqueue("cxgb4");
6542 if (!adapter->workq) {
6522
6523 adapter->regs = pci_ioremap_bar(pdev, 0);
6524 if (!adapter->regs) {
6525 dev_err(&pdev->dev, "cannot map device registers\n");
6526 err = -ENOMEM;
6527 goto out_free_adapter;
6528 }
6529
6530 /* We control everything through one PF */
6531 func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
6547 /* PCI device has been enabled */
6548 adapter->flags |= DEV_ENABLED;
6532 if (func != ent->driver_data) {
6533 pci_save_state(pdev); /* to restore SR-IOV later */
6534 goto sriov;
6535 }
6536
6550 adapter->regs = regs;
6537 adapter->pdev = pdev;
6538 adapter->pdev_dev = &pdev->dev;
6539 adapter->mbox = func;
@@ -6550,7 +6564,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6550
6551 err = t4_prep_adapter(adapter);
6552 if (err)
6553 goto out_unmap_bar0;
6567 goto out_free_adapter;
6568
6554
6555 if (!is_t4(adapter->params.chip)) {
6556 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
@@ -6567,14 +6582,14 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6567 dev_err(&pdev->dev,
6568 "Incorrect number of egress queues per page\n");
6569 err = -EINVAL;
6570 goto out_unmap_bar0;
6585 goto out_free_adapter;
6571 }
6572 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6573 pci_resource_len(pdev, 2));
6574 if (!adapter->bar2) {
6575 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6576 err = -ENOMEM;
6577 goto out_unmap_bar0;
6592 goto out_free_adapter;
6578 }
6579 }
6580
@@ -6712,10 +6727,13 @@ sriov:
6712 out_unmap_bar:
6713 if (!is_t4(adapter->params.chip))
6714 iounmap(adapter->bar2);
6715 out_unmap_bar0:
6716 iounmap(adapter->regs);
6717 out_free_adapter:
6731 if (adapter->workq)
6732 destroy_workqueue(adapter->workq);
6733
6718 kfree(adapter);
6735 out_unmap_bar0:
6736 iounmap(regs);
6719 out_disable_device:
6720 pci_disable_pcie_error_reporting(pdev);
6721 pci_disable_device(pdev);
@@ -6736,6 +6754,11 @@ static void remove_one(struct pci_dev *pdev)
6736 if (adapter) {
6737 int i;
6738
6757 /* Tear down per-adapter Work Queue first since it can contain
6758 * references to our adapter data structure.
6759 */
6760 destroy_workqueue(adapter->workq);
6761
6739 if (is_offload(adapter))
6740 detach_ulds(adapter);
6741
@@ -6788,20 +6811,14 @@ static int __init cxgb4_init_module(void)
6788{
6789 int ret;
6790
6791 workq = create_singlethread_workqueue("cxgb4");
6792 if (!workq)
6793 return -ENOMEM;
6794
6795 /* Debugfs support is optional, just warn if this fails */
6796 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6797 if (!cxgb4_debugfs_root)
6798 pr_warn("could not create debugfs entry, continuing\n");
6799
6800 ret = pci_register_driver(&cxgb4_driver);
6801 if (ret < 0) {
6820 if (ret < 0)
6802 debugfs_remove(cxgb4_debugfs_root);
6803 destroy_workqueue(workq);
6804 }
6805
6806 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6807
@@ -6813,8 +6830,6 @@ static void __exit cxgb4_cleanup_module(void)
6813 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6814 pci_unregister_driver(&cxgb4_driver);
6815 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6816 flush_workqueue(workq);
6817 destroy_workqueue(workq);
6818}
6819
6820module_init(cxgb4_init_module);
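Moving the workqueue from a file-scope static into struct adapter ties its lifetime to the device rather than to the module: it is allocated in init_one() before any work can be queued, and remove_one() destroys it first because queued work items dereference the adapter. The ownership pattern reduced to a sketch (my_dev/my_probe/my_remove are illustrative names, not driver symbols):

struct my_dev {
	struct workqueue_struct *workq;
	struct work_struct task;
};

static int my_probe(struct my_dev *d)
{
	d->workq = create_singlethread_workqueue("mydev");
	if (!d->workq)
		return -ENOMEM;
	return 0;
}

static void my_remove(struct my_dev *d)
{
	/* destroy_workqueue() flushes pending work before freeing the queue,
	 * which is why the separate flush_workqueue() call could be dropped
	 * from cxgb4_cleanup_module() above.
	 */
	destroy_workqueue(d->workq);
}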
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b0bba32d69d5..d22d728d4e5c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2303,7 +2303,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2303 FW_EQ_ETH_CMD_PFN(adap->fn) | FW_EQ_ETH_CMD_VFN(0));
2304 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
2305 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
2306 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2306 c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE |
2307 FW_EQ_ETH_CMD_VIID(pi->viid));
2307 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2308 FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2309 FW_EQ_ETH_CMD_FETCHRO(1) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a853133d8db8..41d04462b72e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
168}
169
170/*
171 * t4_report_fw_error - report firmware error
172 * @adap: the adapter
173 *
174 * The adapter firmware can indicate error conditions to the host.
175 * If the firmware has indicated an error, print out the reason for
176 * the firmware error.
177 */
178static void t4_report_fw_error(struct adapter *adap)
179{
180 static const char *const reason[] = {
181 "Crash", /* PCIE_FW_EVAL_CRASH */
182 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
183 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
184 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
185 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
186 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
187 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
188 "Reserved", /* reserved */
189 };
190 u32 pcie_fw;
191
192 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
193 if (pcie_fw & FW_PCIE_FW_ERR)
194 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
195 reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
196}
197
198/*
171 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
172 */
173static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
300 dump_mbox(adap, mbox, data_reg);
301 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
302 *(const u8 *)cmd, mbox);
331 t4_report_fw_error(adap);
303 return -ETIMEDOUT;
304}
305
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
566#define VPD_BASE 0x400
567#define VPD_BASE_OLD 0
568#define VPD_LEN 1024
598#define CHELSIO_VPD_UNIQUE_ID 0x82
569
570/**
571 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
603 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
604 if (ret < 0)
605 goto out;
606 addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
636
637 /* The VPD shall have a unique identifier specified by the PCI SIG.
638 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
639 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
640 * is expected to automatically put this entry at the
641 * beginning of the VPD.
642 */
643 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
607
608 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
609 if (ret < 0)
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
667 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
668 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
669 strim(p->sn);
707 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
670 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
671 strim(p->pn);
672
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter)
1394
1395 int fat;
1396
1397 fat = t4_handle_intr_status(adapter,
1398 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1399 sysbus_intr_info) +
1400 t4_handle_intr_status(adapter,
1401 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1402 pcie_port_intr_info) +
1403 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1404 is_t4(adapter->params.chip) ?
1405 pcie_intr_info : t5_pcie_intr_info);
1435 if (is_t4(adapter->params.chip))
1436 fat = t4_handle_intr_status(adapter,
1437 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
1438 sysbus_intr_info) +
1439 t4_handle_intr_status(adapter,
1440 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1441 pcie_port_intr_info) +
1442 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1443 pcie_intr_info);
1444 else
1445 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1446 t5_pcie_intr_info);
1406
1407 if (fat)
1408 t4_fatal_err(adapter);
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter)
1521
1522 int fat;
1523
1565 if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
1566 t4_report_fw_error(adapter);
1567
1524 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
1525 cim_intr_info) +
1526 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap)
1768{
1769 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
1770
1771 if (status & MEM_PERR_INT_CAUSE)
1815 if (status & MEM_PERR_INT_CAUSE) {
1772 dev_alert(adap->pdev_dev,
1773 "MA parity error, parity status %#x\n",
1774 t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
1819 if (is_t5(adap->params.chip))
1820 dev_alert(adap->pdev_dev,
1821 "MA parity error, parity status %#x\n",
1822 t4_read_reg(adap,
1823 MA_PARITY_ERROR_STATUS2));
1824 }
1775 if (status & MEM_WRAP_INT_CAUSE) {
1776 v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
1777 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
@@ -2733,12 +2783,16 @@ retry:
2733 /*
2734 * Issue the HELLO command to the firmware. If it's not successful
2735 * but indicates that we got a "busy" or "timeout" condition, retry
2736 * the HELLO until we exhaust our retry limit.
2786 * the HELLO until we exhaust our retry limit. If we do exceed our
2787 * retry limit, check to see if the firmware left us any error
2788 * information and report that if so.
2737 */
2738 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2739 if (ret < 0) {
2740 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2741 goto retry;
2794 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2795 t4_report_fw_error(adap);
2742 return ret;
2743 }
2744
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
3742 lc->link_ok = link_ok;
3743 lc->speed = speed;
3744 lc->fc = fc;
3799 lc->supported = be16_to_cpu(p->u.info.pcap);
3745 t4_os_link_changed(adap, port, link_ok);
3746 }
3747 if (mod != pi->mod_type) {
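t4_report_fw_error() turns the PCIE_FW scratch register into a readable diagnostic: an error flag bit plus a 3-bit EVAL field (the FW_PCIE_FW_EVAL_* masks added to t4fw_api.h below) indexing the reason table. A decode sketch using those masks; the position of the error flag bit is my reading of FW_PCIE_FW_ERR and is stated here as an assumption:

/* Returns the EVAL reason code, or -1 if no firmware error is latched. */
static inline int pcie_fw_eval(u32 pcie_fw)
{
	if (!(pcie_fw & (1U << 31)))	/* assumed FW_PCIE_FW_ERR position */
		return -1;
	return (pcie_fw >> 24) & 0x7;	/* FW_PCIE_FW_EVAL_GET() equivalent */
}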
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index e3146e83df20..39fb325474f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -511,6 +511,7 @@
511#define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
512#define MA_PCIE_FW 0x30b8
513#define MA_PARITY_ERROR_STATUS 0x77f4
514#define MA_PARITY_ERROR_STATUS2 0x7804
514
515#define MA_EXT_MEMORY1_BAR 0x7808
516#define EDC_0_BASE_ADDR 0x7900
@@ -959,6 +960,7 @@
959#define TRCMULTIFILTER 0x00000001U
960
961#define MPS_TRC_RSS_CONTROL 0x9808
963#define MPS_T5_TRC_RSS_CONTROL 0xa00c
962#define RSSCONTROL_MASK 0x00ff0000U
963#define RSSCONTROL_SHIFT 16
964#define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0549170d7e2e..3409756a85b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -1227,6 +1227,7 @@ struct fw_eq_eth_cmd {
1227#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
1228#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
1229
1230#define FW_EQ_ETH_CMD_AUTOEQUEQE (1U << 30)
1230#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
1231
1232struct fw_eq_ctrl_cmd {
@@ -2227,6 +2228,10 @@ struct fw_debug_cmd {
2227#define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT)
2228#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
2229 FW_PCIE_FW_MASTER_MASK)
2231#define FW_PCIE_FW_EVAL_MASK 0x7
2232#define FW_PCIE_FW_EVAL_SHIFT 24
2233#define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \
2234 FW_PCIE_FW_EVAL_MASK)
2230
2231struct fw_hdr {
2232 u8 ver;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index bdfa80ca5e31..a5fb9493dee8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2250,7 +2250,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2250 cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC |
2251 FW_EQ_ETH_CMD_EQSTART |
2252 FW_LEN16(cmd));
2253 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_VIID(pi->viid));
2253 cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE |
2254 FW_EQ_ETH_CMD_VIID(pi->viid));
2254 cmd.fetchszm_to_iqid =
2255 cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE(SGE_HOSTFCMODE_STPG) |
2256 FW_EQ_ETH_CMD_PCIECHN(pi->port_id) |
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9b33057a9477..70089c29d307 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1399,7 +1399,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
1399 const void *mac_addr;
1400
1401 if (!IS_ENABLED(CONFIG_OF) || !np)
1402 return NULL;
1402 return ERR_PTR(-ENXIO);
1403
1404 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
1405 if (!pdata)
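Returning NULL from dm9000_parse_dt() conflated "no DT data" with a real failure; ERR_PTR(-ENXIO) encodes the errno in the pointer so the caller can tell the cases apart with IS_ERR()/PTR_ERR(). The canonical consumption shape (a sketch, not the exact dm9000_probe() logic):

pdata = dm9000_parse_dt(&pdev->dev);
if (IS_ERR(pdata)) {
	/* -ENXIO here means "no usable DT node": fall back or fail */
	if (PTR_ERR(pdata) != -ENXIO)
		return PTR_ERR(pdata);
	pdata = dev_get_platdata(&pdev->dev);
}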
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 9f7fa644a397..ee41d98b44b6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -275,6 +275,9 @@ struct fec_enet_private {
275 struct clk *clk_enet_out;
276 struct clk *clk_ptp;
277
278 bool ptp_clk_on;
279 struct mutex ptp_clk_mutex;
280
278 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
279 unsigned char *tx_bounce[TX_RING_SIZE];
280 struct sk_buff *tx_skbuff[TX_RING_SIZE];
@@ -335,7 +338,7 @@ struct fec_enet_private {
335 u32 cycle_speed;
336 int hwts_rx_en;
337 int hwts_tx_en;
338 struct timer_list time_keep;
341 struct delayed_work time_keep;
339 struct regulator *reg_phy;
340};
341
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4f87dffcb9b2..89355a719625 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1611,17 +1611,27 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1611 goto failed_clk_enet_out;
1612 }
1613 if (fep->clk_ptp) {
1614 mutex_lock(&fep->ptp_clk_mutex);
1614 ret = clk_prepare_enable(fep->clk_ptp);
1615 if (ret)
1616 if (ret) {
1617 mutex_unlock(&fep->ptp_clk_mutex);
1616 goto failed_clk_ptp;
1619 } else {
1620 fep->ptp_clk_on = true;
1621 }
1622 mutex_unlock(&fep->ptp_clk_mutex);
1617 }
1618 } else {
1619 clk_disable_unprepare(fep->clk_ahb);
1620 clk_disable_unprepare(fep->clk_ipg);
1621 if (fep->clk_enet_out)
1622 clk_disable_unprepare(fep->clk_enet_out);
1623 if (fep->clk_ptp)
1629 if (fep->clk_ptp) {
1630 mutex_lock(&fep->ptp_clk_mutex);
1624 clk_disable_unprepare(fep->clk_ptp);
1632 fep->ptp_clk_on = false;
1633 mutex_unlock(&fep->ptp_clk_mutex);
1634 }
1625 }
1626
1627 return 0;
@@ -2625,6 +2635,8 @@ fec_probe(struct platform_device *pdev)
2625 if (IS_ERR(fep->clk_enet_out))
2626 fep->clk_enet_out = NULL;
2627
2638 fep->ptp_clk_on = false;
2639 mutex_init(&fep->ptp_clk_mutex);
2628 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
2629 fep->bufdesc_ex =
2630 pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX;
@@ -2715,10 +2727,10 @@ fec_drv_remove(struct platform_device *pdev)
2715 struct net_device *ndev = platform_get_drvdata(pdev);
2716 struct fec_enet_private *fep = netdev_priv(ndev);
2717
2730 cancel_delayed_work_sync(&fep->time_keep);
2718 cancel_work_sync(&fep->tx_timeout_work);
2719 unregister_netdev(ndev);
2720 fec_enet_mii_remove(fep);
2721 del_timer_sync(&fep->time_keep);
2722 if (fep->reg_phy)
2723 regulator_disable(fep->reg_phy);
2724 if (fep->ptp_clock)
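The new ptp_clk_on flag and ptp_clk_mutex answer one question race-free: is clk_ptp currently ungated? The PTP register block is dead while the clock is off (interface down), so every PTP callback must check the flag under the mutex before touching hardware, and the enable/disable path updates flag and clock under the same mutex. Consumer-side shape, as a fragment sketch of what fec_ptp_settime() now does:

/* fep is the driver private struct from the hunks above. */
mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {
	mutex_unlock(&fep->ptp_clk_mutex);
	return -EINVAL;		/* clock gated: registers unreadable */
}
/* ... timecounter/register access under tmreg_lock ... */
mutex_unlock(&fep->ptp_clk_mutex);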
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 82386b29914a..cca3617a2321 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -245,12 +245,20 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
245 u64 ns;
246 unsigned long flags;
247
248 mutex_lock(&fep->ptp_clk_mutex);
249 /* Check the ptp clock */
250 if (!fep->ptp_clk_on) {
251 mutex_unlock(&fep->ptp_clk_mutex);
252 return -EINVAL;
253 }
254
248 ns = ts->tv_sec * 1000000000ULL;
249 ns += ts->tv_nsec;
250
251 spin_lock_irqsave(&fep->tmreg_lock, flags);
252 timecounter_init(&fep->tc, &fep->cc, ns);
253 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
261 mutex_unlock(&fep->ptp_clk_mutex);
254 return 0;
255}
256
@@ -338,17 +346,22 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
338 * fec_time_keep - call timecounter_read every second to avoid timer overrun,
339 * because the ENET hardware has only a 32-bit counter, which wraps in about 4s
340 */
341static void fec_time_keep(unsigned long _data)
349static void fec_time_keep(struct work_struct *work)
342{
343 struct fec_enet_private *fep = (struct fec_enet_private *)_data;
351 struct delayed_work *dwork = to_delayed_work(work);
352 struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
344 u64 ns;
345 unsigned long flags;
346
347 spin_lock_irqsave(&fep->tmreg_lock, flags);
356 mutex_lock(&fep->ptp_clk_mutex);
348 ns = timecounter_read(&fep->tc);
357 if (fep->ptp_clk_on) {
349 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
358 spin_lock_irqsave(&fep->tmreg_lock, flags);
359 ns = timecounter_read(&fep->tc);
360 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
361 }
362 mutex_unlock(&fep->ptp_clk_mutex);
350
351 mod_timer(&fep->time_keep, jiffies + HZ);
364 schedule_delayed_work(&fep->time_keep, HZ);
352}
353
354/**
@@ -386,15 +399,13 @@ void fec_ptp_init(struct platform_device *pdev)
386
387 fec_ptp_start_cyclecounter(ndev);
388
389 init_timer(&fep->time_keep);
402 INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
390 fep->time_keep.data = (unsigned long)fep;
391 fep->time_keep.function = fec_time_keep;
392 fep->time_keep.expires = jiffies + HZ;
393 add_timer(&fep->time_keep);
394
395 fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
396 if (IS_ERR(fep->ptp_clock)) {
397 fep->ptp_clock = NULL;
398 pr_err("ptp_clock_register failed\n");
399 }
409
410 schedule_delayed_work(&fep->time_keep, HZ);
400}
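The timer-to-delayed_work conversion is forced by the mutex above: fec_time_keep() now sleeps, which is illegal in the softirq context a timer_list callback runs in, while delayed work runs in process context. The self-rearming pattern in isolation (keep_alive is an illustrative name):

static void keep_alive(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* may sleep here: mutex_lock(), clk operations, ... */

	schedule_delayed_work(dwork, HZ);	/* rearm one second out */
}

/* setup:    INIT_DELAYED_WORK(&dw, keep_alive);
 *           schedule_delayed_work(&dw, HZ);
 * teardown: cancel_delayed_work_sync(&dw);  (see fec_drv_remove above)
 */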
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index a0b418e007a0..566b17db135a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1994{
1995 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1996
1997 if (skb->protocol != htons(ETH_P_IP))
1997 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1998 return;
1999
2000 if (skb->ip_summed == CHECKSUM_PARTIAL)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c9127562bd22..21978cc019e7 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -292,6 +292,18 @@ failure:
292 atomic_add(buffers_added, &(pool->available));
293}
294
295/*
296 * The final 8 bytes of the buffer list is a counter of frames dropped
297 * because there was not a buffer in the buffer list capable of holding
298 * the frame.
299 */
300static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
301{
302 __be64 *p = adapter->buffer_list_addr + 4096 - 8;
303
304 adapter->rx_no_buffer = be64_to_cpup(p);
305}
306
295/* replenish routine */
296static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
297{
@@ -307,8 +319,7 @@ static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
307 ibmveth_replenish_buffer_pool(adapter, pool);
308 }
309
310 adapter->rx_no_buffer = *(u64 *)(((char*)adapter->buffer_list_addr) +
322 ibmveth_update_rx_no_buffer(adapter);
311 4096 - 8);
312}
313
314/* empty and free a buffer pool - also used to do cleanup in error paths */
@@ -698,8 +709,7 @@ static int ibmveth_close(struct net_device *netdev)
698
699 free_irq(netdev->irq, netdev);
700
701 adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
712 ibmveth_update_rx_no_buffer(adapter);
702 4096 - 8);
703
704 ibmveth_cleanup(adapter);
705
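The ibmveth helper both removes the duplicated pointer arithmetic and fixes an endianness bug: the hypervisor stores the dropped-frame counter big-endian in the last 8 bytes of the 4 KiB buffer list page, and the old bare *(u64 *) read only happened to work on big-endian hosts. The fixed access in isolation (function name illustrative):

#include <asm/byteorder.h>

static u64 read_rx_no_buffer(void *buffer_list_addr)
{
	/* the counter lives in the final 8 bytes of the 4 KiB page */
	__be64 *p = buffer_list_addr + 4096 - 8;

	return be64_to_cpup(p);	/* explicit big-endian to host conversion */
}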
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cbc330b301cd..ad3d5d12173f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2674,7 +2674,8 @@ set_itr_now:
2674#define E1000_TX_FLAGS_VLAN_SHIFT 16
2675
2676static int e1000_tso(struct e1000_adapter *adapter,
2677 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2677 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2678 __be16 protocol)
2678{
2679 struct e1000_context_desc *context_desc;
2680 struct e1000_buffer *buffer_info;
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
2692
2693 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2694 mss = skb_shinfo(skb)->gso_size;
2695 if (skb->protocol == htons(ETH_P_IP)) {
2696 if (protocol == htons(ETH_P_IP)) {
2696 struct iphdr *iph = ip_hdr(skb);
2697 iph->tot_len = 0;
2698 iph->check = 0;
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter,
2702 0);
2703 cmd_length = E1000_TXD_CMD_IP;
2704 ipcse = skb_transport_offset(skb) - 1;
2705 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2706 } else if (skb_is_gso_v6(skb)) {
2706 ipv6_hdr(skb)->payload_len = 0;
2707 tcp_hdr(skb)->check =
2708 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter,
2745}
2746
2747static bool e1000_tx_csum(struct e1000_adapter *adapter,
2748 struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2749 struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2750 __be16 protocol)
2749{
2750 struct e1000_context_desc *context_desc;
2751 struct e1000_buffer *buffer_info;
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2756 if (skb->ip_summed != CHECKSUM_PARTIAL)
2757 return false;
2758
2759 switch (skb->protocol) {
2761 switch (protocol) {
2760 case cpu_to_be16(ETH_P_IP):
2761 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2762 cmd_len |= E1000_TXD_CMD_TCP;
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3097 int count = 0;
3098 int tso;
3099 unsigned int f;
3102 __be16 protocol = vlan_get_protocol(skb);
3100
3101 /* This goes back to the question of how to logically map a Tx queue
3102 * to a flow. Right now, performance is impacted slightly negatively
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3210
3211 first = tx_ring->next_to_use;
3212
3213 tso = e1000_tso(adapter, tx_ring, skb);
3216 tso = e1000_tso(adapter, tx_ring, skb, protocol);
3214 if (tso < 0) {
3215 dev_kfree_skb_any(skb);
3216 return NETDEV_TX_OK;
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3220 if (likely(hw->mac_type != e1000_82544))
3221 tx_ring->last_tx_tso = true;
3222 tx_flags |= E1000_TX_FLAGS_TSO;
3223 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3226 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3224 tx_flags |= E1000_TX_FLAGS_CSUM;
3225
3226 if (likely(skb->protocol == htons(ETH_P_IP)))
3229 if (protocol == htons(ETH_P_IP))
3227 tx_flags |= E1000_TX_FLAGS_IPV4;
3228
3229 if (unlikely(skb->no_fcs))
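Rather than re-deriving the EtherType in each helper, e1000_xmit_frame() now resolves it once with vlan_get_protocol() and threads the result through e1000_tso() and e1000_tx_csum(), so TSO, checksum offload, and the IPv4 flag all agree even for VLAN-tagged frames (the e1000e hunks below apply the same treatment and drop the now-redundant local unwrapping). Call shape as a fragment sketch:

__be16 protocol = vlan_get_protocol(skb);	/* resolved once */
int tso;

tso = e1000_tso(adapter, tx_ring, skb, protocol);
/* ... error handling elided ... */
if (!tso && e1000_tx_csum(adapter, tx_ring, skb, protocol))
	tx_flags |= E1000_TX_FLAGS_CSUM;
if (protocol == htons(ETH_P_IP))
	tx_flags |= E1000_TX_FLAGS_IPV4;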
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 65c3aef2bd36..247335d2c7ec 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5164,7 +5164,8 @@ link_up:
5164#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5165#define E1000_TX_FLAGS_VLAN_SHIFT 16
5166
5167static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5167static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
5168 __be16 protocol)
5168{
5169 struct e1000_context_desc *context_desc;
5170 struct e1000_buffer *buffer_info;
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5183
5184 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5185 mss = skb_shinfo(skb)->gso_size;
5186 if (skb->protocol == htons(ETH_P_IP)) {
5187 if (protocol == htons(ETH_P_IP)) {
5187 struct iphdr *iph = ip_hdr(skb);
5188 iph->tot_len = 0;
5189 iph->check = 0;
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5231 return 1;
5232}
5233
5234static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5235static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
5236 __be16 protocol)
5235{
5236 struct e1000_adapter *adapter = tx_ring->adapter;
5237 struct e1000_context_desc *context_desc;
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5239 unsigned int i;
5240 u8 css;
5241 u32 cmd_len = E1000_TXD_CMD_DEXT;
5242 __be16 protocol;
5243
5244 if (skb->ip_summed != CHECKSUM_PARTIAL)
5245 return false;
5246
5247 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
5248 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
5249 else
5250 protocol = skb->protocol;
5251
5252 switch (protocol) {
5253 case cpu_to_be16(ETH_P_IP):
5254 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5546 int count = 0;
5547 int tso;
5548 unsigned int f;
5545 __be16 protocol = vlan_get_protocol(skb);
5549
5550 if (test_bit(__E1000_DOWN, &adapter->state)) {
5551 dev_kfree_skb_any(skb);
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5620
5621 first = tx_ring->next_to_use;
5622
5623 tso = e1000_tso(tx_ring, skb);
5620 tso = e1000_tso(tx_ring, skb, protocol);
5624 if (tso < 0) {
5625 dev_kfree_skb_any(skb);
5626 return NETDEV_TX_OK;
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5628
5629 if (tso)
5630 tx_flags |= E1000_TX_FLAGS_TSO;
5631 else if (e1000_tx_csum(tx_ring, skb))
5628 else if (e1000_tx_csum(tx_ring, skb, protocol))
5632 tx_flags |= E1000_TX_FLAGS_CSUM;
5633
5634 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5635 * 82571 hardware supports TSO capabilities for IPv6 as well...
5636 * no longer assume, we must.
5637 */
5638 if (skb->protocol == htons(ETH_P_IP))
5635 if (protocol == htons(ETH_P_IP))
5639 tx_flags |= E1000_TX_FLAGS_IPV4;
5640
5641 if (unlikely(skb->no_fcs))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index bb7fe98b3a6c..537b6216971d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -247,7 +247,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
247 u32 prttsyn_stat;
248 int n;
249
250 if (pf->flags & I40E_FLAG_PTP)
250 if (!(pf->flags & I40E_FLAG_PTP))
251 return;
252
253 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_1);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index a51aa37b7b5a..369848e107f8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
2295 goto out_drop;
2296
2297 /* obtain protocol of skb */
2298 protocol = skb->protocol;
2298 protocol = vlan_get_protocol(skb);
2299
2300 /* record the location of the first descriptor for this packet */
2301 first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 89672551dce9..3ac6a0d2f143 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1003,11 +1003,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1003static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
1004 u32 v_retval, u8 *msg, u16 msglen)
1005{
1006 struct i40e_pf *pf = vf->pf;
1006 struct i40e_pf *pf;
1007 struct i40e_hw *hw = &pf->hw;
1007 struct i40e_hw *hw;
1008 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1008 int abs_vf_id;
1009 i40e_status aq_ret;
1010
1011 /* validate the request */
1012 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
1013 return -EINVAL;
1014
1015 pf = vf->pf;
1016 hw = &pf->hw;
1017 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1018
1011 /* single place to detect unsuccessful return values */
1012 if (v_retval) {
1013 vf->num_invalid_msgs++;
@@ -1928,17 +1936,20 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
1928{
1929 struct i40e_hw *hw = &pf->hw;
1930 struct i40e_vf *vf = pf->vf;
1931 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1932 int i;
1933
1934 for (i = 0; i < pf->num_alloc_vfs; i++) {
1941 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
1942 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1943 /* Not all vfs are enabled so skip the ones that are not */
1944 if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
1945 !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
1946 continue;
1947
1935 /* Ignore return value on purpose - a given VF may fail, but
1936 * we need to keep going and send to all of them
1937 */
1938 i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
1939 msg, msglen, NULL);
1940 vf++;
1941 abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1942 }
1943}
1944
@@ -1954,12 +1965,12 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
1954 struct i40e_hw *hw = &pf->hw;
1955 struct i40e_vf *vf = pf->vf;
1956 struct i40e_link_status *ls = &pf->hw.phy.link_info;
1957 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1958 int i; 1968 int i;
1959 1969
1960 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE; 1970 pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
1961 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO; 1971 pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
1962 for (i = 0; i < pf->num_alloc_vfs; i++) { 1972 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
1973 int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
1963 if (vf->link_forced) { 1974 if (vf->link_forced) {
1964 pfe.event_data.link_event.link_status = vf->link_up; 1975 pfe.event_data.link_event.link_status = vf->link_up;
1965 pfe.event_data.link_event.link_speed = 1976 pfe.event_data.link_event.link_speed =
@@ -1972,8 +1983,6 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
 		i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
 				       0, (u8 *)&pfe, sizeof(pfe),
 				       NULL);
-		vf++;
-		abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 	}
 }
 
@@ -2002,7 +2011,18 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 {
 	struct i40e_virtchnl_pf_event pfe;
-	int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+	int abs_vf_id;
+
+	/* validate the request */
+	if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
+		return;
+
+	/* verify if the VF is in either init or active before proceeding */
+	if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
+	    !test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
+		return;
+
+	abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 
 	pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
 	pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 79bf96ca6489..95a3ec236b49 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 		goto out_drop;
 
 	/* obtain protocol of skb */
-	protocol = skb->protocol;
+	protocol = vlan_get_protocol(skb);
 
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_bi[tx_ring->next_to_use];
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c9f1d1b7ef37..ade067de1689 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -20,6 +20,7 @@
 #include <linux/mbus.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/if_vlan.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <linux/io.h>
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 {
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
+		__be16 l3_proto = vlan_get_protocol(skb);
 		u8 l4_proto;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);
 
 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
 
 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
 			return MVNETA_TX_L4_CSUM_NOT;
 
 		return mvneta_txq_desc_csum(skb_network_offset(skb),
-					    skb->protocol, ip_hdr_len, l4_proto);
+					    l3_proto, ip_hdr_len, l4_proto);
 	}
 
 	return MVNETA_TX_L4_CSUM_NOT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 65a4a0f88ea0..02a2e90d581a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2389,6 +2389,22 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
 }
 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
 
+static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
+{
+	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
+	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
+			+ 1;
+	int max_port = min_port +
+		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+
+	if (port < min_port)
+		port = min_port;
+	else if (port >= max_port)
+		port = max_port - 1;
+
+	return port;
+}
+
 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2402,6 +2418,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->mac = mac;
 	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
@@ -2428,6 +2445,7 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
 
 	if ((0 == vlan) && (0 == qos))
@@ -2455,6 +2473,7 @@ bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
 	struct mlx4_priv *priv;
 
 	priv = mlx4_priv(dev);
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
@@ -2482,6 +2501,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
 	s_info->spoofchk = setting;
 
@@ -2535,6 +2555,7 @@ int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_stat
 	if (slave < 0)
 		return -EINVAL;
 
+	port = mlx4_slaves_closest_port(dev, slave, port);
 	switch (link_state) {
 	case IFLA_VF_LINK_STATE_AUTO:
 		/* get current link state */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index e22f24f784fc..35ff2925110a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -487,6 +487,9 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
+	if (pause->autoneg)
+		return -EINVAL;
+
 	priv->prof->tx_pause = pause->tx_pause != 0;
 	priv->prof->rx_pause = pause->rx_pause != 0;
 	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb536aa613f4..abddcf8c40aa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
 				     int qpn, u64 *reg_id)
 {
 	int err;
-	struct mlx4_spec_list spec_eth_outer = { {NULL} };
-	struct mlx4_spec_list spec_vxlan = { {NULL} };
-	struct mlx4_spec_list spec_eth_inner = { {NULL} };
-
-	struct mlx4_net_trans_rule rule = {
-		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
-		.exclusive = 0,
-		.allow_loopback = 1,
-		.promisc_mode = MLX4_FS_REGULAR,
-		.priority = MLX4_DOMAIN_NIC,
-	};
-
-	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
 
 	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
 		return 0; /* do nothing */
 
-	rule.port = priv->port;
-	rule.qpn = qpn;
-	INIT_LIST_HEAD(&rule.list);
-
-	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
-	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
-	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
-
-	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
-	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
-
-	list_add_tail(&spec_eth_outer.list, &rule.list);
-	list_add_tail(&spec_vxlan.list, &rule.list);
-	list_add_tail(&spec_eth_inner.list, &rule.list);
-
-	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
+				    MLX4_DOMAIN_NIC, reg_id);
 	if (err) {
 		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
 		return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index d80e7a6fac74..ca0f98c95105 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+			  int port, int qpn, u16 prio, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	rule.port = port;
+	rule.qpn = qpn;
+	rule.priority = prio;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(dev, &rule, reg_id);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_tunnel_steer_add);
+
 int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
 				      u32 max_range_qpn)
 {
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 7d717eccb7b0..193a6adb5d04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -298,6 +298,7 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
 }
 
+/* Must protect against concurrent access */
 int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		       struct mlx4_mpt_entry ***mpt_entry)
 {
@@ -305,13 +306,10 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 	int key = key_to_hw_index(mmr->key) & (dev->caps.num_mpts - 1);
 	struct mlx4_cmd_mailbox *mailbox = NULL;
 
-	/* Make sure that at this point we have single-threaded access only */
-
 	if (mmr->enabled != MLX4_MPT_EN_HW)
 		return -EINVAL;
 
 	err = mlx4_HW2SW_MPT(dev, NULL, key);
-
 	if (err) {
 		mlx4_warn(dev, "HW2SW_MPT failed (%d).", err);
 		mlx4_warn(dev, "Most likely the MR has MWs bound to it.\n");
@@ -333,7 +331,6 @@ int mlx4_mr_hw_get_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 				   0, MLX4_CMD_QUERY_MPT,
 				   MLX4_CMD_TIME_CLASS_B,
 				   MLX4_CMD_WRAPPED);
-
 	if (err)
 		goto free_mailbox;
 
@@ -378,9 +375,10 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
 		err = mlx4_SW2HW_MPT(dev, mailbox, key);
 	}
 
-	mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
-	if (!err)
+	if (!err) {
+		mmr->pd = be32_to_cpu((*mpt_entry)->pd_flags) & MLX4_MPT_PD_MASK;
 		mmr->enabled = MLX4_MPT_EN_HW;
+	}
 	return err;
 }
 EXPORT_SYMBOL_GPL(mlx4_mr_hw_write_mpt);
@@ -400,11 +398,12 @@ EXPORT_SYMBOL_GPL(mlx4_mr_hw_put_mpt);
 int mlx4_mr_hw_change_pd(struct mlx4_dev *dev, struct mlx4_mpt_entry *mpt_entry,
 			 u32 pdn)
 {
-	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags);
+	u32 pd_flags = be32_to_cpu(mpt_entry->pd_flags) & ~MLX4_MPT_PD_MASK;
 	/* The wrapper function will put the slave's id here */
 	if (mlx4_is_mfunc(dev))
 		pd_flags &= ~MLX4_MPT_PD_VF_MASK;
-	mpt_entry->pd_flags = cpu_to_be32((pd_flags & ~MLX4_MPT_PD_MASK) |
+
+	mpt_entry->pd_flags = cpu_to_be32(pd_flags |
 					  (pdn & MLX4_MPT_PD_MASK)
 					  | MLX4_MPT_PD_FLAG_EN_INV);
 	return 0;
@@ -600,14 +599,18 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 {
 	int err;
 
-	mpt_entry->start = cpu_to_be64(mr->iova);
-	mpt_entry->length = cpu_to_be64(mr->size);
-	mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+	mpt_entry->start = cpu_to_be64(iova);
+	mpt_entry->length = cpu_to_be64(size);
+	mpt_entry->entity_size = cpu_to_be32(page_shift);
 
 	err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
 	if (err)
 		return err;
 
+	mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
+					   MLX4_MPT_PD_FLAG_EN_INV);
+	mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
+					MLX4_MPT_FLAG_SW_OWNS);
 	if (mr->mtt.order < 0) {
 		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
 		mpt_entry->mtt_addr = 0;
@@ -617,6 +620,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
 		if (mr->mtt.page_shift == 0)
 			mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
 	}
+	if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
+		/* fast register MR in free state */
+		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
+		mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
+						   MLX4_MPT_PD_FLAG_RAE);
+	} else {
+		mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
+	}
 	mr->enabled = MLX4_MPT_EN_SW;
 
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 9ba0c1ca10d5..94eeb2c7d7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -103,7 +103,8 @@ static int find_index(struct mlx4_dev *dev,
 	int i;
 
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if ((mac & MLX4_MAC_MASK) ==
+		if (table->refs[i] &&
+		    (MLX4_MAC_MASK & mac) ==
 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
 			return i;
 	}
@@ -165,12 +166,14 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 
 	mutex_lock(&table->mutex);
 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
-		if (free < 0 && !table->entries[i]) {
-			free = i;
+		if (!table->refs[i]) {
+			if (free < 0)
+				free = i;
 			continue;
 		}
 
-		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+		if ((MLX4_MAC_MASK & mac) ==
+		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
 			/* MAC already registered, increment ref count */
 			err = i;
 			++table->refs[i];
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 0dc31d85fc3b..2301365c79c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -390,13 +390,14 @@ err_icm:
 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
 
 #define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
-int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
+int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
 		   enum mlx4_update_qp_attr attr,
 		   struct mlx4_update_qp_params *params)
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	struct mlx4_update_qp_context *cmd;
 	u64 pri_addr_path_mask = 0;
+	u64 qp_mask = 0;
 	int err = 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -413,9 +414,16 @@ int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp,
 		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
 	}
 
+	if (attr & MLX4_UPDATE_QP_VSD) {
+		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
+		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
+			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
+	}
+
 	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
+	cmd->qp_mask = cpu_to_be64(qp_mask);
 
-	err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0,
+	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
 		       MLX4_CMD_NATIVE);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1089367fed22..5d2498dcf536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -702,11 +702,13 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 	struct mlx4_qp_context *qpc = inbox->buf + 8;
 	struct mlx4_vport_oper_state *vp_oper;
 	struct mlx4_priv *priv;
+	u32 qp_type;
 	int port;
 
 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
 	priv = mlx4_priv(dev);
 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
+	qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
 
 	if (MLX4_VGT != vp_oper->state.default_vlan) {
 		/* the reserved QPs (special, proxy, tunnel)
@@ -715,8 +717,20 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
 		if (mlx4_is_qp_reserved(dev, qpn))
 			return 0;
 
-		/* force strip vlan by clear vsd */
-		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
+		if (qp_type == MLX4_QP_ST_UD ||
+		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
+			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
+				*(__be32 *)inbox->buf =
+					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
+					MLX4_QP_OPTPAR_VLAN_STRIPPING);
+				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
+			} else {
+				struct mlx4_update_qp_params params = {.flags = 0};
+
+				mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+			}
+		}
 
 		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
@@ -3998,13 +4012,17 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
 	}
 
 	port = (rqp->sched_queue >> 6 & 1) + 1;
-	smac_index = cmd->qp_context.pri_path.grh_mylmc;
-	err = mac_find_smac_ix_in_slave(dev, slave, port,
-					smac_index, &mac);
-	if (err) {
-		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
-			 qpn, smac_index);
-		goto err_mac;
+
+	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
+		smac_index = cmd->qp_context.pri_path.grh_mylmc;
+		err = mac_find_smac_ix_in_slave(dev, slave, port,
+						smac_index, &mac);
+
+		if (err) {
+			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+				 qpn, smac_index);
+			goto err_mac;
+		}
 	}
 
 	err = mlx4_cmd(dev, inbox->dma,
@@ -4818,7 +4836,7 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
 			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
 
 	upd_context = mailbox->buf;
-	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
+	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
 
 	spin_lock_irq(mlx4_tlock(dev));
 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5020fd47825d..2f12c88c66ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 	int rx_head = priv->rx_head;
 	int rx = 0;
 
-	while (1) {
+	while (rx < budget) {
 		desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
 		desc0 = readl(desc + RX_REG_OFFSET_DESC0);
 
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 			net_dbg_ratelimited("packet error\n");
 			priv->stats.rx_dropped++;
 			priv->stats.rx_errors++;
-			continue;
+			goto rx_next;
 		}
 
 		len = desc0 & RX_DESC0_FRAME_LEN_MASK;
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (len > RX_BUF_SIZE)
 			len = RX_BUF_SIZE;
 
-		skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+		dma_sync_single_for_cpu(&ndev->dev,
+					priv->rx_mapping[rx_head],
+					priv->rx_buf_size, DMA_FROM_DEVICE);
+		skb = netdev_alloc_skb_ip_align(ndev, len);
+
 		if (unlikely(!skb)) {
-			net_dbg_ratelimited("build_skb failed\n");
+			net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n");
 			priv->stats.rx_dropped++;
 			priv->stats.rx_errors++;
+			goto rx_next;
 		}
 
+		memcpy(skb->data, priv->rx_buf[rx_head], len);
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, ndev);
 		napi_gro_receive(&priv->napi, skb);
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
 		if (desc0 & RX_DESC0_MULTICAST)
 			priv->stats.multicast++;
 
+rx_next:
 		writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
 		rx_head = RX_NEXT(rx_head);
 		priv->rx_head = rx_head;
-
-		if (rx >= budget)
-			break;
 	}
 
 	if (rx < budget) {
-		napi_gro_flush(napi, false);
-		__napi_complete(napi);
+		napi_complete(napi);
 	}
 
 	priv->reg_imr |= RPKT_FINISH_M;
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		len = ETH_ZLEN;
 	}
 
-	txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
-	txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
-	txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
-	txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+	dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head],
+				   priv->tx_buf_size, DMA_TO_DEVICE);
+
+	txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
+	if (tx_head == TX_DESC_NUM_MASK)
+		txdes1 |= TX_DESC1_END;
 	writel(txdes1, desc + TX_REG_OFFSET_DESC1);
 	writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev)
 	spin_lock_init(&priv->txlock);
 
 	priv->tx_buf_size = TX_BUF_SIZE;
-	priv->rx_buf_size = RX_BUF_SIZE +
-			    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	priv->rx_buf_size = RX_BUF_SIZE;
 
 	priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
 						TX_DESC_NUM, &priv->tx_base,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8706c0dbd0c3..a44a03c45014 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev)
 
 	__lpc_eth_clock_enable(pldat, true);
 
+	/* Suspended PHY makes LPC ethernet core block, so resume now */
+	phy_resume(pldat->phy_dev);
+
 	/* Reset and initialize */
 	__lpc_eth_reset(pldat);
 	__lpc_eth_init(pldat);
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 979c6980639f..a42293092ea4 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -290,9 +290,11 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 		/* Read the hardware TX timestamp if one was recorded */
 		if (unlikely(re.s.tstamp)) {
 			struct skb_shared_hwtstamps ts;
+			u64 ns;
+
 			memset(&ts, 0, sizeof(ts));
 			/* Read the timestamp */
-			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
 			/* Remove the timestamp from the FIFO */
 			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
 			/* Tell the kernel about the timestamp */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index 44c8be1c6805..5f7a35212796 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -7,6 +7,7 @@ config PCH_GBE
 	depends on PCI && (X86_32 || COMPILE_TEST)
 	select MII
 	select PTP_1588_CLOCK_PCH
+	select NET_PTP_CLASSIFY
 	---help---
 	  This is a gigabit ethernet driver for EG20T PCH.
 	  EG20T PCH is the platform controller hub that is used in Intel's
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 16039d1497b8..b84f5ea3d659 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -268,7 +268,7 @@ struct qlcnic_fdt {
 	u16	cksum;
 	u16	unused;
 	u8	model[16];
-	u16	mfg_id;
+	u8	mfg_id;
 	u16	id;
 	u8	flag;
 	u8	erase_cmd;
@@ -2362,6 +2362,19 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
 	return QLC_DEFAULT_VNIC_COUNT;
 }
 
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+	u32 *tmp = buffer;
+	int i;
+
+	for (i = 0; i < count; i++) {
+		*tmp = swab32(*tmp);
+		tmp++;
+	}
+#endif
+}
+
 #ifdef CONFIG_QLCNIC_HWMON
 void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
 void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a4a4ec0b68f8..476e4998ef99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2603,7 +2603,7 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
 	}
 
 	qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
-				     (addr));
+				     (addr & 0xFFFF0000));
 
 	range = flash_offset + (count * sizeof(u32));
 	/* Check if data is spread across multiple sectors */
@@ -2753,7 +2753,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
 	ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
 						(u8 *)&adapter->ahw->fdt,
 						count);
-
+	qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
 	qlcnic_83xx_unlock_flash(adapter);
 	return ret;
 }
@@ -2788,7 +2788,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
 
 	addr1 = (sector_start_addr & 0xFF) << 16;
 	addr2 = (sector_start_addr & 0xFF0000) >> 16;
-	reversed_addr = addr1 | addr2;
+	reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
 
 	qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
 				     reversed_addr);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f33559b72528..86783e1afcf7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1378,31 +1378,45 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 {
 	struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
 	const struct firmware *fw = fw_info->fw;
-	u32 dest, *p_cache;
+	u32 dest, *p_cache, *temp;
 	int i, ret = -EIO;
+	__le32 *temp_le;
 	u8 data[16];
 	size_t size;
 	u64 addr;
 
+	temp = kzalloc(fw->size, GFP_KERNEL);
+	if (!temp) {
+		release_firmware(fw);
+		fw_info->fw = NULL;
+		return -ENOMEM;
+	}
+
+	temp_le = (__le32 *)fw->data;
+
+	/* FW image in file is in little endian, swap the data to nullify
+	 * the effect of writel() operation on big endian platform.
+	 */
+	for (i = 0; i < fw->size / sizeof(u32); i++)
+		temp[i] = __le32_to_cpu(temp_le[i]);
+
 	dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
 	size = (fw->size & ~0xF);
-	p_cache = (u32 *)fw->data;
+	p_cache = temp;
 	addr = (u64)dest;
 
 	ret = qlcnic_ms_mem_write128(adapter, addr,
 				     p_cache, size / 16);
 	if (ret) {
 		dev_err(&adapter->pdev->dev, "MS memory write failed\n");
-		release_firmware(fw);
-		fw_info->fw = NULL;
-		return -EIO;
+		goto exit;
 	}
 
 	/* alignment check */
 	if (fw->size & 0xF) {
 		addr = dest + size;
 		for (i = 0; i < (fw->size & 0xF); i++)
-			data[i] = fw->data[size + i];
+			data[i] = temp[size + i];
 		for (; i < 16; i++)
 			data[i] = 0;
 		ret = qlcnic_ms_mem_write128(adapter, addr,
@@ -1410,15 +1424,16 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
 		if (ret) {
 			dev_err(&adapter->pdev->dev,
 				"MS memory write failed\n");
-			release_firmware(fw);
-			fw_info->fw = NULL;
-			return -EIO;
+			goto exit;
 		}
 	}
+
+exit:
 	release_firmware(fw);
 	fw_info->fw = NULL;
+	kfree(temp);
 
-	return 0;
+	return ret;
 }
 
 static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index e46fc39d425d..c9f57fb84b9e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -47,15 +47,26 @@ struct qlcnic_common_entry_hdr {
 	u32 type;
 	u32 offset;
 	u32 cap_size;
+#if defined(__LITTLE_ENDIAN)
 	u8 mask;
 	u8 rsvd[2];
 	u8 flags;
+#else
+	u8 flags;
+	u8 rsvd[2];
+	u8 mask;
+#endif
 } __packed;
 
 struct __crb {
 	u32 addr;
+#if defined(__LITTLE_ENDIAN)
 	u8 stride;
 	u8 rsvd1[3];
+#else
+	u8 rsvd1[3];
+	u8 stride;
+#endif
 	u32 data_size;
 	u32 no_ops;
 	u32 rsvd2[4];
@@ -63,15 +74,28 @@ struct __crb {
 
 struct __ctrl {
 	u32 addr;
+#if defined(__LITTLE_ENDIAN)
 	u8 stride;
 	u8 index_a;
 	u16 timeout;
+#else
+	u16 timeout;
+	u8 index_a;
+	u8 stride;
+#endif
 	u32 data_size;
 	u32 no_ops;
+#if defined(__LITTLE_ENDIAN)
 	u8 opcode;
 	u8 index_v;
 	u8 shl_val;
 	u8 shr_val;
+#else
+	u8 shr_val;
+	u8 shl_val;
+	u8 index_v;
+	u8 opcode;
+#endif
 	u32 val1;
 	u32 val2;
 	u32 val3;
@@ -79,16 +103,27 @@ struct __ctrl {
 
 struct __cache {
 	u32 addr;
+#if defined(__LITTLE_ENDIAN)
 	u16 stride;
 	u16 init_tag_val;
+#else
+	u16 init_tag_val;
+	u16 stride;
+#endif
 	u32 size;
 	u32 no_ops;
 	u32 ctrl_addr;
 	u32 ctrl_val;
 	u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
 	u8 read_addr_stride;
 	u8 read_addr_num;
 	u8 rsvd1[2];
+#else
+	u8 rsvd1[2];
+	u8 read_addr_num;
+	u8 read_addr_stride;
+#endif
 } __packed;
 
 struct __ocm {
@@ -122,23 +157,39 @@ struct __mux {
 
 struct __queue {
 	u32 sel_addr;
+#if defined(__LITTLE_ENDIAN)
 	u16 stride;
 	u8 rsvd[2];
+#else
+	u8 rsvd[2];
+	u16 stride;
+#endif
 	u32 size;
 	u32 no_ops;
 	u8 rsvd2[8];
 	u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
 	u8 read_addr_stride;
 	u8 read_addr_cnt;
 	u8 rsvd3[2];
+#else
+	u8 rsvd3[2];
+	u8 read_addr_cnt;
+	u8 read_addr_stride;
+#endif
 } __packed;
 
 struct __pollrd {
 	u32 sel_addr;
 	u32 read_addr;
 	u32 sel_val;
+#if defined(__LITTLE_ENDIAN)
 	u16 sel_val_stride;
 	u16 no_ops;
+#else
+	u16 no_ops;
+	u16 sel_val_stride;
+#endif
 	u32 poll_wait;
 	u32 poll_mask;
 	u32 data_size;
@@ -153,9 +204,15 @@ struct __mux2 {
 	u32 no_ops;
 	u32 sel_val_mask;
 	u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
 	u8 sel_val_stride;
 	u8 data_size;
 	u8 rsvd[2];
+#else
+	u8 rsvd[2];
+	u8 data_size;
+	u8 sel_val_stride;
+#endif
 } __packed;
 
 struct __pollrdmwr {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index f5786d5792df..59a721fba018 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -280,6 +280,7 @@ static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
 	if (ret != 0)
 		return ret;
 	qlcnic_read_crb(adapter, buf, offset, size);
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
 	return size;
 }
@@ -296,6 +297,7 @@ static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
 	if (ret != 0)
 		return ret;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	qlcnic_write_crb(adapter, buf, offset, size);
 	return size;
 }
@@ -329,6 +331,7 @@ static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
 		return -EIO;
 
 	memcpy(buf, &data, size);
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 
 	return size;
 }
@@ -346,6 +349,7 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
 	if (ret != 0)
 		return ret;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	memcpy(&data, buf, size);
 
 	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
@@ -412,6 +416,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
 	if (rem)
 		return QL_STATUS_INVALID_PARAM;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
 	ret = validate_pm_config(adapter, pm_cfg, count);
 
@@ -474,6 +479,7 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
 		pm_cfg[pci_func].dest_npar = 0;
 		pm_cfg[pci_func].pci_func = i;
 	}
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	return size;
 }
 
@@ -555,6 +561,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
 	if (rem)
 		return QL_STATUS_INVALID_PARAM;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
 	ret = validate_esw_config(adapter, esw_cfg, count);
 	if (ret)
@@ -649,6 +656,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
 		if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
 			return QL_STATUS_INVALID_PARAM;
 	}
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	return size;
 }
 
@@ -688,6 +696,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
 	if (rem)
 		return QL_STATUS_INVALID_PARAM;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	np_cfg = (struct qlcnic_npar_func_cfg *)buf;
 	ret = validate_npar_config(adapter, np_cfg, count);
 	if (ret)
@@ -759,6 +768,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 		np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
 		np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
 	}
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	return size;
 }
 
@@ -916,6 +926,7 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
 
 	pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
 	count = size / sizeof(struct qlcnic_pci_func_cfg);
+	qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
 	for (i = 0; i < count; i++) {
 		pci_cfg[i].pci_func = pci_info[i].id;
 		pci_cfg[i].func_type = pci_info[i].type;
@@ -969,6 +980,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
 	}
 
 	qlcnic_83xx_unlock_flash(adapter);
+	qlcnic_swap32_buffer((u32 *)p_read_buf, count);
 	memcpy(buf, p_read_buf, size);
 	kfree(p_read_buf);
 
@@ -986,9 +998,10 @@ static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
 	if (!p_cache)
 		return -ENOMEM;
 
+	count = size / sizeof(u32);
+	qlcnic_swap32_buffer((u32 *)buf, count);
 	memcpy(p_cache, buf, size);
 	p_src = p_cache;
-	count = size / sizeof(u32);
 
 	if (qlcnic_83xx_lock_flash(adapter) != 0) {
 		kfree(p_cache);
@@ -1053,6 +1066,7 @@ static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
 	if (!p_cache)
 		return -ENOMEM;
 
+	qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
 	memcpy(p_cache, buf, size);
 	p_src = p_cache;
 	count = size / sizeof(u32);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 188626e2a861..3e96f269150d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 
 	if (skb_is_gso(skb)) {
 		int err;
+		__be16 l3_proto = vlan_get_protocol(skb);
 
 		err = skb_cow_head(skb, 0);
 		if (err < 0)
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
-		if (likely(skb->protocol == htons(ETH_P_IP))) {
+		if (likely(l3_proto == htons(ETH_P_IP))) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->check = 0;
 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
 							 iph->daddr, 0,
 							 IPPROTO_TCP,
 							 0);
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
 			tcp_hdr(skb)->check =
 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91652e7235e4..0921302553c6 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1783,33 +1783,31 @@ static void __rtl8169_set_features(struct net_device *dev,
 				    netdev_features_t features)
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
-	netdev_features_t changed = features ^ dev->features;
 	void __iomem *ioaddr = tp->mmio_addr;
+	u32 rx_config;
 
-	if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM |
-			 NETIF_F_HW_VLAN_CTAG_RX)))
-		return;
+	rx_config = RTL_R32(RxConfig);
+	if (features & NETIF_F_RXALL)
+		rx_config |= (AcceptErr | AcceptRunt);
+	else
+		rx_config &= ~(AcceptErr | AcceptRunt);
 
-	if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) {
-		if (features & NETIF_F_RXCSUM)
-			tp->cp_cmd |= RxChkSum;
-		else
-			tp->cp_cmd &= ~RxChkSum;
+	RTL_W32(RxConfig, rx_config);
 
-		if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
-			tp->cp_cmd |= RxVlan;
-		else
-			tp->cp_cmd &= ~RxVlan;
+	if (features & NETIF_F_RXCSUM)
+		tp->cp_cmd |= RxChkSum;
+	else
+		tp->cp_cmd &= ~RxChkSum;
 
-		RTL_W16(CPlusCmd, tp->cp_cmd);
-		RTL_R16(CPlusCmd);
-	}
-	if (changed & NETIF_F_RXALL) {
-		int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
-		if (features & NETIF_F_RXALL)
-			tmp |= (AcceptErr | AcceptRunt);
-		RTL_W32(RxConfig, tmp);
-	}
+	if (features & NETIF_F_HW_VLAN_CTAG_RX)
+		tp->cp_cmd |= RxVlan;
+	else
+		tp->cp_cmd &= ~RxVlan;
+
+	tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+
+	RTL_W16(CPlusCmd, tp->cp_cmd);
+	RTL_R16(CPlusCmd);
 }
 
 static int rtl8169_set_features(struct net_device *dev,
@@ -1817,8 +1815,11 @@ static int rtl8169_set_features(struct net_device *dev,
 {
 	struct rtl8169_private *tp = netdev_priv(dev);
 
+	features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
+
 	rtl_lock_work(tp);
-	__rtl8169_set_features(dev, features);
+	if (features ^ dev->features)
+		__rtl8169_set_features(dev, features);
 	rtl_unlock_work(tp);
 
 	return 0;
@@ -7118,8 +7119,7 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
 	}
 }
 
-static int
-rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
 	const unsigned int region = cfg->region;
@@ -7194,7 +7194,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_mwi_2;
 	}
 
-	tp->cp_cmd = RxChkSum;
+	tp->cp_cmd = 0;
 
 	if ((sizeof(dma_addr_t) > 4) &&
 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -7235,13 +7235,6 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	pci_set_master(pdev);
 
-	/*
-	 * Pretend we are using VLANs; This bypasses a nasty bug where
-	 * Interrupts stop flowing on high load on 8110SCd controllers.
-	 */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		tp->cp_cmd |= RxVlan;
-
 	rtl_init_mdio_ops(tp);
 	rtl_init_pll_power_ops(tp);
 	rtl_init_jumbo_ops(tp);
@@ -7302,8 +7295,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HIGHDMA;
 
+	tp->cp_cmd |= RxChkSum | RxVlan;
+
+	/*
+	 * Pretend we are using VLANs; This bypasses a nasty bug where
+	 * Interrupts stop flowing on high load on 8110SCd controllers.
+	 */
 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
-		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
+		/* Disallow toggling */
 		dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (tp->txd_version == RTL_TD_0)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 9e757c792d84..196e98a2d93b 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -5,6 +5,7 @@
 config SH_ETH
 	tristate "Renesas SuperH Ethernet support"
 	depends on HAS_DMA
+	depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST
 	select CRC32
 	select MII
 	select MDIO_BITBANG
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 0537381cd2f6..6859437b59fb 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2933,6 +2933,9 @@ void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
 	u32 crc;
 	int bit;
 
+	if (!efx_dev_registered(efx))
+		return;
+
 	netif_addr_lock_bh(net_dev);
 
 	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index c553f6b5a913..cf28daba4346 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
47 47
48 desc->des2 = dma_map_single(priv->device, skb->data, 48 desc->des2 = dma_map_single(priv->device, skb->data,
49 bmax, DMA_TO_DEVICE); 49 bmax, DMA_TO_DEVICE);
50 priv->tx_skbuff_dma[entry] = desc->des2; 50 if (dma_mapping_error(priv->device, desc->des2))
51 return -1;
52 priv->tx_skbuff_dma[entry].buf = desc->des2;
51 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); 53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
52 54
53 while (len != 0) { 55 while (len != 0) {
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 desc->des2 = dma_map_single(priv->device, 61 desc->des2 = dma_map_single(priv->device,
60 (skb->data + bmax * i), 62 (skb->data + bmax * i),
61 bmax, DMA_TO_DEVICE); 63 bmax, DMA_TO_DEVICE);
62 priv->tx_skbuff_dma[entry] = desc->des2; 64 if (dma_mapping_error(priv->device, desc->des2))
65 return -1;
66 priv->tx_skbuff_dma[entry].buf = desc->des2;
63 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, 67 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
64 STMMAC_CHAIN_MODE); 68 STMMAC_CHAIN_MODE);
65 priv->hw->desc->set_tx_owner(desc); 69 priv->hw->desc->set_tx_owner(desc);
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
69 desc->des2 = dma_map_single(priv->device, 73 desc->des2 = dma_map_single(priv->device,
70 (skb->data + bmax * i), len, 74 (skb->data + bmax * i), len,
71 DMA_TO_DEVICE); 75 DMA_TO_DEVICE);
72 priv->tx_skbuff_dma[entry] = desc->des2; 76 if (dma_mapping_error(priv->device, desc->des2))
77 return -1;
78 priv->tx_skbuff_dma[entry].buf = desc->des2;
73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 79 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_CHAIN_MODE); 80 STMMAC_CHAIN_MODE);
75 priv->hw->desc->set_tx_owner(desc); 81 priv->hw->desc->set_tx_owner(desc);
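All three hunks above add the same guard: a handle returned by dma_map_single() must be run through dma_mapping_error() before it is written into a descriptor, and a failed map now surfaces as -1 instead of being silently programmed into hardware. A minimal user-space sketch of that pattern, with hypothetical map_buf()/mapping_error() helpers standing in for the kernel DMA API:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR ((dma_addr_t)0)   /* stand-in error value */

/* Hypothetical stand-in for dma_map_single(); fails on zero length. */
static dma_addr_t map_buf(const void *cpu_addr, size_t len)
{
        return len ? (dma_addr_t)(uintptr_t)cpu_addr : DMA_MAPPING_ERROR;
}

static int mapping_error(dma_addr_t handle)
{
        return handle == DMA_MAPPING_ERROR;
}

int main(void)
{
        char frame[64] = "payload";
        dma_addr_t des2 = map_buf(frame, sizeof(frame));

        if (mapping_error(des2))   /* check before trusting the handle */
                return -1;         /* mirrors the new error paths above */
        printf("descriptor points at %#llx\n", (unsigned long long)des2);
        return 0;
}

The real driver additionally records the handle in tx_skbuff_dma[entry].buf so the cleanup paths can unmap it later.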
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index de507c32036c..593e6c4144a7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -220,10 +220,10 @@ enum dma_irq_status {
220 handle_tx = 0x8, 220 handle_tx = 0x8,
221}; 221};
222 222
223#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) 223#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
224#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) 224#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
225#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) 225#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
226#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) 226#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
227 227
228#define CORE_PCS_ANE_COMPLETE (1 << 5) 228#define CORE_PCS_ANE_COMPLETE (1 << 5)
229#define CORE_PCS_LINK_STATUS (1 << 6) 229#define CORE_PCS_LINK_STATUS (1 << 6)
@@ -287,7 +287,7 @@ struct dma_features {
287 287
288/* Default LPI timers */ 288/* Default LPI timers */
289#define STMMAC_DEFAULT_LIT_LS 0x3E8 289#define STMMAC_DEFAULT_LIT_LS 0x3E8
290#define STMMAC_DEFAULT_TWT_LS 0x0 290#define STMMAC_DEFAULT_TWT_LS 0x1E
291 291
292#define STMMAC_CHAIN_MODE 0x1 292#define STMMAC_CHAIN_MODE 0x1
293#define STMMAC_RING_MODE 0x2 293#define STMMAC_RING_MODE 0x2
@@ -425,7 +425,7 @@ struct stmmac_mode_ops {
425 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, 425 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
426 unsigned int extend_desc); 426 unsigned int extend_desc);
427 unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 427 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
428 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); 428 int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
429 int (*set_16kib_bfsize)(int mtu); 429 int (*set_16kib_bfsize)(int mtu);
430 void (*init_desc3)(struct dma_desc *p); 430 void (*init_desc3)(struct dma_desc *p);
431 void (*refill_desc3) (void *priv, struct dma_desc *p); 431 void (*refill_desc3) (void *priv, struct dma_desc *p);
@@ -445,6 +445,7 @@ struct mac_device_info {
445 int multicast_filter_bins; 445 int multicast_filter_bins;
446 int unicast_filter_entries; 446 int unicast_filter_entries;
447 int mcast_bits_log2; 447 int mcast_bits_log2;
448 unsigned int rx_csum;
448}; 449};
449 450
450struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, 451struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 71b5419256c1..64d8f56a9c17 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -153,7 +153,7 @@ enum inter_frame_gap {
153#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ 153#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
154 154
155#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ 155#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \
156 GMAC_CONTROL_BE) 156 GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
157 157
158/* GMAC Frame Filter defines */ 158/* GMAC Frame Filter defines */
159#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ 159#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index d8ef18786a1c..5efe60ea6526 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
58 void __iomem *ioaddr = hw->pcsr; 58 void __iomem *ioaddr = hw->pcsr;
59 u32 value = readl(ioaddr + GMAC_CONTROL); 59 u32 value = readl(ioaddr + GMAC_CONTROL);
60 60
61 value |= GMAC_CONTROL_IPC; 61 if (hw->rx_csum)
62 value |= GMAC_CONTROL_IPC;
63 else
64 value &= ~GMAC_CONTROL_IPC;
65
62 writel(value, ioaddr + GMAC_CONTROL); 66 writel(value, ioaddr + GMAC_CONTROL);
63 67
64 value = readl(ioaddr + GMAC_CONTROL); 68 value = readl(ioaddr + GMAC_CONTROL);
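The fix above turns an unconditional bit-set into a read-modify-write keyed on the new hw->rx_csum field, so checksum offload can be switched off as well as on. The shape of the change, sketched against a plain in-memory word rather than a real ioremap()ed register (the bit position here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define GMAC_CONTROL_IPC (1u << 10)   /* illustrative bit position */

static void rx_ipc_set(uint32_t *reg, int rx_csum)
{
        uint32_t value = *reg;            /* readl() equivalent */

        if (rx_csum)
                value |= GMAC_CONTROL_IPC;
        else
                value &= ~GMAC_CONTROL_IPC;  /* new: clear when csum is off */

        *reg = value;                     /* writel() equivalent */
}

int main(void)
{
        uint32_t gmac_control = 0;

        rx_ipc_set(&gmac_control, 1);
        printf("csum on:  %#x\n", gmac_control);
        rx_ipc_set(&gmac_control, 0);
        printf("csum off: %#x\n", gmac_control);
        return 0;
}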
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 8607488cbcfc..192c2491330b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -68,7 +68,7 @@ struct stmmac_counters {
68 unsigned int mmc_rx_octetcount_g; 68 unsigned int mmc_rx_octetcount_g;
69 unsigned int mmc_rx_broadcastframe_g; 69 unsigned int mmc_rx_broadcastframe_g;
70 unsigned int mmc_rx_multicastframe_g; 70 unsigned int mmc_rx_multicastframe_g;
71 unsigned int mmc_rx_crc_errror; 71 unsigned int mmc_rx_crc_error;
72 unsigned int mmc_rx_align_error; 72 unsigned int mmc_rx_align_error;
73 unsigned int mmc_rx_run_error; 73 unsigned int mmc_rx_run_error;
74 unsigned int mmc_rx_jabber_error; 74 unsigned int mmc_rx_jabber_error;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..08c483bd2ec7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); 196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); 197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); 198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
199 mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); 199 mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR);
200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); 200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); 201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); 202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 650a4be6bce5..5dd50c6cda5b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -28,7 +28,7 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *)p; 33 struct stmmac_priv *priv = (struct stmmac_priv *)p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
53 53
54 desc->des2 = dma_map_single(priv->device, skb->data, 54 desc->des2 = dma_map_single(priv->device, skb->data,
55 bmax, DMA_TO_DEVICE); 55 bmax, DMA_TO_DEVICE);
56 priv->tx_skbuff_dma[entry] = desc->des2; 56 if (dma_mapping_error(priv->device, desc->des2))
57 return -1;
58
59 priv->tx_skbuff_dma[entry].buf = desc->des2;
57 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 60 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
58 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, 61 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
59 STMMAC_RING_MODE); 62 STMMAC_RING_MODE);
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
68 71
69 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 72 desc->des2 = dma_map_single(priv->device, skb->data + bmax,
70 len, DMA_TO_DEVICE); 73 len, DMA_TO_DEVICE);
71 priv->tx_skbuff_dma[entry] = desc->des2; 74 if (dma_mapping_error(priv->device, desc->des2))
75 return -1;
76 priv->tx_skbuff_dma[entry].buf = desc->des2;
72 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 77 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, 78 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_RING_MODE); 79 STMMAC_RING_MODE);
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
77 } else { 82 } else {
78 desc->des2 = dma_map_single(priv->device, skb->data, 83 desc->des2 = dma_map_single(priv->device, skb->data,
79 nopaged_len, DMA_TO_DEVICE); 84 nopaged_len, DMA_TO_DEVICE);
80 priv->tx_skbuff_dma[entry] = desc->des2; 85 if (dma_mapping_error(priv->device, desc->des2))
86 return -1;
87 priv->tx_skbuff_dma[entry].buf = desc->des2;
81 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 88 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
82 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, 89 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
83 STMMAC_RING_MODE); 90 STMMAC_RING_MODE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index ca01035634a7..58097c0e2ad5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -34,6 +34,11 @@
34#include <linux/ptp_clock_kernel.h> 34#include <linux/ptp_clock_kernel.h>
35#include <linux/reset.h> 35#include <linux/reset.h>
36 36
37struct stmmac_tx_info {
38 dma_addr_t buf;
39 bool map_as_page;
40};
41
37struct stmmac_priv { 42struct stmmac_priv {
38 /* Frequently used values are kept adjacent for cache effect */ 43 /* Frequently used values are kept adjacent for cache effect */
39 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; 44 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -45,7 +50,7 @@ struct stmmac_priv {
45 u32 tx_count_frames; 50 u32 tx_count_frames;
46 u32 tx_coal_frames; 51 u32 tx_coal_frames;
47 u32 tx_coal_timer; 52 u32 tx_coal_timer;
48 dma_addr_t *tx_skbuff_dma; 53 struct stmmac_tx_info *tx_skbuff_dma;
49 dma_addr_t dma_tx_phy; 54 dma_addr_t dma_tx_phy;
50 int tx_coalesce; 55 int tx_coalesce;
51 int hwts_tx_en; 56 int hwts_tx_en;
@@ -105,6 +110,8 @@ struct stmmac_priv {
105 struct ptp_clock *ptp_clock; 110 struct ptp_clock *ptp_clock;
106 struct ptp_clock_info ptp_clock_ops; 111 struct ptp_clock_info ptp_clock_ops;
107 unsigned int default_addend; 112 unsigned int default_addend;
113 struct clk *clk_ptp_ref;
114 unsigned int clk_ptp_rate;
108 u32 adv_ts; 115 u32 adv_ts;
109 int use_riwt; 116 int use_riwt;
110 int irq_wake; 117 int irq_wake;
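Widening tx_skbuff_dma from a bare dma_addr_t array to an array of struct stmmac_tx_info is what lets the free and clean paths further down choose dma_unmap_page() for fragments and dma_unmap_single() for linear data. A compact model of that bookkeeping:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct tx_info {              /* mirrors struct stmmac_tx_info */
        dma_addr_t buf;
        bool map_as_page;     /* frag (page) vs. linear (single) */
};

static void unmap(struct tx_info *ti)
{
        if (!ti->buf)
                return;
        /* The flag picks the unmap primitive in the real driver. */
        printf("%s(%#llx)\n",
               ti->map_as_page ? "dma_unmap_page" : "dma_unmap_single",
               (unsigned long long)ti->buf);
        ti->buf = 0;
        ti->map_as_page = false;
}

int main(void)
{
        struct tx_info linear = { .buf = 0x1000, .map_as_page = false };
        struct tx_info frag   = { .buf = 0x2000, .map_as_page = true };

        unmap(&linear);
        unmap(&frag);
        return 0;
}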
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9af50bae4dde..cf4f38db1c0a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = {
175 STMMAC_MMC_STAT(mmc_rx_octetcount_g), 175 STMMAC_MMC_STAT(mmc_rx_octetcount_g),
176 STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), 176 STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
177 STMMAC_MMC_STAT(mmc_rx_multicastframe_g), 177 STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
178 STMMAC_MMC_STAT(mmc_rx_crc_errror), 178 STMMAC_MMC_STAT(mmc_rx_crc_error),
179 STMMAC_MMC_STAT(mmc_rx_align_error), 179 STMMAC_MMC_STAT(mmc_rx_align_error),
180 STMMAC_MMC_STAT(mmc_rx_run_error), 180 STMMAC_MMC_STAT(mmc_rx_run_error),
181 STMMAC_MMC_STAT(mmc_rx_jabber_error), 181 STMMAC_MMC_STAT(mmc_rx_jabber_error),
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 08addd653728..6e6ee226de04 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
275 */ 275 */
276bool stmmac_eee_init(struct stmmac_priv *priv) 276bool stmmac_eee_init(struct stmmac_priv *priv)
277{ 277{
278 char *phy_bus_name = priv->plat->phy_bus_name;
278 bool ret = false; 279 bool ret = false;
279 280
280 /* Using PCS we cannot deal with the phy registers at this stage 281 /* Using PCS we cannot deal with the phy registers at this stage
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
284 (priv->pcs == STMMAC_PCS_RTBI)) 285 (priv->pcs == STMMAC_PCS_RTBI))
285 goto out; 286 goto out;
286 287
288 /* Never init EEE in case a switch is attached */
289 if (phy_bus_name && (!strcmp(phy_bus_name, "fixed")))
290 goto out;
291
287 /* MAC core supports the EEE feature. */ 292 /* MAC core supports the EEE feature. */
288 if (priv->dma_cap.eee) { 293 if (priv->dma_cap.eee) {
289 int tx_lpi_timer = priv->tx_lpi_timer; 294 int tx_lpi_timer = priv->tx_lpi_timer;
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
316 priv->hw->mac->set_eee_timer(priv->hw, 321 priv->hw->mac->set_eee_timer(priv->hw,
317 STMMAC_DEFAULT_LIT_LS, 322 STMMAC_DEFAULT_LIT_LS,
318 tx_lpi_timer); 323 tx_lpi_timer);
319 } else 324 }
320 /* Set HW EEE according to the speed */ 325 /* Set HW EEE according to the speed */
321 priv->hw->mac->set_eee_pls(priv->hw, 326 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
322 priv->phydev->link);
323 327
324 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); 328 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
325 329
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
603 /* calculate default added value: 607 /* calculate default added value:
604 * formula is: 608 * formula is:
605 * addend = (2^32)/freq_div_ratio; 609 * addend = (2^32)/freq_div_ratio;
606 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz 610 * where, freq_div_ratio = clk_ptp_ref_i/50MHz
607 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; 611 * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i;
608 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to 612 * NOTE: clk_ptp_ref_i should be >= 50MHz to
609 achieve 20ns accuracy. 613 achieve 20ns accuracy.
610 * 614 *
611 * 2^x * y == (y << x), hence 615 * 2^x * y == (y << x), hence
612 * 2^32 * 50000000 ==> (50000000 << 32) 616 * 2^32 * 50000000 ==> (50000000 << 32)
613 */ 617 */
614 temp = (u64) (50000000ULL << 32); 618 temp = (u64) (50000000ULL << 32);
615 priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); 619 priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
616 priv->hw->ptp->config_addend(priv->ioaddr, 620 priv->hw->ptp->config_addend(priv->ioaddr,
617 priv->default_addend); 621 priv->default_addend);
618 622
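The addend arithmetic above is easy to sanity-check: with the old fixed 62.5 MHz STMMAC_SYSCLOCK, (50 MHz << 32) / 62.5 MHz is 0.8 * 2^32 = 0xcccccccc, and the patch only swaps the constant divisor for the measured clk_ptp_rate. A quick standalone check, with div_u64() reduced to plain 64-bit division:

#include <stdint.h>
#include <stdio.h>

/* default_addend = (2^32 * 50 MHz) / clk_ptp_rate */
static uint32_t ptp_addend(uint32_t clk_ptp_rate)
{
        uint64_t temp = 50000000ULL << 32;

        return (uint32_t)(temp / clk_ptp_rate);  /* div_u64() stand-in */
}

int main(void)
{
        printf("62.5 MHz -> %#x\n", ptp_addend(62500000));   /* 0xcccccccc */
        printf("125 MHz  -> %#x\n", ptp_addend(125000000));  /* 0x66666666 */
        return 0;
}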
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
638 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) 642 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
639 return -EOPNOTSUPP; 643 return -EOPNOTSUPP;
640 644
645 /* Fall back to the main clock if no PTP ref clock is passed */
646 priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
647 if (IS_ERR(priv->clk_ptp_ref)) {
648 priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
649 priv->clk_ptp_ref = NULL;
650 } else {
651 clk_prepare_enable(priv->clk_ptp_ref);
652 priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
653 }
654
641 priv->adv_ts = 0; 655 priv->adv_ts = 0;
642 if (priv->dma_cap.atime_stamp && priv->extend_desc) 656 if (priv->dma_cap.atime_stamp && priv->extend_desc)
643 priv->adv_ts = 1; 657 priv->adv_ts = 1;
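The probe-time fallback added here (try a dedicated "clk_ptp_ref", quietly use the main bus clock when the platform provides none) is a common clk consumer pattern. A sketch with a hypothetical get_clk() lookup standing in for devm_clk_get():

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct clk { const char *name; unsigned long rate; };

/* Hypothetical stand-in for devm_clk_get(); NULL means "not provided". */
static struct clk *get_clk(struct clk *tbl, int n, const char *name)
{
        for (int i = 0; i < n; i++)
                if (!strcmp(tbl[i].name, name))
                        return &tbl[i];
        return NULL;
}

int main(void)
{
        struct clk main_clk = { "stmmaceth", 62500000 };
        struct clk *clks = &main_clk;   /* no clk_ptp_ref on this board */
        unsigned long clk_ptp_rate;

        struct clk *ptp_ref = get_clk(clks, 1, "clk_ptp_ref");
        if (!ptp_ref)                   /* mirrors the IS_ERR() branch */
                clk_ptp_rate = main_clk.rate;
        else
                clk_ptp_rate = ptp_ref->rate;

        printf("timestamping clocked at %lu Hz\n", clk_ptp_rate);
        return 0;
}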
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
657 671
658static void stmmac_release_ptp(struct stmmac_priv *priv) 672static void stmmac_release_ptp(struct stmmac_priv *priv)
659{ 673{
674 if (priv->clk_ptp_ref)
675 clk_disable_unprepare(priv->clk_ptp_ref);
660 stmmac_ptp_unregister(priv); 676 stmmac_ptp_unregister(priv);
661} 677}
662 678
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev)
1061 else 1077 else
1062 p = priv->dma_tx + i; 1078 p = priv->dma_tx + i;
1063 p->des2 = 0; 1079 p->des2 = 0;
1064 priv->tx_skbuff_dma[i] = 0; 1080 priv->tx_skbuff_dma[i].buf = 0;
1081 priv->tx_skbuff_dma[i].map_as_page = false;
1065 priv->tx_skbuff[i] = NULL; 1082 priv->tx_skbuff[i] = NULL;
1066 } 1083 }
1067 1084
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1100 else 1117 else
1101 p = priv->dma_tx + i; 1118 p = priv->dma_tx + i;
1102 1119
1103 if (priv->tx_skbuff_dma[i]) { 1120 if (priv->tx_skbuff_dma[i].buf) {
1104 dma_unmap_single(priv->device, 1121 if (priv->tx_skbuff_dma[i].map_as_page)
1105 priv->tx_skbuff_dma[i], 1122 dma_unmap_page(priv->device,
1106 priv->hw->desc->get_tx_len(p), 1123 priv->tx_skbuff_dma[i].buf,
1107 DMA_TO_DEVICE); 1124 priv->hw->desc->get_tx_len(p),
1108 priv->tx_skbuff_dma[i] = 0; 1125 DMA_TO_DEVICE);
1126 else
1127 dma_unmap_single(priv->device,
1128 priv->tx_skbuff_dma[i].buf,
1129 priv->hw->desc->get_tx_len(p),
1130 DMA_TO_DEVICE);
1109 } 1131 }
1110 1132
1111 if (priv->tx_skbuff[i] != NULL) { 1133 if (priv->tx_skbuff[i] != NULL) {
1112 dev_kfree_skb_any(priv->tx_skbuff[i]); 1134 dev_kfree_skb_any(priv->tx_skbuff[i]);
1113 priv->tx_skbuff[i] = NULL; 1135 priv->tx_skbuff[i] = NULL;
1136 priv->tx_skbuff_dma[i].buf = 0;
1137 priv->tx_skbuff_dma[i].map_as_page = false;
1114 } 1138 }
1115 } 1139 }
1116} 1140}
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1131 if (!priv->rx_skbuff) 1155 if (!priv->rx_skbuff)
1132 goto err_rx_skbuff; 1156 goto err_rx_skbuff;
1133 1157
1134 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), 1158 priv->tx_skbuff_dma = kmalloc_array(txsize,
1159 sizeof(*priv->tx_skbuff_dma),
1135 GFP_KERNEL); 1160 GFP_KERNEL);
1136 if (!priv->tx_skbuff_dma) 1161 if (!priv->tx_skbuff_dma)
1137 goto err_tx_skbuff_dma; 1162 goto err_tx_skbuff_dma;
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
1293 pr_debug("%s: curr %d, dirty %d\n", __func__, 1318 pr_debug("%s: curr %d, dirty %d\n", __func__,
1294 priv->cur_tx, priv->dirty_tx); 1319 priv->cur_tx, priv->dirty_tx);
1295 1320
1296 if (likely(priv->tx_skbuff_dma[entry])) { 1321 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1297 dma_unmap_single(priv->device, 1322 if (priv->tx_skbuff_dma[entry].map_as_page)
1298 priv->tx_skbuff_dma[entry], 1323 dma_unmap_page(priv->device,
1299 priv->hw->desc->get_tx_len(p), 1324 priv->tx_skbuff_dma[entry].buf,
1300 DMA_TO_DEVICE); 1325 priv->hw->desc->get_tx_len(p),
1301 priv->tx_skbuff_dma[entry] = 0; 1326 DMA_TO_DEVICE);
1327 else
1328 dma_unmap_single(priv->device,
1329 priv->tx_skbuff_dma[entry].buf,
1330 priv->hw->desc->get_tx_len(p),
1331 DMA_TO_DEVICE);
1332 priv->tx_skbuff_dma[entry].buf = 0;
1333 priv->tx_skbuff_dma[entry].map_as_page = false;
1302 } 1334 }
1303 priv->hw->mode->clean_desc3(priv, p); 1335 priv->hw->mode->clean_desc3(priv, p);
1304 1336
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev)
1637 /* Initialize the MAC Core */ 1669 /* Initialize the MAC Core */
1638 priv->hw->mac->core_init(priv->hw, dev->mtu); 1670 priv->hw->mac->core_init(priv->hw, dev->mtu);
1639 1671
1672 ret = priv->hw->mac->rx_ipc(priv->hw);
1673 if (!ret) {
1674 pr_warn(" RX IPC Checksum Offload disabled\n");
1675 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1676 priv->hw->rx_csum = 0;
1677 }
1678
1640 /* Enable the MAC Rx/Tx */ 1679 /* Enable the MAC Rx/Tx */
1641 stmmac_set_mac(priv->ioaddr, true); 1680 stmmac_set_mac(priv->ioaddr, true);
1642 1681
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1887 if (likely(!is_jumbo)) { 1926 if (likely(!is_jumbo)) {
1888 desc->des2 = dma_map_single(priv->device, skb->data, 1927 desc->des2 = dma_map_single(priv->device, skb->data,
1889 nopaged_len, DMA_TO_DEVICE); 1928 nopaged_len, DMA_TO_DEVICE);
1890 priv->tx_skbuff_dma[entry] = desc->des2; 1929 if (dma_mapping_error(priv->device, desc->des2))
1930 goto dma_map_err;
1931 priv->tx_skbuff_dma[entry].buf = desc->des2;
1891 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, 1932 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1892 csum_insertion, priv->mode); 1933 csum_insertion, priv->mode);
1893 } else { 1934 } else {
1894 desc = first; 1935 desc = first;
1895 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); 1936 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1937 if (unlikely(entry < 0))
1938 goto dma_map_err;
1896 } 1939 }
1897 1940
1898 for (i = 0; i < nfrags; i++) { 1941 for (i = 0; i < nfrags; i++) {
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1908 1951
1909 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 1952 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1910 DMA_TO_DEVICE); 1953 DMA_TO_DEVICE);
1911 priv->tx_skbuff_dma[entry] = desc->des2; 1954 if (dma_mapping_error(priv->device, desc->des2))
1955 goto dma_map_err; /* should reuse desc w/o issues */
1956
1957 priv->tx_skbuff_dma[entry].buf = desc->des2;
1958 priv->tx_skbuff_dma[entry].map_as_page = true;
1912 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, 1959 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1913 priv->mode); 1960 priv->mode);
1914 wmb(); 1961 wmb();
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1975 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 2022 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1976 2023
1977 spin_unlock(&priv->tx_lock); 2024 spin_unlock(&priv->tx_lock);
2025 return NETDEV_TX_OK;
1978 2026
2027dma_map_err:
2028 dev_err(priv->device, "Tx dma map failed\n");
2029 dev_kfree_skb(skb);
2030 priv->dev->stats.tx_dropped++;
1979 return NETDEV_TX_OK; 2031 return NETDEV_TX_OK;
1980} 2032}
1981 2033
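Worth noting about the new dma_map_err label: every failed mapping in stmmac_xmit() now funnels to one exit that frees the skb, counts it in tx_dropped, and still returns NETDEV_TX_OK, since reporting busy would only make the stack retry an skb that can never be mapped. The control flow in miniature:

#include <stdio.h>

#define NETDEV_TX_OK 0

static int map_frag(int i) { return i != 2; }   /* 3rd fragment fails */

static int xmit(int nfrags, unsigned long *tx_dropped)
{
        for (int i = 0; i < nfrags; i++)
                if (!map_frag(i))
                        goto dma_map_err;  /* one unwind path for all frags */
        return NETDEV_TX_OK;

dma_map_err:
        /* free the skb and count the drop, but never report failure up */
        (*tx_dropped)++;
        return NETDEV_TX_OK;
}

int main(void)
{
        unsigned long dropped = 0;

        xmit(4, &dropped);
        printf("tx_dropped = %lu\n", dropped);
        return 0;
}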
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2028 priv->rx_skbuff_dma[entry] = 2080 priv->rx_skbuff_dma[entry] =
2029 dma_map_single(priv->device, skb->data, bfsize, 2081 dma_map_single(priv->device, skb->data, bfsize,
2030 DMA_FROM_DEVICE); 2082 DMA_FROM_DEVICE);
2031 2083 if (dma_mapping_error(priv->device,
2084 priv->rx_skbuff_dma[entry])) {
2085 dev_err(priv->device, "Rx dma map failed\n");
2086 dev_kfree_skb(skb);
2087 break;
2088 }
2032 p->des2 = priv->rx_skbuff_dma[entry]; 2089 p->des2 = priv->rx_skbuff_dma[entry];
2033 2090
2034 priv->hw->mode->refill_desc3(priv, p); 2091 priv->hw->mode->refill_desc3(priv, p);
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
2055 unsigned int entry = priv->cur_rx % rxsize; 2112 unsigned int entry = priv->cur_rx % rxsize;
2056 unsigned int next_entry; 2113 unsigned int next_entry;
2057 unsigned int count = 0; 2114 unsigned int count = 0;
2058 int coe = priv->plat->rx_coe; 2115 int coe = priv->hw->rx_csum;
2059 2116
2060 if (netif_msg_rx_status(priv)) { 2117 if (netif_msg_rx_status(priv)) {
2061 pr_debug("%s: descriptor ring:\n", __func__); 2118 pr_debug("%s: descriptor ring:\n", __func__);
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2276 2333
2277 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) 2334 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2278 features &= ~NETIF_F_RXCSUM; 2335 features &= ~NETIF_F_RXCSUM;
2279 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) 2336
2280 features &= ~NETIF_F_IPV6_CSUM;
2281 if (!priv->plat->tx_coe) 2337 if (!priv->plat->tx_coe)
2282 features &= ~NETIF_F_ALL_CSUM; 2338 features &= ~NETIF_F_ALL_CSUM;
2283 2339
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
2292 return features; 2348 return features;
2293} 2349}
2294 2350
2351static int stmmac_set_features(struct net_device *netdev,
2352 netdev_features_t features)
2353{
2354 struct stmmac_priv *priv = netdev_priv(netdev);
2355
2356 /* Keep the COE type if checksum offload is supported */
2357 if (features & NETIF_F_RXCSUM)
2358 priv->hw->rx_csum = priv->plat->rx_coe;
2359 else
2360 priv->hw->rx_csum = 0;
2361 /* No check needed because rx_coe has been set before and it will be
2362 * fixed if there is an issue.
2363 */
2364 priv->hw->mac->rx_ipc(priv->hw);
2365
2366 return 0;
2367}
2368
2295/** 2369/**
2296 * stmmac_interrupt - main ISR 2370 * stmmac_interrupt - main ISR
2297 * @irq: interrupt number. 2371 * @irq: interrupt number.
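Wiring up .ndo_set_features (see the netdev_ops hunk below) means `ethtool -K ethX rx off` now reaches the driver at runtime instead of RXCSUM being fixed at probe. The decision itself is tiny and worth isolating (the feature bit value here is illustrative):

#include <stdio.h>

#define NETIF_F_RXCSUM (1u << 3)   /* illustrative feature bit */

enum { STMMAC_RX_COE_NONE, STMMAC_RX_COE_TYPE1, STMMAC_RX_COE_TYPE2 };

/* Mirrors stmmac_set_features(): keep the COE type only with RXCSUM set. */
static int pick_rx_csum(unsigned int features, int plat_rx_coe)
{
        return (features & NETIF_F_RXCSUM) ? plat_rx_coe
                                           : STMMAC_RX_COE_NONE;
}

int main(void)
{
        printf("rx on:  coe=%d\n",
               pick_rx_csum(NETIF_F_RXCSUM, STMMAC_RX_COE_TYPE2));
        printf("rx off: coe=%d\n", pick_rx_csum(0, STMMAC_RX_COE_TYPE2));
        return 0;
}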
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
2572 .ndo_stop = stmmac_release, 2646 .ndo_stop = stmmac_release,
2573 .ndo_change_mtu = stmmac_change_mtu, 2647 .ndo_change_mtu = stmmac_change_mtu,
2574 .ndo_fix_features = stmmac_fix_features, 2648 .ndo_fix_features = stmmac_fix_features,
2649 .ndo_set_features = stmmac_set_features,
2575 .ndo_set_rx_mode = stmmac_set_rx_mode, 2650 .ndo_set_rx_mode = stmmac_set_rx_mode,
2576 .ndo_tx_timeout = stmmac_tx_timeout, 2651 .ndo_tx_timeout = stmmac_tx_timeout,
2577 .ndo_do_ioctl = stmmac_ioctl, 2652 .ndo_do_ioctl = stmmac_ioctl,
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
2592 */ 2667 */
2593static int stmmac_hw_init(struct stmmac_priv *priv) 2668static int stmmac_hw_init(struct stmmac_priv *priv)
2594{ 2669{
2595 int ret;
2596 struct mac_device_info *mac; 2670 struct mac_device_info *mac;
2597 2671
2598 /* Identify the MAC HW device */ 2672 /* Identify the MAC HW device */
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
2649 /* To use alternate (extended) or normal descriptor structures */ 2723 /* To use alternate (extended) or normal descriptor structures */
2650 stmmac_selec_desc_mode(priv); 2724 stmmac_selec_desc_mode(priv);
2651 2725
2652 ret = priv->hw->mac->rx_ipc(priv->hw); 2726 if (priv->plat->rx_coe) {
2653 if (!ret) { 2727 priv->hw->rx_csum = priv->plat->rx_coe;
2654 pr_warn(" RX IPC Checksum Offload not configured.\n");
2655 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2656 }
2657
2658 if (priv->plat->rx_coe)
2659 pr_info(" RX Checksum Offload Engine supported (type %d)\n", 2728 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
2660 priv->plat->rx_coe); 2729 priv->plat->rx_coe);
2730 }
2661 if (priv->plat->tx_coe) 2731 if (priv->plat->tx_coe)
2662 pr_info(" TX Checksum insertion supported\n"); 2732 pr_info(" TX Checksum insertion supported\n");
2663 2733
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b7ad3565566c..c5ee79d8a8c5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv)
206{ 206{
207 if (priv->ptp_clock) { 207 if (priv->ptp_clock) {
208 ptp_clock_unregister(priv->ptp_clock); 208 ptp_clock_unregister(priv->ptp_clock);
209 priv->ptp_clock = NULL;
209 pr_debug("Removed PTP HW clock successfully on %s\n", 210 pr_debug("Removed PTP HW clock successfully on %s\n",
210 priv->dev->name); 211 priv->dev->name);
211 } 212 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index 3dbc047622fa..4535df37c227 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -25,8 +25,6 @@
25#ifndef __STMMAC_PTP_H__ 25#ifndef __STMMAC_PTP_H__
26#define __STMMAC_PTP_H__ 26#define __STMMAC_PTP_H__
27 27
28#define STMMAC_SYSCLOCK 62500000
29
30/* IEEE 1588 PTP register offsets */ 28/* IEEE 1588 PTP register offsets */
31#define PTP_TCR 0x0700 /* Timestamp Control Reg */ 29#define PTP_TCR 0x0700 /* Timestamp Control Reg */
32#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ 30#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 23c89ab5a6ad..f67539650c38 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -350,14 +350,17 @@ static int vnet_walk_rx_one(struct vnet_port *port,
350 if (IS_ERR(desc)) 350 if (IS_ERR(desc))
351 return PTR_ERR(desc); 351 return PTR_ERR(desc);
352 352
353 if (desc->hdr.state != VIO_DESC_READY)
354 return 1;
355
356 rmb();
357
353 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", 358 viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
354 desc->hdr.state, desc->hdr.ack, 359 desc->hdr.state, desc->hdr.ack,
355 desc->size, desc->ncookies, 360 desc->size, desc->ncookies,
356 desc->cookies[0].cookie_addr, 361 desc->cookies[0].cookie_addr,
357 desc->cookies[0].cookie_size); 362 desc->cookies[0].cookie_size);
358 363
359 if (desc->hdr.state != VIO_DESC_READY)
360 return 1;
361 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies); 364 err = vnet_rx_one(port, desc->size, desc->cookies, desc->ncookies);
362 if (err == -ECONNRESET) 365 if (err == -ECONNRESET)
363 return err; 366 return err;
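The sunvnet reorder above is a classic barrier-placement fix: rmb() only helps if it sits between the load of the ownership flag and the loads of the descriptor body it guards, so the VIO_DESC_READY test has to come first. With C11 atomics the same ordering looks like this (a standalone sketch, not the LDC code):

#include <stdatomic.h>
#include <stdio.h>

enum { VIO_DESC_FREE, VIO_DESC_READY };

struct desc {
        _Atomic int state;    /* ownership flag written by the producer */
        int size;             /* body fields the flag guards */
};

static int walk_rx_one(struct desc *d)
{
        if (atomic_load_explicit(&d->state, memory_order_relaxed)
            != VIO_DESC_READY)
                return 1;     /* not ours yet: do not touch the body */

        atomic_thread_fence(memory_order_acquire);  /* the rmb() */

        printf("consume %d bytes\n", d->size);      /* now safe to read */
        return 0;
}

int main(void)
{
        struct desc d = { VIO_DESC_READY, 1500 };

        return walk_rx_one(&d);
}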
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 999fb72688d2..e2a00287f8eb 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -699,6 +699,28 @@ static void cpsw_rx_handler(void *token, int len, int status)
699 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb); 699 cpsw_dual_emac_src_port_detect(status, priv, ndev, skb);
700 700
701 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) { 701 if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
702 bool ndev_status = false;
703 struct cpsw_slave *slave = priv->slaves;
704 int n;
705
706 if (priv->data.dual_emac) {
707 /* In dual emac mode check for all interfaces */
708 for (n = priv->data.slaves; n; n--, slave++)
709 if (netif_running(slave->ndev))
710 ndev_status = true;
711 }
712
713 if (ndev_status && (status >= 0)) {
714 /* The packet was received for an interface which
715 * is already down while the other interface is up
716 * and running. Instead of freeing it, which would
717 * reduce the number of rx descriptors in the DMA
718 * engine, requeue the skb back to cpdma.
719 */
720 new_skb = skb;
721 goto requeue;
722 }
723
702 /* the interface is going down, skbs are purged */ 724 /* the interface is going down, skbs are purged */
703 dev_kfree_skb_any(skb); 725 dev_kfree_skb_any(skb);
704 return; 726 return;
@@ -717,6 +739,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
717 new_skb = skb; 739 new_skb = skb;
718 } 740 }
719 741
742requeue:
720 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data, 743 ret = cpdma_chan_submit(priv->rxch, new_skb, new_skb->data,
721 skb_tailroom(new_skb), 0); 744 skb_tailroom(new_skb), 0);
722 if (WARN_ON(ret < 0)) 745 if (WARN_ON(ret < 0))
@@ -2311,10 +2334,19 @@ static int cpsw_suspend(struct device *dev)
2311 struct net_device *ndev = platform_get_drvdata(pdev); 2334 struct net_device *ndev = platform_get_drvdata(pdev);
2312 struct cpsw_priv *priv = netdev_priv(ndev); 2335 struct cpsw_priv *priv = netdev_priv(ndev);
2313 2336
2314 if (netif_running(ndev)) 2337 if (priv->data.dual_emac) {
2315 cpsw_ndo_stop(ndev); 2338 int i;
2316 2339
2317 for_each_slave(priv, soft_reset_slave); 2340 for (i = 0; i < priv->data.slaves; i++) {
2341 if (netif_running(priv->slaves[i].ndev))
2342 cpsw_ndo_stop(priv->slaves[i].ndev);
2343 soft_reset_slave(priv->slaves + i);
2344 }
2345 } else {
2346 if (netif_running(ndev))
2347 cpsw_ndo_stop(ndev);
2348 for_each_slave(priv, soft_reset_slave);
2349 }
2318 2350
2319 pm_runtime_put_sync(&pdev->dev); 2351 pm_runtime_put_sync(&pdev->dev);
2320 2352
@@ -2328,14 +2360,24 @@ static int cpsw_resume(struct device *dev)
2328{ 2360{
2329 struct platform_device *pdev = to_platform_device(dev); 2361 struct platform_device *pdev = to_platform_device(dev);
2330 struct net_device *ndev = platform_get_drvdata(pdev); 2362 struct net_device *ndev = platform_get_drvdata(pdev);
2363 struct cpsw_priv *priv = netdev_priv(ndev);
2331 2364
2332 pm_runtime_get_sync(&pdev->dev); 2365 pm_runtime_get_sync(&pdev->dev);
2333 2366
2334 /* Select default pin state */ 2367 /* Select default pin state */
2335 pinctrl_pm_select_default_state(&pdev->dev); 2368 pinctrl_pm_select_default_state(&pdev->dev);
2336 2369
2337 if (netif_running(ndev)) 2370 if (priv->data.dual_emac) {
2338 cpsw_ndo_open(ndev); 2371 int i;
2372
2373 for (i = 0; i < priv->data.slaves; i++) {
2374 if (netif_running(priv->slaves[i].ndev))
2375 cpsw_ndo_open(priv->slaves[i].ndev);
2376 }
2377 } else {
2378 if (netif_running(ndev))
2379 cpsw_ndo_open(ndev);
2380 }
2339 return 0; 2381 return 0;
2340} 2382}
2341 2383
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h
index c1ba26c06d73..3de2f0d15fe2 100644
--- a/drivers/net/fddi/skfp/h/skfbi.h
+++ b/drivers/net/fddi/skfp/h/skfbi.h
@@ -147,11 +147,6 @@
147#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ 147#define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */
148#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ 148#define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */
149 149
150/* PCI_BASE_2ND 32 bit 2nd Base address */
151#define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */
152#define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */
153#define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */
154
155/* PCI_SUB_VID 16 bit Subsystem Vendor ID */ 150/* PCI_SUB_VID 16 bit Subsystem Vendor ID */
156/* PCI_SUB_ID 16 bit Subsystem ID */ 151/* PCI_SUB_ID 16 bit Subsystem ID */
157 152
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 60e4ca01ccbb..726edabff26b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -36,6 +36,7 @@
36#include <linux/netpoll.h> 36#include <linux/netpoll.h>
37 37
38#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) 38#define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE)
39#define MACVLAN_BC_QUEUE_LEN 1000
39 40
40struct macvlan_port { 41struct macvlan_port {
41 struct net_device *dev; 42 struct net_device *dev;
@@ -248,7 +249,7 @@ static void macvlan_broadcast_enqueue(struct macvlan_port *port,
248 goto err; 249 goto err;
249 250
250 spin_lock(&port->bc_queue.lock); 251 spin_lock(&port->bc_queue.lock);
251 if (skb_queue_len(&port->bc_queue) < skb->dev->tx_queue_len) { 252 if (skb_queue_len(&port->bc_queue) < MACVLAN_BC_QUEUE_LEN) {
252 __skb_queue_tail(&port->bc_queue, nskb); 253 __skb_queue_tail(&port->bc_queue, nskb);
253 err = 0; 254 err = 0;
254 } 255 }
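Replacing the lower device's tx_queue_len with a fixed MACVLAN_BC_QUEUE_LEN bounds the broadcast backlog independently of the underlying device (whose tx_queue_len may legitimately be 0, which would have made the old test drop every broadcast). The bounded-enqueue shape:

#include <stdio.h>

#define MACVLAN_BC_QUEUE_LEN 1000   /* fixed cap, per the hunk above */

struct bc_queue { unsigned int len; };

static int bc_enqueue(struct bc_queue *q)
{
        if (q->len >= MACVLAN_BC_QUEUE_LEN)
                return -1;          /* drop rather than grow unbounded */
        q->len++;
        return 0;
}

int main(void)
{
        struct bc_queue q = { 0 };

        while (bc_enqueue(&q) == 0)
                ;
        printf("queued %u frames before backpressure\n", q.len);
        return 0;
}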
@@ -739,7 +740,10 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
739 struct macvlan_dev *vlan = netdev_priv(dev); 740 struct macvlan_dev *vlan = netdev_priv(dev);
740 int err = -EINVAL; 741 int err = -EINVAL;
741 742
742 if (!vlan->port->passthru) 743 /* Support unicast filter only on passthru devices.
744 * Multicast filter should be allowed on all devices.
745 */
746 if (!vlan->port->passthru && is_unicast_ether_addr(addr))
743 return -EOPNOTSUPP; 747 return -EOPNOTSUPP;
744 748
745 if (flags & NLM_F_REPLACE) 749 if (flags & NLM_F_REPLACE)
@@ -760,7 +764,10 @@ static int macvlan_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
760 struct macvlan_dev *vlan = netdev_priv(dev); 764 struct macvlan_dev *vlan = netdev_priv(dev);
761 int err = -EINVAL; 765 int err = -EINVAL;
762 766
763 if (!vlan->port->passthru) 767 /* Support unicast filter only on passthru devices.
768 * Multicast filter should be allowed on all devices.
769 */
770 if (!vlan->port->passthru && is_unicast_ether_addr(addr))
764 return -EOPNOTSUPP; 771 return -EOPNOTSUPP;
765 772
766 if (is_unicast_ether_addr(addr)) 773 if (is_unicast_ether_addr(addr))
@@ -800,6 +807,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
800 features, 807 features,
801 mask); 808 mask);
802 features |= ALWAYS_ON_FEATURES; 809 features |= ALWAYS_ON_FEATURES;
810 features &= ~NETIF_F_NETNS_LOCAL;
803 811
804 return features; 812 return features;
805} 813}
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 526b94cea569..fdce1ea28790 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -157,6 +157,23 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
157 return bcm7xxx_28nm_afe_config_init(phydev); 157 return bcm7xxx_28nm_afe_config_init(phydev);
158} 158}
159 159
160static int bcm7xxx_28nm_resume(struct phy_device *phydev)
161{
162 int ret;
163
164 /* Re-apply workarounds coming out of suspend/resume */
165 ret = bcm7xxx_28nm_config_init(phydev);
166 if (ret)
167 return ret;
168
169 /* 28nm Gigabit PHYs come out of reset without any half-duplex
170 * or "hub" compliant advertised mode, fix that. This does not
171 * cause any problems with the PHY library since genphy_config_aneg()
172 * gracefully handles auto-negotiated and forced modes.
173 */
174 return genphy_config_aneg(phydev);
175}
176
160static int phy_set_clr_bits(struct phy_device *dev, int location, 177static int phy_set_clr_bits(struct phy_device *dev, int location,
161 int set_mask, int clr_mask) 178 int set_mask, int clr_mask)
162{ 179{
@@ -212,7 +229,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
212} 229}
213 230
214/* Workaround for putting the PHY in IDDQ mode, required 231/* Workaround for putting the PHY in IDDQ mode, required
215 * for all BCM7XXX PHYs 232 * for all BCM7XXX 40nm and 65nm PHYs
216 */ 233 */
217static int bcm7xxx_suspend(struct phy_device *phydev) 234static int bcm7xxx_suspend(struct phy_device *phydev)
218{ 235{
@@ -257,8 +274,7 @@ static struct phy_driver bcm7xxx_driver[] = {
257 .config_init = bcm7xxx_28nm_afe_config_init, 274 .config_init = bcm7xxx_28nm_afe_config_init,
258 .config_aneg = genphy_config_aneg, 275 .config_aneg = genphy_config_aneg,
259 .read_status = genphy_read_status, 276 .read_status = genphy_read_status,
260 .suspend = bcm7xxx_suspend, 277 .resume = bcm7xxx_28nm_resume,
261 .resume = bcm7xxx_28nm_afe_config_init,
262 .driver = { .owner = THIS_MODULE }, 278 .driver = { .owner = THIS_MODULE },
263}, { 279}, {
264 .phy_id = PHY_ID_BCM7439, 280 .phy_id = PHY_ID_BCM7439,
@@ -270,8 +286,7 @@ static struct phy_driver bcm7xxx_driver[] = {
270 .config_init = bcm7xxx_28nm_afe_config_init, 286 .config_init = bcm7xxx_28nm_afe_config_init,
271 .config_aneg = genphy_config_aneg, 287 .config_aneg = genphy_config_aneg,
272 .read_status = genphy_read_status, 288 .read_status = genphy_read_status,
273 .suspend = bcm7xxx_suspend, 289 .resume = bcm7xxx_28nm_resume,
274 .resume = bcm7xxx_28nm_afe_config_init,
275 .driver = { .owner = THIS_MODULE }, 290 .driver = { .owner = THIS_MODULE },
276}, { 291}, {
277 .phy_id = PHY_ID_BCM7445, 292 .phy_id = PHY_ID_BCM7445,
@@ -283,21 +298,7 @@ static struct phy_driver bcm7xxx_driver[] = {
283 .config_init = bcm7xxx_28nm_config_init, 298 .config_init = bcm7xxx_28nm_config_init,
284 .config_aneg = genphy_config_aneg, 299 .config_aneg = genphy_config_aneg,
285 .read_status = genphy_read_status, 300 .read_status = genphy_read_status,
286 .suspend = bcm7xxx_suspend, 301 .resume = bcm7xxx_28nm_afe_config_init,
287 .resume = bcm7xxx_28nm_config_init,
288 .driver = { .owner = THIS_MODULE },
289}, {
290 .name = "Broadcom BCM7XXX 28nm",
291 .phy_id = PHY_ID_BCM7XXX_28,
292 .phy_id_mask = PHY_BCM_OUI_MASK,
293 .features = PHY_GBIT_FEATURES |
294 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
295 .flags = PHY_IS_INTERNAL,
296 .config_init = bcm7xxx_28nm_config_init,
297 .config_aneg = genphy_config_aneg,
298 .read_status = genphy_read_status,
299 .suspend = bcm7xxx_suspend,
300 .resume = bcm7xxx_28nm_config_init,
301 .driver = { .owner = THIS_MODULE }, 302 .driver = { .owner = THIS_MODULE },
302}, { 303}, {
303 .phy_id = PHY_BCM_OUI_4, 304 .phy_id = PHY_BCM_OUI_4,
@@ -331,7 +332,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
331 { PHY_ID_BCM7366, 0xfffffff0, }, 332 { PHY_ID_BCM7366, 0xfffffff0, },
332 { PHY_ID_BCM7439, 0xfffffff0, }, 333 { PHY_ID_BCM7439, 0xfffffff0, },
333 { PHY_ID_BCM7445, 0xfffffff0, }, 334 { PHY_ID_BCM7445, 0xfffffff0, },
334 { PHY_ID_BCM7XXX_28, 0xfffffc00 },
335 { PHY_BCM_OUI_4, 0xffff0000 }, 335 { PHY_BCM_OUI_4, 0xffff0000 },
336 { PHY_BCM_OUI_5, 0xffffff00 }, 336 { PHY_BCM_OUI_5, 0xffffff00 },
337 { } 337 { }
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index fd0ea7c50ee6..011dbda2b2f1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -592,8 +592,7 @@ static struct phy_driver ksphy_driver[] = {
592 .phy_id = PHY_ID_KSZ9031, 592 .phy_id = PHY_ID_KSZ9031,
593 .phy_id_mask = 0x00fffff0, 593 .phy_id_mask = 0x00fffff0,
594 .name = "Micrel KSZ9031 Gigabit PHY", 594 .name = "Micrel KSZ9031 Gigabit PHY",
595 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause 595 .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
596 | SUPPORTED_Asym_Pause),
597 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, 596 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
598 .config_init = ksz9031_config_init, 597 .config_init = ksz9031_config_init,
599 .config_aneg = genphy_config_aneg, 598 .config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c94e2a27446a..a854d38c231d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1036 /* First check if the EEE ability is supported */ 1036 /* First check if the EEE ability is supported */
1037 eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, 1037 eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
1038 MDIO_MMD_PCS, phydev->addr); 1038 MDIO_MMD_PCS, phydev->addr);
1039 if (eee_cap < 0) 1039 if (eee_cap <= 0)
1040 return eee_cap; 1040 goto eee_exit_err;
1041 1041
1042 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); 1042 cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
1043 if (!cap) 1043 if (!cap)
1044 return -EPROTONOSUPPORT; 1044 goto eee_exit_err;
1045 1045
1046 /* Check which link settings negotiated and verify it in 1046 /* Check which link settings negotiated and verify it in
1047 * the EEE advertising registers. 1047 * the EEE advertising registers.
1048 */ 1048 */
1049 eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, 1049 eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
1050 MDIO_MMD_AN, phydev->addr); 1050 MDIO_MMD_AN, phydev->addr);
1051 if (eee_lp < 0) 1051 if (eee_lp <= 0)
1052 return eee_lp; 1052 goto eee_exit_err;
1053 1053
1054 eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, 1054 eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
1055 MDIO_MMD_AN, phydev->addr); 1055 MDIO_MMD_AN, phydev->addr);
1056 if (eee_adv < 0) 1056 if (eee_adv <= 0)
1057 return eee_adv; 1057 goto eee_exit_err;
1058 1058
1059 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); 1059 adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
1060 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); 1060 lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
1061 idx = phy_find_setting(phydev->speed, phydev->duplex); 1061 idx = phy_find_setting(phydev->speed, phydev->duplex);
1062 if (!(lp & adv & settings[idx].setting)) 1062 if (!(lp & adv & settings[idx].setting))
1063 return -EPROTONOSUPPORT; 1063 goto eee_exit_err;
1064 1064
1065 if (clk_stop_enable) { 1065 if (clk_stop_enable) {
1066 /* Configure the PHY to stop receiving xMII 1066 /* Configure the PHY to stop receiving xMII
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1080 1080
1081 return 0; /* EEE supported */ 1081 return 0; /* EEE supported */
1082 } 1082 }
1083 1083eee_exit_err:
1084 return -EPROTONOSUPPORT; 1084 return -EPROTONOSUPPORT;
1085} 1085}
1086EXPORT_SYMBOL(phy_init_eee); 1086EXPORT_SYMBOL(phy_init_eee);
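The phy_init_eee() rework above also tightens semantics: each MMD read is now checked with <= 0, so both an I/O error and an all-zero (not-EEE-capable) register funnel to the single eee_exit_err exit, and the function reports -EPROTONOSUPPORT instead of leaking a raw read result. The idiom, reduced to its skeleton with hypothetical register reads:

#include <stdio.h>

#define EPROTONOSUPPORT 93

/* Hypothetical register reads: <0 is an I/O error, 0 means "no EEE". */
static int read_eee_cap(void) { return 0; }
static int read_eee_lp(void)  { return 0x1e; }

static int phy_init_eee_sketch(void)
{
        int eee_cap, eee_lp;

        eee_cap = read_eee_cap();
        if (eee_cap <= 0)          /* error and "not able" share an exit */
                goto eee_exit_err;

        eee_lp = read_eee_lp();
        if (eee_lp <= 0)
                goto eee_exit_err;

        return 0;                  /* EEE supported */

eee_exit_err:
        return -EPROTONOSUPPORT;
}

int main(void)
{
        printf("phy_init_eee() = %d\n", phy_init_eee_sketch());
        return 0;
}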
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 180c49479c42..a4b08198fb9f 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -43,6 +43,22 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
43 43
44static int smsc_phy_config_init(struct phy_device *phydev) 44static int smsc_phy_config_init(struct phy_device *phydev)
45{ 45{
46 int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
47
48 if (rc < 0)
49 return rc;
50
51 /* Enable energy detect mode for these SMSC transceivers */
52 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
53 rc | MII_LAN83C185_EDPWRDOWN);
54 if (rc < 0)
55 return rc;
56
57 return smsc_phy_ack_interrupt(phydev);
58}
59
60static int smsc_phy_reset(struct phy_device *phydev)
61{
46 int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES); 62 int rc = phy_read(phydev, MII_LAN83C185_SPECIAL_MODES);
47 if (rc < 0) 63 if (rc < 0)
48 return rc; 64 return rc;
@@ -66,18 +82,7 @@ static int smsc_phy_config_init(struct phy_device *phydev)
66 rc = phy_read(phydev, MII_BMCR); 82 rc = phy_read(phydev, MII_BMCR);
67 } while (rc & BMCR_RESET); 83 } while (rc & BMCR_RESET);
68 } 84 }
69 85 return 0;
70 rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
71 if (rc < 0)
72 return rc;
73
74 /* Enable energy detect mode for these SMSC transceivers */
75 rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
76 rc | MII_LAN83C185_EDPWRDOWN);
77 if (rc < 0)
78 return rc;
79
80 return smsc_phy_ack_interrupt (phydev);
81} 86}
82 87
83static int lan911x_config_init(struct phy_device *phydev) 88static int lan911x_config_init(struct phy_device *phydev)
@@ -142,6 +147,7 @@ static struct phy_driver smsc_phy_driver[] = {
142 .config_aneg = genphy_config_aneg, 147 .config_aneg = genphy_config_aneg,
143 .read_status = genphy_read_status, 148 .read_status = genphy_read_status,
144 .config_init = smsc_phy_config_init, 149 .config_init = smsc_phy_config_init,
150 .soft_reset = smsc_phy_reset,
145 151
146 /* IRQ related */ 152 /* IRQ related */
147 .ack_interrupt = smsc_phy_ack_interrupt, 153 .ack_interrupt = smsc_phy_ack_interrupt,
@@ -164,6 +170,7 @@ static struct phy_driver smsc_phy_driver[] = {
164 .config_aneg = genphy_config_aneg, 170 .config_aneg = genphy_config_aneg,
165 .read_status = genphy_read_status, 171 .read_status = genphy_read_status,
166 .config_init = smsc_phy_config_init, 172 .config_init = smsc_phy_config_init,
173 .soft_reset = smsc_phy_reset,
167 174
168 /* IRQ related */ 175 /* IRQ related */
169 .ack_interrupt = smsc_phy_ack_interrupt, 176 .ack_interrupt = smsc_phy_ack_interrupt,
@@ -186,6 +193,7 @@ static struct phy_driver smsc_phy_driver[] = {
186 .config_aneg = genphy_config_aneg, 193 .config_aneg = genphy_config_aneg,
187 .read_status = genphy_read_status, 194 .read_status = genphy_read_status,
188 .config_init = smsc_phy_config_init, 195 .config_init = smsc_phy_config_init,
196 .soft_reset = smsc_phy_reset,
189 197
190 /* IRQ related */ 198 /* IRQ related */
191 .ack_interrupt = smsc_phy_ack_interrupt, 199 .ack_interrupt = smsc_phy_ack_interrupt,
@@ -230,6 +238,7 @@ static struct phy_driver smsc_phy_driver[] = {
230 .config_aneg = genphy_config_aneg, 238 .config_aneg = genphy_config_aneg,
231 .read_status = lan87xx_read_status, 239 .read_status = lan87xx_read_status,
232 .config_init = smsc_phy_config_init, 240 .config_init = smsc_phy_config_init,
241 .soft_reset = smsc_phy_reset,
233 242
234 /* IRQ related */ 243 /* IRQ related */
235 .ack_interrupt = smsc_phy_ack_interrupt, 244 .ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 87f710476217..74760e8143e3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2019,7 +2019,7 @@ static int rtl8153_enable(struct r8152 *tp)
2019 return rtl_enable(tp); 2019 return rtl_enable(tp);
2020} 2020}
2021 2021
2022static void rtl8152_disable(struct r8152 *tp) 2022static void rtl_disable(struct r8152 *tp)
2023{ 2023{
2024 u32 ocp_data; 2024 u32 ocp_data;
2025 int i; 2025 int i;
@@ -2232,6 +2232,13 @@ static inline void r8152b_enable_aldps(struct r8152 *tp)
2232 LINKENA | DIS_SDSAVE); 2232 LINKENA | DIS_SDSAVE);
2233} 2233}
2234 2234
2235static void rtl8152_disable(struct r8152 *tp)
2236{
2237 r8152b_disable_aldps(tp);
2238 rtl_disable(tp);
2239 r8152b_enable_aldps(tp);
2240}
2241
2235static void r8152b_hw_phy_cfg(struct r8152 *tp) 2242static void r8152b_hw_phy_cfg(struct r8152 *tp)
2236{ 2243{
2237 u16 data; 2244 u16 data;
@@ -2242,11 +2249,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
2242 r8152_mdio_write(tp, MII_BMCR, data); 2249 r8152_mdio_write(tp, MII_BMCR, data);
2243 } 2250 }
2244 2251
2245 r8152b_disable_aldps(tp);
2246
2247 rtl_clear_bp(tp); 2252 rtl_clear_bp(tp);
2248 2253
2249 r8152b_enable_aldps(tp);
2250 set_bit(PHY_RESET, &tp->flags); 2254 set_bit(PHY_RESET, &tp->flags);
2251} 2255}
2252 2256
@@ -2255,9 +2259,6 @@ static void r8152b_exit_oob(struct r8152 *tp)
2255 u32 ocp_data; 2259 u32 ocp_data;
2256 int i; 2260 int i;
2257 2261
2258 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2259 return;
2260
2261 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); 2262 ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
2262 ocp_data &= ~RCR_ACPT_ALL; 2263 ocp_data &= ~RCR_ACPT_ALL;
2263 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data); 2264 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
@@ -2347,7 +2348,7 @@ static void r8152b_enter_oob(struct r8152 *tp)
2347 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB); 2348 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1, RXFIFO_THR2_OOB);
2348 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB); 2349 ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL2, RXFIFO_THR3_OOB);
2349 2350
2350 rtl8152_disable(tp); 2351 rtl_disable(tp);
2351 2352
2352 for (i = 0; i < 1000; i++) { 2353 for (i = 0; i < 1000; i++) {
2353 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL); 2354 ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2485,9 +2486,6 @@ static void r8153_first_init(struct r8152 *tp)
2485 u32 ocp_data; 2486 u32 ocp_data;
2486 int i; 2487 int i;
2487 2488
2488 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2489 return;
2490
2491 rxdy_gated_en(tp, true); 2489 rxdy_gated_en(tp, true);
2492 r8153_teredo_off(tp); 2490 r8153_teredo_off(tp);
2493 2491
@@ -2560,7 +2558,7 @@ static void r8153_enter_oob(struct r8152 *tp)
2560 ocp_data &= ~NOW_IS_OOB; 2558 ocp_data &= ~NOW_IS_OOB;
2561 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data); 2559 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
2562 2560
2563 rtl8152_disable(tp); 2561 rtl_disable(tp);
2564 2562
2565 for (i = 0; i < 1000; i++) { 2563 for (i = 0; i < 1000; i++) {
 	ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2624,6 +2622,13 @@ static void r8153_enable_aldps(struct r8152 *tp)
 	ocp_reg_write(tp, OCP_POWER_CFG, data);
 }
 
+static void rtl8153_disable(struct r8152 *tp)
+{
+	r8153_disable_aldps(tp);
+	rtl_disable(tp);
+	r8153_enable_aldps(tp);
+}
+
 static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
 {
 	u16 bmcr, anar, gbcr;
@@ -2714,6 +2719,16 @@ out:
 	return ret;
 }
 
+static void rtl8152_up(struct r8152 *tp)
+{
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
+	r8152b_disable_aldps(tp);
+	r8152b_exit_oob(tp);
+	r8152b_enable_aldps(tp);
+}
+
 static void rtl8152_down(struct r8152 *tp)
 {
 	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2727,6 +2742,16 @@ static void rtl8152_down(struct r8152 *tp)
 	r8152b_enable_aldps(tp);
 }
 
+static void rtl8153_up(struct r8152 *tp)
+{
+	if (test_bit(RTL8152_UNPLUG, &tp->flags))
+		return;
+
+	r8153_disable_aldps(tp);
+	r8153_first_init(tp);
+	r8153_enable_aldps(tp);
+}
+
 static void rtl8153_down(struct r8152 *tp)
 {
 	if (test_bit(RTL8152_UNPLUG, &tp->flags)) {
@@ -2946,6 +2971,8 @@ static void r8152b_init(struct r8152 *tp)
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return;
 
+	r8152b_disable_aldps(tp);
+
 	if (tp->version == RTL_VER_01) {
 		ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
 		ocp_data &= ~LED_MODE_MASK;
@@ -2984,6 +3011,7 @@ static void r8153_init(struct r8152 *tp)
 	if (test_bit(RTL8152_UNPLUG, &tp->flags))
 		return;
 
+	r8153_disable_aldps(tp);
 	r8153_u1u2en(tp, false);
 
 	for (i = 0; i < 500; i++) {
@@ -3392,7 +3420,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
 		ops->init = r8152b_init;
 		ops->enable = rtl8152_enable;
 		ops->disable = rtl8152_disable;
-		ops->up = r8152b_exit_oob;
+		ops->up = rtl8152_up;
 		ops->down = rtl8152_down;
 		ops->unload = rtl8152_unload;
 		ret = 0;
@@ -3400,8 +3428,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
 	case PRODUCT_ID_RTL8153:
 		ops->init = r8153_init;
 		ops->enable = rtl8153_enable;
-		ops->disable = rtl8152_disable;
-		ops->up = r8153_first_init;
+		ops->disable = rtl8153_disable;
+		ops->up = rtl8153_up;
 		ops->down = rtl8153_down;
 		ops->unload = rtl8153_unload;
 		ret = 0;
@@ -3416,8 +3444,8 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
 	case PRODUCT_ID_SAMSUNG:
 		ops->init = r8153_init;
 		ops->enable = rtl8153_enable;
-		ops->disable = rtl8152_disable;
-		ops->up = r8153_first_init;
+		ops->disable = rtl8153_disable;
+		ops->up = rtl8153_up;
 		ops->down = rtl8153_down;
 		ops->unload = rtl8153_unload;
 		ret = 0;
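Note on the r8152 hunks above: every path that reprograms the chip (init, up, down) now brackets its work in an ALDPS-off window instead of pointing ops->up straight at the raw init routine. A minimal sketch of the idea, with types and helpers reduced to placeholders (not the driver's full definitions):

struct r8152;

struct rtl_ops {
	void (*up)(struct r8152 *tp);	/* bring hardware up */
	void (*down)(struct r8152 *tp);	/* quiesce hardware */
};

/* Stand-ins for the driver's ALDPS and init helpers. */
void r8153_disable_aldps(struct r8152 *tp);
void r8153_first_init(struct r8152 *tp);
void r8153_enable_aldps(struct r8152 *tp);

static void sketch_rtl8153_up(struct r8152 *tp)
{
	r8153_disable_aldps(tp);	/* wake the PHY before touching it */
	r8153_first_init(tp);		/* reconfigure while fully awake */
	r8153_enable_aldps(tp);		/* then restore power saving */
}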
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d6e90c72c257..6dfcbf523936 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev)
 	if (!netdev_mc_empty(netdev)) {
 		new_table = vmxnet3_copy_mc(netdev);
 		if (new_table) {
-			new_mode |= VMXNET3_RXM_MCAST;
 			rxConf->mfTableLen = cpu_to_le16(
 				netdev_mc_count(netdev) * ETH_ALEN);
 			new_table_pa = dma_map_single(
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev)
 					new_table,
 					rxConf->mfTableLen,
 					PCI_DMA_TODEVICE);
+		}
+
+		if (new_table_pa) {
+			new_mode |= VMXNET3_RXM_MCAST;
 			rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 		} else {
-			netdev_info(netdev, "failed to copy mcast list"
-				    ", setting ALL_MULTI\n");
+			netdev_info(netdev,
+				    "failed to copy mcast list, setting ALL_MULTI\n");
 			new_mode |= VMXNET3_RXM_ALL_MULTI;
 		}
 	}
 
-
 	if (!(new_mode & VMXNET3_RXM_MCAST)) {
 		rxConf->mfTableLen = 0;
 		rxConf->mfTablePA = 0;
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev)
 				       VMXNET3_CMD_UPDATE_MAC_FILTERS);
 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-	if (new_table) {
+	if (new_table_pa)
 		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
 				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
 	kfree(new_table);
-	}
 }
 
 void
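For context on the vmxnet3 reordering: dma_map_single() can fail, so the MCAST mode bit and the final dma_unmap_single() are now gated on the mapping result (new_table_pa) rather than on the kmalloc'd table. A condensed sketch of that shape, using the kernel DMA API's usual error check (hypothetical helper, not the literal driver code):

#include <linux/dma-mapping.h>

/* Returns the bus address, or 0 if mapping failed and the caller
 * should fall back to ALL_MULTI (0 matches the driver's gate above).
 */
static dma_addr_t sketch_map_mc_table(struct device *dev, void *table,
				      size_t len)
{
	dma_addr_t pa = dma_map_single(dev, table, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, pa))
		return 0;
	return pa;
}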
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 29ee77f2c97f..3759479f959a 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01020000 75#define VMXNET3_DRIVER_VERSION_NUM 0x01020100
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 1fb7b37d1402..beb377b2d4b7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
 		union vxlan_addr ipa = {
 			.sin.sin_addr.s_addr = tip,
-			.sa.sa_family = AF_INET,
+			.sin.sin_family = AF_INET,
 		};
 
 		vxlan_ip_miss(dev, &ipa);
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
 	} else if (vxlan->flags & VXLAN_F_L3MISS) {
 		union vxlan_addr ipa = {
 			.sin6.sin6_addr = msg->target,
-			.sa.sa_family = AF_INET6,
+			.sin6.sin6_family = AF_INET6,
 		};
 
 		vxlan_ip_miss(dev, &ipa);
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
 	if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
 		union vxlan_addr ipa = {
 			.sin.sin_addr.s_addr = pip->daddr,
-			.sa.sa_family = AF_INET,
+			.sin.sin_family = AF_INET,
 		};
 
 		vxlan_ip_miss(dev, &ipa);
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
 	if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
 		union vxlan_addr ipa = {
 			.sin6.sin6_addr = pip6->daddr,
-			.sa.sa_family = AF_INET6,
+			.sin6.sin6_family = AF_INET6,
 		};
 
 		vxlan_ip_miss(dev, &ipa);
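The vxlan hunks look cosmetic but fix a real C-semantics bug: in a designated initializer for a union, naming two different members (.sin... and .sa...) leaves only the last one initialized, so the address written first was being discarded. Keeping both fields under the same member preserves them. A standalone userspace demonstration:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

union addr {
	struct sockaddr sa;
	struct sockaddr_in sin;
};

int main(void)
{
	/* Two different union members named: the .sa initializer wins
	 * and the address set via .sin is zeroed (C11 6.7.9).
	 */
	union addr broken = {
		.sin.sin_addr.s_addr = 0x01020304,
		.sa.sa_family = AF_INET,
	};
	/* Staying within one member keeps both fields. */
	union addr fixed = {
		.sin.sin_addr.s_addr = 0x01020304,
		.sin.sin_family = AF_INET,
	};

	printf("broken: 0x%x, fixed: 0x%x\n",
	       (unsigned)broken.sin.sin_addr.s_addr,
	       (unsigned)fixed.sin.sin_addr.s_addr);
	return 0;
}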
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 334c2ece855a..da92bfa76b7c 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv)
 
 	kfree_skb(priv->rx_skb);
 
-	usb_put_dev(priv->udev);
-
 	at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
 		 __func__);
 	ieee80211_free_hw(priv->hw);
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface)
 
 	wiphy_info(priv->hw->wiphy, "disconnecting\n");
 	at76_delete_device(priv);
+	usb_put_dev(priv->udev);
 	dev_info(&interface->dev, "disconnected\n");
 }
 
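The at76c50x change is a reference-lifetime fix: the usb_put_dev() that balances the probe-time usb_get_dev() now happens only after at76_delete_device() has finished with priv->udev, so the reference cannot drop to zero mid-teardown. The general pattern, sketched with hypothetical names:

#include <linux/usb.h>

struct sketch_priv {
	struct usb_device *udev;	/* reference taken at probe time */
};

void sketch_delete_device(struct sketch_priv *p);	/* may use p->udev */

static void sketch_disconnect(struct usb_interface *intf)
{
	struct sketch_priv *p = usb_get_intfdata(intf);

	sketch_delete_device(p);	/* all users of udev finish here */
	usb_put_dev(p->udev);		/* drop the reference last */
}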
diff --git a/drivers/net/wireless/ath/ath9k/common-beacon.c b/drivers/net/wireless/ath/ath9k/common-beacon.c
index 733be5178481..6ad44470d0f2 100644
--- a/drivers/net/wireless/ath/ath9k/common-beacon.c
+++ b/drivers/net/wireless/ath/ath9k/common-beacon.c
@@ -57,7 +57,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
 				struct ath9k_beacon_state *bs)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	int dtim_intval, sleepduration;
+	int dtim_intval;
 	u64 tsf;
 
 	/* No need to configure beacon if we are not associated */
@@ -75,7 +75,6 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
 	 * last beacon we received (which may be none).
 	 */
 	dtim_intval = conf->intval * conf->dtim_period;
-	sleepduration = ah->hw->conf.listen_interval * conf->intval;
 
 	/*
 	 * Pull nexttbtt forward to reflect the current
@@ -113,7 +112,7 @@ int ath9k_cmn_beacon_config_sta(struct ath_hw *ah,
 	 */
 
 	bs->bs_sleepduration = TU_TO_USEC(roundup(IEEE80211_MS_TO_TU(100),
-						  sleepduration));
+						  conf->intval));
 	if (bs->bs_sleepduration > bs->bs_dtimperiod)
 		bs->bs_sleepduration = bs->bs_dtimperiod;
 
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index bb86eb2ffc95..f0484b1b617e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -978,7 +978,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
 	struct ath_hw *ah = common->ah;
 	struct ath_htc_rx_status *rxstatus;
 	struct ath_rx_status rx_stats;
-	bool decrypt_error;
+	bool decrypt_error = false;
 
 	if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
 		ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index e6ac8d2e610c..4b148bbb2bf6 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -513,7 +513,7 @@ irqreturn_t ath_isr(int irq, void *dev)
 	 * touch anything. Note this can happen early
 	 * on if the IRQ is shared.
 	 */
-	if (test_bit(ATH_OP_INVALID, &common->op_flags))
+	if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
 		return IRQ_NONE;
 
 	/* shared irq, not for us */
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c
index 5fe29b9f8fa2..8f68426ca653 100644
--- a/drivers/net/wireless/ath/ath9k/spectral.c
+++ b/drivers/net/wireless/ath/ath9k/spectral.c
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
 
 	if (strncmp("trigger", buf, 7) == 0) {
 		ath9k_spectral_scan_trigger(sc->hw);
-	} else if (strncmp("background", buf, 9) == 0) {
+	} else if (strncmp("background", buf, 10) == 0) {
 		ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND);
 		ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n");
 	} else if (strncmp("chanscan", buf, 8) == 0) {
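The spectral hunk fixes an off-by-one in a prefix compare: with n = 9, strncmp() stops before the final character of "background", so any input matching only the first nine characters was accepted. Passing the full length restores an exact match, as this userspace check shows:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "backgrounX";	/* wrong last character */

	printf("n=9:  %d\n", strncmp("background", buf, 9));	/* 0: false match */
	printf("n=10: %d\n", strncmp("background", buf, 10));	/* nonzero */
	return 0;
}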
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b8e2561ea645..fe3dc126b149 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -27,10 +27,17 @@ config BRCMFMAC
 	  one of the bus interface support. If you choose to build a module,
 	  it'll be called brcmfmac.ko.
 
+config BRCMFMAC_PROTO_BCDC
+	bool
+
+config BRCMFMAC_PROTO_MSGBUF
+	bool
+
 config BRCMFMAC_SDIO
 	bool "SDIO bus interface support for FullMAC driver"
 	depends on (MMC = y || MMC = BRCMFMAC)
 	depends on BRCMFMAC
+	select BRCMFMAC_PROTO_BCDC
 	select FW_LOADER
 	default y
 	---help---
@@ -42,6 +49,7 @@ config BRCMFMAC_USB
 	bool "USB bus interface support for FullMAC driver"
 	depends on (USB = y || USB = BRCMFMAC)
 	depends on BRCMFMAC
+	select BRCMFMAC_PROTO_BCDC
 	select FW_LOADER
 	---help---
 	  This option enables the USB bus interface support for Broadcom
@@ -52,6 +60,8 @@ config BRCMFMAC_PCIE
 	bool "PCIE bus interface support for FullMAC driver"
 	depends on BRCMFMAC
 	depends on PCI
+	depends on HAS_DMA
+	select BRCMFMAC_PROTO_MSGBUF
 	select FW_LOADER
 	---help---
 	  This option enables the PCIE bus interface support for Broadcom
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index c35adf4bc70b..90a977fe9a64 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -30,16 +30,18 @@ brcmfmac-objs += \
 		fwsignal.o \
 		p2p.o \
 		proto.o \
-		bcdc.o \
-		commonring.o \
-		flowring.o \
-		msgbuf.o \
 		dhd_common.o \
 		dhd_linux.o \
 		firmware.o \
 		feature.o \
 		btcoex.o \
 		vendor.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_BCDC) += \
+		bcdc.o
+brcmfmac-$(CONFIG_BRCMFMAC_PROTO_MSGBUF) += \
+		commonring.o \
+		flowring.o \
+		msgbuf.o
 brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
 		dhd_sdio.o \
 		bcmsdh.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
index 17e8c039ff32..6003179c0ceb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcdc.h
@@ -16,9 +16,12 @@
 #ifndef BRCMFMAC_BCDC_H
 #define BRCMFMAC_BCDC_H
 
-
+#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
 int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
 void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
-
+#else
+static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
+static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_BCDC_H */
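The bcdc.h hunk is the standard compile-out idiom: when the protocol is configured out, the header supplies static inline no-op stubs with the same signatures, so call sites build unconditionally and the #ifdef lives in exactly one place. In generic form (CONFIG_FOO and struct bar are placeholders):

struct bar;

#ifdef CONFIG_FOO
int foo_attach(struct bar *b);
void foo_detach(struct bar *b);
#else
static inline int foo_attach(struct bar *b) { return 0; }
static inline void foo_detach(struct bar *b) { }
#endif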
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 4f1daabc551b..44fc85f68f7a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -185,7 +185,13 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
 		  ifevent->action, ifevent->ifidx, ifevent->bssidx,
 		  ifevent->flags, ifevent->role);
 
-	if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+	/* The P2P Device interface event must not be ignored
+	 * contrary to what firmware tells us. The only way to
+	 * distinguish the P2P Device is by looking at the ifidx
+	 * and bssidx received.
+	 */
+	if (!(ifevent->ifidx == 0 && ifevent->bssidx == 1) &&
+	    (ifevent->flags & BRCMF_E_IF_FLAG_NOIF)) {
 		brcmf_dbg(EVENT, "event can be ignored\n");
 		return;
 	}
@@ -210,12 +216,12 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
 		return;
 	}
 
-	if (ifevent->action == BRCMF_E_IF_CHANGE)
+	if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
 		brcmf_fws_reset_interface(ifp);
 
 	err = brcmf_fweh_call_event_handler(ifp, emsg->event_code, emsg, data);
 
-	if (ifevent->action == BRCMF_E_IF_DEL) {
+	if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
 		brcmf_fws_del_interface(ifp);
 		brcmf_del_if(drvr, ifevent->bssidx);
 	}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
index dd20b1862d44..cbf033f59109 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.h
@@ -172,6 +172,8 @@ enum brcmf_fweh_event_code {
 #define BRCMF_E_IF_ROLE_STA			0
 #define BRCMF_E_IF_ROLE_AP			1
 #define BRCMF_E_IF_ROLE_WDS			2
+#define BRCMF_E_IF_ROLE_P2P_GO			3
+#define BRCMF_E_IF_ROLE_P2P_CLIENT		4
 
 /**
  * definitions for event packet validation.
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
index f901ae52bf2b..77a51b8c1e12 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
@@ -15,6 +15,7 @@
 #ifndef BRCMFMAC_MSGBUF_H
 #define BRCMFMAC_MSGBUF_H
 
+#ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF
 
 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM	20
 #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM	256
@@ -32,9 +33,15 @@
 
 
 int brcmf_proto_msgbuf_rx_trigger(struct device *dev);
+void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
 int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr);
 void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr);
-void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid);
-
+#else
+static inline int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
+{
+	return 0;
+}
+static inline void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr) {}
+#endif
 
 #endif /* BRCMFMAC_MSGBUF_H */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 02fe706fc9ec..16a246bfc343 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -497,8 +497,11 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable)
 static void
 brcmf_cfg80211_update_proto_addr_mode(struct wireless_dev *wdev)
 {
-	struct net_device *ndev = wdev->netdev;
-	struct brcmf_if *ifp = netdev_priv(ndev);
+	struct brcmf_cfg80211_vif *vif;
+	struct brcmf_if *ifp;
+
+	vif = container_of(wdev, struct brcmf_cfg80211_vif, wdev);
+	ifp = vif->ifp;
 
 	if ((wdev->iftype == NL80211_IFTYPE_ADHOC) ||
 	    (wdev->iftype == NL80211_IFTYPE_AP) ||
@@ -4918,7 +4921,7 @@ static void brcmf_count_20mhz_channels(struct brcmf_cfg80211_info *cfg,
 	struct brcmu_chan ch;
 	int i;
 
-	for (i = 0; i <= total; i++) {
+	for (i = 0; i < total; i++) {
 		ch.chspec = (u16)le32_to_cpu(chlist->element[i]);
 		cfg->d11inf.decchspec(&ch);
 
@@ -5143,6 +5146,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
 		ch.band = BRCMU_CHAN_BAND_2G;
 		ch.bw = BRCMU_CHAN_BW_40;
+		ch.sb = BRCMU_CHAN_SB_NONE;
 		ch.chnum = 0;
 		cfg->d11inf.encchspec(&ch);
 
@@ -5176,6 +5180,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
 
 			brcmf_update_bw40_channel_flag(&band->channels[j], &ch);
 		}
+		kfree(pbuf);
 	}
 	return err;
 }
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 6451d2b6abcf..824f5e287783 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS
 
 config IWLDVM
 	tristate "Intel Wireless WiFi DVM Firmware support"
-	depends on m
 	default IWLWIFI
 	help
 	  This is the driver that supports the DVM firmware which is
@@ -60,7 +59,6 @@ config IWLDVM
 
 config IWLMVM
 	tristate "Intel Wireless WiFi MVM Firmware support"
-	depends on m
 	help
 	  This is the driver that supports the MVM firmware which is
 	  currently only available for 7260 and 3160 devices.
diff --git a/drivers/net/wireless/iwlwifi/dvm/power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 760c45c34ef3..1513dbc79c14 100644
--- a/drivers/net/wireless/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -40,7 +40,7 @@
 #include "commands.h"
 #include "power.h"
 
-static bool force_cam;
+static bool force_cam = true;
 module_param(force_cam, bool, 0644);
 MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
 
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 6dc5dd3ced44..ed50de6362ed 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
 	/* recalculate basic rates */
 	iwl_calc_basic_rates(priv, ctx);
 
+	/*
+	 * force CTS-to-self frames protection if RTS-CTS is not preferred
+	 * one aggregation protection method
+	 */
+	if (!priv->hw_params.use_rts_for_aggregation)
+		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
 	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
 	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
 		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
 	else
 		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 
+	if (bss_conf->use_cts_prot)
+		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
 	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
 	if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 48730064da73..d53adc245497 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX	9
-#define IWL3160_UCODE_API_MAX	9
+#define IWL7260_UCODE_API_MAX	10
+#define IWL3160_UCODE_API_MAX	10
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK	9
@@ -83,6 +83,8 @@
 #define IWL7260_TX_POWER_VERSION	0xffff /* meaningless */
 #define IWL3160_NVM_VERSION		0x709
 #define IWL3160_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL3165_NVM_VERSION		0x709
+#define IWL3165_TX_POWER_VERSION	0xffff /* meaningless */
 #define IWL7265_NVM_VERSION		0x0a1d
 #define IWL7265_TX_POWER_VERSION	0xffff /* meaningless */
 
@@ -92,6 +94,9 @@
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3165_FW_PRE "iwlwifi-3165-"
+#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -213,6 +218,16 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
 	{0},
 };
 
+const struct iwl_cfg iwl3165_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 3165",
+	.fw_name_pre = IWL3165_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3165_NVM_VERSION,
+	.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+};
+
 const struct iwl_cfg iwl7265_2ac_cfg = {
 	.name = "Intel(R) Dual Band Wireless AC 7265",
 	.fw_name_pre = IWL7265_FW_PRE,
@@ -245,4 +260,5 @@ const struct iwl_cfg iwl7265_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index 44b19e015102..e93c6972290b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -67,7 +67,7 @@
 #include "iwl-agn-hw.h"
 
 /* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX	9
+#define IWL8000_UCODE_API_MAX	10
 
 /* Oldest version we won't warn about */
 #define IWL8000_UCODE_API_OK	8
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 8da596db9abe..3d7cc37420ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -120,6 +120,8 @@ enum iwl_led_mode {
 #define IWL_LONG_WD_TIMEOUT	10000
 #define IWL_MAX_WD_TIMEOUT	120000
 
+#define IWL_DEFAULT_MAX_TX_POWER 22
+
 /* Antenna presence definitions */
 #define	ANT_NONE	0x0
 #define	ANT_A		BIT(0)
@@ -335,6 +337,7 @@ extern const struct iwl_cfg iwl7260_n_cfg;
 extern const struct iwl_cfg iwl3160_2ac_cfg;
 extern const struct iwl_cfg iwl3160_2n_cfg;
 extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl3165_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2ac_cfg;
 extern const struct iwl_cfg iwl7265_2n_cfg;
 extern const struct iwl_cfg iwl7265_n_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 018af2957d3b..354255f08754 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -146,8 +146,6 @@ static const u8 iwl_nvm_channels_family_8000[] = {
 #define LAST_2GHZ_HT_PLUS	9
 #define LAST_5GHZ_HT		161
 
-#define DEFAULT_MAX_TX_POWER 16
-
 /* rate data (static) */
 static struct ieee80211_rate iwl_cfg80211_rates[] = {
 	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
@@ -295,7 +293,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
 		 * Default value - highest tx power value. max_power
 		 * is not used in mvm, and is used for backwards compatibility
 		 */
-		channel->max_power = DEFAULT_MAX_TX_POWER;
+		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
 		is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
 		IWL_DEBUG_EEPROM(dev,
 				 "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 2291bbcaaeab..ce71625f497f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -585,8 +585,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 	lockdep_assert_held(&mvm->mutex);
 
 	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
-		u32 mode;
-
 		switch (mvm->bt_force_ant_mode) {
 		case BT_FORCE_ANT_BT:
 			mode = BT_COEX_BT;
@@ -756,7 +754,8 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	struct iwl_bt_iterator_data *data = _data;
 	struct iwl_mvm *mvm = data->mvm;
 	struct ieee80211_chanctx_conf *chanctx_conf;
-	enum ieee80211_smps_mode smps_mode;
+	/* default smps_mode is AUTOMATIC - only used for client modes */
+	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
 	u32 bt_activity_grading;
 	int ave_rssi;
 
@@ -764,8 +763,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 
 	switch (vif->type) {
 	case NL80211_IFTYPE_STATION:
-		/* default smps_mode for BSS / P2P client is AUTOMATIC */
-		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 		break;
 	case NL80211_IFTYPE_AP:
 		if (!mvmvif->ap_ibss_active)
@@ -797,7 +794,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
 	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
 		smps_mode = IEEE80211_SMPS_DYNAMIC;
 
-	/* relax SMPS contraints for next association */
+	/* relax SMPS constraints for next association */
 	if (!vif->bss_conf.assoc)
 		smps_mode = IEEE80211_SMPS_AUTOMATIC;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 2e90ff795c13..87e517bffedc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -74,8 +74,7 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 
 	switch (param) {
 	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
-		struct ieee80211_hw *hw = mvm->hw;
-		int dtimper = hw->conf.ps_dtim_period ?: 1;
+		int dtimper = vif->bss_conf.dtim_period ?: 1;
 		int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
 
 		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 95f5b3274efb..9a922f3bd16b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -1563,14 +1563,14 @@ enum iwl_sf_scenario {
 
 /**
  * Smart Fifo configuration command.
- * @state: smart fifo state, types listed in iwl_sf_sate.
+ * @state: smart fifo state, types listed in enum %iwl_sf_sate.
  * @watermark: Minimum allowed availabe free space in RXF for transient state.
  * @long_delay_timeouts: aging and idle timer values for each scenario
  *	in long delay state.
  * @full_on_timeouts: timer values for each scenario in full on state.
  */
 struct iwl_sf_cfg_cmd {
-	enum iwl_sf_state state;
+	__le32 state;
 	__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
 	__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
 	__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 0e523e28cabf..8242e689ddb1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -721,11 +721,6 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 	    !force_assoc_off) {
 		u32 dtim_offs;
 
-		/* Allow beacons to pass through as long as we are not
-		 * associated, or we do not have dtim period information.
-		 */
-		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
-
 		/*
 		 * The DTIM count counts down, so when it is N that means N
 		 * more beacon intervals happen until the DTIM TBTT. Therefore
@@ -759,6 +754,11 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
 		ctxt_sta->is_assoc = cpu_to_le32(1);
 	} else {
 		ctxt_sta->is_assoc = cpu_to_le32(0);
+
+		/* Allow beacons to pass through as long as we are not
+		 * associated, or we do not have dtim period information.
+		 */
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
 	}
 
 	ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7c8796584c25..cdc272d776e7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -396,12 +396,14 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	else
 		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
 
-	/* TODO: enable that only for firmwares that don't crash */
-	/* hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; */
-	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
-	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
-	/* we create the 802.11 header and zero length SSID IE. */
-	hw->wiphy->max_sched_scan_ie_len = SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 10) {
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+		hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+		hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+		/* we create the 802.11 header and zero length SSID IE. */
+		hw->wiphy->max_sched_scan_ie_len =
+			SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	}
 
 	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
 			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -1524,11 +1526,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		 */
 		iwl_mvm_remove_time_event(mvm, mvmvif,
 					  &mvmvif->time_event_data);
-	} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
-			      BSS_CHANGED_QOS)) {
-		ret = iwl_mvm_power_update_mac(mvm);
-		if (ret)
-			IWL_ERR(mvm, "failed to update power mode\n");
 	}
 
 	if (changes & BSS_CHANGED_BEACON_INFO) {
@@ -1536,6 +1533,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
 	}
 
+	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) {
+		ret = iwl_mvm_power_update_mac(mvm);
+		if (ret)
+			IWL_ERR(mvm, "failed to update power mode\n");
+	}
+
 	if (changes & BSS_CHANGED_TXPOWER) {
 		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
 				bss_conf->txpower);
1541 bss_conf->txpower); 1544 bss_conf->txpower);
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 2b2d10800a55..d9769a23c68b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -281,7 +281,6 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
281 struct ieee80211_vif *vif, 281 struct ieee80211_vif *vif,
282 struct iwl_mac_power_cmd *cmd) 282 struct iwl_mac_power_cmd *cmd)
283{ 283{
284 struct ieee80211_hw *hw = mvm->hw;
285 struct ieee80211_chanctx_conf *chanctx_conf; 284 struct ieee80211_chanctx_conf *chanctx_conf;
286 struct ieee80211_channel *chan; 285 struct ieee80211_channel *chan;
287 int dtimper, dtimper_msec; 286 int dtimper, dtimper_msec;
@@ -292,7 +291,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
292 291
293 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 292 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
294 mvmvif->color)); 293 mvmvif->color));
295 dtimper = hw->conf.ps_dtim_period ?: 1; 294 dtimper = vif->bss_conf.dtim_period;
296 295
297 /* 296 /*
298 * Regardless of power management state the driver must set 297 * Regardless of power management state the driver must set
@@ -885,7 +884,7 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
885 iwl_mvm_power_build_cmd(mvm, vif, &cmd); 884 iwl_mvm_power_build_cmd(mvm, vif, &cmd);
886 if (enable) { 885 if (enable) {
887 /* configure skip over dtim up to 300 msec */ 886 /* configure skip over dtim up to 300 msec */
888 int dtimper = mvm->hw->conf.ps_dtim_period ?: 1; 887 int dtimper = vif->bss_conf.dtim_period ?: 1;
889 int dtimper_msec = dtimper * vif->bss_conf.beacon_int; 888 int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
890 889
891 if (WARN_ON(!dtimper_msec)) 890 if (WARN_ON(!dtimper_msec))
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 4b98987fc413..bf5cd8c8b0f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -149,13 +149,13 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
149 le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]); 149 le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
150 energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >> 150 energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
151 IWL_RX_INFO_ENERGY_ANT_A_POS; 151 IWL_RX_INFO_ENERGY_ANT_A_POS;
152 energy_a = energy_a ? -energy_a : -256; 152 energy_a = energy_a ? -energy_a : S8_MIN;
153 energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >> 153 energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
154 IWL_RX_INFO_ENERGY_ANT_B_POS; 154 IWL_RX_INFO_ENERGY_ANT_B_POS;
155 energy_b = energy_b ? -energy_b : -256; 155 energy_b = energy_b ? -energy_b : S8_MIN;
156 energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >> 156 energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
157 IWL_RX_INFO_ENERGY_ANT_C_POS; 157 IWL_RX_INFO_ENERGY_ANT_C_POS;
158 energy_c = energy_c ? -energy_c : -256; 158 energy_c = energy_c ? -energy_c : S8_MIN;
159 max_energy = max(energy_a, energy_b); 159 max_energy = max(energy_a, energy_b);
160 max_energy = max(max_energy, energy_c); 160 max_energy = max(max_energy, energy_c);
161 161
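On the -256 to S8_MIN change: the per-antenna energies end up handled as signed 8-bit dBm values, and -256 is not representable in an s8 (on the usual two's-complement ABI it truncates to 0, i.e. a very strong signal instead of a floor). A userspace illustration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int energy = 0;	/* the "no reading" case */
	int8_t stored;

	stored = (int8_t)(energy ? -energy : -256);	/* old floor: becomes 0 */
	printf("floor -256   -> %d\n", stored);
	stored = (int8_t)(energy ? -energy : INT8_MIN);	/* S8_MIN: stays -128 */
	printf("floor S8_MIN -> %d\n", stored);
	return 0;
}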
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 7edfd15efc9d..e843b67f2201 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -172,7 +172,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
 			     enum iwl_sf_state new_state)
 {
 	struct iwl_sf_cfg_cmd sf_cmd = {
-		.state = new_state,
+		.state = cpu_to_le32(new_state),
 	};
 	struct ieee80211_sta *sta;
 	int ret = 0;
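The fw-api.h and sf.c hunks enforce one rule together: a command struct sent to the firmware must carry fixed-width little-endian fields (an enum has compiler-chosen size and host byte order), and the host value is converted exactly once at fill time. A condensed kernel-style sketch (struct and helper names abbreviated):

#include <linux/types.h>
#include <asm/byteorder.h>

struct sketch_sf_cfg_cmd {
	__le32 state;	/* wire format: fixed width, little endian */
};

static inline void sketch_fill(struct sketch_sf_cfg_cmd *cmd, u32 state)
{
	cmd->state = cpu_to_le32(state);	/* convert once, here */
}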
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index dbc870713882..9ee410bf6da2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -168,10 +168,14 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
 
 	/*
 	 * for data packets, rate info comes from the table inside the fw. This
-	 * table is controlled by LINK_QUALITY commands
+	 * table is controlled by LINK_QUALITY commands. Exclude ctrl port
+	 * frames like EAPOLs which should be treated as mgmt frames. This
+	 * avoids them being sent initially in high rates which increases the
+	 * chances for completion of the 4-Way handshake.
 	 */
 
-	if (ieee80211_is_data(fc) && sta) {
+	if (ieee80211_is_data(fc) && sta &&
+	    !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
 		tx_cmd->initial_rate_index = 0;
 		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
 		return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f0e722ced080..073a68b97a72 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -352,11 +352,17 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
 	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
 
+/* 3165 Series */
+	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+
 /* 7265 Series */
 	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
@@ -378,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
index 33da3dfcfa4f..d4bd550f505c 100644
--- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter)
 
 	bool is_legacy = false;
 
-	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B))
+	if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G))
 		is_legacy = true;
 
 	return is_legacy;
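The halbtc fix is a classic duplicated-operand bug: (mode == B) || (mode == B) is just mode == B, so G mode was never classified as legacy. The corrected predicate, as a standalone check:

#include <stdbool.h>
#include <stdio.h>

enum wireless_mode { WIRELESS_MODE_B, WIRELESS_MODE_G, WIRELESS_MODE_N };

static bool is_legacy(enum wireless_mode mode)
{
	return mode == WIRELESS_MODE_B || mode == WIRELESS_MODE_G;
}

int main(void)
{
	printf("G legacy? %d\n", is_legacy(WIRELESS_MODE_G));	/* now 1 */
	printf("N legacy? %d\n", is_legacy(WIRELESS_MODE_N));	/* 0 */
	return 0;
}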
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 361435f8608a..1ac6383e7947 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
 	{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
 	{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
 	{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+	{RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */
 	{RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
 	{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
 	{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e29e15dca86e..f379689dde30 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	init_waitqueue_head(&queue->dealloc_wq);
 	atomic_set(&queue->inflight_packets, 0);
 
+	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
+		       XENVIF_NAPI_WEIGHT);
+
 	if (tx_evtchn == rx_evtchn) {
 		/* feature-split-event-channels == 0 */
 		err = bind_interdomain_evtchn_to_irqhandler(
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 	wake_up_process(queue->task);
 	wake_up_process(queue->dealloc_task);
 
-	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
-		       XENVIF_NAPI_WEIGHT);
-
 	return 0;
 
 err_rx_unbind:
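The xen-netback move encodes an init-before-IRQ rule: anything an interrupt handler may schedule (here the NAPI instance) must exist before the event-channel IRQs are bound, because the handler can fire as soon as binding succeeds. The ordering, sketched with hypothetical names:

struct queue;

void sketch_napi_setup(struct queue *q);	/* assumed helpers */
int sketch_bind_evtchn_irq(struct queue *q);
void sketch_start_kthreads(struct queue *q);

static int sketch_connect(struct queue *q)
{
	sketch_napi_setup(q);			/* 1: NAPI ready to run */
	if (sketch_bind_evtchn_irq(q))		/* 2: IRQs possible from here */
		return -1;
	sketch_start_kthreads(q);		/* 3: start processing */
	return 0;
}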