Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 13
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/jme.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 6
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 116
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 9
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 5
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 35
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
24 files changed, 265 insertions, 211 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ec3f9e5187df..c40c0253e105 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 					SHMEM_EEE_ADV_STATUS_SHIFT);
 	if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
 		DP(BNX2X_MSG_ETHTOOL,
-		   "Direct manipulation of EEE advertisment is not supported\n");
+		   "Direct manipulation of EEE advertisement is not supported\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index a2b94650c271..cd002943fac8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -137,7 +137,16 @@
 #define LINK_20GTFD LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
 #define LINK_20GXFD LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
 
-
+#define LINK_UPDATE_MASK \
+			(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+			 LINK_STATUS_LINK_UP | \
+			 LINK_STATUS_PHYSICAL_LINK_FLAG | \
+			 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+			 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+			 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+			 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR 0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
@@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 			       DEFAULT_PHY_DEV_ADDR);
 }
 
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		/* Set correct devad */
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+		       phy->def_md_devad);
+		break;
+	}
+}
+
 static void bnx2x_xgxs_deassert(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
@@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
 	udelay(500);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
-
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-	       params->phy[INT_PHY].def_md_devad);
+	bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+				 PHY_INIT);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 					struct link_params *params,
 					struct link_vars *vars) {
-	u16 val16 = 0, lane, i;
+	u16 lane, i, cl72_ctrl, an_adv = 0;
+	u16 ucode_ver;
 	struct bnx2x *bp = params->bp;
 	static struct bnx2x_reg_set reg_set[] = {
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
 		{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
@@ -3565,12 +3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+	cl72_ctrl &= 0xf8ff;
+	cl72_ctrl |= 0x3800;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
 	/* Check adding advertisement for 1G KX */
 	if (((vars->line_speed == SPEED_AUTO_NEG) &&
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 	    (vars->line_speed == SPEED_1000)) {
 		u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
-		val16 |= (1<<5);
+		an_adv |= (1<<5);
 
 		/* Enable CL37 1G Parallel Detect */
 		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
@@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 	    (vars->line_speed == SPEED_10000)) {
 		/* Check adding advertisement for 10G KR */
-		val16 |= (1<<7);
+		an_adv |= (1<<7);
 		/* Enable 10G Parallel Detect */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+
 		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 				 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
-
+		bnx2x_set_aer_mmd(params, phy);
 		DP(NETIF_MSG_LINK, "Advertize 10G\n");
 	}
 
@@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 	/* Advertised speeds */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
 
 	/* Advertised and set FEC (Forward Error Correction) */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
@@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
 	 */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
-	if (val16 < 0xd108) {
-		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+			MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
+	if (ucode_ver < 0xd108) {
+		DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
+		   ucode_ver);
 		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 	}
 	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 				      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 i;
+	u16 val16, i, lane;
 	static struct bnx2x_reg_set reg_set[] = {
 		/* Disable Autoneg */
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 		 0x3f00},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
-		/* Disable CL36 PCS Tx */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
-		/* Double Wide Single Data Rate @ pll rate */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
 		/* Leave cl72 training enable, needed for KR */
 		{MDIO_PMA_DEVAD,
 		 MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
@@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
-	/* Leave CL72 enabled */
-	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
-				 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
-				 0x3800);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Global registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 &= ~(0x0011 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 |= (0x0303 << (lane << 1));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
 	/* Set speed via PMA/PMD register */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
@@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val16;
+	u16 val16, lane;
 	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
 	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
 	bnx2x_set_aer_mmd(params, phy);
@@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
 			 val16 & 0xff00);
 
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 |= (0x11 << lane);
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		val16 |= (0x22 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 &= ~(0x0303 << (lane << 1));
+	val16 |= (0x0101 << (lane << 1));
+	if (phy->flags & FLAGS_WC_DUAL_MODE) {
+		val16 &= ~(0x0c0c << (lane << 1));
+		val16 |= (0x0404 << (lane << 1));
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+
 }
 
 static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
@@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params,
 	vars->mac_type = MAC_TYPE_NONE;
 
 	/* Update shared memory */
-	vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
-			       LINK_STATUS_LINK_UP |
-			       LINK_STATUS_PHYSICAL_LINK_FLAG |
-			       LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
-			       LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
-			       LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
-			       LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	vars->line_speed = 0;
 	bnx2x_update_mng(params, vars->link_status);
 
@@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
 	u8 active_external_phy = INT_PHY;
 	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	for (phy_index = INT_PHY; phy_index < params->num_phys;
 	      phy_index++) {
 		phy_vars[phy_index].flow_ctrl = 0;
@@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
 						 u16 addr, u8 byte_cnt,
-						 u8 *o_buf)
+						 u8 *o_buf, u8 is_init)
 {
 	int rc = 0;
 	u8 i, j = 0, cnt = 0;
@@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* 4 byte aligned address */
 	addr32 = addr & (~0x3);
 	do {
-		if (cnt == I2C_WA_PWR_ITER) {
+		if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
 			bnx2x_warpcore_power_module(params, phy, 0);
 			/* Note that 100us are not enough here */
-			usleep_range(1000,1000);
+			usleep_range(1000, 2000);
 			bnx2x_warpcore_power_module(params, phy, 1);
 		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
@@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
-							   byte_cnt, o_buf);
+							   byte_cnt, o_buf, 0);
 		break;
 	}
 	return rc;
@@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 
 {
 	u8 val;
+	int rc;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
 	/* Initialization time after hot-plug may take up to 300ms for
@@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 	 */
 
 	for (timeout = 0; timeout < 60; timeout++) {
-		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
-		    == 0) {
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+			rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
+								   params, 1,
+								   1, &val, 1);
+		else
+			rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
+							  &val);
+		if (rc == 0) {
 			DP(NETIF_MSG_LINK,
 			   "SFP+ module initialization took %d ms\n",
 			   timeout * 5);
@@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 		}
 		usleep_range(5000, 10000);
 	}
-	return -EINVAL;
+	rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+	return rc;
 }
 
 static void bnx2x_8727_power_module(struct bnx2x *bp,
@@ -9878,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		else
 			rc = bnx2x_8483x_disable_eee(phy, params, vars);
 		if (rc) {
-			DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
 			return rc;
 		}
 	} else {
@@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = {
 	.format_fw_ver = (format_fw_ver_t)NULL,
 	.hw_reset = (hw_reset_t)NULL,
 	.set_link_led = (set_link_led_t)NULL,
-	.phy_specific_func = (phy_specific_func_t)NULL
+	.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
 };
 static struct bnx2x_phy phy_warpcore = {
 	.type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
 		phy->media_type = ETH_PHY_BASE_T;
 		break;
 	case PORT_HW_CFG_NET_SERDES_IF_XFI:
+		phy->supported &= (SUPPORTED_1000baseT_Full |
+				   SUPPORTED_10000baseT_Full |
+				   SUPPORTED_FIBRE |
+				   SUPPORTED_Pause |
+				   SUPPORTED_Asym_Pause);
 		phy->media_type = ETH_PHY_XFP_FIBER;
 		break;
 	case PORT_HW_CFG_NET_SERDES_IF_SFI:
@@ -12919,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
 		break;
 	default:
-		DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
 	}
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0546cf4f762e..04b9f0ab183b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6804,8 +6804,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
 	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
-		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 
 		if (IS_MF(bp))
 			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
@@ -12069,6 +12070,16 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
 
+	/* disable FCOE for 57840 device, until FW supports it */
+	switch (ent->driver_data) {
+	case BCM57840_O:
+	case BCM57840_4_10:
+	case BCM57840_2_20:
+	case BCM57840_MFO:
+	case BCM57840_MF:
+		bp->flags |= NO_FCOE_FLAG;
+	}
+
 	/* Set bp->num_queues for MSI-X mode*/
 	bnx2x_set_num_queues(bp);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a4da893ac1e1..378988b5709a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -251,6 +251,8 @@ struct adapter_params {
 	unsigned char rev; /* chip revision */
 	unsigned char offload;
 
+	unsigned char bypass;
+
 	unsigned int ofldq_wr_cred;
 };
 
@@ -642,6 +644,23 @@ extern int dbfifo_int_thresh;
 #define for_each_port(adapter, iter) \
 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
 
+static inline int is_bypass(struct adapter *adap)
+{
+	return adap->params.bypass;
+}
+
+static inline int is_bypass_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x440b:
+	case 0x440c:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
 	return adap->params.vpd.cclk / 1000;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604f4f87f550..0df1284df497 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3416,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 		 finicsum, cfcsum);
 
 	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-
-	/*
 	 * And now tell the firmware to use the configuration we just loaded.
 	 */
 	caps_cmd.op_to_write =
@@ -3513,18 +3503,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	if (ret < 0)
 		goto bye;
 
-#ifndef CONFIG_CHELSIO_T4_OFFLOAD
-	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-#endif
-
 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
 		if (!vf_acls)
 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
@@ -3745,6 +3723,7 @@ static int adap_init0(struct adapter *adap)
 	u32 v, port_vec;
 	enum dev_state state;
 	u32 params[7], val[7];
+	struct fw_caps_config_cmd caps_cmd;
 	int reset = 1, j;
 
 	/*
@@ -3898,6 +3877,9 @@ static int adap_init0(struct adapter *adap)
 		goto bye;
 	}
 
+	if (is_bypass_device(adap->pdev->device))
+		adap->params.bypass = 1;
+
 	/*
 	 * Grab some of our basic fundamental operating parameters.
 	 */
@@ -3940,13 +3922,12 @@ static int adap_init0(struct adapter *adap)
 		adap->tids.aftid_end = val[1];
 	}
 
-#ifdef CONFIG_CHELSIO_T4_OFFLOAD
 	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
 	 */
 	memset(&caps_cmd, 0, sizeof(caps_cmd));
-	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 				     FW_CMD_REQUEST | FW_CMD_READ);
 	caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
@@ -3991,15 +3972,6 @@ static int adap_init0(struct adapter *adap)
 		adap->vres.ddp.size = val[4] - val[3] + 1;
 		adap->params.ofldq_wr_cred = val[5];
 
-		params[0] = FW_PARAM_PFVF(ETHOFLD_START);
-		params[1] = FW_PARAM_PFVF(ETHOFLD_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
-				      params, val);
-		if ((val[0] != val[1]) && (ret >= 0)) {
-			adap->tids.uotid_base = val[0];
-			adap->tids.nuotids = val[1] - val[0] + 1;
-		}
-
 		adap->params.offload = 1;
 	}
 	if (caps_cmd.rdmacaps) {
@@ -4048,7 +4020,6 @@ static int adap_init0(struct adapter *adap)
 	}
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
-#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
 
 	/*
 	 * These are finalized by FW initialization, load their values now.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b899fea1a91..39bec73ff87c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -102,6 +102,9 @@ struct tid_info {
 	unsigned int ftid_base;
 	unsigned int aftid_base;
 	unsigned int aftid_end;
+	/* Server filter region */
+	unsigned int sftid_base;
+	unsigned int nsftids;
 
 	spinlock_t atid_lock ____cacheline_aligned_in_smp;
 	union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32eec15fe4c2..730ae2cfa49e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_bye_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, BYE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_initialize_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, INITIALIZE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 {
 	struct fw_reset_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
 	c.val = htonl(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 		     HOSTPAGESIZEPF7(sge_hps));
 
 	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+			 INGPADBOUNDARY_MASK |
 			 EGRSTATUSPAGESIZE_MASK,
 			 INGPADBOUNDARY(fl_align_log - 5) |
 			 EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 {
 	struct fw_vi_enable_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 81a0f33d263f..bffb2edd6858 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1349,8 +1349,11 @@ static int gfar_restore(struct device *dev)
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
 
-	if (!netif_running(ndev))
+	if (!netif_running(ndev)) {
+		netif_device_attach(ndev);
+
 		return 0;
+	}
 
 	if (gfar_init_bds(ndev)) {
 		free_skb_resources(priv);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b9db0e040563..2e5daee0438a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 		pr_err("no resource\n");
 		goto no_resource;
 	}
-	if (request_resource(&ioport_resource, etsects->rsrc)) {
+	if (request_resource(&iomem_resource, etsects->rsrc)) {
 		pr_err("resource busy\n");
 		goto no_resource;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index bbf07bd6ab9d..a545728e100c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2670,6 +2670,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
 		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE |
 			SOF_TIMESTAMPING_TX_HARDWARE |
 			SOF_TIMESTAMPING_RX_HARDWARE |
 			SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f8064df10cc4..92317e9c0f73 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1948,10 +1948,10 @@ jme_close(struct net_device *netdev)
 
 	JME_NAPI_DISABLE(jme);
 
-	tasklet_disable(&jme->linkch_task);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9b9c2ac5c4c2..d19a143aa5a8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
-	tasklet_disable(&hw->phy_task);
+	tasklet_kill(&hw->phy_task);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10e3a6de09f..b35094c590ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 	mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
-	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	kfree(ring->bounce_buf);
@@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (bounce)
 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
-	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
 		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
 		op_own |= htonl((bf_index & 0xffff) << 8);
 		/* Ensure new descirptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 51c764901ad2..b84a88bc44dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 	ctx = &priv->mfunc.master.slave_state[slave];
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
-		 __func__, slave, cur_state, event);
-
 	switch (cur_state) {
 	case SLAVE_PORT_DOWN:
 		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
@@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 		goto out;
 	}
 	ret = mlx4_get_slave_port_state(dev, slave, port);
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
-		 " :%d gen_event: %d\n",
-		 __func__, slave, cur_state, event, *gen_event);
 
 out:
 	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
 }
 
+static void mlx4_unmap_uar(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+		if (priv->eq_table.uar_map[i]) {
+			iounmap(priv->eq_table.uar_map[i]);
+			priv->eq_table.uar_map[i] = NULL;
+		}
+}
+
 static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 			  u8 intr, struct mlx4_eq *eq)
 {
@@ -1207,6 +1213,7 @@ err_out_unmap:
 	mlx4_free_irqs(dev);
 
 err_out_bitmap:
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 err_out_free:
@@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	if (!mlx4_is_slave(dev))
 		mlx4_unmap_clr_int(dev);
 
-	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
-		if (priv->eq_table.uar_map[i])
-			iounmap(priv->eq_table.uar_map[i]);
-
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 	kfree(priv->eq_table.uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 80df2ab0177c..2aa80afd98d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1405,7 +1405,10 @@ unmap_bf:
 	unmap_bf_area(dev);
 
 err_close:
-	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else
+		mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	if (!mlx4_is_slave(dev))
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 926c911c0ac4..b05705f50f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave,
 
 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 	*(u8 *)(inbox->buf + 35) = new_index;
-
-	mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
-		 "new pkey index = %d\n", port, orig_index, new_index);
 }
 
 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
@@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
 	}
-
-	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
-		 slave, qp_ctx->pri_path.mgid_index);
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 658afc45f378..e4ba868e232c 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
 	/* Delay for receive task to stop scheduling itself. */
 	msleep(2000 / HZ);
 
-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
+	tasklet_kill(&hw_priv->rx_tasklet);
+	tasklet_kill(&hw_priv->tx_tasklet);
 	free_irq(dev->irq, hw_priv->dev);
 
 	transmit_cleanup(hw_priv, 0);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 53743f7a2ca9..af8b4142088c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
 			  pldat->dma_buff_base_p);
 	free_irq(ndev->irq, ndev);
 	iounmap(pldat->net_base);
+	mdiobus_unregister(pldat->mii_bus);
 	mdiobus_free(pldat->mii_bus);
 	clk_disable(pldat->clk);
 	clk_put(pldat->clk);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 499249a15e88..39ab4d09faaa 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -333,26 +333,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 }
 
 /**
- * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
- * @reg: Pointer of register
- * @busy: Busy bit
- */
-static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
-{
-	u32 tmp;
-	int ret = -1;
-	/* wait busy */
-	tmp = 20;
-	while ((ioread32(reg) & bit) && --tmp)
-		udelay(5);
-	if (!tmp)
-		pr_err("Error: busy bit is not cleared\n");
-	else
-		ret = 0;
-	return ret;
-}
-
-/**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw: Pointer to the HW structure
  * @addr: Pointer to the MAC address
@@ -403,15 +383,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 	return;
 }
 
-static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
 {
-	/* Read the MAC addresses. and store to the private data */
-	pch_gbe_mac_read_mac_addr(hw);
-	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
-	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC addresses */
-	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
-	return;
+	u32 rctl;
+	/* Disables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+}
+
+static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
+{
+	u32 rctl;
+	/* Enables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -907,7 +892,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 {
 	struct pch_gbe_hw *hw = &adapter->hw;
-	u32 rdba, rdlen, rctl, rxdma;
+	u32 rdba, rdlen, rxdma;
 
 	pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
 		 (unsigned long long)adapter->rx_ring->dma,
@@ -915,9 +900,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 
 	pch_gbe_mac_force_mac_fc(hw);
 
-	/* Disables Receive MAC */
-	rctl = ioread32(&hw->reg->MAC_RX_EN);
-	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+	pch_gbe_disable_mac_rx(hw);
 
 	/* Disables Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
@@ -1308,38 +1291,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
 {
-	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 rxdma;
-	u16 value;
-	int ret;
 
 	/* Disable Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma &= ~PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Wait Rx DMA BUS is IDLE */
-	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
-	if (ret) {
-		/* Disable Bus master */
-		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
-		value &= ~PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-		/* Enable Bus master */
-		value |= PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-	} else {
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-	}
-	/* reprogram multicast address register after reset */
-	pch_gbe_set_multi(adapter->netdev);
 }
 
-static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
 {
 	u32 rxdma;
 
@@ -1347,9 +1309,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma |= PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Enables Receive */
-	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
-	return;
 }
 
 /**
@@ -1385,7 +1344,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 			int_en = ioread32(&hw->reg->INT_EN);
 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 				  &hw->reg->INT_EN);
-			pch_gbe_stop_receive(adapter);
+			pch_gbe_disable_dma_rx(&adapter->hw);
 			int_st |= ioread32(&hw->reg->INT_ST);
 			int_st = int_st & ioread32(&hw->reg->INT_EN);
 		}
@@ -1961,12 +1920,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
-	int err;
+	int err = -EINVAL;
 
 	/* Ensure we have a valid MAC */
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		pr_err("Error: Invalid MAC address\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	/* hardware has been reset, we need to reload some things */
@@ -1979,18 +1938,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 
 	err = pch_gbe_request_irq(adapter);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - irq request failed\n");
+		goto out;
 	}
 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+		goto freeirq;
 	}
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 	adapter->tx_queue_len = netdev->tx_queue_len;
-	pch_gbe_start_receive(&adapter->hw);
+	pch_gbe_enable_dma_rx(&adapter->hw);
+	pch_gbe_enable_mac_rx(&adapter->hw);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -1999,6 +1959,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	netif_start_queue(adapter->netdev);
 
 	return 0;
+
+freeirq:
+	pch_gbe_free_irq(adapter);
+out:
+	return err;
 }
 
 /**
@@ -2393,7 +2358,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 	int work_done = 0;
 	bool poll_end_flag = false;
 	bool cleaned = false;
-	u32 int_en;
 
 	pr_debug("budget : %d\n", budget);
 
@@ -2410,19 +2374,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 
 	if (poll_end_flag) {
 		napi_complete(napi);
-		if (adapter->rx_stop_flag) {
-			adapter->rx_stop_flag = false;
-			pch_gbe_start_receive(&adapter->hw);
-		}
 		pch_gbe_irq_enable(adapter);
-	} else
-		if (adapter->rx_stop_flag) {
-			adapter->rx_stop_flag = false;
-			pch_gbe_start_receive(&adapter->hw);
-			int_en = ioread32(&adapter->hw.reg->INT_EN);
-			iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
-				  &adapter->hw.reg->INT_EN);
-		}
+	}
+
+	if (adapter->rx_stop_flag) {
+		adapter->rx_stop_flag = false;
+		pch_gbe_enable_dma_rx(&adapter->hw);
+	}
 
 	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
 		 poll_end_flag, work_done, budget);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 80ba7292ec3d..12d1f2470d5c 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2522,6 +2522,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 	qdev->req_q_size =
 	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
 
+	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+	/* The barrier is required to ensure request and response queue
+	 * addr writes to the registers.
+	 */
+	wmb();
+
 	qdev->req_q_virt_addr =
 	    pci_alloc_consistent(qdev->pdev,
 				 (size_t) qdev->req_q_size,
@@ -2533,8 +2540,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 		return -ENOMEM;
 	}
 
-	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
-
 	qdev->rsp_q_virt_addr =
 	    pci_alloc_consistent(qdev->pdev,
 				 (size_t) qdev->rsp_q_size,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d6c6cfb68631..50a55fb10368 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3865,6 +3865,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
 	void __iomem *ioaddr = tp->mmio_addr;
 
 	switch (tp->mac_version) {
+	case RTL_GIGA_MAC_VER_25:
+	case RTL_GIGA_MAC_VER_26:
 	case RTL_GIGA_MAC_VER_29:
 	case RTL_GIGA_MAC_VER_30:
 	case RTL_GIGA_MAC_VER_32:
@@ -4557,6 +4559,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
 		mc_filter[1] = swab32(data);
 	}
 
+	if (tp->mac_version == RTL_GIGA_MAC_VER_35)
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+
 	RTL_W32(MAR0 + 4, mc_filter[1]);
 	RTL_W32(MAR0 + 0, mc_filter[0]);
 
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 14297ff9c6dc..48fcb5e3bd3d 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_TI 5config NET_VENDOR_TI
6 bool "Texas Instruments (TI) devices" 6 bool "Texas Instruments (TI) devices"
7 default y 7 default y
8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3)) 8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e2a1628484d..4e9810013850 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb)
1334{ 1334{
1335 struct skb_shared_info *sh = skb_shinfo(skb); 1335 struct skb_shared_info *sh = skb_shinfo(skb);
1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1337 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1337 unsigned int data_len = skb->len - sh_len;
1338 unsigned int p_len = sh->gso_size; 1338 unsigned int p_len = sh->gso_size;
1339 long f_id = -1; /* id of the current fragment */ 1339 long f_id = -1; /* id of the current fragment */
1340 long f_size = skb->hdr_len; /* size of the current fragment */ 1340 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1341 long f_used = sh_len; /* bytes used from the current fragment */ 1341 long f_used = 0; /* bytes used from the current fragment */
1342 long n; /* size of the current piece of payload */ 1342 long n; /* size of the current piece of payload */
1343 int num_edescs = 0; 1343 int num_edescs = 0;
1344 int segment; 1344 int segment;
@@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb)
1353 /* Advance as needed. */ 1353 /* Advance as needed. */
1354 while (f_used >= f_size) { 1354 while (f_used >= f_size) {
1355 f_id++; 1355 f_id++;
1356 f_size = sh->frags[f_id].size; 1356 f_size = skb_frag_size(&sh->frags[f_id]);
1357 f_used = 0; 1357 f_used = 0;
1358 } 1358 }
1359 1359
@@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1384 struct iphdr *ih; 1384 struct iphdr *ih;
1385 struct tcphdr *th; 1385 struct tcphdr *th;
1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1387 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1387 unsigned int data_len = skb->len - sh_len;
1388 unsigned char *data = skb->data; 1388 unsigned char *data = skb->data;
1389 unsigned int ih_off, th_off, p_len; 1389 unsigned int ih_off, th_off, p_len;
1390 unsigned int isum_seed, tsum_seed, id, seq; 1390 unsigned int isum_seed, tsum_seed, id, seq;
1391 long f_id = -1; /* id of the current fragment */ 1391 long f_id = -1; /* id of the current fragment */
1392 long f_size = skb->hdr_len; /* size of the current fragment */ 1392 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1393 long f_used = sh_len; /* bytes used from the current fragment */ 1393 long f_used = 0; /* bytes used from the current fragment */
1394 long n; /* size of the current piece of payload */ 1394 long n; /* size of the current piece of payload */
1395 int segment; 1395 int segment;
1396 1396
@@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1405 isum_seed = ((0xFFFF - ih->check) + 1405 isum_seed = ((0xFFFF - ih->check) +
1406 (0xFFFF - ih->tot_len) + 1406 (0xFFFF - ih->tot_len) +
1407 (0xFFFF - ih->id)); 1407 (0xFFFF - ih->id));
1408 tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); 1408 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1409 id = ntohs(ih->id); 1409 id = ntohs(ih->id);
1410 seq = ntohl(th->seq); 1410 seq = ntohl(th->seq);
1411 1411
@@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1444 /* Advance as needed. */ 1444 /* Advance as needed. */
1445 while (f_used >= f_size) { 1445 while (f_used >= f_size) {
1446 f_id++; 1446 f_id++;
1447 f_size = sh->frags[f_id].size; 1447 f_size = skb_frag_size(&sh->frags[f_id]);
1448 f_used = 0; 1448 f_used = 0;
1449 } 1449 }
1450 1450
@@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1478 struct tile_net_priv *priv = netdev_priv(dev); 1478 struct tile_net_priv *priv = netdev_priv(dev);
1479 struct skb_shared_info *sh = skb_shinfo(skb); 1479 struct skb_shared_info *sh = skb_shinfo(skb);
1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1481 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1481 unsigned int data_len = skb->len - sh_len;
1482 unsigned int p_len = sh->gso_size; 1482 unsigned int p_len = sh->gso_size;
1483 gxio_mpipe_edesc_t edesc_head = { { 0 } }; 1483 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1484 gxio_mpipe_edesc_t edesc_body = { { 0 } }; 1484 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1485 long f_id = -1; /* id of the current fragment */ 1485 long f_id = -1; /* id of the current fragment */
1486 long f_size = skb->hdr_len; /* size of the current fragment */ 1486 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1487 long f_used = sh_len; /* bytes used from the current fragment */ 1487 long f_used = 0; /* bytes used from the current fragment */
1488 void *f_data = skb->data; 1488 void *f_data = skb->data + sh_len;
1489 long n; /* size of the current piece of payload */ 1489 long n; /* size of the current piece of payload */
1490 unsigned long tx_packets = 0, tx_bytes = 0; 1490 unsigned long tx_packets = 0, tx_bytes = 0;
1491 unsigned int csum_start; 1491 unsigned int csum_start;
@@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1516 1516
1517 /* Egress the payload. */ 1517 /* Egress the payload. */
1518 while (p_used < p_len) { 1518 while (p_used < p_len) {
1519 void *va;
1519 1520
1520 /* Advance as needed. */ 1521 /* Advance as needed. */
1521 while (f_used >= f_size) { 1522 while (f_used >= f_size) {
1522 f_id++; 1523 f_id++;
1523 f_size = sh->frags[f_id].size; 1524 f_size = skb_frag_size(&sh->frags[f_id]);
1524 f_used = 0;
1525 f_data = tile_net_frag_buf(&sh->frags[f_id]); 1525 f_data = tile_net_frag_buf(&sh->frags[f_id]);
1526 f_used = 0;
1526 } 1527 }
1527 1528
1529 va = f_data + f_used;
1530
1528 /* Use bytes from the current fragment. */ 1531 /* Use bytes from the current fragment. */
1529 n = p_len - p_used; 1532 n = p_len - p_used;
1530 if (n > f_size - f_used) 1533 if (n > f_size - f_used)
@@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1533 p_used += n; 1536 p_used += n;
1534 1537
1535 /* Egress a piece of the payload. */ 1538 /* Egress a piece of the payload. */
1536 edesc_body.va = va_to_tile_io_addr(f_data) + f_used; 1539 edesc_body.va = va_to_tile_io_addr(va);
1537 edesc_body.xfer_size = n; 1540 edesc_body.xfer_size = n;
1538 edesc_body.bound = !(p_used < p_len); 1541 edesc_body.bound = !(p_used < p_len);
1539 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); 1542 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
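
The tilegx changes above drop the skb->hdr_len arithmetic in favour of skb_headlen() for the linear area and skb_frag_size() for each page fragment, so the payload length reduces to skb->len - sh_len. A hedged, standalone sketch of that walk (assuming a TSO skb with no frag_list; count_payload_bytes is an illustrative helper, not a driver function):

#include <linux/skbuff.h>

/* Sketch only: total the bytes that follow hdr_len the way the patch
 * iterates -- remainder of the linear area first, then each fragment. */
static unsigned int count_payload_bytes(struct sk_buff *skb, unsigned int hdr_len)
{
        struct skb_shared_info *sh = skb_shinfo(skb);
        unsigned int total = skb_headlen(skb) - hdr_len;
        int i;

        for (i = 0; i < sh->nr_frags; i++)
                total += skb_frag_size(&sh->frags[i]);

        return total;   /* equals skb->len - hdr_len when there is no frag_list */
}
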
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0793299bd39e..1d04754a6637 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -990,7 +990,7 @@ static int axienet_stop(struct net_device *ndev)
990 axienet_setoptions(ndev, lp->options & 990 axienet_setoptions(ndev, lp->options &
991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
992 992
993 tasklet_disable(&lp->dma_err_tasklet); 993 tasklet_kill(&lp->dma_err_tasklet);
994 994
995 free_irq(lp->tx_irq, ndev); 995 free_irq(lp->tx_irq, ndev);
996 free_irq(lp->rx_irq, ndev); 996 free_irq(lp->rx_irq, ndev);
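
Switching from tasklet_disable() to tasklet_kill() in the stop path above means the DMA-error tasklet is waited for and left unscheduled once the call returns, rather than only being kept from running. A minimal sketch of that teardown order with placeholder private fields (struct my_priv, my_hw_quiesce are illustrative):

/* Sketch only: kill the deferred-work tasklet during stop, then release IRQs. */
static int my_stop(struct net_device *ndev)
{
        struct my_priv *lp = netdev_priv(ndev);

        my_hw_quiesce(lp);              /* stop TX/RX so no new error events fire */

        /* Waits until the tasklet is neither scheduled nor running. */
        tasklet_kill(&lp->dma_err_tasklet);

        free_irq(lp->tx_irq, ndev);
        free_irq(lp->rx_irq, ndev);

        return 0;
}
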