Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/8390/ne.c                        |   1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c   |   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c      | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c      |  24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h            |  19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c       |  39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h        |   3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c            |   6
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c              |   5
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ptp.c          |   2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c      |   3
-rw-r--r--  drivers/net/ethernet/jme.c                            |  36
-rw-r--r--  drivers/net/ethernet/marvell/skge.c                   |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c            |   3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c               |  24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c             |   5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |   6
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c                 |  20
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c                    |   1
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  | 116
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c                 |   9
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c                 |  22
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c                  |   5
-rw-r--r--  drivers/net/ethernet/sis/sis900.c                     |   2
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c                  |  17
-rw-r--r--  drivers/net/ethernet/ti/Kconfig                       |   2
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c                    |  37
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c     |  16
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c              |   8
29 files changed, 325 insertions(+), 272 deletions(-)
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index d04911d33b64..47618e505355 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
 		dev->irq = irq[this_dev];
 		dev->mem_end = bad[this_dev];
 	}
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	err = do_ne_probe(dev);
 	if (err) {
 		free_netdev(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c65295dded39..6e5bdd1a31d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1702,7 +1702,7 @@ static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
 				    SHMEM_EEE_ADV_STATUS_SHIFT);
 		if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
 			DP(BNX2X_MSG_ETHTOOL,
-			   "Direct manipulation of EEE advertisment is not supported\n");
+			   "Direct manipulation of EEE advertisement is not supported\n");
 			return -EINVAL;
 		}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index e2e45ee5df33..f6cfdc6cf20f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -137,7 +137,16 @@
 #define LINK_20GTFD		LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
 #define LINK_20GXFD		LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
 
-
+#define LINK_UPDATE_MASK \
+			(LINK_STATUS_SPEED_AND_DUPLEX_MASK | \
+			 LINK_STATUS_LINK_UP | \
+			 LINK_STATUS_PHYSICAL_LINK_FLAG | \
+			 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE | \
+			 LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK | \
+			 LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK | \
+			 LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE | \
+			 LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
 
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
@@ -3295,6 +3304,21 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
 			     DEFAULT_PHY_DEV_ADDR);
 }
 
+static void bnx2x_xgxs_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+	switch (action) {
+	case PHY_INIT:
+		/* Set correct devad */
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + params->port*0x18, 0);
+		REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + params->port*0x18,
+		       phy->def_md_devad);
+		break;
+	}
+}
+
 static void bnx2x_xgxs_deassert(struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
@@ -3309,10 +3333,8 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
 	udelay(500);
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
-
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
-	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
-		params->phy[INT_PHY].def_md_devad);
+	bnx2x_xgxs_specific_func(&params->phy[INT_PHY], params,
+				 PHY_INIT);
 }
 
 static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
@@ -3545,14 +3567,11 @@ static void bnx2x_warpcore_set_lpi_passthrough(struct bnx2x_phy *phy,
 static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 					struct link_params *params,
 					struct link_vars *vars) {
-	u16 val16 = 0, lane, i;
+	u16 lane, i, cl72_ctrl, an_adv = 0;
+	u16 ucode_ver;
 	struct bnx2x *bp = params->bp;
 	static struct bnx2x_reg_set reg_set[] = {
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
 		{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
@@ -3565,12 +3584,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, &cl72_ctrl);
+	cl72_ctrl &= 0xf8ff;
+	cl72_ctrl |= 0x3800;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, cl72_ctrl);
+
 	/* Check adding advertisement for 1G KX */
 	if (((vars->line_speed == SPEED_AUTO_NEG) &&
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 	    (vars->line_speed == SPEED_1000)) {
 		u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
-		val16 |= (1<<5);
+		an_adv |= (1<<5);
 
 		/* Enable CL37 1G Parallel Detect */
 		bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
@@ -3580,11 +3606,14 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
 	    (vars->line_speed == SPEED_10000)) {
 		/* Check adding advertisement for 10G KR */
-		val16 |= (1<<7);
+		an_adv |= (1<<7);
 		/* Enable 10G Parallel Detect */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+
 		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
 				 MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
-
+		bnx2x_set_aer_mmd(params, phy);
 		DP(NETIF_MSG_LINK, "Advertize 10G\n");
 	}
 
@@ -3604,7 +3633,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 
 	/* Advertised speeds */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
-			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, an_adv);
 
 	/* Advertised and set FEC (Forward Error Correction) */
 	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
@@ -3628,9 +3657,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Set KR Autoneg Work-Around flag for Warpcore version older than D108
 	 */
 	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
-			MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
-	if (val16 < 0xd108) {
-		DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+			MDIO_WC_REG_UC_INFO_B1_VERSION, &ucode_ver);
+	if (ucode_ver < 0xd108) {
+		DP(NETIF_MSG_LINK, "Enable AN KR work-around. WC ver:0x%x\n",
+		   ucode_ver);
 		vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
 	}
 	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
@@ -3651,21 +3681,16 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 				      struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u16 i;
+	u16 val16, i, lane;
 	static struct bnx2x_reg_set reg_set[] = {
 		/* Disable Autoneg */
 		{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
-		{MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
 			0x3f00},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
 		{MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
 		{MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
-		/* Disable CL36 PCS Tx */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
-		/* Double Wide Single Data Rate @ pll rate */
-		{MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
 		/* Leave cl72 training enable, needed for KR */
 		{MDIO_PMA_DEVAD,
 		MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
@@ -3676,11 +3701,24 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
 				 reg_set[i].val);
 
-	/* Leave CL72 enabled */
-	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
-				 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
-				 0x3800);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Global registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 &= ~(0x0011 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
 
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 |= (0x0303 << (lane << 1));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
 	/* Set speed via PMA/PMD register */
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
 			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
@@ -4303,7 +4341,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 				      struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u16 val16;
+	u16 val16, lane;
 	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
 	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
 	bnx2x_set_aer_mmd(params, phy);
@@ -4340,6 +4378,30 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
 			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
 			 val16 & 0xff00);
 
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, &val16);
+	val16 |= (0x11 << lane);
+	if (phy->flags & FLAGS_WC_DUAL_MODE)
+		val16 |= (0x22 << lane);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL0, val16);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, &val16);
+	val16 &= ~(0x0303 << (lane << 1));
+	val16 |= (0x0101 << (lane << 1));
+	if (phy->flags & FLAGS_WC_DUAL_MODE) {
+		val16 &= ~(0x0c0c << (lane << 1));
+		val16 |= (0x0404 << (lane << 1));
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL1, val16);
+	/* Restore AER */
+	bnx2x_set_aer_mmd(params, phy);
+
 }
 
 static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
@@ -6296,15 +6358,7 @@ static int bnx2x_update_link_down(struct link_params *params,
 	vars->mac_type = MAC_TYPE_NONE;
 
 	/* Update shared memory */
-	vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
-			       LINK_STATUS_LINK_UP |
-			       LINK_STATUS_PHYSICAL_LINK_FLAG |
-			       LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
-			       LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
-			       LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
-			       LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
-			       LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	vars->line_speed = 0;
 	bnx2x_update_mng(params, vars->link_status);
 
@@ -6452,6 +6506,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
 	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
 	u8 active_external_phy = INT_PHY;
 	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+	vars->link_status &= ~LINK_UPDATE_MASK;
 	for (phy_index = INT_PHY; phy_index < params->num_phys;
 	      phy_index++) {
 		phy_vars[phy_index].flow_ctrl = 0;
@@ -7579,7 +7634,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
 						 u16 addr, u8 byte_cnt,
-						 u8 *o_buf)
+						 u8 *o_buf, u8 is_init)
 {
 	int rc = 0;
 	u8 i, j = 0, cnt = 0;
@@ -7596,10 +7651,10 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* 4 byte aligned address */
 	addr32 = addr & (~0x3);
 	do {
-		if (cnt == I2C_WA_PWR_ITER) {
+		if ((!is_init) && (cnt == I2C_WA_PWR_ITER)) {
 			bnx2x_warpcore_power_module(params, phy, 0);
 			/* Note that 100us are not enough here */
-			usleep_range(1000,1000);
+			usleep_range(1000, 2000);
 			bnx2x_warpcore_power_module(params, phy, 1);
 		}
 		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
@@ -7719,7 +7774,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
 		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
-							   byte_cnt, o_buf);
+							   byte_cnt, o_buf, 0);
 		break;
 	}
 	return rc;
@@ -7923,6 +7978,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 
 {
 	u8 val;
+	int rc;
 	struct bnx2x *bp = params->bp;
 	u16 timeout;
 	/* Initialization time after hot-plug may take up to 300ms for
@@ -7930,8 +7986,14 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 	 */
 
 	for (timeout = 0; timeout < 60; timeout++) {
-		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
-		    == 0) {
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+			rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
+								   params, 1,
+								   1, &val, 1);
+		else
+			rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
+							  &val);
+		if (rc == 0) {
 			DP(NETIF_MSG_LINK,
 			   "SFP+ module initialization took %d ms\n",
 			   timeout * 5);
@@ -7939,7 +8001,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 		}
 		usleep_range(5000, 10000);
 	}
-	return -EINVAL;
+	rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+	return rc;
 }
 
 static void bnx2x_8727_power_module(struct bnx2x *bp,
@@ -9878,7 +9941,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 		else
 			rc = bnx2x_8483x_disable_eee(phy, params, vars);
 		if (rc) {
-			DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+			DP(NETIF_MSG_LINK, "Failed to set EEE advertisement\n");
 			return rc;
 		}
 	} else {
@@ -10993,7 +11056,7 @@ static struct bnx2x_phy phy_xgxs = {
 	.format_fw_ver	= (format_fw_ver_t)NULL,
 	.hw_reset	= (hw_reset_t)NULL,
 	.set_link_led	= (set_link_led_t)NULL,
-	.phy_specific_func = (phy_specific_func_t)NULL
+	.phy_specific_func = (phy_specific_func_t)bnx2x_xgxs_specific_func
 };
 static struct bnx2x_phy phy_warpcore = {
 	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
@@ -11465,6 +11528,11 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
 			phy->media_type = ETH_PHY_BASE_T;
 			break;
 		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
 			phy->media_type = ETH_PHY_XFP_FIBER;
 			break;
 		case PORT_HW_CFG_NET_SERDES_IF_SFI:
@@ -12919,7 +12987,7 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
 		DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
 		break;
 	default:
-		DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+		DP(NETIF_MSG_LINK, "Analyze UNKNOWN\n");
 	}
 	DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
 	   old_status, status);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d5648fc666bd..01611b33a93d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6794,8 +6794,9 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
 
 	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
 
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
 	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
-		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
 
 		if (IS_MF(bp))
 			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
@@ -9544,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
  */
 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
 {
-	u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-	if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-		BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+	if (!CHIP_IS_E1x(bp)) {
+		u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+			BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+			REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+			       1 << BP_FUNC(bp));
+		}
 	}
 }
 
@@ -11902,7 +11906,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
 	/* disable FCOE L2 queue for E1x */
 	if (CHIP_IS_E1x(bp))
 		bp->flags |= NO_FCOE_FLAG;
-
+	/* disable FCOE for 57840 device, until FW supports it */
+	switch (ent->driver_data) {
+	case BCM57840_O:
+	case BCM57840_4_10:
+	case BCM57840_2_20:
+	case BCM57840_MFO:
+	case BCM57840_MF:
+		bp->flags |= NO_FCOE_FLAG;
+	}
 #endif
 
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index a4da893ac1e1..378988b5709a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -251,6 +251,8 @@ struct adapter_params {
 	unsigned char rev;             /* chip revision */
 	unsigned char offload;
 
+	unsigned char bypass;
+
 	unsigned int ofldq_wr_cred;
 };
 
@@ -642,6 +644,23 @@ extern int dbfifo_int_thresh;
 #define for_each_port(adapter, iter) \
 	for (iter = 0; iter < (adapter)->params.nports; ++iter)
 
+static inline int is_bypass(struct adapter *adap)
+{
+	return adap->params.bypass;
+}
+
+static inline int is_bypass_device(int device)
+{
+	/* this should be set based upon device capabilities */
+	switch (device) {
+	case 0x440b:
+	case 0x440c:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
 {
 	return adap->params.vpd.cclk / 1000;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 604f4f87f550..0df1284df497 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3416,16 +3416,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
 		 finicsum, cfcsum);
 
 	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-
-	/*
 	 * And now tell the firmware to use the configuration we just loaded.
 	 */
 	caps_cmd.op_to_write =
@@ -3513,18 +3503,6 @@ static int adap_init0_no_config(struct adapter *adapter, int reset)
 	if (ret < 0)
 		goto bye;
 
-#ifndef CONFIG_CHELSIO_T4_OFFLOAD
-	/*
-	 * If we're a pure NIC driver then disable all offloading facilities.
-	 * This will allow the firmware to optimize aspects of the hardware
-	 * configuration which will result in improved performance.
-	 */
-	caps_cmd.ofldcaps = 0;
-	caps_cmd.iscsicaps = 0;
-	caps_cmd.rdmacaps = 0;
-	caps_cmd.fcoecaps = 0;
-#endif
-
 	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
 		if (!vf_acls)
 			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
@@ -3745,6 +3723,7 @@ static int adap_init0(struct adapter *adap)
 	u32 v, port_vec;
 	enum dev_state state;
 	u32 params[7], val[7];
+	struct fw_caps_config_cmd caps_cmd;
 	int reset = 1, j;
 
 	/*
@@ -3898,6 +3877,9 @@ static int adap_init0(struct adapter *adap)
 		goto bye;
 	}
 
+	if (is_bypass_device(adap->pdev->device))
+		adap->params.bypass = 1;
+
 	/*
 	 * Grab some of our basic fundamental operating parameters.
 	 */
@@ -3940,13 +3922,12 @@ static int adap_init0(struct adapter *adap)
 		adap->tids.aftid_end = val[1];
 	}
 
-#ifdef CONFIG_CHELSIO_T4_OFFLOAD
 	/*
 	 * Get device capabilities so we can determine what resources we need
 	 * to manage.
 	 */
 	memset(&caps_cmd, 0, sizeof(caps_cmd));
-	caps_cmd.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
 				     FW_CMD_REQUEST | FW_CMD_READ);
 	caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
 	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
@@ -3991,15 +3972,6 @@ static int adap_init0(struct adapter *adap)
 		adap->vres.ddp.size = val[4] - val[3] + 1;
 		adap->params.ofldq_wr_cred = val[5];
 
-		params[0] = FW_PARAM_PFVF(ETHOFLD_START);
-		params[1] = FW_PARAM_PFVF(ETHOFLD_END);
-		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
-				      params, val);
-		if ((val[0] != val[1]) && (ret >= 0)) {
-			adap->tids.uotid_base = val[0];
-			adap->tids.nuotids = val[1] - val[0] + 1;
-		}
-
 		adap->params.offload = 1;
 	}
 	if (caps_cmd.rdmacaps) {
@@ -4048,7 +4020,6 @@ static int adap_init0(struct adapter *adap)
 	}
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
-#endif /* CONFIG_CHELSIO_T4_OFFLOAD */
 
 	/*
 	 * These are finalized by FW initialization, load their values now.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b899fea1a91..39bec73ff87c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -102,6 +102,9 @@ struct tid_info {
 	unsigned int ftid_base;
 	unsigned int aftid_base;
 	unsigned int aftid_end;
+	/* Server filter region */
+	unsigned int sftid_base;
+	unsigned int nsftids;
 
 	spinlock_t atid_lock ____cacheline_aligned_in_smp;
 	union aopen_entry *afree;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32eec15fe4c2..730ae2cfa49e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2519,6 +2519,7 @@ int t4_fw_bye(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_bye_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, BYE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2535,6 +2536,7 @@ int t4_early_init(struct adapter *adap, unsigned int mbox)
 {
 	struct fw_initialize_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, INITIALIZE, WRITE);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
@@ -2551,6 +2553,7 @@ int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
 {
 	struct fw_reset_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	INIT_CMD(c, RESET, WRITE);
 	c.val = htonl(reset);
 	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
@@ -2828,7 +2831,7 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 		     HOSTPAGESIZEPF7(sge_hps));
 
 	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY(INGPADBOUNDARY_MASK) |
+			 INGPADBOUNDARY_MASK |
 			 EGRSTATUSPAGESIZE_MASK,
 			 INGPADBOUNDARY(fl_align_log - 5) |
 			 EGRSTATUSPAGESIZE(stat_len != 64));
@@ -3278,6 +3281,7 @@ int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
 {
 	struct fw_vi_enable_cmd c;
 
+	memset(&c, 0, sizeof(c));
 	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
 			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
 	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 1d03dcdd5e56..19ac096cb07b 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1353,8 +1353,11 @@ static int gfar_restore(struct device *dev)
 	struct gfar_private *priv = dev_get_drvdata(dev);
 	struct net_device *ndev = priv->ndev;
 
-	if (!netif_running(ndev))
+	if (!netif_running(ndev)) {
+		netif_device_attach(ndev);
+
 		return 0;
+	}
 
 	gfar_init_bds(ndev);
 	init_registers(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index b9db0e040563..2e5daee0438a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -478,7 +478,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
 		pr_err("no resource\n");
 		goto no_resource;
 	}
-	if (request_resource(&ioport_resource, etsects->rsrc)) {
+	if (request_resource(&iomem_resource, etsects->rsrc)) {
 		pr_err("resource busy\n");
 		goto no_resource;
 	}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 56b20d17d0e4..116f0e901bee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2673,6 +2673,9 @@ static int ixgbe_get_ts_info(struct net_device *dev,
 	case ixgbe_mac_X540:
 	case ixgbe_mac_82599EB:
 		info->so_timestamping =
+			SOF_TIMESTAMPING_TX_SOFTWARE |
+			SOF_TIMESTAMPING_RX_SOFTWARE |
+			SOF_TIMESTAMPING_SOFTWARE |
 			SOF_TIMESTAMPING_TX_HARDWARE |
 			SOF_TIMESTAMPING_RX_HARDWARE |
 			SOF_TIMESTAMPING_RAW_HARDWARE;
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f8064df10cc4..60ac46f4ac08 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1860,10 +1860,14 @@ jme_open(struct net_device *netdev)
 	jme_clear_pm(jme);
 	JME_NAPI_ENABLE(jme);
 
-	tasklet_enable(&jme->linkch_task);
-	tasklet_enable(&jme->txclean_task);
-	tasklet_hi_enable(&jme->rxclean_task);
-	tasklet_hi_enable(&jme->rxempty_task);
+	tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
+		     (unsigned long) jme);
+	tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
+		     (unsigned long) jme);
 
 	rc = jme_request_irq(jme);
 	if (rc)
@@ -1948,10 +1952,10 @@ jme_close(struct net_device *netdev)
 
 	JME_NAPI_DISABLE(jme);
 
-	tasklet_disable(&jme->linkch_task);
-	tasklet_disable(&jme->txclean_task);
-	tasklet_disable(&jme->rxclean_task);
-	tasklet_disable(&jme->rxempty_task);
+	tasklet_kill(&jme->linkch_task);
+	tasklet_kill(&jme->txclean_task);
+	tasklet_kill(&jme->rxclean_task);
+	tasklet_kill(&jme->rxempty_task);
 
 	jme_disable_rx_engine(jme);
 	jme_disable_tx_engine(jme);
@@ -3079,22 +3083,6 @@ jme_init_one(struct pci_dev *pdev,
 	tasklet_init(&jme->pcc_task,
 		     jme_pcc_tasklet,
 		     (unsigned long) jme);
-	tasklet_init(&jme->linkch_task,
-		     jme_link_change_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->txclean_task,
-		     jme_tx_clean_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->rxclean_task,
-		     jme_rx_clean_tasklet,
-		     (unsigned long) jme);
-	tasklet_init(&jme->rxempty_task,
-		     jme_rx_empty_tasklet,
-		     (unsigned long) jme);
-	tasklet_disable_nosync(&jme->linkch_task);
-	tasklet_disable_nosync(&jme->txclean_task);
-	tasklet_disable_nosync(&jme->rxclean_task);
-	tasklet_disable_nosync(&jme->rxempty_task);
 	jme->dpi.cur = PCC_P1;
 
 	jme->reg_ghc = 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9b9c2ac5c4c2..d19a143aa5a8 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4026,7 +4026,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
-	tasklet_disable(&hw->phy_task);
+	tasklet_kill(&hw->phy_task);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c10e3a6de09f..b35094c590ba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,7 +143,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 		mlx4_bf_free(mdev->dev, &ring->bf);
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
-	mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
 	mlx4_en_unmap_buffer(&ring->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
 	kfree(ring->bounce_buf);
@@ -712,7 +711,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (bounce)
 		tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
 
-	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
+	if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
 		*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
 		op_own |= htonl((bf_index & 0xffff) << 8);
 		/* Ensure new descirptor hits memory
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 51c764901ad2..b84a88bc44dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -329,9 +329,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 	ctx = &priv->mfunc.master.slave_state[slave];
 	spin_lock_irqsave(&ctx->lock, flags);
 
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event :%d\n",
-		 __func__, slave, cur_state, event);
-
 	switch (cur_state) {
 	case SLAVE_PORT_DOWN:
 		if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
@@ -366,9 +363,6 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
 		goto out;
 	}
 	ret = mlx4_get_slave_port_state(dev, slave, port);
-	mlx4_dbg(dev, "%s: slave: %d, current state: %d new event"
-		 " :%d gen_event: %d\n",
-		 __func__, slave, cur_state, event, *gen_event);
 
 out:
 	spin_unlock_irqrestore(&ctx->lock, flags);
@@ -843,6 +837,18 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
 }
 
+static void mlx4_unmap_uar(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+
+	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
+		if (priv->eq_table.uar_map[i]) {
+			iounmap(priv->eq_table.uar_map[i]);
+			priv->eq_table.uar_map[i] = NULL;
+		}
+}
+
 static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 			  u8 intr, struct mlx4_eq *eq)
 {
@@ -1207,6 +1213,7 @@ err_out_unmap:
 	mlx4_free_irqs(dev);
 
 err_out_bitmap:
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 err_out_free:
@@ -1231,10 +1238,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	if (!mlx4_is_slave(dev))
 		mlx4_unmap_clr_int(dev);
 
-	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
-		if (priv->eq_table.uar_map[i])
-			iounmap(priv->eq_table.uar_map[i]);
-
+	mlx4_unmap_uar(dev);
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
 
 	kfree(priv->eq_table.uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 80df2ab0177c..2aa80afd98d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1405,7 +1405,10 @@ unmap_bf:
 	unmap_bf_area(dev);
 
 err_close:
-	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_slave_exit(dev);
+	else
+		mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	if (!mlx4_is_slave(dev))
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 926c911c0ac4..b05705f50f0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -330,9 +330,6 @@ static void update_pkey_index(struct mlx4_dev *dev, int slave,
 
 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
 	*(u8 *)(inbox->buf + 35) = new_index;
-
-	mlx4_dbg(dev, "port = %d, orig pkey index = %d, "
-		 "new pkey index = %d\n", port, orig_index, new_index);
 }
 
 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
@@ -351,9 +348,6 @@ static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH)
 			qp_ctx->alt_path.mgid_index = slave & 0x7F;
 	}
-
-	mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
-		 slave, qp_ctx->pri_path.mgid_index);
 }
 
 static int mpt_mask(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 318fee91c79d..69e01977a1dd 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5407,8 +5407,8 @@ static int netdev_close(struct net_device *dev)
 	/* Delay for receive task to stop scheduling itself. */
 	msleep(2000 / HZ);
 
-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
+	tasklet_kill(&hw_priv->rx_tasklet);
+	tasklet_kill(&hw_priv->tx_tasklet);
 	free_irq(dev->irq, hw_priv->dev);
 
 	transmit_cleanup(hw_priv, 0);
@@ -5459,8 +5459,10 @@ static int prepare_hardware(struct net_device *dev)
 	rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
 	if (rc)
 		return rc;
-	tasklet_enable(&hw_priv->rx_tasklet);
-	tasklet_enable(&hw_priv->tx_tasklet);
+	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+		     (unsigned long) hw_priv);
+	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+		     (unsigned long) hw_priv);
 
 	hw->promiscuous = 0;
 	hw->all_multi = 0;
@@ -7033,16 +7035,6 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
 	spin_lock_init(&hw_priv->hwlock);
 	mutex_init(&hw_priv->lock);
 
-	/* tasklet is enabled. */
-	tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
-		     (unsigned long) hw_priv);
-	tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
-		     (unsigned long) hw_priv);
-
-	/* tasklet_enable will decrement the atomic counter. */
-	tasklet_disable(&hw_priv->rx_tasklet);
-	tasklet_disable(&hw_priv->tx_tasklet);
-
 	for (i = 0; i < TOTAL_PORT_NUM; i++)
 		init_waitqueue_head(&hw_priv->counter[i].counter);
 
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 53743f7a2ca9..af8b4142088c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1524,6 +1524,7 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
 			  pldat->dma_buff_base_p);
 	free_irq(ndev->irq, ndev);
 	iounmap(pldat->net_base);
+	mdiobus_unregister(pldat->mii_bus);
 	mdiobus_free(pldat->mii_bus);
 	clk_disable(pldat->clk);
 	clk_put(pldat->clk);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index b2a94d02a521..4c4fe5b1a29a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 }
 
 /**
- * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
- * @reg: Pointer of register
- * @busy: Busy bit
- */
-static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
-{
-	u32 tmp;
-	int ret = -1;
-	/* wait busy */
-	tmp = 20;
-	while ((ioread32(reg) & bit) && --tmp)
-		udelay(5);
-	if (!tmp)
-		pr_err("Error: busy bit is not cleared\n");
-	else
-		ret = 0;
-	return ret;
-}
-
-/**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw: Pointer to the HW structure
  * @addr: Pointer to the MAC address
@@ -409,15 +389,20 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 	return;
 }
 
-static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
+static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
 {
-	/* Read the MAC addresses. and store to the private data */
-	pch_gbe_mac_read_mac_addr(hw);
-	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
-	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC addresses */
-	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
-	return;
+	u32 rctl;
+	/* Disables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+}
+
+static void pch_gbe_enable_mac_rx(struct pch_gbe_hw *hw)
+{
+	u32 rctl;
+	/* Enables Receive MAC */
+	rctl = ioread32(&hw->reg->MAC_RX_EN);
+	iowrite32((rctl | PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
 }
 
 /**
@@ -913,7 +898,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
 static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 {
 	struct pch_gbe_hw *hw = &adapter->hw;
-	u32 rdba, rdlen, rctl, rxdma;
+	u32 rdba, rdlen, rxdma;
 
 	pr_debug("dma adr = 0x%08llx size = 0x%08x\n",
 		 (unsigned long long)adapter->rx_ring->dma,
@@ -921,9 +906,7 @@ static void pch_gbe_configure_rx(struct pch_gbe_adapter *adapter)
 
 	pch_gbe_mac_force_mac_fc(hw);
 
-	/* Disables Receive MAC */
-	rctl = ioread32(&hw->reg->MAC_RX_EN);
-	iowrite32((rctl & ~PCH_GBE_MRE_MAC_RX_EN), &hw->reg->MAC_RX_EN);
+	pch_gbe_disable_mac_rx(hw);
 
 	/* Disables Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
@@ -1316,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
 {
-	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 rxdma;
-	u16 value;
-	int ret;
 
 	/* Disable Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma &= ~PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Wait Rx DMA BUS is IDLE */
-	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
-	if (ret) {
-		/* Disable Bus master */
-		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
-		value &= ~PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-		/* Enable Bus master */
-		value |= PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-	} else {
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-	}
-	/* reprogram multicast address register after reset */
-	pch_gbe_set_multi(adapter->netdev);
 }
 
-static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
 {
 	u32 rxdma;
 
@@ -1355,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma |= PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Enables Receive */
-	iowrite32(PCH_GBE_MRE_MAC_RX_EN, &hw->reg->MAC_RX_EN);
-	return;
 }
 
 /**
@@ -1393,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 			int_en = ioread32(&hw->reg->INT_EN);
 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 				  &hw->reg->INT_EN);
-			pch_gbe_stop_receive(adapter);
+			pch_gbe_disable_dma_rx(&adapter->hw);
 			int_st |= ioread32(&hw->reg->INT_ST);
 			int_st = int_st & ioread32(&hw->reg->INT_EN);
 		}
@@ -1971,12 +1930,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
-	int err;
+	int err = -EINVAL;
 
 	/* Ensure we have a valid MAC */
 	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
 		pr_err("Error: Invalid MAC address\n");
-		return -EINVAL;
+		goto out;
 	}
 
 	/* hardware has been reset, we need to reload some things */
@@ -1989,18 +1948,19 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 
 	err = pch_gbe_request_irq(adapter);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - irq request failed\n");
+		goto out;
 	}
 	err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
 	if (err) {
-		pr_err("Error: can't bring device up\n");
-		return err;
+		pr_err("Error: can't bring device up - alloc rx buffers pool failed\n");
+		goto freeirq;
 	}
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 	adapter->tx_queue_len = netdev->tx_queue_len;
-	pch_gbe_start_receive(&adapter->hw);
+	pch_gbe_enable_dma_rx(&adapter->hw);
+	pch_gbe_enable_mac_rx(&adapter->hw);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -2009,6 +1969,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
2009 netif_start_queue(adapter->netdev); 1969 netif_start_queue(adapter->netdev);
2010 1970
2011 return 0; 1971 return 0;
1972
1973freeirq:
1974 pch_gbe_free_irq(adapter);
1975out:
1976 return err;
2012} 1977}
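pch_gbe_up() now follows the usual kernel unwind idiom: every failure jumps to a label that releases only what has already been acquired, and err carries the return value out. A compressed sketch of the shape (function names as in the hunk; ring setup, NAPI and timers are elided, and example_up() is a hypothetical wrapper):

static int example_up(struct pch_gbe_adapter *adapter)
{
        int err = -EINVAL;

        if (!is_valid_ether_addr(adapter->hw.mac.addr))
                goto out;                       /* nothing to undo yet */

        err = pch_gbe_request_irq(adapter);
        if (err)
                goto out;                       /* still nothing to undo */

        err = pch_gbe_alloc_rx_buffers_pool(adapter, adapter->rx_ring,
                                            adapter->rx_ring->count);
        if (err)
                goto freeirq;                   /* IRQ must be released */

        return 0;

freeirq:
        pch_gbe_free_irq(adapter);
out:
        return err;
}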
2013 1978
2014/** 1979/**
@@ -2405,7 +2370,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2405 int work_done = 0; 2370 int work_done = 0;
2406 bool poll_end_flag = false; 2371 bool poll_end_flag = false;
2407 bool cleaned = false; 2372 bool cleaned = false;
2408 u32 int_en;
2409 2373
2410 pr_debug("budget : %d\n", budget); 2374 pr_debug("budget : %d\n", budget);
2411 2375
@@ -2422,19 +2386,13 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2422 2386
2423 if (poll_end_flag) { 2387 if (poll_end_flag) {
2424 napi_complete(napi); 2388 napi_complete(napi);
2425 if (adapter->rx_stop_flag) {
2426 adapter->rx_stop_flag = false;
2427 pch_gbe_start_receive(&adapter->hw);
2428 }
2429 pch_gbe_irq_enable(adapter); 2389 pch_gbe_irq_enable(adapter);
2430 } else 2390 }
2431 if (adapter->rx_stop_flag) { 2391
2432 adapter->rx_stop_flag = false; 2392 if (adapter->rx_stop_flag) {
2433 pch_gbe_start_receive(&adapter->hw); 2393 adapter->rx_stop_flag = false;
2434 int_en = ioread32(&adapter->hw.reg->INT_EN); 2394 pch_gbe_enable_dma_rx(&adapter->hw);
2435 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), 2395 }
2436 &adapter->hw.reg->INT_EN);
2437 }
2438 2396
2439 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", 2397 pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",
2440 poll_end_flag, work_done, budget); 2398 poll_end_flag, work_done, budget);
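With the renamed helpers, RX FIFO overflow recovery becomes one symmetric path: the interrupt handler masks PCH_GBE_INT_RX_FIFO_ERR and stops RX DMA, and the NAPI poll restarts RX DMA once the ring has been drained (interrupt sources come back through pch_gbe_irq_enable() when the poll completes). The two halves, reduced to fragments with the surrounding driver code elided:

/* ISR fragment: on RX FIFO overflow, mask the source and stop RX DMA. */
adapter->rx_stop_flag = true;
iowrite32(ioread32(&hw->reg->INT_EN) & ~PCH_GBE_INT_RX_FIFO_ERR,
          &hw->reg->INT_EN);
pch_gbe_disable_dma_rx(&adapter->hw);

/* NAPI fragment: once the ring has been drained, restart RX DMA. */
if (adapter->rx_stop_flag) {
        adapter->rx_stop_flag = false;
        pch_gbe_enable_dma_rx(&adapter->hw);
}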
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index df09b1cb742f..6407d0d77e81 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -2525,6 +2525,13 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2525 qdev->req_q_size = 2525 qdev->req_q_size =
2526 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req)); 2526 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2527 2527
2528 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2529
2530 /* The barrier is required to ensure request and response queue
2531 * addr writes to the registers.
2532 */
2533 wmb();
2534
2528 qdev->req_q_virt_addr = 2535 qdev->req_q_virt_addr =
2529 pci_alloc_consistent(qdev->pdev, 2536 pci_alloc_consistent(qdev->pdev,
2530 (size_t) qdev->req_q_size, 2537 (size_t) qdev->req_q_size,
@@ -2536,8 +2543,6 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2536 return -ENOMEM; 2543 return -ENOMEM;
2537 } 2544 }
2538 2545
2539 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2540
2541 qdev->rsp_q_virt_addr = 2546 qdev->rsp_q_virt_addr =
2542 pci_alloc_consistent(qdev->pdev, 2547 pci_alloc_consistent(qdev->pdev,
2543 (size_t) qdev->rsp_q_size, 2548 (size_t) qdev->rsp_q_size,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 1c818254b7be..b01f83a044c4 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp)
979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
981 981
982 cpw32_f(HiTxRingAddr, 0);
983 cpw32_f(HiTxRingAddr + 4, 0);
984
985 ring_dma = cp->ring_dma;
986 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
987 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
988
989 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
990 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
991 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
992
993 cp_start_hw(cp); 982 cp_start_hw(cp);
994 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 983 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
995 984
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp)
1003 992
1004 cpw8(Config5, cpr8(Config5) & PMEStatus); 993 cpw8(Config5, cpr8(Config5) & PMEStatus);
1005 994
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
1007 1007
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
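Moving the ring-address writes below cp_start_hw() does not change how a dma_addr_t is split across the two 32-bit registers: the low half is written directly, and '(ring_dma >> 16) >> 16' extracts the high half with two 16-bit shifts, which stays well defined even when dma_addr_t is only 32 bits wide (a single '>> 32' would not be). A small sketch of the same split; the helper name and the lo/hi register pair are illustrative:

/* Program a descriptor ring base address as a low/high register pair. */
static void example_write_ring_addr(void __iomem *lo, void __iomem *hi,
                                    dma_addr_t ring_dma)
{
        writel(ring_dma & 0xffffffff, lo);
        writel((ring_dma >> 16) >> 16, hi);     /* high 32 bits, or 0 */
}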
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e7ff886e8047..927aa33d4349 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -3827,6 +3827,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3827 void __iomem *ioaddr = tp->mmio_addr; 3827 void __iomem *ioaddr = tp->mmio_addr;
3828 3828
3829 switch (tp->mac_version) { 3829 switch (tp->mac_version) {
3830 case RTL_GIGA_MAC_VER_25:
3831 case RTL_GIGA_MAC_VER_26:
3830 case RTL_GIGA_MAC_VER_29: 3832 case RTL_GIGA_MAC_VER_29:
3831 case RTL_GIGA_MAC_VER_30: 3833 case RTL_GIGA_MAC_VER_30:
3832 case RTL_GIGA_MAC_VER_32: 3834 case RTL_GIGA_MAC_VER_32:
@@ -4519,6 +4521,9 @@ static void rtl_set_rx_mode(struct net_device *dev)
4519 mc_filter[1] = swab32(data); 4521 mc_filter[1] = swab32(data);
4520 } 4522 }
4521 4523
4524 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4525 mc_filter[1] = mc_filter[0] = 0xffffffff;
4526
4522 RTL_W32(MAR0 + 4, mc_filter[1]); 4527 RTL_W32(MAR0 + 4, mc_filter[1]);
4523 RTL_W32(MAR0 + 0, mc_filter[0]); 4528 RTL_W32(MAR0 + 0, mc_filter[0]);
4524 4529
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fb9f6b38511f..edf5edb13140 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2479 netif_start_queue(net_dev); 2479 netif_start_queue(net_dev);
2480 2480
2481 /* Workaround for EDB */ 2481 /* Workaround for EDB */
2482 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2482 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2483 2483
2484 /* Enable all known interrupts by setting the interrupt mask. */ 2484 /* Enable all known interrupts by setting the interrupt mask. */
2485 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); 2485 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 62d1baf111ea..c53c0f4e2ce3 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2110,7 +2110,7 @@ static void __devinit smsc911x_read_mac_address(struct net_device *dev)
2110static int __devinit smsc911x_init(struct net_device *dev) 2110static int __devinit smsc911x_init(struct net_device *dev)
2111{ 2111{
2112 struct smsc911x_data *pdata = netdev_priv(dev); 2112 struct smsc911x_data *pdata = netdev_priv(dev);
2113 unsigned int byte_test; 2113 unsigned int byte_test, mask;
2114 unsigned int to = 100; 2114 unsigned int to = 100;
2115 2115
2116 SMSC_TRACE(pdata, probe, "Driver Parameters:"); 2116 SMSC_TRACE(pdata, probe, "Driver Parameters:");
@@ -2130,9 +2130,22 @@ static int __devinit smsc911x_init(struct net_device *dev)
2130 /* 2130 /*
2131 * poll the READY bit in PMT_CTRL. Any other access to the device is 2131 * poll the READY bit in PMT_CTRL. Any other access to the device is
2132 * forbidden while this bit isn't set. Try for 100ms 2132 * forbidden while this bit isn't set. Try for 100ms
2133 *
2134 * Note that this test is done before the WORD_SWAP register is
2135 * programmed. So in some configurations the READY bit is at 16 before
2136 * WORD_SWAP is written to. This issue is worked around by waiting
2137 * until either bit 0 or bit 16 gets set in PMT_CTRL.
2138 *
2139 * SMSC has confirmed that checking bit 16 (marked as reserved in
2140 * the datasheet) is fine since these bits "will either never be set
2141 * or can only go high after READY does (so also indicate the device
2142 * is ready)".
2133 */ 2143 */
2134 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & PMT_CTRL_READY_) && --to) 2144
2145 mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
2146 while (!(smsc911x_reg_read(pdata, PMT_CTRL) & mask) && --to)
2135 udelay(1000); 2147 udelay(1000);
2148
2136 if (to == 0) { 2149 if (to == 0) {
2137 pr_err("Device not READY in 100ms aborting\n"); 2150 pr_err("Device not READY in 100ms aborting\n");
2138 return -ENODEV; 2151 return -ENODEV;
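swahw32(), from <linux/swab.h>, swaps the two 16-bit halves of a 32-bit word. With PMT_CTRL_READY_ defined as bit 0 in smsc911x.h, the new mask therefore covers bit 0 and bit 16 at once, which is exactly the "either bit 0 or bit 16" wait the comment describes:

#include <linux/swab.h>

u32 mask = PMT_CTRL_READY_ | swahw32(PMT_CTRL_READY_);
/* PMT_CTRL_READY_ == 0x00000001, swahw32(0x00000001) == 0x00010000,
 * so mask == 0x00010001: READY is seen whether or not WORD_SWAP has
 * already been programmed.
 */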
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b26cbda5efa9..2c41894d5472 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -5,7 +5,7 @@
5config NET_VENDOR_TI 5config NET_VENDOR_TI
6 bool "Texas Instruments (TI) devices" 6 bool "Texas Instruments (TI) devices"
7 default y 7 default y
8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3)) 8 depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
9 ---help--- 9 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 10 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 11 and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 4e2a1628484d..66e025ad5df1 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -917,7 +917,7 @@ static int tile_net_setup_interrupts(struct net_device *dev)
917 ingress_irq = rc; 917 ingress_irq = rc;
918 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU); 918 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
919 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq, 919 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
920 0, NULL, NULL); 920 0, "tile_net", NULL);
921 if (rc != 0) { 921 if (rc != 0) {
922 netdev_err(dev, "request_irq failed: %d\n", rc); 922 netdev_err(dev, "request_irq failed: %d\n", rc);
923 destroy_irq(ingress_irq); 923 destroy_irq(ingress_irq);
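The fourth argument of request_irq() is the name under which the handler appears in /proc/interrupts; the hunk simply replaces the NULL that used to be passed there with "tile_net". For reference, the call shape with the arguments annotated:

rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
                 0,            /* flags: not shared, no trigger override */
                 "tile_net",   /* name shown in /proc/interrupts */
                 NULL);        /* dev_id: unused by this handler */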
@@ -1334,11 +1334,11 @@ static int tso_count_edescs(struct sk_buff *skb)
1334{ 1334{
1335 struct skb_shared_info *sh = skb_shinfo(skb); 1335 struct skb_shared_info *sh = skb_shinfo(skb);
1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1336 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1337 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1337 unsigned int data_len = skb->len - sh_len;
1338 unsigned int p_len = sh->gso_size; 1338 unsigned int p_len = sh->gso_size;
1339 long f_id = -1; /* id of the current fragment */ 1339 long f_id = -1; /* id of the current fragment */
1340 long f_size = skb->hdr_len; /* size of the current fragment */ 1340 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1341 long f_used = sh_len; /* bytes used from the current fragment */ 1341 long f_used = 0; /* bytes used from the current fragment */
1342 long n; /* size of the current piece of payload */ 1342 long n; /* size of the current piece of payload */
1343 int num_edescs = 0; 1343 int num_edescs = 0;
1344 int segment; 1344 int segment;
@@ -1353,7 +1353,7 @@ static int tso_count_edescs(struct sk_buff *skb)
1353 /* Advance as needed. */ 1353 /* Advance as needed. */
1354 while (f_used >= f_size) { 1354 while (f_used >= f_size) {
1355 f_id++; 1355 f_id++;
1356 f_size = sh->frags[f_id].size; 1356 f_size = skb_frag_size(&sh->frags[f_id]);
1357 f_used = 0; 1357 f_used = 0;
1358 } 1358 }
1359 1359
@@ -1384,13 +1384,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1384 struct iphdr *ih; 1384 struct iphdr *ih;
1385 struct tcphdr *th; 1385 struct tcphdr *th;
1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1386 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1387 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1387 unsigned int data_len = skb->len - sh_len;
1388 unsigned char *data = skb->data; 1388 unsigned char *data = skb->data;
1389 unsigned int ih_off, th_off, p_len; 1389 unsigned int ih_off, th_off, p_len;
1390 unsigned int isum_seed, tsum_seed, id, seq; 1390 unsigned int isum_seed, tsum_seed, id, seq;
1391 long f_id = -1; /* id of the current fragment */ 1391 long f_id = -1; /* id of the current fragment */
1392 long f_size = skb->hdr_len; /* size of the current fragment */ 1392 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1393 long f_used = sh_len; /* bytes used from the current fragment */ 1393 long f_used = 0; /* bytes used from the current fragment */
1394 long n; /* size of the current piece of payload */ 1394 long n; /* size of the current piece of payload */
1395 int segment; 1395 int segment;
1396 1396
@@ -1405,7 +1405,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1405 isum_seed = ((0xFFFF - ih->check) + 1405 isum_seed = ((0xFFFF - ih->check) +
1406 (0xFFFF - ih->tot_len) + 1406 (0xFFFF - ih->tot_len) +
1407 (0xFFFF - ih->id)); 1407 (0xFFFF - ih->id));
1408 tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len)); 1408 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1409 id = ntohs(ih->id); 1409 id = ntohs(ih->id);
1410 seq = ntohl(th->seq); 1410 seq = ntohl(th->seq);
1411 1411
@@ -1444,7 +1444,7 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1444 /* Advance as needed. */ 1444 /* Advance as needed. */
1445 while (f_used >= f_size) { 1445 while (f_used >= f_size) {
1446 f_id++; 1446 f_id++;
1447 f_size = sh->frags[f_id].size; 1447 f_size = skb_frag_size(&sh->frags[f_id]);
1448 f_used = 0; 1448 f_used = 0;
1449 } 1449 }
1450 1450
@@ -1478,14 +1478,14 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1478 struct tile_net_priv *priv = netdev_priv(dev); 1478 struct tile_net_priv *priv = netdev_priv(dev);
1479 struct skb_shared_info *sh = skb_shinfo(skb); 1479 struct skb_shared_info *sh = skb_shinfo(skb);
1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1480 unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1481 unsigned int data_len = skb->data_len + skb->hdr_len - sh_len; 1481 unsigned int data_len = skb->len - sh_len;
1482 unsigned int p_len = sh->gso_size; 1482 unsigned int p_len = sh->gso_size;
1483 gxio_mpipe_edesc_t edesc_head = { { 0 } }; 1483 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1484 gxio_mpipe_edesc_t edesc_body = { { 0 } }; 1484 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1485 long f_id = -1; /* id of the current fragment */ 1485 long f_id = -1; /* id of the current fragment */
1486 long f_size = skb->hdr_len; /* size of the current fragment */ 1486 long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
1487 long f_used = sh_len; /* bytes used from the current fragment */ 1487 long f_used = 0; /* bytes used from the current fragment */
1488 void *f_data = skb->data; 1488 void *f_data = skb->data + sh_len;
1489 long n; /* size of the current piece of payload */ 1489 long n; /* size of the current piece of payload */
1490 unsigned long tx_packets = 0, tx_bytes = 0; 1490 unsigned long tx_packets = 0, tx_bytes = 0;
1491 unsigned int csum_start; 1491 unsigned int csum_start;
@@ -1516,15 +1516,18 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1516 1516
1517 /* Egress the payload. */ 1517 /* Egress the payload. */
1518 while (p_used < p_len) { 1518 while (p_used < p_len) {
1519 void *va;
1519 1520
1520 /* Advance as needed. */ 1521 /* Advance as needed. */
1521 while (f_used >= f_size) { 1522 while (f_used >= f_size) {
1522 f_id++; 1523 f_id++;
1523 f_size = sh->frags[f_id].size; 1524 f_size = skb_frag_size(&sh->frags[f_id]);
1524 f_used = 0;
1525 f_data = tile_net_frag_buf(&sh->frags[f_id]); 1525 f_data = tile_net_frag_buf(&sh->frags[f_id]);
1526 f_used = 0;
1526 } 1527 }
1527 1528
1529 va = f_data + f_used;
1530
1528 /* Use bytes from the current fragment. */ 1531 /* Use bytes from the current fragment. */
1529 n = p_len - p_used; 1532 n = p_len - p_used;
1530 if (n > f_size - f_used) 1533 if (n > f_size - f_used)
@@ -1533,7 +1536,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1533 p_used += n; 1536 p_used += n;
1534 1537
1535 /* Egress a piece of the payload. */ 1538 /* Egress a piece of the payload. */
1536 edesc_body.va = va_to_tile_io_addr(f_data) + f_used; 1539 edesc_body.va = va_to_tile_io_addr(va);
1537 edesc_body.xfer_size = n; 1540 edesc_body.xfer_size = n;
1538 edesc_body.bound = !(p_used < p_len); 1541 edesc_body.bound = !(p_used < p_len);
1539 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot); 1542 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
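All three tso_* helpers above now account for the payload the same way: the TSO payload is everything past the protocol headers (skb->len - sh_len), its first piece lives in the linear area (skb_headlen(skb) - sh_len bytes), and the remainder is walked fragment by fragment with skb_frag_size(). A compressed sketch of that walk, as a hypothetical helper that only counts bytes; it mirrors the loop structure of tso_count_edescs() with the descriptor bookkeeping left out:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>

/* Count a GSO skb's TSO payload: linear remainder first, then page frags. */
static unsigned int example_payload_bytes(struct sk_buff *skb)
{
        struct skb_shared_info *sh = skb_shinfo(skb);
        unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
        unsigned int data_len = skb->len - sh_len;      /* total payload   */
        long f_id = -1;                                 /* current frag    */
        long f_size = skb_headlen(skb) - sh_len;        /* linear leftover */
        long f_used = 0;
        unsigned int counted = 0;

        while (counted < data_len) {
                long n;

                /* Move to the next page fragment once this one is used up. */
                while (f_used >= f_size) {
                        f_id++;
                        f_size = skb_frag_size(&sh->frags[f_id]);
                        f_used = 0;
                }
                n = min_t(long, f_size - f_used, data_len - counted);
                f_used += n;
                counted += n;
        }
        return counted;
}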
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 0793299bd39e..a788501e978e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -894,6 +894,8 @@ out:
894 return IRQ_HANDLED; 894 return IRQ_HANDLED;
895} 895}
896 896
897static void axienet_dma_err_handler(unsigned long data);
898
897/** 899/**
898 * axienet_open - Driver open routine. 900 * axienet_open - Driver open routine.
899 * @ndev: Pointer to net_device structure 901 * @ndev: Pointer to net_device structure
@@ -942,6 +944,10 @@ static int axienet_open(struct net_device *ndev)
942 phy_start(lp->phy_dev); 944 phy_start(lp->phy_dev);
943 } 945 }
944 946
947 /* Enable tasklets for Axi DMA error handling */
948 tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
949 (unsigned long) lp);
950
945 /* Enable interrupts for Axi DMA Tx */ 951 /* Enable interrupts for Axi DMA Tx */
946 ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev); 952 ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
947 if (ret) 953 if (ret)
@@ -950,8 +956,7 @@ static int axienet_open(struct net_device *ndev)
950 ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev); 956 ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
951 if (ret) 957 if (ret)
952 goto err_rx_irq; 958 goto err_rx_irq;
953 /* Enable tasklets for Axi DMA error handling */ 959
954 tasklet_enable(&lp->dma_err_tasklet);
955 return 0; 960 return 0;
956 961
957err_rx_irq: 962err_rx_irq:
@@ -960,6 +965,7 @@ err_tx_irq:
960 if (lp->phy_dev) 965 if (lp->phy_dev)
961 phy_disconnect(lp->phy_dev); 966 phy_disconnect(lp->phy_dev);
962 lp->phy_dev = NULL; 967 lp->phy_dev = NULL;
968 tasklet_kill(&lp->dma_err_tasklet);
963 dev_err(lp->dev, "request_irq() failed\n"); 969 dev_err(lp->dev, "request_irq() failed\n");
964 return ret; 970 return ret;
965} 971}
@@ -990,7 +996,7 @@ static int axienet_stop(struct net_device *ndev)
990 axienet_setoptions(ndev, lp->options & 996 axienet_setoptions(ndev, lp->options &
991 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 997 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
992 998
993 tasklet_disable(&lp->dma_err_tasklet); 999 tasklet_kill(&lp->dma_err_tasklet);
994 1000
995 free_irq(lp->tx_irq, ndev); 1001 free_irq(lp->tx_irq, ndev);
996 free_irq(lp->rx_irq, ndev); 1002 free_irq(lp->rx_irq, ndev);
@@ -1613,10 +1619,6 @@ static int __devinit axienet_of_probe(struct platform_device *op)
1613 goto err_iounmap_2; 1619 goto err_iounmap_2;
1614 } 1620 }
1615 1621
1616 tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
1617 (unsigned long) lp);
1618 tasklet_disable(&lp->dma_err_tasklet);
1619
1620 return 0; 1622 return 0;
1621 1623
1622err_iounmap_2: 1624err_iounmap_2:
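The net effect of these hunks is that the DMA error tasklet's lifetime now matches the interface's: axienet_open() initializes it, the open error path and axienet_stop() both tasklet_kill() it, and nothing stays armed across a close/open cycle. A minimal sketch of that lifecycle, assuming the usual axienet_local private structure and eliding the IRQ and DMA setup:

static int example_open(struct net_device *ndev)
{
        struct axienet_local *lp = netdev_priv(ndev);

        tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
                     (unsigned long)lp);
        /* ... request IRQs; on failure tasklet_kill(&lp->dma_err_tasklet) ... */
        return 0;
}

static int example_stop(struct net_device *ndev)
{
        struct axienet_local *lp = netdev_priv(ndev);

        tasklet_kill(&lp->dma_err_tasklet);     /* waits for a scheduled run */
        /* ... free IRQs and DMA resources ... */
        return 0;
}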
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 98934bdf6acf..477d6729b17f 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port)
1102{ 1102{
1103 int i; 1103 int i;
1104 1104
1105 if (!ports_open) 1105 if (!ports_open) {
1106 if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, 1106 dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
1107 POOL_ALLOC_SIZE, 32, 0))) 1107 POOL_ALLOC_SIZE, 32, 0);
1108 if (!dma_pool)
1108 return -ENOMEM; 1109 return -ENOMEM;
1110 }
1109 1111
1110 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, 1112 if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
1111 &port->desc_tab_phys))) 1113 &port->desc_tab_phys)))
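dma_pool_create() takes the struct device on whose behalf the coherent memory will be mapped, so the pool is now created against &port->netdev->dev instead of NULL (a NULL device leans on platform defaults that are not guaranteed to exist). A minimal sketch of a pool's life cycle with this API, written as a fragment from an int-returning helper; only DRV_NAME, POOL_ALLOC_SIZE and the port pointer come from the driver:

struct dma_pool *pool;
void *desc;
dma_addr_t desc_phys;

pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
                       POOL_ALLOC_SIZE, 32 /* align */, 0 /* boundary */);
if (!pool)
        return -ENOMEM;

desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_phys);
if (!desc)
        return -ENOMEM;
/* ... use desc (CPU view) and desc_phys (device view) ... */
dma_pool_free(pool, desc, desc_phys);
dma_pool_destroy(pool);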