Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/net/bnx2x/bnx2x.h                          |    4
 -rw-r--r--  drivers/net/bnx2x/bnx2x_hsi.h                      |    4
 -rw-r--r--  drivers/net/bnx2x/bnx2x_link.c                     |  173
 -rw-r--r--  drivers/net/bnx2x/bnx2x_reg.h                      |    4
 -rw-r--r--  drivers/net/gianfar.c                              |    2
 -rw-r--r--  drivers/net/irda/sh_irda.c                         |   14
 -rw-r--r--  drivers/net/ns83820.c                              |    5
 -rw-r--r--  drivers/net/usb/cdc_ncm.c                          |   19
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c                  |   93
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c              |  274
 -rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h                  |    7
 -rw-r--r--  drivers/net/wireless/ath/ath5k/base.c              |    4
 -rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_calib.c      |   10
 -rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h |  2
 -rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_hw.c         |    4
 -rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h               |    2
 -rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c      |    8
 -rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c      |    2
 -rw-r--r--  drivers/net/wireless/iwmc3200wifi/netdev.c         |    2
 -rw-r--r--  drivers/net/wireless/rt2x00/rt2x00firmware.c       |    1
 -rw-r--r--  drivers/s390/net/qeth_l2_main.c                    |   18
 -rw-r--r--  drivers/s390/net/qeth_l3_main.c                    |   22
 22 files changed, 459 insertions(+), 215 deletions(-)
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index a6cd335c943..8e4183717d9 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,8 +22,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.62.00-3"
-#define DRV_MODULE_RELDATE      "2010/12/21"
+#define DRV_MODULE_VERSION      "1.62.00-4"
+#define DRV_MODULE_RELDATE      "2011/01/18"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 6238d4f6398..548f5631c0d 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -352,6 +352,10 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 #define PORT_HW_CFG_LANE_SWAP_CFG_31203120          0x0000d8d8
                                     /* forced only */
 #define PORT_HW_CFG_LANE_SWAP_CFG_32103210          0x0000e4e4
+	/* Indicate whether to swap the external phy polarity */
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK          0x00010000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED      0x00000000
+#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED       0x00010000
 
 	u32 external_phy_config;
 #define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK        0xff000000
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 43b0de24f39..7160ec51093 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -1573,7 +1573,7 @@ static void bnx2x_set_aer_mmd_xgxs(struct link_params *params,
 
 	offset = phy->addr + ser_lane;
 	if (CHIP_IS_E2(bp))
-		aer_val = 0x2800 + offset - 1;
+		aer_val = 0x3800 + offset - 1;
 	else
 		aer_val = 0x3800 + offset;
 	CL45_WR_OVER_CL22(bp, phy,
@@ -3166,7 +3166,23 @@ u8 bnx2x_set_led(struct link_params *params,
 		if (!vars->link_up)
 			break;
 	case LED_MODE_ON:
-		if (SINGLE_MEDIA_DIRECT(params)) {
+		if (params->phy[EXT_PHY1].type ==
+		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/**
+			 * This is a work-around for E2+8727 Configurations
+			 */
+			if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000){
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
 			/**
 			 * This is a work-around for HW issue found when link
 			 * is up in CL73
@@ -3854,11 +3870,14 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
 			      pause_result);
 	}
 }
-
-static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+static u8 bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 					      struct bnx2x_phy *phy,
 					      u8 port)
 {
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	u8 rc = 0;
+
 	/* Boot port from external ROM */
 	/* EDC grst */
 	bnx2x_cl45_write(bp, phy,
@@ -3888,14 +3907,45 @@ static void bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
 			 MDIO_PMA_REG_GEN_CTRL,
 			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
 
-	/* wait for 120ms for code download via SPI port */
-	msleep(120);
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
+
+	/* 8073 sometimes taking longer to download */
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_8727_external_rom_boot port %x:"
+				 "Download failed. fw version = 0x%x\n",
+				 port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+		msleep(1);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+		 ((fw_msgout & 0xff) != 0x03 && (phy->type ==
+		  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
 
 	/* Clear ser_boot_ctl bit */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
 	bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+	DP(NETIF_MSG_LINK,
+		 "bnx2x_8073_8727_external_rom_boot port %x:"
+		 "Download complete. fw version = 0x%x\n",
+		 port, fw_ver1);
+
+	return rc;
 }
 
 static void bnx2x_8073_set_xaui_low_power_mode(struct bnx2x *bp,
@@ -4108,6 +4158,25 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
 
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
+	/**
+	 * If this is forced speed, set to KR or KX (all other are not
+	 * supported)
+	 */
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+
 	/* Enable CL37 BAM */
 	if (REG_RD(bp, params->shmem_base +
 			offsetof(struct shmem_region, dev_info.
@@ -4314,8 +4383,32 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
 	}
 
 	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/**
+			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it`s in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      "the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5062,6 +5155,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
 		else
 			vars->line_speed = SPEED_10000;
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
 	}
 	return link_up;
 }
@@ -5758,8 +5852,11 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
 		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
 			   params->port);
 	}
-	if (link_up)
+	if (link_up) {
 		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
 
 	if ((DUAL_MEDIA(params)) &&
 	    (phy->req_line_speed == SPEED_1000)) {
@@ -5875,10 +5972,26 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
 			 MDIO_PMA_REG_8481_LED2_MASK,
 			 0x18);
 
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD,
 			 MDIO_PMA_REG_8481_LED3_MASK,
-			 0x0040);
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LED3_BLINK,
+			0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
 
 	/* 'Interrupt Mask' */
 	bnx2x_cl45_write(bp, phy,
@@ -6126,6 +6239,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
 		/* Check link 10G */
 		if (val2 & (1<<11)) {
 			vars->line_speed = SPEED_10000;
+			vars->duplex = DUPLEX_FULL;
 			link_up = 1;
 			bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
 		} else { /* Check Legacy speed link */
@@ -6489,6 +6603,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
 			MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
 			&val2);
 		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
 		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
 		   val2, (val2 & (1<<14)));
 		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
@@ -7663,7 +7778,6 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
 
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
 		if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
@@ -7671,19 +7785,9 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp,
 
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 		   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8073_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
 
 		/* Only set bit 10 = 1 (Tx power down) */
 		bnx2x_cl45_read(bp, phy_blk[port],
@@ -7848,27 +7952,17 @@ static u8 bnx2x_8727_common_init_phy(struct bnx2x *bp,
 	}
 	/* PART2 - Download firmware to both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-		u16 fw_ver1;
 		if (CHIP_IS_E2(bp))
 			port_of_path = 0;
 		else
 			port_of_path = port;
 		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
 		   phy_blk[port]->addr);
-		bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
-						  port_of_path);
-		bnx2x_cl45_read(bp, phy_blk[port],
-			      MDIO_PMA_DEVAD,
-			      MDIO_PMA_REG_ROM_VER1, &fw_ver1);
-		if (fw_ver1 == 0 || fw_ver1 == 0x4321) {
-			DP(NETIF_MSG_LINK,
-				 "bnx2x_8727_common_init_phy port %x:"
-				 "Download failed. fw version = 0x%x\n",
-				 port, fw_ver1);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
 			return -EINVAL;
-		}
-	}
 
+	}
 	return 0;
 }
 
@@ -7916,6 +8010,7 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
 			      u32 shmem2_base_path[], u32 chip_id)
 {
 	u8 rc = 0;
+	u32 phy_ver;
 	u8 phy_index;
 	u32 ext_phy_type, ext_phy_config;
 	DP(NETIF_MSG_LINK, "Begin common phy init\n");
@@ -7923,6 +8018,16 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
 	if (CHIP_REV_IS_EMUL(bp))
 		return 0;
 
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
+		return 0;
+	}
+
 	/* Read the ext_phy_type for arbitrary port(0) */
 	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
 	      phy_index++) {
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index c939683e3d6..e01330bb36c 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -6194,7 +6194,11 @@ The other bits are reserved and should be zero*/
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
 #define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER	0x0100
 #define MDIO_CTL_REG_84823_MEDIA_FIBER_1G		0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
 
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
 
 #define IGU_FUNC_BASE			0x0400
 
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 119aa2000c2..5ed8f9f9419 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1920,7 +1920,7 @@ int startup_gfar(struct net_device *ndev)
 	if (err) {
 		for (j = 0; j < i; j++)
 			free_grp_irqs(&priv->gfargrp[j]);
-			goto irq_fail;
+		goto irq_fail;
 		}
 	}
 
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 9e3f4f54281..4488bd581ec 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -635,7 +635,7 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	ret = sh_irda_set_baudrate(self, speed);
 	if (ret < 0)
-		return ret;
+		goto sh_irda_hard_xmit_end;
 
 	self->tx_buff.len = 0;
 	if (skb->len) {
@@ -652,11 +652,21 @@ static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 		sh_irda_write(self, IRTFLR, self->tx_buff.len);
 		sh_irda_write(self, IRTCTR, ARMOD | TE);
-	}
+	} else
+		goto sh_irda_hard_xmit_end;
 
 	dev_kfree_skb(skb);
 
 	return 0;
+
+sh_irda_hard_xmit_end:
+	sh_irda_set_baudrate(self, 9600);
+	netif_wake_queue(self->ndev);
+	sh_irda_rcv_ctrl(self, 1);
+	dev_kfree_skb(skb);
+
+	return ret;
+
 }
 
 static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 84134c766f3..a41b2cf4d91 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1988,12 +1988,11 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
 	}
 
 	ndev = alloc_etherdev(sizeof(struct ns83820));
-	dev = PRIV(ndev);
-
 	err = -ENOMEM;
-	if (!dev)
+	if (!ndev)
 		goto out;
 
+	dev = PRIV(ndev);
 	dev->ndev = ndev;
 
 	spin_lock_init(&dev->rx_info.lock);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d776c4a8d3c..04e8ce14a1d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"30-Nov-2010"
+#define	DRIVER_VERSION				"17-Jan-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
@@ -868,15 +868,19 @@ static void cdc_ncm_tx_timeout(unsigned long arg)
 	if (ctx->tx_timer_pending != 0) {
 		ctx->tx_timer_pending--;
 		restart = 1;
-	} else
+	} else {
 		restart = 0;
+	}
 
 	spin_unlock(&ctx->mtx);
 
-	if (restart)
+	if (restart) {
+		spin_lock(&ctx->mtx);
 		cdc_ncm_tx_timeout_start(ctx);
-	else if (ctx->netdev != NULL)
+		spin_unlock(&ctx->mtx);
+	} else if (ctx->netdev != NULL) {
 		usbnet_start_xmit(NULL, ctx->netdev);
+	}
 }
 
 static struct sk_buff *
@@ -900,7 +904,6 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 	skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
 	if (ctx->tx_curr_skb != NULL)
 		need_timer = 1;
-	spin_unlock(&ctx->mtx);
 
 	/* Start timer, if there is a remaining skb */
 	if (need_timer)
@@ -908,6 +911,8 @@ cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 
 	if (skb_out)
 		dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+
+	spin_unlock(&ctx->mtx);
 	return skb_out;
 
 error:
@@ -1020,8 +1025,8 @@ static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
 		if (((offset + temp) > actlen) ||
 		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 			pr_debug("invalid frame detected (ignored)"
-					"offset[%u]=%u, length=%u, skb=%p\n",
-					x, offset, temp, skb_in);
+				"offset[%u]=%u, length=%u, skb=%p\n",
+				x, offset, temp, skb_in);
 			if (!x)
 				goto error;
 			break;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d143e8b72b5..cc14b4a7504 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -48,6 +48,9 @@ static atomic_t devices_found;
 static int enable_mq = 1;
 static int irq_share_mode;
 
+static void
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+
 /*
  * Enable/Disable the given intr
  */
@@ -139,9 +142,13 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
 	u32 ret;
 	int i;
+	unsigned long flags;
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+
 	adapter->link_speed = ret >> 16;
 	if (ret & 1) { /* Link is up. */
 		printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
@@ -183,8 +190,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 
 	/* Check if there is an error on xmit/recv queues */
 	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
+		spin_lock(&adapter->cmd_lock);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
+		spin_unlock(&adapter->cmd_lock);
 
 		for (i = 0; i < adapter->num_tx_queues; i++)
 			if (adapter->tqd_start[i].status.stopped)
@@ -804,30 +813,25 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 				skb_transport_header(skb))->doff * 4;
 		ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
 	} else {
-		unsigned int pull_size;
-
 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 			ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
 
 			if (ctx->ipv4) {
 				struct iphdr *iph = (struct iphdr *)
 						    skb_network_header(skb);
-				if (iph->protocol == IPPROTO_TCP) {
-					pull_size = ctx->eth_ip_hdr_size +
-						    sizeof(struct tcphdr);
-
-					if (unlikely(!pskb_may_pull(skb,
-								    pull_size))) {
-						goto err;
-					}
+				if (iph->protocol == IPPROTO_TCP)
 					ctx->l4_hdr_size = ((struct tcphdr *)
 					   skb_transport_header(skb))->doff * 4;
-				} else if (iph->protocol == IPPROTO_UDP) {
+				else if (iph->protocol == IPPROTO_UDP)
+					/*
+					 * Use tcp header size so that bytes to
+					 * be copied are more than required by
+					 * the device.
+					 */
 					ctx->l4_hdr_size =
-							sizeof(struct udphdr);
-				} else {
+							sizeof(struct tcphdr);
+				else
 					ctx->l4_hdr_size = 0;
-				}
 			} else {
 				/* for simplicity, don't copy L4 headers */
 				ctx->l4_hdr_size = 0;
@@ -1859,18 +1863,14 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	struct Vmxnet3_DriverShared *shared = adapter->shared;
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	if (grp) {
 		/* add vlan rx stripping. */
 		if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) {
 			int i;
-			struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
 			adapter->vlan_grp = grp;
 
-			/* update FEATURES to device */
-			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-					       VMXNET3_CMD_UPDATE_FEATURE);
 			/*
 			 * Clear entire vfTable; then enable untagged pkts.
 			 * Note: setting one entry in vfTable to non-zero turns
@@ -1880,8 +1880,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 				vfTable[i] = 0;
 
 			VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
+			spin_lock_irqsave(&adapter->cmd_lock, flags);
 			VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 					       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+			spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		} else {
 			printk(KERN_ERR "%s: vlan_rx_register when device has "
 			       "no NETIF_F_HW_VLAN_RX\n", netdev->name);
@@ -1900,13 +1902,10 @@ vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 			 */
 			vfTable[i] = 0;
 		}
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
-
-		/* update FEATURES to device */
-		devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;
-		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
-				       VMXNET3_CMD_UPDATE_FEATURE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 }
@@ -1939,10 +1938,13 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1951,10 +1953,13 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
+	unsigned long flags;
 
 	VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -1985,6 +1990,7 @@ static void
 vmxnet3_set_mc(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 	struct Vmxnet3_RxFilterConf *rxConf =
 		&adapter->shared->devRead.rxFilterConf;
 	u8 *new_table = NULL;
@@ -2020,6 +2026,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 		rxConf->mfTablePA = 0;
 	}
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	if (new_mode != rxConf->rxMode) {
 		rxConf->rxMode = cpu_to_le32(new_mode);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -2028,6 +2035,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_MAC_FILTERS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	kfree(new_table);
 }
@@ -2080,10 +2088,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		devRead->misc.uptFeatures |= UPT1_F_LRO;
 		devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
 	}
-	if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) &&
-	    adapter->vlan_grp) {
+	if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
 		devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
-	}
 
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
@@ -2168,6 +2174,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	/* rx filter settings */
 	devRead->rxFilterConf.rxMode = 0;
 	vmxnet3_restore_vlan(adapter);
+	vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
+
 	/* the rest are already zeroed */
 }
 
@@ -2177,6 +2185,7 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
 	int err, i;
 	u32 ret;
+	unsigned long flags;
 
 	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
 		" ring sizes %u %u %u\n", adapter->netdev->name,
@@ -2206,9 +2215,11 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 			       adapter->shared_pa));
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
 			       adapter->shared_pa));
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_ACTIVATE_DEV);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	if (ret != 0) {
 		printk(KERN_ERR "Failed to activate dev %s: error %u\n",
@@ -2255,7 +2266,10 @@ rq_err:
 void
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 }
 
 
@@ -2263,12 +2277,15 @@ int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
 	int i;
+	unsigned long flags;
 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
 		return 0;
 
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_QUIESCE_DEV);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_disable_all_intrs(adapter);
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2426,7 +2443,7 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
 	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
 	ring0_size = (ring0_size + sz - 1) / sz * sz;
-	ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
+	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
 			   sz * sz);
 	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
 	comp_size = ring0_size + ring1_size;
@@ -2695,7 +2712,7 @@ vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
 			break;
 		} else {
 			/* If fails to enable required number of MSI-x vectors
-			 * try enabling 3 of them. One each for rx, tx and event
+			 * try enabling minimum number of vectors required.
 			 */
 			vectors = vector_threshold;
 			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
@@ -2718,9 +2735,11 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 	u32 cfg;
 
 	/* intr settings */
+	spin_lock(&adapter->cmd_lock);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_GET_CONF_INTR);
 	cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+	spin_unlock(&adapter->cmd_lock);
 	adapter->intr.type = cfg & 0x3;
 	adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
@@ -2755,7 +2774,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 		 */
 		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
-			    || adapter->num_rx_queues != 2) {
+			    || adapter->num_rx_queues != 1) {
 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
 				printk(KERN_ERR "Number of rx queues : 1\n");
 				adapter->num_rx_queues = 1;
@@ -2905,6 +2924,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 
+	spin_lock_init(&adapter->cmd_lock);
 	adapter->shared = pci_alloc_consistent(adapter->pdev,
 			  sizeof(struct Vmxnet3_DriverShared),
 			  &adapter->shared_pa);
@@ -3108,11 +3128,15 @@ vmxnet3_suspend(struct device *device)
 	u8 *arpreq;
 	struct in_device *in_dev;
 	struct in_ifaddr *ifa;
+	unsigned long flags;
 	int i = 0;
 
 	if (!netif_running(netdev))
 		return 0;
 
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_queue[i].napi);
+
 	vmxnet3_disable_all_intrs(adapter);
 	vmxnet3_free_irqs(adapter);
 	vmxnet3_free_intr_resources(adapter);
@@ -3188,8 +3212,10 @@ skip_arp:
 	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
 								 pmConf));
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	pci_save_state(pdev);
 	pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
@@ -3204,7 +3230,8 @@ skip_arp:
 static int
 vmxnet3_resume(struct device *device)
 {
-	int err;
+	int err, i = 0;
+	unsigned long flags;
 	struct pci_dev *pdev = to_pci_dev(device);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -3232,10 +3259,14 @@ vmxnet3_resume(struct device *device)
 
 	pci_enable_wake(pdev, PCI_D0, 0);
 
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 			       VMXNET3_CMD_UPDATE_PMCFG);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	vmxnet3_alloc_intr_resources(adapter);
 	vmxnet3_request_irqs(adapter);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	vmxnet3_enable_all_intrs(adapter);
 
 	return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 8e17fc8a7fe..81254be85b9 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -45,6 +45,7 @@ static int
 vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
 
 	if (adapter->rxcsum != val) {
 		adapter->rxcsum = val;
@@ -56,8 +57,10 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 			adapter->shared->devRead.misc.uptFeatures &=
 			~UPT1_F_RXCSUM;
 
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 		}
 	}
 	return 0;
@@ -68,76 +71,78 @@ vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_dev_stats[] = {
 	/* description,         offset */
-	{ "TSO pkts tx",        offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
-	{ "TSO bytes tx",       offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
-	{ "ucast pkts tx",      offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
-	{ "ucast bytes tx",     offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
-	{ "mcast pkts tx",      offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
-	{ "mcast bytes tx",     offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
-	{ "bcast pkts tx",      offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
-	{ "bcast bytes tx",     offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
-	{ "pkts tx err",        offsetof(struct UPT1_TxStats, pktsTxError) },
-	{ "pkts tx discard",    offsetof(struct UPT1_TxStats, pktsTxDiscard) },
+	{ "Tx Queue#",          0 },
+	{ " TSO pkts tx",       offsetof(struct UPT1_TxStats, TSOPktsTxOK) },
+	{ " TSO bytes tx",      offsetof(struct UPT1_TxStats, TSOBytesTxOK) },
+	{ " ucast pkts tx",     offsetof(struct UPT1_TxStats, ucastPktsTxOK) },
+	{ " ucast bytes tx",    offsetof(struct UPT1_TxStats, ucastBytesTxOK) },
+	{ " mcast pkts tx",     offsetof(struct UPT1_TxStats, mcastPktsTxOK) },
+	{ " mcast bytes tx",    offsetof(struct UPT1_TxStats, mcastBytesTxOK) },
+	{ " bcast pkts tx",     offsetof(struct UPT1_TxStats, bcastPktsTxOK) },
+	{ " bcast bytes tx",    offsetof(struct UPT1_TxStats, bcastBytesTxOK) },
+	{ " pkts tx err",       offsetof(struct UPT1_TxStats, pktsTxError) },
+	{ " pkts tx discard",   offsetof(struct UPT1_TxStats, pktsTxDiscard) },
 };
 
 /* per tq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_tq_driver_stats[] = {
 	/* description,         offset */
-	{"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
+	{" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats,
 					  drop_total) },
 	{ " too many frags",    offsetof(struct vmxnet3_tq_driver_stats,
 					 drop_too_many_frags) },
 	{ " giant hdr",         offsetof(struct vmxnet3_tq_driver_stats,
 					 drop_oversized_hdr) },
 	{ " hdr err",           offsetof(struct vmxnet3_tq_driver_stats,
 					 drop_hdr_inspect_err) },
 	{ " tso",               offsetof(struct vmxnet3_tq_driver_stats,
 					 drop_tso) },
-	{ "ring full",          offsetof(struct vmxnet3_tq_driver_stats,
+	{ " ring full",         offsetof(struct vmxnet3_tq_driver_stats,
 					 tx_ring_full) },
-	{ "pkts linearized",    offsetof(struct vmxnet3_tq_driver_stats,
+	{ " pkts linearized",   offsetof(struct vmxnet3_tq_driver_stats,
 					 linearized) },
-	{ "hdr cloned",         offsetof(struct vmxnet3_tq_driver_stats,
+	{ " hdr cloned",        offsetof(struct vmxnet3_tq_driver_stats,
 					 copy_skb_header) },
-	{ "giant hdr",          offsetof(struct vmxnet3_tq_driver_stats,
+	{ " giant hdr",         offsetof(struct vmxnet3_tq_driver_stats,
 					 oversized_hdr) },
 };
 
 /* per rq stats maintained by the device */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_dev_stats[] = {
-	{ "LRO pkts rx",        offsetof(struct UPT1_RxStats, LROPktsRxOK) },
-	{ "LRO byte rx",        offsetof(struct UPT1_RxStats, LROBytesRxOK) },
-	{ "ucast pkts rx",      offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
-	{ "ucast bytes rx",     offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
-	{ "mcast pkts rx",      offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
-	{ "mcast bytes rx",     offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
-	{ "bcast pkts rx",      offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
-	{ "bcast bytes rx",     offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
-	{ "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
-	{ "pkts rx err",        offsetof(struct UPT1_RxStats, pktsRxError) },
+	{ "Rx Queue#",          0 },
+	{ " LRO pkts rx",       offsetof(struct UPT1_RxStats, LROPktsRxOK) },
+	{ " LRO byte rx",       offsetof(struct UPT1_RxStats, LROBytesRxOK) },
+	{ " ucast pkts rx",     offsetof(struct UPT1_RxStats, ucastPktsRxOK) },
+	{ " ucast bytes rx",    offsetof(struct UPT1_RxStats, ucastBytesRxOK) },
+	{ " mcast pkts rx",     offsetof(struct UPT1_RxStats, mcastPktsRxOK) },
+	{ " mcast bytes rx",    offsetof(struct UPT1_RxStats, mcastBytesRxOK) },
+	{ " bcast pkts rx",     offsetof(struct UPT1_RxStats, bcastPktsRxOK) },
+	{ " bcast bytes rx",    offsetof(struct UPT1_RxStats, bcastBytesRxOK) },
+	{ " pkts rx OOB",       offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) },
+	{ " pkts rx err",       offsetof(struct UPT1_RxStats, pktsRxError) },
 };
 
 /* per rq stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_rq_driver_stats[] = {
 	/* description,         offset */
-	{ "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
+	{ " drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats,
 					   drop_total) },
 	{ " err",               offsetof(struct vmxnet3_rq_driver_stats,
 					 drop_err) },
 	{ " fcs",               offsetof(struct vmxnet3_rq_driver_stats,
 					 drop_fcs) },
-	{ "rx buf alloc fail",  offsetof(struct vmxnet3_rq_driver_stats,
+	{ " rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats,
 					 rx_buf_alloc_failure) },
 };
 
 /* gloabl stats maintained by the driver */
 static const struct vmxnet3_stat_desc
 vmxnet3_global_stats[] = {
 	/* description,         offset */
 	{ "tx timeout count",   offsetof(struct vmxnet3_adapter,
 					 tx_timeout_count) }
 };
 
@@ -151,12 +156,15 @@ vmxnet3_get_stats(struct net_device *netdev)
 	struct UPT1_TxStats *devTxStats;
 	struct UPT1_RxStats *devRxStats;
 	struct net_device_stats *net_stats = &netdev->stats;
+	unsigned long flags;
 	int i;
 
 	adapter = netdev_priv(netdev);
 
 	/* Collect the dev stats into the shared area */
+	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
+	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
 	memset(net_stats, 0, sizeof(*net_stats));
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -193,12 +201,15 @@ vmxnet3_get_stats(struct net_device *netdev)
 static int
 vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(vmxnet3_tq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_tq_driver_stats) +
-			ARRAY_SIZE(vmxnet3_rq_dev_stats) +
-			ARRAY_SIZE(vmxnet3_rq_driver_stats) +
+		return (ARRAY_SIZE(vmxnet3_tq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_tq_driver_stats)) *
+		       adapter->num_tx_queues +
+		       (ARRAY_SIZE(vmxnet3_rq_dev_stats) +
+			ARRAY_SIZE(vmxnet3_rq_driver_stats)) *
+		       adapter->num_rx_queues +
 			ARRAY_SIZE(vmxnet3_global_stats);
 	default:
 		return -EOPNOTSUPP;
@@ -206,10 +217,16 @@ vmxnet3_get_sset_count(struct net_device *netdev, int sset)
 }
 
 
+/* Should be multiple of 4 */
+#define NUM_TX_REGS	8
+#define NUM_RX_REGS	12
+
 static int
 vmxnet3_get_regs_len(struct net_device *netdev)
 {
-	return 20 * sizeof(u32);
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	return (adapter->num_tx_queues * NUM_TX_REGS * sizeof(u32) +
+		adapter->num_rx_queues * NUM_RX_REGS * sizeof(u32));
 }
 
 
@@ -240,29 +257,37 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 static void
 vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
 {
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	if (stringset == ETH_SS_STATS) {
-		int i;
-
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
-			memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
-		for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) {
-			memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
-			       ETH_GSTRING_LEN);
-			buf += ETH_GSTRING_LEN;
-		}
+		int i, j;
+		for (j = 0; j < adapter->num_tx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_tq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_tq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+		}
+
+		for (j = 0; j < adapter->num_rx_queues; j++) {
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) {
+				memcpy(buf, vmxnet3_rq_dev_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+			for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats);
+			     i++) {
+				memcpy(buf, vmxnet3_rq_driver_stats[i].desc,
+				       ETH_GSTRING_LEN);
+				buf += ETH_GSTRING_LEN;
+			}
+		}
+
 		for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) {
 			memcpy(buf, vmxnet3_global_stats[i].desc,
 			       ETH_GSTRING_LEN);
@@ -277,6 +302,7 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
 	u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
+	unsigned long flags;
 
 	if (data & ~ETH_FLAG_LRO)
 		return -EOPNOTSUPP;
@@ -292,8 +318,10 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
 		else
 			adapter->shared->devRead.misc.uptFeatures &=
 							~UPT1_F_LRO;
+		spin_lock_irqsave(&adapter->cmd_lock, flags);
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_UPDATE_FEATURE);
+		spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 	}
 	return 0;
 }
@@ -303,30 +331,41 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
303 struct ethtool_stats *stats, u64 *buf) 331 struct ethtool_stats *stats, u64 *buf)
304{ 332{
305 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 333 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
334 unsigned long flags;
306 u8 *base; 335 u8 *base;
307 int i; 336 int i;
308 int j = 0; 337 int j = 0;
309 338
339 spin_lock_irqsave(&adapter->cmd_lock, flags);
310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 340 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
341 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
311 342
312 /* this does assume each counter is 64-bit wide */ 343 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */ 344 for (j = 0; j < adapter->num_tx_queues; j++) {
314 345 base = (u8 *)&adapter->tqd_start[j].stats;
315 base = (u8 *)&adapter->tqd_start[j].stats; 346 *buf++ = (u64)j;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 347 for (i = 1; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 348 *buf++ = *(u64 *)(base +
318 349 vmxnet3_tq_dev_stats[i].offset);
319 base = (u8 *)&adapter->tx_queue[j].stats; 350
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 351 base = (u8 *)&adapter->tx_queue[j].stats;
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 352 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
322 353 *buf++ = *(u64 *)(base +
323 base = (u8 *)&adapter->rqd_start[j].stats; 354 vmxnet3_tq_driver_stats[i].offset);
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 355 }
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 356
327 base = (u8 *)&adapter->rx_queue[j].stats; 357 for (j = 0; j < adapter->num_tx_queues; j++) {
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 358 base = (u8 *)&adapter->rqd_start[j].stats;
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 359 *buf++ = (u64) j;
360 for (i = 1; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
361 *buf++ = *(u64 *)(base +
362 vmxnet3_rq_dev_stats[i].offset);
363
364 base = (u8 *)&adapter->rx_queue[j].stats;
365 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
366 *buf++ = *(u64 *)(base +
367 vmxnet3_rq_driver_stats[i].offset);
368 }
330 369
331 base = (u8 *)adapter; 370 base = (u8 *)adapter;
332 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) 371 for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++)
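Note on the hunk above: the rewritten stats dump walks a {description, offset} table per queue instead of assuming a single queue; each per-queue block starts with the queue number, and the device-stats loops start at i = 1 because that first table slot is reused for the index. A standalone userspace sketch of the table-driven copy (struct, field and counter names invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct tq_stats {
	uint64_t tso_pkts;
	uint64_t drop_total;
};

struct stat_desc {
	const char *desc;
	size_t offset;
};

static const struct stat_desc tq_table[] = {
	{ "TSO pkts tx", offsetof(struct tq_stats, tso_pkts) },
	{ "pkts tx err", offsetof(struct tq_stats, drop_total) },
};

int main(void)
{
	struct tq_stats queues[2] = { { 10, 1 }, { 42, 0 } };
	uint64_t buf[16], *p = buf;

	for (size_t q = 0; q < 2; q++) {
		const uint8_t *base = (const uint8_t *)&queues[q];

		*p++ = q;	/* queue number, like the driver's first slot */
		for (size_t i = 0; i < sizeof(tq_table) / sizeof(tq_table[0]); i++)
			*p++ = *(const uint64_t *)(base + tq_table[i].offset);
	}
	for (uint64_t *v = buf; v < p; v++)
		printf("%llu\n", (unsigned long long)*v);
	return 0;
}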
@@ -339,7 +378,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 378{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 379 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 380 u32 *buf = p;
342 int i = 0; 381 int i = 0, j = 0;
343 382
344 memset(p, 0, vmxnet3_get_regs_len(netdev)); 383 memset(p, 0, vmxnet3_get_regs_len(netdev));
345 384
@@ -348,31 +387,35 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
348 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 387 /* Update vmxnet3_get_regs_len if we want to dump more registers */
349 388
350 /* make each ring use multiple of 16 bytes */ 389 /* make each ring use multiple of 16 bytes */
351/* TODO change this for multiple queues */ 390 for (i = 0; i < adapter->num_tx_queues; i++) {
352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill; 391 buf[j++] = adapter->tx_queue[i].tx_ring.next2fill;
353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp; 392 buf[j++] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen; 393 buf[j++] = adapter->tx_queue[i].tx_ring.gen;
355 buf[3] = 0; 394 buf[j++] = 0;
356 395
357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc; 396 buf[j++] = adapter->tx_queue[i].comp_ring.next2proc;
358 buf[5] = adapter->tx_queue[i].comp_ring.gen; 397 buf[j++] = adapter->tx_queue[i].comp_ring.gen;
359 buf[6] = adapter->tx_queue[i].stopped; 398 buf[j++] = adapter->tx_queue[i].stopped;
360 buf[7] = 0; 399 buf[j++] = 0;
361 400 }
362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill; 401
363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp; 402 for (i = 0; i < adapter->num_rx_queues; i++) {
364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen; 403 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2fill;
365 buf[11] = 0; 404 buf[j++] = adapter->rx_queue[i].rx_ring[0].next2comp;
366 405 buf[j++] = adapter->rx_queue[i].rx_ring[0].gen;
367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill; 406 buf[j++] = 0;
368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp; 407
369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen; 408 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2fill;
370 buf[15] = 0; 409 buf[j++] = adapter->rx_queue[i].rx_ring[1].next2comp;
371 410 buf[j++] = adapter->rx_queue[i].rx_ring[1].gen;
372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc; 411 buf[j++] = 0;
373 buf[17] = adapter->rx_queue[i].comp_ring.gen; 412
374 buf[18] = 0; 413 buf[j++] = adapter->rx_queue[i].comp_ring.next2proc;
375 buf[19] = 0; 414 buf[j++] = adapter->rx_queue[i].comp_ring.gen;
415 buf[j++] = 0;
416 buf[j++] = 0;
417 }
418
376} 419}
377 420
378 421
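Note on the hunk above: with the register dump looping over all queues, the buffer sized by vmxnet3_get_regs_len() has to grow with the queue counts; the loops emit 8 u32 words per tx queue and 12 per rx queue. A quick standalone check of that arithmetic (example queue counts only, and assuming no extra header words beyond what the loops write):

#include <stdio.h>

int main(void)
{
	unsigned int num_tx_queues = 4, num_rx_queues = 4;	/* example counts */
	unsigned int words = 8 * num_tx_queues + 12 * num_rx_queues;

	printf("regs dump: %u words, %zu bytes\n",
	       words, words * sizeof(unsigned int));
	return 0;
}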
@@ -574,6 +617,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p) 617 const struct ethtool_rxfh_indir *p)
575{ 618{
576 unsigned int i; 619 unsigned int i;
620 unsigned long flags;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf; 622 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579 623
@@ -592,8 +636,10 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
592 for (i = 0; i < rssConf->indTableSize; i++) 636 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i]; 637 rssConf->indTable[i] = p->ring_index[i];
594 638
639 spin_lock_irqsave(&adapter->cmd_lock, flags);
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 640 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT); 641 VMXNET3_CMD_UPDATE_RSSIDT);
642 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
597 643
598 return 0; 644 return 0;
599 645
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 7fadeed37f0..fb5d245ac87 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
72 72
73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 73/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
75 75
76#if defined(CONFIG_PCI_MSI) 76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */ 77 /* RSS only makes sense if MSI-X is supported. */
@@ -289,7 +289,7 @@ struct vmxnet3_rx_queue {
289 289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \ 290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1) 291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */ 292#define VMXNET3_LINUX_MIN_MSIX_VECT 2 /* 1 for tx-rx pair and 1 for event */
293 293
294 294
295struct vmxnet3_intr { 295struct vmxnet3_intr {
@@ -317,6 +317,7 @@ struct vmxnet3_adapter {
317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES]; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
318 struct vlan_group *vlan_grp; 318 struct vlan_group *vlan_grp;
319 struct vmxnet3_intr intr; 319 struct vmxnet3_intr intr;
320 spinlock_t cmd_lock;
320 struct Vmxnet3_DriverShared *shared; 321 struct Vmxnet3_DriverShared *shared;
321 struct Vmxnet3_PMConf *pm_conf; 322 struct Vmxnet3_PMConf *pm_conf;
322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */ 323 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 019a74d533a..09ae4ef0fd5 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2294,6 +2294,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2294 int i; 2294 int i;
2295 bool needreset = false; 2295 bool needreset = false;
2296 2296
2297 mutex_lock(&sc->lock);
2298
2297 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) { 2299 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
2298 if (sc->txqs[i].setup) { 2300 if (sc->txqs[i].setup) {
2299 txq = &sc->txqs[i]; 2301 txq = &sc->txqs[i];
@@ -2321,6 +2323,8 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2321 ath5k_reset(sc, NULL, true); 2323 ath5k_reset(sc, NULL, true);
2322 } 2324 }
2323 2325
2326 mutex_unlock(&sc->lock);
2327
2324 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2328 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
2325 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2329 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2326} 2330}
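Note on the hunk above: the two added lines hold sc->lock for the whole queue scan in the tx-completion watchdog, so the ath5k_reset() it may trigger presumably runs under the same mutex the driver's other reset/configuration paths take. A kernel-style sketch of that shape (names invented; not the ath5k code):

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo_softc {
	struct mutex lock;			/* serializes reset/config */
	struct delayed_work tx_complete_work;
};

static void demo_tx_complete_poll(struct work_struct *work)
{
	struct demo_softc *sc = container_of(work, struct demo_softc,
					     tx_complete_work.work);

	mutex_lock(&sc->lock);
	/* ... walk the tx queues and reset the hardware if they look stuck ... */
	mutex_unlock(&sc->lock);

	schedule_delayed_work(&sc->tx_complete_work, msecs_to_jiffies(1000));
}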
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index ea2e7d714bd..5e300bd3d26 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -679,10 +679,6 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
679 679
680 /* Do NF cal only at longer intervals */ 680 /* Do NF cal only at longer intervals */
681 if (longcal || nfcal_pending) { 681 if (longcal || nfcal_pending) {
682 /* Do periodic PAOffset Cal */
683 ar9002_hw_pa_cal(ah, false);
684 ar9002_hw_olc_temp_compensation(ah);
685
686 /* 682 /*
687 * Get the value from the previous NF cal and update 683 * Get the value from the previous NF cal and update
688 * history buffer. 684 * history buffer.
@@ -697,8 +693,12 @@ static bool ar9002_hw_calibrate(struct ath_hw *ah,
697 ath9k_hw_loadnf(ah, ah->curchan); 693 ath9k_hw_loadnf(ah, ah->curchan);
698 } 694 }
699 695
700 if (longcal) 696 if (longcal) {
701 ath9k_hw_start_nfcal(ah, false); 697 ath9k_hw_start_nfcal(ah, false);
698 /* Do periodic PAOffset Cal */
699 ar9002_hw_pa_cal(ah, false);
700 ar9002_hw_olc_temp_compensation(ah);
701 }
702 } 702 }
703 703
704 return iscaldone; 704 return iscaldone;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 81f9cf294de..9ecca93392e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1842,7 +1842,7 @@ static const u32 ar9300_2p2_soc_preamble[][2] = {
1842 1842
1843static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = { 1843static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2[][2] = {
1844 /* Addr allmodes */ 1844 /* Addr allmodes */
1845 {0x00004040, 0x08212e5e}, 1845 {0x00004040, 0x0821265e},
1846 {0x00004040, 0x0008003b}, 1846 {0x00004040, 0x0008003b},
1847 {0x00004044, 0x00000000}, 1847 {0x00004044, 0x00000000},
1848}; 1848};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 6137634e46c..06fb2c85053 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -146,8 +146,8 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
146 /* Sleep Setting */ 146 /* Sleep Setting */
147 147
148 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 148 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
149 ar9300PciePhy_clkreq_enable_L1_2p2, 149 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
150 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2), 150 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
151 2); 151 2);
152 152
153 /* Fast clock modal settings */ 153 /* Fast clock modal settings */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 1ce506f2311..780ac5eac50 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -78,7 +78,7 @@ struct tx_frame_hdr {
78 u8 node_idx; 78 u8 node_idx;
79 u8 vif_idx; 79 u8 vif_idx;
80 u8 tidno; 80 u8 tidno;
81 u32 flags; /* ATH9K_HTC_TX_* */ 81 __be32 flags; /* ATH9K_HTC_TX_* */
82 u8 key_type; 82 u8 key_type;
83 u8 keyix; 83 u8 keyix;
84 u8 reserved[26]; 84 u8 reserved[26];
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 33f36029fa4..7a5ffca2195 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -113,6 +113,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
113 113
114 if (ieee80211_is_data(fc)) { 114 if (ieee80211_is_data(fc)) {
115 struct tx_frame_hdr tx_hdr; 115 struct tx_frame_hdr tx_hdr;
116 u32 flags = 0;
116 u8 *qc; 117 u8 *qc;
117 118
118 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); 119 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
@@ -136,13 +137,14 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
136 /* Check for RTS protection */ 137 /* Check for RTS protection */
137 if (priv->hw->wiphy->rts_threshold != (u32) -1) 138 if (priv->hw->wiphy->rts_threshold != (u32) -1)
138 if (skb->len > priv->hw->wiphy->rts_threshold) 139 if (skb->len > priv->hw->wiphy->rts_threshold)
139 tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS; 140 flags |= ATH9K_HTC_TX_RTSCTS;
140 141
141 /* CTS-to-self */ 142 /* CTS-to-self */
142 if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) && 143 if (!(flags & ATH9K_HTC_TX_RTSCTS) &&
143 (priv->op_flags & OP_PROTECT_ENABLE)) 144 (priv->op_flags & OP_PROTECT_ENABLE))
144 tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY; 145 flags |= ATH9K_HTC_TX_CTSONLY;
145 146
147 tx_hdr.flags = cpu_to_be32(flags);
146 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb); 148 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
147 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR) 149 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
148 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID; 150 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
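Note on the two ath9k/htc hunks above: the flags word in the HTC tx header is now declared __be32, so the driver collects the ATH9K_HTC_TX_* bits in a host-order u32 and converts once with cpu_to_be32() before the header goes out, instead of OR-ing into a field whose byte order was ambiguous. A standalone userspace illustration of that conversion (flag values invented; htonl() produces the same big-endian layout as cpu_to_be32()):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define DEMO_TX_RTSCTS	0x00000001u	/* illustrative flag bits */
#define DEMO_TX_CTSONLY	0x00000002u

int main(void)
{
	uint32_t flags = 0;

	flags |= DEMO_TX_RTSCTS;
	uint32_t wire = htonl(flags);	/* big-endian, whatever the host CPU is */

	const uint8_t *b = (const uint8_t *)&wire;
	printf("wire bytes: %02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}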
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index 97906dd442e..14ceb4df72f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -168,7 +168,7 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
168 /* not using .cfg overwrite */ 168 /* not using .cfg overwrite */
169 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 169 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
170 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 170 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
171 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 171 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
172 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) { 172 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
173 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n", 173 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
174 priv->cfg->valid_tx_ant, 174 priv->cfg->valid_tx_ant,
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 13a69ebf2a9..5091d77e02c 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -126,6 +126,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
126 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES); 126 ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
127 if (!ndev) { 127 if (!ndev) {
128 dev_err(dev, "no memory for network device instance\n"); 128 dev_err(dev, "no memory for network device instance\n");
129 ret = -ENOMEM;
129 goto out_priv; 130 goto out_priv;
130 } 131 }
131 132
@@ -138,6 +139,7 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
138 GFP_KERNEL); 139 GFP_KERNEL);
139 if (!iwm->umac_profile) { 140 if (!iwm->umac_profile) {
140 dev_err(dev, "Couldn't alloc memory for profile\n"); 141 dev_err(dev, "Couldn't alloc memory for profile\n");
142 ret = -ENOMEM;
141 goto out_profile; 143 goto out_profile;
142 } 144 }
143 145
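Note on the hunk above: the two added ret = -ENOMEM lines make the allocation-failure branches report an error instead of whatever ret happened to hold when jumping to the cleanup labels. A standalone sketch of the pattern (userspace C, invented names), showing why every failure branch needs its own error code before the goto:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int demo_setup(void)
{
	void *netdev, *profile;
	int ret;

	netdev = malloc(64);
	if (!netdev) {
		ret = -ENOMEM;		/* the assignment the patch adds */
		goto out;
	}

	profile = malloc(64);
	if (!profile) {
		ret = -ENOMEM;
		goto out_free_netdev;
	}

	/* ... use the objects ... */
	free(profile);
	free(netdev);
	return 0;

out_free_netdev:
	free(netdev);
out:
	return ret;
}

int main(void)
{
	printf("demo_setup() = %d\n", demo_setup());
	return 0;
}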
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index f0e1eb72bef..be0ff78c1b1 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -58,6 +58,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
58 58
59 if (!fw || !fw->size || !fw->data) { 59 if (!fw || !fw->size || !fw->data) {
60 ERROR(rt2x00dev, "Failed to read Firmware.\n"); 60 ERROR(rt2x00dev, "Failed to read Firmware.\n");
61 release_firmware(fw);
61 return -ENOENT; 62 return -ENOENT;
62 } 63 }
63 64
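Note on the hunk above: the added release_firmware() call closes a leak where request_firmware() succeeded but the blob was judged unusable (empty size or data) and the early return dropped it without freeing. A kernel-style sketch of the corrected error path (illustrative function, not the rt2x00 code):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>

static int demo_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	if (!fw->size || !fw->data) {
		release_firmware(fw);	/* do not leak the blob on the bail-out path */
		return -ENOENT;
	}

	/* ... validate and upload ... */
	release_firmware(fw);
	return 0;
}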
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7a7a1b66478..2ac8f6aff5a 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -831,12 +831,14 @@ tx_drop:
831 return NETDEV_TX_OK; 831 return NETDEV_TX_OK;
832} 832}
833 833
834static int qeth_l2_open(struct net_device *dev) 834static int __qeth_l2_open(struct net_device *dev)
835{ 835{
836 struct qeth_card *card = dev->ml_priv; 836 struct qeth_card *card = dev->ml_priv;
837 int rc = 0; 837 int rc = 0;
838 838
839 QETH_CARD_TEXT(card, 4, "qethopen"); 839 QETH_CARD_TEXT(card, 4, "qethopen");
840 if (card->state == CARD_STATE_UP)
841 return rc;
840 if (card->state != CARD_STATE_SOFTSETUP) 842 if (card->state != CARD_STATE_SOFTSETUP)
841 return -ENODEV; 843 return -ENODEV;
842 844
@@ -857,6 +859,18 @@ static int qeth_l2_open(struct net_device *dev)
857 return rc; 859 return rc;
858} 860}
859 861
862static int qeth_l2_open(struct net_device *dev)
863{
864 struct qeth_card *card = dev->ml_priv;
865
866 QETH_CARD_TEXT(card, 5, "qethope_");
867 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
868 QETH_CARD_TEXT(card, 3, "openREC");
869 return -ERESTARTSYS;
870 }
871 return __qeth_l2_open(dev);
872}
873
860static int qeth_l2_stop(struct net_device *dev) 874static int qeth_l2_stop(struct net_device *dev)
861{ 875{
862 struct qeth_card *card = dev->ml_priv; 876 struct qeth_card *card = dev->ml_priv;
@@ -1046,7 +1060,7 @@ contin:
1046 if (recover_flag == CARD_STATE_RECOVER) { 1060 if (recover_flag == CARD_STATE_RECOVER) {
1047 if (recovery_mode && 1061 if (recovery_mode &&
1048 card->info.type != QETH_CARD_TYPE_OSN) { 1062 card->info.type != QETH_CARD_TYPE_OSN) {
1049 qeth_l2_open(card->dev); 1063 __qeth_l2_open(card->dev);
1050 } else { 1064 } else {
1051 rtnl_lock(); 1065 rtnl_lock();
1052 dev_open(card->dev); 1066 dev_open(card->dev);
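Note on the qeth_l2 hunks above (mirrored for qeth_l3 below): the open path is split so that __qeth_l2_open() does the actual bring-up, returns early if the card is already UP, and is called directly from the recovery path, while the ndo_open wrapper first waits on QETH_RECOVER_THREAD and returns -ERESTARTSYS if that wait fails, presumably because the recovery thread must not block waiting on itself. A generic sketch of that wrapper shape (all names and the stub are illustrative, not qeth API):

#include <linux/errno.h>
#include <linux/netdevice.h>

static int demo_wait_for_recovery(struct net_device *dev)
{
	return 0;	/* stub: would block until recovery finishes; nonzero = interrupted */
}

static int __demo_open(struct net_device *dev)
{
	/* idempotent bring-up; return 0 if the device is already up */
	return 0;
}

static int demo_open(struct net_device *dev)		/* ndo_open entry point */
{
	if (demo_wait_for_recovery(dev))
		return -ERESTARTSYS;
	return __demo_open(dev);
}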
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e227e465bfc..d09b0c44fc3 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2998,7 +2998,9 @@ static inline void qeth_l3_hdr_csum(struct qeth_card *card,
2998 */ 2998 */
2999 if (iph->protocol == IPPROTO_UDP) 2999 if (iph->protocol == IPPROTO_UDP)
3000 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP; 3000 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_UDP;
3001 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; 3001 hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ |
3002 QETH_HDR_EXT_CSUM_HDR_REQ;
3003 iph->check = 0;
3002 if (card->options.performance_stats) 3004 if (card->options.performance_stats)
3003 card->perf_stats.tx_csum++; 3005 card->perf_stats.tx_csum++;
3004} 3006}
@@ -3240,12 +3242,14 @@ tx_drop:
3240 return NETDEV_TX_OK; 3242 return NETDEV_TX_OK;
3241} 3243}
3242 3244
3243static int qeth_l3_open(struct net_device *dev) 3245static int __qeth_l3_open(struct net_device *dev)
3244{ 3246{
3245 struct qeth_card *card = dev->ml_priv; 3247 struct qeth_card *card = dev->ml_priv;
3246 int rc = 0; 3248 int rc = 0;
3247 3249
3248 QETH_CARD_TEXT(card, 4, "qethopen"); 3250 QETH_CARD_TEXT(card, 4, "qethopen");
3251 if (card->state == CARD_STATE_UP)
3252 return rc;
3249 if (card->state != CARD_STATE_SOFTSETUP) 3253 if (card->state != CARD_STATE_SOFTSETUP)
3250 return -ENODEV; 3254 return -ENODEV;
3251 card->data.state = CH_STATE_UP; 3255 card->data.state = CH_STATE_UP;
@@ -3260,6 +3264,18 @@ static int qeth_l3_open(struct net_device *dev)
3260 return rc; 3264 return rc;
3261} 3265}
3262 3266
3267static int qeth_l3_open(struct net_device *dev)
3268{
3269 struct qeth_card *card = dev->ml_priv;
3270
3271 QETH_CARD_TEXT(card, 5, "qethope_");
3272 if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
3273 QETH_CARD_TEXT(card, 3, "openREC");
3274 return -ERESTARTSYS;
3275 }
3276 return __qeth_l3_open(dev);
3277}
3278
3263static int qeth_l3_stop(struct net_device *dev) 3279static int qeth_l3_stop(struct net_device *dev)
3264{ 3280{
3265 struct qeth_card *card = dev->ml_priv; 3281 struct qeth_card *card = dev->ml_priv;
@@ -3564,7 +3580,7 @@ contin:
3564 netif_carrier_off(card->dev); 3580 netif_carrier_off(card->dev);
3565 if (recover_flag == CARD_STATE_RECOVER) { 3581 if (recover_flag == CARD_STATE_RECOVER) {
3566 if (recovery_mode) 3582 if (recovery_mode)
3567 qeth_l3_open(card->dev); 3583 __qeth_l3_open(card->dev);
3568 else { 3584 else {
3569 rtnl_lock(); 3585 rtnl_lock();
3570 dev_open(card->dev); 3586 dev_open(card->dev);