author		Arnd Bergmann <arnd@arndb.de>	2013-04-08 12:26:15 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2013-04-08 12:26:15 -0400
commit		e9069cf8b74b50d804fd540a9fd1383504f4af93 (patch)
tree		0b3a30308ffc00a73f681bfdf19214b5ba9ae5a6 /drivers/net
parent		4680ebc2c90f663ba70c6bb3d8596b0f2c4dfa9e (diff)
parent		ce63d6d4bb9f601de32d4b99f925a65182521873 (diff)
Merge tag 'vt8500/pinctrl' of git://server.prisktech.co.nz/git/linuxwmt into next/drivers
From Tony Prisk <linux@prisktech.co.nz>:

arm: vt8500: Add pinctrl driver for arch-vt8500

This series adds support for the pinctrl/gpio module on all arch-vt8500
supported SoCs. As part of the review process, some tidy up is also done to
drivers/of/base.c to remove some code that is being constantly duplicated.
Also, a patch for the bcm2835 pinctrl driver is included to take advantage
of the new of/base.c code.

* tag 'vt8500/pinctrl' of git://server.prisktech.co.nz/git/linuxwmt: (606 commits)
  pinctrl: bcm2835: make use of of_property_read_u32_index()
  gpio: vt8500: Remove arch-vt8500 gpio driver
  arm: vt8500: Remove gpio devicetree nodes
  arm: dts: vt8500: Update Wondermedia SoC dtsi files for pinctrl driver
  pinctrl: gpio: vt8500: Add pincontrol driver for arch-vt8500
  arm: vt8500: Increase available GPIOs on arch-vt8500
  of: Remove duplicated code for validating property and value
  of: Add support for reading a u32 from a multi-value property.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
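For context, the of/base.c helper the message refers to, of_property_read_u32_index(), reads a single u32 at a given index from a multi-value property and returns 0 on success or a negative errno. A minimal usage sketch follows; the node pointer and the "pin-config" property name are hypothetical, only the helper itself comes from this series.

#include <linux/of.h>

/* Hypothetical example: read the third u32 cell of a "pin-config" property. */
static int example_read_pin_cfg(struct device_node *np, u32 *val)
{
	/* Returns 0 on success; -EINVAL, -ENODATA or -EOVERFLOW on error. */
	return of_property_read_u32_index(np, "pin-config", 2, val);
}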
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c                          11
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c           1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c         17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h          4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h         3
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                      22
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c               12
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig                    1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h                    1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c              36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_hw.h                 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c              10
-rw-r--r--  drivers/net/ethernet/freescale/fec.c                     27
-rw-r--r--  drivers/net/ethernet/freescale/fec.h                      1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c              13
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c              71
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h               2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c               82
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h                  1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c             11
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h                      2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_hwmon.c               14
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c                76
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c               55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c                   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c           86
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c                   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c                 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h                 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h              1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c                  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c                   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c                 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c                   8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c     3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/srq.c                  2
-rw-r--r--  drivers/net/ethernet/sfc/efx.h                            4
-rw-r--r--  drivers/net/ethernet/sfc/nic.c                            3
-rw-r--r--  drivers/net/ethernet/sfc/rx.c                             2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c                            2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c                    2
-rw-r--r--  drivers/net/hippi/rrunner.c                               3
-rw-r--r--  drivers/net/macvlan.c                                     1
-rw-r--r--  drivers/net/netconsole.c                                 15
-rw-r--r--  drivers/net/team/team.c                                   2
-rw-r--r--  drivers/net/tun.c                                         2
-rw-r--r--  drivers/net/usb/Kconfig                                   2
-rw-r--r--  drivers/net/usb/cdc_mbim.c                               11
-rw-r--r--  drivers/net/usb/cdc_ncm.c                                49
-rw-r--r--  drivers/net/usb/qmi_wwan.c                               49
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c                         1
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c                     6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h                         4
-rw-r--r--  drivers/net/vxlan.c                                      10
-rw-r--r--  drivers/net/wireless/iwlwifi/dvm/sta.c                    2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-devtrace.h               2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-drv.c                    3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-modparams.h              2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-trans.h                 20
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw-api.h                18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/fw.c                   133
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/mvm.h                    3
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/ops.c                   18
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/rx.c                    37
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c                   10
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/tx.c                     6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h             34
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/rx.c                   14
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c                  266
-rw-r--r--  drivers/net/wireless/mwifiex/join.c                       7
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig                       4
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800pci.c                  14
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/hw.c              89
73 files changed, 723 insertions, 738 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7bd068a6056a..6bbd90e1123c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1746,6 +1746,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
 	bond_compute_features(bond);
 
+	bond_update_speed_duplex(new_slave);
+
 	read_lock(&bond->lock);
 
 	new_slave->last_arp_rx = jiffies -
@@ -1798,8 +1800,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 		new_slave->link == BOND_LINK_DOWN ? "DOWN" :
 		(new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
-	bond_update_speed_duplex(new_slave);
-
 	if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
 		/* if there is a primary slave, remember it */
 		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
@@ -1964,7 +1964,6 @@ static int __bond_release_one(struct net_device *bond_dev,
 	}
 
 	block_netpoll_tx();
-	call_netdevice_notifiers(NETDEV_RELEASE, bond_dev);
 	write_lock_bh(&bond->lock);
 
 	slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -2066,8 +2065,10 @@ static int __bond_release_one(struct net_device *bond_dev,
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
-	if (bond->slave_cnt == 0)
+	if (bond->slave_cnt == 0) {
 		call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
+		call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
+	}
 
 	bond_compute_features(bond);
 	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
@@ -2373,8 +2374,6 @@ static void bond_miimon_commit(struct bonding *bond)
 				bond_set_backup_slave(slave);
 			}
 
-			bond_update_speed_duplex(slave);
-
 			pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
 				bond->dev->name, slave->dev->name,
 				slave->speed, slave->duplex ? "full" : "half");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a923bc4d5a1f..4046f97378c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2760,6 +2760,7 @@ load_error2:
 	bp->port.pmf = 0;
 load_error1:
 	bnx2x_napi_disable(bp);
+	bnx2x_del_all_napi(bp);
 
 	/* clear pf_load status, as it was already set */
 	if (IS_PF(bp))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 31c5787970db..77ebae0ac64a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -8647,7 +8647,9 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
 						 MDIO_WC_DEVAD,
 						 MDIO_WC_REG_DIGITAL5_MISC6,
 						 &rx_tx_in_reset);
-			if (!rx_tx_in_reset) {
+			if ((!rx_tx_in_reset) &&
+			    (params->link_flags &
+			     PHY_INITIALIZED)) {
 				bnx2x_warpcore_reset_lane(bp, phy, 1);
 				bnx2x_warpcore_config_sfi(phy, params);
 				bnx2x_warpcore_reset_lane(bp, phy, 0);
@@ -12527,6 +12529,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
 	vars->mac_type = MAC_TYPE_NONE;
 	vars->phy_flags = 0;
+	vars->check_kr2_recovery_cnt = 0;
+	params->link_flags = PHY_INITIALIZED;
 	/* Driver opens NIG-BRB filters */
 	bnx2x_set_rx_filter(params, 1);
 	/* Check if link flap can be avoided */
@@ -12691,6 +12695,7 @@ int bnx2x_lfa_reset(struct link_params *params,
 	struct bnx2x *bp = params->bp;
 	vars->link_up = 0;
 	vars->phy_flags = 0;
+	params->link_flags &= ~PHY_INITIALIZED;
 	if (!params->lfa_base)
 		return bnx2x_link_reset(params, vars, 1);
 	/*
@@ -13411,6 +13416,7 @@ static void bnx2x_disable_kr2(struct link_params *params,
 	vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE;
 	bnx2x_update_link_attr(params, vars->link_attr_sync);
 
+	vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT;
 	/* Restart AN on leading lane */
 	bnx2x_warpcore_restart_AN_KR(phy, params);
 }
@@ -13439,6 +13445,15 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 		return;
 	}
 
+	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
+	 * since some switches tend to reinit the AN process and clear the
+	 * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+	 * and recovered many times
+	 */
+	if (vars->check_kr2_recovery_cnt > 0) {
+		vars->check_kr2_recovery_cnt--;
+		return;
+	}
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
 			  MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index be5c195d03dd..56c2aae4e2c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -309,6 +309,7 @@ struct link_params {
 			   req_flow_ctrl is set to AUTO */
 	u16 link_flags;
 #define LINK_FLAGS_INT_DISABLED		(1<<0)
+#define PHY_INITIALIZED			(1<<1)
 	u32 lfa_base;
 };
 
@@ -342,7 +343,8 @@ struct link_vars {
 	u32 link_status;
 	u32 eee_status;
 	u8 fault_detected;
-	u8 rsrv1;
+	u8 check_kr2_recovery_cnt;
+#define CHECK_KR2_RECOVERY_CNT	5
 	u16 periodic_flags;
 #define PERIODIC_FLAGS_LINK_EVENT	0x0001
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 364e37ecbc5c..198f6f1c9ad5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -459,8 +459,9 @@ struct bnx2x_fw_port_stats_old {
 
 #define UPDATE_QSTAT(s, t) \
 	do { \
-		qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
 		qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+		qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
+			+ ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
 	} while (0)
 
 #define UPDATE_QSTAT_OLD(f) \
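The reworked UPDATE_QSTAT macro above adds the low 32-bit word first and then carries one into the high word when the low-word addition wrapped. A stand-alone illustration of the same carry idea; the struct and function names below are illustrative only, not taken from the driver:

#include <stdint.h>

/* Illustrative split 64-bit counter kept as two 32-bit halves. */
struct split_counter {
	uint32_t lo;
	uint32_t hi;
};

static void counter_add(struct split_counter *c, uint32_t add_lo, uint32_t add_hi)
{
	uint32_t old_lo = c->lo;

	c->lo = old_lo + add_lo;
	/* If the low word wrapped around, propagate a carry into the high word. */
	c->hi = c->hi + add_hi + ((c->lo < old_lo) ? 1 : 0);
}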
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index fdb9b5655414..67d2663b3974 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1869,6 +1869,8 @@ static void tg3_link_report(struct tg3 *tp)
 
 		tg3_ump_link_report(tp);
 	}
+
+	tp->link_up = netif_carrier_ok(tp->dev);
 }
 
 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
@@ -2522,12 +2524,6 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 	return err;
 }
 
-static void tg3_carrier_on(struct tg3 *tp)
-{
-	netif_carrier_on(tp->dev);
-	tp->link_up = true;
-}
-
 static void tg3_carrier_off(struct tg3 *tp)
 {
 	netif_carrier_off(tp->dev);
@@ -2553,7 +2549,7 @@ static int tg3_phy_reset(struct tg3 *tp)
 		return -EBUSY;
 
 	if (netif_running(tp->dev) && tp->link_up) {
-		tg3_carrier_off(tp);
+		netif_carrier_off(tp->dev);
 		tg3_link_report(tp);
 	}
 
@@ -4134,6 +4130,14 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
 		tp->link_config.active_speed = tp->link_config.speed;
 		tp->link_config.active_duplex = tp->link_config.duplex;
 
+		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
+			/* With autoneg disabled, 5715 only links up when the
+			 * advertisement register has the configured speed
+			 * enabled.
+			 */
+			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
+		}
+
 		bmcr = 0;
 		switch (tp->link_config.speed) {
 		default:
@@ -4262,9 +4266,9 @@ static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
 {
 	if (curr_link_up != tp->link_up) {
 		if (curr_link_up) {
-			tg3_carrier_on(tp);
+			netif_carrier_on(tp->dev);
 		} else {
-			tg3_carrier_off(tp);
+			netif_carrier_off(tp->dev);
 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
 				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
 		}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4ce62031f62f..8049268ce0f2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -497,8 +497,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_BASE           0
 #define VPD_LEN            512
+#define VPD_BASE           0x400
+#define VPD_BASE_OLD       0
 
 /**
  *	t4_seeprom_wp - enable/disable EEPROM write protection
@@ -524,7 +525,7 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 {
 	u32 cclk_param, cclk_val;
-	int i, ret;
+	int i, ret, addr;
 	int ec, sn;
 	u8 *vpd, csum;
 	unsigned int vpdr_len, kw_offset, id_len;
@@ -533,7 +534,12 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
 	if (!vpd)
 		return -ENOMEM;
 
-	ret = pci_read_vpd(adapter->pdev, VPD_BASE, VPD_LEN, vpd);
+	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+	if (ret < 0)
+		goto out;
+	addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+	ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
 	if (ret < 0)
 		goto out;
 
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
index 0c37fb2cc867..1df33c799c00 100644
--- a/drivers/net/ethernet/dec/tulip/Kconfig
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -108,6 +108,7 @@ config TULIP_DM910X
 config DE4X5
 	tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
 	depends on (PCI || EISA)
+	depends on VIRT_TO_BUS || ALPHA || PPC || SPARC
 	select CRC32
 	---help---
 	  This is support for the DIGITAL series of PCI/EISA Ethernet cards.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 28ceb8414185..29aff55f2eea 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -349,6 +349,7 @@ struct be_adapter {
 	struct pci_dev *pdev;
 	struct net_device *netdev;
 
+	u8 __iomem *csr;	/* CSR BAR used only for BE2/3 */
 	u8 __iomem *db;		/* Door Bell */
 
 	struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 071aea79d218..3c9b4f12e3e5 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -473,19 +473,17 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
 	return 0;
 }
 
-static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
+static u16 be_POST_stage_get(struct be_adapter *adapter)
 {
 	u32 sem;
-	u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
-					  SLIPORT_SEMAPHORE_OFFSET_BE;
 
-	pci_read_config_dword(adapter->pdev, reg, &sem);
-	*stage = sem & POST_STAGE_MASK;
-
-	if ((sem >> POST_ERR_SHIFT) & POST_ERR_MASK)
-		return -1;
+	if (BEx_chip(adapter))
+		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
 	else
-		return 0;
+		pci_read_config_dword(adapter->pdev,
+				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
+
+	return sem & POST_STAGE_MASK;
 }
 
 int lancer_wait_ready(struct be_adapter *adapter)
@@ -579,19 +577,17 @@ int be_fw_wait_ready(struct be_adapter *adapter)
 	}
 
 	do {
-		status = be_POST_stage_get(adapter, &stage);
-		if (status) {
-			dev_err(dev, "POST error; stage=0x%x\n", stage);
-			return -1;
-		} else if (stage != POST_STAGE_ARMFW_RDY) {
-			if (msleep_interruptible(2000)) {
-				dev_err(dev, "Waiting for POST aborted\n");
-				return -EINTR;
-			}
-			timeout += 2;
-		} else {
+		stage = be_POST_stage_get(adapter);
+		if (stage == POST_STAGE_ARMFW_RDY)
 			return 0;
+
+		dev_info(dev, "Waiting for POST, %ds elapsed\n",
+			 timeout);
+		if (msleep_interruptible(2000)) {
+			dev_err(dev, "Waiting for POST aborted\n");
+			return -EINTR;
 		}
+		timeout += 2;
 	} while (timeout < 60);
 
 	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 541d4530d5bf..62dc220695f7 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -32,8 +32,8 @@
 #define MPU_EP_CONTROL		0
 
 /********** MPU semphore: used for SH & BE *************/
-#define SLIPORT_SEMAPHORE_OFFSET_BE		0x7c
-#define SLIPORT_SEMAPHORE_OFFSET_SH		0x94
+#define SLIPORT_SEMAPHORE_OFFSET_BEx		0xac  /* CSR BAR offset */
+#define SLIPORT_SEMAPHORE_OFFSET_SH		0x94  /* PCI-CFG offset */
 #define POST_STAGE_MASK				0x0000FFFF
 #define POST_ERR_MASK				0x1
 #define POST_ERR_SHIFT				31
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3860888ac711..08e54f3d288b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3688,6 +3688,8 @@ static void be_netdev_init(struct net_device *netdev)
 
 static void be_unmap_pci_bars(struct be_adapter *adapter)
 {
+	if (adapter->csr)
+		pci_iounmap(adapter->pdev, adapter->csr);
 	if (adapter->db)
 		pci_iounmap(adapter->pdev, adapter->db);
 }
@@ -3721,6 +3723,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 	adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
 				SLI_INTF_IF_TYPE_SHIFT;
 
+	if (BEx_chip(adapter) && be_physfn(adapter)) {
+		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
+		if (adapter->csr == NULL)
+			return -ENOMEM;
+	}
+
 	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
 	if (addr == NULL)
 		goto pci_map_err;
@@ -4329,6 +4337,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 
 	/* Check if card is ok and fw is ready */
+	dev_info(&adapter->pdev->dev,
+		 "Waiting for FW to be ready after EEH reset\n");
 	status = be_fw_wait_ready(adapter);
 	if (status)
 		return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 069a155d16ed..e3f39372ce25 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -934,24 +934,28 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 		goto spin_unlock;
 	}
 
-	/* Duplex link change */
 	if (phy_dev->link) {
-		if (fep->full_duplex != phy_dev->duplex) {
-			fec_restart(ndev, phy_dev->duplex);
-			/* prevent unnecessary second fec_restart() below */
+		if (!fep->link) {
 			fep->link = phy_dev->link;
 			status_change = 1;
 		}
-	}
 
-	/* Link on or off change */
-	if (phy_dev->link != fep->link) {
-		fep->link = phy_dev->link;
-		if (phy_dev->link)
-			fec_restart(ndev, phy_dev->duplex);
-		else
-			fec_stop(ndev);
-		status_change = 1;
+		if (fep->full_duplex != phy_dev->duplex)
+			status_change = 1;
+
+		if (phy_dev->speed != fep->speed) {
+			fep->speed = phy_dev->speed;
+			status_change = 1;
+		}
+
+		/* if any of the above changed restart the FEC */
+		if (status_change)
+			fec_restart(ndev, phy_dev->duplex);
+	} else {
+		if (fep->link) {
+			fec_stop(ndev);
+			status_change = 1;
+		}
 	}
 
 spin_unlock:
@@ -1437,6 +1441,7 @@ fec_enet_close(struct net_device *ndev)
 	struct fec_enet_private *fep = netdev_priv(ndev);
 
 	/* Don't know what to do yet. */
+	napi_disable(&fep->napi);
 	fep->opened = 0;
 	netif_stop_queue(ndev);
 	fec_stop(ndev);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f5390071efd0..eb4372962839 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -240,6 +240,7 @@ struct fec_enet_private {
 	phy_interface_t	phy_interface;
 	int	link;
 	int	full_duplex;
+	int	speed;
 	struct	completion mdio_done;
 	int	irq[FEC_IRQ_NUM];
 	int	bufdesc_ex;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 2c1813737f6d..f91a8f3f9d48 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -36,6 +36,7 @@
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
 #include <linux/mdio.h>
+#include <linux/pm_runtime.h>
 
 #include "e1000.h"
 
@@ -2229,7 +2230,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
 	return 0;
 }
 
+static int e1000e_ethtool_begin(struct net_device *netdev)
+{
+	return pm_runtime_get_sync(netdev->dev.parent);
+}
+
+static void e1000e_ethtool_complete(struct net_device *netdev)
+{
+	pm_runtime_put_sync(netdev->dev.parent);
+}
+
 static const struct ethtool_ops e1000_ethtool_ops = {
+	.begin			= e1000e_ethtool_begin,
+	.complete		= e1000e_ethtool_complete,
 	.get_settings		= e1000_get_settings,
 	.set_settings		= e1000_set_settings,
 	.get_drvinfo		= e1000_get_drvinfo,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index dff7bff8b8e0..121a865c7fbd 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -782,6 +782,59 @@ release:
 }
 
 /**
+ *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
+ *  preventing further DMA write requests.  Workaround the issue by disabling
+ *  the de-assertion of the clock request when in 1Gpbs mode.
+ **/
+static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
+{
+	u32 fextnvm6 = er32(FEXTNVM6);
+	s32 ret_val = 0;
+
+	if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
+		u16 kmrn_reg;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val =
+		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
+						&kmrn_reg);
+		if (ret_val)
+			goto release;
+
+		ret_val =
+		    e1000e_write_kmrn_reg_locked(hw,
+						 E1000_KMRNCTRLSTA_K1_CONFIG,
+						 kmrn_reg &
+						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
+		if (ret_val)
+			goto release;
+
+		usleep_range(10, 20);
+
+		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
+
+		ret_val =
+		    e1000e_write_kmrn_reg_locked(hw,
+						 E1000_KMRNCTRLSTA_K1_CONFIG,
+						 kmrn_reg);
+release:
+		hw->phy.ops.release(hw);
+	} else {
+		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
+		ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+	}
+
+	return ret_val;
+}
+
+/**
  * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
  * @hw: pointer to the HW structure
  *
@@ -818,6 +871,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
+	/* Work-around I218 hang issue */
+	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
+		if (ret_val)
+			return ret_val;
+	}
+
 	/* Clear link partner's EEE ability */
 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
@@ -3954,8 +4015,16 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
 	phy_ctrl = er32(PHY_CTRL);
 	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
 	if (hw->phy.type == e1000_phy_i217) {
-		u16 phy_reg;
+		u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+			u32 fextnvm6 = er32(FEXTNVM6);
+
+			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+		}
 
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index b6d3174d7d2d..8bf4655c2e17 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -92,6 +92,8 @@
 #define E1000_FEXTNVM4_BEACON_DURATION_8USEC	0x7
 #define E1000_FEXTNVM4_BEACON_DURATION_16USEC	0x3
 
+#define E1000_FEXTNVM6_REQ_PLL_CLK	0x00000100
+
 #define PCIE_ICH8_SNOOP_ALL	PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES	7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a177b8b65c44..948b86ffa4f0 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,7 @@ static int e1000_open(struct net_device *netdev)
 	netif_start_queue(netdev);
 
 	adapter->idle_check = true;
+	hw->mac.get_link_status = true;
 	pm_runtime_put(&pdev->dev);
 
 	/* fire a link status change interrupt to start the watchdog */
@@ -4662,6 +4663,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
 		int ret_val;
 
+		pm_runtime_get_sync(&adapter->pdev->dev);
 		ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
 		ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
 		ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4672,6 +4674,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
 		ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
 		if (ret_val)
 			e_warn("Error reading PHY register\n");
+		pm_runtime_put_sync(&adapter->pdev->dev);
 	} else {
 		/* Do not read PHY registers if link is not up
 		 * Set values to typical power-on defaults
@@ -5887,8 +5890,7 @@ release:
 	return retval;
 }
 
-static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
-			    bool runtime)
+static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -5912,10 +5914,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	}
 	e1000e_reset_interrupt_capability(adapter);
 
-	retval = pci_save_state(pdev);
-	if (retval)
-		return retval;
-
 	status = er32(STATUS);
 	if (status & E1000_STATUS_LU)
 		wufc &= ~E1000_WUFC_LNKC;
@@ -5971,13 +5969,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 		ew32(WUFC, 0);
 	}
 
-	*enable_wake = !!wufc;
-
-	/* make sure adapter isn't asleep if manageability is enabled */
-	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
-	    (hw->mac.ops.check_mng_mode(hw)))
-		*enable_wake = true;
-
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
 		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
 
@@ -5986,27 +5977,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 	 */
 	e1000e_release_hw_control(adapter);
 
-	pci_disable_device(pdev);
-
-	return 0;
-}
-
-static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
-{
-	if (sleep && wake) {
-		pci_prepare_to_sleep(pdev);
-		return;
-	}
-
-	pci_wake_from_d3(pdev, wake);
-	pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
-				    bool wake)
-{
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct e1000_adapter *adapter = netdev_priv(netdev);
+	pci_clear_master(pdev);
 
 	/* The pci-e switch on some quad port adapters will report a
 	 * correctable error when the MAC transitions from D0 to D3. To
@@ -6021,12 +5992,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
 		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
 					   (devctl & ~PCI_EXP_DEVCTL_CERE));
 
-		e1000_power_off(pdev, sleep, wake);
+		pci_save_state(pdev);
+		pci_prepare_to_sleep(pdev);
 
 		pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
-	} else {
-		e1000_power_off(pdev, sleep, wake);
 	}
+
+	return 0;
 }
 
 #ifdef CONFIG_PCIEASPM
@@ -6084,9 +6056,7 @@ static int __e1000_resume(struct pci_dev *pdev)
 	if (aspm_disable_flag)
 		e1000e_disable_aspm(pdev, aspm_disable_flag);
 
-	pci_set_power_state(pdev, PCI_D0);
-	pci_restore_state(pdev);
-	pci_save_state(pdev);
+	pci_set_master(pdev);
 
 	e1000e_set_interrupt_capability(adapter);
 	if (netif_running(netdev)) {
@@ -6152,14 +6122,8 @@ static int __e1000_resume(struct pci_dev *pdev)
 static int e1000_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
-	int retval;
-	bool wake;
-
-	retval = __e1000_shutdown(pdev, &wake, false);
-	if (!retval)
-		e1000_complete_shutdown(pdev, true, wake);
 
-	return retval;
+	return __e1000_shutdown(pdev, false);
 }
 
 static int e1000_resume(struct device *dev)
@@ -6182,13 +6146,10 @@ static int e1000_runtime_suspend(struct device *dev)
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	if (e1000e_pm_ready(adapter)) {
-		bool wake;
-
-		__e1000_shutdown(pdev, &wake, true);
-	}
+	if (!e1000e_pm_ready(adapter))
+		return 0;
 
-	return 0;
+	return __e1000_shutdown(pdev, true);
 }
 
 static int e1000_idle(struct device *dev)
@@ -6226,12 +6187,7 @@ static int e1000_runtime_resume(struct device *dev)
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
-	bool wake = false;
-
-	__e1000_shutdown(pdev, &wake, false);
-
-	if (system_state == SYSTEM_POWER_OFF)
-		e1000_complete_shutdown(pdev, false, wake);
+	__e1000_shutdown(pdev, false);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6352,9 +6308,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
6352 "Cannot re-enable PCI device after reset.\n"); 6308 "Cannot re-enable PCI device after reset.\n");
6353 result = PCI_ERS_RESULT_DISCONNECT; 6309 result = PCI_ERS_RESULT_DISCONNECT;
6354 } else { 6310 } else {
6355 pci_set_master(pdev);
6356 pdev->state_saved = true; 6311 pdev->state_saved = true;
6357 pci_restore_state(pdev); 6312 pci_restore_state(pdev);
6313 pci_set_master(pdev);
6358 6314
6359 pci_enable_wake(pdev, PCI_D3hot, 0); 6315 pci_enable_wake(pdev, PCI_D3hot, 0);
6360 pci_enable_wake(pdev, PCI_D3cold, 0); 6316 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -6783,7 +6739,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	/* initialize the wol settings based on the eeprom settings */
 	adapter->wol = adapter->eeprom_wol;
-	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
+		device_wakeup_enable(&pdev->dev);
 
 	/* save off EEPROM version number */
 	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 794fe1497666..a7e6a3e37257 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -42,6 +42,7 @@
 #define E1000_FEXTNVM	0x00028	/* Future Extended NVM - RW */
 #define E1000_FEXTNVM3	0x0003C	/* Future Extended NVM 3 - RW */
 #define E1000_FEXTNVM4	0x00024	/* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM6	0x00010	/* Future Extended NVM 6 - RW */
 #define E1000_FEXTNVM7	0x000E4	/* Future Extended NVM 7 - RW */
 #define E1000_FCT	0x00030	/* Flow Control Type - RW */
 #define E1000_VET	0x00038	/* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 84e7e0909def..b64542acfa34 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1361,11 +1361,16 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	switch (hw->phy.type) {
 	case e1000_phy_i210:
 	case e1000_phy_m88:
-		if (hw->phy.id == I347AT4_E_PHY_ID ||
-		    hw->phy.id == M88E1112_E_PHY_ID)
+		switch (hw->phy.id) {
+		case I347AT4_E_PHY_ID:
+		case M88E1112_E_PHY_ID:
+		case I210_I_PHY_ID:
 			ret_val = igb_copper_link_setup_m88_gen2(hw);
-		else
+			break;
+		default:
 			ret_val = igb_copper_link_setup_m88(hw);
+			break;
+		}
 		break;
 	case e1000_phy_igp_3:
 		ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index d27edbc63923..25151401c2ab 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -447,7 +447,7 @@ struct igb_adapter {
 #endif
 	struct i2c_algo_bit_data i2c_algo;
 	struct i2c_adapter i2c_adap;
-	struct igb_i2c_client_list *i2c_clients;
+	struct i2c_client *i2c_client;
 };
 
 #define IGB_FLAG_HAS_MSI		(1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 0a9b073d0b03..4623502054d5 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -39,6 +39,10 @@
 #include <linux/pci.h>
 
 #ifdef CONFIG_IGB_HWMON
+struct i2c_board_info i350_sensor_info = {
+	I2C_BOARD_INFO("i350bb", (0Xf8 >> 1)),
+};
+
 /* hwmon callback functions */
 static ssize_t igb_hwmon_show_location(struct device *dev,
 				       struct device_attribute *attr,
@@ -188,6 +192,7 @@ int igb_sysfs_init(struct igb_adapter *adapter)
 	unsigned int i;
 	int n_attrs;
 	int rc = 0;
+	struct i2c_client *client = NULL;
 
 	/* If this method isn't defined we don't support thermals */
 	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
@@ -198,6 +203,15 @@ int igb_sysfs_init(struct igb_adapter *adapter)
 	if (rc)
 		goto exit;
 
+	/* init i2c_client */
+	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+	if (client == NULL) {
+		dev_info(&adapter->pdev->dev,
+			 "Failed to create new i2c device..\n");
+		goto exit;
+	}
+	adapter->i2c_client = client;
+
 	/* Allocation space for max attributes
 	 * max num sensors * values (loc, temp, max, caution)
 	 */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ed79a1c53b59..4dbd62968c7a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1923,10 +1923,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
 	return;
 }
 
-static const struct i2c_board_info i350_sensor_info = {
-	I2C_BOARD_INFO("i350bb", 0Xf8),
-};
-
 /*  igb_init_i2c - Init I2C interface
  *  @adapter: pointer to adapter structure
  *
@@ -6227,13 +6223,6 @@ static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
 	/* If we spanned a buffer we have a huge mess so test for it */
 	BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
 
-	/* Guarantee this function can be used by verifying buffer sizes */
-	BUILD_BUG_ON(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) < (NET_SKB_PAD +
-							NET_IP_ALIGN +
-							IGB_TS_HDR_LEN +
-							ETH_FRAME_LEN +
-							ETH_FCS_LEN));
-
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
 	page = rx_buffer->page;
 	prefetchw(page);
@@ -7724,67 +7713,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
 	}
 }
 
-static DEFINE_SPINLOCK(i2c_clients_lock);
-
-/*  igb_get_i2c_client - returns matching client
- *  in adapters's client list.
- *  @adapter: adapter struct
- *  @dev_addr: device address of i2c needed.
- */
-static struct i2c_client *
-igb_get_i2c_client(struct igb_adapter *adapter, u8 dev_addr)
-{
-	ulong flags;
-	struct igb_i2c_client_list *client_list;
-	struct i2c_client *client = NULL;
-	struct i2c_board_info client_info = {
-		I2C_BOARD_INFO("igb", 0x00),
-	};
-
-	spin_lock_irqsave(&i2c_clients_lock, flags);
-	client_list = adapter->i2c_clients;
-
-	/* See if we already have an i2c_client */
-	while (client_list) {
-		if (client_list->client->addr == (dev_addr >> 1)) {
-			client = client_list->client;
-			goto exit;
-		} else {
-			client_list = client_list->next;
-		}
-	}
-
-	/* no client_list found, create a new one */
-	client_list = kzalloc(sizeof(*client_list), GFP_ATOMIC);
-	if (client_list == NULL)
-		goto exit;
-
-	/* dev_addr passed to us is left-shifted by 1 bit
-	 * i2c_new_device call expects it to be flush to the right.
-	 */
-	client_info.addr = dev_addr >> 1;
-	client_info.platform_data = adapter;
-	client_list->client = i2c_new_device(&adapter->i2c_adap, &client_info);
-	if (client_list->client == NULL) {
-		dev_info(&adapter->pdev->dev,
-			"Failed to create new i2c device..\n");
-		goto err_no_client;
-	}
-
-	/* insert new client at head of list */
-	client_list->next = adapter->i2c_clients;
-	adapter->i2c_clients = client_list;
-
-	client = client_list->client;
-	goto exit;
-
-err_no_client:
-	kfree(client_list);
-exit:
-	spin_unlock_irqrestore(&i2c_clients_lock, flags);
-	return client;
-}
-
 /*  igb_read_i2c_byte - Reads 8 bit word over I2C
  *  @hw: pointer to hardware structure
  *  @byte_offset: byte offset to read
@@ -7798,7 +7726,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 		       u8 dev_addr, u8 *data)
 {
 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-	struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+	struct i2c_client *this_client = adapter->i2c_client;
 	s32 status;
 	u16 swfw_mask = 0;
 
@@ -7835,7 +7763,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
 			u8 dev_addr, u8 data)
 {
 	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
-	struct i2c_client *this_client = igb_get_i2c_client(adapter, dev_addr);
+	struct i2c_client *this_client = adapter->i2c_client;
 	s32 status;
 	u16 swfw_mask = E1000_SWFW_PHY0_SM;
 
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 29140502b71a..6562c736a1d8 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1081,6 +1081,45 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 
 
 /* mii management interface *************************************************/
+static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+{
+	u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
+	u32 autoneg_disable = FORCE_LINK_PASS |
+	             DISABLE_AUTO_NEG_SPEED_GMII |
+	             DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+	             DISABLE_AUTO_NEG_FOR_DUPLEX;
+
+	if (mp->phy->autoneg == AUTONEG_ENABLE) {
+		/* enable auto negotiation */
+		pscr &= ~autoneg_disable;
+		goto out_write;
+	}
+
+	pscr |= autoneg_disable;
+
+	if (mp->phy->speed == SPEED_1000) {
+		/* force gigabit, half duplex not supported */
+		pscr |= SET_GMII_SPEED_TO_1000;
+		pscr |= SET_FULL_DUPLEX_MODE;
+		goto out_write;
+	}
+
+	pscr &= ~SET_GMII_SPEED_TO_1000;
+
+	if (mp->phy->speed == SPEED_100)
+		pscr |= SET_MII_SPEED_TO_100;
+	else
+		pscr &= ~SET_MII_SPEED_TO_100;
+
+	if (mp->phy->duplex == DUPLEX_FULL)
+		pscr |= SET_FULL_DUPLEX_MODE;
+	else
+		pscr &= ~SET_FULL_DUPLEX_MODE;
+
+out_write:
+	wrlp(mp, PORT_SERIAL_CONTROL, pscr);
+}
+
 static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
 {
 	struct mv643xx_eth_shared_private *msp = dev_id;
@@ -1499,6 +1538,7 @@ static int
 mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int ret;
 
 	if (mp->phy == NULL)
 		return -EINVAL;
@@ -1508,7 +1548,10 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	 */
 	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
 
-	return phy_ethtool_sset(mp->phy, cmd);
+	ret = phy_ethtool_sset(mp->phy, cmd);
+	if (!ret)
+		mv643xx_adjust_pscr(mp);
+	return ret;
 }
 
 static void mv643xx_eth_get_drvinfo(struct net_device *dev,
@@ -2442,11 +2485,15 @@ static int mv643xx_eth_stop(struct net_device *dev)
 static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int ret;
 
-	if (mp->phy != NULL)
-		return phy_mii_ioctl(mp->phy, ifr, cmd);
+	if (mp->phy == NULL)
+		return -ENOTSUPP;
 
-	return -EOPNOTSUPP;
+	ret = phy_mii_ioctl(mp->phy, ifr, cmd);
+	if (!ret)
+		mv643xx_adjust_pscr(mp);
+	return ret;
 }
 
 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 7e64033d7de3..0706623cfb96 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -226,7 +226,7 @@ void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 
 static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
 {
-	u64 in_param;
+	u64 in_param = 0;
 	int err;
 
 	if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bb4d8d99f36d..995d4b6d5c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -565,34 +565,38 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_dev *dev = mdev->dev;
 	int qpn = priv->base_qpn;
-	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
-
-	en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
-	       priv->dev->dev_addr);
-	mlx4_unregister_mac(dev, priv->port, mac);
+	u64 mac;
 
-	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
+	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
+		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+		       priv->dev->dev_addr);
+		mlx4_unregister_mac(dev, priv->port, mac);
+	} else {
 		struct mlx4_mac_entry *entry;
 		struct hlist_node *tmp;
 		struct hlist_head *bucket;
-		unsigned int mac_hash;
+		unsigned int i;
 
-		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
-		bucket = &priv->mac_hash[mac_hash];
-		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
-			if (ether_addr_equal_64bits(entry->mac,
-						    priv->dev->dev_addr)) {
-				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
-				       priv->port, priv->dev->dev_addr, qpn);
+		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
+			bucket = &priv->mac_hash[i];
+			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
+				mac = mlx4_en_mac_to_u64(entry->mac);
+				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
+				       entry->mac);
 				mlx4_en_uc_steer_release(priv, entry->mac,
 							 qpn, entry->reg_id);
-				mlx4_qp_release_range(dev, qpn, 1);
 
+				mlx4_unregister_mac(dev, priv->port, mac);
 				hlist_del_rcu(&entry->hlist);
 				kfree_rcu(entry, rcu);
-				break;
 			}
 		}
+
+		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
+		       priv->port, qpn);
+		mlx4_qp_release_range(dev, qpn, 1);
+		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
 	}
 }
 
@@ -650,28 +654,10 @@ u64 mlx4_en_mac_to_u64(u8 *addr)
650 return mac; 654 return mac;
651} 655}
652 656
653static int mlx4_en_set_mac(struct net_device *dev, void *addr) 657static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
654{
655 struct mlx4_en_priv *priv = netdev_priv(dev);
656 struct mlx4_en_dev *mdev = priv->mdev;
657 struct sockaddr *saddr = addr;
658
659 if (!is_valid_ether_addr(saddr->sa_data))
660 return -EADDRNOTAVAIL;
661
662 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
663 queue_work(mdev->workqueue, &priv->mac_task);
664 return 0;
665}
666
667static void mlx4_en_do_set_mac(struct work_struct *work)
668{ 658{
669 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
670 mac_task);
671 struct mlx4_en_dev *mdev = priv->mdev;
672 int err = 0; 659 int err = 0;
673 660
674 mutex_lock(&mdev->state_lock);
675 if (priv->port_up) { 661 if (priv->port_up) {
676 /* Remove old MAC and insert the new one */ 662 /* Remove old MAC and insert the new one */
677 err = mlx4_en_replace_mac(priv, priv->base_qpn, 663 err = mlx4_en_replace_mac(priv, priv->base_qpn,
@@ -683,7 +669,26 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
683 } else 669 } else
684 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n"); 670 en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
685 671
672 return err;
673}
674
675static int mlx4_en_set_mac(struct net_device *dev, void *addr)
676{
677 struct mlx4_en_priv *priv = netdev_priv(dev);
678 struct mlx4_en_dev *mdev = priv->mdev;
679 struct sockaddr *saddr = addr;
680 int err;
681
682 if (!is_valid_ether_addr(saddr->sa_data))
683 return -EADDRNOTAVAIL;
684
685 memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
686
687 mutex_lock(&mdev->state_lock);
688 err = mlx4_en_do_set_mac(priv);
686 mutex_unlock(&mdev->state_lock); 689 mutex_unlock(&mdev->state_lock);
690
691 return err;
687} 692}
688 693
689static void mlx4_en_clear_list(struct net_device *dev) 694static void mlx4_en_clear_list(struct net_device *dev)
@@ -1348,7 +1353,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
1348 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1353 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
1349 } 1354 }
1350 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) { 1355 if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
1351 queue_work(mdev->workqueue, &priv->mac_task); 1356 mlx4_en_do_set_mac(priv);
1352 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0; 1357 mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
1353 } 1358 }
1354 mutex_unlock(&mdev->state_lock); 1359 mutex_unlock(&mdev->state_lock);
@@ -1828,9 +1833,11 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
1828 } 1833 }
1829 1834
1830#ifdef CONFIG_RFS_ACCEL 1835#ifdef CONFIG_RFS_ACCEL
1831 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool); 1836 if (priv->mdev->dev->caps.comp_pool) {
1832 if (!priv->dev->rx_cpu_rmap) 1837 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
1833 goto err; 1838 if (!priv->dev->rx_cpu_rmap)
1839 goto err;
1840 }
1834#endif 1841#endif
1835 1842
1836 return 0; 1843 return 0;
@@ -2078,7 +2085,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2078 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2085 priv->msg_enable = MLX4_EN_MSG_LEVEL;
2079 spin_lock_init(&priv->stats_lock); 2086 spin_lock_init(&priv->stats_lock);
2080 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2087 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
2081 INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
2082 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2088 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
2083 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2089 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
2084 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2090 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
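The en_netdev.c hunks above fold the old mac_task work item into a synchronous mlx4_en_do_set_mac() helper called under mdev->state_lock, so the ndo_set_mac_address path can return a real error instead of unconditionally queueing work, and the stats handler calls the same helper directly — which is why the mac_task member and its INIT_WORK() are removed. A minimal userspace sketch of that shape of refactor (deferred work replaced by a locked helper whose return value is propagated), using pthreads; all names below are illustrative stand-ins, not the mlx4 API:

```c
/*
 * Minimal userspace sketch of the refactor above: the old code queued a
 * work item that could not report failure; the new code calls a helper
 * under the state lock and propagates its return value.  All names are
 * illustrative stand-ins, not the mlx4 API.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct fake_port {
	pthread_mutex_t state_lock;
	int port_up;
	unsigned char mac[6];
};

static struct fake_port port = {
	.state_lock = PTHREAD_MUTEX_INITIALIZER,
	.port_up = 1,
};

/* Must be called with state_lock held (mirrors mlx4_en_do_set_mac()). */
static int do_set_mac(struct fake_port *p, const unsigned char *mac)
{
	if (!p->port_up) {
		fprintf(stderr, "port down, MAC will be programmed later\n");
		return 0;
	}
	memcpy(p->mac, mac, sizeof(p->mac));
	return 0;		/* a real driver would return the HW status */
}

/* ndo_set_mac_address-style entry point: lock, call helper, report error. */
static int set_mac(struct fake_port *p, const unsigned char *mac)
{
	int err;

	pthread_mutex_lock(&p->state_lock);
	err = do_set_mac(p, mac);
	pthread_mutex_unlock(&p->state_lock);
	return err;
}

int main(void)
{
	static const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("set_mac returned %d\n", set_mac(&port, mac));
	return 0;
}
```

The periodic stats path uses the same helper in place of queue_work(), so both callers now share one locking discipline and one error path.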
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 50917eb3013e..f6245579962d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -787,6 +787,14 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; 787 bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); 788 MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
789 789
790 /* turn off device-managed steering capability if not enabled */
791 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
792 MLX4_GET(field, outbox->buf,
793 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
794 field &= 0x7f;
795 MLX4_PUT(outbox->buf, field,
796 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
797 }
790 return 0; 798 return 0;
791} 799}
792 800
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d180bc46826a..16abde20e1fc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1555,7 +1555,7 @@ void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1555 1555
1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1556void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
1557{ 1557{
1558 u64 in_param; 1558 u64 in_param = 0;
1559 1559
1560 if (mlx4_is_mfunc(dev)) { 1560 if (mlx4_is_mfunc(dev)) {
1561 set_param_l(&in_param, idx); 1561 set_param_l(&in_param, idx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf883345af88..d738454116a0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1235,7 +1235,7 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
1235 1235
1236static inline void set_param_l(u64 *arg, u32 val) 1236static inline void set_param_l(u64 *arg, u32 val)
1237{ 1237{
1238 *((u32 *)arg) = val; 1238 *arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
1239} 1239}
1240 1240
1241static inline void set_param_h(u64 *arg, u32 val) 1241static inline void set_param_h(u64 *arg, u32 val)
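The mlx4.h change rewrites set_param_l() to update only the low 32 bits of the 64-bit mailbox parameter rather than storing through a u32 cast, which is endian-dependent and leaves the other half uninitialized; that is also why the callers in cq.c, main.c, mr.c, pd.c, port.c, qp.c and srq.c now start their in_param/out_param variables at 0. A standalone check of the new semantics — set_param_h() is re-declared here with its usual upper-half behaviour purely for the demo:

```c
/*
 * Standalone demo of the corrected set_param_l()/set_param_h() semantics:
 * each helper touches only its own half of the 64-bit parameter.
 * Build with: cc -o setparam setparam.c
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

static inline void set_param_l(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffff00000000ULL) | (u64) val;
}

/* Upper-half counterpart, re-declared here only for the demo. */
static inline void set_param_h(u64 *arg, u32 val)
{
	*arg = (*arg & 0xffffffffULL) | ((u64) val << 32);
}

int main(void)
{
	u64 param = 0;		/* callers in the patch now start from 0 */

	set_param_h(&param, 0xdeadbeef);
	set_param_l(&param, 0x1234);
	printf("param = 0x%016llx\n", (unsigned long long)param);
	/* prints: param = 0xdeadbeef00001234 */
	return 0;
}
```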
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index c313d7e943a9..f710b7ce0dcb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -509,7 +509,6 @@ struct mlx4_en_priv {
509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; 509 struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
510 struct mlx4_qp drop_qp; 510 struct mlx4_qp drop_qp;
511 struct work_struct rx_mode_task; 511 struct work_struct rx_mode_task;
512 struct work_struct mac_task;
513 struct work_struct watchdog_task; 512 struct work_struct watchdog_task;
514 struct work_struct linkstate_task; 513 struct work_struct linkstate_task;
515 struct delayed_work stats_task; 514 struct delayed_work stats_task;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 602ca9bf78e4..f91719a08cba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -183,7 +183,7 @@ u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
183 183
184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 184static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
185{ 185{
186 u64 in_param; 186 u64 in_param = 0;
187 u64 out_param; 187 u64 out_param;
188 int err; 188 int err;
189 189
@@ -240,7 +240,7 @@ void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
240 240
241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order) 241static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
242{ 242{
243 u64 in_param; 243 u64 in_param = 0;
244 int err; 244 int err;
245 245
246 if (mlx4_is_mfunc(dev)) { 246 if (mlx4_is_mfunc(dev)) {
@@ -351,7 +351,7 @@ void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
351 351
352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) 352static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index)
353{ 353{
354 u64 in_param; 354 u64 in_param = 0;
355 355
356 if (mlx4_is_mfunc(dev)) { 356 if (mlx4_is_mfunc(dev)) {
357 set_param_l(&in_param, index); 357 set_param_l(&in_param, index);
@@ -374,7 +374,7 @@ int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
374 374
375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) 375static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
376{ 376{
377 u64 param; 377 u64 param = 0;
378 378
379 if (mlx4_is_mfunc(dev)) { 379 if (mlx4_is_mfunc(dev)) {
380 set_param_l(&param, index); 380 set_param_l(&param, index);
@@ -395,7 +395,7 @@ void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
395 395
396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index) 396static void mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
397{ 397{
398 u64 in_param; 398 u64 in_param = 0;
399 399
400 if (mlx4_is_mfunc(dev)) { 400 if (mlx4_is_mfunc(dev)) {
401 set_param_l(&in_param, index); 401 set_param_l(&in_param, index);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 1ac88637ad9d..00f223acada7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -101,7 +101,7 @@ void __mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
101 101
102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn) 102void mlx4_xrcd_free(struct mlx4_dev *dev, u32 xrcdn)
103{ 103{
104 u64 in_param; 104 u64 in_param = 0;
105 int err; 105 int err;
106 106
107 if (mlx4_is_mfunc(dev)) { 107 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 719ead15e491..10c57c86388b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -175,7 +175,7 @@ EXPORT_SYMBOL_GPL(__mlx4_register_mac);
175 175
176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 176int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
177{ 177{
178 u64 out_param; 178 u64 out_param = 0;
179 int err; 179 int err;
180 180
181 if (mlx4_is_mfunc(dev)) { 181 if (mlx4_is_mfunc(dev)) {
@@ -222,7 +222,7 @@ EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
222 222
223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac) 223void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
224{ 224{
225 u64 out_param; 225 u64 out_param = 0;
226 226
227 if (mlx4_is_mfunc(dev)) { 227 if (mlx4_is_mfunc(dev)) {
228 set_param_l(&out_param, port); 228 set_param_l(&out_param, port);
@@ -361,7 +361,7 @@ out:
361 361
362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index) 362int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
363{ 363{
364 u64 out_param; 364 u64 out_param = 0;
365 int err; 365 int err;
366 366
367 if (mlx4_is_mfunc(dev)) { 367 if (mlx4_is_mfunc(dev)) {
@@ -406,7 +406,7 @@ out:
406 406
407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 407void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
408{ 408{
409 u64 in_param; 409 u64 in_param = 0;
410 int err; 410 int err;
411 411
412 if (mlx4_is_mfunc(dev)) { 412 if (mlx4_is_mfunc(dev)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 81e2abe07bbb..e891b058c1be 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -222,7 +222,7 @@ int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
222 222
223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base) 223int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
224{ 224{
225 u64 in_param; 225 u64 in_param = 0;
226 u64 out_param; 226 u64 out_param;
227 int err; 227 int err;
228 228
@@ -255,7 +255,7 @@ void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
255 255
256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) 256void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
257{ 257{
258 u64 in_param; 258 u64 in_param = 0;
259 int err; 259 int err;
260 260
261 if (mlx4_is_mfunc(dev)) { 261 if (mlx4_is_mfunc(dev)) {
@@ -319,7 +319,7 @@ err_out:
319 319
320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) 320static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
321{ 321{
322 u64 param; 322 u64 param = 0;
323 323
324 if (mlx4_is_mfunc(dev)) { 324 if (mlx4_is_mfunc(dev)) {
325 set_param_l(&param, qpn); 325 set_param_l(&param, qpn);
@@ -344,7 +344,7 @@ void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
344 344
345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) 345static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
346{ 346{
347 u64 in_param; 347 u64 in_param = 0;
348 348
349 if (mlx4_is_mfunc(dev)) { 349 if (mlx4_is_mfunc(dev)) {
350 set_param_l(&in_param, qpn); 350 set_param_l(&in_param, qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 083fb48dc3d7..2995687f1aee 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2990,6 +2990,9 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
2990 u8 steer_type_mask = 2; 2990 u8 steer_type_mask = 2;
2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1; 2991 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
2992 2992
2993 if (dev->caps.steering_mode != MLX4_STEERING_MODE_B0)
2994 return -EINVAL;
2995
2993 qpn = vhcr->in_modifier & 0xffffff; 2996 qpn = vhcr->in_modifier & 0xffffff;
2994 err = get_res(dev, slave, qpn, RES_QP, &rqp); 2997 err = get_res(dev, slave, qpn, RES_QP, &rqp);
2995 if (err) 2998 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index feda6c00829f..e329fe1f11b7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -149,7 +149,7 @@ void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
149 149
150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn) 150static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
151{ 151{
152 u64 in_param; 152 u64 in_param = 0;
153 153
154 if (mlx4_is_mfunc(dev)) { 154 if (mlx4_is_mfunc(dev)) {
155 set_param_l(&in_param, srqn); 155 set_param_l(&in_param, srqn);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 50247dfe8f57..d2f790df6dcb 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -171,9 +171,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
171 * TX scheduler is stopped when we're done and before 171 * TX scheduler is stopped when we're done and before
172 * netif_device_present() becomes false. 172 * netif_device_present() becomes false.
173 */ 173 */
174 netif_tx_lock(dev); 174 netif_tx_lock_bh(dev);
175 netif_device_detach(dev); 175 netif_device_detach(dev);
176 netif_tx_unlock(dev); 176 netif_tx_unlock_bh(dev);
177} 177}
178 178
179#endif /* EFX_EFX_H */ 179#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 0ad790cc473c..eaa8e874a3cb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -376,7 +376,8 @@ efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
376 return false; 376 return false;
377 377
378 tx_queue->empty_read_count = 0; 378 tx_queue->empty_read_count = 0;
379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 379 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
380 && tx_queue->write_count - write_count == 1;
380} 381}
381 382
382/* For each entry inserted into the software descriptor ring, create a 383/* For each entry inserted into the software descriptor ring, create a
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 879ff5849bbd..bb579a6128c8 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -215,7 +215,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
215 rx_buf = efx_rx_buffer(rx_queue, index); 215 rx_buf = efx_rx_buffer(rx_queue, index);
216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 216 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
217 rx_buf->u.page = page; 217 rx_buf->u.page = page;
218 rx_buf->page_offset = page_offset; 218 rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; 219 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
220 rx_buf->flags = EFX_RX_BUF_PAGE; 220 rx_buf->flags = EFX_RX_BUF_PAGE;
221 ++rx_queue->added_count; 221 ++rx_queue->added_count;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 01ffbc486982..75c48558e6fd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
905 /* If there is no more tx desc left free then we need to 905 /* If there is no more tx desc left free then we need to
906 * tell the kernel to stop sending us tx frames. 906 * tell the kernel to stop sending us tx frames.
907 */ 907 */
908 if (unlikely(cpdma_check_free_tx_desc(priv->txch))) 908 if (unlikely(!cpdma_check_free_tx_desc(priv->txch)))
909 netif_stop_queue(ndev); 909 netif_stop_queue(ndev);
910 910
911 return NETDEV_TX_OK; 911 return NETDEV_TX_OK;
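cpdma_check_free_tx_desc() answers whether free TX descriptors remain, so the queue has to be stopped when it returns false; the hunk above inverts the test in cpsw, and the davinci_emac.c hunk just below applies the same fix. A small standalone analogue of the intended "stop the producer once the ring is full after queueing" behaviour — the ring here is a plain counter, not the CPDMA API:

```c
/*
 * Userspace analogue of the cpsw/davinci_emac polarity fix: the helper
 * answers "is there still room?", so the producer must stop only when
 * the answer is false.  Names below are illustrative, not the CPDMA API.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 4

struct tx_ring {
	int used;
};

static bool ring_has_free_slot(const struct tx_ring *r)
{
	return r->used < RING_SIZE;
}

static void queue_packet(struct tx_ring *r, int pkt, bool *stopped)
{
	r->used++;		/* submit the descriptor */

	/*
	 * The pre-fix polarity would read: if (ring_has_free_slot(r))
	 * stop — which throttles the queue while it is still mostly
	 * empty and never stops it once it is actually full.
	 */
	if (!ring_has_free_slot(r)) {
		*stopped = true;
		printf("ring full after packet %d, stopping queue\n", pkt);
	}
}

int main(void)
{
	struct tx_ring ring = { 0 };
	bool stopped = false;
	int i;

	for (i = 0; i < RING_SIZE && !stopped; i++)
		queue_packet(&ring, i, &stopped);
	return 0;
}
```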
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 52c05366599a..ae1b77aa199f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1102,7 +1102,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1102 /* If there is no more tx desc left free then we need to 1102 /* If there is no more tx desc left free then we need to
1103 * tell the kernel to stop sending us tx frames. 1103 * tell the kernel to stop sending us tx frames.
1104 */ 1104 */
1105 if (unlikely(cpdma_check_free_tx_desc(priv->txchan))) 1105 if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
1106 netif_stop_queue(ndev); 1106 netif_stop_queue(ndev);
1107 1107
1108 return NETDEV_TX_OK; 1108 return NETDEV_TX_OK;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index e5b19b056909..3c4d6274bb9b 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -202,6 +202,9 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
202 return 0; 202 return 0;
203 203
204 out: 204 out:
205 if (rrpriv->evt_ring)
206 pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
207 rrpriv->evt_ring_dma);
205 if (rrpriv->rx_ring) 208 if (rrpriv->rx_ring)
206 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring, 209 pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
207 rrpriv->rx_ring_dma); 210 rrpriv->rx_ring_dma);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 417b2af1aa80..73abbc1655d5 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -660,6 +660,7 @@ void macvlan_common_setup(struct net_device *dev)
660 ether_setup(dev); 660 ether_setup(dev);
661 661
662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); 662 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
663 dev->priv_flags |= IFF_UNICAST_FLT;
663 dev->netdev_ops = &macvlan_netdev_ops; 664 dev->netdev_ops = &macvlan_netdev_ops;
664 dev->destructor = free_netdev; 665 dev->destructor = free_netdev;
665 dev->header_ops = &macvlan_hard_header_ops, 666 dev->header_ops = &macvlan_hard_header_ops,
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 37add21a3d7d..59ac143dec25 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -666,6 +666,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
666 goto done; 666 goto done;
667 667
668 spin_lock_irqsave(&target_list_lock, flags); 668 spin_lock_irqsave(&target_list_lock, flags);
669restart:
669 list_for_each_entry(nt, &target_list, list) { 670 list_for_each_entry(nt, &target_list, list) {
670 netconsole_target_get(nt); 671 netconsole_target_get(nt);
671 if (nt->np.dev == dev) { 672 if (nt->np.dev == dev) {
@@ -678,15 +679,17 @@ static int netconsole_netdev_event(struct notifier_block *this,
678 case NETDEV_UNREGISTER: 679 case NETDEV_UNREGISTER:
679 /* 680 /*
680 * rtnl_lock already held 681 * rtnl_lock already held
682 * we might sleep in __netpoll_cleanup()
681 */ 683 */
682 if (nt->np.dev) { 684 spin_unlock_irqrestore(&target_list_lock, flags);
683 __netpoll_cleanup(&nt->np); 685 __netpoll_cleanup(&nt->np);
684 dev_put(nt->np.dev); 686 spin_lock_irqsave(&target_list_lock, flags);
685 nt->np.dev = NULL; 687 dev_put(nt->np.dev);
686 } 688 nt->np.dev = NULL;
687 nt->enabled = 0; 689 nt->enabled = 0;
688 stopped = true; 690 stopped = true;
689 break; 691 netconsole_target_put(nt);
692 goto restart;
690 } 693 }
691 } 694 }
692 netconsole_target_put(nt); 695 netconsole_target_put(nt);
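The netconsole.c change drops target_list_lock around __netpoll_cleanup(), which may sleep, and then jumps back to the new restart: label because the target list may have changed while the lock was released. A runnable userspace sketch of that drop-the-lock-and-rescan pattern, using a pthread mutex and a fixed array in place of the kernel list and netpoll calls:

```c
/*
 * Userspace sketch of the netconsole.c pattern above: release the list
 * lock around a call that may sleep, reacquire it, and restart the walk
 * because the list may have changed in between.  Plain pthread analogue,
 * not the kernel netpoll API.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct target {
	int dev_id;
	int enabled;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct target targets[] = {
	{ .dev_id = 1, .enabled = 1 },
	{ .dev_id = 2, .enabled = 1 },
	{ .dev_id = 1, .enabled = 1 },
};
#define NTARGETS (sizeof(targets) / sizeof(targets[0]))

static void slow_cleanup(struct target *t)
{
	usleep(1000);	/* stands in for __netpoll_cleanup(), which may sleep */
	printf("cleaned up target on dev %d\n", t->dev_id);
}

static void handle_unregister(int dev_id)
{
	unsigned int i;

	pthread_mutex_lock(&list_lock);
restart:
	for (i = 0; i < NTARGETS; i++) {
		struct target *t = &targets[i];

		if (!t->enabled || t->dev_id != dev_id)
			continue;
		/*
		 * Cannot sleep with the lock held: drop it, do the slow
		 * work, retake it, and rescan from the top since entries
		 * may have been added or disabled meanwhile.
		 */
		pthread_mutex_unlock(&list_lock);
		slow_cleanup(t);
		pthread_mutex_lock(&list_lock);
		t->enabled = 0;
		goto restart;
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	handle_unregister(1);
	return 0;
}
```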
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 05c5efe84591..bf3419297875 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1138,6 +1138,8 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1138 netdev_upper_dev_unlink(port_dev, dev); 1138 netdev_upper_dev_unlink(port_dev, dev);
1139 team_port_disable_netpoll(port); 1139 team_port_disable_netpoll(port);
1140 vlan_vids_del_by_dev(port_dev, dev); 1140 vlan_vids_del_by_dev(port_dev, dev);
1141 dev_uc_unsync(port_dev, dev);
1142 dev_mc_unsync(port_dev, dev);
1141 dev_close(port_dev); 1143 dev_close(port_dev);
1142 team_port_leave(team, port); 1144 team_port_leave(team, port);
1143 1145
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2c6a22e278ea..b7c457adc0dc 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -747,6 +747,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
747 goto drop; 747 goto drop;
748 skb_orphan(skb); 748 skb_orphan(skb);
749 749
750 nf_reset(skb);
751
750 /* Enqueue packet */ 752 /* Enqueue packet */
751 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); 753 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
752 754
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3b6e9b83342d..7c769d8e25ad 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -268,7 +268,7 @@ config USB_NET_SMSC75XX
268 select CRC16 268 select CRC16
269 select CRC32 269 select CRC32
270 help 270 help
271 This option adds support for SMSC LAN95XX based USB 2.0 271 This option adds support for SMSC LAN75XX based USB 2.0
272 Gigabit Ethernet adapters. 272 Gigabit Ethernet adapters.
273 273
274config USB_NET_SMSC95XX 274config USB_NET_SMSC95XX
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 248d2dc765a5..16c842997291 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -68,18 +68,9 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
68 struct cdc_ncm_ctx *ctx; 68 struct cdc_ncm_ctx *ctx;
69 struct usb_driver *subdriver = ERR_PTR(-ENODEV); 69 struct usb_driver *subdriver = ERR_PTR(-ENODEV);
70 int ret = -ENODEV; 70 int ret = -ENODEV;
71 u8 data_altsetting = CDC_NCM_DATA_ALTSETTING_NCM; 71 u8 data_altsetting = cdc_ncm_select_altsetting(dev, intf);
72 struct cdc_mbim_state *info = (void *)&dev->data; 72 struct cdc_mbim_state *info = (void *)&dev->data;
73 73
74 /* see if interface supports MBIM alternate setting */
75 if (intf->num_altsetting == 2) {
76 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
77 usb_set_interface(dev->udev,
78 intf->cur_altsetting->desc.bInterfaceNumber,
79 CDC_NCM_COMM_ALTSETTING_MBIM);
80 data_altsetting = CDC_NCM_DATA_ALTSETTING_MBIM;
81 }
82
83 /* Probably NCM, defer for cdc_ncm_bind */ 74 /* Probably NCM, defer for cdc_ncm_bind */
84 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 75 if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
85 goto err; 76 goto err;
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 61b74a2b89ac..4709fa3497cf 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -55,6 +55,14 @@
55 55
56#define DRIVER_VERSION "14-Mar-2012" 56#define DRIVER_VERSION "14-Mar-2012"
57 57
58#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM)
59static bool prefer_mbim = true;
60#else
61static bool prefer_mbim;
62#endif
63module_param(prefer_mbim, bool, S_IRUGO | S_IWUSR);
64MODULE_PARM_DESC(prefer_mbim, "Prefer MBIM setting on dual NCM/MBIM functions");
65
58static void cdc_ncm_txpath_bh(unsigned long param); 66static void cdc_ncm_txpath_bh(unsigned long param);
59static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx); 67static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
60static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer); 68static enum hrtimer_restart cdc_ncm_tx_timer_cb(struct hrtimer *hr_timer);
@@ -550,9 +558,12 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
550} 558}
551EXPORT_SYMBOL_GPL(cdc_ncm_unbind); 559EXPORT_SYMBOL_GPL(cdc_ncm_unbind);
552 560
553static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 561/* Select the MBIM altsetting iff it is preferred and available,
562 * returning the number of the corresponding data interface altsetting
563 */
564u8 cdc_ncm_select_altsetting(struct usbnet *dev, struct usb_interface *intf)
554{ 565{
555 int ret; 566 struct usb_host_interface *alt;
556 567
557 /* The MBIM spec defines a NCM compatible default altsetting, 568 /* The MBIM spec defines a NCM compatible default altsetting,
558 * which we may have matched: 569 * which we may have matched:
@@ -568,23 +579,27 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
568 * endpoint descriptors, shall be constructed according to 579 * endpoint descriptors, shall be constructed according to
569 * the rules given in section 6 (USB Device Model) of this 580 * the rules given in section 6 (USB Device Model) of this
570 * specification." 581 * specification."
571 *
572 * Do not bind to such interfaces, allowing cdc_mbim to handle
573 * them
574 */ 582 */
575#if IS_ENABLED(CONFIG_USB_NET_CDC_MBIM) 583 if (prefer_mbim && intf->num_altsetting == 2) {
576 if ((intf->num_altsetting == 2) && 584 alt = usb_altnum_to_altsetting(intf, CDC_NCM_COMM_ALTSETTING_MBIM);
577 !usb_set_interface(dev->udev, 585 if (alt && cdc_ncm_comm_intf_is_mbim(alt) &&
578 intf->cur_altsetting->desc.bInterfaceNumber, 586 !usb_set_interface(dev->udev,
579 CDC_NCM_COMM_ALTSETTING_MBIM)) { 587 intf->cur_altsetting->desc.bInterfaceNumber,
580 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) 588 CDC_NCM_COMM_ALTSETTING_MBIM))
581 return -ENODEV; 589 return CDC_NCM_DATA_ALTSETTING_MBIM;
582 else
583 usb_set_interface(dev->udev,
584 intf->cur_altsetting->desc.bInterfaceNumber,
585 CDC_NCM_COMM_ALTSETTING_NCM);
586 } 590 }
587#endif 591 return CDC_NCM_DATA_ALTSETTING_NCM;
592}
593EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
594
595static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
596{
597 int ret;
598
599 /* MBIM backwards compatible function? */
600 cdc_ncm_select_altsetting(dev, intf);
601 if (cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
602 return -ENODEV;
588 603
589 /* NCM data altsetting is always 1 */ 604 /* NCM data altsetting is always 1 */
590 ret = cdc_ncm_bind_common(dev, intf, 1); 605 ret = cdc_ncm_bind_common(dev, intf, 1);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index efb5c7c33a28..968d5d50751d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -139,16 +139,9 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
139 139
140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); 140 BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
141 141
142 /* control and data is shared? */ 142 /* set up initial state */
143 if (intf->cur_altsetting->desc.bNumEndpoints == 3) { 143 info->control = intf;
144 info->control = intf; 144 info->data = intf;
145 info->data = intf;
146 goto shared;
147 }
148
149 /* else require a single interrupt status endpoint on control intf */
150 if (intf->cur_altsetting->desc.bNumEndpoints != 1)
151 goto err;
152 145
153 /* and a number of CDC descriptors */ 146 /* and a number of CDC descriptors */
154 while (len > 3) { 147 while (len > 3) {
@@ -207,25 +200,14 @@ next_desc:
207 buf += h->bLength; 200 buf += h->bLength;
208 } 201 }
209 202
210 /* did we find all the required ones? */ 203 /* Use separate control and data interfaces if we found a CDC Union */
211 if (!(found & (1 << USB_CDC_HEADER_TYPE)) || 204 if (cdc_union) {
212 !(found & (1 << USB_CDC_UNION_TYPE))) { 205 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
213 dev_err(&intf->dev, "CDC functional descriptors missing\n"); 206 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0 || !info->data) {
214 goto err; 207 dev_err(&intf->dev, "bogus CDC Union: master=%u, slave=%u\n",
215 } 208 cdc_union->bMasterInterface0, cdc_union->bSlaveInterface0);
216 209 goto err;
217 /* verify CDC Union */ 210 }
218 if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
219 dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
220 goto err;
221 }
222
223 /* need to save these for unbind */
224 info->control = intf;
225 info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
226 if (!info->data) {
227 dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
228 goto err;
229 } 211 }
230 212
231 /* errors aren't fatal - we can live with the dynamic address */ 213 /* errors aren't fatal - we can live with the dynamic address */
@@ -235,11 +217,12 @@ next_desc:
235 } 217 }
236 218
237 /* claim data interface and set it up */ 219 /* claim data interface and set it up */
238 status = usb_driver_claim_interface(driver, info->data, dev); 220 if (info->control != info->data) {
239 if (status < 0) 221 status = usb_driver_claim_interface(driver, info->data, dev);
240 goto err; 222 if (status < 0)
223 goto err;
224 }
241 225
242shared:
243 status = qmi_wwan_register_subdriver(dev); 226 status = qmi_wwan_register_subdriver(dev);
244 if (status < 0 && info->control != info->data) { 227 if (status < 0 && info->control != info->data) {
245 usb_set_intfdata(info->data, NULL); 228 usb_set_intfdata(info->data, NULL);
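The reworked qmi_wwan_bind() starts with control == data, walks the extra class-specific descriptors, and only claims a separate data interface when a well-formed CDC Union descriptor turns up. The walk itself is the usual "advance by bLength" loop seen above; a standalone sketch over a fabricated descriptor blob (the type/subtype constants follow the CDC spec values, the payload bytes are invented):

```c
/*
 * Standalone sketch of the descriptor walk used in qmi_wwan_bind(): step
 * through class-specific descriptors by bLength and remember the CDC
 * Union, which names the master (control) and slave (data) interfaces.
 * The byte blob below is fabricated for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define USB_DT_CS_INTERFACE	0x24
#define USB_CDC_UNION_TYPE	0x06	/* per the CDC spec */

int main(void)
{
	/* bLength, bDescriptorType, bDescriptorSubType, payload... */
	const uint8_t blob[] = {
		5, USB_DT_CS_INTERFACE, 0x00, 0x10, 0x01,	   /* header */
		5, USB_DT_CS_INTERFACE, USB_CDC_UNION_TYPE, 8, 9,  /* union  */
	};
	const uint8_t *buf = blob;
	int len = sizeof(blob);
	int master = -1, slave = -1;

	while (len > 3) {
		if (buf[0] < 3 || buf[0] > len)
			break;			/* malformed descriptor */
		if (buf[1] == USB_DT_CS_INTERFACE &&
		    buf[2] == USB_CDC_UNION_TYPE && buf[0] >= 5) {
			master = buf[3];	/* bMasterInterface0 */
			slave = buf[4];		/* bSlaveInterface0 */
		}
		len -= buf[0];
		buf += buf[0];
	}

	if (master >= 0)
		printf("CDC Union: master=%d slave=%d\n", master, slave);
	else
		printf("no CDC Union, keeping a shared control/data interface\n");
	return 0;
}
```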
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 4aad350e4dae..eae7a03d4f9b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2958,6 +2958,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2958 2958
2959 adapter->num_rx_queues = num_rx_queues; 2959 adapter->num_rx_queues = num_rx_queues;
2960 adapter->num_tx_queues = num_tx_queues; 2960 adapter->num_tx_queues = num_tx_queues;
2961 adapter->rx_buf_per_pkt = 1;
2961 2962
2962 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues; 2963 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2963 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues; 2964 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index a0feb17a0238..63a124340cbe 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -472,6 +472,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
472 VMXNET3_RX_RING_MAX_SIZE) 472 VMXNET3_RX_RING_MAX_SIZE)
473 return -EINVAL; 473 return -EINVAL;
474 474
475 /* if adapter not yet initialized, do nothing */
476 if (adapter->rx_buf_per_pkt == 0) {
477 netdev_err(netdev, "adapter not completely initialized, "
478 "ring size cannot be changed yet\n");
479 return -EOPNOTSUPP;
480 }
475 481
476 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ 482 /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */
477 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & 483 new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) &
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3198384689d9..35418146fa17 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
70/* 70/*
71 * Version numbers 71 * Version numbers
72 */ 72 */
73#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k" 73#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"
74 74
75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
76#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00 76#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00
77 77
78#if defined(CONFIG_PCI_MSI) 78#if defined(CONFIG_PCI_MSI)
79 /* RSS only makes sense if MSI-X is supported. */ 79 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index f10e58ac9c1b..7cee7a3068ec 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -961,6 +961,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
962 tunnel_ip_select_ident(skb, old_iph, &rt->dst); 962 tunnel_ip_select_ident(skb, old_iph, &rt->dst);
963 963
964 nf_reset(skb);
965
964 vxlan_set_owner(dev, skb); 966 vxlan_set_owner(dev, skb);
965 967
966 /* See iptunnel_xmit() */ 968 /* See iptunnel_xmit() */
@@ -1504,6 +1506,14 @@ static __net_init int vxlan_init_net(struct net *net)
1504static __net_exit void vxlan_exit_net(struct net *net) 1506static __net_exit void vxlan_exit_net(struct net *net)
1505{ 1507{
1506 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 1508 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1509 struct vxlan_dev *vxlan;
1510 unsigned h;
1511
1512 rtnl_lock();
1513 for (h = 0; h < VNI_HASH_SIZE; ++h)
1514 hlist_for_each_entry(vxlan, &vn->vni_list[h], hlist)
1515 dev_close(vxlan->dev);
1516 rtnl_unlock();
1507 1517
1508 if (vn->sock) { 1518 if (vn->sock) {
1509 sk_release_kernel(vn->sock->sk); 1519 sk_release_kernel(vn->sock->sk);
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index 94ef33838bc6..b775769f8322 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : ""); 151 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
152 152
153 if (!(flags & CMD_ASYNC)) { 153 if (!(flags & CMD_ASYNC)) {
154 cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD; 154 cmd.flags |= CMD_WANT_SKB;
155 might_sleep(); 155 might_sleep();
156 } 156 }
157 157
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 10f01793d7a6..81aa91fab5aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
363 __entry->flags = cmd->flags; 363 __entry->flags = cmd->flags;
364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr)); 364 memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
365 365
366 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 366 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
367 if (!cmd->len[i]) 367 if (!cmd->len[i])
368 continue; 368 continue;
369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset, 369 memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f228bb2b844..fbfd2d137117 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
1102 1102
1103/* shared module parameters */ 1103/* shared module parameters */
1104struct iwl_mod_params iwlwifi_mod_params = { 1104struct iwl_mod_params iwlwifi_mod_params = {
1105 .amsdu_size_8K = 1,
1106 .restart_fw = 1, 1105 .restart_fw = 1,
1107 .plcp_check = true, 1106 .plcp_check = true,
1108 .bt_coex_active = true, 1107 .bt_coex_active = true,
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
1207 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX"); 1206 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1208module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1209 int, S_IRUGO); 1208 int, S_IRUGO);
1210MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1211module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); 1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
1212MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1213 1212
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index e5e3a79eae2f..2c2a729092f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -91,7 +91,7 @@ enum iwl_power_level {
91 * @sw_crypto: using hardware encryption, default = 0 91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0, 92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants 93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1 94 * @amsdu_size_8K: enable 8K amsdu size, default = 0
95 * @restart_fw: restart firmware, default = 1 95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true 96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0 97 * @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8c7bec6b9a0b..0cac2b7af78b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
186 * @CMD_ASYNC: Return right away and don't want for the response 186 * @CMD_ASYNC: Return right away and don't want for the response
187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the 187 * @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
188 * response. The caller needs to call iwl_free_resp when done. 188 * response. The caller needs to call iwl_free_resp when done.
189 * @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
190 * response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
191 * copied. The pointer passed to the response handler is in the transport
192 * ownership and don't need to be freed by the op_mode. This also means
193 * that the pointer is invalidated after the op_mode's handler returns.
194 * @CMD_ON_DEMAND: This command is sent by the test mode pipe. 189 * @CMD_ON_DEMAND: This command is sent by the test mode pipe.
195 */ 190 */
196enum CMD_MODE { 191enum CMD_MODE {
197 CMD_SYNC = 0, 192 CMD_SYNC = 0,
198 CMD_ASYNC = BIT(0), 193 CMD_ASYNC = BIT(0),
199 CMD_WANT_SKB = BIT(1), 194 CMD_WANT_SKB = BIT(1),
200 CMD_WANT_HCMD = BIT(2), 195 CMD_ON_DEMAND = BIT(2),
201 CMD_ON_DEMAND = BIT(3),
202}; 196};
203 197
204#define DEF_CMD_PAYLOAD_SIZE 320 198#define DEF_CMD_PAYLOAD_SIZE 320
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
217 211
218#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) 212#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
219 213
220#define IWL_MAX_CMD_TFDS 2 214/*
215 * number of transfer buffers (fragments) per transmit frame descriptor;
216 * this is just the driver's idea, the hardware supports 20
217 */
218#define IWL_MAX_CMD_TBS_PER_TFD 2
221 219
222/** 220/**
223 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command 221 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
254 * @id: id of the host command 252 * @id: id of the host command
255 */ 253 */
256struct iwl_host_cmd { 254struct iwl_host_cmd {
257 const void *data[IWL_MAX_CMD_TFDS]; 255 const void *data[IWL_MAX_CMD_TBS_PER_TFD];
258 struct iwl_rx_packet *resp_pkt; 256 struct iwl_rx_packet *resp_pkt;
259 unsigned long _rx_page_addr; 257 unsigned long _rx_page_addr;
260 u32 _rx_page_order; 258 u32 _rx_page_order;
261 int handler_status; 259 int handler_status;
262 260
263 u32 flags; 261 u32 flags;
264 u16 len[IWL_MAX_CMD_TFDS]; 262 u16 len[IWL_MAX_CMD_TBS_PER_TFD];
265 u8 dataflags[IWL_MAX_CMD_TFDS]; 263 u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
266 u8 id; 264 u8 id;
267}; 265};
268 266
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 23eebda848b0..2adb61f103f4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
762#define IWL_RX_INFO_PHY_CNT 8 762#define IWL_RX_INFO_PHY_CNT 8
763#define IWL_RX_INFO_AGC_IDX 1 763#define IWL_RX_INFO_AGC_IDX 1
764#define IWL_RX_INFO_RSSI_AB_IDX 2 764#define IWL_RX_INFO_RSSI_AB_IDX 2
765#define IWL_RX_INFO_RSSI_C_IDX 3 765#define IWL_OFDM_AGC_A_MSK 0x0000007f
766#define IWL_OFDM_AGC_DB_MSK 0xfe00 766#define IWL_OFDM_AGC_A_POS 0
767#define IWL_OFDM_AGC_DB_POS 9 767#define IWL_OFDM_AGC_B_MSK 0x00003f80
768#define IWL_OFDM_AGC_B_POS 7
769#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
770#define IWL_OFDM_AGC_CODE_POS 20
768#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff 771#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
769#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
770#define IWL_OFDM_RSSI_A_POS 0 772#define IWL_OFDM_RSSI_A_POS 0
773#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
774#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
771#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000 775#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
772#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
773#define IWL_OFDM_RSSI_B_POS 16 776#define IWL_OFDM_RSSI_B_POS 16
774#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff 777#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
775#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00 778#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
776#define IWL_OFDM_RSSI_C_POS 0
777 779
778/** 780/**
779 * struct iwl_rx_phy_info - phy info 781 * struct iwl_rx_phy_info - phy info
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index d3d959db03a9..500f818dba04 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -79,17 +79,8 @@
79#define UCODE_VALID_OK cpu_to_le32(0x1) 79#define UCODE_VALID_OK cpu_to_le32(0x1)
80 80
81/* Default calibration values for WkP - set to INIT image w/o running */ 81/* Default calibration values for WkP - set to INIT image w/o running */
82static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
83 0x00, 0x18, 0x00 };
84static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
85 0x7f, 0x7f, 0x7f };
86static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
87static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
88 0x00 };
89static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
90static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 }; 82static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
91static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 }; 83static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
92static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
93 84
94struct iwl_calib_default_data { 85struct iwl_calib_default_data {
95 u16 size; 86 u16 size;
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
99#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf} 90#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
100 91
101static const struct iwl_calib_default_data wkp_calib_default_data[12] = { 92static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
102 [5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
103 [6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
104 [7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
105 [8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
106 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew), 93 [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
107 [10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
108 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew), 94 [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
109}; 95};
110 96
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
241 227
242 return 0; 228 return 0;
243} 229}
244#define IWL_HW_REV_ID_RAINBOW 0x2
245#define IWL_PROJ_TYPE_LHP 0x5
246
247static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
248{
249 struct iwl_nvm_data *data = mvm->nvm_data;
250 /* Temp calls to static definitions, will be changed to CSR calls */
251 u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
252 u8 project_type = IWL_PROJ_TYPE_LHP;
253
254 return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
255 (hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
256 (data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
257}
258 230
259static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) 231static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
260{ 232{
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
262 enum iwl_ucode_type ucode_type = mvm->cur_ucode; 234 enum iwl_ucode_type ucode_type = mvm->cur_ucode;
263 235
264 /* Set parameters */ 236 /* Set parameters */
265 phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm)); 237 phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
266 phy_cfg_cmd.calib_control.event_trigger = 238 phy_cfg_cmd.calib_control.event_trigger =
267 mvm->fw->default_calib[ucode_type].event_trigger; 239 mvm->fw->default_calib[ucode_type].event_trigger;
268 phy_cfg_cmd.calib_control.flow_trigger = 240 phy_cfg_cmd.calib_control.flow_trigger =
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
275 sizeof(phy_cfg_cmd), &phy_cfg_cmd); 247 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
276} 248}
277 249
278/* Starting with the new PHY DB implementation - New calibs are enabled */
279/* Value - 0x405e7 */
280#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
281 IWL_CALIB_CFG_TEMPERATURE_IDX |\
282 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
283 IWL_CALIB_CFG_DC_IDX |\
284 IWL_CALIB_CFG_BB_FILTER_IDX |\
285 IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
286 IWL_CALIB_CFG_TX_IQ_IDX |\
287 IWL_CALIB_CFG_RX_IQ_IDX |\
288 IWL_CALIB_CFG_AGC_IDX)
289
290#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
291
292/* Value 0x41567 */
293#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
294 IWL_CALIB_CFG_TEMPERATURE_IDX |\
295 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
296 IWL_CALIB_CFG_BB_FILTER_IDX |\
297 IWL_CALIB_CFG_DC_IDX |\
298 IWL_CALIB_CFG_TX_IQ_IDX |\
299 IWL_CALIB_CFG_RX_IQ_IDX |\
300 IWL_CALIB_CFG_SENSITIVITY_IDX |\
301 IWL_CALIB_CFG_AGC_IDX)
302
303#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
304 IWL_CALIB_CFG_TEMPERATURE_IDX |\
305 IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
306 IWL_CALIB_CFG_TX_PWR_IDX |\
307 IWL_CALIB_CFG_DC_IDX |\
308 IWL_CALIB_CFG_TX_IQ_IDX |\
309 IWL_CALIB_CFG_SENSITIVITY_IDX)
310
311/*
312 * Sets the calibrations trigger values that will be sent to the FW for runtime
313 * and init calibrations.
314 * The ones given in the FW TLV are not correct.
315 */
316static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
317{
318 struct iwl_tlv_calib_ctrl default_calib;
319
320 /*
321 * WkP FW TLV calib bits are wrong, overwrite them.
322 * This defines the dynamic calibrations which are implemented in the
323 * uCode both for init(flow) calculation and event driven calibs.
324 */
325
326 /* Init Image */
327 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
328 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
329
330 if (default_calib.event_trigger !=
331 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
332 IWL_ERR(mvm,
333 "Updating the event calib for INIT image: 0x%x -> 0x%x\n",
334 mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
335 default_calib.event_trigger);
336 if (default_calib.flow_trigger !=
337 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
338 IWL_ERR(mvm,
339 "Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
340 mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
341 default_calib.flow_trigger);
342
343 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
344 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
345 IWL_ERR(mvm,
346 "Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
347 default_calib.event_trigger,
348 default_calib.flow_trigger);
349
350 /* Run time image */
351 default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
352 default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
353
354 if (default_calib.event_trigger !=
355 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
356 IWL_ERR(mvm,
357 "Updating the event calib for RT image: 0x%x -> 0x%x\n",
358 mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
359 default_calib.event_trigger);
360 if (default_calib.flow_trigger !=
361 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
362 IWL_ERR(mvm,
363 "Updating the flow calib for RT image: 0x%x -> 0x%x\n",
364 mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
365 default_calib.flow_trigger);
366
367 memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
368 &default_calib, sizeof(struct iwl_tlv_calib_ctrl));
369 IWL_ERR(mvm,
370 "Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
371 default_calib.event_trigger,
372 default_calib.flow_trigger);
373}
374
375static int iwl_set_default_calibrations(struct iwl_mvm *mvm) 250static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
376{ 251{
377 u8 cmd_raw[16]; /* holds the variable size commands */ 252 u8 cmd_raw[16]; /* holds the variable size commands */
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
446 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans); 321 ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
447 WARN_ON(ret); 322 WARN_ON(ret);
448 323
449 /* Override the calibrations from TLV and the const of fw */ 324 /* Send TX valid antennas before triggering calibrations */
450 iwl_set_default_calib_trigger(mvm); 325 ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
326 if (ret)
327 goto error;
451 328
452 /* WkP doesn't have all calibrations, need to set default values */ 329 /* WkP doesn't have all calibrations, need to set default values */
453 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 330 if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 537711b10478..bdae700c769e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -80,7 +80,8 @@
80 80
81#define IWL_INVALID_MAC80211_QUEUE 0xff 81#define IWL_INVALID_MAC80211_QUEUE 0xff
82#define IWL_MVM_MAX_ADDRESSES 2 82#define IWL_MVM_MAX_ADDRESSES 2
83#define IWL_RSSI_OFFSET 44 83/* RSSI offset for WkP */
84#define IWL_RSSI_OFFSET 50
84 85
85enum iwl_mvm_tx_fifo { 86enum iwl_mvm_tx_fifo {
86 IWL_MVM_TX_FIFO_BK = 0, 87 IWL_MVM_TX_FIFO_BK = 0,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index aa59adf87db3..d0f9c1e0475e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
624 ieee80211_free_txskb(mvm->hw, skb); 624 ieee80211_free_txskb(mvm->hw, skb);
625} 625}
626 626
627static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode) 627static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
628{ 628{
629 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
630
631 iwl_mvm_dump_nic_error_log(mvm);
632
633 iwl_abort_notification_waits(&mvm->notif_wait); 629 iwl_abort_notification_waits(&mvm->notif_wait);
634 630
635 /* 631 /*
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663 } 659 }
664} 660}
665 661
662static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
663{
664 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
665
666 iwl_mvm_dump_nic_error_log(mvm);
667
668 iwl_mvm_nic_restart(mvm);
669}
670
666static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode) 671static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
667{ 672{
673 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
674
668 WARN_ON(1); 675 WARN_ON(1);
676 iwl_mvm_nic_restart(mvm);
669} 677}
670 678
671static const struct iwl_op_mode_ops iwl_mvm_ops = { 679static const struct iwl_op_mode_ops iwl_mvm_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index 3f40ab05bbd8..b0b190d0ec23 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm, 131static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
132 struct iwl_rx_phy_info *phy_info) 132 struct iwl_rx_phy_info *phy_info)
133{ 133{
134 u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db; 134 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
135 int rssi_all_band_a, rssi_all_band_b;
136 u32 agc_a, agc_b, max_agc;
135 u32 val; 137 u32 val;
136 138
137 /* Find max rssi among 3 possible receivers. 139 /* Find max rssi among 2 possible receivers.
138 * These values are measured by the Digital Signal Processor (DSP). 140 * These values are measured by the Digital Signal Processor (DSP).
139 * They should stay fairly constant even as the signal strength varies, 141 * They should stay fairly constant even as the signal strength varies,
140 * if the radio's Automatic Gain Control (AGC) is working right. 142 * if the radio's Automatic Gain Control (AGC) is working right.
141 * AGC value (see below) will provide the "interesting" info. 143 * AGC value (see below) will provide the "interesting" info.
142 */ 144 */
145 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
146 agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
147 agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
148 max_agc = max_t(u32, agc_a, agc_b);
149
143 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]); 150 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
144 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS; 151 rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
145 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS; 152 rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
146 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]); 153 rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
147 rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS; 154 IWL_OFDM_RSSI_ALLBAND_A_POS;
148 155 rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
149 val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]); 156 IWL_OFDM_RSSI_ALLBAND_B_POS;
150 agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
151 157
152 max_rssi = max_t(u32, rssi_a, rssi_b); 158 /*
153 max_rssi = max_t(u32, max_rssi, rssi_c); 159 * dBm = rssi dB - agc dB - constant.
160 * Higher AGC (higher radio gain) means lower signal.
161 */
162 rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
163 rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
164 max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
154 165
155 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n", 166 IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
156 rssi_a, rssi_b, rssi_c, max_rssi, agc_db); 167 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
157 168
158 /* dBm = max_rssi dB - agc dB - constant. 169 return max_rssi_dbm;
159 * Higher AGC (higher radio gain) means lower signal. */
160 return max_rssi - agc_db - IWL_RSSI_OFFSET;
161} 170}
162 171
163/* 172/*
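
Note on the RSSI rework above: the driver now converts each antenna's in-band RSSI to dBm separately, using that antenna's own AGC reading, and only then takes the maximum, instead of taking a raw max and subtracting a single AGC value. A minimal standalone sketch of the arithmetic follows; the offset constant and the example inputs are stand-ins for illustration, not the driver's actual values.

/* Standalone illustration of the per-antenna dBm conversion used above.
 * RSSI_OFFSET_DB is a stand-in; the driver uses IWL_RSSI_OFFSET. */
#include <stdio.h>

#define RSSI_OFFSET_DB 44	/* assumed value, for the example only */

static int rssi_to_dbm(int rssi_db, int agc_db)
{
	/* dBm = rssi dB - agc dB - constant; higher AGC means lower signal */
	return rssi_db - RSSI_OFFSET_DB - agc_db;
}

int main(void)
{
	int a = rssi_to_dbm(90, 20);	/* antenna A */
	int b = rssi_to_dbm(85, 15);	/* antenna B */
	int max = a > b ? a : b;

	printf("A %d dBm, B %d dBm, max %d dBm\n", a, b, max);
	return 0;
}
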
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 861a7f9f8e7f..274f44e2ef60 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
770 u16 txq_id; 770 u16 txq_id;
771 int err; 771 int err;
772 772
773
774 /*
775 * If mac80211 is cleaning its state, then say that we finished since
776 * our state has been cleared anyway.
777 */
778 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
779 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
780 return 0;
781 }
782
773 spin_lock_bh(&mvmsta->lock); 783 spin_lock_bh(&mvmsta->lock);
774 784
775 txq_id = tid_data->txq_id; 785 txq_id = tid_data->txq_id;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6b67ce3f679c..6645efe5c03e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
607 607
608 /* Single frame failure in an AMPDU queue => send BAR */ 608 /* Single frame failure in an AMPDU queue => send BAR */
609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE && 609 if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
610 !(info->flags & IEEE80211_TX_STAT_ACK)) { 610 !(info->flags & IEEE80211_TX_STAT_ACK))
611 /* there must be only one skb in the skb_list */
612 WARN_ON_ONCE(skb_freed > 1 ||
613 !skb_queue_empty(&skbs));
614 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 611 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
615 }
616 612
617 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */ 613 /* W/A FW bug: seq_ctl is wrong when the queue is flushed */
618 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) { 614 if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 3d62e8055352..148843e7f34f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
137struct iwl_cmd_meta { 137struct iwl_cmd_meta {
138 /* only for SYNC commands, iff the reply skb is wanted */ 138 /* only for SYNC commands, iff the reply skb is wanted */
139 struct iwl_host_cmd *source; 139 struct iwl_host_cmd *source;
140
141 DEFINE_DMA_UNMAP_ADDR(mapping);
142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags; 140 u32 flags;
145}; 141};
146 142
@@ -185,25 +181,36 @@ struct iwl_queue {
185/* 181/*
186 * The FH will write back to the first TB only, so we need 182 * The FH will write back to the first TB only, so we need
187 * to copy some data into the buffer regardless of whether 183 * to copy some data into the buffer regardless of whether
188 * it should be mapped or not. This indicates how much to 184 * it should be mapped or not. This indicates how big the
189 * copy, even for HCMDs it must be big enough to fit the 185 * first TB must be to include the scratch buffer. Since
190 * DRAM scratch from the TX cmd, at least 16 bytes. 186 * the scratch is 4 bytes at offset 12, it's 16 now. If we
187 * make it bigger then allocations will be bigger and copy
188 * slower, so that's probably not useful.
191 */ 189 */
192#define IWL_HCMD_MIN_COPY_SIZE 16 190#define IWL_HCMD_SCRATCHBUF_SIZE 16
193 191
194struct iwl_pcie_txq_entry { 192struct iwl_pcie_txq_entry {
195 struct iwl_device_cmd *cmd; 193 struct iwl_device_cmd *cmd;
196 struct iwl_device_cmd *copy_cmd;
197 struct sk_buff *skb; 194 struct sk_buff *skb;
198 /* buffer to free after command completes */ 195 /* buffer to free after command completes */
199 const void *free_buf; 196 const void *free_buf;
200 struct iwl_cmd_meta meta; 197 struct iwl_cmd_meta meta;
201}; 198};
202 199
200struct iwl_pcie_txq_scratch_buf {
201 struct iwl_cmd_header hdr;
202 u8 buf[8];
203 __le32 scratch;
204};
205
203/** 206/**
204 * struct iwl_txq - Tx Queue for DMA 207 * struct iwl_txq - Tx Queue for DMA
205 * @q: generic Rx/Tx queue descriptor 208 * @q: generic Rx/Tx queue descriptor
206 * @tfds: transmit frame descriptors (DMA memory) 209 * @tfds: transmit frame descriptors (DMA memory)
210 * @scratchbufs: start of command headers, including scratch buffers, for
211 * the writeback -- this is DMA memory and an array holding one buffer
212 * for each command on the queue
213 * @scratchbufs_dma: DMA address for the scratchbufs start
207 * @entries: transmit entries (driver state) 214 * @entries: transmit entries (driver state)
208 * @lock: queue lock 215 * @lock: queue lock
209 * @stuck_timer: timer that fires if queue gets stuck 216 * @stuck_timer: timer that fires if queue gets stuck
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
217struct iwl_txq { 224struct iwl_txq {
218 struct iwl_queue q; 225 struct iwl_queue q;
219 struct iwl_tfd *tfds; 226 struct iwl_tfd *tfds;
227 struct iwl_pcie_txq_scratch_buf *scratchbufs;
228 dma_addr_t scratchbufs_dma;
220 struct iwl_pcie_txq_entry *entries; 229 struct iwl_pcie_txq_entry *entries;
221 spinlock_t lock; 230 spinlock_t lock;
222 struct timer_list stuck_timer; 231 struct timer_list stuck_timer;
@@ -225,6 +234,13 @@ struct iwl_txq {
225 u8 active; 234 u8 active;
226}; 235};
227 236
237static inline dma_addr_t
238iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
239{
240 return txq->scratchbufs_dma +
241 sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
242}
243
228/** 244/**
229 * struct iwl_trans_pcie - PCIe transport specific data 245 * struct iwl_trans_pcie - PCIe transport specific data
230 * @rxq: all the RX queue data 246 * @rxq: all the RX queue data
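
The 16-byte figure above comes straight from the layout of struct iwl_pcie_txq_scratch_buf: a 4-byte command header, 8 bytes of payload, and the 4-byte scratch word at offset 12. The standalone sketch below checks that arithmetic; the 4-byte header is modelled here with a stand-in struct, which is an assumption for illustration only.

/* Standalone check of the scratch-buffer layout described above.
 * hdr_stub is a stand-in for struct iwl_cmd_header. */
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

struct hdr_stub {		/* assumed 4-byte command header */
	uint8_t cmd, flags;
	uint16_t sequence;
};

struct scratch_buf_stub {
	struct hdr_stub hdr;	/* 4 bytes            */
	uint8_t buf[8];		/* 8 bytes            */
	uint32_t scratch;	/* 4 bytes, offset 12 */
};

int main(void)
{
	assert(offsetof(struct scratch_buf_stub, scratch) == 12);
	assert(sizeof(struct scratch_buf_stub) == 16);	/* IWL_HCMD_SCRATCHBUF_SIZE */
	return 0;
}
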
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index b0ae06d2456f..567e67ad1f61 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
637 index = SEQ_TO_INDEX(sequence); 637 index = SEQ_TO_INDEX(sequence);
638 cmd_index = get_cmd_index(&txq->q, index); 638 cmd_index = get_cmd_index(&txq->q, index);
639 639
640 if (reclaim) { 640 if (reclaim)
641 struct iwl_pcie_txq_entry *ent; 641 cmd = txq->entries[cmd_index].cmd;
642 ent = &txq->entries[cmd_index]; 642 else
643 cmd = ent->copy_cmd;
644 WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
645 } else {
646 cmd = NULL; 643 cmd = NULL;
647 }
648 644
649 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 645 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
650 646
651 if (reclaim) { 647 if (reclaim) {
652 /* The original command isn't needed any more */
653 kfree(txq->entries[cmd_index].copy_cmd);
654 txq->entries[cmd_index].copy_cmd = NULL;
655 /* nor is the duplicated part of the command */
656 kfree(txq->entries[cmd_index].free_buf); 648 kfree(txq->entries[cmd_index].free_buf);
657 txq->entries[cmd_index].free_buf = NULL; 649 txq->entries[cmd_index].free_buf = NULL;
658 } 650 }
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f5685..8595c16f74de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
191 } 191 }
192 192
193 for (i = q->read_ptr; i != q->write_ptr; 193 for (i = q->read_ptr; i != q->write_ptr;
194 i = iwl_queue_inc_wrap(i, q->n_bd)) { 194 i = iwl_queue_inc_wrap(i, q->n_bd))
195 struct iwl_tx_cmd *tx_cmd =
196 (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
197 IWL_ERR(trans, "scratch %d = 0x%08x\n", i, 195 IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
198 get_unaligned_le32(&tx_cmd->scratch)); 196 le32_to_cpu(txq->scratchbufs[i].scratch));
199 }
200 197
201 iwl_op_mode_nic_error(trans->op_mode); 198 iwl_op_mode_nic_error(trans->op_mode);
202} 199}
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
367} 364}
368 365
369static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, 366static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
370 struct iwl_cmd_meta *meta, struct iwl_tfd *tfd, 367 struct iwl_cmd_meta *meta,
371 enum dma_data_direction dma_dir) 368 struct iwl_tfd *tfd)
372{ 369{
373 int i; 370 int i;
374 int num_tbs; 371 int num_tbs;
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
382 return; 379 return;
383 } 380 }
384 381
385 /* Unmap tx_cmd */ 382 /* first TB is never freed - it's the scratchbuf data */
386 if (num_tbs)
387 dma_unmap_single(trans->dev,
388 dma_unmap_addr(meta, mapping),
389 dma_unmap_len(meta, len),
390 DMA_BIDIRECTIONAL);
391 383
392 /* Unmap chunks, if any. */
393 for (i = 1; i < num_tbs; i++) 384 for (i = 1; i < num_tbs; i++)
394 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i), 385 dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
395 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir); 386 iwl_pcie_tfd_tb_get_len(tfd, i),
387 DMA_TO_DEVICE);
396 388
397 tfd->num_tbs = 0; 389 tfd->num_tbs = 0;
398} 390}
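
The unmap change above relies on the first TB always pointing into the coherent scratch area, which is never mapped per packet; only TBs 1..num_tbs-1 need unmapping, and since the device only reads them, DMA_TO_DEVICE suffices. Below is a tiny standalone sketch of the "skip slot 0" cleanup loop, with release() standing in for dma_unmap_single().

/* Standalone sketch of the skip-TB-0 cleanup loop used above;
 * release() is a stand-in for dma_unmap_single(). */
#include <stdio.h>

static void release(int tb)
{
	printf("unmap TB %d\n", tb);
}

int main(void)
{
	int num_tbs = 3;
	int i;

	/* TB 0 lives in the coherent scratch area and is never unmapped */
	for (i = 1; i < num_tbs; i++)
		release(i);

	return 0;
}
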
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
406 * Does NOT advance any TFD circular buffer read/write indexes 398 * Does NOT advance any TFD circular buffer read/write indexes
407 * Does NOT free the TFD itself (which is within circular buffer) 399 * Does NOT free the TFD itself (which is within circular buffer)
408 */ 400 */
409static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 401static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
410 enum dma_data_direction dma_dir)
411{ 402{
412 struct iwl_tfd *tfd_tmp = txq->tfds; 403 struct iwl_tfd *tfd_tmp = txq->tfds;
413 404
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
418 lockdep_assert_held(&txq->lock); 409 lockdep_assert_held(&txq->lock);
419 410
420 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */ 411 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
421 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr], 412 iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
422 dma_dir);
423 413
424 /* free SKB */ 414 /* free SKB */
425 if (txq->entries) { 415 if (txq->entries) {
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
479{ 469{
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 470 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX; 471 size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
472 size_t scratchbuf_sz;
482 int i; 473 int i;
483 474
484 if (WARN_ON(txq->entries || txq->tfds)) 475 if (WARN_ON(txq->entries || txq->tfds))
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
514 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz); 505 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
515 goto error; 506 goto error;
516 } 507 }
508
509 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
510 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
511 sizeof(struct iwl_cmd_header) +
512 offsetof(struct iwl_tx_cmd, scratch));
513
514 scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
515
516 txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
517 &txq->scratchbufs_dma,
518 GFP_KERNEL);
519 if (!txq->scratchbufs)
520 goto err_free_tfds;
521
517 txq->q.id = txq_id; 522 txq->q.id = txq_id;
518 523
519 return 0; 524 return 0;
525err_free_tfds:
526 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
520error: 527error:
521 if (txq->entries && txq_id == trans_pcie->cmd_queue) 528 if (txq->entries && txq_id == trans_pcie->cmd_queue)
522 for (i = 0; i < slots_num; i++) 529 for (i = 0; i < slots_num; i++)
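
The two BUILD_BUG_ON()s above pin the layout assumption down at compile time: the scratch word inside the per-entry buffer must sit at exactly the offset the firmware writes back to, i.e. command header plus the scratch offset inside the TX command. Here is a standalone sketch of the same kind of cross-structure layout check; all structures are stand-ins for illustration.

/* Standalone sketch of a compile-time layout check in the spirit of the
 * BUILD_BUG_ON()s above; every structure here is a stand-in. */
#include <stddef.h>

struct hdr_stub    { unsigned char b[4]; };			/* "command header" */
struct tx_cmd_stub { unsigned char pre[8]; unsigned int scratch; };
struct scratch_stub {
	struct hdr_stub hdr;
	unsigned char buf[8];
	unsigned int scratch;
};

/* Fails to compile if the write-back offsets ever drift apart. */
_Static_assert(offsetof(struct scratch_stub, scratch) ==
	       sizeof(struct hdr_stub) + offsetof(struct tx_cmd_stub, scratch),
	       "scratch word must line up with the TX command layout");

int main(void) { return 0; }
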
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
565 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 572 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
566 struct iwl_txq *txq = &trans_pcie->txq[txq_id]; 573 struct iwl_txq *txq = &trans_pcie->txq[txq_id];
567 struct iwl_queue *q = &txq->q; 574 struct iwl_queue *q = &txq->q;
568 enum dma_data_direction dma_dir;
569 575
570 if (!q->n_bd) 576 if (!q->n_bd)
571 return; 577 return;
572 578
573 /* In the command queue, all the TBs are mapped as BIDI
574 * so unmap them as such.
575 */
576 if (txq_id == trans_pcie->cmd_queue)
577 dma_dir = DMA_BIDIRECTIONAL;
578 else
579 dma_dir = DMA_TO_DEVICE;
580
581 spin_lock_bh(&txq->lock); 579 spin_lock_bh(&txq->lock);
582 while (q->write_ptr != q->read_ptr) { 580 while (q->write_ptr != q->read_ptr) {
583 iwl_pcie_txq_free_tfd(trans, txq, dma_dir); 581 iwl_pcie_txq_free_tfd(trans, txq);
584 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 582 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
585 } 583 }
586 spin_unlock_bh(&txq->lock); 584 spin_unlock_bh(&txq->lock);
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
610 if (txq_id == trans_pcie->cmd_queue) 608 if (txq_id == trans_pcie->cmd_queue)
611 for (i = 0; i < txq->q.n_window; i++) { 609 for (i = 0; i < txq->q.n_window; i++) {
612 kfree(txq->entries[i].cmd); 610 kfree(txq->entries[i].cmd);
613 kfree(txq->entries[i].copy_cmd);
614 kfree(txq->entries[i].free_buf); 611 kfree(txq->entries[i].free_buf);
615 } 612 }
616 613
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
619 dma_free_coherent(dev, sizeof(struct iwl_tfd) * 616 dma_free_coherent(dev, sizeof(struct iwl_tfd) *
620 txq->q.n_bd, txq->tfds, txq->q.dma_addr); 617 txq->q.n_bd, txq->tfds, txq->q.dma_addr);
621 txq->q.dma_addr = 0; 618 txq->q.dma_addr = 0;
619
620 dma_free_coherent(dev,
621 sizeof(*txq->scratchbufs) * txq->q.n_window,
622 txq->scratchbufs, txq->scratchbufs_dma);
622 } 623 }
623 624
624 kfree(txq->entries); 625 kfree(txq->entries);
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
962 963
963 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); 964 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
964 965
965 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE); 966 iwl_pcie_txq_free_tfd(trans, txq);
966 } 967 }
967 968
968 iwl_pcie_txq_progress(trans_pcie, txq); 969 iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1152 void *dup_buf = NULL; 1153 void *dup_buf = NULL;
1153 dma_addr_t phys_addr; 1154 dma_addr_t phys_addr;
1154 int idx; 1155 int idx;
1155 u16 copy_size, cmd_size, dma_size; 1156 u16 copy_size, cmd_size, scratch_size;
1156 bool had_nocopy = false; 1157 bool had_nocopy = false;
1157 int i; 1158 int i;
1158 u32 cmd_pos; 1159 u32 cmd_pos;
1159 const u8 *cmddata[IWL_MAX_CMD_TFDS]; 1160 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1160 u16 cmdlen[IWL_MAX_CMD_TFDS]; 1161 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1161 1162
1162 copy_size = sizeof(out_cmd->hdr); 1163 copy_size = sizeof(out_cmd->hdr);
1163 cmd_size = sizeof(out_cmd->hdr); 1164 cmd_size = sizeof(out_cmd->hdr);
1164 1165
1165 /* need one for the header if the first is NOCOPY */ 1166 /* need one for the header if the first is NOCOPY */
1166 BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); 1167 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1167 1168
1168 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1169 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1169 cmddata[i] = cmd->data[i]; 1170 cmddata[i] = cmd->data[i];
1170 cmdlen[i] = cmd->len[i]; 1171 cmdlen[i] = cmd->len[i];
1171 1172
1172 if (!cmd->len[i]) 1173 if (!cmd->len[i])
1173 continue; 1174 continue;
1174 1175
1175 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1176 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1176 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1177 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1177 int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1178 int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1178 1179
1179 if (copy > cmdlen[i]) 1180 if (copy > cmdlen[i])
1180 copy = cmdlen[i]; 1181 copy = cmdlen[i];
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1260 /* and copy the data that needs to be copied */ 1261 /* and copy the data that needs to be copied */
1261 cmd_pos = offsetof(struct iwl_device_cmd, payload); 1262 cmd_pos = offsetof(struct iwl_device_cmd, payload);
1262 copy_size = sizeof(out_cmd->hdr); 1263 copy_size = sizeof(out_cmd->hdr);
1263 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1264 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1264 int copy = 0; 1265 int copy = 0;
1265 1266
1266 if (!cmd->len) 1267 if (!cmd->len)
1267 continue; 1268 continue;
1268 1269
1269 /* need at least IWL_HCMD_MIN_COPY_SIZE copied */ 1270 /* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
1270 if (copy_size < IWL_HCMD_MIN_COPY_SIZE) { 1271 if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
1271 copy = IWL_HCMD_MIN_COPY_SIZE - copy_size; 1272 copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
1272 1273
1273 if (copy > cmd->len[i]) 1274 if (copy > cmd->len[i])
1274 copy = cmd->len[i]; 1275 copy = cmd->len[i];
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1286 } 1287 }
1287 } 1288 }
1288 1289
1289 WARN_ON_ONCE(txq->entries[idx].copy_cmd);
1290
1291 /*
1292 * since out_cmd will be the source address of the FH, it will write
1293 * the retry count there. So when the user needs to receivce the HCMD
1294 * that corresponds to the response in the response handler, it needs
1295 * to set CMD_WANT_HCMD.
1296 */
1297 if (cmd->flags & CMD_WANT_HCMD) {
1298 txq->entries[idx].copy_cmd =
1299 kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
1300 if (unlikely(!txq->entries[idx].copy_cmd)) {
1301 idx = -ENOMEM;
1302 goto out;
1303 }
1304 }
1305
1306 IWL_DEBUG_HC(trans, 1290 IWL_DEBUG_HC(trans,
1307 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1291 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1308 get_cmd_string(trans_pcie, out_cmd->hdr.cmd), 1292 get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
1309 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), 1293 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1310 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue); 1294 cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
1311 1295
1312 /* 1296 /* start the TFD with the scratchbuf */
1313 * If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must 1297 scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
1314 * still map at least that many bytes for the hardware to write back to. 1298 memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
1315 * We have enough space, so that's not a problem. 1299 iwl_pcie_txq_build_tfd(trans, txq,
1316 */ 1300 iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
1317 dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE); 1301 scratch_size, 1);
1302
1303 /* map first command fragment, if any remains */
1304 if (copy_size > scratch_size) {
1305 phys_addr = dma_map_single(trans->dev,
1306 ((u8 *)&out_cmd->hdr) + scratch_size,
1307 copy_size - scratch_size,
1308 DMA_TO_DEVICE);
1309 if (dma_mapping_error(trans->dev, phys_addr)) {
1310 iwl_pcie_tfd_unmap(trans, out_meta,
1311 &txq->tfds[q->write_ptr]);
1312 idx = -ENOMEM;
1313 goto out;
1314 }
1318 1315
1319 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size, 1316 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1320 DMA_BIDIRECTIONAL); 1317 copy_size - scratch_size, 0);
1321 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1322 idx = -ENOMEM;
1323 goto out;
1324 } 1318 }
1325 1319
1326 dma_unmap_addr_set(out_meta, mapping, phys_addr);
1327 dma_unmap_len_set(out_meta, len, dma_size);
1328
1329 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
1330
1331 /* map the remaining (adjusted) nocopy/dup fragments */ 1320 /* map the remaining (adjusted) nocopy/dup fragments */
1332 for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { 1321 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1333 const void *data = cmddata[i]; 1322 const void *data = cmddata[i];
1334 1323
1335 if (!cmdlen[i]) 1324 if (!cmdlen[i])
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1340 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1329 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1341 data = dup_buf; 1330 data = dup_buf;
1342 phys_addr = dma_map_single(trans->dev, (void *)data, 1331 phys_addr = dma_map_single(trans->dev, (void *)data,
1343 cmdlen[i], DMA_BIDIRECTIONAL); 1332 cmdlen[i], DMA_TO_DEVICE);
1344 if (dma_mapping_error(trans->dev, phys_addr)) { 1333 if (dma_mapping_error(trans->dev, phys_addr)) {
1345 iwl_pcie_tfd_unmap(trans, out_meta, 1334 iwl_pcie_tfd_unmap(trans, out_meta,
1346 &txq->tfds[q->write_ptr], 1335 &txq->tfds[q->write_ptr]);
1347 DMA_BIDIRECTIONAL);
1348 idx = -ENOMEM; 1336 idx = -ENOMEM;
1349 goto out; 1337 goto out;
1350 } 1338 }
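
The host-command path above now splits the copied part of a command in two: the first IWL_HCMD_SCRATCHBUF_SIZE bytes go into the per-entry scratch buffer (the first TB), and whatever remains of the copy region becomes a second, separately mapped TB. A standalone sketch of that split, with plain buffers standing in for the DMA-coherent scratch area, is shown here.

/* Standalone sketch of the "scratch buffer first, remainder second" split
 * used when queueing a host command. Plain memory stands in for DMA. */
#include <stdio.h>
#include <string.h>

#define SCRATCHBUF_SIZE 16	/* IWL_HCMD_SCRATCHBUF_SIZE */

int main(void)
{
	unsigned char cmd[40];	/* the copied part of a command */
	unsigned char scratch[SCRATCHBUF_SIZE];
	size_t copy_size = sizeof(cmd);
	size_t first, rest;

	memset(cmd, 0xab, sizeof(cmd));

	first = copy_size < SCRATCHBUF_SIZE ? copy_size : SCRATCHBUF_SIZE;
	memcpy(scratch, cmd, first);	/* first TB: always from the scratch buf */

	rest = copy_size - first;	/* second TB, mapped separately if nonzero */
	printf("first TB %zu bytes, second TB %zu bytes\n", first, rest);
	return 0;
}
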
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1418 cmd = txq->entries[cmd_index].cmd; 1406 cmd = txq->entries[cmd_index].cmd;
1419 meta = &txq->entries[cmd_index].meta; 1407 meta = &txq->entries[cmd_index].meta;
1420 1408
1421 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL); 1409 iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
1422 1410
1423 /* Input error checking is done when commands are added to queue. */ 1411 /* Input error checking is done when commands are added to queue. */
1424 if (meta->flags & CMD_WANT_SKB) { 1412 if (meta->flags & CMD_WANT_SKB) {
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1597 struct iwl_cmd_meta *out_meta; 1585 struct iwl_cmd_meta *out_meta;
1598 struct iwl_txq *txq; 1586 struct iwl_txq *txq;
1599 struct iwl_queue *q; 1587 struct iwl_queue *q;
1600 dma_addr_t phys_addr = 0; 1588 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
1601 dma_addr_t txcmd_phys; 1589 void *tb1_addr;
1602 dma_addr_t scratch_phys; 1590 u16 len, tb1_len, tb2_len;
1603 u16 len, firstlen, secondlen;
1604 u8 wait_write_ptr = 0; 1591 u8 wait_write_ptr = 0;
1605 __le16 fc = hdr->frame_control; 1592 __le16 fc = hdr->frame_control;
1606 u8 hdr_len = ieee80211_hdrlen(fc); 1593 u8 hdr_len = ieee80211_hdrlen(fc);
@@ -1638,85 +1625,80 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1638 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1625 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1639 INDEX_TO_SEQ(q->write_ptr))); 1626 INDEX_TO_SEQ(q->write_ptr)));
1640 1627
1628 tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
1629 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
1630 offsetof(struct iwl_tx_cmd, scratch);
1631
1632 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1633 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1634
1641 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1635 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1642 out_meta = &txq->entries[q->write_ptr].meta; 1636 out_meta = &txq->entries[q->write_ptr].meta;
1643 1637
1644 /* 1638 /*
1645 * Use the first empty entry in this queue's command buffer array 1639 * The second TB (tb1) points to the remainder of the TX command
1646 * to contain the Tx command and MAC header concatenated together 1640 * and the 802.11 header - dword aligned size
1647 * (payload data will be in another buffer). 1641 * (This calculation modifies the TX command, so do it before the
1648 * Size of this varies, due to varying MAC header length. 1642 * setup of the first TB)
1649 * If end is not dword aligned, we'll have 2 extra bytes at the end
1650 * of the MAC header (device reads on dword boundaries).
1651 * We'll tell device about this padding later.
1652 */ 1643 */
1653 len = sizeof(struct iwl_tx_cmd) + 1644 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
1654 sizeof(struct iwl_cmd_header) + hdr_len; 1645 hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
1655 firstlen = (len + 3) & ~3; 1646 tb1_len = (len + 3) & ~3;
1656 1647
1657 /* Tell NIC about any 2-byte padding after MAC header */ 1648 /* Tell NIC about any 2-byte padding after MAC header */
1658 if (firstlen != len) 1649 if (tb1_len != len)
1659 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 1650 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1660 1651
1661 /* Physical address of this Tx command's header (not MAC header!), 1652 /* The first TB points to the scratchbuf data - min_copy bytes */
1662 * within command buffer array. */ 1653 memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
1663 txcmd_phys = dma_map_single(trans->dev, 1654 IWL_HCMD_SCRATCHBUF_SIZE);
1664 &dev_cmd->hdr, firstlen, 1655 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
1665 DMA_BIDIRECTIONAL); 1656 IWL_HCMD_SCRATCHBUF_SIZE, 1);
1666 if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1667 goto out_err;
1668 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1669 dma_unmap_len_set(out_meta, len, firstlen);
1670 1657
1671 if (!ieee80211_has_morefrags(fc)) { 1658 /* there must be data left over for TB1 or this code must be changed */
1672 txq->need_update = 1; 1659 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
1673 } else {
1674 wait_write_ptr = 1;
1675 txq->need_update = 0;
1676 }
1677 1660
1678 /* Set up TFD's 2nd entry to point directly to remainder of skb, 1661 /* map the data for TB1 */
1679 * if any (802.11 null frames have no payload). */ 1662 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
1680 secondlen = skb->len - hdr_len; 1663 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
1681 if (secondlen > 0) { 1664 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
1682 phys_addr = dma_map_single(trans->dev, skb->data + hdr_len, 1665 goto out_err;
1683 secondlen, DMA_TO_DEVICE); 1666 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
1684 if (unlikely(dma_mapping_error(trans->dev, phys_addr))) { 1667
1685 dma_unmap_single(trans->dev, 1668 /*
1686 dma_unmap_addr(out_meta, mapping), 1669 * Set up TFD's third entry to point directly to remainder
1687 dma_unmap_len(out_meta, len), 1670 * of skb, if any (802.11 null frames have no payload).
1688 DMA_BIDIRECTIONAL); 1671 */
1672 tb2_len = skb->len - hdr_len;
1673 if (tb2_len > 0) {
1674 dma_addr_t tb2_phys = dma_map_single(trans->dev,
1675 skb->data + hdr_len,
1676 tb2_len, DMA_TO_DEVICE);
1677 if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
1678 iwl_pcie_tfd_unmap(trans, out_meta,
1679 &txq->tfds[q->write_ptr]);
1689 goto out_err; 1680 goto out_err;
1690 } 1681 }
1682 iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
1691 } 1683 }
1692 1684
1693 /* Attach buffers to TFD */
1694 iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
1695 if (secondlen > 0)
1696 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
1697
1698 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1699 offsetof(struct iwl_tx_cmd, scratch);
1700
1701 /* take back ownership of DMA buffer to enable update */
1702 dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1703 DMA_BIDIRECTIONAL);
1704 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1705 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1706
1707 /* Set up entry for this TFD in Tx byte-count array */ 1685 /* Set up entry for this TFD in Tx byte-count array */
1708 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len)); 1686 iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1709 1687
1710 dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1711 DMA_BIDIRECTIONAL);
1712
1713 trace_iwlwifi_dev_tx(trans->dev, skb, 1688 trace_iwlwifi_dev_tx(trans->dev, skb,
1714 &txq->tfds[txq->q.write_ptr], 1689 &txq->tfds[txq->q.write_ptr],
1715 sizeof(struct iwl_tfd), 1690 sizeof(struct iwl_tfd),
1716 &dev_cmd->hdr, firstlen, 1691 &dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
1717 skb->data + hdr_len, secondlen); 1692 skb->data + hdr_len, tb2_len);
1718 trace_iwlwifi_dev_tx_data(trans->dev, skb, 1693 trace_iwlwifi_dev_tx_data(trans->dev, skb,
1719 skb->data + hdr_len, secondlen); 1694 skb->data + hdr_len, tb2_len);
1695
1696 if (!ieee80211_has_morefrags(fc)) {
1697 txq->need_update = 1;
1698 } else {
1699 wait_write_ptr = 1;
1700 txq->need_update = 0;
1701 }
1720 1702
1721 /* start timer if queue currently empty */ 1703 /* start timer if queue currently empty */
1722 if (txq->need_update && q->read_ptr == q->write_ptr && 1704 if (txq->need_update && q->read_ptr == q->write_ptr &&
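
To make the TB layout in the data path above concrete: TB0 is the fixed 16-byte scratch copy, TB1 carries the rest of the TX command plus the 802.11 header rounded up to a dword, and TB2 (if any) points at the frame payload. The sketch below redoes that length arithmetic with stand-in structure sizes, which are assumptions for illustration only.

/* Standalone sketch of the TB length arithmetic used in the TX path above.
 * The structure sizes here are stand-ins, not the driver's real ones. */
#include <stdio.h>

#define SCRATCHBUF_SIZE 16
#define CMD_HDR_SIZE     4	/* assumed sizeof(struct iwl_cmd_header) */
#define TX_CMD_SIZE     40	/* assumed sizeof(struct iwl_tx_cmd)     */

int main(void)
{
	unsigned int hdr_len = 26;	/* e.g. an 802.11 QoS data header */
	unsigned int skb_len = 1500;
	unsigned int len, tb1_len, tb2_len;

	len = TX_CMD_SIZE + CMD_HDR_SIZE + hdr_len - SCRATCHBUF_SIZE;
	tb1_len = (len + 3) & ~3u;	/* dword-align TB1 */
	tb2_len = skb_len - hdr_len;	/* payload, may be 0 for null frames */

	printf("tb0=%u tb1=%u (padded from %u) tb2=%u\n",
	       SCRATCHBUF_SIZE, tb1_len, len, tb2_len);
	return 0;
}
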
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 246aa62a4817..2fe0ceba4400 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1117,10 +1117,9 @@ mwifiex_cmd_802_11_ad_hoc_join(struct mwifiex_private *priv,
1117 adhoc_join->bss_descriptor.bssid, 1117 adhoc_join->bss_descriptor.bssid,
1118 adhoc_join->bss_descriptor.ssid); 1118 adhoc_join->bss_descriptor.ssid);
1119 1119
1120 for (i = 0; bss_desc->supported_rates[i] && 1120 for (i = 0; i < MWIFIEX_SUPPORTED_RATES &&
1121 i < MWIFIEX_SUPPORTED_RATES; 1121 bss_desc->supported_rates[i]; i++)
1122 i++) 1122 ;
1123 ;
1124 rates_size = i; 1123 rates_size = i;
1125 1124
1126 /* Copy Data Rates from the Rates recorded in scan response */ 1125 /* Copy Data Rates from the Rates recorded in scan response */
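
The mwifiex change above is a classic loop-condition reorder: the index bound is now tested before the array element is read, so the loop can no longer touch supported_rates[MWIFIEX_SUPPORTED_RATES]. A minimal standalone version of the safe counting idiom follows; the array contents are made up.

/* Standalone version of the bounded "count non-zero entries" idiom. */
#include <stdio.h>

#define MAX_RATES 14

int main(void)
{
	unsigned char rates[MAX_RATES] = { 0x82, 0x84, 0x8b, 0x96 };
	int i;

	/* bound first, then dereference - never reads rates[MAX_RATES] */
	for (i = 0; i < MAX_RATES && rates[i]; i++)
		;

	printf("rates_size = %d\n", i);
	return 0;
}
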
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 44d6ead43341..2bf4efa33186 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -55,10 +55,10 @@ config RT61PCI
55 55
56config RT2800PCI 56config RT2800PCI
57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support" 57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
58 depends on PCI || RALINK_RT288X || RALINK_RT305X 58 depends on PCI || SOC_RT288X || SOC_RT305X
59 select RT2800_LIB 59 select RT2800_LIB
60 select RT2X00_LIB_PCI if PCI 60 select RT2X00_LIB_PCI if PCI
61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X 61 select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
62 select RT2X00_LIB_FIRMWARE 62 select RT2X00_LIB_FIRMWARE
63 select RT2X00_LIB_CRYPTO 63 select RT2X00_LIB_CRYPTO
64 select CRC_CCITT 64 select CRC_CCITT
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 48a01aa21f1c..ded73da4de0b 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -89,7 +89,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 89 rt2x00pci_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
90} 90}
91 91
92#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 92#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
93static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 93static int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
94{ 94{
95 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); 95 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
@@ -107,7 +107,7 @@ static inline int rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
107{ 107{
108 return -ENOMEM; 108 return -ENOMEM;
109} 109}
110#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 110#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
111 111
112#ifdef CONFIG_PCI 112#ifdef CONFIG_PCI
113static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 113static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -1177,7 +1177,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1177#endif /* CONFIG_PCI */ 1177#endif /* CONFIG_PCI */
1178MODULE_LICENSE("GPL"); 1178MODULE_LICENSE("GPL");
1179 1179
1180#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1180#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1181static int rt2800soc_probe(struct platform_device *pdev) 1181static int rt2800soc_probe(struct platform_device *pdev)
1182{ 1182{
1183 return rt2x00soc_probe(pdev, &rt2800pci_ops); 1183 return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1194,7 +1194,7 @@ static struct platform_driver rt2800soc_driver = {
1194 .suspend = rt2x00soc_suspend, 1194 .suspend = rt2x00soc_suspend,
1195 .resume = rt2x00soc_resume, 1195 .resume = rt2x00soc_resume,
1196}; 1196};
1197#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */ 1197#endif /* CONFIG_SOC_RT288X || CONFIG_SOC_RT305X */
1198 1198
1199#ifdef CONFIG_PCI 1199#ifdef CONFIG_PCI
1200static int rt2800pci_probe(struct pci_dev *pci_dev, 1200static int rt2800pci_probe(struct pci_dev *pci_dev,
@@ -1217,7 +1217,7 @@ static int __init rt2800pci_init(void)
1217{ 1217{
1218 int ret = 0; 1218 int ret = 0;
1219 1219
1220#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1220#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1221 ret = platform_driver_register(&rt2800soc_driver); 1221 ret = platform_driver_register(&rt2800soc_driver);
1222 if (ret) 1222 if (ret)
1223 return ret; 1223 return ret;
@@ -1225,7 +1225,7 @@ static int __init rt2800pci_init(void)
1225#ifdef CONFIG_PCI 1225#ifdef CONFIG_PCI
1226 ret = pci_register_driver(&rt2800pci_driver); 1226 ret = pci_register_driver(&rt2800pci_driver);
1227 if (ret) { 1227 if (ret) {
1228#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1228#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1229 platform_driver_unregister(&rt2800soc_driver); 1229 platform_driver_unregister(&rt2800soc_driver);
1230#endif 1230#endif
1231 return ret; 1231 return ret;
@@ -1240,7 +1240,7 @@ static void __exit rt2800pci_exit(void)
1240#ifdef CONFIG_PCI 1240#ifdef CONFIG_PCI
1241 pci_unregister_driver(&rt2800pci_driver); 1241 pci_unregister_driver(&rt2800pci_driver);
1242#endif 1242#endif
1243#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X) 1243#if defined(CONFIG_SOC_RT288X) || defined(CONFIG_SOC_RT305X)
1244 platform_driver_unregister(&rt2800soc_driver); 1244 platform_driver_unregister(&rt2800soc_driver);
1245#endif 1245#endif
1246} 1246}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index b1ccff474c79..c08d0f4c5f3d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1377,74 +1377,57 @@ void rtl92cu_card_disable(struct ieee80211_hw *hw)
1377 1377
1378void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid) 1378void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
1379{ 1379{
1380 /* dummy routine needed for callback from rtl_op_configure_filter() */
1381}
1382
1383/*========================================================================== */
1384
1385static void _rtl92cu_set_check_bssid(struct ieee80211_hw *hw,
1386 enum nl80211_iftype type)
1387{
1388 struct rtl_priv *rtlpriv = rtl_priv(hw); 1380 struct rtl_priv *rtlpriv = rtl_priv(hw);
1389 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1390 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1381 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
1391 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1382 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1392 u8 filterout_non_associated_bssid = false;
1393 1383
1394 switch (type) { 1384 if (rtlpriv->psc.rfpwr_state != ERFON)
1395 case NL80211_IFTYPE_ADHOC: 1385 return;
1396 case NL80211_IFTYPE_STATION: 1386
1397 filterout_non_associated_bssid = true; 1387 if (check_bssid) {
1398 break; 1388 u8 tmp;
1399 case NL80211_IFTYPE_UNSPECIFIED:
1400 case NL80211_IFTYPE_AP:
1401 default:
1402 break;
1403 }
1404 if (filterout_non_associated_bssid) {
1405 if (IS_NORMAL_CHIP(rtlhal->version)) { 1389 if (IS_NORMAL_CHIP(rtlhal->version)) {
1406 switch (rtlphy->current_io_type) { 1390 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1407 case IO_CMD_RESUME_DM_BY_SCAN: 1391 tmp = BIT(4);
1408 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1409 rtlpriv->cfg->ops->set_hw_reg(hw,
1410 HW_VAR_RCR, (u8 *)(&reg_rcr));
1411 /* enable update TSF */
1412 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1413 break;
1414 case IO_CMD_PAUSE_DM_BY_SCAN:
1415 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1416 rtlpriv->cfg->ops->set_hw_reg(hw,
1417 HW_VAR_RCR, (u8 *)(&reg_rcr));
1418 /* disable update TSF */
1419 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1420 break;
1421 }
1422 } else { 1392 } else {
1423 reg_rcr |= (RCR_CBSSID); 1393 reg_rcr |= RCR_CBSSID;
1424 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1394 tmp = BIT(4) | BIT(5);
1425 (u8 *)(&reg_rcr));
1426 _rtl92cu_set_bcn_ctrl_reg(hw, 0, (BIT(4)|BIT(5)));
1427 } 1395 }
1428 } else if (filterout_non_associated_bssid == false) { 1396 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1397 (u8 *) (&reg_rcr));
1398 _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp);
1399 } else {
1400 u8 tmp;
1429 if (IS_NORMAL_CHIP(rtlhal->version)) { 1401 if (IS_NORMAL_CHIP(rtlhal->version)) {
1430 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN)); 1402 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1431 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1403 tmp = BIT(4);
1432 (u8 *)(&reg_rcr));
1433 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(4), 0);
1434 } else { 1404 } else {
1435 reg_rcr &= (~RCR_CBSSID); 1405 reg_rcr &= ~RCR_CBSSID;
1436 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 1406 tmp = BIT(4) | BIT(5);
1437 (u8 *)(&reg_rcr));
1438 _rtl92cu_set_bcn_ctrl_reg(hw, (BIT(4)|BIT(5)), 0);
1439 } 1407 }
1408 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1409 rtlpriv->cfg->ops->set_hw_reg(hw,
1410 HW_VAR_RCR, (u8 *) (&reg_rcr));
1411 _rtl92cu_set_bcn_ctrl_reg(hw, tmp, 0);
1440 } 1412 }
1441} 1413}
1442 1414
1415/*========================================================================== */
1416
1443int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type) 1417int rtl92cu_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1444{ 1418{
1419 struct rtl_priv *rtlpriv = rtl_priv(hw);
1420
1445 if (_rtl92cu_set_media_status(hw, type)) 1421 if (_rtl92cu_set_media_status(hw, type))
1446 return -EOPNOTSUPP; 1422 return -EOPNOTSUPP;
1447 _rtl92cu_set_check_bssid(hw, type); 1423
1424 if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
1425 if (type != NL80211_IFTYPE_AP)
1426 rtl92cu_set_check_bssid(hw, true);
1427 } else {
1428 rtl92cu_set_check_bssid(hw, false);
1429 }
1430
1448 return 0; 1431 return 0;
1449} 1432}
1450 1433
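
The rtl8192cu rework above folds the old per-iftype helper into rtl92cu_set_check_bssid() itself: the check_bssid flag decides whether the BSSID-filter bits are set or cleared in RCR, and whether TSF update is enabled or disabled through the beacon control register. The standalone sketch below shows the same set-or-clear bitmask pattern; the register value and bit positions are stand-ins.

/* Standalone sketch of the set-or-clear bitmask pattern used above;
 * the register value and bit definitions are made up. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define RCR_CBSSID_DATA (1u << 6)	/* stand-in bit positions */
#define RCR_CBSSID_BCN  (1u << 7)

static uint32_t apply_check_bssid(uint32_t rcr, bool check_bssid)
{
	if (check_bssid)
		rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	else
		rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
	return rcr;
}

int main(void)
{
	uint32_t rcr = 0;

	rcr = apply_check_bssid(rcr, true);
	printf("rcr with filtering:    0x%08x\n", rcr);
	rcr = apply_check_bssid(rcr, false);
	printf("rcr without filtering: 0x%08x\n", rcr);
	return 0;
}
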
@@ -2058,8 +2041,6 @@ void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
2058 (shortgi_rate << 4) | (shortgi_rate); 2041 (shortgi_rate << 4) | (shortgi_rate);
2059 } 2042 }
2060 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value); 2043 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
2061 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
2062 rtl_read_dword(rtlpriv, REG_ARFR0));
2063} 2044}
2064 2045
2065void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level) 2046void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)