path: root/drivers/net
author	Mark Brown <broonie@linaro.org>	2014-04-29 13:01:28 -0400
committer	Mark Brown <broonie@linaro.org>	2014-04-29 13:01:28 -0400
commit	3e93457b45a1a8c69227ce596ee2005fa06f20dd (patch)
tree	248c27e432533b1af80a9b2240eaa8e48e3b87cc /drivers/net
parent	290414499cf94284a97cc3c33214d13ccfcd896a (diff)
parent	c42ba72ec3a7a1b6aa30122931f1f4b91b601c31 (diff)
Merge tag 'ib-mfd-regulator-3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd into regulator-tps65090
Immutable branch between MFD and Regulator due for v3.16 merge-window.
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--	drivers/net/ethernet/broadcom/tg3.c | 4
-rw-r--r--	drivers/net/ethernet/cadence/Kconfig | 6
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/l2t.c | 4
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--	drivers/net/ethernet/emulex/benet/be.h | 1
-rw-r--r--	drivers/net/ethernet/emulex/benet/be_main.c | 17
-rw-r--r--	drivers/net/ethernet/marvell/mvneta.c | 46
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_cq.c | 1
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 6
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/main.c | 181
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--	drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 21
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 3
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 2
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 33
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 10
-rw-r--r--	drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 31
-rw-r--r--	drivers/net/ethernet/sfc/ef10.c | 12
-rw-r--r--	drivers/net/ethernet/sfc/efx.c | 19
-rw-r--r--	drivers/net/ethernet/sfc/enum.h | 23
-rw-r--r--	drivers/net/ethernet/sfc/falcon.c | 4
-rw-r--r--	drivers/net/ethernet/sfc/farch.c | 22
-rw-r--r--	drivers/net/ethernet/sfc/mcdi.c | 55
-rw-r--r--	drivers/net/ethernet/sfc/mcdi.h | 13
-rw-r--r--	drivers/net/ethernet/sfc/net_driver.h | 4
-rw-r--r--	drivers/net/ethernet/sfc/nic.h | 1
-rw-r--r--	drivers/net/ethernet/sfc/siena.c | 2
-rw-r--r--	drivers/net/ieee802154/at86rf230.c | 10
-rw-r--r--	drivers/net/phy/mdio-gpio.c | 68
-rw-r--r--	drivers/net/vxlan.c | 4
-rw-r--r--	drivers/net/wan/cosa.c | 4
-rw-r--r--	drivers/net/wireless/cw1200/debug.c | 2
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-7000.c | 5
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/coex.c | 18
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/mac80211.c | 1
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/rs.c | 261
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/rs.h | 14
-rw-r--r--	drivers/net/wireless/iwlwifi/mvm/sf.c | 3
-rw-r--r--	drivers/net/wireless/iwlwifi/pcie/drv.c | 2
-rw-r--r--	drivers/net/wireless/mwifiex/main.c | 12
-rw-r--r--	drivers/net/wireless/mwifiex/sta_ioctl.c | 7
-rw-r--r--	drivers/net/wireless/rsi/rsi_91x_core.c | 2
-rw-r--r--	drivers/net/wireless/rsi/rsi_91x_mgmt.c | 21
-rw-r--r--	drivers/net/wireless/ti/wl18xx/event.h | 20
-rw-r--r--	drivers/net/wireless/ti/wlcore/event.c | 5
47 files changed, 649 insertions, 337 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index a8efb18e42fa..0ab83708b6a1 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8627,6 +8627,7 @@ bnx2_remove_one(struct pci_dev *pdev)
 	pci_disable_device(pdev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int
 bnx2_suspend(struct device *device)
 {
@@ -8665,7 +8666,6 @@ bnx2_resume(struct device *device)
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
 static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
 #define BNX2_PM_OPS (&bnx2_pm_ops)
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index b9f7022f4e81..e5d95c5ce1ad 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12286,7 +12286,9 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 	if (tg3_flag(tp, MAX_RXPEND_64) &&
 	    tp->rx_pending > 63)
 		tp->rx_pending = 63;
-	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
 
 	for (i = 0; i < tp->irq_max; i++)
 		tp->napi[i].tx_pending = ering->tx_pending;
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 751d5c7b312d..7e49c43b7af3 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -4,7 +4,7 @@
 
 config NET_CADENCE
 	bool "Cadence devices"
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
 	default y
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y.
@@ -22,7 +22,7 @@ if NET_CADENCE
 
 config ARM_AT91_ETHER
 	tristate "AT91RM9200 Ethernet support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (ARCH_AT91RM9200 || COMPILE_TEST)
 	select MACB
 	---help---
 	  If you wish to compile a kernel for the AT91RM9200 and enable
@@ -30,7 +30,7 @@ config ARM_AT91_ETHER
 
 config MACB
 	tristate "Cadence MACB/GEM support"
-	depends on HAS_DMA
+	depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
 	select PHYLIB
 	---help---
 	  The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 81e8402a74b4..8a96572fdde0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -154,7 +154,7 @@ static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
 	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
 	req->l2t_idx = htons(e->idx);
 	req->vlan = htons(e->vlan);
-	if (e->neigh)
+	if (e->neigh && !(e->neigh->dev->flags & IFF_LOOPBACK))
 		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
 	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
 
@@ -394,6 +394,8 @@ struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
 	if (e) {
 		spin_lock(&e->lock);	/* avoid race with t4_l2t_free */
 		e->state = L2T_STATE_RESOLVING;
+		if (neigh->dev->flags & IFF_LOOPBACK)
+			memcpy(e->dmac, physdev->dev_addr, sizeof(e->dmac));
 		memcpy(e->addr, addr, addr_len);
 		e->ifindex = ifidx;
 		e->hash = hash;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fb2fe65903c2..bba67681aeaa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -682,7 +682,7 @@ enum {
 	SF_RD_ID        = 0x9f,	/* read ID */
 	SF_ERASE_SECTOR = 0xd8,	/* erase sector */
 
-	FW_MAX_SIZE = 512 * 1024,
+	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
 };
 
 /**
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8ccaa2520dc3..97db5a7179df 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -374,6 +374,7 @@ enum vf_state {
 #define BE_FLAGS_NAPI_ENABLED			(1 << 9)
 #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD		(1 << 11)
 #define BE_FLAGS_VXLAN_OFFLOADS			(1 << 12)
+#define BE_FLAGS_SETUP_DONE			(1 << 13)
 
 #define BE_UC_PMAC_COUNT			30
 #define BE_VF_UC_PMAC_COUNT			2
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3e6df47b6973..a18645407d21 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2033,11 +2033,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 	bool dummy_wrb;
 	int i, pending_txqs;
 
-	/* Wait for a max of 200ms for all the tx-completions to arrive. */
+	/* Stop polling for compls when HW has been silent for 10ms */
 	do {
 		pending_txqs = adapter->num_tx_qs;
 
 		for_all_tx_queues(adapter, txo, i) {
+			cmpl = 0;
+			num_wrbs = 0;
 			txq = &txo->q;
 			while ((txcp = be_tx_compl_get(&txo->cq))) {
 				end_idx =
@@ -2050,14 +2052,13 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
 			if (cmpl) {
 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
 				atomic_sub(num_wrbs, &txq->used);
-				cmpl = 0;
-				num_wrbs = 0;
+				timeo = 0;
 			}
 			if (atomic_read(&txq->used) == 0)
 				pending_txqs--;
 		}
 
-		if (pending_txqs == 0 || ++timeo > 200)
+		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
 			break;
 
 		mdelay(1);
@@ -2725,6 +2726,12 @@ static int be_close(struct net_device *netdev)
 	struct be_eq_obj *eqo;
 	int i;
 
+	/* This protection is needed as be_close() may be called even when the
+	 * adapter is in cleared state (after eeh perm failure)
+	 */
+	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
+		return 0;
+
 	be_roce_dev_close(adapter);
 
 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3055,6 +3062,7 @@ static int be_clear(struct be_adapter *adapter)
 	be_clear_queues(adapter);
 
 	be_msix_disable(adapter);
+	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
 	return 0;
 }
 
@@ -3559,6 +3567,7 @@ static int be_setup(struct be_adapter *adapter)
 	adapter->phy.fc_autoneg = 1;
 
 	be_schedule_worker(adapter);
+	adapter->flags |= BE_FLAGS_SETUP_DONE;
 	return 0;
 err:
 	be_clear(adapter);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d04b1c3c9b85..14786c8bf99e 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -91,7 +91,7 @@
 #define MVNETA_RX_MIN_FRAME_SIZE                 0x247c
 #define MVNETA_SERDES_CFG			 0x24A0
 #define      MVNETA_SGMII_SERDES_PROTO		 0x0cc7
-#define      MVNETA_RGMII_SERDES_PROTO		 0x0667
+#define      MVNETA_QSGMII_SERDES_PROTO		 0x0667
 #define MVNETA_TYPE_PRIO                         0x24bc
 #define      MVNETA_FORCE_UNI                    BIT(21)
 #define MVNETA_TXQ_CMD_1                         0x24e4
@@ -2721,29 +2721,44 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
 }
 
 /* Power up the port */
-static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
 {
-	u32 val;
+	u32 ctrl;
 
 	/* MAC Cause register should be cleared */
 	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
 
-	if (phy_mode == PHY_INTERFACE_MODE_SGMII)
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
-	else
-		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
+	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
 
-	val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
-
-	val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+	/* Even though it might look weird, when we're configured in
+	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
+	 */
+	switch(phy_mode) {
+	case PHY_INTERFACE_MODE_QSGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
+		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		ctrl |= MVNETA_GMAC2_PORT_RGMII;
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* Cancel Port Reset */
-	val &= ~MVNETA_GMAC2_PORT_RESET;
-	mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
+	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
 
 	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
 		MVNETA_GMAC2_PORT_RESET) != 0)
 		continue;
+
+	return 0;
 }
 
 /* Device initialization routine */
@@ -2854,7 +2869,12 @@ static int mvneta_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "can't init eth hal\n");
 		goto err_free_stats;
 	}
-	mvneta_port_power_up(pp, phy_mode);
+
+	err = mvneta_port_power_up(pp, phy_mode);
+	if (err < 0) {
+		dev_err(&pdev->dev, "can't power up port\n");
+		goto err_deinit;
+	}
 
 	dram_target_info = mv_mbus_dram_info();
 	if (dram_target_info)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 70e95324a97d..c2cd8d31bcad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,7 +66,6 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
 	cq->ring = ring;
 	cq->is_tx = mode;
-	spin_lock_init(&cq->lock);
 
 	/* Allocate HW buffers on provided NUMA node.
 	 * dev->numa_node is used in mtt range allocation flow.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f085c2df5e69..7e4b1720c3d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1304,15 +1304,11 @@ static void mlx4_en_netpoll(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_cq *cq;
-	unsigned long flags;
 	int i;
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		cq = priv->rx_cq[i];
-		spin_lock_irqsave(&cq->lock, flags);
-		napi_synchronize(&cq->napi);
-		mlx4_en_process_rx_cq(dev, cq, 0);
-		spin_unlock_irqrestore(&cq->lock, flags);
+		napi_schedule(&cq->napi);
 	}
 }
 #endif
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f0ae95f66ceb..cef267e24f9c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2301,13 +2301,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 	/* Allow large DMA segments, up to the firmware limit of 1 GB */
 	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		err = -ENOMEM;
-		goto err_release_regions;
-	}
-
-	dev = &priv->dev;
+	dev = pci_get_drvdata(pdev);
+	priv = mlx4_priv(dev);
 	dev->pdev = pdev;
 	INIT_LIST_HEAD(&priv->ctx_list);
 	spin_lock_init(&priv->ctx_lock);
@@ -2374,10 +2369,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
 		} else {
 			atomic_inc(&pf_loading);
 			err = pci_enable_sriov(pdev, total_vfs);
-			atomic_dec(&pf_loading);
 			if (err) {
 				mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
 					 err);
+				atomic_dec(&pf_loading);
 				err = 0;
 			} else {
 				mlx4_warn(dev, "Running in master mode\n");
@@ -2535,8 +2530,10 @@ slave_start:
 	mlx4_sense_init(dev);
 	mlx4_start_sense(dev);
 
-	priv->pci_dev_data = pci_dev_data;
-	pci_set_drvdata(pdev, dev);
+	priv->removed = 0;
+
+	if (mlx4_is_master(dev) && dev->num_vfs)
+		atomic_dec(&pf_loading);
 
 	return 0;
 
@@ -2588,6 +2585,9 @@ err_rel_own:
 	if (!mlx4_is_slave(dev))
 		mlx4_free_ownership(dev);
 
+	if (mlx4_is_master(dev) && dev->num_vfs)
+		atomic_dec(&pf_loading);
+
 	kfree(priv->dev.dev_vfs);
 
 err_free_dev:
@@ -2604,85 +2604,110 @@ err_disable_pdev:
 
 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+	struct mlx4_priv *priv;
+	struct mlx4_dev *dev;
+
 	printk_once(KERN_INFO "%s", mlx4_version);
 
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev = &priv->dev;
+	pci_set_drvdata(pdev, dev);
+	priv->pci_dev_data = id->driver_data;
+
 	return __mlx4_init_one(pdev, id->driver_data);
 }
 
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
 {
 	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
 	struct mlx4_priv *priv = mlx4_priv(dev);
+	int pci_dev_data;
 	int p;
 
-	if (dev) {
-		/* in SRIOV it is not allowed to unload the pf's
-		 * driver while there are alive vf's */
-		if (mlx4_is_master(dev)) {
-			if (mlx4_how_many_lives_vf(dev))
-				printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
-		}
-		mlx4_stop_sense(dev);
-		mlx4_unregister_device(dev);
+	if (priv->removed)
+		return;
 
-		for (p = 1; p <= dev->caps.num_ports; p++) {
-			mlx4_cleanup_port_info(&priv->port[p]);
-			mlx4_CLOSE_PORT(dev, p);
-		}
+	pci_dev_data = priv->pci_dev_data;
 
-		if (mlx4_is_master(dev))
-			mlx4_free_resource_tracker(dev,
-						   RES_TR_FREE_SLAVES_ONLY);
-
-		mlx4_cleanup_counters_table(dev);
-		mlx4_cleanup_qp_table(dev);
-		mlx4_cleanup_srq_table(dev);
-		mlx4_cleanup_cq_table(dev);
-		mlx4_cmd_use_polling(dev);
-		mlx4_cleanup_eq_table(dev);
-		mlx4_cleanup_mcg_table(dev);
-		mlx4_cleanup_mr_table(dev);
-		mlx4_cleanup_xrcd_table(dev);
-		mlx4_cleanup_pd_table(dev);
+	/* in SRIOV it is not allowed to unload the pf's
+	 * driver while there are alive vf's */
+	if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
+		printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+	mlx4_stop_sense(dev);
+	mlx4_unregister_device(dev);
 
-		if (mlx4_is_master(dev))
-			mlx4_free_resource_tracker(dev,
-						   RES_TR_FREE_STRUCTS_ONLY);
-
-		iounmap(priv->kar);
-		mlx4_uar_free(dev, &priv->driver_uar);
-		mlx4_cleanup_uar_table(dev);
-		if (!mlx4_is_slave(dev))
-			mlx4_clear_steering(dev);
-		mlx4_free_eq_table(dev);
-		if (mlx4_is_master(dev))
-			mlx4_multi_func_cleanup(dev);
-		mlx4_close_hca(dev);
-		if (mlx4_is_slave(dev))
-			mlx4_multi_func_cleanup(dev);
-		mlx4_cmd_cleanup(dev);
-
-		if (dev->flags & MLX4_FLAG_MSI_X)
-			pci_disable_msix(pdev);
-		if (dev->flags & MLX4_FLAG_SRIOV) {
-			mlx4_warn(dev, "Disabling SR-IOV\n");
-			pci_disable_sriov(pdev);
-		}
+	for (p = 1; p <= dev->caps.num_ports; p++) {
+		mlx4_cleanup_port_info(&priv->port[p]);
+		mlx4_CLOSE_PORT(dev, p);
+	}
 
-		if (!mlx4_is_slave(dev))
-			mlx4_free_ownership(dev);
+	if (mlx4_is_master(dev))
+		mlx4_free_resource_tracker(dev,
+					   RES_TR_FREE_SLAVES_ONLY);
 
-		kfree(dev->caps.qp0_tunnel);
-		kfree(dev->caps.qp0_proxy);
-		kfree(dev->caps.qp1_tunnel);
-		kfree(dev->caps.qp1_proxy);
-		kfree(dev->dev_vfs);
+	mlx4_cleanup_counters_table(dev);
+	mlx4_cleanup_qp_table(dev);
+	mlx4_cleanup_srq_table(dev);
+	mlx4_cleanup_cq_table(dev);
+	mlx4_cmd_use_polling(dev);
+	mlx4_cleanup_eq_table(dev);
+	mlx4_cleanup_mcg_table(dev);
+	mlx4_cleanup_mr_table(dev);
+	mlx4_cleanup_xrcd_table(dev);
+	mlx4_cleanup_pd_table(dev);
 
-		kfree(priv);
-		pci_release_regions(pdev);
-		pci_disable_device(pdev);
-		pci_set_drvdata(pdev, NULL);
+	if (mlx4_is_master(dev))
+		mlx4_free_resource_tracker(dev,
+					   RES_TR_FREE_STRUCTS_ONLY);
+
+	iounmap(priv->kar);
+	mlx4_uar_free(dev, &priv->driver_uar);
+	mlx4_cleanup_uar_table(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_clear_steering(dev);
+	mlx4_free_eq_table(dev);
+	if (mlx4_is_master(dev))
+		mlx4_multi_func_cleanup(dev);
+	mlx4_close_hca(dev);
+	if (mlx4_is_slave(dev))
+		mlx4_multi_func_cleanup(dev);
+	mlx4_cmd_cleanup(dev);
+
+	if (dev->flags & MLX4_FLAG_MSI_X)
+		pci_disable_msix(pdev);
+	if (dev->flags & MLX4_FLAG_SRIOV) {
+		mlx4_warn(dev, "Disabling SR-IOV\n");
+		pci_disable_sriov(pdev);
+		dev->num_vfs = 0;
 	}
+
+	if (!mlx4_is_slave(dev))
+		mlx4_free_ownership(dev);
+
+	kfree(dev->caps.qp0_tunnel);
+	kfree(dev->caps.qp0_proxy);
+	kfree(dev->caps.qp1_tunnel);
+	kfree(dev->caps.qp1_proxy);
+	kfree(dev->dev_vfs);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	memset(priv, 0, sizeof(*priv));
+	priv->pci_dev_data = pci_dev_data;
+	priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	__mlx4_remove_one(pdev);
+	kfree(priv);
+	pci_set_drvdata(pdev, NULL);
 }
 
 int mlx4_restart_one(struct pci_dev *pdev)
@@ -2692,7 +2717,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
 	int pci_dev_data;
 
 	pci_dev_data = priv->pci_dev_data;
-	mlx4_remove_one(pdev);
+	__mlx4_remove_one(pdev);
 	return __mlx4_init_one(pdev, pci_dev_data);
 }
 
@@ -2747,7 +2772,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 					      pci_channel_state_t state)
 {
-	mlx4_remove_one(pdev);
+	__mlx4_remove_one(pdev);
 
 	return state == pci_channel_io_perm_failure ?
 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2755,11 +2780,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-	const struct pci_device_id *id;
-	int ret;
+	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int ret;
 
-	id = pci_match_id(mlx4_pci_table, pdev);
-	ret = __mlx4_init_one(pdev, id->driver_data);
+	ret = __mlx4_init_one(pdev, priv->pci_dev_data);
 
 	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index cf8be41abb36..f9c465101963 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -800,6 +800,7 @@ struct mlx4_priv {
 	spinlock_t		ctx_lock;
 
 	int			pci_dev_data;
+	int			removed;
 
 	struct list_head        pgdir_list;
 	struct mutex            pgdir_mutex;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 7a733c287744..04d9b6fe3e80 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -319,7 +319,6 @@ struct mlx4_en_cq {
 	struct mlx4_cq          mcq;
 	struct mlx4_hwq_resources wqres;
 	int                     ring;
-	spinlock_t              lock;
 	struct net_device      *dev;
 	struct napi_struct	napi;
 	int size;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index b48737dcd3c5..ba20c721ee97 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -2139,8 +2139,6 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
 	ahw->max_mac_filters = nic_info.max_mac_filters;
 	ahw->max_mtu = nic_info.max_mtu;
 
-	adapter->max_tx_rings = ahw->max_tx_ques;
-	adapter->max_sds_rings = ahw->max_rx_ques;
 	/* eSwitch capability indicates vNIC mode.
 	 * vNIC and SRIOV are mutually exclusive operational modes.
 	 * If SR-IOV capability is detected, SR-IOV physical function
@@ -2161,6 +2159,7 @@ static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
 int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 {
 	struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u16 max_sds_rings, max_tx_rings;
 	int ret;
 
 	ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2173,18 +2172,21 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
 		if (qlcnic_83xx_config_vnic_opmode(adapter))
 			return -EIO;
 
-		adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
-		adapter->max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+		max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+		max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
 	} else if (ret == QLC_83XX_DEFAULT_OPMODE) {
 		ahw->nic_mode = QLCNIC_DEFAULT_MODE;
 		adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
 		ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
-		adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
-		adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+		max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+		max_tx_rings = QLCNIC_MAX_TX_RINGS;
 	} else {
 		return -EIO;
 	}
 
+	adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+	adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
 	return 0;
 }
 
@@ -2348,15 +2350,16 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
 		goto disable_intr;
 	}
 
+	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
 	err = qlcnic_83xx_setup_mbx_intr(adapter);
 	if (err)
 		goto disable_mbx_intr;
 
 	qlcnic_83xx_clear_function_resources(adapter);
-
-	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
-
+	qlcnic_dcb_enable(adapter->dcb);
 	qlcnic_83xx_initialize_nic(adapter, 1);
+	qlcnic_dcb_get_info(adapter->dcb);
 
 	/* Configure default, SR-IOV or Virtual NIC mode of operation */
 	err = qlcnic_83xx_configure_opmode(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 64dcbf33d8f0..c1e11f5715b0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -883,8 +883,6 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
 		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
 		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
 		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
-		adapter->max_tx_rings = npar_info->max_tx_ques;
-		adapter->max_sds_rings = npar_info->max_rx_ques;
 	}
 
 	qlcnic_free_mbx_args(&cmd);
@@ -1356,6 +1354,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
 		arg2 &= ~BIT_3;
 		break;
 	case QLCNIC_ADD_VLAN:
+		arg1 &= ~(0x0ffff << 16);
 		arg1 |= (BIT_2 | BIT_5);
 		arg1 |= (esw_cfg->vlan_id << 16);
 		break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 7d4f54912bad..a51fe18f09a8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -330,8 +330,6 @@ static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
 		goto out_free_cfg;
 	}
 
-	qlcnic_dcb_get_info(dcb);
-
 	return 0;
 out_free_cfg:
 	kfree(dcb->cfg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 309d05640883..dbf75393f758 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -670,7 +670,7 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 	else
 		num_msix += adapter->drv_tx_rings;
 
-	if (adapter->drv_rss_rings  > 0)
+	if (adapter->drv_rss_rings > 0)
 		num_msix += adapter->drv_rss_rings;
 	else
 		num_msix += adapter->drv_sds_rings;
@@ -686,19 +686,15 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
 		return -ENOMEM;
 	}
 
-restore:
 	for (vector = 0; vector < num_msix; vector++)
 		adapter->msix_entries[vector].entry = vector;
 
+restore:
 	err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-	if (err == 0) {
-		adapter->ahw->num_msix = num_msix;
-		if (adapter->drv_tss_rings > 0)
-			adapter->drv_tx_rings = adapter->drv_tss_rings;
+	if (err > 0) {
+		if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+			return -ENOSPC;
 
-		if (adapter->drv_rss_rings > 0)
-			adapter->drv_sds_rings = adapter->drv_rss_rings;
-	} else {
 		netdev_info(adapter->netdev,
 			    "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
 			    num_msix, err);
@@ -716,12 +712,20 @@ restore:
716 "Restoring %d Tx, %d SDS rings for total %d vectors.\n", 712 "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
717 adapter->drv_tx_rings, adapter->drv_sds_rings, 713 adapter->drv_tx_rings, adapter->drv_sds_rings,
718 num_msix); 714 num_msix);
719 goto restore;
720 715
721 err = -EIO; 716 goto restore;
717 } else if (err < 0) {
718 return err;
722 } 719 }
723 720
724 return err; 721 adapter->ahw->num_msix = num_msix;
722 if (adapter->drv_tss_rings > 0)
723 adapter->drv_tx_rings = adapter->drv_tss_rings;
724
725 if (adapter->drv_rss_rings > 0)
726 adapter->drv_sds_rings = adapter->drv_rss_rings;
727
728 return 0;
725} 729}
726 730
727int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix) 731int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
@@ -2528,8 +2532,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_free_hw;
 	}
 
-	qlcnic_dcb_enable(adapter->dcb);
-
 	if (qlcnic_read_mac_addr(adapter))
 		dev_warn(&pdev->dev, "failed to read mac addr\n");
 
@@ -2549,7 +2551,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2549 "Device does not support MSI interrupts\n"); 2551 "Device does not support MSI interrupts\n");
2550 2552
2551 if (qlcnic_82xx_check(adapter)) { 2553 if (qlcnic_82xx_check(adapter)) {
2554 qlcnic_dcb_enable(adapter->dcb);
2555 qlcnic_dcb_get_info(adapter->dcb);
2552 err = qlcnic_setup_intr(adapter); 2556 err = qlcnic_setup_intr(adapter);
2557
2553 if (err) { 2558 if (err) {
2554 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 2559 dev_err(&pdev->dev, "Failed to setup interrupt\n");
2555 goto err_out_disable_msi; 2560 goto err_out_disable_msi;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 14f748cbf0de..280137991544 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -461,6 +461,16 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 
+	if (pci_vfs_assigned(adapter->pdev)) {
+		netdev_err(adapter->netdev,
+			   "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+			   adapter->portnum);
+		netdev_info(adapter->netdev,
+			    "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+			    adapter->portnum);
+		return -EPERM;
+	}
+
 	rtnl_lock();
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 448d156c3d08..cd346e27f2e1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -354,7 +354,7 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
 {
 	int i;
 
-	for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
+	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
 		if (adapter->npars[i].pci_func == pci_func)
 			return i;
 	}
@@ -720,6 +720,7 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
 	struct qlcnic_npar_func_cfg *np_cfg;
 	struct qlcnic_info nic_info;
+	u8 pci_func;
 	int i, ret;
 	u32 count;
 
@@ -729,26 +730,28 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
 
 	count = size / sizeof(struct qlcnic_npar_func_cfg);
 	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
-		if (qlcnic_is_valid_nic_func(adapter, i) < 0)
-			continue;
 		if (adapter->npars[i].pci_func >= count) {
 			dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
 				__func__, adapter->ahw->total_nic_func, count);
 			continue;
 		}
-		ret = qlcnic_get_nic_info(adapter, &nic_info, i);
-		if (ret)
-			return ret;
 		if (!adapter->npars[i].eswitch_status)
 			continue;
-		np_cfg[i].pci_func = i;
-		np_cfg[i].op_mode = (u8)nic_info.op_mode;
-		np_cfg[i].port_num = nic_info.phys_port;
-		np_cfg[i].fw_capab = nic_info.capabilities;
-		np_cfg[i].min_bw = nic_info.min_tx_bw;
-		np_cfg[i].max_bw = nic_info.max_tx_bw;
-		np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
-		np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
+		pci_func = adapter->npars[i].pci_func;
+		if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+			continue;
+		ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+		if (ret)
+			return ret;
+
+		np_cfg[pci_func].pci_func = pci_func;
+		np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+		np_cfg[pci_func].port_num = nic_info.phys_port;
+		np_cfg[pci_func].fw_capab = nic_info.capabilities;
+		np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+		np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+		np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+		np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
 	}
 	return size;
 }
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 21c20ea0dad0..b5ed30a39144 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -738,8 +738,11 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
 	/* If it was a port reset, trigger reallocation of MC resources.
 	 * Note that on an MC reset nothing needs to be done now because we'll
 	 * detect the MC reset later and handle it then.
+	 * For an FLR, we never get an MC reset event, but the MC has reset all
+	 * resources assigned to us, so we have to trigger reallocation now.
 	 */
-	if (reset_type == RESET_TYPE_ALL && !rc)
+	if ((reset_type == RESET_TYPE_ALL ||
+	     reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
 		efx_ef10_reset_mc_allocations(efx);
 	return rc;
 }
@@ -2141,6 +2144,11 @@ static int efx_ef10_fini_dmaq(struct efx_nic *efx)
 	return 0;
 }
 
+static void efx_ef10_prepare_flr(struct efx_nic *efx)
+{
+	atomic_set(&efx->active_queues, 0);
+}
+
 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
 				  const struct efx_filter_spec *right)
 {
@@ -3603,6 +3611,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.probe_port = efx_mcdi_port_probe,
 	.remove_port = efx_mcdi_port_remove,
 	.fini_dmaq = efx_ef10_fini_dmaq,
+	.prepare_flr = efx_ef10_prepare_flr,
+	.finish_flr = efx_port_dummy_op_void,
 	.describe_stats = efx_ef10_describe_stats,
 	.update_stats = efx_ef10_update_stats,
 	.start_stats = efx_mcdi_mac_start_stats,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 57b971e5e6b2..63d595fd3cc5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -76,6 +76,7 @@ const char *const efx_reset_type_names[] = {
 	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
 	[RESET_TYPE_WORLD]              = "WORLD",
 	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+	[RESET_TYPE_MC_BIST]		= "MC_BIST",
 	[RESET_TYPE_DISABLE]            = "DISABLE",
 	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
 	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
@@ -83,7 +84,7 @@ const char *const efx_reset_type_names[] = {
 	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
 	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
 	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
-	[RESET_TYPE_MC_BIST]		= "MC_BIST",
+	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
 };
 
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
@@ -1739,7 +1740,8 @@ static void efx_start_all(struct efx_nic *efx)
 
 	/* Check that it is appropriate to restart the interface. All
 	 * of these flags are safe to read under just the rtnl lock */
-	if (efx->port_enabled || !netif_running(efx->net_dev))
+	if (efx->port_enabled || !netif_running(efx->net_dev) ||
+	    efx->reset_pending)
 		return;
 
 	efx_start_port(efx);
@@ -2334,6 +2336,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
 {
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->prepare_flr(efx);
+
 	efx_stop_all(efx);
 	efx_disable_interrupts(efx);
 
@@ -2354,6 +2359,10 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
 
 	EFX_ASSERT_RESET_SERIALISED(efx);
 
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->finish_flr(efx);
+
+	/* Ensure that SRAM is initialised even if we're disabling the device */
 	rc = efx->type->init(efx);
 	if (rc) {
 		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
@@ -2417,7 +2426,10 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
 	/* Clear flags for the scopes we covered. We assume the NIC and
 	 * driver are now quiescent so that there is no race here.
 	 */
-	efx->reset_pending &= -(1 << (method + 1));
+	if (method < RESET_TYPE_MAX_METHOD)
+		efx->reset_pending &= -(1 << (method + 1));
+	else /* it doesn't fit into the well-ordered scope hierarchy */
+		__clear_bit(method, &efx->reset_pending);
 
 	/* Reinitialise bus-mastering, which may have been turned off before
 	 * the reset was scheduled. This is still appropriate, even in the
@@ -2546,6 +2558,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
 	case RESET_TYPE_DISABLE:
 	case RESET_TYPE_RECOVER_OR_DISABLE:
 	case RESET_TYPE_MC_BIST:
+	case RESET_TYPE_MCDI_TIMEOUT:
 		method = type;
 		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
 			  RESET_TYPE(method));
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 75ef7ef6450b..d1dbb5fb31bb 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -143,6 +143,7 @@ enum efx_loopback_mode {
  * @RESET_TYPE_WORLD: Reset as much as possible
  * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
  * unsuccessful.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
  * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
  * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
  * @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,14 +151,16 @@ enum efx_loopback_mode {
  * @RESET_TYPE_DMA_ERROR: DMA error
  * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
  * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
  */
 enum reset_type {
-	RESET_TYPE_INVISIBLE = 0,
-	RESET_TYPE_RECOVER_OR_ALL = 1,
-	RESET_TYPE_ALL = 2,
-	RESET_TYPE_WORLD = 3,
-	RESET_TYPE_RECOVER_OR_DISABLE = 4,
-	RESET_TYPE_DISABLE = 5,
+	RESET_TYPE_INVISIBLE,
+	RESET_TYPE_RECOVER_OR_ALL,
+	RESET_TYPE_ALL,
+	RESET_TYPE_WORLD,
+	RESET_TYPE_RECOVER_OR_DISABLE,
+	RESET_TYPE_MC_BIST,
+	RESET_TYPE_DISABLE,
 	RESET_TYPE_MAX_METHOD,
 	RESET_TYPE_TX_WATCHDOG,
 	RESET_TYPE_INT_ERROR,
@@ -165,7 +168,13 @@ enum reset_type {
 	RESET_TYPE_DMA_ERROR,
 	RESET_TYPE_TX_SKIP,
 	RESET_TYPE_MC_FAILURE,
-	RESET_TYPE_MC_BIST,
+	/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+	 * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+	 * We encode this by having its enum value be greater than
+	 * RESET_TYPE_MAX_METHOD. This also prevents issuing it with
+	 * efx_ioctl_reset.
+	 */
+	RESET_TYPE_MCDI_TIMEOUT,
 	RESET_TYPE_MAX,
 };
 
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 8ec20b713cc6..fae25a418647 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2696,6 +2696,8 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.prepare_flr = efx_port_dummy_op_void,
+	.finish_flr = efx_farch_finish_flr,
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
@@ -2790,6 +2792,8 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.fini_dmaq = efx_farch_fini_dmaq,
 	.prepare_flush = falcon_prepare_flush,
 	.finish_flush = efx_port_dummy_op_void,
+	.prepare_flr = efx_port_dummy_op_void,
+	.finish_flr = efx_farch_finish_flr,
 	.describe_stats = falcon_describe_nic_stats,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index a08761360cdf..0537381cd2f6 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -741,6 +741,28 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
 	return rc;
 }
 
+/* Reset queue and flush accounting after FLR
+ *
+ * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
+ * mastering was disabled), in which case we don't receive (RXQ) flush
+ * completion events. This means that efx->rxq_flush_outstanding remained at 4
+ * after the FLR; also, efx->active_queues was non-zero (as no flush completion
+ * events were received, and we didn't go through efx_check_tx_flush_complete())
+ * If we don't fix this up, on the next call to efx_realloc_channels() we won't
+ * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
+ * for batched flush requests; and the efx->active_queues gets messed up because
+ * we keep incrementing for the newly initialised queues, but it never went to
+ * zero previously. Then we get a timeout every time we try to restart the
+ * queues, as it doesn't go back to zero when we should be flushing the queues.
+ */
+void efx_farch_finish_flr(struct efx_nic *efx)
+{
+	atomic_set(&efx->rxq_flush_pending, 0);
+	atomic_set(&efx->rxq_flush_outstanding, 0);
+	atomic_set(&efx->active_queues, 0);
+}
+
+
 /**************************************************************************
  *
  * Event queue processing
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 7bd4b14bf3b3..5239cf9bdc56 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -52,12 +52,7 @@ static void efx_mcdi_timeout_async(unsigned long context);
 static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
 			       bool *was_attached_out);
 static bool efx_mcdi_poll_once(struct efx_nic *efx);
-
-static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
-{
-	EFX_BUG_ON_PARANOID(!efx->mcdi);
-	return &efx->mcdi->iface;
-}
+static void efx_mcdi_abandon(struct efx_nic *efx);
 
 int efx_mcdi_init(struct efx_nic *efx)
 {
@@ -558,6 +553,8 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
 		rc = 0;
 	}
 
+	efx_mcdi_abandon(efx);
+
 	/* Close the race with efx_mcdi_ev_cpl() executing just too late
 	 * and completing a request we've just cancelled, by ensuring
 	 * that the seqno check therein fails.
@@ -672,6 +669,9 @@ int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
 	if (efx->mc_bist_for_other_fn)
 		return -ENETDOWN;
 
+	if (mcdi->mode == MCDI_MODE_FAIL)
+		return -ENETDOWN;
+
 	efx_mcdi_acquire_sync(mcdi);
 	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
 	return 0;
@@ -812,7 +812,11 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
 		return;
 
 	mcdi = efx_mcdi(efx);
-	if (mcdi->mode == MCDI_MODE_POLL)
+	/* If already in polling mode, nothing to do.
+	 * If in fail-fast state, don't switch to polled completion.
+	 * FLR recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
 		return;
 
 	/* We can switch from event completion to polled completion, because
@@ -841,8 +845,8 @@ void efx_mcdi_flush_async(struct efx_nic *efx)
 
 	mcdi = efx_mcdi(efx);
 
-	/* We must be in polling mode so no more requests can be queued */
-	BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+	/* We must be in poll or fail mode so no more requests can be queued */
+	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
 
 	del_timer_sync(&mcdi->async_timer);
 
@@ -875,8 +879,11 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
875 return; 879 return;
876 880
877 mcdi = efx_mcdi(efx); 881 mcdi = efx_mcdi(efx);
878 882 /* If already in event completion mode, nothing to do.
879 if (mcdi->mode == MCDI_MODE_EVENTS) 883 * If in fail-fast state, don't switch to event completion. FLR
884 * recovery will do that later.
885 */
886 if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
880 return; 887 return;
881 888
882 /* We can't switch from polled to event completion in the middle of a 889 /* We can't switch from polled to event completion in the middle of a
@@ -966,6 +973,19 @@ static void efx_mcdi_ev_bist(struct efx_nic *efx)
966 spin_unlock(&mcdi->iface_lock); 973 spin_unlock(&mcdi->iface_lock);
967} 974}
968 975
976/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
977 * to recover.
978 */
979static void efx_mcdi_abandon(struct efx_nic *efx)
980{
981 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
982
983 if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
984 return; /* it had already been done */
985 netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
986 efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
987}
988
969/* Called from falcon_process_eventq for MCDI events */ 989/* Called from falcon_process_eventq for MCDI events */
970void efx_mcdi_process_event(struct efx_channel *channel, 990void efx_mcdi_process_event(struct efx_channel *channel,
971 efx_qword_t *event) 991 efx_qword_t *event)
@@ -1512,6 +1532,19 @@ int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
1512{ 1532{
1513 int rc; 1533 int rc;
1514 1534
1535 /* If MCDI is down, we can't handle_assertion */
1536 if (method == RESET_TYPE_MCDI_TIMEOUT) {
1537 rc = pci_reset_function(efx->pci_dev);
1538 if (rc)
1539 return rc;
1540 /* Re-enable polled MCDI completion */
1541 if (efx->mcdi) {
1542 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
1543 mcdi->mode = MCDI_MODE_POLL;
1544 }
1545 return 0;
1546 }
1547
1515 /* Recover from a failed assertion pre-reset */ 1548 /* Recover from a failed assertion pre-reset */
1516 rc = efx_mcdi_handle_assertion(efx); 1549 rc = efx_mcdi_handle_assertion(efx);
1517 if (rc) 1550 if (rc)
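The mcdi.c hunks above introduce a fail-fast state: efx_mcdi_abandon() latches MCDI_MODE_FAIL on the first timeout, new requests are then refused with -ENETDOWN, neither mode switch may leave the failed state, and the MCDI_TIMEOUT reset path restores polled mode after pci_reset_function(). A standalone sketch of that mode machine (userspace, hypothetical names, no locking or atomic exchange) might look like this:

#include <stdio.h>

enum mode { MODE_POLL, MODE_EVENTS, MODE_FAIL };

struct iface { enum mode mode; };

/* New requests are refused while the interface is in the fail-fast state. */
static int rpc_start(struct iface *m)
{
        if (m->mode == MODE_FAIL)
                return -1;              /* stands in for -ENETDOWN */
        return 0;
}

/* Neither mode switch may leave the fail-fast state; only recovery does. */
static void mode_poll(struct iface *m)
{
        if (m->mode == MODE_POLL || m->mode == MODE_FAIL)
                return;
        m->mode = MODE_POLL;
}

static void mode_event(struct iface *m)
{
        if (m->mode == MODE_EVENTS || m->mode == MODE_FAIL)
                return;
        m->mode = MODE_EVENTS;
}

/* Timeout handler: latch the fail state once, then schedule recovery. */
static void abandon(struct iface *m)
{
        if (m->mode == MODE_FAIL)
                return;                 /* already done */
        m->mode = MODE_FAIL;
        printf("MCDI timed out; scheduling recovery reset\n");
}

/* Recovery path: after the function-level reset, re-enable polled mode. */
static void recover(struct iface *m)
{
        m->mode = MODE_POLL;
}

int main(void)
{
        struct iface m = { .mode = MODE_EVENTS };

        mode_poll(&m);                  /* normal switching still works */
        abandon(&m);                    /* first timeout latches FAIL */
        mode_event(&m);                 /* ignored: stuck in fail-fast */
        printf("rpc_start while failed: %d\n", rpc_start(&m));
        recover(&m);
        printf("rpc_start after recovery: %d\n", rpc_start(&m));
        return 0;
}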
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 52931aebf3c3..56465f7465a2 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -28,9 +28,16 @@ enum efx_mcdi_state {
28 MCDI_STATE_COMPLETED, 28 MCDI_STATE_COMPLETED,
29}; 29};
30 30
31/**
32 * enum efx_mcdi_mode - MCDI transaction mode
33 * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
34 * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
35 * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
36 */
31enum efx_mcdi_mode { 37enum efx_mcdi_mode {
32 MCDI_MODE_POLL, 38 MCDI_MODE_POLL,
33 MCDI_MODE_EVENTS, 39 MCDI_MODE_EVENTS,
40 MCDI_MODE_FAIL,
34}; 41};
35 42
36/** 43/**
@@ -104,6 +111,12 @@ struct efx_mcdi_data {
104 u32 fn_flags; 111 u32 fn_flags;
105}; 112};
106 113
114static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
115{
116 EFX_BUG_ON_PARANOID(!efx->mcdi);
117 return &efx->mcdi->iface;
118}
119
107#ifdef CONFIG_SFC_MCDI_MON 120#ifdef CONFIG_SFC_MCDI_MON
108static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx) 121static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
109{ 122{
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 8a400a0595eb..5bdae8ed7c57 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -972,6 +972,8 @@ struct efx_mtd_partition {
972 * (for Falcon architecture) 972 * (for Falcon architecture)
973 * @finish_flush: Clean up after flushing the DMA queues (for Falcon 973 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
974 * architecture) 974 * architecture)
975 * @prepare_flr: Prepare for an FLR
976 * @finish_flr: Clean up after an FLR
975 * @describe_stats: Describe statistics for ethtool 977 * @describe_stats: Describe statistics for ethtool
976 * @update_stats: Update statistics not provided by event handling. 978 * @update_stats: Update statistics not provided by event handling.
977 * Either argument may be %NULL. 979 * Either argument may be %NULL.
@@ -1100,6 +1102,8 @@ struct efx_nic_type {
1100 int (*fini_dmaq)(struct efx_nic *efx); 1102 int (*fini_dmaq)(struct efx_nic *efx);
1101 void (*prepare_flush)(struct efx_nic *efx); 1103 void (*prepare_flush)(struct efx_nic *efx);
1102 void (*finish_flush)(struct efx_nic *efx); 1104 void (*finish_flush)(struct efx_nic *efx);
1105 void (*prepare_flr)(struct efx_nic *efx);
1106 void (*finish_flr)(struct efx_nic *efx);
1103 size_t (*describe_stats)(struct efx_nic *efx, u8 *names); 1107 size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
1104 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats, 1108 size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
1105 struct rtnl_link_stats64 *core_stats); 1109 struct rtnl_link_stats64 *core_stats);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index a001fae1a8d7..d3ad8ed8d901 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -757,6 +757,7 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
757int efx_nic_flush_queues(struct efx_nic *efx); 757int efx_nic_flush_queues(struct efx_nic *efx);
758void siena_prepare_flush(struct efx_nic *efx); 758void siena_prepare_flush(struct efx_nic *efx);
759int efx_farch_fini_dmaq(struct efx_nic *efx); 759int efx_farch_fini_dmaq(struct efx_nic *efx);
760void efx_farch_finish_flr(struct efx_nic *efx);
760void siena_finish_flush(struct efx_nic *efx); 761void siena_finish_flush(struct efx_nic *efx);
761void falcon_start_nic_stats(struct efx_nic *efx); 762void falcon_start_nic_stats(struct efx_nic *efx);
762void falcon_stop_nic_stats(struct efx_nic *efx); 763void falcon_stop_nic_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 23f3a6f7737a..50ffefed492c 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -921,6 +921,8 @@ const struct efx_nic_type siena_a0_nic_type = {
921 .fini_dmaq = efx_farch_fini_dmaq, 921 .fini_dmaq = efx_farch_fini_dmaq,
922 .prepare_flush = siena_prepare_flush, 922 .prepare_flush = siena_prepare_flush,
923 .finish_flush = siena_finish_flush, 923 .finish_flush = siena_finish_flush,
924 .prepare_flr = efx_port_dummy_op_void,
925 .finish_flr = efx_farch_finish_flr,
924 .describe_stats = siena_describe_nic_stats, 926 .describe_stats = siena_describe_nic_stats,
925 .update_stats = siena_update_nic_stats, 927 .update_stats = siena_update_nic_stats,
926 .start_stats = efx_mcdi_mac_start_stats, 928 .start_stats = efx_mcdi_mac_start_stats,
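siena wires the new prepare_flr/finish_flr hooks with a dummy op and efx_farch_finish_flr(). The code that actually drives the hooks lives outside this section; a possible caller shape, sketched with made-up types rather than the driver's, is:

#include <stdio.h>

struct nic;

struct nic_type {
        void (*prepare_flr)(struct nic *nic);
        void (*finish_flr)(struct nic *nic);
};

struct nic {
        const struct nic_type *type;
        int active_queues;              /* stands in for the flush counters */
};

static void dummy_op(struct nic *nic)
{
        (void)nic;                      /* nothing to do before the FLR */
}

static void farch_finish_flr(struct nic *nic)
{
        /* The FLR reinitialised the queues; forget outstanding flushes. */
        nic->active_queues = 0;
}

static const struct nic_type example_nic_type = {
        .prepare_flr = dummy_op,
        .finish_flr  = farch_finish_flr,
};

/* A recovery path of this shape would bracket the reset with the hooks. */
static void do_flr(struct nic *nic)
{
        nic->type->prepare_flr(nic);
        printf("issuing function-level reset...\n");
        nic->type->finish_flr(nic);
}

int main(void)
{
        struct nic nic = { .type = &example_nic_type, .active_queues = 7 };

        do_flr(&nic);
        printf("active_queues after FLR: %d\n", nic.active_queues);
        return 0;
}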
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 430bb0db9bc4..e36f194673a4 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -365,7 +365,7 @@ __at86rf230_read_subreg(struct at86rf230_local *lp,
365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]); 365 dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
366 366
367 if (status == 0) 367 if (status == 0)
368 *data = buf[1]; 368 *data = (buf[1] & mask) >> shift;
369 369
370 return status; 370 return status;
371} 371}
@@ -1025,14 +1025,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
1025 return -EINVAL; 1025 return -EINVAL;
1026 } 1026 }
1027 1027
1028 rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
1029 if (rc)
1030 return rc;
1031 if (!status) {
1032 dev_err(&lp->spi->dev, "AVDD error\n");
1033 return -EINVAL;
1034 }
1035
1036 return 0; 1028 return 0;
1037} 1029}
1038 1030
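The one-line at86rf230 fix returns only the requested subregister field rather than the whole register byte. A standalone model of that mask-and-shift extraction (the field layout used in main() is invented for illustration):

#include <stdint.h>
#include <stdio.h>

static uint8_t read_subreg(uint8_t reg_value, uint8_t mask, int shift)
{
        return (reg_value & mask) >> shift;     /* field only, right-aligned */
}

int main(void)
{
        uint8_t reg = 0xd6;     /* pretend raw register contents */

        /* A 3-bit field in bits 4..6 would use mask 0x70 and shift 4. */
        printf("field = %u\n", read_subreg(reg, 0x70, 4));
        return 0;
}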
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index e701433bf52f..9c4defdec67b 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,29 +32,39 @@
32 32
33struct mdio_gpio_info { 33struct mdio_gpio_info {
34 struct mdiobb_ctrl ctrl; 34 struct mdiobb_ctrl ctrl;
35 int mdc, mdio; 35 int mdc, mdio, mdo;
36 int mdc_active_low, mdio_active_low, mdo_active_low;
36}; 37};
37 38
38static void *mdio_gpio_of_get_data(struct platform_device *pdev) 39static void *mdio_gpio_of_get_data(struct platform_device *pdev)
39{ 40{
40 struct device_node *np = pdev->dev.of_node; 41 struct device_node *np = pdev->dev.of_node;
41 struct mdio_gpio_platform_data *pdata; 42 struct mdio_gpio_platform_data *pdata;
43 enum of_gpio_flags flags;
42 int ret; 44 int ret;
43 45
44 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 46 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
45 if (!pdata) 47 if (!pdata)
46 return NULL; 48 return NULL;
47 49
48 ret = of_get_gpio(np, 0); 50 ret = of_get_gpio_flags(np, 0, &flags);
49 if (ret < 0) 51 if (ret < 0)
50 return NULL; 52 return NULL;
51 53
52 pdata->mdc = ret; 54 pdata->mdc = ret;
55 pdata->mdc_active_low = flags & OF_GPIO_ACTIVE_LOW;
53 56
54 ret = of_get_gpio(np, 1); 57 ret = of_get_gpio_flags(np, 1, &flags);
55 if (ret < 0) 58 if (ret < 0)
56 return NULL; 59 return NULL;
57 pdata->mdio = ret; 60 pdata->mdio = ret;
61 pdata->mdio_active_low = flags & OF_GPIO_ACTIVE_LOW;
62
63 ret = of_get_gpio_flags(np, 2, &flags);
64 if (ret > 0) {
65 pdata->mdo = ret;
66 pdata->mdo_active_low = flags & OF_GPIO_ACTIVE_LOW;
67 }
58 68
59 return pdata; 69 return pdata;
60} 70}
@@ -64,8 +74,19 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
64 struct mdio_gpio_info *bitbang = 74 struct mdio_gpio_info *bitbang =
65 container_of(ctrl, struct mdio_gpio_info, ctrl); 75 container_of(ctrl, struct mdio_gpio_info, ctrl);
66 76
77 if (bitbang->mdo) {
78 /* Separate output pin. Always set its value to high
79 * when changing direction. If direction is input,
80 * assume the pin serves as pull-up. If direction is
81 * output, the default value is high.
82 */
83 gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
84 return;
85 }
86
67 if (dir) 87 if (dir)
68 gpio_direction_output(bitbang->mdio, 1); 88 gpio_direction_output(bitbang->mdio,
89 1 ^ bitbang->mdio_active_low);
69 else 90 else
70 gpio_direction_input(bitbang->mdio); 91 gpio_direction_input(bitbang->mdio);
71} 92}
@@ -75,7 +96,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
75 struct mdio_gpio_info *bitbang = 96 struct mdio_gpio_info *bitbang =
76 container_of(ctrl, struct mdio_gpio_info, ctrl); 97 container_of(ctrl, struct mdio_gpio_info, ctrl);
77 98
78 return gpio_get_value(bitbang->mdio); 99 return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
79} 100}
80 101
81static void mdio_set(struct mdiobb_ctrl *ctrl, int what) 102static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -83,7 +104,10 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
83 struct mdio_gpio_info *bitbang = 104 struct mdio_gpio_info *bitbang =
84 container_of(ctrl, struct mdio_gpio_info, ctrl); 105 container_of(ctrl, struct mdio_gpio_info, ctrl);
85 106
86 gpio_set_value(bitbang->mdio, what); 107 if (bitbang->mdo)
108 gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
109 else
110 gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
87} 111}
88 112
89static void mdc_set(struct mdiobb_ctrl *ctrl, int what) 113static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -91,7 +115,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
91 struct mdio_gpio_info *bitbang = 115 struct mdio_gpio_info *bitbang =
92 container_of(ctrl, struct mdio_gpio_info, ctrl); 116 container_of(ctrl, struct mdio_gpio_info, ctrl);
93 117
94 gpio_set_value(bitbang->mdc, what); 118 gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
95} 119}
96 120
97static struct mdiobb_ops mdio_gpio_ops = { 121static struct mdiobb_ops mdio_gpio_ops = {
@@ -110,18 +134,22 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
110 struct mdio_gpio_info *bitbang; 134 struct mdio_gpio_info *bitbang;
111 int i; 135 int i;
112 136
113 bitbang = kzalloc(sizeof(*bitbang), GFP_KERNEL); 137 bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
114 if (!bitbang) 138 if (!bitbang)
115 goto out; 139 goto out;
116 140
117 bitbang->ctrl.ops = &mdio_gpio_ops; 141 bitbang->ctrl.ops = &mdio_gpio_ops;
118 bitbang->ctrl.reset = pdata->reset; 142 bitbang->ctrl.reset = pdata->reset;
119 bitbang->mdc = pdata->mdc; 143 bitbang->mdc = pdata->mdc;
144 bitbang->mdc_active_low = pdata->mdc_active_low;
120 bitbang->mdio = pdata->mdio; 145 bitbang->mdio = pdata->mdio;
146 bitbang->mdio_active_low = pdata->mdio_active_low;
147 bitbang->mdo = pdata->mdo;
148 bitbang->mdo_active_low = pdata->mdo_active_low;
121 149
122 new_bus = alloc_mdio_bitbang(&bitbang->ctrl); 150 new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
123 if (!new_bus) 151 if (!new_bus)
124 goto out_free_bitbang; 152 goto out;
125 153
126 new_bus->name = "GPIO Bitbanged MDIO", 154 new_bus->name = "GPIO Bitbanged MDIO",
127 155
@@ -138,11 +166,18 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
138 166
139 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id); 167 snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
140 168
141 if (gpio_request(bitbang->mdc, "mdc")) 169 if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
170 goto out_free_bus;
171
172 if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
142 goto out_free_bus; 173 goto out_free_bus;
143 174
144 if (gpio_request(bitbang->mdio, "mdio")) 175 if (bitbang->mdo) {
145 goto out_free_mdc; 176 if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
177 goto out_free_bus;
178 gpio_direction_output(bitbang->mdo, 1);
179 gpio_direction_input(bitbang->mdio);
180 }
146 181
147 gpio_direction_output(bitbang->mdc, 0); 182 gpio_direction_output(bitbang->mdc, 0);
148 183
@@ -150,12 +185,8 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
150 185
151 return new_bus; 186 return new_bus;
152 187
153out_free_mdc:
154 gpio_free(bitbang->mdc);
155out_free_bus: 188out_free_bus:
156 free_mdio_bitbang(new_bus); 189 free_mdio_bitbang(new_bus);
157out_free_bitbang:
158 kfree(bitbang);
159out: 190out:
160 return NULL; 191 return NULL;
161} 192}
@@ -163,13 +194,8 @@ out:
163static void mdio_gpio_bus_deinit(struct device *dev) 194static void mdio_gpio_bus_deinit(struct device *dev)
164{ 195{
165 struct mii_bus *bus = dev_get_drvdata(dev); 196 struct mii_bus *bus = dev_get_drvdata(dev);
166 struct mdio_gpio_info *bitbang = bus->priv;
167 197
168 dev_set_drvdata(dev, NULL);
169 gpio_free(bitbang->mdio);
170 gpio_free(bitbang->mdc);
171 free_mdio_bitbang(bus); 198 free_mdio_bitbang(bus);
172 kfree(bitbang);
173} 199}
174 200
175static void mdio_gpio_bus_destroy(struct device *dev) 201static void mdio_gpio_bus_destroy(struct device *dev)
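The mdio-gpio rework folds the new active-low flags into every access by XOR-ing the logical level with the flag, both when driving and when reading a pin. A minimal model of that inversion, outside any GPIO framework:

#include <stdio.h>

struct gpio_line {
        int active_low;         /* 1 if the wire is inverted */
        int raw;                /* level the pin actually drives or reads */
};

/* Drive a logical level, honouring the inversion. */
static void line_set(struct gpio_line *l, int logical)
{
        l->raw = logical ^ l->active_low;
}

/* Recover the logical level from the raw pin state. */
static int line_get(const struct gpio_line *l)
{
        return l->raw ^ l->active_low;
}

int main(void)
{
        struct gpio_line mdc = { .active_low = 1, .raw = 0 };

        line_set(&mdc, 1);      /* logical high drives the pin low */
        printf("raw=%d logical=%d\n", mdc.raw, line_get(&mdc));
        return 0;
}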
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c55e316373a1..82355d5d155a 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1755,8 +1755,8 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
1755 if (err) 1755 if (err)
1756 return err; 1756 return err;
1757 1757
1758 return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df, 1758 return iptunnel_xmit(vs->sock->sk, rt, skb, src, dst, IPPROTO_UDP,
1759 false); 1759 tos, ttl, df, false);
1760} 1760}
1761EXPORT_SYMBOL_GPL(vxlan_xmit_skb); 1761EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
1762 1762
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 84734a805092..83c39e2858bf 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1521,11 +1521,7 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
1521 cosa_putstatus(cosa, 0); 1521 cosa_putstatus(cosa, 0);
1522 cosa_getdata8(cosa); 1522 cosa_getdata8(cosa);
1523 cosa_putstatus(cosa, SR_RST); 1523 cosa_putstatus(cosa, SR_RST);
1524#ifdef MODULE
1525 msleep(500); 1524 msleep(500);
1526#else
1527 udelay(5*100000);
1528#endif
1529 /* Disable all IRQs from the card */ 1525 /* Disable all IRQs from the card */
1530 cosa_putstatus(cosa, 0); 1526 cosa_putstatus(cosa, 0);
1531 1527
diff --git a/drivers/net/wireless/cw1200/debug.c b/drivers/net/wireless/cw1200/debug.c
index e323b4d54338..34f97c31eecf 100644
--- a/drivers/net/wireless/cw1200/debug.c
+++ b/drivers/net/wireless/cw1200/debug.c
@@ -41,6 +41,8 @@ static const char * const cw1200_debug_link_id[] = {
41 "REQ", 41 "REQ",
42 "SOFT", 42 "SOFT",
43 "HARD", 43 "HARD",
44 "RESET",
45 "RESET_REMAP",
44}; 46};
45 47
46static const char *cw1200_debug_mode(int mode) 48static const char *cw1200_debug_mode(int mode)
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 003a546571d4..4c2d4ef28b22 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
67#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
68 68
69/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
70#define IWL7260_UCODE_API_MAX 8 70#define IWL7260_UCODE_API_MAX 9
71#define IWL3160_UCODE_API_MAX 8 71#define IWL3160_UCODE_API_MAX 9
72 72
73/* Oldest version we won't warn about */ 73/* Oldest version we won't warn about */
74#define IWL7260_UCODE_API_OK 8 74#define IWL7260_UCODE_API_OK 8
@@ -244,3 +244,4 @@ const struct iwl_cfg iwl7265_n_cfg = {
244 244
245MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 245MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
246MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK)); 246MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
247MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 685f7e8e6943..fa858d548d13 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -190,7 +190,7 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
190 cpu_to_le32(0xcc00aaaa), 190 cpu_to_le32(0xcc00aaaa),
191 cpu_to_le32(0x0000aaaa), 191 cpu_to_le32(0x0000aaaa),
192 cpu_to_le32(0xc0004000), 192 cpu_to_le32(0xc0004000),
193 cpu_to_le32(0x00000000), 193 cpu_to_le32(0x00004000),
194 cpu_to_le32(0xf0005000), 194 cpu_to_le32(0xf0005000),
195 cpu_to_le32(0xf0005000), 195 cpu_to_le32(0xf0005000),
196 }, 196 },
@@ -213,16 +213,16 @@ static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
213 /* Tx Tx disabled */ 213 /* Tx Tx disabled */
214 cpu_to_le32(0xaaaaaaaa), 214 cpu_to_le32(0xaaaaaaaa),
215 cpu_to_le32(0xaaaaaaaa), 215 cpu_to_le32(0xaaaaaaaa),
216 cpu_to_le32(0xaaaaaaaa), 216 cpu_to_le32(0xeeaaaaaa),
217 cpu_to_le32(0xaaaaaaaa), 217 cpu_to_le32(0xaaaaaaaa),
218 cpu_to_le32(0xcc00ff28), 218 cpu_to_le32(0xcc00ff28),
219 cpu_to_le32(0x0000aaaa), 219 cpu_to_le32(0x0000aaaa),
220 cpu_to_le32(0xcc00aaaa), 220 cpu_to_le32(0xcc00aaaa),
221 cpu_to_le32(0x0000aaaa), 221 cpu_to_le32(0x0000aaaa),
222 cpu_to_le32(0xC0004000), 222 cpu_to_le32(0xc0004000),
223 cpu_to_le32(0xC0004000), 223 cpu_to_le32(0xc0004000),
224 cpu_to_le32(0xF0005000), 224 cpu_to_le32(0xf0005000),
225 cpu_to_le32(0xF0005000), 225 cpu_to_le32(0xf0005000),
226 }, 226 },
227}; 227};
228 228
@@ -1262,6 +1262,7 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1262 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1262 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1263 u32 ant_isolation = le32_to_cpup((void *)pkt->data); 1263 u32 ant_isolation = le32_to_cpup((void *)pkt->data);
1264 u8 __maybe_unused lower_bound, upper_bound; 1264 u8 __maybe_unused lower_bound, upper_bound;
1265 int ret;
1265 u8 lut; 1266 u8 lut;
1266 1267
1267 struct iwl_bt_coex_cmd *bt_cmd; 1268 struct iwl_bt_coex_cmd *bt_cmd;
@@ -1318,5 +1319,8 @@ int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
1318 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20, 1319 memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
1319 sizeof(bt_cmd->bt4_corun_lut40)); 1320 sizeof(bt_cmd->bt4_corun_lut40));
1320 1321
1321 return 0; 1322 ret = iwl_mvm_send_cmd(mvm, &cmd);
1323
1324 kfree(bt_cmd);
1325 return ret;
1322} 1326}
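The final coex.c hunk makes the antenna-coupling handler actually send the command it built and free the temporary buffer on the way out. A generic sketch of that allocate/send/free-on-all-paths pattern (names invented, not the iwlwifi API):

#include <stdio.h>
#include <stdlib.h>

struct cmd { int payload; };

static int send_cmd(const struct cmd *c)
{
        printf("sending payload %d\n", c->payload);
        return 0;
}

static int build_and_send(int payload)
{
        struct cmd *c = calloc(1, sizeof(*c));
        int ret;

        if (!c)
                return -1;

        c->payload = payload;
        ret = send_cmd(c);      /* result is propagated to the caller */
        free(c);                /* freed whether or not the send worked */
        return ret;
}

int main(void)
{
        return build_and_send(42) ? 1 : 0;
}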
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 4dd9ff43b8b6..f0cebf12c7b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1332,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
1332 */ 1332 */
1333 iwl_mvm_remove_time_event(mvm, mvmvif, 1333 iwl_mvm_remove_time_event(mvm, mvmvif,
1334 &mvmvif->time_event_data); 1334 &mvmvif->time_event_data);
1335 iwl_mvm_sf_update(mvm, vif, false);
1335 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC)); 1336 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
1336 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | 1337 } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
1337 BSS_CHANGED_QOS)) { 1338 BSS_CHANGED_QOS)) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 568abd61b14f..9f52c5b3f0ec 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -59,7 +59,7 @@
59/* max allowed rate miss before sync LQ cmd */ 59/* max allowed rate miss before sync LQ cmd */
60#define IWL_MISSED_RATE_MAX 15 60#define IWL_MISSED_RATE_MAX 15
61#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ) 61#define RS_STAY_IN_COLUMN_TIMEOUT (5*HZ)
62 62#define RS_IDLE_TIMEOUT (5*HZ)
63 63
64static u8 rs_ht_to_legacy[] = { 64static u8 rs_ht_to_legacy[] = {
65 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX, 65 [IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
@@ -142,7 +142,7 @@ enum rs_column_mode {
142 RS_MIMO2, 142 RS_MIMO2,
143}; 143};
144 144
145#define MAX_NEXT_COLUMNS 5 145#define MAX_NEXT_COLUMNS 7
146#define MAX_COLUMN_CHECKS 3 146#define MAX_COLUMN_CHECKS 3
147 147
148typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm, 148typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
@@ -212,8 +212,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
212 RS_COLUMN_LEGACY_ANT_B, 212 RS_COLUMN_LEGACY_ANT_B,
213 RS_COLUMN_SISO_ANT_A, 213 RS_COLUMN_SISO_ANT_A,
214 RS_COLUMN_SISO_ANT_B, 214 RS_COLUMN_SISO_ANT_B,
215 RS_COLUMN_MIMO2, 215 RS_COLUMN_INVALID,
216 RS_COLUMN_MIMO2_SGI, 216 RS_COLUMN_INVALID,
217 RS_COLUMN_INVALID,
218 RS_COLUMN_INVALID,
217 }, 219 },
218 }, 220 },
219 [RS_COLUMN_LEGACY_ANT_B] = { 221 [RS_COLUMN_LEGACY_ANT_B] = {
@@ -223,8 +225,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
223 RS_COLUMN_LEGACY_ANT_A, 225 RS_COLUMN_LEGACY_ANT_A,
224 RS_COLUMN_SISO_ANT_A, 226 RS_COLUMN_SISO_ANT_A,
225 RS_COLUMN_SISO_ANT_B, 227 RS_COLUMN_SISO_ANT_B,
226 RS_COLUMN_MIMO2, 228 RS_COLUMN_INVALID,
227 RS_COLUMN_MIMO2_SGI, 229 RS_COLUMN_INVALID,
230 RS_COLUMN_INVALID,
231 RS_COLUMN_INVALID,
228 }, 232 },
229 }, 233 },
230 [RS_COLUMN_SISO_ANT_A] = { 234 [RS_COLUMN_SISO_ANT_A] = {
@@ -235,7 +239,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
235 RS_COLUMN_MIMO2, 239 RS_COLUMN_MIMO2,
236 RS_COLUMN_SISO_ANT_A_SGI, 240 RS_COLUMN_SISO_ANT_A_SGI,
237 RS_COLUMN_SISO_ANT_B_SGI, 241 RS_COLUMN_SISO_ANT_B_SGI,
238 RS_COLUMN_MIMO2_SGI, 242 RS_COLUMN_LEGACY_ANT_A,
243 RS_COLUMN_LEGACY_ANT_B,
244 RS_COLUMN_INVALID,
239 }, 245 },
240 .checks = { 246 .checks = {
241 rs_siso_allow, 247 rs_siso_allow,
@@ -249,7 +255,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
249 RS_COLUMN_MIMO2, 255 RS_COLUMN_MIMO2,
250 RS_COLUMN_SISO_ANT_B_SGI, 256 RS_COLUMN_SISO_ANT_B_SGI,
251 RS_COLUMN_SISO_ANT_A_SGI, 257 RS_COLUMN_SISO_ANT_A_SGI,
252 RS_COLUMN_MIMO2_SGI, 258 RS_COLUMN_LEGACY_ANT_A,
259 RS_COLUMN_LEGACY_ANT_B,
260 RS_COLUMN_INVALID,
253 }, 261 },
254 .checks = { 262 .checks = {
255 rs_siso_allow, 263 rs_siso_allow,
@@ -265,6 +273,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
265 RS_COLUMN_SISO_ANT_A, 273 RS_COLUMN_SISO_ANT_A,
266 RS_COLUMN_SISO_ANT_B, 274 RS_COLUMN_SISO_ANT_B,
267 RS_COLUMN_MIMO2, 275 RS_COLUMN_MIMO2,
276 RS_COLUMN_LEGACY_ANT_A,
277 RS_COLUMN_LEGACY_ANT_B,
268 }, 278 },
269 .checks = { 279 .checks = {
270 rs_siso_allow, 280 rs_siso_allow,
@@ -281,6 +291,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
281 RS_COLUMN_SISO_ANT_B, 291 RS_COLUMN_SISO_ANT_B,
282 RS_COLUMN_SISO_ANT_A, 292 RS_COLUMN_SISO_ANT_A,
283 RS_COLUMN_MIMO2, 293 RS_COLUMN_MIMO2,
294 RS_COLUMN_LEGACY_ANT_A,
295 RS_COLUMN_LEGACY_ANT_B,
284 }, 296 },
285 .checks = { 297 .checks = {
286 rs_siso_allow, 298 rs_siso_allow,
@@ -296,6 +308,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
296 RS_COLUMN_SISO_ANT_A_SGI, 308 RS_COLUMN_SISO_ANT_A_SGI,
297 RS_COLUMN_SISO_ANT_B_SGI, 309 RS_COLUMN_SISO_ANT_B_SGI,
298 RS_COLUMN_MIMO2_SGI, 310 RS_COLUMN_MIMO2_SGI,
311 RS_COLUMN_LEGACY_ANT_A,
312 RS_COLUMN_LEGACY_ANT_B,
299 }, 313 },
300 .checks = { 314 .checks = {
301 rs_mimo_allow, 315 rs_mimo_allow,
@@ -311,6 +325,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
311 RS_COLUMN_SISO_ANT_A, 325 RS_COLUMN_SISO_ANT_A,
312 RS_COLUMN_SISO_ANT_B, 326 RS_COLUMN_SISO_ANT_B,
313 RS_COLUMN_MIMO2, 327 RS_COLUMN_MIMO2,
328 RS_COLUMN_LEGACY_ANT_A,
329 RS_COLUMN_LEGACY_ANT_B,
314 }, 330 },
315 .checks = { 331 .checks = {
316 rs_mimo_allow, 332 rs_mimo_allow,
@@ -503,10 +519,12 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
503 window->average_tpt = IWL_INVALID_VALUE; 519 window->average_tpt = IWL_INVALID_VALUE;
504} 520}
505 521
506static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl) 522static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
523 struct iwl_scale_tbl_info *tbl)
507{ 524{
508 int i; 525 int i;
509 526
527 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
510 for (i = 0; i < IWL_RATE_COUNT; i++) 528 for (i = 0; i < IWL_RATE_COUNT; i++)
511 rs_rate_scale_clear_window(&tbl->win[i]); 529 rs_rate_scale_clear_window(&tbl->win[i]);
512} 530}
@@ -992,6 +1010,13 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
992 return; 1010 return;
993 } 1011 }
994 1012
1013#ifdef CPTCFG_MAC80211_DEBUGFS
1014 /* Disable last tx check if we are debugging with fixed rate */
1015 if (lq_sta->dbg_fixed_rate) {
1016 IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
1017 return;
1018 }
1019#endif
995 if (!ieee80211_is_data(hdr->frame_control) || 1020 if (!ieee80211_is_data(hdr->frame_control) ||
996 info->flags & IEEE80211_TX_CTL_NO_ACK) 1021 info->flags & IEEE80211_TX_CTL_NO_ACK)
997 return; 1022 return;
@@ -1034,6 +1059,18 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
1034 mac_index++; 1059 mac_index++;
1035 } 1060 }
1036 1061
1062 if (time_after(jiffies,
1063 (unsigned long)(lq_sta->last_tx + RS_IDLE_TIMEOUT))) {
1064 int tid;
1065 IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
1066 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
1067 ieee80211_stop_tx_ba_session(sta, tid);
1068
1069 iwl_mvm_rs_rate_init(mvm, sta, sband->band, false);
1070 return;
1071 }
1072 lq_sta->last_tx = jiffies;
1073
1037 /* Here we actually compare this rate to the latest LQ command */ 1074 /* Here we actually compare this rate to the latest LQ command */
1038 if ((mac_index < 0) || 1075 if ((mac_index < 0) ||
1039 (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 1076 (rate.sgi != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
@@ -1186,9 +1223,26 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
1186 lq_sta->visited_columns = 0; 1223 lq_sta->visited_columns = 0;
1187} 1224}
1188 1225
1226static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
1227 const struct rs_tx_column *column)
1228{
1229 switch (column->mode) {
1230 case RS_LEGACY:
1231 return lq_sta->max_legacy_rate_idx;
1232 case RS_SISO:
1233 return lq_sta->max_siso_rate_idx;
1234 case RS_MIMO2:
1235 return lq_sta->max_mimo2_rate_idx;
1236 default:
1237 WARN_ON_ONCE(1);
1238 }
1239
1240 return lq_sta->max_legacy_rate_idx;
1241}
1242
1189static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta, 1243static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1190 const struct rs_tx_column *column, 1244 const struct rs_tx_column *column,
1191 u32 bw) 1245 u32 bw)
1192{ 1246{
1193 /* Used to choose among HT tables */ 1247 /* Used to choose among HT tables */
1194 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT]; 1248 const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
@@ -1438,7 +1492,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1438 1492
1439 IWL_DEBUG_RATE(mvm, 1493 IWL_DEBUG_RATE(mvm,
1440 "LQ: stay in table clear win\n"); 1494 "LQ: stay in table clear win\n");
1441 rs_rate_scale_clear_tbl_windows(tbl); 1495 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1442 } 1496 }
1443 } 1497 }
1444 1498
@@ -1446,8 +1500,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
1446 * bitmaps and stats in active table (this will become the new 1500 * bitmaps and stats in active table (this will become the new
1447 * "search" table). */ 1501 * "search" table). */
1448 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) { 1502 if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
1449 IWL_DEBUG_RATE(mvm, "Clearing up window stats\n"); 1503 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1450 rs_rate_scale_clear_tbl_windows(tbl);
1451 } 1504 }
1452 } 1505 }
1453} 1506}
@@ -1485,14 +1538,14 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1485 struct ieee80211_sta *sta, 1538 struct ieee80211_sta *sta,
1486 struct iwl_scale_tbl_info *tbl) 1539 struct iwl_scale_tbl_info *tbl)
1487{ 1540{
1488 int i, j, n; 1541 int i, j, max_rate;
1489 enum rs_column next_col_id; 1542 enum rs_column next_col_id;
1490 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column]; 1543 const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
1491 const struct rs_tx_column *next_col; 1544 const struct rs_tx_column *next_col;
1492 allow_column_func_t allow_func; 1545 allow_column_func_t allow_func;
1493 u8 valid_ants = mvm->fw->valid_tx_ant; 1546 u8 valid_ants = mvm->fw->valid_tx_ant;
1494 const u16 *expected_tpt_tbl; 1547 const u16 *expected_tpt_tbl;
1495 s32 tpt, max_expected_tpt; 1548 u16 tpt, max_expected_tpt;
1496 1549
1497 for (i = 0; i < MAX_NEXT_COLUMNS; i++) { 1550 for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
1498 next_col_id = curr_col->next_columns[i]; 1551 next_col_id = curr_col->next_columns[i];
@@ -1535,11 +1588,11 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1535 if (WARN_ON_ONCE(!expected_tpt_tbl)) 1588 if (WARN_ON_ONCE(!expected_tpt_tbl))
1536 continue; 1589 continue;
1537 1590
1538 max_expected_tpt = 0; 1591 max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
1539 for (n = 0; n < IWL_RATE_COUNT; n++) 1592 if (WARN_ON_ONCE(max_rate == IWL_RATE_INVALID))
1540 if (expected_tpt_tbl[n] > max_expected_tpt) 1593 continue;
1541 max_expected_tpt = expected_tpt_tbl[n];
1542 1594
1595 max_expected_tpt = expected_tpt_tbl[max_rate];
1543 if (tpt >= max_expected_tpt) { 1596 if (tpt >= max_expected_tpt) {
1544 IWL_DEBUG_RATE(mvm, 1597 IWL_DEBUG_RATE(mvm,
1545 "Skip column %d: can't beat current TPT. Max expected %d current %d\n", 1598 "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
@@ -1547,14 +1600,15 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
1547 continue; 1600 continue;
1548 } 1601 }
1549 1602
1603 IWL_DEBUG_RATE(mvm,
1604 "Found potential column %d. Max expected %d current %d\n",
1605 next_col_id, max_expected_tpt, tpt);
1550 break; 1606 break;
1551 } 1607 }
1552 1608
1553 if (i == MAX_NEXT_COLUMNS) 1609 if (i == MAX_NEXT_COLUMNS)
1554 return RS_COLUMN_INVALID; 1610 return RS_COLUMN_INVALID;
1555 1611
1556 IWL_DEBUG_RATE(mvm, "Found potential column %d\n", next_col_id);
1557
1558 return next_col_id; 1612 return next_col_id;
1559} 1613}
1560 1614
@@ -1640,85 +1694,76 @@ static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
1640{ 1694{
1641 enum rs_action action = RS_ACTION_STAY; 1695 enum rs_action action = RS_ACTION_STAY;
1642 1696
1643 /* Too many failures, decrease rate */
1644 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) { 1697 if ((sr <= RS_SR_FORCE_DECREASE) || (current_tpt == 0)) {
1645 IWL_DEBUG_RATE(mvm, 1698 IWL_DEBUG_RATE(mvm,
1646 "decrease rate because of low SR\n"); 1699 "Decrease rate because of low SR\n");
1647 action = RS_ACTION_DOWNSCALE; 1700 return RS_ACTION_DOWNSCALE;
1648 /* No throughput measured yet for adjacent rates; try increase. */
1649 } else if ((low_tpt == IWL_INVALID_VALUE) &&
1650 (high_tpt == IWL_INVALID_VALUE)) {
1651 if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH) {
1652 IWL_DEBUG_RATE(mvm,
1653 "Good SR and no high rate measurement. "
1654 "Increase rate\n");
1655 action = RS_ACTION_UPSCALE;
1656 } else if (low != IWL_RATE_INVALID) {
1657 IWL_DEBUG_RATE(mvm,
1658 "Remain in current rate\n");
1659 action = RS_ACTION_STAY;
1660 }
1661 } 1701 }
1662 1702
1663 /* Both adjacent throughputs are measured, but neither one has better 1703 if ((low_tpt == IWL_INVALID_VALUE) &&
1664 * throughput; we're using the best rate, don't change it! 1704 (high_tpt == IWL_INVALID_VALUE) &&
1665 */ 1705 (high != IWL_RATE_INVALID)) {
1666 else if ((low_tpt != IWL_INVALID_VALUE) &&
1667 (high_tpt != IWL_INVALID_VALUE) &&
1668 (low_tpt < current_tpt) &&
1669 (high_tpt < current_tpt)) {
1670 IWL_DEBUG_RATE(mvm, 1706 IWL_DEBUG_RATE(mvm,
1671 "Both high and low are worse. " 1707 "No data about high/low rates. Increase rate\n");
1672 "Maintain rate\n"); 1708 return RS_ACTION_UPSCALE;
1673 action = RS_ACTION_STAY;
1674 } 1709 }
1675 1710
1676 /* At least one adjacent rate's throughput is measured, 1711 if ((high_tpt == IWL_INVALID_VALUE) &&
1677 * and may have better performance. 1712 (high != IWL_RATE_INVALID) &&
1678 */ 1713 (low_tpt != IWL_INVALID_VALUE) &&
1679 else { 1714 (low_tpt < current_tpt)) {
1680 /* Higher adjacent rate's throughput is measured */ 1715 IWL_DEBUG_RATE(mvm,
1681 if (high_tpt != IWL_INVALID_VALUE) { 1716 "No data about high rate and low rate is worse. Increase rate\n");
1682 /* Higher rate has better throughput */ 1717 return RS_ACTION_UPSCALE;
1683 if (high_tpt > current_tpt && 1718 }
1684 sr >= IWL_RATE_INCREASE_TH) {
1685 IWL_DEBUG_RATE(mvm,
1686 "Higher rate is better and good "
1687 "SR. Increate rate\n");
1688 action = RS_ACTION_UPSCALE;
1689 } else {
1690 IWL_DEBUG_RATE(mvm,
1691 "Higher rate isn't better OR "
1692 "no good SR. Maintain rate\n");
1693 action = RS_ACTION_STAY;
1694 }
1695 1719
1696 /* Lower adjacent rate's throughput is measured */ 1720 if ((high_tpt != IWL_INVALID_VALUE) &&
1697 } else if (low_tpt != IWL_INVALID_VALUE) { 1721 (high_tpt > current_tpt)) {
1698 /* Lower rate has better throughput */ 1722 IWL_DEBUG_RATE(mvm,
1699 if (low_tpt > current_tpt) { 1723 "Higher rate is better. Increate rate\n");
1700 IWL_DEBUG_RATE(mvm, 1724 return RS_ACTION_UPSCALE;
1701 "Lower rate is better. "
1702 "Decrease rate\n");
1703 action = RS_ACTION_DOWNSCALE;
1704 } else if (sr >= IWL_RATE_INCREASE_TH) {
1705 IWL_DEBUG_RATE(mvm,
1706 "Lower rate isn't better and "
1707 "good SR. Increase rate\n");
1708 action = RS_ACTION_UPSCALE;
1709 }
1710 }
1711 } 1725 }
1712 1726
1713 /* Sanity check; asked for decrease, but success rate or throughput 1727 if ((low_tpt != IWL_INVALID_VALUE) &&
1714 * has been good at old rate. Don't change it. 1728 (high_tpt != IWL_INVALID_VALUE) &&
1715 */ 1729 (low_tpt < current_tpt) &&
1716 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID) && 1730 (high_tpt < current_tpt)) {
1717 ((sr > IWL_RATE_HIGH_TH) || 1731 IWL_DEBUG_RATE(mvm,
1718 (current_tpt > (100 * tbl->expected_tpt[low])))) { 1732 "Both high and low are worse. Maintain rate\n");
1733 return RS_ACTION_STAY;
1734 }
1735
1736 if ((low_tpt != IWL_INVALID_VALUE) &&
1737 (low_tpt > current_tpt)) {
1738 IWL_DEBUG_RATE(mvm,
1739 "Lower rate is better\n");
1740 action = RS_ACTION_DOWNSCALE;
1741 goto out;
1742 }
1743
1744 if ((low_tpt == IWL_INVALID_VALUE) &&
1745 (low != IWL_RATE_INVALID)) {
1719 IWL_DEBUG_RATE(mvm, 1746 IWL_DEBUG_RATE(mvm,
1720 "Sanity check failed. Maintain rate\n"); 1747 "No data about lower rate\n");
1721 action = RS_ACTION_STAY; 1748 action = RS_ACTION_DOWNSCALE;
1749 goto out;
1750 }
1751
1752 IWL_DEBUG_RATE(mvm, "Maintain rate\n");
1753
1754out:
1755 if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
1756 if (sr >= RS_SR_NO_DECREASE) {
1757 IWL_DEBUG_RATE(mvm,
1758 "SR is above NO DECREASE. Avoid downscale\n");
1759 action = RS_ACTION_STAY;
1760 } else if (current_tpt > (100 * tbl->expected_tpt[low])) {
1761 IWL_DEBUG_RATE(mvm,
1762 "Current TPT is higher than max expected in low rate. Avoid downscale\n");
1763 action = RS_ACTION_STAY;
1764 } else {
1765 IWL_DEBUG_RATE(mvm, "Decrease rate\n");
1766 }
1722 } 1767 }
1723 1768
1724 return action; 1769 return action;
@@ -1792,6 +1837,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
1792 "Aggregation changed: prev %d current %d. Update expected TPT table\n", 1837 "Aggregation changed: prev %d current %d. Update expected TPT table\n",
1793 prev_agg, lq_sta->is_agg); 1838 prev_agg, lq_sta->is_agg);
1794 rs_set_expected_tpt_table(lq_sta, tbl); 1839 rs_set_expected_tpt_table(lq_sta, tbl);
1840 rs_rate_scale_clear_tbl_windows(mvm, tbl);
1795 } 1841 }
1796 1842
1797 /* current tx rate */ 1843 /* current tx rate */
@@ -2021,7 +2067,7 @@ lq_update:
2021 if (lq_sta->search_better_tbl) { 2067 if (lq_sta->search_better_tbl) {
2022 /* Access the "search" table, clear its history. */ 2068 /* Access the "search" table, clear its history. */
2023 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]); 2069 tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
2024 rs_rate_scale_clear_tbl_windows(tbl); 2070 rs_rate_scale_clear_tbl_windows(mvm, tbl);
2025 2071
2026 /* Use new "search" start rate */ 2072 /* Use new "search" start rate */
2027 index = tbl->rate.index; 2073 index = tbl->rate.index;
@@ -2042,8 +2088,18 @@ lq_update:
2042 * stay with best antenna legacy modulation for a while 2088 * stay with best antenna legacy modulation for a while
2043 * before next round of mode comparisons. */ 2089 * before next round of mode comparisons. */
2044 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]); 2090 tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
2045 if (is_legacy(&tbl1->rate) && !sta->ht_cap.ht_supported) { 2091 if (is_legacy(&tbl1->rate)) {
2046 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n"); 2092 IWL_DEBUG_RATE(mvm, "LQ: STAY in legacy table\n");
2093
2094 if (tid != IWL_MAX_TID_COUNT) {
2095 tid_data = &sta_priv->tid_data[tid];
2096 if (tid_data->state != IWL_AGG_OFF) {
2097 IWL_DEBUG_RATE(mvm,
2098 "Stop aggregation on tid %d\n",
2099 tid);
2100 ieee80211_stop_tx_ba_session(sta, tid);
2101 }
2102 }
2047 rs_set_stay_in_table(mvm, 1, lq_sta); 2103 rs_set_stay_in_table(mvm, 1, lq_sta);
2048 } else { 2104 } else {
2049 /* If we're in an HT mode, and all 3 mode switch actions 2105 /* If we're in an HT mode, and all 3 mode switch actions
@@ -2342,9 +2398,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2342 lq_sta->lq.sta_id = sta_priv->sta_id; 2398 lq_sta->lq.sta_id = sta_priv->sta_id;
2343 2399
2344 for (j = 0; j < LQ_SIZE; j++) 2400 for (j = 0; j < LQ_SIZE; j++)
2345 rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]); 2401 rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
2346 2402
2347 lq_sta->flush_timer = 0; 2403 lq_sta->flush_timer = 0;
2404 lq_sta->last_tx = jiffies;
2348 2405
2349 IWL_DEBUG_RATE(mvm, 2406 IWL_DEBUG_RATE(mvm,
2350 "LQ: *** rate scale station global init for station %d ***\n", 2407 "LQ: *** rate scale station global init for station %d ***\n",
@@ -2388,11 +2445,22 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2388 lq_sta->is_vht = true; 2445 lq_sta->is_vht = true;
2389 } 2446 }
2390 2447
2391 IWL_DEBUG_RATE(mvm, 2448 lq_sta->max_legacy_rate_idx = find_last_bit(&lq_sta->active_legacy_rate,
2392 "SISO-RATE=%X MIMO2-RATE=%X VHT=%d\n", 2449 BITS_PER_LONG);
2450 lq_sta->max_siso_rate_idx = find_last_bit(&lq_sta->active_siso_rate,
2451 BITS_PER_LONG);
2452 lq_sta->max_mimo2_rate_idx = find_last_bit(&lq_sta->active_mimo2_rate,
2453 BITS_PER_LONG);
2454
2455 IWL_DEBUG_RATE(mvm, "RATE MASK: LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d\n",
2456 lq_sta->active_legacy_rate,
2393 lq_sta->active_siso_rate, 2457 lq_sta->active_siso_rate,
2394 lq_sta->active_mimo2_rate, 2458 lq_sta->active_mimo2_rate,
2395 lq_sta->is_vht); 2459 lq_sta->is_vht);
2460 IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
2461 lq_sta->max_legacy_rate_idx,
2462 lq_sta->max_siso_rate_idx,
2463 lq_sta->max_mimo2_rate_idx);
2396 2464
2397 /* These values will be overridden later */ 2465 /* These values will be overridden later */
2398 lq_sta->lq.single_stream_ant_msk = 2466 lq_sta->lq.single_stream_ant_msk =
@@ -2547,6 +2615,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm,
2547 if (is_siso(&rate)) { 2615 if (is_siso(&rate)) {
2548 num_rates = RS_SECONDARY_SISO_NUM_RATES; 2616 num_rates = RS_SECONDARY_SISO_NUM_RATES;
2549 num_retries = RS_SECONDARY_SISO_RETRIES; 2617 num_retries = RS_SECONDARY_SISO_RETRIES;
2618 lq_cmd->mimo_delim = index;
2550 } else if (is_legacy(&rate)) { 2619 } else if (is_legacy(&rate)) {
2551 num_rates = RS_SECONDARY_LEGACY_NUM_RATES; 2620 num_rates = RS_SECONDARY_LEGACY_NUM_RATES;
2552 num_retries = RS_LEGACY_RETRIES_PER_RATE; 2621 num_retries = RS_LEGACY_RETRIES_PER_RATE;
@@ -2749,7 +2818,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2749 return -ENOMEM; 2818 return -ENOMEM;
2750 2819
2751 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id); 2820 desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
2752 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n", 2821 desc += sprintf(buff+desc, "failed=%d success=%d rate=0%lX\n",
2753 lq_sta->total_failed, lq_sta->total_success, 2822 lq_sta->total_failed, lq_sta->total_success,
2754 lq_sta->active_legacy_rate); 2823 lq_sta->active_legacy_rate);
2755 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 2824 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
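The largest rs.c change flattens rs_get_rate_action() into early returns plus a final no-decrease sanity check gated by the new RS_SR_NO_DECREASE threshold. The standalone model below mirrors that control flow; the enum, the plain-percentage thresholds and the expected_low_tpt argument are illustrative stand-ins for the driver's 128*100 success-ratio scale and throughput tables:

#include <stdio.h>

#define INVALID                 (-1)
#define SR_FORCE_DECREASE       15      /* %; the driver uses a 128*100 scale */
#define SR_NO_DECREASE          85

enum action { STAY, DOWNSCALE, UPSCALE };

static enum action rate_action(int sr, int low, int high,
                               int low_tpt, int high_tpt, int current_tpt,
                               int expected_low_tpt)
{
        enum action action = STAY;

        /* Too many failures or no throughput at all: go down. */
        if (sr <= SR_FORCE_DECREASE || current_tpt == 0)
                return DOWNSCALE;

        /* No data about either neighbour: try the higher rate. */
        if (low_tpt == INVALID && high_tpt == INVALID && high != INVALID)
                return UPSCALE;

        /* No data about the higher rate and the lower one is worse: go up. */
        if (high_tpt == INVALID && high != INVALID &&
            low_tpt != INVALID && low_tpt < current_tpt)
                return UPSCALE;

        /* The higher rate measured better: go up. */
        if (high_tpt != INVALID && high_tpt > current_tpt)
                return UPSCALE;

        /* Both neighbours measured worse: stay put. */
        if (low_tpt != INVALID && high_tpt != INVALID &&
            low_tpt < current_tpt && high_tpt < current_tpt)
                return STAY;

        /* Lower rate better, or no data about it at all: lean downwards. */
        if (low_tpt != INVALID && low_tpt > current_tpt)
                action = DOWNSCALE;
        else if (low_tpt == INVALID && low != INVALID)
                action = DOWNSCALE;

        /* Sanity check: don't downscale while the success rate or current
         * throughput still looks good at this rate.
         */
        if (action == DOWNSCALE && low != INVALID &&
            (sr >= SR_NO_DECREASE || current_tpt > expected_low_tpt))
                action = STAY;

        return action;
}

int main(void)
{
        /* Good SR, no neighbour data yet: expect UPSCALE (prints 2). */
        printf("%d\n", rate_action(90, 2, 4, INVALID, INVALID, 50, 40));
        return 0;
}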
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 3332b396011e..0acfac96a56c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -156,6 +156,7 @@ enum {
156#define IWL_RATE_HIGH_TH 10880 /* 85% */ 156#define IWL_RATE_HIGH_TH 10880 /* 85% */
157#define IWL_RATE_INCREASE_TH 6400 /* 50% */ 157#define IWL_RATE_INCREASE_TH 6400 /* 50% */
158#define RS_SR_FORCE_DECREASE 1920 /* 15% */ 158#define RS_SR_FORCE_DECREASE 1920 /* 15% */
159#define RS_SR_NO_DECREASE 10880 /* 85% */
159 160
160#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */ 161#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
161#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000) 162#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
@@ -310,13 +311,20 @@ struct iwl_lq_sta {
310 u32 visited_columns; /* Bitmask marking which Tx columns were 311 u32 visited_columns; /* Bitmask marking which Tx columns were
311 * explored during a search cycle 312 * explored during a search cycle
312 */ 313 */
314 u64 last_tx;
313 bool is_vht; 315 bool is_vht;
314 enum ieee80211_band band; 316 enum ieee80211_band band;
315 317
316 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 318 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
317 u16 active_legacy_rate; 319 unsigned long active_legacy_rate;
318 u16 active_siso_rate; 320 unsigned long active_siso_rate;
319 u16 active_mimo2_rate; 321 unsigned long active_mimo2_rate;
322
323 /* Highest rate per Tx mode */
324 u8 max_legacy_rate_idx;
325 u8 max_siso_rate_idx;
326 u8 max_mimo2_rate_idx;
327
320 s8 max_rate_idx; /* Max rate set by user */ 328 s8 max_rate_idx; /* Max rate set by user */
321 u8 missed_rate_counter; 329 u8 missed_rate_counter;
322 330
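Widening the rate bitmaps to unsigned long lets the driver record a per-mode maximum rate index with find_last_bit() at init time, which rs_get_max_allowed_rate() then feeds into the expected-throughput lookup. A userspace equivalent using a plain highest-set-bit helper (simplified semantics, not the kernel's find_last_bit, which returns the bitmap size when no bit is set):

#include <stdio.h>

static int highest_set_bit(unsigned long mask)
{
        int idx = -1;
        int i;

        for (i = 0; i < (int)(8 * sizeof(mask)); i++)
                if (mask & (1UL << i))
                        idx = i;
        return idx;     /* -1 if the mask is empty */
}

int main(void)
{
        unsigned long legacy = 0x0ffUL;  /* pretend active legacy rates */
        unsigned long siso   = 0x3f0UL;  /* pretend active SISO MCS rates */

        printf("max legacy idx = %d\n", highest_set_bit(legacy));
        printf("max siso idx   = %d\n", highest_set_bit(siso));
        return 0;
}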
diff --git a/drivers/net/wireless/iwlwifi/mvm/sf.c b/drivers/net/wireless/iwlwifi/mvm/sf.c
index 8401627c0030..88809b2d1654 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sf.c
@@ -274,7 +274,8 @@ int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
274 return -EINVAL; 274 return -EINVAL;
275 if (changed_vif->type != NL80211_IFTYPE_STATION) { 275 if (changed_vif->type != NL80211_IFTYPE_STATION) {
276 new_state = SF_UNINIT; 276 new_state = SF_UNINIT;
277 } else if (changed_vif->bss_conf.assoc) { 277 } else if (changed_vif->bss_conf.assoc &&
278 changed_vif->bss_conf.dtim_period) {
278 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif); 279 mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
279 sta_id = mvmvif->ap_sta_id; 280 sta_id = mvmvif->ap_sta_id;
280 new_state = SF_FULL_ON; 281 new_state = SF_FULL_ON;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index edb015c99049..3d1d57f9f5bc 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -373,12 +373,14 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, 374 {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, 375 {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
376 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, 377 {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
377 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, 378 {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)}, 379 {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
379 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
380 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 77db0886c6e2..9c771b3e9918 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -292,6 +292,12 @@ process_start:
292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) 292 while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
293 mwifiex_handle_rx_packet(adapter, skb); 293 mwifiex_handle_rx_packet(adapter, skb);
294 294
295 /* Check for event */
296 if (adapter->event_received) {
297 adapter->event_received = false;
298 mwifiex_process_event(adapter);
299 }
300
295 /* Check for Cmd Resp */ 301 /* Check for Cmd Resp */
296 if (adapter->cmd_resp_received) { 302 if (adapter->cmd_resp_received) {
297 adapter->cmd_resp_received = false; 303 adapter->cmd_resp_received = false;
@@ -304,12 +310,6 @@ process_start:
304 } 310 }
305 } 311 }
306 312
307 /* Check for event */
308 if (adapter->event_received) {
309 adapter->event_received = false;
310 mwifiex_process_event(adapter);
311 }
312
313 /* Check if we need to confirm Sleep Request 313 /* Check if we need to confirm Sleep Request
314 received previously */ 314 received previously */
315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) { 315 if (adapter->ps_state == PS_STATE_PRE_SLEEP) {
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 894270611f2c..536c14aa71f3 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -60,9 +60,10 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter,
60 int status; 60 int status;
61 61
62 /* Wait for completion */ 62 /* Wait for completion */
63 status = wait_event_interruptible(adapter->cmd_wait_q.wait, 63 status = wait_event_interruptible_timeout(adapter->cmd_wait_q.wait,
64 *(cmd_queued->condition)); 64 *(cmd_queued->condition),
65 if (status) { 65 (12 * HZ));
66 if (status <= 0) {
66 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); 67 dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status);
67 mwifiex_cancel_all_pending_cmd(adapter); 68 mwifiex_cancel_all_pending_cmd(adapter);
68 return status; 69 return status;
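The mwifiex change bounds the command wait at 12 seconds and treats both an interrupted sleep (negative return) and a timeout (zero) as failure, cancelling the pending commands. A small model of that return-value convention (the waiter below is a stub, not the kernel API):

#include <stdio.h>

/* Pretend waiter: >0 means the condition fired in time, 0 means the wait
 * timed out, <0 means the sleep was interrupted by a signal.
 */
static int wait_for_condition(int condition_fires, int interrupted)
{
        if (interrupted)
                return -4;      /* stands in for an -EINTR-style error */
        if (condition_fires)
                return 1;
        return 0;               /* timed out */
}

int main(void)
{
        int status = wait_for_condition(0, 0);

        if (status <= 0) {
                printf("cmd wait failed: %d, cancelling pending commands\n",
                       status);
                return 1;
        }
        printf("command completed\n");
        return 0;
}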
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 1a8d32138593..cf61d6e3eaa7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -88,7 +88,7 @@ static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
88 bool recontend_queue = false; 88 bool recontend_queue = false;
89 u32 q_len = 0; 89 u32 q_len = 0;
90 u8 q_num = INVALID_QUEUE; 90 u8 q_num = INVALID_QUEUE;
91 u8 ii, min = 0; 91 u8 ii = 0, min = 0;
92 92
93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) { 93 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
94 if (!common->mgmt_q_block) 94 if (!common->mgmt_q_block)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 73694295648f..1b28cda6ca88 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -841,16 +841,6 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
841 rsi_dbg(MGMT_TX_ZONE, 841 rsi_dbg(MGMT_TX_ZONE,
842 "%s: Sending scan req frame\n", __func__); 842 "%s: Sending scan req frame\n", __func__);
843 843
844 skb = dev_alloc_skb(FRAME_DESC_SZ);
845 if (!skb) {
846 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
847 __func__);
848 return -ENOMEM;
849 }
850
851 memset(skb->data, 0, FRAME_DESC_SZ);
852 mgmt_frame = (struct rsi_mac_frame *)skb->data;
853
854 if (common->band == IEEE80211_BAND_5GHZ) { 844 if (common->band == IEEE80211_BAND_5GHZ) {
855 if ((channel >= 36) && (channel <= 64)) 845 if ((channel >= 36) && (channel <= 64))
856 channel = ((channel - 32) / 4); 846 channel = ((channel - 32) / 4);
@@ -868,6 +858,16 @@ int rsi_set_channel(struct rsi_common *common, u16 channel)
868 } 858 }
869 } 859 }
870 860
861 skb = dev_alloc_skb(FRAME_DESC_SZ);
862 if (!skb) {
863 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of skb\n",
864 __func__);
865 return -ENOMEM;
866 }
867
868 memset(skb->data, 0, FRAME_DESC_SZ);
869 mgmt_frame = (struct rsi_mac_frame *)skb->data;
870
871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12); 871 mgmt_frame->desc_word[0] = cpu_to_le16(RSI_WIFI_MGMT_Q << 12);
872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST); 872 mgmt_frame->desc_word[1] = cpu_to_le16(SCAN_REQUEST);
873 mgmt_frame->desc_word[4] = cpu_to_le16(channel); 873 mgmt_frame->desc_word[4] = cpu_to_le16(channel);
@@ -966,6 +966,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
966 if (!selected_rates) { 966 if (!selected_rates) {
967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n", 967 rsi_dbg(ERR_ZONE, "%s: Failed in allocation of mem\n",
968 __func__); 968 __func__);
969 dev_kfree_skb(skb);
969 return -ENOMEM; 970 return -ENOMEM;
970 } 971 }
971 972
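The two rsi_91x_mgmt.c hunks move the skb allocation in rsi_set_channel() after the channel translation and make rsi_send_auto_rate_request() free its skb when the rate-array allocation fails, closing a leak on that error path. A generic sketch of validate-then-allocate with cleanup on partial failure (names invented):

#include <stdio.h>
#include <stdlib.h>

static int build_request(int channel)
{
        char *frame, *rates;

        if (channel < 1 || channel > 165)       /* validate before allocating */
                return -1;

        frame = calloc(1, 16);
        if (!frame)
                return -1;

        rates = calloc(8, sizeof(*rates));
        if (!rates) {
                free(frame);                    /* undo the earlier allocation */
                return -1;
        }

        printf("request built for channel %d\n", channel);
        free(rates);
        free(frame);
        return 0;
}

int main(void)
{
        return build_request(36) ? 1 : 0;
}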
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index 398f3d2c0a6c..a76e98eb8372 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -68,6 +68,26 @@ struct wl18xx_event_mailbox {
68 68
69 /* bitmap of inactive stations (by HLID) */ 69 /* bitmap of inactive stations (by HLID) */
70 __le32 inactive_sta_bitmap; 70 __le32 inactive_sta_bitmap;
71
72 /* rx BA win size indicated by RX_BA_WIN_SIZE_CHANGE_EVENT_ID */
73 u8 rx_ba_role_id;
74 u8 rx_ba_link_id;
75 u8 rx_ba_win_size;
76 u8 padding;
77
78 /* smart config */
79 u8 sc_ssid_len;
80 u8 sc_pwd_len;
81 u8 sc_token_len;
82 u8 padding1;
83 u8 sc_ssid[32];
84 u8 sc_pwd[32];
85 u8 sc_token[32];
86
87 /* smart config sync channel */
88 u8 sc_sync_channel;
89 u8 sc_sync_band;
90 u8 padding2[2];
71} __packed; 91} __packed;
72 92
73int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event, 93int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 1f9a36031b06..16d10281798d 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -158,6 +158,11 @@ EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
158 158
159void wlcore_event_dummy_packet(struct wl1271 *wl) 159void wlcore_event_dummy_packet(struct wl1271 *wl)
160{ 160{
161 if (wl->plt) {
162 wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
163 return;
164 }
165
161 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID"); 166 wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
162 wl1271_tx_dummy_packet(wl); 167 wl1271_tx_dummy_packet(wl);
163} 168}