aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c22
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c30
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c85
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c83
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c7
-rw-r--r--drivers/net/team/team.c26
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c53
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c14
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c15
55 files changed, 524 insertions, 411 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8ddbada9e281..062fa7e3af4c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
3213 return NOTIFY_DONE; 3213 return NOTIFY_DONE;
3214 3214
3215 if (event_dev->flags & IFF_MASTER) { 3215 if (event_dev->flags & IFF_MASTER) {
3216 int ret;
3217
3216 netdev_dbg(event_dev, "IFF_MASTER\n"); 3218 netdev_dbg(event_dev, "IFF_MASTER\n");
3217 return bond_master_netdev_event(event, event_dev); 3219 ret = bond_master_netdev_event(event, event_dev);
3220 if (ret != NOTIFY_DONE)
3221 return ret;
3218 } 3222 }
3219 3223
3220 if (event_dev->flags & IFF_SLAVE) { 3224 if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index a9bdc21873d3..10ff37d6dc78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
957 bnx2x_sample_bulletin(bp); 957 bnx2x_sample_bulletin(bp);
958 958
959 if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) { 959 if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
960 BNX2X_ERR("Hypervisor will dicline the request, avoiding\n"); 960 BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
961 rc = -EINVAL; 961 rc = -EINVAL;
962 goto out; 962 goto out;
963 } 963 }
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 28eac9056211..c032bef1b776 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
32#define DRV_NAME "nicvf" 32#define DRV_NAME "nicvf"
33#define DRV_VERSION "1.0" 33#define DRV_VERSION "1.0"
34 34
35/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
36 * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
37 * this value, keeping headroom for the 14 byte Ethernet header and two
38 * VLAN tags (for QinQ)
39 */
40#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
41
35/* Supported devices */ 42/* Supported devices */
36static const struct pci_device_id nicvf_id_table[] = { 43static const struct pci_device_id nicvf_id_table[] = {
37 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, 44 { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1582,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
1582 struct nicvf *nic = netdev_priv(netdev); 1589 struct nicvf *nic = netdev_priv(netdev);
1583 int orig_mtu = netdev->mtu; 1590 int orig_mtu = netdev->mtu;
1584 1591
1592 /* For now just support only the usual MTU sized frames,
1593 * plus some headroom for VLAN, QinQ.
1594 */
1595 if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
1596 netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1597 netdev->mtu);
1598 return -EINVAL;
1599 }
1600
1585 netdev->mtu = new_mtu; 1601 netdev->mtu = new_mtu;
1586 1602
1587 if (!netif_running(netdev)) 1603 if (!netif_running(netdev))
@@ -1830,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
1830 bool bpf_attached = false; 1846 bool bpf_attached = false;
1831 int ret = 0; 1847 int ret = 0;
1832 1848
1833 /* For now just support only the usual MTU sized frames */ 1849 /* For now just support only the usual MTU sized frames,
1834 if (prog && (dev->mtu > 1500)) { 1850 * plus some headroom for VLAN, QinQ.
1851 */
1852 if (prog && dev->mtu > MAX_XDP_MTU) {
1835 netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n", 1853 netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
1836 dev->mtu); 1854 dev->mtu);
1837 return -EOPNOTSUPP; 1855 return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 697c2427f2b7..a96ad20ee484 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1840 int ret; 1840 int ret;
1841 1841
1842 if (enable) { 1842 if (enable) {
1843 ret = clk_prepare_enable(fep->clk_ahb);
1844 if (ret)
1845 return ret;
1846
1847 ret = clk_prepare_enable(fep->clk_enet_out); 1843 ret = clk_prepare_enable(fep->clk_enet_out);
1848 if (ret) 1844 if (ret)
1849 goto failed_clk_enet_out; 1845 return ret;
1850 1846
1851 if (fep->clk_ptp) { 1847 if (fep->clk_ptp) {
1852 mutex_lock(&fep->ptp_clk_mutex); 1848 mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
1866 1862
1867 phy_reset_after_clk_enable(ndev->phydev); 1863 phy_reset_after_clk_enable(ndev->phydev);
1868 } else { 1864 } else {
1869 clk_disable_unprepare(fep->clk_ahb);
1870 clk_disable_unprepare(fep->clk_enet_out); 1865 clk_disable_unprepare(fep->clk_enet_out);
1871 if (fep->clk_ptp) { 1866 if (fep->clk_ptp) {
1872 mutex_lock(&fep->ptp_clk_mutex); 1867 mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
1885failed_clk_ptp: 1880failed_clk_ptp:
1886 if (fep->clk_enet_out) 1881 if (fep->clk_enet_out)
1887 clk_disable_unprepare(fep->clk_enet_out); 1882 clk_disable_unprepare(fep->clk_enet_out);
1888failed_clk_enet_out:
1889 clk_disable_unprepare(fep->clk_ahb);
1890 1883
1891 return ret; 1884 return ret;
1892} 1885}
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
3470 ret = clk_prepare_enable(fep->clk_ipg); 3463 ret = clk_prepare_enable(fep->clk_ipg);
3471 if (ret) 3464 if (ret)
3472 goto failed_clk_ipg; 3465 goto failed_clk_ipg;
3466 ret = clk_prepare_enable(fep->clk_ahb);
3467 if (ret)
3468 goto failed_clk_ahb;
3473 3469
3474 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); 3470 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
3475 if (!IS_ERR(fep->reg_phy)) { 3471 if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
3563 pm_runtime_put(&pdev->dev); 3559 pm_runtime_put(&pdev->dev);
3564 pm_runtime_disable(&pdev->dev); 3560 pm_runtime_disable(&pdev->dev);
3565failed_regulator: 3561failed_regulator:
3562 clk_disable_unprepare(fep->clk_ahb);
3563failed_clk_ahb:
3564 clk_disable_unprepare(fep->clk_ipg);
3566failed_clk_ipg: 3565failed_clk_ipg:
3567 fec_enet_clk_enable(ndev, false); 3566 fec_enet_clk_enable(ndev, false);
3568failed_clk: 3567failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
3686 struct net_device *ndev = dev_get_drvdata(dev); 3685 struct net_device *ndev = dev_get_drvdata(dev);
3687 struct fec_enet_private *fep = netdev_priv(ndev); 3686 struct fec_enet_private *fep = netdev_priv(ndev);
3688 3687
3688 clk_disable_unprepare(fep->clk_ahb);
3689 clk_disable_unprepare(fep->clk_ipg); 3689 clk_disable_unprepare(fep->clk_ipg);
3690 3690
3691 return 0; 3691 return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
3695{ 3695{
3696 struct net_device *ndev = dev_get_drvdata(dev); 3696 struct net_device *ndev = dev_get_drvdata(dev);
3697 struct fec_enet_private *fep = netdev_priv(ndev); 3697 struct fec_enet_private *fep = netdev_priv(ndev);
3698 int ret;
3698 3699
3699 return clk_prepare_enable(fep->clk_ipg); 3700 ret = clk_prepare_enable(fep->clk_ahb);
3701 if (ret)
3702 return ret;
3703 ret = clk_prepare_enable(fep->clk_ipg);
3704 if (ret)
3705 goto failed_clk_ipg;
3706
3707 return 0;
3708
3709failed_clk_ipg:
3710 clk_disable_unprepare(fep->clk_ahb);
3711 return ret;
3700} 3712}
3701 3713
3702static const struct dev_pm_ops fec_pm_ops = { 3714static const struct dev_pm_ops fec_pm_ops = {
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1de691e76b86..5e3cdb0b46d5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3758,6 +3758,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3758{ 3758{
3759 struct device *dev = &adapter->vdev->dev; 3759 struct device *dev = &adapter->vdev->dev;
3760 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf; 3760 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3761 netdev_features_t old_hw_features = 0;
3761 union ibmvnic_crq crq; 3762 union ibmvnic_crq crq;
3762 int i; 3763 int i;
3763 3764
@@ -3833,24 +3834,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3833 adapter->ip_offload_ctrl.large_rx_ipv4 = 0; 3834 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3834 adapter->ip_offload_ctrl.large_rx_ipv6 = 0; 3835 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3835 3836
3836 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO; 3837 if (adapter->state != VNIC_PROBING) {
3838 old_hw_features = adapter->netdev->hw_features;
3839 adapter->netdev->hw_features = 0;
3840 }
3841
3842 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3837 3843
3838 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum) 3844 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3839 adapter->netdev->features |= NETIF_F_IP_CSUM; 3845 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3840 3846
3841 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum) 3847 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3842 adapter->netdev->features |= NETIF_F_IPV6_CSUM; 3848 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3843 3849
3844 if ((adapter->netdev->features & 3850 if ((adapter->netdev->features &
3845 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) 3851 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3846 adapter->netdev->features |= NETIF_F_RXCSUM; 3852 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3847 3853
3848 if (buf->large_tx_ipv4) 3854 if (buf->large_tx_ipv4)
3849 adapter->netdev->features |= NETIF_F_TSO; 3855 adapter->netdev->hw_features |= NETIF_F_TSO;
3850 if (buf->large_tx_ipv6) 3856 if (buf->large_tx_ipv6)
3851 adapter->netdev->features |= NETIF_F_TSO6; 3857 adapter->netdev->hw_features |= NETIF_F_TSO6;
3852 3858
3853 adapter->netdev->hw_features |= adapter->netdev->features; 3859 if (adapter->state == VNIC_PROBING) {
3860 adapter->netdev->features |= adapter->netdev->hw_features;
3861 } else if (old_hw_features != adapter->netdev->hw_features) {
3862 netdev_features_t tmp = 0;
3863
3864 /* disable features no longer supported */
3865 adapter->netdev->features &= adapter->netdev->hw_features;
3866 /* turn on features now supported if previously enabled */
3867 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3868 adapter->netdev->hw_features;
3869 adapter->netdev->features |=
3870 tmp & adapter->netdev->wanted_features;
3871 }
3854 3872
3855 memset(&crq, 0, sizeof(crq)); 3873 memset(&crq, 0, sizeof(crq));
3856 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD; 3874 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index bd855ab8dfe2..51e109fdeec1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -856,6 +856,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
856 * switching channels 856 * switching channels
857 */ 857 */
858typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv); 858typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
859int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
859int mlx5e_safe_switch_channels(struct mlx5e_priv *priv, 860int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
860 struct mlx5e_channels *new_chs, 861 struct mlx5e_channels *new_chs,
861 mlx5e_fp_hw_modify hw_modify); 862 mlx5e_fp_hw_modify hw_modify);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d38e62cdf24..476dd97f7f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
186 186
187static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv) 187static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
188{ 188{
189 int err; 189 int err = 0;
190 190
191 rtnl_lock(); 191 rtnl_lock();
192 mutex_lock(&priv->state_lock); 192 mutex_lock(&priv->state_lock);
193 mlx5e_close_locked(priv->netdev); 193
194 err = mlx5e_open_locked(priv->netdev); 194 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
195 goto out;
196
197 err = mlx5e_safe_reopen_channels(priv);
198
199out:
195 mutex_unlock(&priv->state_lock); 200 mutex_unlock(&priv->state_lock);
196 rtnl_unlock(); 201 rtnl_unlock();
197 202
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b9d5830e8344..fe5d4d7f15ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
39 return -EOPNOTSUPP; 39 return -EOPNOTSUPP;
40 } 40 }
41 41
42 if (!(mlx5e_eswitch_rep(*out_dev) &&
43 mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
44 return -EOPNOTSUPP;
45
42 return 0; 46 return 0;
43} 47}
44 48
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5efce4a3ff79..76a3d01a489e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1768,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
1768 struct mlx5e_channel *c; 1768 struct mlx5e_channel *c;
1769 int i; 1769 int i;
1770 1770
1771 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 1771 if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
1772 priv->channels.params.xdp_prog)
1772 return 0; 1773 return 0;
1773 1774
1774 for (i = 0; i < channels->num; i++) { 1775 for (i = 0; i < channels->num; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ba705392b46b..5c127fccad60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -952,7 +952,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
952 if (params->rx_dim_enabled) 952 if (params->rx_dim_enabled)
953 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); 953 __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
954 954
955 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) 955 /* We disable csum_complete when XDP is enabled since
956 * XDP programs might manipulate packets which will render
957 * skb->checksum incorrect.
958 */
959 if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
956 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); 960 __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
957 961
958 return 0; 962 return 0;
@@ -2956,6 +2960,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
2956 return 0; 2960 return 0;
2957} 2961}
2958 2962
2963int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
2964{
2965 struct mlx5e_channels new_channels = {};
2966
2967 new_channels.params = priv->channels.params;
2968 return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
2969}
2970
2959void mlx5e_timestamp_init(struct mlx5e_priv *priv) 2971void mlx5e_timestamp_init(struct mlx5e_priv *priv)
2960{ 2972{
2961 priv->tstamp.tx_type = HWTSTAMP_TX_OFF; 2973 priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -4186,11 +4198,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
4186 if (!report_failed) 4198 if (!report_failed)
4187 goto unlock; 4199 goto unlock;
4188 4200
4189 mlx5e_close_locked(priv->netdev); 4201 err = mlx5e_safe_reopen_channels(priv);
4190 err = mlx5e_open_locked(priv->netdev);
4191 if (err) 4202 if (err)
4192 netdev_err(priv->netdev, 4203 netdev_err(priv->netdev,
4193 "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n", 4204 "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
4194 err); 4205 err);
4195 4206
4196unlock: 4207unlock:
@@ -4578,7 +4589,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
4578{ 4589{
4579 enum mlx5e_traffic_types tt; 4590 enum mlx5e_traffic_types tt;
4580 4591
4581 rss_params->hfunc = ETH_RSS_HASH_XOR; 4592 rss_params->hfunc = ETH_RSS_HASH_TOP;
4582 netdev_rss_key_fill(rss_params->toeplitz_hash_key, 4593 netdev_rss_key_fill(rss_params->toeplitz_hash_key,
4583 sizeof(rss_params->toeplitz_hash_key)); 4594 sizeof(rss_params->toeplitz_hash_key));
4584 mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, 4595 mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3dde5c7e0739..c3b3002ff62f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
692{ 692{
693 *proto = ((struct ethhdr *)skb->data)->h_proto; 693 *proto = ((struct ethhdr *)skb->data)->h_proto;
694 *proto = __vlan_get_protocol(skb, *proto, network_depth); 694 *proto = __vlan_get_protocol(skb, *proto, network_depth);
695 return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6)); 695
696 if (*proto == htons(ETH_P_IP))
697 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
698
699 if (*proto == htons(ETH_P_IPV6))
700 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
701
702 return false;
696} 703}
697 704
698static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb) 705static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
712 rq->stats->ecn_mark += !!rc; 719 rq->stats->ecn_mark += !!rc;
713} 720}
714 721
715static u32 mlx5e_get_fcs(const struct sk_buff *skb)
716{
717 const void *fcs_bytes;
718 u32 _fcs_bytes;
719
720 fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
721 ETH_FCS_LEN, &_fcs_bytes);
722
723 return __get_unaligned_cpu32(fcs_bytes);
724}
725
726static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto) 722static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
727{ 723{
728 void *ip_p = skb->data + network_depth; 724 void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
733 729
734#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) 730#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
735 731
732#define MAX_PADDING 8
733
734static void
735tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
736 struct mlx5e_rq_stats *stats)
737{
738 stats->csum_complete_tail_slow++;
739 skb->csum = csum_block_add(skb->csum,
740 skb_checksum(skb, offset, len, 0),
741 offset);
742}
743
744static void
745tail_padding_csum(struct sk_buff *skb, int offset,
746 struct mlx5e_rq_stats *stats)
747{
748 u8 tail_padding[MAX_PADDING];
749 int len = skb->len - offset;
750 void *tail;
751
752 if (unlikely(len > MAX_PADDING)) {
753 tail_padding_csum_slow(skb, offset, len, stats);
754 return;
755 }
756
757 tail = skb_header_pointer(skb, offset, len, tail_padding);
758 if (unlikely(!tail)) {
759 tail_padding_csum_slow(skb, offset, len, stats);
760 return;
761 }
762
763 stats->csum_complete_tail++;
764 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
765}
766
767static void
768mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
769 struct mlx5e_rq_stats *stats)
770{
771 struct ipv6hdr *ip6;
772 struct iphdr *ip4;
773 int pkt_len;
774
775 switch (proto) {
776 case htons(ETH_P_IP):
777 ip4 = (struct iphdr *)(skb->data + network_depth);
778 pkt_len = network_depth + ntohs(ip4->tot_len);
779 break;
780 case htons(ETH_P_IPV6):
781 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
782 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
783 break;
784 default:
785 return;
786 }
787
788 if (likely(pkt_len >= skb->len))
789 return;
790
791 tail_padding_csum(skb, pkt_len, stats);
792}
793
736static inline void mlx5e_handle_csum(struct net_device *netdev, 794static inline void mlx5e_handle_csum(struct net_device *netdev,
737 struct mlx5_cqe64 *cqe, 795 struct mlx5_cqe64 *cqe,
738 struct mlx5e_rq *rq, 796 struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
752 return; 810 return;
753 } 811 }
754 812
755 if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))) 813 /* True when explicitly set via priv flag, or XDP prog is loaded */
814 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
756 goto csum_unnecessary; 815 goto csum_unnecessary;
757 816
758 /* CQE csum doesn't cover padding octets in short ethernet 817 /* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
780 skb->csum = csum_partial(skb->data + ETH_HLEN, 839 skb->csum = csum_partial(skb->data + ETH_HLEN,
781 network_depth - ETH_HLEN, 840 network_depth - ETH_HLEN,
782 skb->csum); 841 skb->csum);
783 if (unlikely(netdev->features & NETIF_F_RXFCS)) 842
784 skb->csum = csum_block_add(skb->csum, 843 mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
785 (__force __wsum)mlx5e_get_fcs(skb),
786 skb->len - ETH_FCS_LEN);
787 stats->csum_complete++; 844 stats->csum_complete++;
788 return; 845 return;
789 } 846 }
790 847
791csum_unnecessary: 848csum_unnecessary:
792 if (likely((cqe->hds_ip_ext & CQE_L3_OK) && 849 if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
793 ((cqe->hds_ip_ext & CQE_L4_OK) || 850 (cqe->hds_ip_ext & CQE_L4_OK))) {
794 (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
795 skb->ip_summed = CHECKSUM_UNNECESSARY; 851 skb->ip_summed = CHECKSUM_UNNECESSARY;
796 if (cqe_is_tunneled(cqe)) { 852 if (cqe_is_tunneled(cqe)) {
797 skb->csum_level = 1; 853 skb->csum_level = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..b75aa8b8bf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
59 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, 59 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
60 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) }, 60 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
61 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, 61 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
62 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
63 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
62 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, 64 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
63 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, 65 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
64 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, 66 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
151 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; 153 s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
152 s->rx_csum_none += rq_stats->csum_none; 154 s->rx_csum_none += rq_stats->csum_none;
153 s->rx_csum_complete += rq_stats->csum_complete; 155 s->rx_csum_complete += rq_stats->csum_complete;
156 s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
157 s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
154 s->rx_csum_unnecessary += rq_stats->csum_unnecessary; 158 s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
155 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; 159 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
156 s->rx_xdp_drop += rq_stats->xdp_drop; 160 s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
1190 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, 1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
1191 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, 1195 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
1192 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, 1196 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
1197 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
1198 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
1193 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, 1199 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
1194 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, 1200 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
1195 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, 1201 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..16c3b785f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
71 u64 rx_csum_unnecessary; 71 u64 rx_csum_unnecessary;
72 u64 rx_csum_none; 72 u64 rx_csum_none;
73 u64 rx_csum_complete; 73 u64 rx_csum_complete;
74 u64 rx_csum_complete_tail;
75 u64 rx_csum_complete_tail_slow;
74 u64 rx_csum_unnecessary_inner; 76 u64 rx_csum_unnecessary_inner;
75 u64 rx_xdp_drop; 77 u64 rx_xdp_drop;
76 u64 rx_xdp_redirect; 78 u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
181 u64 packets; 183 u64 packets;
182 u64 bytes; 184 u64 bytes;
183 u64 csum_complete; 185 u64 csum_complete;
186 u64 csum_complete_tail;
187 u64 csum_complete_tail_slow;
184 u64 csum_unnecessary; 188 u64 csum_unnecessary;
185 u64 csum_unnecessary_inner; 189 u64 csum_unnecessary_inner;
186 u64 csum_none; 190 u64 csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 8de64e88c670..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
148 return ret; 148 return ret;
149} 149}
150 150
151static void mlx5_fpga_tls_release_swid(struct idr *idr, 151static void *mlx5_fpga_tls_release_swid(struct idr *idr,
152 spinlock_t *idr_spinlock, u32 swid) 152 spinlock_t *idr_spinlock, u32 swid)
153{ 153{
154 unsigned long flags; 154 unsigned long flags;
155 void *ptr;
155 156
156 spin_lock_irqsave(idr_spinlock, flags); 157 spin_lock_irqsave(idr_spinlock, flags);
157 idr_remove(idr, swid); 158 ptr = idr_remove(idr, swid);
158 spin_unlock_irqrestore(idr_spinlock, flags); 159 spin_unlock_irqrestore(idr_spinlock, flags);
160 return ptr;
159} 161}
160 162
161static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn, 163static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
165 kfree(buf); 167 kfree(buf);
166} 168}
167 169
168struct mlx5_teardown_stream_context {
169 struct mlx5_fpga_tls_command_context cmd;
170 u32 swid;
171};
172
173static void 170static void
174mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn, 171mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
175 struct mlx5_fpga_device *fdev, 172 struct mlx5_fpga_device *fdev,
176 struct mlx5_fpga_tls_command_context *cmd, 173 struct mlx5_fpga_tls_command_context *cmd,
177 struct mlx5_fpga_dma_buf *resp) 174 struct mlx5_fpga_dma_buf *resp)
178{ 175{
179 struct mlx5_teardown_stream_context *ctx =
180 container_of(cmd, struct mlx5_teardown_stream_context, cmd);
181
182 if (resp) { 176 if (resp) {
183 u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome); 177 u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
184 178
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
186 mlx5_fpga_err(fdev, 180 mlx5_fpga_err(fdev,
187 "Teardown stream failed with syndrome = %d", 181 "Teardown stream failed with syndrome = %d",
188 syndrome); 182 syndrome);
189 else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
190 mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
191 &fdev->tls->tx_idr_spinlock,
192 ctx->swid);
193 else
194 mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
195 &fdev->tls->rx_idr_spinlock,
196 ctx->swid);
197 } 183 }
198 mlx5_fpga_tls_put_command_ctx(cmd); 184 mlx5_fpga_tls_put_command_ctx(cmd);
199} 185}
@@ -217,22 +203,22 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
217 void *cmd; 203 void *cmd;
218 int ret; 204 int ret;
219 205
220 rcu_read_lock();
221 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
222 rcu_read_unlock();
223
224 if (!flow) {
225 WARN_ONCE(1, "Received NULL pointer for handle\n");
226 return -EINVAL;
227 }
228
229 buf = kzalloc(size, GFP_ATOMIC); 206 buf = kzalloc(size, GFP_ATOMIC);
230 if (!buf) 207 if (!buf)
231 return -ENOMEM; 208 return -ENOMEM;
232 209
233 cmd = (buf + 1); 210 cmd = (buf + 1);
234 211
212 rcu_read_lock();
213 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
214 if (unlikely(!flow)) {
215 rcu_read_unlock();
216 WARN_ONCE(1, "Received NULL pointer for handle\n");
217 kfree(buf);
218 return -EINVAL;
219 }
235 mlx5_fpga_tls_flow_to_cmd(flow, cmd); 220 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
221 rcu_read_unlock();
236 222
237 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 223 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
238 MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn)); 224 MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -253,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
253static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev, 239static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
254 void *flow, u32 swid, gfp_t flags) 240 void *flow, u32 swid, gfp_t flags)
255{ 241{
256 struct mlx5_teardown_stream_context *ctx; 242 struct mlx5_fpga_tls_command_context *ctx;
257 struct mlx5_fpga_dma_buf *buf; 243 struct mlx5_fpga_dma_buf *buf;
258 void *cmd; 244 void *cmd;
259 245
@@ -261,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
261 if (!ctx) 247 if (!ctx)
262 return; 248 return;
263 249
264 buf = &ctx->cmd.buf; 250 buf = &ctx->buf;
265 cmd = (ctx + 1); 251 cmd = (ctx + 1);
266 MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM); 252 MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
267 MLX5_SET(tls_cmd, cmd, swid, swid); 253 MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -272,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
272 buf->sg[0].data = cmd; 258 buf->sg[0].data = cmd;
273 buf->sg[0].size = MLX5_TLS_COMMAND_SIZE; 259 buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
274 260
275 ctx->swid = swid; 261 mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
276 mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
277 mlx5_fpga_tls_teardown_completion); 262 mlx5_fpga_tls_teardown_completion);
278} 263}
279 264
@@ -283,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
283 struct mlx5_fpga_tls *tls = mdev->fpga->tls; 268 struct mlx5_fpga_tls *tls = mdev->fpga->tls;
284 void *flow; 269 void *flow;
285 270
286 rcu_read_lock();
287 if (direction_sx) 271 if (direction_sx)
288 flow = idr_find(&tls->tx_idr, swid); 272 flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
273 &tls->tx_idr_spinlock,
274 swid);
289 else 275 else
290 flow = idr_find(&tls->rx_idr, swid); 276 flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
291 277 &tls->rx_idr_spinlock,
292 rcu_read_unlock(); 278 swid);
293 279
294 if (!flow) { 280 if (!flow) {
295 mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n", 281 mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -297,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
297 return; 283 return;
298 } 284 }
299 285
286 synchronize_rcu(); /* before kfree(flow) */
300 mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags); 287 mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
301} 288}
302 289
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index c2fba5c7c9ee..9e8e3e92f369 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
568 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) 568 if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
569 return 0; 569 return 0;
570 570
571 emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); 571 emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
572 if (!emad_wq) 572 if (!emad_wq)
573 return -ENOMEM; 573 return -ENOMEM;
574 mlxsw_core->emad_wq = emad_wq; 574 mlxsw_core->emad_wq = emad_wq;
@@ -2004,10 +2004,10 @@ static int __init mlxsw_core_module_init(void)
2004{ 2004{
2005 int err; 2005 int err;
2006 2006
2007 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0); 2007 mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
2008 if (!mlxsw_wq) 2008 if (!mlxsw_wq)
2009 return -ENOMEM; 2009 return -ENOMEM;
2010 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM, 2010 mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
2011 mlxsw_core_driver_name); 2011 mlxsw_core_driver_name);
2012 if (!mlxsw_owq) { 2012 if (!mlxsw_owq) {
2013 err = -ENOMEM; 2013 err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..d633bef5f105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
70 {MLXSW_REG_SBXX_DIR_EGRESS, 1}, 70 {MLXSW_REG_SBXX_DIR_EGRESS, 1},
71 {MLXSW_REG_SBXX_DIR_EGRESS, 2}, 71 {MLXSW_REG_SBXX_DIR_EGRESS, 2},
72 {MLXSW_REG_SBXX_DIR_EGRESS, 3}, 72 {MLXSW_REG_SBXX_DIR_EGRESS, 3},
73 {MLXSW_REG_SBXX_DIR_EGRESS, 15},
73}; 74};
74 75
75#define MLXSW_SP_SB_ING_TC_COUNT 8 76#define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
428 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), 429 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
429 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), 430 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
430 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0), 431 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
432 MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
431}; 433};
432 434
433static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp, 435static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
517 MLXSW_SP_SB_CM(0, 7, 4), 519 MLXSW_SP_SB_CM(0, 7, 4),
518 MLXSW_SP_SB_CM(0, 7, 4), 520 MLXSW_SP_SB_CM(0, 7, 4),
519 MLXSW_SP_SB_CM(0, 7, 4), 521 MLXSW_SP_SB_CM(0, 7, 4),
520 MLXSW_SP_SB_CM(0, 7, 4), 522 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
521 MLXSW_SP_SB_CM(0, 7, 4), 523 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
522 MLXSW_SP_SB_CM(0, 7, 4), 524 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
523 MLXSW_SP_SB_CM(0, 7, 4), 525 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
524 MLXSW_SP_SB_CM(0, 7, 4), 526 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
525 MLXSW_SP_SB_CM(0, 7, 4), 527 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
526 MLXSW_SP_SB_CM(0, 7, 4), 528 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
527 MLXSW_SP_SB_CM(0, 7, 4), 529 MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
528 MLXSW_SP_SB_CM(1, 0xff, 4), 530 MLXSW_SP_SB_CM(1, 0xff, 4),
529}; 531};
530 532
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
671 MLXSW_SP_SB_PM(0, 0), 673 MLXSW_SP_SB_PM(0, 0),
672 MLXSW_SP_SB_PM(0, 0), 674 MLXSW_SP_SB_PM(0, 0),
673 MLXSW_SP_SB_PM(0, 0), 675 MLXSW_SP_SB_PM(0, 0),
676 MLXSW_SP_SB_PM(10000, 90000),
674}; 677};
675 678
676static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port) 679static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 31656a2a6252..64498c9f55ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6804,7 +6804,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
6804 /* A RIF is not created for macvlan netdevs. Their MAC is used to 6804 /* A RIF is not created for macvlan netdevs. Their MAC is used to
6805 * populate the FDB 6805 * populate the FDB
6806 */ 6806 */
6807 if (netif_is_macvlan(dev)) 6807 if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
6808 return 0; 6808 return 0;
6809 6809
6810 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 6810 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1630 u16 fid_index; 1630 u16 fid_index;
1631 int err = 0; 1631 int err = 0;
1632 1632
1633 if (switchdev_trans_ph_prepare(trans)) 1633 if (switchdev_trans_ph_commit(trans))
1634 return 0; 1634 return 0;
1635 1635
1636 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); 1636 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
613 struct netdev_hw_addr *hw_addr) 613 struct netdev_hw_addr *hw_addr)
614{ 614{
615 struct ocelot *ocelot = port->ocelot; 615 struct ocelot *ocelot = port->ocelot;
616 struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL); 616 struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
617 617
618 if (!ha) 618 if (!ha)
619 return -ENOMEM; 619 return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
959 ETH_GSTRING_LEN); 959 ETH_GSTRING_LEN);
960} 960}
961 961
962static void ocelot_check_stats(struct work_struct *work) 962static void ocelot_update_stats(struct ocelot *ocelot)
963{ 963{
964 struct delayed_work *del_work = to_delayed_work(work);
965 struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
966 int i, j; 964 int i, j;
967 965
968 mutex_lock(&ocelot->stats_lock); 966 mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
986 } 984 }
987 } 985 }
988 986
989 cancel_delayed_work(&ocelot->stats_work); 987 mutex_unlock(&ocelot->stats_lock);
988}
989
990static void ocelot_check_stats_work(struct work_struct *work)
991{
992 struct delayed_work *del_work = to_delayed_work(work);
993 struct ocelot *ocelot = container_of(del_work, struct ocelot,
994 stats_work);
995
996 ocelot_update_stats(ocelot);
997
990 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, 998 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
991 OCELOT_STATS_CHECK_DELAY); 999 OCELOT_STATS_CHECK_DELAY);
992
993 mutex_unlock(&ocelot->stats_lock);
994} 1000}
995 1001
996static void ocelot_get_ethtool_stats(struct net_device *dev, 1002static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
1001 int i; 1007 int i;
1002 1008
1003 /* check and update now */ 1009 /* check and update now */
1004 ocelot_check_stats(&ocelot->stats_work.work); 1010 ocelot_update_stats(ocelot);
1005 1011
1006 /* Copy all counters */ 1012 /* Copy all counters */
1007 for (i = 0; i < ocelot->num_stats; i++) 1013 for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
1809 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6), 1815 ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
1810 ANA_CPUQ_8021_CFG, i); 1816 ANA_CPUQ_8021_CFG, i);
1811 1817
1812 INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats); 1818 INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
1813 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work, 1819 queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
1814 OCELOT_STATS_CHECK_DELAY); 1820 OCELOT_STATS_CHECK_DELAY);
1815 return 0; 1821 return 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2366 dma_object->addr))) { 2366 dma_object->addr))) {
2367 vxge_os_dma_free(devh->pdev, memblock, 2367 vxge_os_dma_free(devh->pdev, memblock,
2368 &dma_object->acc_handle); 2368 &dma_object->acc_handle);
2369 memblock = NULL;
2369 goto exit; 2370 goto exit;
2370 } 2371 }
2371 2372
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 512186adab00..c5e96ce20f59 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info {
431 u8 num_pf_rls; 431 u8 num_pf_rls;
432}; 432};
433 433
434#define QED_OVERFLOW_BIT 1
435
434struct qed_db_recovery_info { 436struct qed_db_recovery_info {
435 struct list_head list; 437 struct list_head list;
436 438
437 /* Lock to protect the doorbell recovery mechanism list */ 439 /* Lock to protect the doorbell recovery mechanism list */
438 spinlock_t lock; 440 spinlock_t lock;
441 bool dorq_attn;
439 u32 db_recovery_counter; 442 u32 db_recovery_counter;
443 unsigned long overflow;
440}; 444};
441 445
442struct storm_stats { 446struct storm_stats {
@@ -923,8 +927,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
923 927
924/* doorbell recovery mechanism */ 928/* doorbell recovery mechanism */
925void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); 929void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
926void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, 930void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
927 enum qed_db_rec_exec db_exec);
928bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); 931bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
929 932
930/* Other Linux specific common definitions */ 933/* Other Linux specific common definitions */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 195573793352..fccdb06fc5c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
102 102
103/* Doorbell address sanity (address within doorbell bar range) */ 103/* Doorbell address sanity (address within doorbell bar range) */
104static bool qed_db_rec_sanity(struct qed_dev *cdev, 104static bool qed_db_rec_sanity(struct qed_dev *cdev,
105 void __iomem *db_addr, void *db_data) 105 void __iomem *db_addr,
106 enum qed_db_rec_width db_width,
107 void *db_data)
106{ 108{
109 u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
110
107 /* Make sure doorbell address is within the doorbell bar */ 111 /* Make sure doorbell address is within the doorbell bar */
108 if (db_addr < cdev->doorbells || 112 if (db_addr < cdev->doorbells ||
109 (u8 __iomem *)db_addr > 113 (u8 __iomem *)db_addr + width >
110 (u8 __iomem *)cdev->doorbells + cdev->db_size) { 114 (u8 __iomem *)cdev->doorbells + cdev->db_size) {
111 WARN(true, 115 WARN(true,
112 "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", 116 "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
159 } 163 }
160 164
161 /* Sanitize doorbell address */ 165 /* Sanitize doorbell address */
162 if (!qed_db_rec_sanity(cdev, db_addr, db_data)) 166 if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
163 return -EINVAL; 167 return -EINVAL;
164 168
165 /* Obtain hwfn from doorbell address */ 169 /* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
205 return 0; 209 return 0;
206 } 210 }
207 211
208 /* Sanitize doorbell address */
209 if (!qed_db_rec_sanity(cdev, db_addr, db_data))
210 return -EINVAL;
211
212 /* Obtain hwfn from doorbell address */ 212 /* Obtain hwfn from doorbell address */
213 p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); 213 p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
214 214
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
300 300
301/* Ring the doorbell of a single doorbell recovery entry */ 301/* Ring the doorbell of a single doorbell recovery entry */
302static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, 302static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
303 struct qed_db_recovery_entry *db_entry, 303 struct qed_db_recovery_entry *db_entry)
304 enum qed_db_rec_exec db_exec) 304{
305{ 305 /* Print according to width */
306 if (db_exec != DB_REC_ONCE) { 306 if (db_entry->db_width == DB_REC_WIDTH_32B) {
307 /* Print according to width */ 307 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
308 if (db_entry->db_width == DB_REC_WIDTH_32B) { 308 "ringing doorbell address %p data %x\n",
309 DP_VERBOSE(p_hwfn, QED_MSG_SPQ, 309 db_entry->db_addr,
310 "%s doorbell address %p data %x\n", 310 *(u32 *)db_entry->db_data);
311 db_exec == DB_REC_DRY_RUN ? 311 } else {
312 "would have rung" : "ringing", 312 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
313 db_entry->db_addr, 313 "ringing doorbell address %p data %llx\n",
314 *(u32 *)db_entry->db_data); 314 db_entry->db_addr,
315 } else { 315 *(u64 *)(db_entry->db_data));
316 DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
317 "%s doorbell address %p data %llx\n",
318 db_exec == DB_REC_DRY_RUN ?
319 "would have rung" : "ringing",
320 db_entry->db_addr,
321 *(u64 *)(db_entry->db_data));
322 }
323 } 316 }
324 317
325 /* Sanity */ 318 /* Sanity */
326 if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, 319 if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
327 db_entry->db_data)) 320 db_entry->db_width, db_entry->db_data))
328 return; 321 return;
329 322
330 /* Flush the write combined buffer. Since there are multiple doorbelling 323 /* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
334 wmb(); 327 wmb();
335 328
336 /* Ring the doorbell */ 329 /* Ring the doorbell */
337 if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { 330 if (db_entry->db_width == DB_REC_WIDTH_32B)
338 if (db_entry->db_width == DB_REC_WIDTH_32B) 331 DIRECT_REG_WR(db_entry->db_addr,
339 DIRECT_REG_WR(db_entry->db_addr, 332 *(u32 *)(db_entry->db_data));
340 *(u32 *)(db_entry->db_data)); 333 else
341 else 334 DIRECT_REG_WR64(db_entry->db_addr,
342 DIRECT_REG_WR64(db_entry->db_addr, 335 *(u64 *)(db_entry->db_data));
343 *(u64 *)(db_entry->db_data));
344 }
345 336
346 /* Flush the write combined buffer. Next doorbell may come from a 337 /* Flush the write combined buffer. Next doorbell may come from a
347 * different entity to the same address... 338 * different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
350} 341}
351 342
352/* Traverse the doorbell recovery entry list and ring all the doorbells */ 343/* Traverse the doorbell recovery entry list and ring all the doorbells */
353void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, 344void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
354 enum qed_db_rec_exec db_exec)
355{ 345{
356 struct qed_db_recovery_entry *db_entry = NULL; 346 struct qed_db_recovery_entry *db_entry = NULL;
357 347
358 if (db_exec != DB_REC_ONCE) { 348 DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
359 DP_NOTICE(p_hwfn, 349 p_hwfn->db_recovery_info.db_recovery_counter);
360 "Executing doorbell recovery. Counter was %d\n",
361 p_hwfn->db_recovery_info.db_recovery_counter);
362 350
363 /* Track amount of times recovery was executed */ 351 /* Track amount of times recovery was executed */
364 p_hwfn->db_recovery_info.db_recovery_counter++; 352 p_hwfn->db_recovery_info.db_recovery_counter++;
365 }
366 353
367 /* Protect the list */ 354 /* Protect the list */
368 spin_lock_bh(&p_hwfn->db_recovery_info.lock); 355 spin_lock_bh(&p_hwfn->db_recovery_info.lock);
369 list_for_each_entry(db_entry, 356 list_for_each_entry(db_entry,
370 &p_hwfn->db_recovery_info.list, list_entry) { 357 &p_hwfn->db_recovery_info.list, list_entry)
371 qed_db_recovery_ring(p_hwfn, db_entry, db_exec); 358 qed_db_recovery_ring(p_hwfn, db_entry);
372 if (db_exec == DB_REC_ONCE)
373 break;
374 }
375
376 spin_unlock_bh(&p_hwfn->db_recovery_info.lock); 359 spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
377} 360}
378 361
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index e23980e301b6..8848d5bed6e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
378 u32 count = QED_DB_REC_COUNT; 378 u32 count = QED_DB_REC_COUNT;
379 u32 usage = 1; 379 u32 usage = 1;
380 380
381 /* Flush any pending (e)dpms as they may never arrive */
382 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
383
381 /* wait for usage to zero or count to run out. This is necessary since 384 /* wait for usage to zero or count to run out. This is necessary since
382 * EDPM doorbell transactions can take multiple 64b cycles, and as such 385 * EDPM doorbell transactions can take multiple 64b cycles, and as such
383 * can "split" over the pci. Possibly, the doorbell drop can happen with 386 * can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
406 409
407int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 410int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
408{ 411{
409 u32 overflow; 412 u32 attn_ovfl, cur_ovfl;
410 int rc; 413 int rc;
411 414
412 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); 415 attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
413 DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); 416 &p_hwfn->db_recovery_info.overflow);
414 if (!overflow) { 417 cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
415 qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); 418 if (!cur_ovfl && !attn_ovfl)
416 return 0; 419 return 0;
417 }
418 420
419 if (qed_edpm_enabled(p_hwfn)) { 421 DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
422 attn_ovfl, cur_ovfl);
423
424 if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
420 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); 425 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
421 if (rc) 426 if (rc)
422 return rc; 427 return rc;
423 } 428 }
424 429
425 /* Flush any pending (e)dpm as they may never arrive */
426 qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
427
428 /* Release overflow sticky indication (stop silently dropping everything) */ 430 /* Release overflow sticky indication (stop silently dropping everything) */
429 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); 431 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
430 432
431 /* Repeat all last doorbells (doorbell drop recovery) */ 433 /* Repeat all last doorbells (doorbell drop recovery) */
432 qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); 434 qed_db_recovery_execute(p_hwfn);
433 435
434 return 0; 436 return 0;
435} 437}
436 438
437static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) 439static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
438{ 440{
439 u32 int_sts, first_drop_reason, details, address, all_drops_reason;
440 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; 441 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
442 u32 overflow;
441 int rc; 443 int rc;
442 444
443 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); 445 overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
444 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); 446 if (!overflow)
447 goto out;
448
449 /* Run PF doorbell recovery in next periodic handler */
450 set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
451
452 if (!p_hwfn->db_bar_no_edpm) {
453 rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
454 if (rc)
455 goto out;
456 }
457
458 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
459out:
460 /* Schedule the handler even if overflow was not detected */
461 qed_periodic_db_rec_start(p_hwfn);
462}
463
464static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
465{
466 u32 int_sts, first_drop_reason, details, address, all_drops_reason;
467 struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
445 468
446 /* int_sts may be zero since all PFs were interrupted for doorbell 469 /* int_sts may be zero since all PFs were interrupted for doorbell
447 * overflow but another one already handled it. Can abort here. If 470 * overflow but another one already handled it. Can abort here. If
448 * This PF also requires overflow recovery we will be interrupted again. 471 * This PF also requires overflow recovery we will be interrupted again.
449 * The masked almost full indication may also be set. Ignoring. 472 * The masked almost full indication may also be set. Ignoring.
450 */ 473 */
474 int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
451 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) 475 if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
452 return 0; 476 return 0;
453 477
478 DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
479
454 /* check if db_drop or overflow happened */ 480 /* check if db_drop or overflow happened */
455 if (int_sts & (DORQ_REG_INT_STS_DB_DROP | 481 if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
456 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { 482 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
477 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, 503 GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
478 first_drop_reason, all_drops_reason); 504 first_drop_reason, all_drops_reason);
479 505
480 rc = qed_db_rec_handler(p_hwfn, p_ptt);
481 qed_periodic_db_rec_start(p_hwfn);
482 if (rc)
483 return rc;
484
485 /* Clear the doorbell drop details and prepare for next drop */ 506 /* Clear the doorbell drop details and prepare for next drop */
486 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); 507 qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
487 508
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
507 return -EINVAL; 528 return -EINVAL;
508} 529}
509 530
531static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
532{
533 p_hwfn->db_recovery_info.dorq_attn = true;
534 qed_dorq_attn_overflow(p_hwfn);
535
536 return qed_dorq_attn_int_sts(p_hwfn);
537}
538
539static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
540{
541 if (p_hwfn->db_recovery_info.dorq_attn)
542 goto out;
543
544 /* Call DORQ callback if the attention was missed */
545 qed_dorq_attn_cb(p_hwfn);
546out:
547 p_hwfn->db_recovery_info.dorq_attn = false;
548}
549
 510/* Instead of major changes to the data-structure, we have some 'special' 550/* Instead of major changes to the data-structure, we have some 'special'
511 * identifiers for sources that changed meaning between adapters. 551 * identifiers for sources that changed meaning between adapters.
512 */ 552 */
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
1080 } 1120 }
1081 } 1121 }
1082 1122
1123 /* Handle missed DORQ attention */
1124 qed_dorq_attn_handler(p_hwfn);
1125
1083 /* Clear IGU indication for the deasserted bits */ 1126 /* Clear IGU indication for the deasserted bits */
1084 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + 1127 DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
1085 GTT_BAR0_MAP_REG_IGU_CMD + 1128 GTT_BAR0_MAP_REG_IGU_CMD +
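Aside: the qed_int.c hunks above split DORQ handling in two: the attention callback latches the overflow condition in a software bit and schedules the periodic handler, and the periodic handler runs recovery if either that latched bit or the current hardware sticky bit is set, so an overflow cleared at attention time is not lost. Below is a minimal stand-alone C model of that handshake, not driver code; the names (overflow_latched, hw_ovfl_sticky, the two handler functions) are invented stand-ins for QED_OVERFLOW_BIT, DORQ_REG_PF_OVFL_STICKY, qed_dorq_attn_overflow() and qed_db_rec_handler(), and the queue flush is omitted.

#include <stdbool.h>
#include <stdio.h>

static bool overflow_latched;   /* stands in for QED_OVERFLOW_BIT         */
static bool hw_ovfl_sticky;     /* stands in for DORQ_REG_PF_OVFL_STICKY  */

/* attention path: remember the overflow and release the sticky indication */
static void attn_overflow(void)
{
    if (hw_ovfl_sticky) {
        overflow_latched = true;
        hw_ovfl_sticky = false;     /* the real code flushes the queue first */
    }
    /* periodic recovery is scheduled unconditionally after this */
}

/* periodic path: act on either the latched or the currently visible overflow */
static void periodic_db_rec(void)
{
    bool attn_ovfl = overflow_latched;  /* test_and_clear_bit() equivalent */
    bool cur_ovfl = hw_ovfl_sticky;

    overflow_latched = false;
    if (!attn_ovfl && !cur_ovfl)
        return;                         /* nothing to recover */

    printf("doorbell recovery: attn %d current %d\n", attn_ovfl, cur_ovfl);
    /* ... flush, clear sticky, replay all registered doorbells ... */
}

int main(void)
{
    hw_ovfl_sticky = true;  /* hardware reports an overflow           */
    attn_overflow();        /* attention clears the sticky bit ...    */
    periodic_db_rec();      /* ... yet recovery still runs (attn = 1) */
    return 0;
}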
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 1f356ed4f761..d473b522afc5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
192 192
193/** 193/**
194 * @brief - Doorbell Recovery handler. 194 * @brief - Doorbell Recovery handler.
195 * Run DB_REAL_DEAL doorbell recovery in case of PF overflow 195 * Run doorbell recovery in case of PF overflow (and flush DORQ if
196 * (and flush DORQ if needed), otherwise run DB_REC_ONCE. 196 * needed).
197 * 197 *
198 * @param p_hwfn 198 * @param p_hwfn
199 * @param p_ptt 199 * @param p_ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f164d4acebcb..6de23b56b294 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
970 } 970 }
971} 971}
972 972
973#define QED_PERIODIC_DB_REC_COUNT 100 973#define QED_PERIODIC_DB_REC_COUNT 10
974#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 974#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
975#define QED_PERIODIC_DB_REC_INTERVAL \ 975#define QED_PERIODIC_DB_REC_INTERVAL \
976 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 976 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9faaa6df78ed..2f318aaf2b05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
1591 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; 1591 p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1592 } else { 1592 } else {
1593 DP_INFO(p_hwfn, 1593 DP_INFO(p_hwfn,
1594 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n", 1594 "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
1595 vf->abs_vf_id, 1595 vf->abs_vf_id,
1596 req->vfdev_info.eth_fp_hsi_major, 1596 req->vfdev_info.eth_fp_hsi_major,
1597 req->vfdev_info.eth_fp_hsi_minor, 1597 req->vfdev_info.eth_fp_hsi_minor,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 5f3f42a25361..bddb2b5982dc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
490 490
491 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); 491 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
492 if (IS_ERR(ptp->clock)) { 492 if (IS_ERR(ptp->clock)) {
493 rc = -EINVAL;
494 DP_ERR(edev, "PTP clock registration failed\n"); 493 DP_ERR(edev, "PTP clock registration failed\n");
494 qede_ptp_disable(edev);
495 rc = -EINVAL;
495 goto err2; 496 goto err2;
496 } 497 }
497 498
498 return 0; 499 return 0;
499 500
500err2:
501 qede_ptp_disable(edev);
502 ptp->clock = NULL;
503err1: 501err1:
504 kfree(ptp); 502 kfree(ptp);
503err2:
505 edev->ptp = NULL; 504 edev->ptp = NULL;
506 505
507 return rc; 506 return rc;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a622ec33453a..699a8870e928 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1244,6 +1244,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1244 goto err_option_port_add; 1244 goto err_option_port_add;
1245 } 1245 }
1246 1246
1247 /* set promiscuity level to new slave */
1248 if (dev->flags & IFF_PROMISC) {
1249 err = dev_set_promiscuity(port_dev, 1);
1250 if (err)
1251 goto err_set_slave_promisc;
1252 }
1253
1254 /* set allmulti level to new slave */
1255 if (dev->flags & IFF_ALLMULTI) {
1256 err = dev_set_allmulti(port_dev, 1);
1257 if (err) {
1258 if (dev->flags & IFF_PROMISC)
1259 dev_set_promiscuity(port_dev, -1);
1260 goto err_set_slave_promisc;
1261 }
1262 }
1263
1247 netif_addr_lock_bh(dev); 1264 netif_addr_lock_bh(dev);
1248 dev_uc_sync_multiple(port_dev, dev); 1265 dev_uc_sync_multiple(port_dev, dev);
1249 dev_mc_sync_multiple(port_dev, dev); 1266 dev_mc_sync_multiple(port_dev, dev);
@@ -1260,6 +1277,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1260 1277
1261 return 0; 1278 return 0;
1262 1279
1280err_set_slave_promisc:
1281 __team_option_inst_del_port(team, port);
1282
1263err_option_port_add: 1283err_option_port_add:
1264 team_upper_dev_unlink(team, port); 1284 team_upper_dev_unlink(team, port);
1265 1285
@@ -1305,6 +1325,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
1305 1325
1306 team_port_disable(team, port); 1326 team_port_disable(team, port);
1307 list_del_rcu(&port->list); 1327 list_del_rcu(&port->list);
1328
1329 if (dev->flags & IFF_PROMISC)
1330 dev_set_promiscuity(port_dev, -1);
1331 if (dev->flags & IFF_ALLMULTI)
1332 dev_set_allmulti(port_dev, -1);
1333
1308 team_upper_dev_unlink(team, port); 1334 team_upper_dev_unlink(team, port);
1309 netdev_rx_handler_unregister(port_dev); 1335 netdev_rx_handler_unregister(port_dev);
1310 team_port_disable_netpoll(port); 1336 team_port_disable_netpoll(port);
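Aside: the team.c hunks above mirror the team master's IFF_PROMISC and IFF_ALLMULTI flags onto a newly enslaved port, roll the promiscuity bump back if the allmulti step fails, and undo both when the port is removed. The following stand-alone counter model sketches that symmetry (not kernel code); struct port, port_add() and port_del() are invented for the example, while the dev_set_promiscuity()/dev_set_allmulti() calls named in the comments are the refcount-style helpers used by the hunks.

#include <stdbool.h>
#include <stdio.h>

struct port { int promisc; int allmulti; };

static int port_add(struct port *p, bool promisc, bool allmulti, bool allmulti_fails)
{
    if (promisc)
        p->promisc++;               /* dev_set_promiscuity(port_dev, 1)  */
    if (allmulti) {
        if (allmulti_fails) {       /* dev_set_allmulti() returned error */
            if (promisc)
                p->promisc--;       /* roll back the promiscuity bump    */
            return -1;
        }
        p->allmulti++;              /* dev_set_allmulti(port_dev, 1)     */
    }
    return 0;
}

static void port_del(struct port *p, bool promisc, bool allmulti)
{
    if (promisc)
        p->promisc--;               /* dev_set_promiscuity(port_dev, -1) */
    if (allmulti)
        p->allmulti--;              /* dev_set_allmulti(port_dev, -1)    */
}

int main(void)
{
    struct port p = { 0, 0 };

    if (!port_add(&p, true, true, false))
        port_del(&p, true, true);
    printf("promisc %d allmulti %d\n", p.promisc, p.allmulti);  /* 0 0 */
    return 0;
}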
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a20ea270d519..1acc622d2183 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
2728 num_msdus++; 2728 num_msdus++;
2729 num_bytes += ret; 2729 num_bytes += ret;
2730 } 2730 }
2731 ieee80211_return_txq(hw, txq); 2731 ieee80211_return_txq(hw, txq, false);
2732 ieee80211_txq_schedule_end(hw, txq->ac); 2732 ieee80211_txq_schedule_end(hw, txq->ac);
2733 2733
2734 record->num_msdus = cpu_to_le16(num_msdus); 2734 record->num_msdus = cpu_to_le16(num_msdus);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b73c23d4ce86..41e89db244d2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
4089 if (ret < 0) 4089 if (ret < 0)
4090 break; 4090 break;
4091 } 4091 }
4092 ieee80211_return_txq(hw, txq); 4092 ieee80211_return_txq(hw, txq, false);
4093 ath10k_htt_tx_txq_update(hw, txq); 4093 ath10k_htt_tx_txq_update(hw, txq);
4094 if (ret == -EBUSY) 4094 if (ret == -EBUSY)
4095 break; 4095 break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4374 if (ret < 0) 4374 if (ret < 0)
4375 break; 4375 break;
4376 } 4376 }
4377 ieee80211_return_txq(hw, txq); 4377 ieee80211_return_txq(hw, txq, false);
4378 ath10k_htt_tx_txq_update(hw, txq); 4378 ath10k_htt_tx_txq_update(hw, txq);
4379out: 4379out:
4380 ieee80211_txq_schedule_end(hw, ac); 4380 ieee80211_txq_schedule_end(hw, ac);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 773d428ff1b0..b17e1ca40995 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1938 goto out; 1938 goto out;
1939 1939
1940 while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) { 1940 while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
1941 bool force;
1942
1941 tid = (struct ath_atx_tid *)queue->drv_priv; 1943 tid = (struct ath_atx_tid *)queue->drv_priv;
1942 1944
1943 ret = ath_tx_sched_aggr(sc, txq, tid); 1945 ret = ath_tx_sched_aggr(sc, txq, tid);
1944 ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret); 1946 ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
1945 1947
1946 ieee80211_return_txq(hw, queue); 1948 force = !skb_queue_empty(&tid->retry_q);
1949 ieee80211_return_txq(hw, queue, force);
1947 } 1950 }
1948 1951
1949out: 1952out:
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index fdc56f821b5a..eb6defb6d0cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -82,6 +82,7 @@
82#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" 82#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
83#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" 83#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
84#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-" 84#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
85#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
85#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-" 86#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
86#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-" 87#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
87#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-" 88#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
@@ -105,8 +106,8 @@
105 IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" 106 IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
106#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ 107#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
107 IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" 108 IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
108#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ 109#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
109 IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" 110 IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
110#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ 111#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
111 IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" 112 IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
112#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \ 113#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
235 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 236 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
236}; 237};
237 238
238const struct iwl_cfg iwl22260_2ax_cfg = { 239const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
239 .name = "Intel(R) Wireless-AX 22260", 240 .name = "Intel(R) Wi-Fi 6 AX101",
241 .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
242 IWL_DEVICE_22500,
243 /*
244 * This device doesn't support receiving BlockAck with a large bitmap
245 * so we need to restrict the size of transmitted aggregation to the
246 * HT size; mac80211 would otherwise pick the HE max (256) by default.
247 */
248 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
249};
250
251const struct iwl_cfg iwl_ax200_cfg_cc = {
252 .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
240 .fw_name_pre = IWL_CC_A_FW_PRE, 253 .fw_name_pre = IWL_CC_A_FW_PRE,
241 IWL_DEVICE_22500, 254 IWL_DEVICE_22500,
242 /* 255 /*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
249}; 262};
250 263
251const struct iwl_cfg killer1650x_2ax_cfg = { 264const struct iwl_cfg killer1650x_2ax_cfg = {
252 .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)", 265 .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
253 .fw_name_pre = IWL_CC_A_FW_PRE, 266 .fw_name_pre = IWL_CC_A_FW_PRE,
254 IWL_DEVICE_22500, 267 IWL_DEVICE_22500,
255 /* 268 /*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
262}; 275};
263 276
264const struct iwl_cfg killer1650w_2ax_cfg = { 277const struct iwl_cfg killer1650w_2ax_cfg = {
265 .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)", 278 .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
266 .fw_name_pre = IWL_CC_A_FW_PRE, 279 .fw_name_pre = IWL_CC_A_FW_PRE,
267 IWL_DEVICE_22500, 280 IWL_DEVICE_22500,
268 /* 281 /*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
328}; 341};
329 342
330const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = { 343const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
331 .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)", 344 .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
332 .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, 345 .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
333 IWL_DEVICE_22500, 346 IWL_DEVICE_22500,
334 /* 347 /*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
340}; 353};
341 354
342const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = { 355const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
343 .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)", 356 .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
344 .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE, 357 .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
345 IWL_DEVICE_22500, 358 IWL_DEVICE_22500,
346 /* 359 /*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
444MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 457MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
445MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 458MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
446MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 459MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
460MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
447MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 461MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
448MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 462MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
449MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); 463MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f119c49cd39c..d7380016f1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
1614 if (!range) { 1614 if (!range) {
1615 IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n", 1615 IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
1616 le32_to_cpu(reg->region_id), type); 1616 le32_to_cpu(reg->region_id), type);
1617 memset(*data, 0, le32_to_cpu((*data)->len));
1617 return; 1618 return;
1618 } 1619 }
1619 1620
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
1623 if (range_size < 0) { 1624 if (range_size < 0) {
1624 IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n", 1625 IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
1625 le32_to_cpu(reg->region_id), type); 1626 le32_to_cpu(reg->region_id), type);
1627 memset(*data, 0, le32_to_cpu((*data)->len));
1626 return; 1628 return;
1627 } 1629 }
1628 range = range + range_size; 1630 range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
1807 1809
1808 trigger = fwrt->dump.active_trigs[id].trig; 1810 trigger = fwrt->dump.active_trigs[id].trig;
1809 1811
1810 size = sizeof(*dump_file); 1812 size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
1811 size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
1812
1813 if (!size) 1813 if (!size)
1814 return NULL; 1814 return NULL;
1815 1815
1816 size += sizeof(*dump_file);
1817
1816 dump_file = vzalloc(size); 1818 dump_file = vzalloc(size);
1817 if (!dump_file) 1819 if (!dump_file)
1818 return NULL; 1820 return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
1942 iwl_dump_error_desc->len = 0; 1944 iwl_dump_error_desc->len = 0;
1943 1945
1944 ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0); 1946 ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
1945 if (ret) { 1947 if (ret)
1946 kfree(iwl_dump_error_desc); 1948 kfree(iwl_dump_error_desc);
1947 } else { 1949 else
1948 set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status); 1950 iwl_trans_sync_nmi(fwrt->trans);
1949
1950 /* trigger nmi to halt the fw */
1951 iwl_force_nmi(fwrt->trans);
1952 }
1953 1951
1954 return ret; 1952 return ret;
1955} 1953}
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
2489 2487
2490void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt) 2488void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
2491{ 2489{
2492 /* if the wait event timeout elapses instead of wake up then
2493 * the driver did not receive NMI interrupt and can not assume the FW
2494 * is halted
2495 */
2496 int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
2497 !test_bit(STATUS_FW_WAIT_DUMP,
2498 &fwrt->trans->status),
2499 msecs_to_jiffies(2000));
2500 if (!ret) {
2501 /* failed to receive NMI interrupt, assuming the FW is stuck */
2502 set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
2503
2504 clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
2505 }
2506
2507 /* Assuming the op mode mutex is held at this point */
2508 iwl_fw_dbg_collect_sync(fwrt); 2490 iwl_fw_dbg_collect_sync(fwrt);
2509 2491
2510 iwl_trans_stop_device(fwrt->trans); 2492 iwl_trans_stop_device(fwrt->trans);
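Aside: the _iwl_fw_error_ini_dump hunk above fixes an ordering bug: the fixed header size used to be added before the "is there anything to dump?" check, so the check could never see zero. A small stand-alone sketch of the corrected order (dump_size() and struct dump_file are invented for the example):

#include <stdio.h>
#include <stddef.h>

struct dump_file { int header; };

static size_t dump_size(size_t trigger_len)
{
    size_t size = trigger_len;              /* variable part first            */

    if (!size)
        return 0;                           /* nothing to dump: no allocation */
    return size + sizeof(struct dump_file); /* only now add the fixed header  */
}

int main(void)
{
    printf("%zu %zu\n", dump_size(0), dump_size(128));
    return 0;
}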
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 7adf4e4e841a..12310e3d2fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
76 fwrt->ops_ctx = ops_ctx; 76 fwrt->ops_ctx = ops_ctx;
77 INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk); 77 INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
78 iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir); 78 iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
79 init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
80} 79}
81IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); 80IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
82 81
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5f87773667b..93070848280a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
549extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; 549extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
550extern const struct iwl_cfg iwl22000_2ac_cfg_jf; 550extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
551extern const struct iwl_cfg iwl_ax101_cfg_qu_hr; 551extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
552extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
552extern const struct iwl_cfg iwl22000_2ax_cfg_hr; 553extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
553extern const struct iwl_cfg iwl22260_2ax_cfg; 554extern const struct iwl_cfg iwl_ax200_cfg_cc;
554extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; 555extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
555extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; 556extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
556extern const struct iwl_cfg killer1650x_2ax_cfg; 557extern const struct iwl_cfg killer1650x_2ax_cfg;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index aea6d03e545a..e539bc94eff7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -327,6 +327,7 @@ enum {
327#define CSR_HW_REV_TYPE_NONE (0x00001F0) 327#define CSR_HW_REV_TYPE_NONE (0x00001F0)
328#define CSR_HW_REV_TYPE_QNJ (0x0000360) 328#define CSR_HW_REV_TYPE_QNJ (0x0000360)
329#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364) 329#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
330#define CSR_HW_REV_TYPE_QUZ (0x0000354)
330#define CSR_HW_REV_TYPE_HR_CDB (0x0000340) 331#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
331#define CSR_HW_REV_TYPE_SO (0x0000370) 332#define CSR_HW_REV_TYPE_SO (0x0000370)
332#define CSR_HW_REV_TYPE_TY (0x0000420) 333#define CSR_HW_REV_TYPE_TY (0x0000420)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bbebbf3efd57..d8690acee40c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -338,7 +338,6 @@ enum iwl_d3_status {
338 * are sent 338 * are sent
339 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent 339 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
340 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation 340 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
341 * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
342 */ 341 */
343enum iwl_trans_status { 342enum iwl_trans_status {
344 STATUS_SYNC_HCMD_ACTIVE, 343 STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
351 STATUS_TRANS_GOING_IDLE, 350 STATUS_TRANS_GOING_IDLE,
352 STATUS_TRANS_IDLE, 351 STATUS_TRANS_IDLE,
353 STATUS_TRANS_DEAD, 352 STATUS_TRANS_DEAD,
354 STATUS_FW_WAIT_DUMP,
355}; 353};
356 354
357static inline int 355static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
618 struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, 616 struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
619 u32 dump_mask); 617 u32 dump_mask);
620 void (*debugfs_cleanup)(struct iwl_trans *trans); 618 void (*debugfs_cleanup)(struct iwl_trans *trans);
619 void (*sync_nmi)(struct iwl_trans *trans);
621}; 620};
622 621
623/** 622/**
@@ -831,7 +830,6 @@ struct iwl_trans {
831 u32 lmac_error_event_table[2]; 830 u32 lmac_error_event_table[2];
832 u32 umac_error_event_table; 831 u32 umac_error_event_table;
833 unsigned int error_event_table_tlv_status; 832 unsigned int error_event_table_tlv_status;
834 wait_queue_head_t fw_halt_waitq;
835 833
836 /* pointer to trans specific struct */ 834 /* pointer to trans specific struct */
837 /*Ensure that this pointer will always be aligned to sizeof pointer */ 835 /*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1239 /* prevent double restarts due to the same erroneous FW */ 1237 /* prevent double restarts due to the same erroneous FW */
1240 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) 1238 if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1241 iwl_op_mode_nic_error(trans->op_mode); 1239 iwl_op_mode_nic_error(trans->op_mode);
1240}
1242 1241
1243 if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status)) 1242static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1244 wake_up(&trans->fw_halt_waitq); 1243{
1245 1244 if (trans->ops->sync_nmi)
1245 trans->ops->sync_nmi(trans);
1246} 1246}
1247 1247
1248/***************************************************** 1248/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3a92c09d4692..6a3b11dd2edf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
2714 2714
2715 iwl_mvm_mac_ctxt_remove(mvm, vif); 2715 iwl_mvm_mac_ctxt_remove(mvm, vif);
2716 2716
2717 kfree(mvmvif->ap_wep_key);
2718 mvmvif->ap_wep_key = NULL;
2719
2720 mutex_unlock(&mvm->mutex); 2717 mutex_unlock(&mvm->mutex);
2721} 2718}
2722 2719
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3183 ret = iwl_mvm_update_sta(mvm, vif, sta); 3180 ret = iwl_mvm_update_sta(mvm, vif, sta);
3184 } else if (old_state == IEEE80211_STA_ASSOC && 3181 } else if (old_state == IEEE80211_STA_ASSOC &&
3185 new_state == IEEE80211_STA_AUTHORIZED) { 3182 new_state == IEEE80211_STA_AUTHORIZED) {
3186 /* if wep is used, need to set the key for the station now */ 3183 ret = 0;
3187 if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
3188 mvm_sta->wep_key =
3189 kmemdup(mvmvif->ap_wep_key,
3190 sizeof(*mvmvif->ap_wep_key) +
3191 mvmvif->ap_wep_key->keylen,
3192 GFP_KERNEL);
3193 if (!mvm_sta->wep_key) {
3194 ret = -ENOMEM;
3195 goto out_unlock;
3196 }
3197
3198 ret = iwl_mvm_set_sta_key(mvm, vif, sta,
3199 mvm_sta->wep_key,
3200 STA_KEY_IDX_INVALID);
3201 } else {
3202 ret = 0;
3203 }
3204 3184
3205 /* we don't support TDLS during DCM */ 3185 /* we don't support TDLS during DCM */
3206 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3186 if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
3242 NL80211_TDLS_DISABLE_LINK); 3222 NL80211_TDLS_DISABLE_LINK);
3243 } 3223 }
3244 3224
3245 /* Remove STA key if this is an AP using WEP */
3246 if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
3247 int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
3248 mvm_sta->wep_key);
3249
3250 if (!ret)
3251 ret = rm_ret;
3252 kfree(mvm_sta->wep_key);
3253 mvm_sta->wep_key = NULL;
3254 }
3255
3256 if (unlikely(ret && 3225 if (unlikely(ret &&
3257 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3226 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
3258 &mvm->status))) 3227 &mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
3289 struct ieee80211_sta *sta, u32 changed) 3258 struct ieee80211_sta *sta, u32 changed)
3290{ 3259{
3291 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3260 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
3261 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3262
3263 if (changed & (IEEE80211_RC_BW_CHANGED |
3264 IEEE80211_RC_SUPP_RATES_CHANGED |
3265 IEEE80211_RC_NSS_CHANGED))
3266 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
3267 true);
3292 3268
3293 if (vif->type == NL80211_IFTYPE_STATION && 3269 if (vif->type == NL80211_IFTYPE_STATION &&
3294 changed & IEEE80211_RC_NSS_CHANGED) 3270 changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3439 break; 3415 break;
3440 case WLAN_CIPHER_SUITE_WEP40: 3416 case WLAN_CIPHER_SUITE_WEP40:
3441 case WLAN_CIPHER_SUITE_WEP104: 3417 case WLAN_CIPHER_SUITE_WEP104:
3442 if (vif->type == NL80211_IFTYPE_AP) { 3418 if (vif->type == NL80211_IFTYPE_STATION)
3443 struct iwl_mvm_vif *mvmvif = 3419 break;
3444 iwl_mvm_vif_from_mac80211(vif); 3420 if (iwl_mvm_has_new_tx_api(mvm))
3445 3421 return -EOPNOTSUPP;
3446 mvmvif->ap_wep_key = kmemdup(key, 3422 /* support HW crypto on TX */
3447 sizeof(*key) + key->keylen, 3423 return 0;
3448 GFP_KERNEL);
3449 if (!mvmvif->ap_wep_key)
3450 return -ENOMEM;
3451 }
3452
3453 if (vif->type != NL80211_IFTYPE_STATION)
3454 return 0;
3455 break;
3456 default: 3424 default:
3457 /* currently FW supports only one optional cipher scheme */ 3425 /* currently FW supports only one optional cipher scheme */
3458 if (hw->n_cipher_schemes && 3426 if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
3540 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3508 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
3541 if (ret) { 3509 if (ret) {
3542 IWL_WARN(mvm, "set key failed\n"); 3510 IWL_WARN(mvm, "set key failed\n");
3511 key->hw_key_idx = STA_KEY_IDX_INVALID;
3543 /* 3512 /*
3544 * can't add key for RX, but we don't need it 3513 * can't add key for RX, but we don't need it
3545 * in the device for TX so still return 0 3514 * in the device for TX so still return 0,
3515 * unless we have new TX API where we cannot
3516 * put key material into the TX_CMD
3546 */ 3517 */
3547 key->hw_key_idx = STA_KEY_IDX_INVALID; 3518 if (iwl_mvm_has_new_tx_api(mvm))
3548 ret = 0; 3519 ret = -EOPNOTSUPP;
3520 else
3521 ret = 0;
3549 } 3522 }
3550 3523
3551 break; 3524 break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bca6f6b536d9..a50dc53df086 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
498 netdev_features_t features; 498 netdev_features_t features;
499 499
500 struct iwl_probe_resp_data __rcu *probe_resp_data; 500 struct iwl_probe_resp_data __rcu *probe_resp_data;
501 struct ieee80211_key_conf *ap_wep_key;
502}; 501};
503 502
504static inline struct iwl_mvm_vif * 503static inline struct iwl_mvm_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 498c315291cf..98d123dd7177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -8,7 +8,7 @@
8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation 11 * Copyright(c) 2018 - 2019 Intel Corporation
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
31 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation 34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
1399 1399
1400 iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid); 1400 iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
1401 list_del_init(&mvmtxq->list); 1401 list_del_init(&mvmtxq->list);
1402 local_bh_disable();
1402 iwl_mvm_mac_itxq_xmit(mvm->hw, txq); 1403 iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
1404 local_bh_enable();
1403 } 1405 }
1404 1406
1405 mutex_unlock(&mvm->mutex); 1407 mutex_unlock(&mvm->mutex);
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2333 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg, 2335 iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2334 timeout); 2336 timeout);
2335 2337
2336 if (mvmvif->ap_wep_key) {
2337 u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
2338
2339 __set_bit(key_offset, mvm->fw_key_table);
2340
2341 if (key_offset == STA_KEY_IDX_INVALID)
2342 return -ENOSPC;
2343
2344 ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2345 mvmvif->ap_wep_key, true, 0, NULL, 0,
2346 key_offset, 0);
2347 if (ret)
2348 return ret;
2349 }
2350
2351 return 0; 2338 return 0;
2352} 2339}
2353 2340
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2419 2406
2420 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0); 2407 iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
2421 2408
2422 if (mvmvif->ap_wep_key) {
2423 int i;
2424
2425 if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
2426 mvm->fw_key_table)) {
2427 IWL_ERR(mvm, "offset %d not used in fw key table.\n",
2428 mvmvif->ap_wep_key->hw_key_idx);
2429 return -ENOENT;
2430 }
2431
2432 /* track which key was deleted last */
2433 for (i = 0; i < STA_KEY_MAX_NUM; i++) {
2434 if (mvm->fw_key_deleted[i] < U8_MAX)
2435 mvm->fw_key_deleted[i]++;
2436 }
2437 mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
2438 ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
2439 mvmvif->ap_wep_key, true);
2440 if (ret)
2441 return ret;
2442 }
2443
2444 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); 2409 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2445 if (ret) 2410 if (ret)
2446 IWL_WARN(mvm, "Failed sending remove station\n"); 2411 IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 79700c7310a1..b4d4071b865d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -8,7 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation 11 * Copyright(c) 2018 - 2019 Intel Corporation
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 32 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
33 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 33 * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation 34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved. 35 * All rights reserved.
36 * 36 *
37 * Redistribution and use in source and binary forms, with or without 37 * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
394 * the BA window. To be used for UAPSD only. 394 * the BA window. To be used for UAPSD only.
395 * @ptk_pn: per-queue PTK PN data structures 395 * @ptk_pn: per-queue PTK PN data structures
396 * @dup_data: per queue duplicate packet detection data 396 * @dup_data: per queue duplicate packet detection data
397 * @wep_key: used in AP mode. Is a duplicate of the WEP key.
398 * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID 397 * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
399 * @tx_ant: the index of the antenna to use for data tx to this station. Only 398 * @tx_ant: the index of the antenna to use for data tx to this station. Only
400 * used during connection establishment (e.g. for the 4 way handshake 399 * used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
426 struct iwl_mvm_key_pn __rcu *ptk_pn[4]; 425 struct iwl_mvm_key_pn __rcu *ptk_pn[4];
427 struct iwl_mvm_rxq_dup_data *dup_data; 426 struct iwl_mvm_rxq_dup_data *dup_data;
428 427
429 struct ieee80211_key_conf *wep_key;
430
431 u8 reserved_queue; 428 u8 reserved_queue;
432 429
433 /* Temporary, until the new TLC will control the Tx protection */ 430 /* Temporary, until the new TLC will control the Tx protection */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2b94e4cef56c..9f1af8da9dc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
953 {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)}, 953 {IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
954 {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)}, 954 {IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
955 955
956 {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)}, 956 {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
957 {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)}, 957 {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
958 {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)}, 958 {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
959 {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)}, 959 {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
960 {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)}, 960 {IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
961 {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)}, 961 {IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
962 {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)}, 962 {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
963 {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)}, 963 {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
964 {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
964 965
965 {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)}, 966 {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
966 {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)}, 967 {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index bf8b61a476c5..59213164f35e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
1043 1043
1044void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); 1044void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
1045void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); 1045void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
1046void iwl_trans_sync_nmi(struct iwl_trans *trans); 1046void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
1047 1047
1048#ifdef CONFIG_IWLWIFI_DEBUGFS 1048#ifdef CONFIG_IWLWIFI_DEBUGFS
1049int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); 1049int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index fe8269d023de..79c1dc05f948 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
3318 .unref = iwl_trans_pcie_unref, \ 3318 .unref = iwl_trans_pcie_unref, \
3319 .dump_data = iwl_trans_pcie_dump_data, \ 3319 .dump_data = iwl_trans_pcie_dump_data, \
3320 .d3_suspend = iwl_trans_pcie_d3_suspend, \ 3320 .d3_suspend = iwl_trans_pcie_d3_suspend, \
3321 .d3_resume = iwl_trans_pcie_d3_resume 3321 .d3_resume = iwl_trans_pcie_d3_resume, \
3322 .sync_nmi = iwl_trans_pcie_sync_nmi
3322 3323
3323#ifdef CONFIG_PM_SLEEP 3324#ifdef CONFIG_PM_SLEEP
3324#define IWL_TRANS_PM_OPS \ 3325#define IWL_TRANS_PM_OPS \
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3542 } 3543 }
3543 } else if (cfg == &iwl_ax101_cfg_qu_hr) { 3544 } else if (cfg == &iwl_ax101_cfg_qu_hr) {
3544 if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3545 if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3546 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3547 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
3548 trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
3549 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3545 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { 3550 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
3546 trans->cfg = &iwl_ax101_cfg_qu_hr; 3551 trans->cfg = &iwl_ax101_cfg_qu_hr;
3547 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3552 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3560 } 3565 }
3561 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3566 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
3562 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && 3567 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
3563 (trans->cfg != &iwl22260_2ax_cfg || 3568 (trans->cfg != &iwl_ax200_cfg_cc ||
3564 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { 3569 trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
3565 u32 hw_status; 3570 u32 hw_status;
3566 3571
@@ -3637,7 +3642,7 @@ out_no_pci:
3637 return ERR_PTR(ret); 3642 return ERR_PTR(ret);
3638} 3643}
3639 3644
3640void iwl_trans_sync_nmi(struct iwl_trans *trans) 3645void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3641{ 3646{
3642 unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT; 3647 unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
3643 3648
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 88530d9f4a54..38d110338987 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
965 cmd_str); 965 cmd_str);
966 ret = -ETIMEDOUT; 966 ret = -ETIMEDOUT;
967 967
968 iwl_trans_sync_nmi(trans); 968 iwl_trans_pcie_sync_nmi(trans);
969 goto cancel; 969 goto cancel;
970 } 970 }
971 971
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fbd37d23e85..7be73e2c4681 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1960 iwl_get_cmd_string(trans, cmd->id)); 1960 iwl_get_cmd_string(trans, cmd->id));
1961 ret = -ETIMEDOUT; 1961 ret = -ETIMEDOUT;
1962 1962
1963 iwl_trans_sync_nmi(trans); 1963 iwl_trans_pcie_sync_nmi(trans);
1964 goto cancel; 1964 goto cancel;
1965 } 1965 }
1966 1966
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 4cc7b222859c..7437faae7cf2 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2644 enum nl80211_band band; 2644 enum nl80211_band band;
2645 const struct ieee80211_ops *ops = &mac80211_hwsim_ops; 2645 const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
2646 struct net *net; 2646 struct net *net;
2647 int idx; 2647 int idx, i;
2648 int n_limits = 0; 2648 int n_limits = 0;
2649 2649
2650 if (WARN_ON(param->channels > 1 && !param->use_chanctx)) 2650 if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2768 goto failed_hw; 2768 goto failed_hw;
2769 } 2769 }
2770 2770
2771 data->if_combination.max_interfaces = 0;
2772 for (i = 0; i < n_limits; i++)
2773 data->if_combination.max_interfaces +=
2774 data->if_limits[i].max;
2775
2771 data->if_combination.n_limits = n_limits; 2776 data->if_combination.n_limits = n_limits;
2772 data->if_combination.max_interfaces = 2048;
2773 data->if_combination.limits = data->if_limits; 2777 data->if_combination.limits = data->if_limits;
2774 2778
2775 hw->wiphy->iface_combinations = &data->if_combination; 2779 /*
2776 hw->wiphy->n_iface_combinations = 1; 2780 * If we actually were asked to support combinations,
2781 * advertise them - if there's only a single thing like
2782 * only IBSS then don't advertise it as combinations.
2783 */
2784 if (data->if_combination.max_interfaces > 1) {
2785 hw->wiphy->iface_combinations = &data->if_combination;
2786 hw->wiphy->n_iface_combinations = 1;
2787 }
2777 2788
2778 if (param->ciphers) { 2789 if (param->ciphers) {
2779 memcpy(data->ciphers, param->ciphers, 2790 memcpy(data->ciphers, param->ciphers,
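Aside: the mac80211_hwsim hunk above replaces the hard-coded max_interfaces of 2048 with the sum of the per-type interface limits, and only advertises the combination when that sum exceeds one. A self-contained sketch of the computation (struct iface_limit and total_max_interfaces() are stand-ins for struct ieee80211_iface_limit and the in-place loop):

#include <stdio.h>

struct iface_limit { int max; };

static int total_max_interfaces(const struct iface_limit *limits, int n_limits)
{
    int i, total = 0;

    for (i = 0; i < n_limits; i++)
        total += limits[i].max;
    return total;
}

int main(void)
{
    struct iface_limit limits[] = { { .max = 2048 }, { .max = 1 } };
    int n = sizeof(limits) / sizeof(limits[0]);
    int max = total_max_interfaces(limits, n);

    /* advertise iface_combinations only when max > 1 */
    printf("max_interfaces = %d\n", max);
    return 0;
}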
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index d54dda67d036..3af45949e868 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
510 bus_ops->rmw = mt7603_rmw; 510 bus_ops->rmw = mt7603_rmw;
511 dev->mt76.bus = bus_ops; 511 dev->mt76.bus = bus_ops;
512 512
513 spin_lock_init(&dev->ps_lock);
514
513 INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work); 515 INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
514 tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet, 516 tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
515 (unsigned long)dev); 517 (unsigned long)dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 5e31d7da96fc..5abc02b57818 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
343 MT_BA_CONTROL_1_RESET)); 343 MT_BA_CONTROL_1_RESET));
344} 344}
345 345
346void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, 346void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
347 int ba_size) 347 int ba_size)
348{ 348{
349 u32 addr = mt7603_wtbl2_addr(wcid); 349 u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
358 mt76_clear(dev, addr + (15 * 4), tid_mask); 358 mt76_clear(dev, addr + (15 * 4), tid_mask);
359 return; 359 return;
360 } 360 }
361 mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
362
363 mt7603_mac_stop(dev);
364 switch (tid) {
365 case 0:
366 mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
367 break;
368 case 1:
369 mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
370 break;
371 case 2:
372 mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
373 ssn);
374 mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
375 ssn >> 8);
376 break;
377 case 3:
378 mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
379 break;
380 case 4:
381 mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
382 break;
383 case 5:
384 mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
385 ssn);
386 mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
387 ssn >> 4);
388 break;
389 case 6:
390 mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
391 break;
392 case 7:
393 mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
394 break;
395 }
396 mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
397 mt7603_mac_start(dev);
398 361
399 for (i = 7; i > 0; i--) { 362 for (i = 7; i > 0; i--) {
400 if (ba_size >= MT_AGG_SIZE_LIMIT(i)) 363 if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
827 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 790 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
828 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 791 struct ieee80211_tx_rate *rate = &info->control.rates[0];
829 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 792 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
793 struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
830 struct ieee80211_vif *vif = info->control.vif; 794 struct ieee80211_vif *vif = info->control.vif;
831 struct mt7603_vif *mvif; 795 struct mt7603_vif *mvif;
832 int wlan_idx; 796 int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
834 int tx_count = 8; 798 int tx_count = 8;
835 u8 frame_type, frame_subtype; 799 u8 frame_type, frame_subtype;
836 u16 fc = le16_to_cpu(hdr->frame_control); 800 u16 fc = le16_to_cpu(hdr->frame_control);
801 u16 seqno = 0;
837 u8 vif_idx = 0; 802 u8 vif_idx = 0;
838 u32 val; 803 u32 val;
839 u8 bw; 804 u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
919 tx_count = 0x1f; 884 tx_count = 0x1f;
920 885
921 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) | 886 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
922 FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl)); 887 MT_TXD3_SN_VALID;
888
889 if (ieee80211_is_data_qos(hdr->frame_control))
890 seqno = le16_to_cpu(hdr->seq_ctrl);
891 else if (ieee80211_is_back_req(hdr->frame_control))
892 seqno = le16_to_cpu(bar->start_seq_num);
893 else
894 val &= ~MT_TXD3_SN_VALID;
895
896 val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
897
923 txwi[3] = cpu_to_le32(val); 898 txwi[3] = cpu_to_le32(val);
924 899
925 if (key) { 900 if (key) {
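Aside: in the mt7603 txwi hunk above, the driver now programs MT_TXD3_SEQ from the frame's sequence number rather than the raw Sequence Control word: in 802.11 the low 4 bits of that field carry the fragment number and bits 4-15 the sequence number, hence the ">> 4" (a BAR's Starting Sequence Control has the same layout). A minimal illustration (seq_ctrl_to_sn() is invented for the example):

#include <stdint.h>
#include <stdio.h>

static uint16_t seq_ctrl_to_sn(uint16_t seq_ctrl)
{
    return seq_ctrl >> 4;               /* drop the 4-bit fragment number */
}

int main(void)
{
    uint16_t seq_ctrl = (uint16_t)((1234 << 4) | 2);  /* SN 1234, fragment 2 */

    printf("sequence number %u\n", seq_ctrl_to_sn(seq_ctrl));
    return 0;
}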
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index cc0fe0933b2d..a3c4ef198bfe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
372 struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv; 372 struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
373 struct sk_buff_head list; 373 struct sk_buff_head list;
374 374
375 mt76_stop_tx_queues(&dev->mt76, sta, false); 375 mt76_stop_tx_queues(&dev->mt76, sta, true);
376 mt7603_wtbl_set_ps(dev, msta, ps); 376 mt7603_wtbl_set_ps(dev, msta, ps);
377 if (ps) 377 if (ps)
378 return; 378 return;
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
584 case IEEE80211_AMPDU_TX_OPERATIONAL: 584 case IEEE80211_AMPDU_TX_OPERATIONAL:
585 mtxq->aggr = true; 585 mtxq->aggr = true;
586 mtxq->send_bar = false; 586 mtxq->send_bar = false;
587 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size); 587 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
588 break; 588 break;
589 case IEEE80211_AMPDU_TX_STOP_FLUSH: 589 case IEEE80211_AMPDU_TX_STOP_FLUSH:
590 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 590 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
591 mtxq->aggr = false; 591 mtxq->aggr = false;
592 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn); 592 ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
593 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); 593 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
594 break; 594 break;
595 case IEEE80211_AMPDU_TX_START: 595 case IEEE80211_AMPDU_TX_START:
596 mtxq->agg_ssn = *ssn << 4; 596 mtxq->agg_ssn = *ssn << 4;
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
598 break; 598 break;
599 case IEEE80211_AMPDU_TX_STOP_CONT: 599 case IEEE80211_AMPDU_TX_STOP_CONT:
600 mtxq->aggr = false; 600 mtxq->aggr = false;
601 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1); 601 mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
602 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 602 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
603 break; 603 break;
604 } 604 }
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 79f332429432..6049f3b7c8fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
200int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb); 200int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
201void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data); 201void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
202void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid); 202void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
203void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn, 203void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
204 int ba_size); 204 int ba_size);
205 205
206void mt7603_pse_client_reset(struct mt7603_dev *dev); 206void mt7603_pse_client_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 9ed231abe916..4fe5a83ca5a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 		return;
 
 	rcu_read_lock();
-	mt76_tx_status_lock(mdev, &list);
 
 	if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
 		wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 					  drv_priv);
 	}
 
+	mt76_tx_status_lock(mdev, &list);
+
 	if (wcid) {
 		if (stat->pktid >= MT_PACKET_ID_FIRST)
 			status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 		if (*update == 0 && stat_val == stat_cache &&
 		    stat->wcid == msta->status.wcid && msta->n_frames < 32) {
 			msta->n_frames++;
-			goto out;
+			mt76_tx_status_unlock(mdev, &list);
+			rcu_read_unlock();
+			return;
 		}
 
 		mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
 
 	if (status.skb)
 		mt76_tx_status_skb_done(mdev, status.skb, &list);
-	else
-		ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
 	mt76_tx_status_unlock(mdev, &list);
+
+	if (!status.skb)
+		ieee80211_tx_status_ext(mt76_hw(dev), &status);
 	rcu_read_unlock();
 }
 
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 4b1744e9fb78..50b92ca92bd7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
 	CONFIG_CHANNEL_HT40,
 	CONFIG_POWERSAVING,
 	CONFIG_HT_DISABLED,
-	CONFIG_QOS_DISABLED,
 	CONFIG_MONITORING,
 
 	/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2825560e2424..e8462f25d252 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 		rt2x00dev->intf_associated--;
 
 		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
-		clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
 	}
 
 	/*
-	 * Check for access point which do not support 802.11e . We have to
-	 * generate data frames sequence number in S/W for such AP, because
-	 * of H/W bug.
-	 */
-	if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
-		set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
-	/*
 	 * When the erp information has changed, we should perform
 	 * additional configuration steps. For all other changes we are done.
 	 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 92ddc19e7bf7..4834b4eb0206 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
 	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
 		/*
 		 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
-		 * seqno on retransmited data (non-QOS) frames. To workaround
-		 * the problem let's generate seqno in software if QOS is
-		 * disabled.
+		 * seqno on retransmitted data (non-QOS) and management frames.
+		 * To workaround the problem let's generate seqno in software.
+		 * Except for beacons which are transmitted periodically by H/W
+		 * hence hardware has to assign seqno for them.
 		 */
-		if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
-			__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
-		else
+		if (ieee80211_is_beacon(hdr->frame_control)) {
+			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 			/* H/W will generate sequence number */
 			return;
+		}
+
+		__clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
 	}
 
 	/*