author    Liad Kaufman <liad.kaufman@intel.com>  2015-12-23 09:03:46 -0500
committer Luca Coelho <luciano.coelho@intel.com>  2016-08-30 07:16:34 -0400
commit    9f9af3d7d303a5f622ceb219bd03bba3af553e76 (patch)
tree      2c935456fd5d41ed9be2381adeb2be29913ddc87 /drivers/net/wireless/intel
parent    d975d72016bb2540eff3018c3c0dd96688711748 (diff)
iwlwifi: mvm: re-aggregate shared queue after unsharing
When a shared queue becomes unshared, aggregations should be re-enabled if
they existed before. Make sure this happens, if required.

Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
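For context, the flow this patch adds can be condensed as follows: the inactivity check marks a shared queue that is left with a single user as IWL_MVM_QUEUE_RECONFIGURING, and the add-stream worker then redirects the queue back to its own AC and, if an aggregation session was open on the remaining TID, re-enables it via an ADD_STA command. The sketch below is only an illustration of that state machine; the struct layout, helper names and the absence of locking are simplifications, not the driver's real API (the real code in the diff holds mvm->mutex and mvm->queue_info_lock).

/* Condensed, non-authoritative sketch of the unshare/re-aggregate flow. */
#include <stdbool.h>

enum queue_status { QUEUE_READY, QUEUE_SHARED, QUEUE_RECONFIGURING };

struct queue_info {
        enum queue_status status;
        int refcount;           /* how many RA/TID streams use the queue */
        bool agg_was_on;        /* remaining TID had an aggregation session */
};

/* Inactivity check: a shared queue left with one user is marked. */
static void mark_for_reconfig(struct queue_info *q)
{
        if (q->refcount == 1 && q->status == QUEUE_SHARED)
                q->status = QUEUE_RECONFIGURING;
}

/* Worker: move the queue back to its own AC/FIFO and, if needed,
 * turn aggregation back on for the remaining TID. */
static void unshare_queue(struct queue_info *q)
{
        if (q->status != QUEUE_RECONFIGURING)
                return;

        /* redirect the queue -- iwl_mvm_scd_queue_redirect() in the patch */
        if (q->agg_was_on) {
                /* send ADD_STA with STA_MODIFY_TID_DISABLE_TX and mark the
                 * TXQ as non-shared -- iwl_mvm_unshare_queue() in the patch */
        }
        q->status = QUEUE_READY;
}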
Diffstat (limited to 'drivers/net/wireless/intel')
 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h   |  17
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c   | 166
 drivers/net/wireless/intel/iwlwifi/mvm/sta.h   |   4
 drivers/net/wireless/intel/iwlwifi/mvm/tx.c    |  38
 drivers/net/wireless/intel/iwlwifi/mvm/utils.c |   8
 5 files changed, 205 insertions(+), 28 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 28ebc12f1fe0..ee5a9adbf025 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -697,6 +697,10 @@ struct iwl_mvm_baid_data {
  * it. In this state, when a new queue is needed to be allocated but no
  * such free queue exists, an inactive queue might be freed and given to
  * the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ *	This is the state of a queue that has had traffic pass through it, but
+ *	needs to be reconfigured for some reason, e.g. the queue needs to
+ *	become unshared and aggregations re-enabled on.
  */
 enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_FREE,
@@ -704,6 +708,7 @@ enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_READY,
 	IWL_MVM_QUEUE_SHARED,
 	IWL_MVM_QUEUE_INACTIVE,
+	IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
@@ -1122,6 +1127,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 		(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
 }
 
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
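The two new inline helpers simply wrap the DQA queue-range checks that were previously open-coded at their call sites (see the sta.c hunk below). A stand-alone illustration, with placeholder values for the range constants (the real values come from the iwlwifi headers, not from this sketch):

#include <stdbool.h>
#include <stdint.h>

/* Placeholder values; the real constants are defined by the driver. */
#define IWL_MVM_DQA_MIN_MGMT_QUEUE	5
#define IWL_MVM_DQA_MAX_MGMT_QUEUE	8
#define IWL_MVM_DQA_MIN_DATA_QUEUE	10
#define IWL_MVM_DQA_MAX_DATA_QUEUE	31

/* A DATA queue: one of the queues handed out for regular traffic. */
static inline bool is_dqa_data_queue(uint8_t queue)
{
        return queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
               queue <= IWL_MVM_DQA_MAX_DATA_QUEUE;
}

/* A MGMT queue: one of the queues reserved for management frames. */
static inline bool is_dqa_mgmt_queue(uint8_t queue)
{
        return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
               queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE;
}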
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 5960eb4fdf1f..1f235e8d193a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
 			continue;
 
+		/* Don't try and take queues being reconfigured */
+		if (mvm->queue_info[queue].status ==
+		    IWL_MVM_QUEUE_RECONFIGURING)
+			continue;
+
 		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
 	}
 
@@ -501,27 +506,33 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		queue = ac_to_queue[IEEE80211_AC_VO];
 
 	/* Make sure queue found (or not) is legal */
-	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
-	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
-	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
 		IWL_ERR(mvm, "No DATA queues available to share\n");
-		queue = -ENOSPC;
+		return -ENOSPC;
+	}
+
+	/* Make sure the queue isn't in the middle of being reconfigured */
+	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+		IWL_ERR(mvm,
+			"TXQ %d is in the middle of re-config - try again\n",
+			queue);
+		return -EBUSY;
 	}
 
 	return queue;
 }
 
 /*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 			       int ac, int ssn, unsigned int wdg_timeout,
 			       bool force)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
@@ -555,7 +566,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 	spin_unlock_bh(&mvm->queue_info_lock);
 
-	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
 	/* Stop MAC queues and wait for this queue to empty */
@@ -709,7 +720,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	if (WARN_ON(queue <= 0)) {
 		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
 			tid, cfg.sta_id);
-		return -ENOSPC;
+		return queue;
 	}
 
 	/*
@@ -827,6 +838,84 @@ out_err:
 	return ret;
 }
 
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	s8 sta_id;
+	int tid = -1;
+	unsigned long tid_bitmap;
+	unsigned int wdg_timeout;
+	int ssn;
+	int ret = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Find TID for queue, and make sure it is the only one on the queue */
+	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+	if (tid_bitmap != BIT(tid)) {
+		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+			queue, tid_bitmap);
+		return;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+			    tid);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+		return;
+	}
+
+	/* If aggs should be turned back on - do it */
+	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+		struct iwl_mvm_add_sta_cmd cmd = {0};
+
+		mvmsta->tid_disable_agg &= ~BIT(tid);
+
+		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+		cmd.sta_id = mvmsta->sta_id;
+		cmd.add_modify = STA_MODE_MODIFY;
+		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+		if (!ret) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "TXQ #%d is now aggregated again\n",
+					    queue);
+
+			/* Mark queue internally as aggregating again */
+			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+		}
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+	spin_unlock_bh(&mvm->queue_info_lock);
+}
+
 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
 {
 	if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +983,26 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 	unsigned long deferred_tid_traffic;
-	int sta_id, tid;
+	int queue, sta_id, tid;
 
 	/* Check inactivity of queues */
 	iwl_mvm_inactivity_check(mvm);
 
 	mutex_lock(&mvm->mutex);
 
+	/* Reconfigure queues requiring reconfiguration */
+	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+		bool reconfig;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		reconfig = (mvm->queue_info[queue].status ==
+			    IWL_MVM_QUEUE_RECONFIGURING);
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		if (reconfig)
+			iwl_mvm_unshare_queue(mvm, queue);
+	}
+
 	/* Go over all stations with deferred traffic */
 	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
 			 IWL_MVM_STATION_COUNT) {
@@ -1956,7 +2058,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
+	spin_lock(&mvm->queue_info_lock);
 
 	/*
 	 * Note the possible cases:
@@ -1967,14 +2069,20 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * non-DQA mode, since the TXQ hasn't yet been allocated
 	 */
 	txq_id = mvmsta->tid_data[tid].txq_id;
-	if (!iwl_mvm_is_dqa_supported(mvm) ||
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+		ret = -ENXIO;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can't start tid %d agg on shared queue!\n",
+				    tid);
+		goto release_locks;
+	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
 	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						 mvm->first_agg_queue,
 						 mvm->last_agg_queue);
 		if (txq_id < 0) {
 			ret = txq_id;
-			spin_unlock_bh(&mvm->queue_info_lock);
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
 			goto release_locks;
 		}
@@ -1982,7 +2090,8 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
 	}
-	spin_unlock_bh(&mvm->queue_info_lock);
+
+	spin_unlock(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2115,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	ret = 0;
+	goto out;
 
 release_locks:
+	spin_unlock(&mvm->queue_info_lock);
+out:
 	spin_unlock_bh(&mvmsta->lock);
 
 	return ret;
@@ -2023,6 +2135,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
 	int queue, ret;
 	bool alloc_queue = true;
+	enum iwl_mvm_queue_status queue_status;
 	u16 ssn;
 
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2161,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+	spin_lock_bh(&mvm->queue_info_lock);
+	queue_status = mvm->queue_info[queue].status;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/* In DQA mode, the existing queue might need to be reconfigured */
 	if (iwl_mvm_is_dqa_supported(mvm)) {
-		spin_lock_bh(&mvm->queue_info_lock);
 		/* Maybe there is no need to even alloc a queue... */
 		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 			alloc_queue = false;
-		spin_unlock_bh(&mvm->queue_info_lock);
 
 		/*
 		 * Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2204,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
 				   &cfg, wdg_timeout);
 
-	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-	if (ret)
-		return -EIO;
+	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
+	if (queue_status != IWL_MVM_QUEUE_SHARED) {
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+	}
 
 	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2241,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	u16 txq_id;
 	int err;
 
-
 	/*
 	 * If mac80211 is cleaning its state, then say that we finished since
 	 * our state has been cleared anyway.
@@ -2152,6 +2269,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 */
 	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	switch (tid_data->state) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index bbc1cab2c3bf..709542bbfce5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force);
+
 #endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c6585ab48df3..8b91544e6220 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -838,6 +838,22 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
 	}
 }
 
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+	unsigned long now = jiffies;
+	int tid;
+
+	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+			return true;
+	}
+
+	return false;
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -940,7 +956,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
 		return 0;
-
 	}
 
 	/* If we are here - TXQ exists and needs to be re-activated */
@@ -953,8 +968,25 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 				    txq_id);
 	}
 
-	/* Keep track of the time of the last frame for this RA/TID */
-	mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		/* Keep track of the time of the last frame for this RA/TID */
+		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+		/*
+		 * If we have timed-out TIDs - schedule the worker that will
+		 * reconfig the queues and update them
+		 *
+		 * Note that the mvm->queue_info_lock isn't being taken here in
+		 * order to not serialize the TX flow. This isn't dangerous
+		 * because scheduling mvm->add_stream_wk can't ruin the state,
+		 * and if we DON'T schedule it due to some race condition then
+		 * next TX we get here we will.
+		 */
+		if (unlikely(mvm->queue_info[txq_id].status ==
+			     IWL_MVM_QUEUE_SHARED &&
+			     iwl_mvm_txq_should_update(mvm, txq_id)))
+			schedule_work(&mvm->add_stream_wk);
+	}
 
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
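The tx.c hunk above deliberately reads queue_info without taking queue_info_lock before scheduling the worker: a stale read at worst skips or adds one harmless schedule_work() call, and the worker re-checks the state under the proper locks. A generic, user-space sketch of that pattern (placeholder names, an atomic flag standing in for schedule_work(); not the driver's code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool work_pending;

/* Unlocked, possibly stale check -- e.g. a last-frame timestamp vs. a timeout. */
static bool looks_timed_out(void)
{
        return true;
}

static void maybe_schedule_worker(void)
{
        /*
         * Racy by design: a stale answer only means the worker runs one TX
         * later, or once too often. The worker itself takes the real locks
         * and re-validates the queue state before acting.
         */
        if (looks_timed_out())
                atomic_store(&work_pending, true);      /* schedule_work() stand-in */
}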
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 68f4e7fdfc11..dae64a67a531 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1131,7 +1131,13 @@ static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
 			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
 	}
 
-	/* TODO: if queue was shared - need to re-enable AGGs */
+	/* If the queue is marked as shared - "unshare" it */
+	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+				    queue);
+	}
 }
 
 void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)