author		Liad Kaufman <liad.kaufman@intel.com>	2015-07-28 11:56:08 -0400
committer	Emmanuel Grumbach <emmanuel.grumbach@intel.com>	2016-03-30 09:21:25 -0400
commit		24afba7690e49714795a1e8ee25e617ea0fb566b (patch)
tree		8a22bdbde62540adf6d019acde8b4069d2465a22
parent		7ec54716e71a846dddf6aa1e33a12e1dcca6d276 (diff)
iwlwifi: mvm: support bss dynamic alloc/dealloc of queues
"DQA" is shorthand for "dynamic queue allocation". This
enables on-demand allocation of queues per RA/TID rather than
statically allocating them per vif, allowing various potential
benefits.
Please refer to the DOC section this patch adds to sta.h for
a more in-depth explanation of this feature.
There are many things to take into consideration when working
in DQA mode, and this patch is only one in a series. Note that
the default operation mode is non-DQA mode, unless the FW
indicates that it supports DQA mode.
This patch enables support of DQA for a station connected to
an AP, operating in non-aggregated mode.
When a frame for an unused RA/TID arrives at the driver, it
isn't TXed immediately, but is deferred until a suitable
queue is allocated for it; a worker then allocates the queue
and TXes the deferred traffic.
When a STA is removed, its queues go back into the queue
pools for reuse as needed.
Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
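
For orientation, here is a minimal, standalone sketch of the deferred-TX flow
described above: frames for a RA/TID with no allocated queue are parked, and a
worker later allocates a queue and transmits them. This is plain user-space C,
not the driver code in the diff below; all names in it (tid_data, tx(),
add_stream_worker(), etc.) are illustrative only.

/*
 * Toy model of the DQA deferred-TX flow. Not driver code; names are
 * illustrative. In the real driver the deferred frames live on
 * mvm_sta->tid_data[tid].deferred_tx_frames and the worker is
 * mvm->add_stream_wk, as introduced by the hunks below.
 */
#include <stdio.h>
#include <stdlib.h>

#define INVALID_QUEUE (-1)

struct frame {
	int seq;
	struct frame *next;
};

struct tid_data {
	int txq_id;             /* INVALID_QUEUE until the worker runs */
	struct frame *deferred; /* deferred frames (LIFO here for brevity) */
};

static int next_free_queue = 10; /* pretend the DATA pool starts at TXQ #10 */

static void tx_frame(int queue, struct frame *f)
{
	printf("TX seq %d on queue %d\n", f->seq, queue);
}

/* TX entry point: defer the frame if no queue is allocated yet */
static void tx(struct tid_data *tid, struct frame *f)
{
	if (tid->txq_id == INVALID_QUEUE) {
		f->next = tid->deferred;
		tid->deferred = f;
		printf("deferring seq %d until a queue exists\n", f->seq);
		return;
	}
	tx_frame(tid->txq_id, f);
}

/* Worker: allocate the queue, then flush whatever was deferred */
static void add_stream_worker(struct tid_data *tid)
{
	tid->txq_id = next_free_queue++;
	while (tid->deferred) {
		struct frame *f = tid->deferred;

		tid->deferred = f->next;
		tx_frame(tid->txq_id, f);
		free(f);
	}
}

int main(void)
{
	struct tid_data tid = { .txq_id = INVALID_QUEUE, .deferred = NULL };
	struct frame *f = calloc(1, sizeof(*f));

	f->seq = 1;
	tx(&tid, f);             /* no queue yet -> deferred */
	add_stream_worker(&tid); /* allocates a queue, TXes seq 1 */

	f = calloc(1, sizeof(*f));
	f->seq = 2;
	tx(&tid, f);             /* queue exists now -> sent immediately */
	free(f);
	return 0;
}
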
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/d3.c       |   2
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h   |  22
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c |  21
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c |  49
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/mvm.h      |   7
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/ops.c      |   1
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.c      | 254
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/sta.h      |  87
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/mvm/tx.c       |  54
9 files changed, 481 insertions(+), 16 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c1a313149eed..e3561bbc2468 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
 	if (ret)
 		return ret;
 	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index e6bd0c8d4cc0..8217eb25b090 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -80,12 +80,32 @@
 #include "fw-api-stats.h"
 #include "fw-api-tof.h"
 
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
 enum {
 	IWL_MVM_OFFCHANNEL_QUEUE = 8,
 	IWL_MVM_CMD_QUEUE = 9,
 };
 
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *	Each MGMT queue is mapped to a single STA
+ *	MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *	as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
 enum iwl_mvm_tx_fifo {
 	IWL_MVM_TX_FIFO_BK = 0,
 	IWL_MVM_TX_FIFO_BE,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index e885db3464b0..c02c1055d534 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -425,12 +425,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
 		return 0;
 	}
 
-	/* Find available queues, and allocate them to the ACs */
+	/*
+	 * Find available queues, and allocate them to the ACs. When in
+	 * DQA-mode they aren't really used, and this is done only so the
+	 * mac80211 ieee80211_check_queues() function won't fail
+	 */
 	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
 		u8 queue = find_first_zero_bit(&used_hw_queues,
 					       mvm->first_agg_queue);
 
-		if (queue >= mvm->first_agg_queue) {
+		if (!iwl_mvm_is_dqa_supported(mvm) &&
+		    queue >= mvm->first_agg_queue) {
 			IWL_ERR(mvm, "Failed to allocate queue\n");
 			ret = -EIO;
 			goto exit_fail;
@@ -495,6 +500,10 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 		/* fall through */
 	default:
+		/* If DQA is supported - queues will be enabled when needed */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			break;
+
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
 					      vif->hw_queue[ac],
@@ -523,6 +532,14 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 				    IWL_MAX_TID_COUNT, 0);
 		/* fall through */
 	default:
+		/*
+		 * If DQA is supported - queues were already disabled, since in
+		 * DQA-mode the queues are a property of the STA and not of the
+		 * vif, and at this point the STA was already deleted
+		 */
+		if (iwl_mvm_is_dqa_supported(mvm))
+			break;
+
 		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
 			iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
 					    vif->hw_queue[ac],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 1a3481ba1446..115d7aa5e720 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -992,6 +992,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 	iwl_mvm_reset_phy_ctxts(mvm);
 	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
 	memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
 	memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
 	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
 	memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1178,6 +1179,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 
 	flush_work(&mvm->d0i3_exit_work);
 	flush_work(&mvm->async_handlers_wk);
+	flush_work(&mvm->add_stream_wk);
 	cancel_delayed_work_sync(&mvm->fw_dump_wk);
 	iwl_mvm_free_fw_dump_desc(mvm);
 
@@ -2382,6 +2384,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
 				   peer_addr, action);
 }
 
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+					     struct iwl_mvm_sta *mvm_sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock_bh(&mvm_sta->lock);
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		tid_data = &mvm_sta->tid_data[i];
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+			ieee80211_free_txskb(mvm->hw, skb);
+	}
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_sta *sta,
@@ -2402,6 +2420,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
 	/* if a STA is being removed, reuse its ID */
 	flush_work(&mvm->sta_drained_wk);
 
+	/*
+	 * If we are in a STA removal flow and in DQA mode:
+	 *
+	 * This is after the sync_rcu part, so the queues have already been
+	 * flushed. No more TXs on their way in mac80211's path, and no more in
+	 * the queues.
+	 * Also, we won't be getting any new TX frames for this station.
+	 * What we might have are deferred TX frames that need to be taken care
+	 * of.
+	 *
+	 * Drop any still-queued deferred-frame before removing the STA, and
+	 * make sure the worker is no longer handling frames for this STA.
+	 */
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST &&
+	    iwl_mvm_is_dqa_supported(mvm)) {
+		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+		flush_work(&mvm->add_stream_wk);
+
+		/*
+		 * No need to make sure deferred TX indication is off since the
+		 * worker will already remove it if it was on
+		 */
+	}
+
 	mutex_lock(&mvm->mutex);
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
@@ -3738,6 +3783,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	if (!vif || vif->type != NL80211_IFTYPE_STATION)
 		return;
 
+	/* Make sure we're done with the deferred traffic before flushing */
+	if (iwl_mvm_is_dqa_supported(mvm))
+		flush_work(&mvm->add_stream_wk);
+
 	mutex_lock(&mvm->mutex);
 	mvmvif = iwl_mvm_vif_from_mac80211(vif);
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 02ef1d91478c..f9430ee8f96b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -665,10 +665,16 @@ struct iwl_mvm {
 		/* Map to HW queue */
 		u32 hw_queue_to_mac80211;
 		u8 hw_queue_refcount;
+		/*
+		 * This is to mark that queue is reserved for a STA but not yet
+		 * allocated. This is needed to make sure we have at least one
+		 * available queue to use when adding a new STA
+		 */
 		bool setup_reserved;
 		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
 	} queue_info[IWL_MAX_HW_QUEUES];
 	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+	struct work_struct add_stream_wk; /* To add streams to queues */
 	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
 
 	const char *nvm_file_name;
@@ -688,6 +694,7 @@ struct iwl_mvm {
 	struct iwl_rx_phy_info last_phy_info;
 	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
 	struct work_struct sta_drained_wk;
+	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
 	atomic_t pending_frames[IWL_MVM_STATION_COUNT];
 	u32 tfd_drained[IWL_MVM_STATION_COUNT];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d4b71a7d0645..9fc705ca5841 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -579,6 +579,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
 	INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
 	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
 
 	spin_lock_init(&mvm->d0i3_tx_lock);
 	spin_lock_init(&mvm->refs_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942d7169..3f36a661ec96 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
 
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update)
+			   bool update, unsigned int flags)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
 
-	if (!update) {
+	if (!update || (flags & STA_MODIFY_QUEUES)) {
 		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+		if (flags & STA_MODIFY_QUEUES)
+			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
 	}
 
 	switch (sta->bandwidth) {
@@ -274,6 +277,204 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+	int ssn;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock(&mvm->queue_info_lock);
+
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+						IWL_MVM_DQA_MAX_MGMT_QUEUE);
+		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+					    queue);
+
+		/* If no such queue is found, we'll use a DATA queue instead */
+	}
+
+	if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+		queue = mvmsta->reserved_queue;
+		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+	}
+
+	if (queue < 0)
+		queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue >= 0)
+		mvm->queue_info[queue].setup_reserved = false;
+
+	spin_unlock(&mvm->queue_info_lock);
+
+	/* TODO: support shared queues for same RA */
+	if (queue < 0)
+		return -ENOSPC;
+
+	/*
+	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
+	 * as aggregatable.
+	 * Mark all DATA queues as allowing to be aggregated at some point
+	 */
+	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+			    queue, mvmsta->sta_id, tid);
+
+	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+			   wdg_timeout);
+
+	spin_lock_bh(&mvmsta->lock);
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tfd_queue_msk |= BIT(queue);
+
+	if (mvmsta->reserved_queue == queue)
+		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+	spin_unlock_bh(&mvmsta->lock);
+
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		return IEEE80211_AC_VO; /* MGMT */
+
+	return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	spin_unlock(&mvmsta->lock);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int sta_id, tid;
+
+	mutex_lock(&mvm->mutex);
+
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+	}
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int queue;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure we have free resources for this STA */
+	queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+					IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue < 0) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "No available queues for new station\n");
+		return -ENOSPC;
+	}
+	mvm->queue_info[queue].setup_reserved = true;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta->reserved_queue = queue;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+			    queue, mvmsta->sta_id);
+
+	return 0;
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta)
@@ -314,18 +515,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		ret = iwl_mvm_tdls_sta_init(mvm, sta);
 		if (ret)
 			return ret;
-	} else {
+	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
 		for (i = 0; i < IEEE80211_NUM_ACS; i++)
 			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 	}
 
 	/* for HW restart - reset everything but the sequence number */
-	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		u16 seq = mvm_sta->tid_data[i].seq_number;
 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
 		mvm_sta->tid_data[i].seq_number = seq;
+
+		if (!iwl_mvm_is_dqa_supported(mvm))
+			continue;
+
+		/*
+		 * Mark all queues for this STA as unallocated and defer TX
+		 * frames until the queue is allocated
+		 */
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;
 
 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +550,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		mvm_sta->dup_data = dup_data;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		ret = iwl_mvm_reserve_sta_stream(mvm, sta);
+		if (ret)
+			goto err;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 	if (ret)
 		goto err;
 
@@ -364,7 +582,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta)
 {
-	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +727,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 	mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_sta *mvm_sta)
+{
+	int ac;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+			continue;
+
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+	}
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		   struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta)
@@ -537,6 +775,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 			return ret;
 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
+	/* If DQA is supported - the queues can be disabled now */
+	if (iwl_mvm_is_dqa_supported(mvm))
+		iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
 	/* if we are associated - we can't remove the AP STA now */
 	if (vif->bss_conf.assoc)
 		return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1a8f69a41405..e3efdcd900f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,60 @@ struct iwl_mvm;
 struct iwl_mvm_vif;
 
 /**
+ * DOC: DQA - Dynamic Queue Allocation -introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * driver to allow dynamic allocation of queues on-demand, rather than allocate
+ * them statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *	TXQ #0 - command queue
+ *	TXQ #1 - aux frames
+ *	TXQ #2 - P2P device frames
+ *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *	TXQ #4 - BSS DATA frames queue
+ *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *	TXQ #9 - P2P GO/SoftAP probe responses
+ *	TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See the %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and go to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no such free queue exists for reserving, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue is required to be
+ * allocated, only mark the queue as aggregated via the ADD_STA command. Note,
+ * however, that a shared queue cannot be aggregated, and only after the other
+ * TIDs become inactive and are removed - only then can the queue be
+ * reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
+
+/**
  * DOC: station table - introduction
  *
  * The station table is a list of data structure that reprensent the stations.
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
 
 /**
  * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
  * @seq_number: the next WiFi sequence number to use
 * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
 *	This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
 *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
 * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
 * @state: state of the BA agreement establishment / tear down.
-* @txq_id: Tx queue used by the BA session
+* @txq_id: Tx queue used by the BA session / DQA
 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
 *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
 *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
 * @tx_time: medium time consumed by this A-MPDU
 */
 struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
 	u16 seq_number;
 	u16 next_reclaimed;
 	/* The rest is Tx AGG related */
@@ -316,7 +372,10 @@ struct iwl_mvm_rxq_dup_data {
 *	we need to signal the EOSP
 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
 *	and from Tx response flow, it needs a spinlock.
-* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+* @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+* @reserved_queue: the queue reserved for this STA for DQA purposes
+*	Every STA has is given one reserved queue to allow it to operate. If no
+*	such queue can be guaranteed, the STA addition will fail.
 * @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is thermal throttling enable Tx protection?
 * @disable_tx: is tx to this STA disabled?
@@ -329,6 +388,7 @@ struct iwl_mvm_rxq_dup_data {
 *	the BA window. To be used for UAPSD only.
 * @ptk_pn: per-queue PTK PN data structures
 * @dup_data: per queue duplicate packet detection data
+* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
 *
 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
 * in the structure for use by driver. This structure is placed in that
@@ -345,12 +405,16 @@ struct iwl_mvm_sta {
 	bool bt_reduced_txpower;
 	bool next_status_eosp;
 	spinlock_t lock;
-	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
 	struct iwl_lq_sta lq_sta;
 	struct ieee80211_vif *vif;
 	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
 	struct iwl_mvm_rxq_dup_data *dup_data;
 
+	u16 deferred_traffic_tid_map;
+
+	u8 reserved_queue;
+
 	/* Temporary, until the new TLC will control the Tx protection */
 	s8 tx_protection;
 	bool tt_tx_protection;
@@ -378,8 +442,18 @@ struct iwl_mvm_int_sta {
 	u32 tfd_queue_msk;
 };
 
+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *	about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update);
+			   bool update, unsigned int flags);
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta);
@@ -459,5 +533,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 				       struct iwl_mvm_vif *mvmvif,
 				       bool disable);
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
 #endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index c7c3d7bd38ba..24cff98ecca0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -639,6 +639,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 #endif
 
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 */
+	if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+		  "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+		  skb_queue_len(deferred_tx_frames))) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
 /*
  * Sets the fields in the Tx cmd that are crypto related
  */
@@ -695,6 +724,14 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+	} else if (iwl_mvm_is_dqa_supported(mvm) &&
+		   (ieee80211_is_qos_nullfunc(fc) ||
+		    ieee80211_is_nullfunc(fc))) {
+		/*
+		 * nullfunc frames should go to the MGMT queue regardless of QOS
+		 */
+		tid = IWL_MAX_TID_COUNT;
+		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
 	/* Copy MAC header from skb into command buffer */
@@ -715,6 +752,23 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 		txq_id = mvmsta->tid_data[tid].txq_id;
 	}
 
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		if (unlikely(mvmsta->tid_data[tid].txq_id ==
+			     IEEE80211_INVAL_HW_QUEUE)) {
+			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+			/*
+			 * The frame is now deferred, and the worker scheduled
+			 * will re-allocate it, so we can free it for now.
+			 */
+			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+			spin_unlock(&mvmsta->lock);
+			return 0;
+		}
+
+		txq_id = mvmsta->tid_data[tid].txq_id;
+	}
+
 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
 		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
 