diff options
author | Liad Kaufman <liad.kaufman@intel.com> | 2015-08-19 10:34:28 -0400 |
---|---|---|
committer | Luca Coelho <luciano.coelho@intel.com> | 2016-07-05 17:14:27 -0400 |
commit | 9794c64f302d6d544acbb5ab69a327d694a70fcb (patch) | |
tree | 8b3da996e69d2c3257872255f4165320a961f8b8 /drivers/net/wireless/intel/iwlwifi/mvm/utils.c | |
parent | ca221c9b946cd4a9ea67375c8d90379a0e65179d (diff) |
iwlwifi: mvm: support dqa queue inactivation upon timeout
Support marking queues as inactive upon a timeout expiring,
and allow inactive queues to be re-assigned to other RA/TIDs
if no other queue is free.
This is done by keeping a timestamp of the latest frame TXed
for every RA/TID, and then going over the queues currently in
use when a new queue is needed, marking as inactive all those
whose timeout has expired.
Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/utils.c')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 166 |
1 file changed, 165 insertions, 1 deletion
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 161b99efd63d..a0cb5ca4c9b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c | |||
@@ -579,17 +579,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) | |||
579 | iwl_mvm_dump_umac_error_log(mvm); | 579 | iwl_mvm_dump_umac_error_log(mvm); |
580 | } | 580 | } |
581 | 581 | ||
582 | int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq) | 582 | int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq) |
583 | { | 583 | { |
584 | int i; | 584 | int i; |
585 | 585 | ||
586 | lockdep_assert_held(&mvm->queue_info_lock); | 586 | lockdep_assert_held(&mvm->queue_info_lock); |
587 | 587 | ||
588 | /* Start by looking for a free queue */ | ||
588 | for (i = minq; i <= maxq; i++) | 589 | for (i = minq; i <= maxq; i++) |
589 | if (mvm->queue_info[i].hw_queue_refcount == 0 && | 590 | if (mvm->queue_info[i].hw_queue_refcount == 0 && |
590 | mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) | 591 | mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE) |
591 | return i; | 592 | return i; |
592 | 593 | ||
594 | /* | ||
595 | * If no free queue found - settle for an inactive one to reconfigure | ||
596 | * Make sure that the inactive queue either already belongs to this STA, | ||
597 | * or that if it belongs to another one - it isn't the reserved queue | ||
598 | */ | ||
599 | for (i = minq; i <= maxq; i++) | ||
600 | if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE && | ||
601 | (sta_id == mvm->queue_info[i].ra_sta_id || | ||
602 | !mvm->queue_info[i].reserved)) | ||
603 | return i; | ||
604 | |||
593 | return -ENOSPC; | 605 | return -ENOSPC; |
594 | } | 606 | } |
595 | 607 | ||
@@ -650,6 +662,7 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, | |||
650 | else | 662 | else |
651 | mvm->queue_info[queue].ra_sta_id = cfg->sta_id; | 663 | mvm->queue_info[queue].ra_sta_id = cfg->sta_id; |
652 | mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); | 664 | mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); |
665 | mvm->queue_info[queue].ra_sta_id = cfg->sta_id; | ||
653 | 666 | ||
654 | IWL_DEBUG_TX_QUEUES(mvm, | 667 | IWL_DEBUG_TX_QUEUES(mvm, |
655 | "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", | 668 | "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n", |
@@ -752,6 +765,9 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, | |||
752 | mvm->queue_info[queue].tid_bitmap = 0; | 765 | mvm->queue_info[queue].tid_bitmap = 0; |
753 | mvm->queue_info[queue].hw_queue_to_mac80211 = 0; | 766 | mvm->queue_info[queue].hw_queue_to_mac80211 = 0; |
754 | 767 | ||
768 | /* Regardless if this is a reserved TXQ for a STA - mark it as false */ | ||
769 | mvm->queue_info[queue].reserved = false; | ||
770 | |||
755 | spin_unlock_bh(&mvm->queue_info_lock); | 771 | spin_unlock_bh(&mvm->queue_info_lock); |
756 | 772 | ||
757 | iwl_trans_txq_disable(mvm->trans, queue, false); | 773 | iwl_trans_txq_disable(mvm->trans, queue, false); |
@@ -1039,6 +1055,154 @@ out: | |||
1039 | ieee80211_connection_loss(vif); | 1055 | ieee80211_connection_loss(vif); |
1040 | } | 1056 | } |
1041 | 1057 | ||
1058 | /* | ||
1059 | * Remove inactive TIDs of a given queue. | ||
1060 | * If all queue TIDs are inactive - mark the queue as inactive | ||
1061 | * If only some of the queue TIDs are inactive - unmap them from the queue | ||
1062 | */ | ||
1063 | static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, | ||
1064 | struct iwl_mvm_sta *mvmsta, int queue, | ||
1065 | unsigned long tid_bitmap) | ||
1066 | { | ||
1067 | int tid; | ||
1068 | |||
1069 | lockdep_assert_held(&mvmsta->lock); | ||
1070 | lockdep_assert_held(&mvm->queue_info_lock); | ||
1071 | |||
1072 | /* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */ | ||
1073 | for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { | ||
1074 | /* If some TFDs are still queued - don't mark TID as inactive */ | ||
1075 | if (iwl_mvm_tid_queued(&mvmsta->tid_data[tid])) | ||
1076 | tid_bitmap &= ~BIT(tid); | ||
1077 | } | ||
1078 | |||
1079 | /* If all TIDs in the queue are inactive - mark queue as inactive. */ | ||
1080 | if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) { | ||
1081 | mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; | ||
1082 | |||
1083 | for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) | ||
1084 | mvmsta->tid_data[tid].is_tid_active = false; | ||
1085 | |||
1086 | IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n", | ||
1087 | queue); | ||
1088 | return; | ||
1089 | } | ||
1090 | |||
1091 | /* | ||
1092 | * If we are here, this is a shared queue and not all TIDs timed-out. | ||
1093 | * Remove the ones that did. | ||
1094 | */ | ||
1095 | for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { | ||
1096 | int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]; | ||
1097 | |||
1098 | mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; | ||
1099 | mvm->queue_info[queue].hw_queue_to_mac80211 &= ~BIT(mac_queue); | ||
1100 | mvm->queue_info[queue].hw_queue_refcount--; | ||
1101 | mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); | ||
1102 | mvmsta->tid_data[tid].is_tid_active = false; | ||
1103 | |||
1104 | IWL_DEBUG_TX_QUEUES(mvm, | ||
1105 | "Removing inactive TID %d from shared Q:%d\n", | ||
1106 | tid, queue); | ||
1107 | } | ||
1108 | |||
1109 | IWL_DEBUG_TX_QUEUES(mvm, | ||
1110 | "TXQ #%d left with tid bitmap 0x%x\n", queue, | ||
1111 | mvm->queue_info[queue].tid_bitmap); | ||
1112 | |||
1113 | /* | ||
1114 | * There may be different TIDs with the same mac queues, so make | ||
1115 | * sure all TIDs have existing corresponding mac queues enabled | ||
1116 | */ | ||
1117 | tid_bitmap = mvm->queue_info[queue].tid_bitmap; | ||
1118 | for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { | ||
1119 | mvm->queue_info[queue].hw_queue_to_mac80211 |= | ||
1120 | BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]); | ||
1121 | } | ||
1122 | |||
1123 | /* TODO: if queue was shared - need to re-enable AGGs */ | ||
1124 | } | ||
1125 | |||
1126 | void iwl_mvm_inactivity_check(struct iwl_mvm *mvm) | ||
1127 | { | ||
1128 | unsigned long timeout_queues_map = 0; | ||
1129 | unsigned long now = jiffies; | ||
1130 | int i; | ||
1131 | |||
1132 | spin_lock_bh(&mvm->queue_info_lock); | ||
1133 | for (i = 0; i < IWL_MAX_HW_QUEUES; i++) | ||
1134 | if (mvm->queue_info[i].hw_queue_refcount > 0) | ||
1135 | timeout_queues_map |= BIT(i); | ||
1136 | spin_unlock_bh(&mvm->queue_info_lock); | ||
1137 | |||
1138 | rcu_read_lock(); | ||
1139 | |||
1140 | /* | ||
1141 | * If a queue times out - mark it as INACTIVE (don't remove right away | ||
1142 | * if we don't have to.) This is an optimization in case traffic comes | ||
1143 | * later, and we don't HAVE to use a currently-inactive queue | ||
1144 | */ | ||
1145 | for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) { | ||
1146 | struct ieee80211_sta *sta; | ||
1147 | struct iwl_mvm_sta *mvmsta; | ||
1148 | u8 sta_id; | ||
1149 | int tid; | ||
1150 | unsigned long inactive_tid_bitmap = 0; | ||
1151 | unsigned long queue_tid_bitmap; | ||
1152 | |||
1153 | spin_lock_bh(&mvm->queue_info_lock); | ||
1154 | queue_tid_bitmap = mvm->queue_info[i].tid_bitmap; | ||
1155 | |||
1156 | /* If TXQ isn't in active use anyway - nothing to do here... */ | ||
1157 | if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY) { | ||
1158 | spin_unlock_bh(&mvm->queue_info_lock); | ||
1159 | continue; | ||
1160 | } | ||
1161 | |||
1162 | /* Check to see if there are inactive TIDs on this queue */ | ||
1163 | for_each_set_bit(tid, &queue_tid_bitmap, | ||
1164 | IWL_MAX_TID_COUNT + 1) { | ||
1165 | if (time_after(mvm->queue_info[i].last_frame_time[tid] + | ||
1166 | IWL_MVM_DQA_QUEUE_TIMEOUT, now)) | ||
1167 | continue; | ||
1168 | |||
1169 | inactive_tid_bitmap |= BIT(tid); | ||
1170 | } | ||
1171 | spin_unlock_bh(&mvm->queue_info_lock); | ||
1172 | |||
1173 | /* If all TIDs are active - finish check on this queue */ | ||
1174 | if (!inactive_tid_bitmap) | ||
1175 | continue; | ||
1176 | |||
1177 | /* | ||
1178 | * If we are here - the queue hadn't been served recently and is | ||
1179 | * in use | ||
1180 | */ | ||
1181 | |||
1182 | sta_id = mvm->queue_info[i].ra_sta_id; | ||
1183 | sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); | ||
1184 | |||
1185 | /* | ||
1186 | * If the STA doesn't exist anymore, it isn't an error. It could | ||
1187 | * be that it was removed since getting the queues, and in this | ||
1188 | * case it should've inactivated its queues anyway. | ||
1189 | */ | ||
1190 | if (IS_ERR_OR_NULL(sta)) | ||
1191 | continue; | ||
1192 | |||
1193 | mvmsta = iwl_mvm_sta_from_mac80211(sta); | ||
1194 | |||
1195 | spin_lock_bh(&mvmsta->lock); | ||
1196 | spin_lock(&mvm->queue_info_lock); | ||
1197 | iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, | ||
1198 | inactive_tid_bitmap); | ||
1199 | spin_unlock(&mvm->queue_info_lock); | ||
1200 | spin_unlock_bh(&mvmsta->lock); | ||
1201 | } | ||
1202 | |||
1203 | rcu_read_unlock(); | ||
1204 | } | ||
1205 | |||
1042 | int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, | 1206 | int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif, |
1043 | enum iwl_lqm_cmd_operatrions operation, | 1207 | enum iwl_lqm_cmd_operatrions operation, |
1044 | u32 duration, u32 timeout) | 1208 | u32 duration, u32 timeout) |