aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-tx.c
diff options
context:
space:
mode:
authorJohannes Berg <johannes.berg@intel.com>2010-02-24 04:57:19 -0500
committerReinette Chatre <reinette.chatre@intel.com>2010-03-09 19:13:11 -0500
commitedc1a3a090e83c48d4b33c23aa16c54b3b790ef5 (patch)
treecf18afd379f68c3244087d19c27b7831b806388b /drivers/net/wireless/iwlwifi/iwl-tx.c
parent4f4d4088b05155d4904e29d5c00316395ce32f27 (diff)
iwlwifi: clean up queue/fifo handling
4965 hardware has 7 queues reserved and the remaining ones used for aggregation, 5000 and higher need to have 10 reserved. This is not very clear in the code right now, unfortunately. Introduce a new IWL_TX_FIFO_UNUSED constant and make the queue/FIFO mapping arrays able to hold that value, and change the setup code to reserve all queues in the arrays (the queue number is the index) and use the new unused constant to not map those queues to any FIFO. Additionally, clear up the AC/queue mapping code to be more understandable. The mapping is the identity mapping right now, but with the mapping function I think it's easier to understand what happens there. Finally, HCCA isn't implemented at all and I think newer microcode removed it, so let's remove all mention of it in the code, some comments remain for 4965. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Acked-by: Shanyu Zhao <shanyu.zhao@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c105
1 file changed, 67 insertions, 38 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 08b33fcc0792..045e4a67344b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -37,26 +37,63 @@
37#include "iwl-io.h" 37#include "iwl-io.h"
38#include "iwl-helpers.h" 38#include "iwl-helpers.h"
39 39
40static const u16 default_tid_to_tx_fifo[] = { 40/*
41 IWL_TX_FIFO_AC1, 41 * mac80211 queues, ACs, hardware queues, FIFOs.
42 IWL_TX_FIFO_AC0, 42 *
43 IWL_TX_FIFO_AC0, 43 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
44 IWL_TX_FIFO_AC1, 44 *
45 IWL_TX_FIFO_AC2, 45 * Mac80211 uses the following numbers, which we get from it
46 IWL_TX_FIFO_AC2, 46 * by way of skb_get_queue_mapping(skb):
47 IWL_TX_FIFO_AC3, 47 *
48 IWL_TX_FIFO_AC3, 48 * VO 0
49 IWL_TX_FIFO_NONE, 49 * VI 1
50 IWL_TX_FIFO_NONE, 50 * BE 2
51 IWL_TX_FIFO_NONE, 51 * BK 3
52 IWL_TX_FIFO_NONE, 52 *
53 IWL_TX_FIFO_NONE, 53 *
54 IWL_TX_FIFO_NONE, 54 * Regular (not A-MPDU) frames are put into hardware queues corresponding
55 IWL_TX_FIFO_NONE, 55 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
56 IWL_TX_FIFO_NONE, 56 * own queue per aggregation session (RA/TID combination), such queues are
57 IWL_TX_FIFO_AC3 57 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
58 * order to map frames to the right queue, we also need an AC->hw queue
59 * mapping. This is implemented here.
60 *
61 * Due to the way hw queues are set up (by the hw specific modules like
62 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
63 * mapping.
64 */
65
66static const u8 tid_to_ac[] = {
67 /* this matches the mac80211 numbers */
68 2, 3, 3, 2, 1, 1, 0, 0
69};
70
71static const u8 ac_to_fifo[] = {
72 IWL_TX_FIFO_VO,
73 IWL_TX_FIFO_VI,
74 IWL_TX_FIFO_BE,
75 IWL_TX_FIFO_BK,
58}; 76};
59 77
78static inline int get_fifo_from_ac(u8 ac)
79{
80 return ac_to_fifo[ac];
81}
82
83static inline int get_queue_from_ac(u16 ac)
84{
85 return ac;
86}
87
88static inline int get_fifo_from_tid(u16 tid)
89{
90 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
91 return get_fifo_from_ac(tid_to_ac[tid]);
92
93 /* no support for TIDs 8-15 yet */
94 return -EINVAL;
95}
96
60static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv, 97static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
61 struct iwl_dma_ptr *ptr, size_t size) 98 struct iwl_dma_ptr *ptr, size_t size)
62{ 99{
@@ -591,13 +628,12 @@ static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
591 tx_cmd->next_frame_len = 0; 628 tx_cmd->next_frame_len = 0;
592} 629}
593 630
594#define RTS_HCCA_RETRY_LIMIT 3
595#define RTS_DFAULT_RETRY_LIMIT 60 631#define RTS_DFAULT_RETRY_LIMIT 60
596 632
597static void iwl_tx_cmd_build_rate(struct iwl_priv *priv, 633static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
598 struct iwl_tx_cmd *tx_cmd, 634 struct iwl_tx_cmd *tx_cmd,
599 struct ieee80211_tx_info *info, 635 struct ieee80211_tx_info *info,
600 __le16 fc, int is_hcca) 636 __le16 fc)
601{ 637{
602 u32 rate_flags; 638 u32 rate_flags;
603 int rate_idx; 639 int rate_idx;
@@ -613,8 +649,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
613 tx_cmd->data_retry_limit = data_retry_limit; 649 tx_cmd->data_retry_limit = data_retry_limit;
614 650
615 /* Set retry limit on RTS packets */ 651 /* Set retry limit on RTS packets */
616 rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT : 652 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
617 RTS_DFAULT_RETRY_LIMIT;
618 if (data_retry_limit < rts_retry_limit) 653 if (data_retry_limit < rts_retry_limit)
619 rts_retry_limit = data_retry_limit; 654 rts_retry_limit = data_retry_limit;
620 tx_cmd->rts_retry_limit = rts_retry_limit; 655 tx_cmd->rts_retry_limit = rts_retry_limit;
@@ -794,7 +829,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
794 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1); 829 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
795 } 830 }
796 831
797 txq_id = skb_get_queue_mapping(skb); 832 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
798 if (ieee80211_is_data_qos(fc)) { 833 if (ieee80211_is_data_qos(fc)) {
799 qc = ieee80211_get_qos_ctl(hdr); 834 qc = ieee80211_get_qos_ctl(hdr);
800 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 835 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
@@ -859,8 +894,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
859 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id); 894 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
860 iwl_dbg_log_tx_data_frame(priv, len, hdr); 895 iwl_dbg_log_tx_data_frame(priv, len, hdr);
861 896
862 /* set is_hcca to 0; it probably will never be implemented */ 897 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc);
863 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
864 898
865 iwl_update_stats(priv, true, fc, len); 899 iwl_update_stats(priv, true, fc, len);
866 /* 900 /*
@@ -1260,7 +1294,7 @@ EXPORT_SYMBOL(iwl_tx_cmd_complete);
1260 * Find first available (lowest unused) Tx Queue, mark it "active". 1294 * Find first available (lowest unused) Tx Queue, mark it "active".
1261 * Called only when finding queue for aggregation. 1295 * Called only when finding queue for aggregation.
1262 * Should never return anything < 7, because they should already 1296 * Should never return anything < 7, because they should already
1263 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). 1297 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
1264 */ 1298 */
1265static int iwl_txq_ctx_activate_free(struct iwl_priv *priv) 1299static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1266{ 1300{
@@ -1281,10 +1315,9 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1281 unsigned long flags; 1315 unsigned long flags;
1282 struct iwl_tid_data *tid_data; 1316 struct iwl_tid_data *tid_data;
1283 1317
1284 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 1318 tx_fifo = get_fifo_from_tid(tid);
1285 tx_fifo = default_tid_to_tx_fifo[tid]; 1319 if (unlikely(tx_fifo < 0))
1286 else 1320 return tx_fifo;
1287 return -EINVAL;
1288 1321
1289 IWL_WARN(priv, "%s on ra = %pM tid = %d\n", 1322 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
1290 __func__, ra, tid); 1323 __func__, ra, tid);
@@ -1345,13 +1378,9 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1345 return -EINVAL; 1378 return -EINVAL;
1346 } 1379 }
1347 1380
1348 if (unlikely(tid >= MAX_TID_COUNT)) 1381 tx_fifo_id = get_fifo_from_tid(tid);
1349 return -EINVAL; 1382 if (unlikely(tx_fifo_id < 0))
1350 1383 return tx_fifo_id;
1351 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1352 tx_fifo_id = default_tid_to_tx_fifo[tid];
1353 else
1354 return -EINVAL;
1355 1384
1356 sta_id = iwl_find_station(priv, ra); 1385 sta_id = iwl_find_station(priv, ra);
1357 1386
@@ -1419,7 +1448,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1419 if ((txq_id == tid_data->agg.txq_id) && 1448 if ((txq_id == tid_data->agg.txq_id) &&
1420 (q->read_ptr == q->write_ptr)) { 1449 (q->read_ptr == q->write_ptr)) {
1421 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 1450 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1422 int tx_fifo = default_tid_to_tx_fifo[tid]; 1451 int tx_fifo = get_fifo_from_tid(tid);
1423 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n"); 1452 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1424 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, 1453 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1425 ssn, tx_fifo); 1454 ssn, tx_fifo);