about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
authorJohannes Berg <johannes@sipsolutions.net>2009-06-19 16:52:43 -0400
committerJohn W. Linville <linville@tuxdriver.com>2009-07-10 15:01:56 -0400
commit45af81956e990440fe78d6d41f847664cb620609 (patch)
tree1b09ab994e02a4c7dd004ddb502a2d9a5e545df3 /drivers/net
parent90e8e424d9c071f2db22100de81af6c8f7df34ee (diff)
iwlwifi: make software queue assignment more efficient
There really is no reason to be assigning txq->swq_id all the time, once at aggregation setup is sufficient.

Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 85ae7a62109c..b35c1fd2cb29 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -348,6 +348,10 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
348 348
349 txq->need_update = 0; 349 txq->need_update = 0;
350 350
351 /* aggregation TX queues will get their ID when aggregation begins */
352 if (txq_id <= IWL_TX_FIFO_AC3)
353 txq->swq_id = txq_id;
354
351 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 355 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
352 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 356 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
353 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 357 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
@@ -734,8 +738,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
734 738
735 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id); 739 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
736 740
737 swq_id = skb_get_queue_mapping(skb); 741 txq_id = skb_get_queue_mapping(skb);
738 txq_id = swq_id;
739 if (ieee80211_is_data_qos(fc)) { 742 if (ieee80211_is_data_qos(fc)) {
740 qc = ieee80211_get_qos_ctl(hdr); 743 qc = ieee80211_get_qos_ctl(hdr);
741 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; 744 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
@@ -746,16 +749,14 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
746 hdr->seq_ctrl |= cpu_to_le16(seq_number); 749 hdr->seq_ctrl |= cpu_to_le16(seq_number);
747 seq_number += 0x10; 750 seq_number += 0x10;
748 /* aggregation is on for this <sta,tid> */ 751 /* aggregation is on for this <sta,tid> */
749 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 752 if (info->flags & IEEE80211_TX_CTL_AMPDU)
750 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 753 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
751 swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
752 }
753 priv->stations[sta_id].tid[tid].tfds_in_queue++; 754 priv->stations[sta_id].tid[tid].tfds_in_queue++;
754 } 755 }
755 756
756 txq = &priv->txq[txq_id]; 757 txq = &priv->txq[txq_id];
758 swq_id = txq->swq_id;
757 q = &txq->q; 759 q = &txq->q;
758 txq->swq_id = swq_id;
759 760
760 spin_lock_irqsave(&priv->lock, flags); 761 spin_lock_irqsave(&priv->lock, flags);
761 762
@@ -1186,6 +1187,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1186 tid_data = &priv->stations[sta_id].tid[tid]; 1187 tid_data = &priv->stations[sta_id].tid[tid];
1187 *ssn = SEQ_TO_SN(tid_data->seq_number); 1188 *ssn = SEQ_TO_SN(tid_data->seq_number);
1188 tid_data->agg.txq_id = txq_id; 1189 tid_data->agg.txq_id = txq_id;
1190 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
1189 spin_unlock_irqrestore(&priv->sta_lock, flags); 1191 spin_unlock_irqrestore(&priv->sta_lock, flags);
1190 1192
1191 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1193 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,