aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless/iwlwifi/iwl-3945.c
diff options
context:
space:
mode:
authorJohannes Berg <johannes.berg@intel.com>2010-05-17 05:37:34 -0400
committerReinette Chatre <reinette.chatre@intel.com>2010-06-06 02:18:33 -0400
commitff0d91c3eea6e25b47258349b455671f98f1b0cd (patch)
treecc69b1e6603db5c36e6026518882dd1b458c2327 /drivers/net/wireless/iwlwifi/iwl-3945.c
parent519c7c416870c6e71e9553786a529d89f55ef395 (diff)
iwlwifi: reduce memory allocation
Currently, the driver allocates up to 19 skb pointers for each TFD, of which we have 256 per queue. This means that for each TX queue, we allocate 19k/38k (an order 4 or 5 allocation on 32/64 bit respectively) just for each queue's "txb" array, which contains only the SKB pointers. However, due to the way we use these pointers only the first one can ever be assigned. When the driver was initially written, the idea was that it could be passed multiple SKBs for each TFD and attach all those to implement gather DMA. However, due to constraints in the userspace API and lack of TCP/IP level checksumming in the device, this is in fact not possible. And even if it were, the SKBs would be chained, and we wouldn't need to keep pointers to each anyway. Change this to only keep track of one SKB per TFD, and thereby reduce memory consumption to just one pointer per TFD, which is an order 0 allocation per transmit queue. Signed-off-by: Johannes Berg <johannes.berg@intel.com> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-3945.c')
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c25
1 file changed, 13 insertions, 12 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index fec05b5c334e..658c6143f998 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -279,8 +279,8 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {

		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
-		tx_info->skb[0] = NULL;
+		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
+		tx_info->skb = NULL;
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
	}

@@ -315,7 +315,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
		return;
	}

-	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
+	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
	ieee80211_tx_info_clear_status(info);

	/* Fill the MRR chain with some info about on-chip retransmissions */
@@ -702,19 +702,20 @@ void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)

	/* unmap chunks if any */

-	for (i = 1; i < counter; i++) {
+	for (i = 1; i < counter; i++)
		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
			 le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-		if (txq->txb) {
-			struct sk_buff *skb;

-			skb = txq->txb[txq->q.read_ptr].skb[i - 1];
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;

-			/* can be called from irqs-disabled context */
-			if (skb) {
-				dev_kfree_skb_any(skb);
-				txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
-			}
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}