author     Gregory Greenman <gregory.greenman@intel.com>    2008-08-04 04:00:40 -0400
committer  John W. Linville <linville@tuxdriver.com>        2008-08-04 15:09:12 -0400
commit     da99c4b6c25964b90c79f19beccda208df1a865a (patch)
tree       b16752072e71c6491e18c3ef50e064d858a66452
parent     4c43e0d0ecd5196ed5c67f64ed2f1860770eed34 (diff)
iwlwifi: memory allocation optimization
This patch optimizes memory allocation. The cmd member of iwl_tx_queue was
previously allocated as one contiguous block of memory. This patch allocates
a separate memory chunk for each command and maps/unmaps these chunks at
run time.

Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-5000.c |  6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h  |  3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c |  2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c   | 76
4 files changed, 59 insertions(+), 28 deletions(-)
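
For orientation before reading the diff: the core of the change is replacing one
pci_alloc_consistent() block holding every command slot with per-slot kmalloc()
buffers that are pci_map_single()'d only while the hardware owns them. Below is
a minimal standalone sketch of that pattern, not the driver code itself;
demo_queue, demo_cmd, DEMO_SLOTS and the demo_* helpers are illustrative names,
and the command-queue special cases (the extra huge scan slot, IWL_MAX_SCAN_SIZE)
are omitted.

/*
 * Simplified sketch of the allocation scheme introduced by this patch:
 * each command slot gets its own kmalloc()'d buffer (GFP_DMA keeps it in a
 * DMA-able zone), and a slot is DMA-mapped only for the time the device
 * needs it, instead of keeping one large coherent block mapped forever.
 */
#include <linux/pci.h>
#include <linux/slab.h>

#define DEMO_SLOTS 8

struct demo_cmd {
	u8 payload[128];
};

struct demo_queue {
	struct demo_cmd *cmd[DEMO_SLOTS];	/* one chunk per slot */
};

static int demo_queue_init(struct demo_queue *q)
{
	int i;

	/* Allocate each command slot separately. */
	for (i = 0; i < DEMO_SLOTS; i++) {
		q->cmd[i] = kmalloc(sizeof(struct demo_cmd),
				    GFP_KERNEL | GFP_DMA);
		if (!q->cmd[i])
			goto err;
	}
	return 0;
err:
	while (--i >= 0)
		kfree(q->cmd[i]);
	return -ENOMEM;
}

static dma_addr_t demo_map_slot(struct pci_dev *pdev, struct demo_queue *q,
				int idx)
{
	/* Map one slot just before handing it to the device ... */
	return pci_map_single(pdev, q->cmd[idx], sizeof(struct demo_cmd),
			      PCI_DMA_TODEVICE);
}

static void demo_unmap_slot(struct pci_dev *pdev, dma_addr_t addr)
{
	/* ... and unmap it again once the device has consumed it. */
	pci_unmap_single(pdev, addr, sizeof(struct demo_cmd),
			 PCI_DMA_TODEVICE);
}

The trade-off is paying a streaming map/unmap per command in flight in exchange
for dropping the single large coherent allocation; the real driver applies the
same idea to txq->cmd[] in the hunks that follow.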
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 1d793c093f1a..56dbc8144a34 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -939,8 +939,8 @@ static void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 
 	if (txq_id != IWL_CMD_QUEUE_NUM) {
-		sta = txq->cmd[txq->q.write_ptr].cmd.tx.sta_id;
-		sec_ctl = txq->cmd[txq->q.write_ptr].cmd.tx.sec_ctl;
+		sta = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
 
 		switch (sec_ctl & TX_CMD_SEC_MSK) {
 		case TX_CMD_SEC_CCM:
@@ -979,7 +979,7 @@ static void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 	u8 sta = 0;
 
 	if (txq_id != IWL_CMD_QUEUE_NUM)
-		sta = txq->cmd[txq->q.read_ptr].cmd.tx.sta_id;
+		sta = txq->cmd[txq->q.read_ptr]->cmd.tx.sta_id;
 
 	shared_data->queues_byte_cnt_tbls[txq_id].tfd_offset[txq->q.read_ptr].
 		val = cpu_to_le16(1 | (sta << 12));
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 848786ab7916..c19db438306c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -135,8 +135,7 @@ struct iwl_tx_info {
 struct iwl_tx_queue {
 	struct iwl_queue q;
 	struct iwl_tfd_frame *bd;
-	struct iwl_cmd *cmd;
-	dma_addr_t dma_addr_cmd;
+	struct iwl_cmd *cmd[TFD_TX_CMD_SLOTS];
 	struct iwl_tx_info *txb;
 	int need_update;
 	int sched_retry;
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 8fa991b7202a..6512834bb916 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -228,7 +228,7 @@ cancel:
 		 * TX cmd queue. Otherwise in case the cmd comes
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source). */
-		qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
+		qcmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
 		qcmd->meta.flags &= ~CMD_WANT_SKB;
 	}
 fail:
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 39f19ebee973..aa98c76d8195 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -208,11 +208,12 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
-	int len;
+	int i, slots_num, len;
 
 	if (q->n_bd == 0)
 		return;
@@ -227,7 +228,12 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 		len += IWL_MAX_SCAN_SIZE;
 
 	/* De-alloc array of command/tx buffers */
-	pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+	slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
+			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+	for (i = 0; i < slots_num; i++)
+		kfree(txq->cmd[i]);
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		kfree(txq->cmd[slots_num]);
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
@@ -400,8 +406,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
 			     struct iwl_tx_queue *txq,
 			     int slots_num, u32 txq_id)
 {
-	struct pci_dev *dev = priv->pci_dev;
-	int len;
+	int i, len;
 	int rc = 0;
 
 	/*
@@ -412,17 +417,25 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
 	 * For normal Tx queues (all other queues), no super-size command
 	 * space is needed.
 	 */
-	len = sizeof(struct iwl_cmd) * slots_num;
-	if (txq_id == IWL_CMD_QUEUE_NUM)
-		len += IWL_MAX_SCAN_SIZE;
-	txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
-	if (!txq->cmd)
-		return -ENOMEM;
+	len = sizeof(struct iwl_cmd);
+	for (i = 0; i <= slots_num; i++) {
+		if (i == slots_num) {
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				len += IWL_MAX_SCAN_SIZE;
+			else
+				continue;
+		}
+
+		txq->cmd[i] = kmalloc(len, GFP_KERNEL | GFP_DMA);
+		if (!txq->cmd[i])
+			return -ENOMEM;
+	}
 
 	/* Alloc driver data array and TFD circular buffer */
 	rc = iwl_tx_queue_alloc(priv, txq, txq_id);
 	if (rc) {
-		pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd);
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->cmd[i]);
 
 		return -ENOMEM;
 	}
@@ -451,7 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
 	/* Tx queues */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+		iwl_tx_queue_free(priv, txq_id);
 
 	/* Keep-warm buffer */
 	iwl_kw_free(priv);
@@ -859,7 +872,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	txq->txb[q->write_ptr].skb[0] = skb;
 
 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_cmd = &txq->cmd[idx];
+	out_cmd = txq->cmd[idx];
 	tx_cmd = &out_cmd->cmd.tx;
 	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
 	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
@@ -899,8 +912,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Physical address of this Tx command's header (not MAC header!),
 	 * within command buffer array. */
-	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
-		     offsetof(struct iwl_cmd, hdr);
+	txcmd_phys = pci_map_single(priv->pci_dev, out_cmd,
+			sizeof(struct iwl_cmd), PCI_DMA_TODEVICE);
+	txcmd_phys += offsetof(struct iwl_cmd, hdr);
 
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
@@ -1004,7 +1018,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	u32 idx;
 	u16 fix_size;
 	dma_addr_t phys_addr;
-	int ret;
+	int len, ret;
 	unsigned long flags;
 
 	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
@@ -1034,7 +1048,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	control_flags = (u32 *) tfd;
 
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
-	out_cmd = &txq->cmd[idx];
+	out_cmd = txq->cmd[idx];
 
 	out_cmd->hdr.cmd = cmd->id;
 	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
@@ -1048,9 +1062,11 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			INDEX_TO_SEQ(q->write_ptr));
 	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
 		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
-
-	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
-			offsetof(struct iwl_cmd, hdr);
+	len = (idx == TFD_CMD_SLOTS) ?
+			IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
+	phys_addr = pci_map_single(priv->pci_dev, out_cmd, len,
+			PCI_DMA_TODEVICE);
+	phys_addr += offsetof(struct iwl_cmd, hdr);
 	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
 
 	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -1115,6 +1131,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
+	struct iwl_tfd_frame *bd = &txq->bd[index];
+	dma_addr_t dma_addr;
+	int is_odd, buf_len;
 	int nfreed = 0;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
@@ -1132,6 +1151,19 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 				  q->write_ptr, q->read_ptr);
 			queue_work(priv->workqueue, &priv->restart);
 		}
+		is_odd = (index/2) & 0x1;
+		if (is_odd) {
+			dma_addr = IWL_GET_BITS(bd->pa[index], tb2_addr_lo16) |
+					(IWL_GET_BITS(bd->pa[index],
+						      tb2_addr_hi20) << 16);
+			buf_len = IWL_GET_BITS(bd->pa[index], tb2_len);
+		} else {
+			dma_addr = le32_to_cpu(bd->pa[index].tb1_addr);
+			buf_len = IWL_GET_BITS(bd->pa[index], tb1_len);
+		}
+
+		pci_unmap_single(priv->pci_dev, dma_addr, buf_len,
+				 PCI_DMA_TODEVICE);
 		nfreed++;
 	}
 }
@@ -1163,7 +1195,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
 
 	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
 
 	/* Input error checking is done when commands are added to queue. */
 	if (cmd->meta.flags & CMD_WANT_SKB) {