Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
 drivers/net/wireless/iwlwifi/iwl-tx.c | 107 +++++++++++++++++++++++++------
 1 file changed, 94 insertions(+), 13 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index f0b7e6cfbe4f..8dd0c036d547 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -194,10 +194,34 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	struct iwl_queue *q = &txq->q;
 	struct device *dev = &priv->pci_dev->dev;
 	int i;
+	bool huge = false;
 
 	if (q->n_bd == 0)
 		return;
 
+	for (; q->read_ptr != q->write_ptr;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+		/* we have no way to tell if it is a huge cmd ATM */
+		i = get_cmd_index(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+			huge = true;
+			continue;
+		}
+
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+	if (huge) {
+		i = q->n_window;
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i <= TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
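
For context on the hunk above: the command queue keeps TFD_CMD_SLOTS regular command buffers plus one spare, oversized buffer (used for the scan command) at index q->n_window. A minimal sketch of the index helper this cleanup relies on; the names match the driver, but the body is reconstructed here as an assumption, not quoted from the tree:

/* Sketch: a huge command never owns a ring slot of its own; it always
 * resolves to the single spare buffer at q->n_window. That is why the
 * loop above can only flag that a huge mapping exists (any read_ptr
 * may have carried it) and unmaps index q->n_window at most once. */
static inline int get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
{
	if (is_huge)
		return q->n_window;		/* spare, oversized slot */

	return index & (q->n_window - 1);	/* n_window is a power of 2 */
}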
@@ -410,6 +434,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
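
The helper added above is the re-initialization half of iwl_tx_queue_init(): it assumes txq->meta, txq->cmd and the TFD ring already exist and only rewinds the software state. Roughly, the bookkeeping that iwl_queue_init() resets looks like the sketch below; the field names follow struct iwl_queue, but the watermark formulas are an assumption:

/* Hedged sketch of the state iwl_queue_init() is expected to rewind. */
q->n_bd = TFD_QUEUE_SIZE_MAX;	/* ring size, in buffer descriptors */
q->n_window = slots_num;	/* driver-visible slots, power of 2 */
q->id = txq_id;
q->low_mark = max(q->n_window / 4, 4);	/* assumed watermark heuristics */
q->high_mark = max(q->n_window / 8, 2);
q->write_ptr = q->read_ptr = 0;	/* head == tail: empty queue */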
@@ -421,8 +465,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
 	/* Tx queues */
 	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-		     txq_id++)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
 			if (txq_id == IWL_CMD_QUEUE_NUM)
 				iwl_cmd_queue_free(priv);
 			else
@@ -438,15 +481,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 
@@ -504,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 	return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+	}
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
  */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
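
Together with the rename above, the patch splits the old destroy-and-rebuild iwl_txq_ctx_reset() into an allocate-once path and a cheap re-init path. A hypothetical caller, sketched under the assumption that a NULL priv->txq means the context was never allocated:

/* Assumed bring-up pattern, not verbatim driver code: Tx queue DMA
 * memory now survives interface down/up, so only the first bring-up
 * pays for allocation. */
if (!priv->txq)
	ret = iwl_txq_ctx_alloc(priv);	/* first up: allocate + init */
else
	iwl_txq_ctx_reset(priv);	/* later ups: re-init in place */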
@@ -525,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
 				    1000);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Deallocate memory for all Tx queues */
-	iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 
@@ -1050,6 +1113,14 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
+	/* If this is a huge cmd, mark the huge flag also on the meta.flags
+	 * of the _original_ cmd. This is used for DMA mapping clean up.
+	 */
+	if (cmd->flags & CMD_SIZE_HUGE) {
+		idx = get_cmd_index(q, q->write_ptr, 0);
+		txq->meta[idx].flags = CMD_SIZE_HUGE;
+	}
+
 	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
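
The hunk above stores the huge marker on the meta entry of the regular slot index, while the payload itself goes to the spare slot. A sketch of the two views of the same write_ptr, using the index helper sketched after the first hunk:

/* Sketch: one huge command, two meta entries for the same write_ptr.
 * The regular-index entry only carries the CMD_SIZE_HUGE marker so
 * iwl_cmd_queue_free() can find live mappings later; the buffer and
 * its DMA mapping live at the spare index q->n_window. */
int mark_idx = get_cmd_index(q, q->write_ptr, 0);	/* bookkeeping only */
int data_idx = get_cmd_index(q, q->write_ptr, 1);	/* actual buffer */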
@@ -1227,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
+	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
@@ -1240,9 +1312,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 		return;
 	}
 
-	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
-	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+	/* If this is a huge cmd, clear the huge flag on the meta.flags
+	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
+	 * the DMA buffer for the scan (huge) command.
+	 */
+	if (huge) {
+		cmd_index = get_cmd_index(&txq->q, index, 0);
+		txq->meta[cmd_index].flags = 0;
+	}
+	cmd_index = get_cmd_index(&txq->q, index, huge);
+	cmd = txq->cmd[cmd_index];
+	meta = &txq->meta[cmd_index];
 
 	pci_unmap_single(priv->pci_dev,
 			 pci_unmap_addr(meta, mapping),
@@ -1264,6 +1344,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
+	meta->flags = 0;
 }
 EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
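
The completion path above enforces a simple invariant: a set CMD_SIZE_HUGE bit in txq->meta[i].flags means that slot's DMA mapping is still live. A condensed sketch of the clearing order, reconstructed from the hunks above rather than quoted from the tree:

/* Sketch of the completion-side flag handling assumed by this patch. */
static void cmd_complete_flags_sketch(struct iwl_tx_queue *txq,
				      struct iwl_queue *q, int index, bool huge)
{
	/* huge response: drop the marker on the original slot first, so a
	 * later iwl_cmd_queue_free() will not unmap the scan buffer twice */
	if (huge)
		txq->meta[get_cmd_index(q, index, 0)].flags = 0;

	/* ...unmap and dispatch the command, as in the hunks above... */

	/* always clear the meta entry actually used for the mapping */
	txq->meta[get_cmd_index(q, index, huge)].flags = 0;
}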