author    Johannes Berg <johannes.berg@intel.com>    2013-02-26 05:28:19 -0500
committer Johannes Berg <johannes.berg@intel.com>    2013-02-28 05:48:51 -0500
commit    98891754ea9453de4db9111c91b20122ca330101 (patch)
tree      dbd53937c5979fab1d52523cfd2fbe52687b8193 /drivers
parent    e477598351a40769f5b46ccea78479a1aad6f161 (diff)
iwlwifi: don't map complete commands bidirectionally
The reason we mapped these buffers bidirectionally was that not doing
so had caused IOMMU exceptions: the HW writes status back into the
command.

Now that the first part of the command, including the write-back part,
is always in the first buffer, we no longer need to map the remaining
buffer(s) bidirectionally and can get rid of the special-casing for
commands.

This is a prerequisite for a follow-up patch fixing DMA mapping.

Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
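For illustration, here is a minimal standalone sketch of the mapping
scheme this patch arrives at. It is not the driver's actual code:
map_cmd_chunks(), its parameters, and NUM_CHUNKS are hypothetical
names. The point is that only the first chunk, which the HW writes
back into, needs a bidirectional mapping; every later chunk is only
read by the device and can use DMA_TO_DEVICE.

/* Hypothetical sketch, not from iwlwifi: map a multi-chunk host
 * command where only chunk 0 is written back by the device.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

#define NUM_CHUNKS 3

static int map_cmd_chunks(struct device *dev, void *chunk[], size_t len[],
			  dma_addr_t addr[])
{
	int i;

	for (i = 0; i < NUM_CHUNKS; i++) {
		/* Chunk 0 holds the header the HW writes status back
		 * into, so it stays bidirectional; later chunks are
		 * only read by the device.
		 */
		enum dma_data_direction dir =
			i ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

		addr[i] = dma_map_single(dev, chunk[i], len[i], dir);
		if (dma_mapping_error(dev, addr[i]))
			goto unwind;
	}
	return 0;

unwind:	/* Unmap with the same direction each chunk was mapped with. */
	while (--i >= 0)
		dma_unmap_single(dev, addr[i], len[i],
				 i ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
	return -ENOMEM;
}

Once the write-back area is confined to the first buffer like this,
every additional TB has a fixed DMA_TO_DEVICE direction, which is why
the patch below can drop the dma_dir parameter from the unmap paths
entirely instead of threading it through every caller.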
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 33
1 file changed, 11 insertions(+), 22 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8b625a7f5685..975492f0b8c8 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -367,8 +367,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
 }
 
 static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
-			       struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
-			       enum dma_data_direction dma_dir)
+			       struct iwl_cmd_meta *meta,
+			       struct iwl_tfd *tfd)
 {
 	int i;
 	int num_tbs;
@@ -392,7 +392,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
 		dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
-				 iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
+				 iwl_pcie_tfd_tb_get_len(tfd, i),
+				 DMA_TO_DEVICE);
 
 	tfd->num_tbs = 0;
 }
@@ -406,8 +407,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
  * Does NOT advance any TFD circular buffer read/write indexes
  * Does NOT free the TFD itself (which is within circular buffer)
  */
-static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
-				  enum dma_data_direction dma_dir)
+static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
 
@@ -418,8 +418,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
 	lockdep_assert_held(&txq->lock);
 
 	/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
-	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
-			   dma_dir);
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
 
 	/* free SKB */
 	if (txq->entries) {
@@ -565,22 +564,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
-	enum dma_data_direction dma_dir;
 
 	if (!q->n_bd)
 		return;
 
-	/* In the command queue, all the TBs are mapped as BIDI
-	 * so unmap them as such.
-	 */
-	if (txq_id == trans_pcie->cmd_queue)
-		dma_dir = DMA_BIDIRECTIONAL;
-	else
-		dma_dir = DMA_TO_DEVICE;
-
 	spin_lock_bh(&txq->lock);
 	while (q->write_ptr != q->read_ptr) {
-		iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
+		iwl_pcie_txq_free_tfd(trans, txq);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 	spin_unlock_bh(&txq->lock);
@@ -962,7 +952,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
-		iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+		iwl_pcie_txq_free_tfd(trans, txq);
 	}
 
 	iwl_pcie_txq_progress(trans_pcie, txq);
@@ -1340,11 +1330,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
 			data = dup_buf;
 		phys_addr = dma_map_single(trans->dev, (void *)data,
-					   cmdlen[i], DMA_BIDIRECTIONAL);
+					   cmdlen[i], DMA_TO_DEVICE);
 		if (dma_mapping_error(trans->dev, phys_addr)) {
 			iwl_pcie_tfd_unmap(trans, out_meta,
-					   &txq->tfds[q->write_ptr],
-					   DMA_BIDIRECTIONAL);
+					   &txq->tfds[q->write_ptr]);
 			idx = -ENOMEM;
 			goto out;
 		}
@@ -1418,7 +1407,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	cmd = txq->entries[cmd_index].cmd;
 	meta = &txq->entries[cmd_index].meta;
 
-	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+	iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {