about summary refs log tree commit diff stats
path: root/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c')
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c | 35
1 files changed, 19 insertions, 16 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 918874067bd3..21a8a672fbb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -37,6 +37,8 @@
37#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
38#include "iwl-op-mode.h" 38#include "iwl-op-mode.h"
39#include "iwl-trans-pcie-int.h" 39#include "iwl-trans-pcie-int.h"
40/* FIXME: need to abstract out TX command (once we know what it looks like) */
41#include "iwl-commands.h"
40 42
41#define IWL_TX_CRC_SIZE 4 43#define IWL_TX_CRC_SIZE 4
42#define IWL_TX_DELIMITER_SIZE 4 44#define IWL_TX_DELIMITER_SIZE 4
@@ -58,7 +60,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
58 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 60 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
59 __le16 bc_ent; 61 __le16 bc_ent;
60 struct iwl_tx_cmd *tx_cmd = 62 struct iwl_tx_cmd *tx_cmd =
61 (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload; 63 (void *) txq->entries[txq->q.write_ptr].cmd->payload;
62 64
63 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 65 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
64 66
@@ -99,7 +101,7 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
99 if (txq->need_update == 0) 101 if (txq->need_update == 0)
100 return; 102 return;
101 103
102 if (cfg(trans)->base_params->shadow_reg_enable) { 104 if (trans->cfg->base_params->shadow_reg_enable) {
103 /* shadow register enabled */ 105 /* shadow register enabled */
104 iwl_write32(trans, HBUS_TARG_WRPTR, 106 iwl_write32(trans, HBUS_TARG_WRPTR,
105 txq->q.write_ptr | (txq_id << 8)); 107 txq->q.write_ptr | (txq_id << 8));
@@ -221,13 +223,14 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
221 223
222 lockdep_assert_held(&txq->lock); 224 lockdep_assert_held(&txq->lock);
223 225
224 iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); 226 iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
227 &tfd_tmp[index], dma_dir);
225 228
226 /* free SKB */ 229 /* free SKB */
227 if (txq->skbs) { 230 if (txq->entries) {
228 struct sk_buff *skb; 231 struct sk_buff *skb;
229 232
230 skb = txq->skbs[index]; 233 skb = txq->entries[index].skb;
231 234
232 /* Can be called from irqs-disabled context 235 /* Can be called from irqs-disabled context
233 * If skb is not NULL, it means that the whole queue is being 236 * If skb is not NULL, it means that the whole queue is being
@@ -235,7 +238,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
235 */ 238 */
236 if (skb) { 239 if (skb) {
237 iwl_op_mode_free_skb(trans->op_mode, skb); 240 iwl_op_mode_free_skb(trans->op_mode, skb);
238 txq->skbs[index] = NULL; 241 txq->entries[index].skb = NULL;
239 } 242 }
240 } 243 }
241} 244}
@@ -358,7 +361,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
358 u8 sta_id = 0; 361 u8 sta_id = 0;
359 __le16 bc_ent; 362 __le16 bc_ent;
360 struct iwl_tx_cmd *tx_cmd = 363 struct iwl_tx_cmd *tx_cmd =
361 (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload; 364 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
362 365
363 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 366 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
364 367
@@ -578,8 +581,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
578 } 581 }
579 582
580 idx = get_cmd_index(q, q->write_ptr); 583 idx = get_cmd_index(q, q->write_ptr);
581 out_cmd = txq->cmd[idx]; 584 out_cmd = txq->entries[idx].cmd;
582 out_meta = &txq->meta[idx]; 585 out_meta = &txq->entries[idx].meta;
583 586
584 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 587 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
585 if (cmd->flags & CMD_WANT_SKB) 588 if (cmd->flags & CMD_WANT_SKB)
@@ -772,8 +775,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
772 spin_lock(&txq->lock); 775 spin_lock(&txq->lock);
773 776
774 cmd_index = get_cmd_index(&txq->q, index); 777 cmd_index = get_cmd_index(&txq->q, index);
775 cmd = txq->cmd[cmd_index]; 778 cmd = txq->entries[cmd_index].cmd;
776 meta = &txq->meta[cmd_index]; 779 meta = &txq->entries[cmd_index].meta;
777 780
778 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 781 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
779 DMA_BIDIRECTIONAL); 782 DMA_BIDIRECTIONAL);
@@ -905,8 +908,8 @@ cancel:
905 * in later, it will possibly set an invalid 908 * in later, it will possibly set an invalid
906 * address (cmd->meta.source). 909 * address (cmd->meta.source).
907 */ 910 */
908 trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &= 911 trans_pcie->txq[trans_pcie->cmd_queue].
909 ~CMD_WANT_SKB; 912 entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
910 } 913 }
911 914
912 if (cmd->resp_pkt) { 915 if (cmd->resp_pkt) {
@@ -961,12 +964,12 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
961 q->read_ptr != index; 964 q->read_ptr != index;
962 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 965 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
963 966
964 if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL)) 967 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
965 continue; 968 continue;
966 969
967 __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]); 970 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
968 971
969 txq->skbs[txq->q.read_ptr] = NULL; 972 txq->entries[txq->q.read_ptr].skb = NULL;
970 973
971 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 974 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
972 975