author		Johannes Berg <johannes.berg@intel.com>	2011-05-04 10:50:44 -0400
committer	Wey-Yi Guy <wey-yi.w.guy@intel.com>	2011-05-13 13:32:04 -0400
commit		214d14d4d323aab5d455b409e279f9e1e6631123
tree		7bbdbedb41d551ed803262e3aad1ab36e0e607ba /drivers/net/wireless/iwlwifi/iwl-tx.c
parent		3fa507386dc4cdf731344cb9361e9cca373cedb9
iwlagn: clean up TXQ indirection
All of these functions no longer need to be accessed indirectly
since they're shared in all AGN devices.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
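For context, a minimal sketch of the indirection this patch removes (not part of the patch: the types below are simplified stand-ins and the drain_old/drain_new helpers are hypothetical). Before this change, call sites reached the TXQ handlers through a per-device ops table (priv->cfg->ops->lib->txq_free_tfd and friends) even though every AGN device shares one implementation; after it, they call the iwlagn_* functions directly:

/* Hedged sketch only -- simplified stand-ins for the real iwlwifi types. */
struct iwl_priv;
struct iwl_tx_queue;

/* Shared AGN implementation, now called directly. */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);

/* Before: a per-device ops table held pointers to these shared functions. */
struct iwl_lib_ops {
	void (*txq_free_tfd)(struct iwl_priv *priv, struct iwl_tx_queue *txq);
};

static void drain_old(struct iwl_priv *priv, struct iwl_tx_queue *txq,
		      const struct iwl_lib_ops *lib)
{
	lib->txq_free_tfd(priv, txq);	/* indirect: pointer chase per call */
}

static void drain_new(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	iwlagn_txq_free_tfd(priv, txq);	/* direct: resolved at link time */
}

The same pattern applies to the other two indirections dropped in the diff below, txq_init and txq_attach_buf_to_tfd.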
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--	drivers/net/wireless/iwlwifi/iwl-tx.c	160
1 file changed, 154 insertions(+), 6 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 7843195efb05..302284bef961 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
+#include "iwl-agn.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-sta.h"
@@ -85,6 +86,154 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	txq->need_update = 0;
 }
 
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
+	struct iwl_tfd *tfd;
+	struct pci_dev *dev = priv->pci_dev;
+	int index = txq->q.read_ptr;
+	int i;
+	int num_tbs;
+
+	tfd = &tfd_tmp[index];
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+				dma_unmap_addr(&txq->meta[index], mapping),
+				dma_unmap_len(&txq->meta[index], len),
+				PCI_DMA_BIDIRECTIONAL);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++)
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
+		}
+	}
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len,
+				 u8 reset, u8 pad)
+{
+	struct iwl_queue *q;
+	struct iwl_tfd *tfd, *tfd_tmp;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd_tmp = (struct iwl_tfd *)txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum 20 Tx buffers */
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+			IWL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+		return -EINVAL;
+
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERR(priv, "Unaligned address = %llx\n",
+			(unsigned long long)addr);
+
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	/* Circular buffer (TFD queue in DRAM) physical base address */
+	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+			   txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
 /**
  * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
  */
@@ -97,7 +246,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
 		return;
 
 	while (q->write_ptr != q->read_ptr) {
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+		iwlagn_txq_free_tfd(priv, txq);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
 }
@@ -391,7 +540,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		return ret;
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	iwlagn_tx_queue_init(priv, txq);
 
 	return 0;
 err:
@@ -420,7 +569,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	iwlagn_tx_queue_init(priv, txq);
 }
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -553,9 +702,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
 
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   phys_addr, fix_size, 1,
-						   U32_PAD(cmd->len[0]));
+	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, fix_size, 1,
+				     U32_PAD(cmd->len[0]));
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
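A note on the new TB helpers added above: iwl_tfd_set_tb() packs a 36-bit DMA address into the 32-bit 'lo' word plus the low 4 bits of the 16-bit 'hi_n_len' word, whose upper 12 bits carry the buffer length; the split '(addr >> 16) >> 16' avoids a shift by 32, which would be undefined when dma_addr_t is only 32 bits wide. A minimal user-space round-trip sketch (my own illustration, not from the patch; stdint types stand in for dma_addr_t/__le16 and the little-endian conversions are skipped):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0xabc12345678ULL & 0xFFFFFFFFFULL;	/* 36-bit address */
	uint16_t len = 0x123;					/* 12-bit length */

	/* Pack, mirroring iwl_tfd_set_tb(). */
	uint32_t lo = (uint32_t)addr;
	uint16_t hi_n_len = (uint16_t)((len << 4) | (((addr >> 16) >> 16) & 0xF));

	/* Unpack, mirroring iwl_tfd_tb_get_addr()/iwl_tfd_tb_get_len(). */
	uint64_t got_addr = lo | (((uint64_t)(hi_n_len & 0xF) << 16) << 16);
	uint16_t got_len = hi_n_len >> 4;

	assert(got_addr == addr);
	assert(got_len == len);
	return 0;
}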