path: root/drivers/net/wireless/iwlwifi/iwl-agn.c
author     Johannes Berg <johannes.berg@intel.com>    2011-05-04 10:50:44 -0400
committer  Wey-Yi Guy <wey-yi.w.guy@intel.com>        2011-05-13 13:32:04 -0400
commit  214d14d4d323aab5d455b409e279f9e1e6631123 (patch)
tree    7bbdbedb41d551ed803262e3aad1ab36e0e607ba /drivers/net/wireless/iwlwifi/iwl-agn.c
parent  3fa507386dc4cdf731344cb9361e9cca373cedb9 (diff)
iwlagn: clean up TXQ indirection
All of these functions no longer need to be accessed indirectly since they're shared in all AGN devices.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Wey-Yi Guy <wey-yi.w.guy@intel.com>
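For readers skimming the log, the "indirection" in the subject is the driver's per-device table of function pointers. Below is a minimal, self-contained sketch of the pattern being removed; all names here (lib_ops, agn_txq_free_tfd, and so on) are simplified stand-ins for illustration, not the driver's real structures or the functions' new names:

/*
 * Sketch only: every AGN device shares one TXQ implementation, yet
 * calls used to go through a per-device table of function pointers.
 */
#include <stdio.h>

struct iwl_priv { int dev_id; };
struct iwl_tx_queue { int qid; };

/* the one implementation all AGN devices share */
static void agn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
        printf("dev %d: free TFD chunks for queue %d\n",
               priv->dev_id, txq->qid);
}

/* before the cleanup: an ops table still holds a pointer to it */
struct lib_ops {
        void (*txq_free_tfd)(struct iwl_priv *, struct iwl_tx_queue *);
};

static const struct lib_ops agn_ops = { .txq_free_tfd = agn_txq_free_tfd };

int main(void)
{
        struct iwl_priv priv = { .dev_id = 0 };
        struct iwl_tx_queue txq = { .qid = 3 };

        agn_ops.txq_free_tfd(&priv, &txq);  /* indirect: a pointless hop */
        agn_txq_free_tfd(&priv, &txq);      /* direct: what the cleanup enables */
        return 0;
}

The hunk below is the iwl-agn.c side of that cleanup: the shared TXQ helpers are deleted from this file (the diffstat is limited to iwl-agn.c, so the rest of the patch is not shown here).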
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c  149
1 file changed, 0 insertions, 149 deletions
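A note on the trickiest of the deleted helpers: iwl_tfd_tb_get_addr() and iwl_tfd_set_tb() pack a DMA address of up to 36 bits (matching the DMA_BIT_MASK(36) check below) plus a 12-bit length into one 8-byte buffer descriptor: address bits 31:0 go in tb->lo, address bits 35:32 in the low nibble of tb->hi_n_len, and the length in its upper 12 bits. Here is a self-contained sketch of that layout with simplified names, omitting the kernel's endianness and unaligned-access helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct iwl_tfd_tb (host-endian, aligned) */
struct tb {
        uint32_t lo;        /* DMA address bits 31:0 */
        uint16_t hi_n_len;  /* bits 3:0 = addr 35:32, bits 15:4 = length */
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
        /* addr must fit in 36 bits, len in 12 bits */
        tb->lo = (uint32_t)addr;
        tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tb *tb)
{
        return tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb *tb)
{
        return tb->hi_n_len >> 4;
}

int main(void)
{
        struct tb tb;

        tb_set(&tb, 0xD12345678ULL, 128);  /* a 36-bit address */
        printf("addr=%#" PRIx64 " len=%u\n",
               tb_get_addr(&tb), (unsigned)tb_get_len(&tb));
        return 0;
}

The odd-looking ((addr >> 16) >> 16) in the original is deliberate: when dma_addr_t is only 32 bits wide, a single shift by 32 would be undefined behavior in C, while two 16-bit shifts are well defined, and the sizeof() guard lets the compiler discard the whole branch on such configurations.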
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 2713081ed996..2bb08d7e0674 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -200,155 +200,6 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
 	return err;
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		addr |=
-		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-	return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
-
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-	tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-	return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
-	struct iwl_tfd *tfd;
-	struct pci_dev *dev = priv->pci_dev;
-	int index = txq->q.read_ptr;
-	int i;
-	int num_tbs;
-
-	tfd = &tfd_tmp[index];
-
-	/* Sanity check on number of chunks */
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite serious situation */
-		return;
-	}
-
-	/* Unmap tx_cmd */
-	if (num_tbs)
-		pci_unmap_single(dev,
-				dma_unmap_addr(&txq->meta[index], mapping),
-				dma_unmap_len(&txq->meta[index], len),
-				PCI_DMA_BIDIRECTIONAL);
-
-	/* Unmap chunks, if any. */
-	for (i = 1; i < num_tbs; i++)
-		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
-	/* free SKB */
-	if (txq->txb) {
-		struct sk_buff *skb;
-
-		skb = txq->txb[txq->q.read_ptr].skb;
-
-		/* can be called from irqs-disabled context */
-		if (skb) {
-			dev_kfree_skb_any(skb);
-			txq->txb[txq->q.read_ptr].skb = NULL;
-		}
-	}
-}
-
-int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-				 struct iwl_tx_queue *txq,
-				 dma_addr_t addr, u16 len,
-				 u8 reset, u8 pad)
-{
-	struct iwl_queue *q;
-	struct iwl_tfd *tfd, *tfd_tmp;
-	u32 num_tbs;
-
-	q = &txq->q;
-	tfd_tmp = (struct iwl_tfd *)txq->tfds;
-	tfd = &tfd_tmp[q->write_ptr];
-
-	if (reset)
-		memset(tfd, 0, sizeof(*tfd));
-
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error can not send more than %d chunks\n",
-			IWL_NUM_OF_TBS);
-		return -EINVAL;
-	}
-
-	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
-		return -EINVAL;
-
-	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERR(priv, "Unaligned address = %llx\n",
-			(unsigned long long)addr);
-
-	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
-	return 0;
-}
-
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl_hw_tx_queue_init(struct iwl_priv *priv,
-			 struct iwl_tx_queue *txq)
-{
-	int txq_id = txq->q.id;
-
-	/* Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
-			   txq->q.dma_addr >> 8);
-
-	return 0;
-}
-
 static void iwl_bg_beacon_update(struct work_struct *work)
 {
 	struct iwl_priv *priv =