path: root/drivers/net/wireless/iwlwifi/iwl-tx.c
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c  233
1 file changed, 45 insertions(+), 188 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b0ee86c62685..7d2b6e11f73e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -76,116 +76,6 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
 	memset(ptr, 0, sizeof(*ptr));
 }
 
-static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	dma_addr_t addr = get_unaligned_le32(&tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		addr |=
-		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
-	return addr;
-}
-
-static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
-	return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
-				  dma_addr_t addr, u16 len)
-{
-	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-	u16 hi_n_len = len << 4;
-
-	put_unaligned_le32(addr, &tb->lo);
-	if (sizeof(dma_addr_t) > sizeof(u32))
-		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
-	tb->hi_n_len = cpu_to_le16(hi_n_len);
-
-	tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
-	return tfd->num_tbs & 0x1f;
-}
-
-/**
- * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-static void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
-	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)&txq->tfds[0];
-	struct iwl_tfd *tfd;
-	struct pci_dev *dev = priv->pci_dev;
-	int index = txq->q.read_ptr;
-	int i;
-	int num_tbs;
-
-	tfd = &tfd_tmp[index];
-
-	/* Sanity check on number of chunks */
-	num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERROR("Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite serious situation */
-		return;
-	}
-
-	/* Unmap tx_cmd */
-	if (num_tbs)
-		pci_unmap_single(dev,
-				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
-				pci_unmap_len(&txq->cmd[index]->meta, len),
-				PCI_DMA_TODEVICE);
-
-	/* Unmap chunks, if any. */
-	for (i = 1; i < num_tbs; i++) {
-		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
-				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
-
-		if (txq->txb) {
-			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
-			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
-		}
-	}
-}
-
-static int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
-					struct iwl_tfd *tfd,
-					dma_addr_t addr, u16 len)
-{
-
-	u32 num_tbs = iwl_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum 20 Tx buffers */
-	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERROR("Error can not send more than %d chunks\n",
-			  IWL_NUM_OF_TBS);
-		return -EINVAL;
-	}
-
-	BUG_ON(addr & ~DMA_BIT_MASK(36));
-	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERROR("Unaligned address = %llx\n",
-			  (unsigned long long)addr);
-
-	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
-
-	return 0;
-}
-
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
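
Note: the helpers removed above implement the AGN TFD buffer-descriptor layout, in which a 36-bit DMA address is split across a 32-bit "lo" word plus the low 4 bits of "hi_n_len", and the remaining 12 bits of "hi_n_len" carry the buffer length. This commit moves that logic behind device-specific ops rather than dropping it. Below is a minimal user-space sketch of just the packing arithmetic (native-endian; it deliberately ignores the le16/le32 and unaligned-access handling the real driver code needs, and is not kernel-buildable as-is):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the deleted iwl_tfd_tb layout: lo = low 32 address bits,
 * hi_n_len bits [3:0] = address bits 35:32, bits [15:4] = length. */
struct tb_sketch {
	uint32_t lo;
	uint16_t hi_n_len;
};

static void tb_set(struct tb_sketch *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;                          /* low 32 bits */
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF)    /* bits 35:32 */
				  | ((uint32_t)len << 4));/* 12-bit length */
}

static uint64_t tb_get_addr(const struct tb_sketch *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb_sketch *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_sketch tb;

	tb_set(&tb, 0xABCD12345678ULL & ((1ULL << 36) - 1), 300);
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_get_addr(&tb), (unsigned)tb_get_len(&tb));
	return 0;
}
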
@@ -241,7 +131,7 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
  * Free all buffers.
  * 0-fill, but do not free "txq" descriptor structure.
  */
-static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
+void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
@@ -254,7 +144,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	/* first, empty all BD's */
 	for (; q->write_ptr != q->read_ptr;
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-		iwl_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
 	len = sizeof(struct iwl_cmd) * q->n_window;
 
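
Note: with this change the shared queue-teardown path no longer knows the TFD format; it dispatches through the per-device library ops table (priv->cfg->ops->lib), so 3945-class hardware and the AGN devices can each plug in their own TFD handlers. A toy user-space model of that dispatch pattern; the types, names, and bodies here are illustrative only, except the op names txq_free_tfd/txq_init, which appear in this diff:

#include <stdio.h>

struct tx_queue { int id; };

struct lib_ops {
	void (*txq_init)(struct tx_queue *txq);
	void (*txq_free_tfd)(struct tx_queue *txq);
};

static void agn_txq_init(struct tx_queue *txq)
{
	printf("agn: point NIC at the TFD ring of queue %d\n", txq->id);
}

static void agn_txq_free_tfd(struct tx_queue *txq)
{
	printf("agn: free TFD at read_ptr of queue %d\n", txq->id);
}

static const struct lib_ops agn_lib = {
	.txq_init     = agn_txq_init,
	.txq_free_tfd = agn_txq_free_tfd,
};

int main(void)
{
	struct tx_queue txq = { .id = 4 };
	const struct lib_ops *lib = &agn_lib;  /* selected per device type */

	lib->txq_init(&txq);
	lib->txq_free_tfd(&txq);
	return 0;
}
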
@@ -264,7 +154,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 
 	/* De-alloc circular buffer of TFDs */
 	if (txq->q.n_bd)
-		pci_free_consistent(dev, sizeof(struct iwl_tfd) *
+		pci_free_consistent(dev, priv->hw_params.tfd_size *
 				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
 	/* De-alloc array of per-TFD driver data */
@@ -274,7 +164,7 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
-
+EXPORT_SYMBOL(iwl_tx_queue_free);
 
 /**
  * iwl_cmd_queue_free - Deallocate DMA queue.
@@ -388,6 +278,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 				 struct iwl_tx_queue *txq, u32 id)
 {
 	struct pci_dev *dev = priv->pci_dev;
+	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
 
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
@@ -395,22 +286,20 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 		txq->txb = kmalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
-			IWL_ERROR("kmalloc for auxiliary BD "
+			IWL_ERR(priv, "kmalloc for auxiliary BD "
 				  "structures failed\n");
 			goto error;
 		}
-	} else
+	} else {
 		txq->txb = NULL;
+	}
 
 	/* Circular buffer of transmit frame descriptors (TFDs),
 	 * shared with device */
-	txq->tfds = pci_alloc_consistent(dev,
-			sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX,
-			&txq->q.dma_addr);
+	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
 
 	if (!txq->tfds) {
-		IWL_ERROR("pci_alloc_consistent(%zd) failed\n",
-			  sizeof(txq->tfds[0]) * TFD_QUEUE_SIZE_MAX);
+		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
 		goto error;
 	}
 	txq->q.id = id;
@@ -424,42 +313,11 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 	return -ENOMEM;
 }
 
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-static int iwl_hw_tx_queue_init(struct iwl_priv *priv,
-				struct iwl_tx_queue *txq)
-{
-	int ret;
-	unsigned long flags;
-	int txq_id = txq->q.id;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	ret = iwl_grab_nic_access(priv);
-	if (ret) {
-		spin_unlock_irqrestore(&priv->lock, flags);
-		return ret;
-	}
-
-	/* Circular buffer (TFD queue in DRAM) physical base address */
-	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
-			   txq->q.dma_addr >> 8);
-
-	iwl_release_nic_access(priv);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	return 0;
-}
-
 /**
  * iwl_tx_queue_init - Allocate and initialize one tx/cmd queue
  */
-static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 		      int slots_num, u32 txq_id)
 {
 	int i, len;
 	int ret;
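
Note: the deleted iwl_hw_tx_queue_init() is replaced by the device-specific txq_init op invoked further down in iwl_tx_queue_init(). What it did is visible above: under priv->lock and NIC access, it wrote the TFD ring's DMA base address, shifted right by 8, into FH_MEM_CBBC_QUEUE(txq_id), which implies the ring base is expected to be 256-byte aligned. A user-space toy of just that register-value computation (no real MMIO; the address below is made up):

#include <stdint.h>
#include <stdio.h>

/* toy: the CBBC register holds dma_addr >> 8; this models only the
 * value written, not the locking or the iwl_write_direct32() access */
static uint32_t cbbc_reg_val(uint64_t ring_dma_addr)
{
	return (uint32_t)(ring_dma_addr >> 8);
}

int main(void)
{
	uint64_t dma_addr = 0x1f3c4500ULL;   /* example, 256-byte aligned */

	printf("FH_MEM_CBBC_QUEUE value: 0x%08x\n", cbbc_reg_val(dma_addr));
	return 0;
}
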
@@ -501,7 +359,7 @@ static int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
 	/* Tell device where to find queue */
-	iwl_hw_tx_queue_init(priv, txq);
+	priv->cfg->ops->lib->txq_init(priv, txq);
 
 	return 0;
 err:
@@ -516,6 +374,8 @@ err:
 	}
 	return -ENOMEM;
 }
+EXPORT_SYMBOL(iwl_tx_queue_init);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -557,13 +417,13 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
 				priv->hw_params.scd_bc_tbls_size);
 	if (ret) {
-		IWL_ERROR("Scheduler BC Table allocation failed\n");
+		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
 		goto error_bc_tbls;
 	}
 	/* Alloc keep-warm buffer */
 	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
 	if (ret) {
-		IWL_ERROR("Keep Warm allocation failed\n");
+		IWL_ERR(priv, "Keep Warm allocation failed\n");
 		goto error_kw;
 	}
 	spin_lock_irqsave(&priv->lock, flags);
@@ -589,7 +449,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
 				       txq_id);
 		if (ret) {
-			IWL_ERROR("Tx %d queue init failed\n", txq_id);
+			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
 			goto error;
 		}
 	}
@@ -802,7 +662,7 @@ static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
 		break;
 
 	default:
-		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
+		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
 		break;
 	}
 }
@@ -822,7 +682,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct iwl_tfd *tfd;
 	struct iwl_tx_queue *txq;
 	struct iwl_queue *q;
 	struct iwl_cmd *out_cmd;
@@ -850,7 +709,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
 	     IWL_INVALID_RATE) {
-		IWL_ERROR("ERROR: No TX rate available.\n");
+		IWL_ERR(priv, "ERROR: No TX rate available.\n");
 		goto drop_unlock;
 	}
 
@@ -913,10 +772,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	spin_lock_irqsave(&priv->lock, flags);
 
-	/* Set up first empty TFD within this queue's circular TFD buffer */
-	tfd = &txq->tfds[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
-
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
 	txq->txb[q->write_ptr].skb[0] = skb;
@@ -970,7 +825,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	/* Add buffer containing Tx command and MAC(!) header to TFD's
 	 * first entry */
 	txcmd_phys += offsetof(struct iwl_cmd, hdr);
-	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   txcmd_phys, len, 1, 0);
 
 	if (info->control.hw_key)
 		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
@@ -981,7 +837,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (len) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   len, PCI_DMA_TODEVICE);
-		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
+		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+							   phys_addr, len,
+							   0, 0);
 	}
 
 	/* Tell NIC about any 2-byte padding after MAC header */
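
Note: the txq_attach_buf_to_tfd op now takes the queue instead of a raw TFD pointer, plus a "reset" flag and a "pad" count. In the data path above it is called twice per frame: first for the Tx command + MAC header with reset = 1 (which replaces the explicit memset of the TFD deleted earlier), then for the payload with reset = 0 to append a second buffer. A toy user-space model of that calling pattern; the struct and helper below are illustrative, not the driver's implementation, and only NUM_OF_TBS (20, per IWL_NUM_OF_TBS) and the call sequence come from this diff:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_OF_TBS 20  /* mirrors IWL_NUM_OF_TBS: max buffers per TFD */

struct toy_tfd {
	uint64_t addr[NUM_OF_TBS];
	uint16_t len[NUM_OF_TBS];
	uint8_t  num_tbs;
};

/* 'reset' starts a fresh descriptor; how 'pad' is consumed is
 * device-specific, so this toy only records that it was passed */
static int attach_buf_to_tfd(struct toy_tfd *tfd, uint64_t addr, uint16_t len,
			     int reset, uint8_t pad)
{
	(void)pad;
	if (reset)
		memset(tfd, 0, sizeof(*tfd));
	if (tfd->num_tbs >= NUM_OF_TBS)
		return -1;
	tfd->addr[tfd->num_tbs] = addr;
	tfd->len[tfd->num_tbs] = len;
	tfd->num_tbs++;
	return 0;
}

int main(void)
{
	struct toy_tfd tfd;

	attach_buf_to_tfd(&tfd, 0x1000, 64, 1, 0);   /* cmd + MAC header */
	attach_buf_to_tfd(&tfd, 0x2000, 1200, 0, 0); /* frame payload */
	printf("TFD has %u buffers\n", (unsigned)tfd.num_tbs);
	return 0;
}
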
@@ -1063,7 +921,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
 	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 	struct iwl_queue *q = &txq->q;
-	struct iwl_tfd *tfd;
 	struct iwl_cmd *out_cmd;
 	dma_addr_t phys_addr;
 	unsigned long flags;
@@ -1086,16 +943,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	}
 
 	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
-		IWL_ERROR("No space for Tx\n");
+		IWL_ERR(priv, "No space for Tx\n");
 		return -ENOSPC;
 	}
 
 	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
-	tfd = &txq->tfds[q->write_ptr];
-	memset(tfd, 0, sizeof(*tfd));
-
-
 	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
 	out_cmd = txq->cmd[idx];
 
@@ -1120,7 +973,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	pci_unmap_len_set(&out_cmd->meta, len, len);
 	phys_addr += offsetof(struct iwl_cmd, hdr);
 
-	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   phys_addr, fix_size, 1,
+						   U32_PAD(cmd->len));
 
 #ifdef CONFIG_IWLWIFI_DEBUG
 	switch (out_cmd->hdr.cmd) {
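
Note: in the host-command path the new "pad" argument is U32_PAD(cmd->len), which presumably reports how many bytes are needed to round the command length up to a 32-bit boundary. A tiny stand-alone check of that interpretation; the u32_pad() helper below is an assumption written for illustration, not the driver's macro:

#include <stdio.h>

/* assumption: pad = bytes needed to reach the next 4-byte boundary */
static unsigned u32_pad(unsigned len)
{
	return (4 - (len & 3)) & 3;
}

int main(void)
{
	unsigned len;

	for (len = 8; len <= 11; len++)
		printf("len=%u pad=%u padded=%u\n",
		       len, u32_pad(len), len + u32_pad(len));
	return 0;
}
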
@@ -1144,8 +999,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 #endif
 	txq->need_update = 1;
 
-	/* Set up entry in queue's byte count circular buffer */
-	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
+		/* Set up entry in queue's byte count circular buffer */
+		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@@ -1163,7 +1019,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 	int nfreed = 0;
 
 	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
-		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
 			  "is out of range [0-%d] %d %d.\n", txq_id,
 			  index, q->n_bd, q->write_ptr, q->read_ptr);
 		return 0;
@@ -1180,7 +1036,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
 			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
-		iwl_hw_txq_free_tfd(priv, txq);
+		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 		nfreed++;
 	}
 	return nfreed;
@@ -1203,7 +1059,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 	int nfreed = 0;
 
 	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
-		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
 			  "is out of range [0-%d] %d %d.\n", txq_id,
 			  idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
@@ -1218,7 +1074,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		if (nfreed++ > 0) {
-			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", idx,
+			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
 				  q->write_ptr, q->read_ptr);
 			queue_work(priv->workqueue, &priv->restart);
 		}
@@ -1306,7 +1162,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 	else
 		return -EINVAL;
 
-	IWL_WARNING("%s on ra = %pM tid = %d\n",
+	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
 			__func__, ra, tid);
 
 	sta_id = iwl_find_station(priv, ra);
@@ -1314,7 +1170,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 		return -ENXIO;
 
 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
-		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
+		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
 		return -ENXIO;
 	}
 
@@ -1334,7 +1190,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 		return ret;
 
 	if (tid_data->tfds_in_queue == 0) {
-		printk(KERN_ERR "HW queue is empty\n");
+		IWL_ERR(priv, "HW queue is empty\n");
 		tid_data->agg.state = IWL_AGG_ON;
 		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
 	} else {
@@ -1354,7 +1210,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 	unsigned long flags;
 
 	if (!ra) {
-		IWL_ERROR("ra = NULL\n");
+		IWL_ERR(priv, "ra = NULL\n");
 		return -EINVAL;
 	}
 
@@ -1369,7 +1225,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 		return -ENXIO;
 
 	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
+		IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
 
 	tid_data = &priv->stations[sta_id].tid[tid];
 	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
@@ -1455,7 +1311,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 	struct ieee80211_tx_info *info;
 
 	if (unlikely(!agg->wait_for_ba)) {
-		IWL_ERROR("Received BA when not expected\n");
+		IWL_ERR(priv, "Received BA when not expected\n");
 		return -EINVAL;
 	}
 
@@ -1528,7 +1384,8 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
 	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
 
 	if (scd_flow >= priv->hw_params.max_txq_num) {
-		IWL_ERROR("BUG_ON scd_flow is bigger than number of queues\n");
+		IWL_ERR(priv,
+			"BUG_ON scd_flow is bigger than number of queues\n");
 		return;
 	}
 
1534 1391