Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
 drivers/net/wireless/iwlwifi/iwl-tx.c | 632 ++++++++++++++++++++------------
 1 file changed, 394 insertions(+), 238 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a81989c06983..137dba95b1ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
+#include "iwl-agn.h"
 #include "iwl-dev.h"
 #include "iwl-core.h"
 #include "iwl-sta.h"
@@ -49,33 +50,211 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	if (txq->need_update == 0)
 		return;
 
-	/* if we're trying to save power */
-	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
-		/* wake up nic if it's powered down ...
-		 * uCode will wake up, and interrupt us again, so next
-		 * time we'll skip this part. */
-		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
-		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
-				      txq_id, reg);
-			iwl_set_bit(priv, CSR_GP_CNTRL,
-				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-			return;
-		}
-
-		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
-				txq->q.write_ptr | (txq_id << 8));
-
-	/* else not in power-save mode, uCode will never sleep when we're
-	 * trying to tx (during RFKILL, we're not trying to tx). */
-	} else
+	if (priv->cfg->base_params->shadow_reg_enable) {
+		/* shadow register enabled */
 		iwl_write32(priv, HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
+	} else {
+		/* if we're trying to save power */
+		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+			/* wake up nic if it's powered down ...
+			 * uCode will wake up, and interrupt us again, so next
+			 * time we'll skip this part. */
+			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+				IWL_DEBUG_INFO(priv,
+					"Tx queue %d requesting wakeup,"
+					" GP1 = 0x%x\n", txq_id, reg);
+				iwl_set_bit(priv, CSR_GP_CNTRL,
+					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+				return;
+			}
+
+			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+					txq->q.write_ptr | (txq_id << 8));
 
+		/*
+		 * else not in power-save mode,
+		 * uCode will never sleep when we're
+		 * trying to tx (during RFKILL, we're not trying to tx).
+		 */
+		} else
+			iwl_write32(priv, HBUS_TARG_WRPTR,
+				    txq->q.write_ptr | (txq_id << 8));
+	}
 	txq->need_update = 0;
 }
-EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
+
+	return addr;
+}
+
+static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
+				  dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+			     struct iwl_tfd *tfd, int dma_dir)
+{
+	struct pci_dev *dev = priv->pci_dev;
+	int i;
+	int num_tbs;
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev,
+				dma_unmap_addr(meta, mapping),
+				dma_unmap_len(meta, len),
+				PCI_DMA_BIDIRECTIONAL);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++)
+		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
+				iwl_tfd_tb_get_len(tfd, i), dma_dir);
+}
+
+/**
+ * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @priv - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	struct iwl_tfd *tfd_tmp = txq->tfds;
+	int index = txq->q.read_ptr;
+
+	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+			 PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->txb) {
+		struct sk_buff *skb;
+
+		skb = txq->txb[txq->q.read_ptr].skb;
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->txb[txq->q.read_ptr].skb = NULL;
+		}
+	}
+}
+
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+				 struct iwl_tx_queue *txq,
+				 dma_addr_t addr, u16 len,
+				 u8 reset)
+{
+	struct iwl_queue *q;
+	struct iwl_tfd *tfd, *tfd_tmp;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd_tmp = txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = iwl_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum 20 Tx buffers */
+	if (num_tbs >= IWL_NUM_OF_TBS) {
+		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+			IWL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+		return -EINVAL;
+
+	if (unlikely(addr & ~IWL_TX_DMA_MASK))
+		IWL_ERR(priv, "Unaligned address = %llx\n",
+			(unsigned long long)addr);
+
+	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	/* Circular buffer (TFD queue in DRAM) physical base address */
+	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
+			txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
+/**
+ * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+
+	if (q->n_bd == 0)
+		return;
+
+	while (q->write_ptr != q->read_ptr) {
+		iwlagn_txq_free_tfd(priv, txq);
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+}
 
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
@@ -88,17 +267,10 @@ EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
 	struct device *dev = &priv->pci_dev->dev;
 	int i;
 
-	if (q->n_bd == 0)
-		return;
-
-	/* first, empty all BD's */
-	for (; q->write_ptr != q->read_ptr;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
+	iwl_tx_queue_unmap(priv, txq_id);
 
 	/* De-alloc array of command/tx buffers */
 	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
@@ -122,52 +294,50 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_tx_queue_free);
 
 /**
- * iwl_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
+ * iwl_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
  */
-void iwl_cmd_queue_free(struct iwl_priv *priv)
+void iwl_cmd_queue_unmap(struct iwl_priv *priv)
 {
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 	struct iwl_queue *q = &txq->q;
-	struct device *dev = &priv->pci_dev->dev;
 	int i;
-	bool huge = false;
 
 	if (q->n_bd == 0)
 		return;
 
-	for (; q->read_ptr != q->write_ptr;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-		/* we have no way to tell if it is a huge cmd ATM */
-		i = get_cmd_index(q, q->read_ptr, 0);
+	while (q->read_ptr != q->write_ptr) {
+		i = get_cmd_index(q, q->read_ptr);
 
-		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
-			huge = true;
-			continue;
+		if (txq->meta[i].flags & CMD_MAPPED) {
+			iwlagn_unmap_tfd(priv, &txq->meta[i], &txq->tfds[i],
+					 PCI_DMA_BIDIRECTIONAL);
+			txq->meta[i].flags = 0;
 		}
 
-		pci_unmap_single(priv->pci_dev,
-				 dma_unmap_addr(&txq->meta[i], mapping),
-				 dma_unmap_len(&txq->meta[i], len),
-				 PCI_DMA_BIDIRECTIONAL);
-	}
-	if (huge) {
-		i = q->n_window;
-		pci_unmap_single(priv->pci_dev,
-				 dma_unmap_addr(&txq->meta[i], mapping),
-				 dma_unmap_len(&txq->meta[i], len),
-				 PCI_DMA_BIDIRECTIONAL);
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
+}
+
+/**
+ * iwl_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void iwl_cmd_queue_free(struct iwl_priv *priv)
+{
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct device *dev = &priv->pci_dev->dev;
+	int i;
+
+	iwl_cmd_queue_unmap(priv);
 
 	/* De-alloc array of command/tx buffers */
-	for (i = 0; i <= TFD_CMD_SLOTS; i++)
+	for (i = 0; i < TFD_CMD_SLOTS; i++)
 		kfree(txq->cmd[i]);
 
 	/* De-alloc circular buffer of TFDs */
@@ -184,7 +354,6 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 	/* 0-fill queue descriptor structure */
 	memset(txq, 0, sizeof(*txq));
 }
-EXPORT_SYMBOL(iwl_cmd_queue_free);
 
 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
  * DMA services
@@ -206,7 +375,6 @@ EXPORT_SYMBOL(iwl_cmd_queue_free);
  * reclaiming packets (on 'tx done IRQ), if free space become > high mark,
  * Tx queue resumed.
  *
- * See more detailed info in iwl-4965-hw.h.
  ***************************************************/
 
 int iwl_queue_space(const struct iwl_queue *q)
@@ -224,7 +392,6 @@ int iwl_queue_space(const struct iwl_queue *q)
 		s = 0;
 	return s;
 }
-EXPORT_SYMBOL(iwl_queue_space);
 
 
 /**
@@ -239,11 +406,13 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 
 	/* count must be power-of-two size, otherwise iwl_queue_inc_wrap
 	 * and iwl_queue_dec_wrap are broken. */
-	BUG_ON(!is_power_of_2(count));
+	if (WARN_ON(!is_power_of_2(count)))
+		return -EINVAL;
 
 	/* slots_num must be power-of-two size, otherwise
 	 * get_cmd_index is broken. */
-	BUG_ON(!is_power_of_2(slots_num));
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
 
 	q->low_mark = q->n_window / 4;
 	if (q->low_mark < 4)
@@ -254,8 +423,6 @@
 		q->high_mark = 2;
 
 	q->write_ptr = q->read_ptr = 0;
-	q->last_read_ptr = 0;
-	q->repeat_same_read_ptr = 0;
 
 	return 0;
 }
@@ -271,7 +438,7 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
 	/* Driver private data, only for Tx (not command) queues,
 	 * not shared with device. */
-	if (id != IWL_CMD_QUEUE_NUM) {
+	if (id != priv->cmd_queue) {
 		txq->txb = kzalloc(sizeof(txq->txb[0]) *
 				   TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
 		if (!txq->txb) {
@@ -310,33 +477,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 {
 	int i, len;
 	int ret;
-	int actual_slots = slots_num;
-
-	/*
-	 * Alloc buffer array for commands (Tx or other types of commands).
-	 * For the command queue (#4), allocate command space + one big
-	 * command for scan, since scan command is very huge; the system will
-	 * not have two scans at the same time, so only one is needed.
-	 * For normal Tx queues (all other queues), no super-size command
-	 * space is needed.
-	 */
-	if (txq_id == IWL_CMD_QUEUE_NUM)
-		actual_slots++;
 
-	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
+	txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
 			    GFP_KERNEL);
-	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
+	txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
 			   GFP_KERNEL);
 
 	if (!txq->meta || !txq->cmd)
 		goto out_free_arrays;
 
 	len = sizeof(struct iwl_device_cmd);
-	for (i = 0; i < actual_slots; i++) {
-		/* only happens for cmd queue */
-		if (i == slots_num)
-			len = IWL_MAX_CMD_SIZE;
-
+	for (i = 0; i < slots_num; i++) {
 		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
 		if (!txq->cmd[i])
 			goto err;
@@ -350,27 +501,28 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	txq->need_update = 0;
 
 	/*
-	 * Aggregation TX queues will get their ID when aggregation begins;
-	 * they overwrite the setting done here. The command FIFO doesn't
-	 * need an swq_id so don't set one to catch errors, all others can
-	 * be set up to the identity mapping.
+	 * For the default queues 0-3, set up the swq_id
+	 * already -- all others need to get one later
+	 * (if they need one at all).
 	 */
-	if (txq_id != IWL_CMD_QUEUE_NUM)
-		txq->swq_id = txq_id;
+	if (txq_id < 4)
+		iwl_set_swq_id(txq, txq_id, txq_id);
 
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
 
 	/* Initialize queue's high/low-water marks, and head/tail indexes */
-	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+	ret = iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+	if (ret)
+		return ret;
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	iwlagn_tx_queue_init(priv, txq);
 
 	return 0;
 err:
-	for (i = 0; i < actual_slots; i++)
+	for (i = 0; i < slots_num; i++)
 		kfree(txq->cmd[i]);
 out_free_arrays:
 	kfree(txq->meta);
@@ -378,17 +530,11 @@ out_free_arrays:
 
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(iwl_tx_queue_init);
 
 void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 			int slots_num, u32 txq_id)
 {
-	int actual_slots = slots_num;
-
-	if (txq_id == IWL_CMD_QUEUE_NUM)
-		actual_slots++;
-
-	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * slots_num);
 
 	txq->need_update = 0;
 
@@ -396,9 +542,8 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
 
 	/* Tell device where to find queue */
-	priv->cfg->ops->lib->txq_init(priv, txq);
+	iwlagn_tx_queue_init(priv, txq);
 }
-EXPORT_SYMBOL(iwl_tx_queue_reset);
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
@@ -413,27 +558,57 @@ EXPORT_SYMBOL(iwl_tx_queue_reset);
  */
 int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 {
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
 	dma_addr_t phys_addr;
 	unsigned long flags;
-	int len;
 	u32 idx;
-	u16 fix_size;
+	u16 copy_size, cmd_size;
+	bool is_ct_kill = false;
+	bool had_nocopy = false;
+	int i;
+	u8 *cmd_dest;
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
+	int trace_idx;
+#endif
 
-	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
-	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+		IWL_WARN(priv, "fw recovery, no hcmd send\n");
+		return -EIO;
+	}
+
+	copy_size = sizeof(out_cmd->hdr);
+	cmd_size = sizeof(out_cmd->hdr);
 
-	/* If any of the command structures end up being larger than
-	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
-	 * we will need to increase the size of the TFD entries
-	 * Also, check to see if command buffer should not exceed the size
-	 * of device_cmd and max_cmd_size. */
-	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
-	       !(cmd->flags & CMD_SIZE_HUGE));
-	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
+	/* need one for the header if the first is NOCOPY */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
+
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy))
+				return -EINVAL;
+			copy_size += cmd->len[i];
+		}
+		cmd_size += cmd->len[i];
+	}
+
+	/*
+	 * If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+	 * allocated into separate TFDs, then we will need to
+	 * increase the size of the buffers.
+	 */
+	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
+		return -EINVAL;
 
 	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
 		IWL_WARN(priv, "Not sending command - %s KILL\n",
@@ -441,94 +616,120 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		return -EIO;
 	}
 
+	spin_lock_irqsave(&priv->hcmd_lock, flags);
+
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+
 		IWL_ERR(priv, "No space in command queue\n");
-		if (iwl_within_ct_kill_margin(priv))
-			iwl_tt_enter_ct_kill(priv);
-		else {
+		is_ct_kill = iwl_check_for_ct_kill(priv);
+		if (!is_ct_kill) {
 			IWL_ERR(priv, "Restarting adapter due to queue full\n");
-			queue_work(priv->workqueue, &priv->restart);
+			iwlagn_fw_error(priv, false);
 		}
 		return -ENOSPC;
 	}
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
-
-	/* If this is a huge cmd, mark the huge flag also on the meta.flags
-	 * of the _original_ cmd. This is used for DMA mapping clean up.
-	 */
-	if (cmd->flags & CMD_SIZE_HUGE) {
-		idx = get_cmd_index(q, q->write_ptr, 0);
-		txq->meta[idx].flags = CMD_SIZE_HUGE;
-	}
-
-	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+	idx = get_cmd_index(q, q->write_ptr);
 	out_cmd = txq->cmd[idx];
 	out_meta = &txq->meta[idx];
 
+	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+		return -ENOSPC;
+	}
+
 	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
-	out_meta->flags = cmd->flags;
 	if (cmd->flags & CMD_WANT_SKB)
 		out_meta->source = cmd;
 	if (cmd->flags & CMD_ASYNC)
 		out_meta->callback = cmd->callback;
 
+	/* set up the header */
+
 	out_cmd->hdr.cmd = cmd->id;
-	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+	out_cmd->hdr.flags = 0;
+	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
+					    INDEX_TO_SEQ(q->write_ptr));
 
-	/* At this point, the out_cmd now has all of the incoming cmd
-	 * information */
+	/* and copy the data that needs to be copied */
 
-	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
-			INDEX_TO_SEQ(q->write_ptr));
-	if (cmd->flags & CMD_SIZE_HUGE)
-		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
-	len = sizeof(struct iwl_device_cmd);
-	if (idx == TFD_CMD_SLOTS)
-		len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	switch (out_cmd->hdr.cmd) {
-	case REPLY_TX_LINK_QUALITY_CMD:
-	case SENSITIVITY_CMD:
-		IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
-				"%d bytes at %d[%d]:%d\n",
-				get_cmd_string(out_cmd->hdr.cmd),
-				out_cmd->hdr.cmd,
-				le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-				q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
-		break;
-	default:
-		IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
-			"%d bytes at %d[%d]:%d\n",
-			get_cmd_string(out_cmd->hdr.cmd),
-			out_cmd->hdr.cmd,
-			le16_to_cpu(out_cmd->hdr.sequence), fix_size,
-			q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+	cmd_dest = &out_cmd->cmd.payload[0];
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
+			break;
+		memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
+		cmd_dest += cmd->len[i];
 	}
-#endif
-	txq->need_update = 1;
 
-	if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
-		/* Set up entry in queue's byte count circular buffer */
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+			"%d bytes at %d[%d]:%d\n",
+			get_cmd_string(out_cmd->hdr.cmd),
+			out_cmd->hdr.cmd,
+			le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
+			q->write_ptr, idx, priv->cmd_queue);
 
 	phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
-				   fix_size, PCI_DMA_BIDIRECTIONAL);
+				   copy_size, PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+		idx = -ENOMEM;
+		goto out;
+	}
+
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
-	dma_unmap_len_set(out_meta, len, fix_size);
+	dma_unmap_len_set(out_meta, len, copy_size);
 
-	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_bufs[0] = &out_cmd->hdr;
+	trace_lens[0] = copy_size;
+	trace_idx = 1;
+#endif
 
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   phys_addr, fix_size, 1,
-						   U32_PAD(cmd->len));
+	for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
+		if (!cmd->len[i])
+			continue;
+		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
+			continue;
+		phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
+					   cmd->len[i], PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
+			iwlagn_unmap_tfd(priv, out_meta,
+					 &txq->tfds[q->write_ptr],
+					 PCI_DMA_BIDIRECTIONAL);
+			idx = -ENOMEM;
+			goto out;
+		}
+
+		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+					     cmd->len[i], 0);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+		trace_bufs[trace_idx] = cmd->data[i];
+		trace_lens[trace_idx] = cmd->len[i];
+		trace_idx++;
+#endif
+	}
+
+	out_meta->flags = cmd->flags | CMD_MAPPED;
+
+	txq->need_update = 1;
+
+	/* check that tracing gets all possible blocks */
+	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
+			       trace_bufs[0], trace_lens[0],
+			       trace_bufs[1], trace_lens[1],
+			       trace_bufs[2], trace_lens[2]);
+#endif
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
 	iwl_txq_update_write_ptr(priv, txq);
 
+ out:
 	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
 	return idx;
 }
@@ -540,8 +741,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
  * need to be reclaimed. As result, some free space forms. If there is
  * enough free space (> low mark), wake the stack that feeds us.
  */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
-				   int idx, int cmd_idx)
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
 {
 	struct iwl_tx_queue *txq = &priv->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
@@ -560,7 +760,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 		if (nfreed++ > 0) {
 			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
 					q->write_ptr, q->read_ptr);
-			queue_work(priv->workqueue, &priv->restart);
+			iwlagn_fw_error(priv, false);
 		}
 
 	}
@@ -581,39 +781,28 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	int txq_id = SEQ_TO_QUEUE(sequence);
 	int index = SEQ_TO_INDEX(sequence);
 	int cmd_index;
-	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
-	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	unsigned long flags;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then there a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != IWL_CMD_QUEUE_NUM,
-		 "wrong command queue %d, sequence 0x%X readp=%d writep=%d\n",
-		  txq_id, sequence,
-		  priv->txq[IWL_CMD_QUEUE_NUM].q.read_ptr,
-		  priv->txq[IWL_CMD_QUEUE_NUM].q.write_ptr)) {
+	if (WARN(txq_id != priv->cmd_queue,
+		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+		  txq_id, priv->cmd_queue, sequence,
+		  priv->txq[priv->cmd_queue].q.read_ptr,
+		  priv->txq[priv->cmd_queue].q.write_ptr)) {
 		iwl_print_hex_error(priv, pkt, 32);
 		return;
 	}
 
-	/* If this is a huge cmd, clear the huge flag on the meta.flags
-	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
-	 * the DMA buffer for the scan (huge) command.
-	 */
-	if (huge) {
-		cmd_index = get_cmd_index(&txq->q, index, 0);
-		txq->meta[cmd_index].flags = 0;
-	}
-	cmd_index = get_cmd_index(&txq->q, index, huge);
+	cmd_index = get_cmd_index(&txq->q, index);
 	cmd = txq->cmd[cmd_index];
 	meta = &txq->meta[cmd_index];
 
-	pci_unmap_single(priv->pci_dev,
-			 dma_unmap_addr(meta, mapping),
-			 dma_unmap_len(meta, len),
-			 PCI_DMA_BIDIRECTIONAL);
+	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], PCI_DMA_BIDIRECTIONAL);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -622,7 +811,9 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	} else if (meta->callback)
 		meta->callback(priv, cmd, pkt);
 
-	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
+	spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+	iwl_hcmd_queue_reclaim(priv, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -630,44 +821,9 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
-	meta->flags = 0;
-}
-EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
-#ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
-const char *iwl_get_tx_fail_reason(u32 status)
-{
-	switch (status & TX_STATUS_MSK) {
-	case TX_STATUS_SUCCESS:
-		return "SUCCESS";
-		TX_STATUS_POSTPONE(DELAY);
-		TX_STATUS_POSTPONE(FEW_BYTES);
-		TX_STATUS_POSTPONE(BT_PRIO);
-		TX_STATUS_POSTPONE(QUIET_PERIOD);
-		TX_STATUS_POSTPONE(CALC_TTAK);
-		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
-		TX_STATUS_FAIL(SHORT_LIMIT);
-		TX_STATUS_FAIL(LONG_LIMIT);
-		TX_STATUS_FAIL(FIFO_UNDERRUN);
-		TX_STATUS_FAIL(DRAIN_FLOW);
-		TX_STATUS_FAIL(RFKILL_FLUSH);
-		TX_STATUS_FAIL(LIFE_EXPIRE);
-		TX_STATUS_FAIL(DEST_PS);
-		TX_STATUS_FAIL(HOST_ABORTED);
-		TX_STATUS_FAIL(BT_RETRY);
-		TX_STATUS_FAIL(STA_INVALID);
-		TX_STATUS_FAIL(FRAG_DROPPED);
-		TX_STATUS_FAIL(TID_DISABLE);
-		TX_STATUS_FAIL(FIFO_FLUSHED);
-		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
-		TX_STATUS_FAIL(FW_DROP);
-		TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
-	}
+	/* Mark as unmapped */
+	meta->flags = 0;
 
-	return "UNKNOWN";
+	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
 }
-EXPORT_SYMBOL(iwl_get_tx_fail_reason);
-#endif /* CONFIG_IWLWIFI_DEBUG */
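
Illustrative sketch (not part of the commit): how the TFD helpers added above, iwl_tfd_set_tb() and iwl_tfd_tb_get_addr(), pack a 36-bit DMA address (see the DMA_BIT_MASK(36) check) into a 32-bit "lo" word plus the low 4 bits of the 16-bit "hi_n_len" field, whose upper 12 bits carry the buffer length. Endianness conversions and the 32-bit dma_addr_t case (the reason for the driver's split "(addr >> 16) >> 16" shift) are omitted, and the struct and function names are simplified stand-ins, not driver API.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's struct iwl_tfd_tb. */
struct tb {
	uint32_t lo;        /* low 32 bits of the DMA address */
	uint16_t hi_n_len;  /* bits 0-3: addr bits 32-35; bits 4-15: length */
};

static void tb_set(struct tb *tb, uint64_t addr, uint16_t len)
{
	/* addr must fit in 36 bits, len in 12 bits */
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}

static uint64_t tb_get_addr(const struct tb *tb)
{
	return (uint64_t)tb->lo | (((uint64_t)tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_get_len(const struct tb *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb tb;
	uint64_t addr = 0xABCD12345ULL;	/* a 36-bit address */

	tb_set(&tb, addr, 100);
	assert(tb_get_addr(&tb) == addr);
	assert(tb_get_len(&tb) == 100);
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_get_addr(&tb), tb_get_len(&tb));
	return 0;
}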