Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
 -rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 357 ++++++++++++++++++++++---------
 1 file changed, 268 insertions(+), 89 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b7e196e3c8d3..8dd0c036d547 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -29,6 +29,7 @@
 
 #include <linux/etherdevice.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
@@ -60,7 +61,8 @@ static const u16 default_tid_to_tx_fifo[] = {
 static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
				    struct iwl_dma_ptr *ptr, size_t size)
 {
-	ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+				       GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
@@ -73,21 +75,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
	if (unlikely(!ptr->addr))
		return;
 
-	pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
 }
 
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
	u32 reg = 0;
-	int ret = 0;
	int txq_id = txq->q.id;
 
	if (txq->need_update == 0)
-		return ret;
+		return;
 
	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -97,10 +98,11 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
 
		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-			IWL_DEBUG_INFO(priv, "Requesting wakeup, GP1 = 0x%x\n", reg);
+			IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-			return ret;
+			return;
		}
 
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -113,12 +115,24 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
			txq->q.write_ptr | (txq_id << 8));
 
	txq->need_update = 0;
-
-	return ret;
 }
 EXPORT_SYMBOL(iwl_txq_update_write_ptr);
 
 
+void iwl_free_tfds_in_queue(struct iwl_priv *priv,
+			    int sta_id, int tid, int freed)
+{
+	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
+			priv->stations[sta_id].tid[tid].tfds_in_queue,
+			freed);
+		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+EXPORT_SYMBOL(iwl_free_tfds_in_queue);
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
@@ -131,8 +145,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
 {
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct iwl_queue *q = &txq->q;
-	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	struct device *dev = &priv->pci_dev->dev;
+	int i;
 
	if (q->n_bd == 0)
		return;
@@ -142,16 +156,14 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-
	/* De-alloc array of command/tx buffers */
	for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
		kfree(txq->cmd[i]);
 
	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
-		pci_free_consistent(dev, priv->hw_params.tfd_size *
-				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		dma_free_coherent(dev, priv->hw_params.tfd_size *
+				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
 
	/* De-alloc array of per-TFD driver data */
	kfree(txq->txb);
@@ -180,14 +192,35 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 {
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
-	struct pci_dev *dev = priv->pci_dev;
-	int i, len;
+	struct device *dev = &priv->pci_dev->dev;
+	int i;
+	bool huge = false;
 
	if (q->n_bd == 0)
		return;
 
-	len = sizeof(struct iwl_device_cmd) * q->n_window;
-	len += IWL_MAX_SCAN_SIZE;
+	for (; q->read_ptr != q->write_ptr;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+		/* we have no way to tell if it is a huge cmd ATM */
+		i = get_cmd_index(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_SIZE_HUGE) {
+			huge = true;
+			continue;
+		}
+
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
+	if (huge) {
+		i = q->n_window;
+		pci_unmap_single(priv->pci_dev,
+				 pci_unmap_addr(&txq->meta[i], mapping),
+				 pci_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+	}
 
	/* De-alloc array of command/tx buffers */
	for (i = 0; i <= TFD_CMD_SLOTS; i++)
@@ -195,8 +228,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
 
	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd)
-		pci_free_consistent(dev, priv->hw_params.tfd_size *
-				    txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+		dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
 
	/* deallocate arrays */
	kfree(txq->cmd);
@@ -287,7 +320,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 static int iwl_tx_queue_alloc(struct iwl_priv *priv,
			      struct iwl_tx_queue *txq, u32 id)
 {
-	struct pci_dev *dev = priv->pci_dev;
+	struct device *dev = &priv->pci_dev->dev;
	size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
 
	/* Driver private data, only for Tx (not command) queues,
@@ -306,8 +339,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
 
	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
-	txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+	txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+				       GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
		goto error;
@@ -356,7 +389,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
	for (i = 0; i < actual_slots; i++) {
		/* only happens for cmd queue */
		if (i == slots_num)
-			len += IWL_MAX_SCAN_SIZE;
+			len = IWL_MAX_CMD_SIZE;
 
		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
		if (!txq->cmd[i])
@@ -370,8 +403,13 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
	txq->need_update = 0;
 
-	/* aggregation TX queues will get their ID when aggregation begins */
-	if (txq_id <= IWL_TX_FIFO_AC3)
+	/*
+	 * Aggregation TX queues will get their ID when aggregation begins;
+	 * they overwrite the setting done here. The command FIFO doesn't
+	 * need an swq_id so don't set one to catch errors, all others can
+	 * be set up to the identity mapping.
+	 */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
		txq->swq_id = txq_id;
 
	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
@@ -396,6 +434,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -406,28 +464,32 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
	int txq_id;
 
	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (txq_id == IWL_CMD_QUEUE_NUM)
-			iwl_cmd_queue_free(priv);
-		else
-			iwl_tx_queue_free(priv, txq_id);
-
+	if (priv->txq) {
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+			if (txq_id == IWL_CMD_QUEUE_NUM)
+				iwl_cmd_queue_free(priv);
+			else
+				iwl_tx_queue_free(priv, txq_id);
+	}
	iwl_free_dma_ptr(priv, &priv->kw);
 
	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
+
+	/* free tx queue structure */
+	iwl_free_txq_mem(priv);
 }
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
	int txq_id, slots_num;
	unsigned long flags;
 
@@ -446,6 +508,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
		IWL_ERR(priv, "Keep Warm allocation failed\n");
		goto error_kw;
	}
+
+	/* allocate tx queue structure */
+	ret = iwl_alloc_txq_mem(priv);
+	if (ret)
+		goto error;
+
	spin_lock_irqsave(&priv->lock, flags);
 
	/* Turn off all Tx DMA fifos */
@@ -479,8 +547,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
	return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+	}
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
  */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
@@ -500,9 +591,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
				    1000);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Deallocate memory for all Tx queues */
-	iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 
@@ -582,9 +670,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
	u8 rate_plcp;
 
	/* Set retry limit on DATA packets and Probe Responses*/
-	if (priv->data_retry_limit != -1)
-		data_retry_limit = priv->data_retry_limit;
-	else if (ieee80211_is_probe_resp(fc))
+	if (ieee80211_is_probe_resp(fc))
		data_retry_limit = 3;
	else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -701,6 +787,8 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 {
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_sta *sta = info->control.sta;
+	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
@@ -710,7 +798,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
-	u16 len, len_org;
+	u16 len, len_org, firstlen, secondlen;
	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
@@ -719,7 +807,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
-	int ret;
 
	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
@@ -763,6 +850,24 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
+	    sta_priv->asleep) {
+		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
	txq_id = skb_get_queue_mapping(skb);
	if (ieee80211_is_data_qos(fc)) {
		qc = ieee80211_get_qos_ctl(hdr);
@@ -776,8 +881,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;
		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+		}
	}
 
	txq = &priv->txq[txq_id];
@@ -843,7 +950,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	       sizeof(struct iwl_cmd_header) + hdr_len;
 
	len_org = len;
-	len = (len + 3) & ~3;
+	firstlen = len = (len + 3) & ~3;
 
	if (len_org != len)
		len_org = 1;
@@ -877,7 +984,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
-	len = skb->len - hdr_len;
+	secondlen = len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
@@ -911,13 +1018,27 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
				       len, PCI_DMA_BIDIRECTIONAL);
 
+	trace_iwlwifi_dev_tx(priv,
+			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+			     sizeof(struct iwl_tfd),
+			     &out_cmd->hdr, firstlen,
+			     skb->data + hdr_len, secondlen);
+
	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	ret = iwl_txq_update_write_ptr(priv, txq);
+	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (ret)
-		return ret;
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually,
+	 * regardless of the value of ret. "ret" only indicates
+	 * whether or not we should update the write pointer.
+	 */
+
+	/* avoid atomic ops if it isn't an associated client */
+	if (sta_priv && sta_priv->client)
+		atomic_inc(&sta_priv->pending_frames);
 
	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
@@ -957,7 +1078,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
	struct iwl_cmd_meta *out_meta;
	dma_addr_t phys_addr;
	unsigned long flags;
-	int len, ret;
+	int len;
	u32 idx;
	u16 fix_size;
 
@@ -966,22 +1087,40 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
-	 * we will need to increase the size of the TFD entries */
+	 * we will need to increase the size of the TFD entries
+	 * Also, check to see if command buffer should not exceed the size
+	 * of device_cmd and max_cmd_size. */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->flags & CMD_SIZE_HUGE));
+	BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
 
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n");
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
		return -EIO;
	}
 
	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		IWL_ERR(priv, "No space for Tx\n");
+		IWL_ERR(priv, "No space in command queue\n");
+		if (iwl_within_ct_kill_margin(priv))
+			iwl_tt_enter_ct_kill(priv);
+		else {
+			IWL_ERR(priv, "Restarting adapter due to queue full\n");
+			queue_work(priv->workqueue, &priv->restart);
+		}
		return -ENOSPC;
	}
 
	spin_lock_irqsave(&priv->hcmd_lock, flags);
 
+	/* If this is a huge cmd, mark the huge flag also on the meta.flags
+	 * of the _original_ cmd. This is used for DMA mapping clean up.
+	 */
+	if (cmd->flags & CMD_SIZE_HUGE) {
+		idx = get_cmd_index(q, q->write_ptr, 0);
+		txq->meta[idx].flags = CMD_SIZE_HUGE;
+	}
+
	idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
	out_cmd = txq->cmd[idx];
	out_meta = &txq->meta[idx];
@@ -1005,8 +1144,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
	if (cmd->flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
	len = sizeof(struct iwl_device_cmd);
-	len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
-
+	if (idx == TFD_CMD_SLOTS)
+		len = IWL_MAX_CMD_SIZE;
 
 #ifdef CONFIG_IWLWIFI_DEBUG
	switch (out_cmd->hdr.cmd) {
@@ -1039,16 +1178,36 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
	pci_unmap_addr_set(out_meta, mapping, phys_addr);
	pci_unmap_len_set(out_meta, len, fix_size);
 
+	trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
+
	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
						   phys_addr, fix_size, 1,
						   U32_PAD(cmd->len));
 
	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	ret = iwl_txq_update_write_ptr(priv, txq);
+	iwl_txq_update_write_ptr(priv, txq);
 
	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-	return ret ? ret : idx;
+	return idx;
+}
+
+static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+
+	ieee80211_tx_status_irqsafe(priv->hw, skb);
 }
 
 int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
@@ -1057,6 +1216,7 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	struct iwl_queue *q = &txq->q;
	struct iwl_tx_info *tx_info;
	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
 
	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
@@ -1070,14 +1230,17 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
		tx_info = &txq->txb[txq->q.read_ptr];
-		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		iwl_tx_status(priv, tx_info->skb[0]);
+
+		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
+		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
		tx_info->skb[0] = NULL;
 
		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
 
		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-		nfreed++;
	}
	return nfreed;
 }
@@ -1105,11 +1268,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
		return;
	}
 
-	pci_unmap_single(priv->pci_dev,
-			 pci_unmap_addr(&txq->meta[cmd_idx], mapping),
-			 pci_unmap_len(&txq->meta[cmd_idx], len),
-			 PCI_DMA_BIDIRECTIONAL);
-
	for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
@@ -1132,7 +1290,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
 */
 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
@@ -1140,6 +1298,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
+	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
 
	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then there a command routing bug has been introduced
@@ -1153,23 +1312,39 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
		return;
	}
 
-	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
-	cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
-	meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
+	/* If this is a huge cmd, clear the huge flag on the meta.flags
+	 * of the _original_ cmd. So that iwl_cmd_queue_free won't unmap
+	 * the DMA buffer for the scan (huge) command.
+	 */
+	if (huge) {
+		cmd_index = get_cmd_index(&txq->q, index, 0);
+		txq->meta[cmd_index].flags = 0;
+	}
+	cmd_index = get_cmd_index(&txq->q, index, huge);
+	cmd = txq->cmd[cmd_index];
+	meta = &txq->meta[cmd_index];
+
+	pci_unmap_single(priv->pci_dev,
+			 pci_unmap_addr(meta, mapping),
+			 pci_unmap_len(meta, len),
+			 PCI_DMA_BIDIRECTIONAL);
 
	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
-		meta->source->reply_skb = rxb->skb;
-		rxb->skb = NULL;
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
	} else if (meta->callback)
-		meta->callback(priv, cmd, rxb->skb);
+		meta->callback(priv, cmd, pkt);
 
	iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
 
	if (!(meta->flags & CMD_ASYNC)) {
		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+			       get_cmd_string(cmd->hdr.cmd));
		wake_up_interruptible(&priv->wait_command_queue);
	}
+	meta->flags = 0;
 }
 EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
@@ -1240,7 +1415,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
	if (tid_data->tfds_in_queue == 0) {
		IWL_DEBUG_HT(priv, "HW queue is empty\n");
		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
	} else {
		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
			     tid_data->tfds_in_queue);
@@ -1254,7 +1429,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
 {
	int tx_fifo_id, txq_id, sta_id, ssn = -1;
	struct iwl_tid_data *tid_data;
-	int ret, write_ptr, read_ptr;
+	int write_ptr, read_ptr;
	unsigned long flags;
 
	if (!ra) {
@@ -1280,7 +1455,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
	if (priv->stations[sta_id].tid[tid].agg.state ==
				IWL_EMPTYING_HW_QUEUE_ADDBA) {
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
		return 0;
	}
@@ -1306,14 +1481,18 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
 
	spin_lock_irqsave(&priv->lock, flags);
-	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+	/*
+	 * the only reason this call can fail is queue number out of range,
+	 * which can happen if uCode is reloaded and all the station
+	 * information are lost. if it is outside the range, there is no need
+	 * to deactivate the uCode queue, just return "success" to allow
+	 * mac80211 to clean up it own data.
+	 */
+	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
						   tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);
 
-	if (ret)
-		return ret;
-
-	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
 
	return 0;
 }
@@ -1337,7 +1516,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
							     ssn, tx_fifo);
			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1345,7 +1524,7 @@ int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
		if (tid_data->tfds_in_queue == 0) {
			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
		}
		break;
	}
@@ -1409,7 +1588,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 
	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
	memset(&info->status, 0, sizeof(info->status));
-	info->flags = IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_ACK;
	info->flags |= IEEE80211_TX_STAT_AMPDU;
	info->status.ampdu_ack_map = successes;
	info->status.ampdu_ack_len = agg->frame_count;
@@ -1429,7 +1608,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
 void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_ht_agg *agg;
@@ -1485,7 +1664,7 @@ void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
		/* calculate mac80211 ampdu sw queue to wake */
		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
		    priv->mac80211_registered &&
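
Note: a recurring change in this patch is the move from the PCI DMA wrappers
(pci_alloc_consistent/pci_free_consistent) to the generic DMA API
(dma_alloc_coherent/dma_free_coherent) on &pci_dev->dev. Below is a minimal
standalone sketch of that conversion for reference; the helper names
(iwl_dma_example_alloc/iwl_dma_example_free) are hypothetical and not part of
the patch.

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* old: buf = pci_alloc_consistent(pdev, size, &dma); */
	static void *iwl_dma_example_alloc(struct pci_dev *pdev, size_t size,
					   dma_addr_t *dma)
	{
		/* dma_alloc_coherent() takes the struct device and an explicit
		 * gfp_t; GFP_KERNEL matches what the patch uses. */
		return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
	}

	/* old: pci_free_consistent(pdev, size, buf, dma); */
	static void iwl_dma_example_free(struct pci_dev *pdev, size_t size,
					 void *buf, dma_addr_t dma)
	{
		dma_free_coherent(&pdev->dev, size, buf, dma);
	}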