Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
 -rw-r--r--  drivers/net/wireless/iwlwifi/iwl-tx.c | 1058
 1 file changed, 1039 insertions, 19 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index a1e03ccd5147..cfe6f4b233dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -27,6 +27,7 @@
  *
  *****************************************************************************/
 
+#include <linux/etherdevice.h>
 #include <net/mac80211.h>
 #include "iwl-eeprom.h"
 #include "iwl-dev.h"
@@ -35,6 +36,32 @@
 #include "iwl-io.h"
 #include "iwl-helpers.h"
 
+#ifdef CONFIG_IWL4965_HT
+
+static const u16 default_tid_to_tx_fifo[] = {
+	IWL_TX_FIFO_AC1,
+	IWL_TX_FIFO_AC0,
+	IWL_TX_FIFO_AC0,
+	IWL_TX_FIFO_AC1,
+	IWL_TX_FIFO_AC2,
+	IWL_TX_FIFO_AC2,
+	IWL_TX_FIFO_AC3,
+	IWL_TX_FIFO_AC3,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_NONE,
+	IWL_TX_FIFO_AC3
+};
+
+#endif /*CONFIG_IWL4965_HT */
+
+
+
 /**
  * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
  *
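The table added above maps the sixteen QoS TIDs, plus one catch-all entry, onto the device's Tx DMA FIFOs: TIDs 0-7 spread across the four EDCA AC FIFOs, TIDs 8-15 get IWL_TX_FIFO_NONE, and the final entry falls back to IWL_TX_FIFO_AC3. A minimal lookup sketch, mirroring the bounds check that iwl_tx_agg_start performs further down (the helper name is hypothetical):

	/* Hypothetical helper: resolve the Tx FIFO for a TID, or reject
	 * TIDs outside the table, as iwl_tx_agg_start does below. */
	static int tid_to_tx_fifo(u16 tid)
	{
		if (unlikely(tid >= ARRAY_SIZE(default_tid_to_tx_fifo)))
			return -EINVAL;
		return default_tid_to_tx_fifo[tid];
	}
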
@@ -95,6 +122,89 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 }
 EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
 
+
+int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
+				 dma_addr_t addr, u16 len)
+{
+	int index, is_odd;
+	struct iwl_tfd_frame *tfd = ptr;
+	u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+	if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) {
+		IWL_ERROR("Error can not send more than %d chunks\n",
+			  MAX_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	index = num_tbs / 2;
+	is_odd = num_tbs & 0x1;
+
+	if (!is_odd) {
+		tfd->pa[index].tb1_addr = cpu_to_le32(addr);
+		IWL_SET_BITS(tfd->pa[index], tb1_addr_hi,
+			     iwl_get_dma_hi_address(addr));
+		IWL_SET_BITS(tfd->pa[index], tb1_len, len);
+	} else {
+		IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16,
+			     (u32) (addr & 0xffff));
+		IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16);
+		IWL_SET_BITS(tfd->pa[index], tb2_len, len);
+	}
+
+	IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1);
+
+	return 0;
+}
+EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd);
+
+/**
+ * iwl_txq_update_write_ptr - Send new write index to hardware
+ */
+int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+{
+	u32 reg = 0;
+	int ret = 0;
+	int txq_id = txq->q.id;
+
+	if (txq->need_update == 0)
+		return ret;
+
+	/* if we're trying to save power */
+	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+		/* wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part. */
+		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
+			iwl_set_bit(priv, CSR_GP_CNTRL,
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			return ret;
+		}
+
+		/* restore this queue's parameters in nic hardware. */
+		ret = iwl_grab_nic_access(priv);
+		if (ret)
+			return ret;
+		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+				   txq->q.write_ptr | (txq_id << 8));
+		iwl_release_nic_access(priv);
+
+	/* else not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx). */
+	} else
+		iwl_write32(priv, HBUS_TARG_WRPTR,
+			    txq->q.write_ptr | (txq_id << 8));
+
+	txq->need_update = 0;
+
+	return ret;
+}
+EXPORT_SYMBOL(iwl_txq_update_write_ptr);
+
+
 /**
  * iwl_tx_queue_free - Deallocate DMA queue.
  * @txq: Transmit queue to deallocate.
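iwl_hw_txq_attach_buf_to_tfd above packs two buffers into each pa[] slot of a TFD: even-numbered buffers use the tb1_* fields, odd-numbered buffers the tb2_* fields, which is how MAX_NUM_OF_TBS (20) buffers fit the descriptor. A sketch of the call sequence iwl_tx_skb uses further down, with invented addresses and lengths:

	/* Sketch: the usual two-buffer TFD (values are placeholders).
	 * Buffer 0 is even -> pa[0].tb1_*; buffer 1 is odd -> pa[0].tb2_*. */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, cmd_len);       /* num_tbs: 0 -> 1 */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, payload_phys, payload_len); /* num_tbs: 1 -> 2 */
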
@@ -105,7 +215,7 @@ EXPORT_SYMBOL(iwl_hw_txq_free_tfd);
  */
 static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 {
-	struct iwl4965_queue *q = &txq->q;
+	struct iwl_queue *q = &txq->q;
 	struct pci_dev *dev = priv->pci_dev;
 	int len;
 
@@ -137,28 +247,51 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	memset(txq, 0, sizeof(*txq));
 }
 
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in iwl-4965-hw.h.
+ ***************************************************/
+
+int iwl_queue_space(const struct iwl_queue *q)
 {
-	int txq_id;
+	int s = q->read_ptr - q->write_ptr;
 
-	/* Tx queues */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+	if (q->read_ptr > q->write_ptr)
+		s -= q->n_bd;
 
-	/* Keep-warm buffer */
-	iwl_kw_free(priv);
+	if (s <= 0)
+		s += q->n_window;
+	/* keep some reserve to not confuse empty and full situations */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
 }
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
+EXPORT_SYMBOL(iwl_queue_space);
+
 
 /**
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-static int iwl_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
+static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 			  int count, int slots_num, u32 id)
 {
 	q->n_bd = count;
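A worked example of iwl_queue_space, under an assumed geometry of n_bd = 256 descriptors with n_window = 64 usable entries:

	/* Assumed: n_bd = 256, n_window = 64, read_ptr = 128, write_ptr = 130.
	 * s = 128 - 130 = -2          (two TFDs in flight)
	 * read_ptr <= write_ptr       -> no n_bd wrap correction
	 * s <= 0                      -> s += n_window  -> 62
	 * s -= 2 (empty/full guard)   -> 60 slots reported free
	 */
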
@@ -312,6 +445,24 @@ static int iwl_tx_queue_init(struct iwl_priv *priv,
 
 	return 0;
 }
+/**
+ * iwl_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
+{
+	int txq_id;
+
+	/* Tx queues */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+		iwl_tx_queue_free(priv, &priv->txq[txq_id]);
+
+	/* Keep-warm buffer */
+	iwl_kw_free(priv);
+}
+EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
+
 
 /**
  * iwl_txq_ctx_reset - Reset TX queue context
@@ -324,6 +475,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 {
 	int ret = 0;
 	int txq_id, slots_num;
+	unsigned long flags;
 
 	iwl_kw_free(priv);
 
@@ -336,11 +488,19 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		IWL_ERROR("Keep Warm allocation failed");
 		goto error_kw;
 	}
+	spin_lock_irqsave(&priv->lock, flags);
+	ret = iwl_grab_nic_access(priv);
+	if (unlikely(ret)) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		goto error_reset;
+	}
 
 	/* Turn off all Tx DMA fifos */
-	ret = priv->cfg->ops->lib->disable_tx_fifo(priv);
-	if (unlikely(ret))
-		goto error_reset;
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	iwl_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 
 	/* Tell nic where to find the keep-warm buffer */
 	ret = iwl_kw_init(priv);
@@ -349,8 +509,7 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 		goto error_reset;
 	}
 
-	/* Alloc and init all (default 16) Tx queues,
-	 * including the command queue (#4) */
+	/* Alloc and init all Tx queues, including the command queue (#4) */
 	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
 		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
 			TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -371,3 +530,864 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
  error_kw:
 	return ret;
 }
+/**
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ */
+void iwl_txq_ctx_stop(struct iwl_priv *priv)
+{
+
+	int txq_id;
+	unsigned long flags;
+
+
+	/* Turn off all Tx DMA fifos */
+	spin_lock_irqsave(&priv->lock, flags);
+	if (iwl_grab_nic_access(priv)) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return;
+	}
+
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		iwl_write_direct32(priv,
+				   FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
+		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
+				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
+				    (txq_id), 200);
+	}
+	iwl_release_nic_access(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Deallocate memory for all Tx queues */
+	iwl_hw_txq_ctx_free(priv);
+}
+EXPORT_SYMBOL(iwl_txq_ctx_stop);
+
+/*
+ * handle building the REPLY_TX command notification.
+ */
+static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
+				   struct iwl_tx_cmd *tx_cmd,
+				   struct ieee80211_tx_info *info,
+				   struct ieee80211_hdr *hdr,
+				   int is_unicast, u8 std_id)
+{
+	u16 fc = le16_to_cpu(hdr->frame_control);
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_response(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (ieee80211_is_back_request(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_get_morefrag(hdr))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_qos_data(fc)) {
+		u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (info->flags & IEEE80211_TX_CTL_USE_RTS_CTS) {
+		tx_flags |= TX_CMD_FLG_RTS_MSK;
+		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+	} else if (info->flags & IEEE80211_TX_CTL_USE_CTS_PROTECT) {
+		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+		tx_flags |= TX_CMD_FLG_CTS_MSK;
+	}
+
+	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
+		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
+		if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
+		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
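The protection logic in iwl_tx_cmd_build_basic keeps the two protection flags mutually exclusive, then enables full-TXOP protection whenever either is chosen. Summarized as a truth table over the flags used above:

	/* Protection flags as chosen by iwl_tx_cmd_build_basic:
	 *   mac80211 requests RTS/CTS      -> RTS_MSK set,   CTS_MSK clear
	 *   mac80211 requests CTS-to-self  -> RTS_MSK clear, CTS_MSK set
	 *   either flag set                -> FULL_TXOP_PROT_MSK also set
	 */
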
+#define RTS_HCCA_RETRY_LIMIT 3
+#define RTS_DFAULT_RETRY_LIMIT 60
+
+static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
+				  struct iwl_tx_cmd *tx_cmd,
+				  struct ieee80211_tx_info *info,
+				  u16 fc, int sta_id,
+				  int is_hcca)
+{
+	u8 rts_retry_limit = 0;
+	u8 data_retry_limit = 0;
+	u8 rate_plcp;
+	u16 rate_flags = 0;
+	int rate_idx;
+
+	rate_idx = min(ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xffff,
+			IWL_RATE_COUNT - 1);
+
+	rate_plcp = iwl_rates[rate_idx].plcp;
+
+	rts_retry_limit = (is_hcca) ?
+	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;
+
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+
+	if (ieee80211_is_probe_response(fc)) {
+		data_retry_limit = 3;
+		if (data_retry_limit < rts_retry_limit)
+			rts_retry_limit = data_retry_limit;
+	} else
+		data_retry_limit = IWL_DEFAULT_TX_RETRY;
+
+	if (priv->data_retry_limit != -1)
+		data_retry_limit = priv->data_retry_limit;
+
+
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_index = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+	} else {
+		switch (fc & IEEE80211_FCTL_STYPE) {
+		case IEEE80211_STYPE_AUTH:
+		case IEEE80211_STYPE_DEAUTH:
+		case IEEE80211_STYPE_ASSOC_REQ:
+		case IEEE80211_STYPE_REASSOC_REQ:
+			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
+				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
+			}
+			break;
+		default:
+			break;
+		}
+
+		/* Alternate between antenna A and B for successive frames */
+		if (priv->use_ant_b_for_management_frame) {
+			priv->use_ant_b_for_management_frame = 0;
+			rate_flags |= RATE_MCS_ANT_B_MSK;
+		} else {
+			priv->use_ant_b_for_management_frame = 1;
+			rate_flags |= RATE_MCS_ANT_A_MSK;
+		}
+	}
+
+	tx_cmd->rts_retry_limit = rts_retry_limit;
+	tx_cmd->data_retry_limit = data_retry_limit;
+	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+				      struct ieee80211_tx_info *info,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct sk_buff *skb_frag,
+				      int sta_id)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->alg) {
+	case ALG_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
+		break;
+
+	case ALG_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_key(keyconf, skb_frag,
+			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
+		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
+		break;
+
+	case ALG_WEP:
+		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+		if (keyconf->keylen == WEP_KEY_LEN_128)
+			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		IWL_DEBUG_TX("Configuring packet for WEP encryption "
+			     "with key %d\n", keyconf->keyidx);
+		break;
+
+	default:
+		printk(KERN_ERR "Unknown encode alg %d\n", keyconf->alg);
+		break;
+	}
+}
+
+static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
+{
+	/* 0 - mgmt, 1 - cnt, 2 - data */
+	int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
+	priv->tx_stats[idx].cnt++;
+	priv->tx_stats[idx].bytes += len;
+}
+
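iwl_update_tx_stats leans on the 802.11 frame-control layout: the frame type occupies bits 2-3, so masking with IEEE80211_FCTL_FTYPE and shifting right by two yields a dense array index:

	/* Why (fc & IEEE80211_FCTL_FTYPE) >> 2 indexes tx_stats[]:
	 *   IEEE80211_FTYPE_MGMT = 0x00  ->  0   (mgmt)
	 *   IEEE80211_FTYPE_CTL  = 0x04  ->  1   (cnt)
	 *   IEEE80211_FTYPE_DATA = 0x08  ->  2   (data)
	 */
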
+/*
+ * start REPLY_TX command process
+ */
+int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_tfd_frame *tfd;
+	u32 *control_flags;
+	int txq_id = skb_get_queue_mapping(skb);
+	struct iwl_tx_queue *txq = NULL;
+	struct iwl_queue *q = NULL;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	struct iwl_cmd *out_cmd = NULL;
+	struct iwl_tx_cmd *tx_cmd;
+	u16 len, idx, len_org;
+	u16 seq_number = 0;
+	u8 id, hdr_len, unicast;
+	u8 sta_id;
+	u16 fc;
+	u8 wait_write_ptr = 0;
+	u8 tid = 0;
+	u8 *qc = NULL;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	if (!priv->vif) {
+		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
+		goto drop_unlock;
+	}
+
+	if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) ==
+	     IWL_INVALID_RATE) {
+		IWL_ERROR("ERROR: No TX rate available.\n");
+		goto drop_unlock;
+	}
+
+	unicast = !is_multicast_ether_addr(hdr->addr1);
+	id = 0;
+
+	fc = le16_to_cpu(hdr->frame_control);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (ieee80211_is_auth(fc))
+		IWL_DEBUG_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_request(fc))
+		IWL_DEBUG_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_request(fc))
+		IWL_DEBUG_TX("Sending REASSOC frame\n");
+#endif
+
+	/* drop all data frames if we are not associated */
+	if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
+	    (!iwl_is_associated(priv) ||
+	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
+	     !priv->assoc_station_added)) {
+		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
+		goto drop_unlock;
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	hdr_len = ieee80211_get_hdrlen(fc);
+
+	/* Find (or create) index into station table for destination station */
+	sta_id = iwl_get_sta_id(priv, hdr);
+	if (sta_id == IWL_INVALID_STATION) {
+		DECLARE_MAC_BUF(mac);
+
+		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
+			       print_mac(mac, hdr->addr1));
+		goto drop;
+	}
+
+	IWL_DEBUG_TX("station Id %d\n", sta_id);
+
+	if (ieee80211_is_qos_data(fc)) {
+		qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
+		tid = qc[0] & 0xf;
+		seq_number = priv->stations[sta_id].tid[tid].seq_number &
+				IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl = cpu_to_le16(seq_number) |
+				(hdr->seq_ctrl &
+				 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
+		seq_number += 0x10;
+#ifdef CONFIG_IWL4965_HT
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
+#endif /* CONFIG_IWL4965_HT */
+	}
+
+	/* Descriptor for chosen Tx queue */
+	txq = &priv->txq[txq_id];
+	q = &txq->q;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Set up first empty TFD within this queue's circular TFD buffer */
+	tfd = &txq->bd[q->write_ptr];
+	memset(tfd, 0, sizeof(*tfd));
+	control_flags = (u32 *) tfd;
+	idx = get_cmd_index(q, q->write_ptr, 0);
+
+	/* Set up driver data for this TFD */
+	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
+	txq->txb[q->write_ptr].skb[0] = skb;
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = &txq->cmd[idx];
+	tx_cmd = &out_cmd->cmd.tx;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD index within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = REPLY_TX;
+	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+				INDEX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct iwl_tx_cmd) +
+		sizeof(struct iwl_cmd_header) + hdr_len;
+
+	len_org = len;
+	len = (len + 3) & ~3;
+
+	if (len_org != len)
+		len_org = 1;
+	else
+		len_org = 0;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
+		     offsetof(struct iwl_cmd, hdr);
+
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
+
+	if (!(info->flags & IEEE80211_TX_CTL_DO_NOT_ENCRYPT))
+		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	len = skb->len - hdr_len;
+	if (len) {
+		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
+					   len, PCI_DMA_TODEVICE);
+		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
+	}
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (len_org)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+	/* TODO need this for burst mode later on */
+	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, unicast, sta_id);
+
+	/* set is_hcca to 0; it probably will never be implemented */
+	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
+
+	iwl_update_tx_stats(priv, fc, len);
+
+	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
+		offsetof(struct iwl_tx_cmd, scratch);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
+
+	if (!ieee80211_get_morefrag(hdr)) {
+		txq->need_update = 1;
+		if (qc)
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
+
+	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	ret = iwl_txq_update_write_ptr(priv, txq);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (ret)
+		return ret;
+
+	if ((iwl_queue_space(q) < q->high_mark)
+	    && priv->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&priv->lock, flags);
+			txq->need_update = 1;
+			iwl_txq_update_write_ptr(priv, txq);
+			spin_unlock_irqrestore(&priv->lock, flags);
+		}
+
+		ieee80211_stop_queue(priv->hw, skb_get_queue_mapping(skb));
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+drop:
+	return -1;
+}
+EXPORT_SYMBOL(iwl_tx_skb);
+
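The len/len_org juggling in iwl_tx_skb rounds the concatenated Tx command + MAC header up to a dword boundary and reuses len_org as a pad flag. A worked example, assuming the command + header total comes to 202 bytes (the exact figure depends on struct sizes):

	/* Assumed: sizeof(iwl_tx_cmd) + sizeof(iwl_cmd_header) + hdr_len = 202.
	 * len_org = 202;
	 * len = (202 + 3) & ~3;   -> 204: two pad bytes follow the MAC header
	 * len_org != len          -> len_org reused as a flag, set to 1
	 *                         -> TX_CMD_FLG_MH_PAD_MSK is set for the device
	 */
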
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * iwl_enqueue_hcmd - enqueue a uCode command
+ * @priv: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate that the operation
+ * failed. On success, it returns the index (> 0) of the command in
+ * the command queue.
+ */
+int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
+	struct iwl_queue *q = &txq->q;
+	struct iwl_tfd_frame *tfd;
+	u32 *control_flags;
+	struct iwl_cmd *out_cmd;
+	u32 idx;
+	u16 fix_size;
+	dma_addr_t phys_addr;
+	int ret;
+	unsigned long flags;
+
+	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
+
+	/* If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE, and it is sent as a 'small' command then
+	 * we will need to increase the size of the TFD entries */
+	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+	       !(cmd->meta.flags & CMD_SIZE_HUGE));
+
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_INFO("Not sending command - RF KILL");
+		return -EIO;
+	}
+
+	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
+		IWL_ERROR("No space for Tx\n");
+		return -ENOSPC;
+	}
+
+	spin_lock_irqsave(&priv->hcmd_lock, flags);
+
+	tfd = &txq->bd[q->write_ptr];
+	memset(tfd, 0, sizeof(*tfd));
+
+	control_flags = (u32 *) tfd;
+
+	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
+	out_cmd = &txq->cmd[idx];
+
+	out_cmd->hdr.cmd = cmd->id;
+	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
+	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+	/* At this point, the out_cmd now has all of the incoming cmd
+	 * information */
+
+	out_cmd->hdr.flags = 0;
+	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
+			INDEX_TO_SEQ(q->write_ptr));
+	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
+		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
+
+	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
+			offsetof(struct iwl_cmd, hdr);
+	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
+
+	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
+		     "%d bytes at %d[%d]:%d\n",
+		     get_cmd_string(out_cmd->hdr.cmd),
+		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);
+
+	txq->need_update = 1;
+
+	/* Set up entry in queue's byte count circular buffer */
+	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
+
+	/* Increment and update queue's write index */
+	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
+	ret = iwl_txq_update_write_ptr(priv, txq);
+
+	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+	return ret ? ret : idx;
+}
+
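A hedged usage sketch for iwl_enqueue_hcmd; the command ID and payload struct are invented stand-ins, and only the iwl_host_cmd fields touched by the function above are assumed to exist:

	/* Hypothetical caller: queue a command asynchronously. */
	struct my_payload payload = { /* ... */ };
	struct iwl_host_cmd cmd = {
		.id = MY_EXAMPLE_CMD,	/* invented command ID */
		.len = sizeof(payload),
		.data = &payload,
		.meta.flags = CMD_ASYNC,
	};
	int idx = iwl_enqueue_hcmd(priv, &cmd);
	if (idx < 0)
		IWL_ERROR("enqueue failed: %d\n", idx); /* rfkill or queue full */
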
+int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	struct iwl_tx_info *tx_info;
+	int nfreed = 0;
+
+	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+			  "is out of range [0-%d] %d %d.\n", txq_id,
+			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		tx_info = &txq->txb[txq->q.read_ptr];
+		ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0]);
+		tx_info->skb[0] = NULL;
+
+		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
+			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+
+		iwl_hw_txq_free_tfd(priv, txq);
+		nfreed++;
+	}
+	return nfreed;
+}
+EXPORT_SYMBOL(iwl_tx_queue_reclaim);
+
+
+/**
+ * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
+{
+	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int nfreed = 0;
+
+	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
+		IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
+			  "is out of range [0-%d] %d %d.\n", txq_id,
+			  index, q->n_bd, q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
+		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (nfreed > 1) {
+			IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
+					q->write_ptr, q->read_ptr);
+			queue_work(priv->workqueue, &priv->restart);
+		}
+		nfreed++;
+	}
+}
+
+/**
+ * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int index = SEQ_TO_INDEX(sequence);
+	int huge = sequence & SEQ_HUGE_FRAME;
+	int cmd_index;
+	struct iwl_cmd *cmd;
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue then a command routing bug has been introduced
+	 * in the queue management code. */
+	if (txq_id != IWL_CMD_QUEUE_NUM)
+		IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
+			  txq_id, pkt->hdr.cmd);
+	BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
+
+	cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
+	cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
+
+	/* Input error checking is done when commands are added to queue. */
+	if (cmd->meta.flags & CMD_WANT_SKB) {
+		cmd->meta.source->u.skb = rxb->skb;
+		rxb->skb = NULL;
+	} else if (cmd->meta.u.callback &&
+		   !cmd->meta.u.callback(priv, cmd, rxb->skb))
+		rxb->skb = NULL;
+
+	iwl_hcmd_queue_reclaim(priv, txq_id, index);
+
+	if (!(cmd->meta.flags & CMD_ASYNC)) {
+		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+		wake_up_interruptible(&priv->wait_command_queue);
+	}
+}
+EXPORT_SYMBOL(iwl_tx_cmd_complete);
+
+
+#ifdef CONFIG_IWL4965_HT
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
+ */
+static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
+{
+	int txq_id;
+
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
+{
+	int sta_id;
+	int tx_fifo;
+	int txq_id;
+	int ret;
+	unsigned long flags;
+	struct iwl_tid_data *tid_data;
+	DECLARE_MAC_BUF(mac);
+
+	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
+		tx_fifo = default_tid_to_tx_fifo[tid];
+	else
+		return -EINVAL;
+
+	IWL_WARNING("%s on ra = %s tid = %d\n",
+			__func__, print_mac(mac, ra), tid);
+
+	sta_id = iwl_find_station(priv, ra);
+	if (sta_id == IWL_INVALID_STATION)
+		return -ENXIO;
+
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
+		IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = iwl_txq_ctx_activate_free(priv);
+	if (txq_id == -1)
+		return -ENXIO;
+
+	spin_lock_irqsave(&priv->sta_lock, flags);
+	tid_data = &priv->stations[sta_id].tid[tid];
+	*ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	spin_unlock_irqrestore(&priv->sta_lock, flags);
+
+	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
+						  sta_id, tid, *ssn);
+	if (ret)
+		return ret;
+
+	if (tid_data->tfds_in_queue == 0) {
+		printk(KERN_ERR "HW queue is empty\n");
+		tid_data->agg.state = IWL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+	} else {
+		IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
+			     tid_data->tfds_in_queue);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(iwl_tx_agg_start);
+
+int iwl_tx_agg_stop(struct iwl_priv *priv, const u8 *ra, u16 tid)
+{
+	int tx_fifo_id, txq_id, sta_id, ssn = -1;
+	struct iwl_tid_data *tid_data;
+	int ret, write_ptr, read_ptr;
+	unsigned long flags;
+	DECLARE_MAC_BUF(mac);
+
+	if (!ra) {
+		IWL_ERROR("ra = NULL\n");
+		return -EINVAL;
+	}
+
+	if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
+		tx_fifo_id = default_tid_to_tx_fifo[tid];
+	else
+		return -EINVAL;
+
+	sta_id = iwl_find_station(priv, ra);
+
+	if (sta_id == IWL_INVALID_STATION)
+		return -ENXIO;
+
+	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
+		IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
+
+	tid_data = &priv->stations[sta_id].tid[tid];
+	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+	txq_id = tid_data->agg.txq_id;
+	write_ptr = priv->txq[txq_id].q.write_ptr;
+	read_ptr = priv->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
+		priv->stations[sta_id].tid[tid].agg.state =
+				IWL_EMPTYING_HW_QUEUE_DELBA;
+		return 0;
+	}
+
+	IWL_DEBUG_HT("HW queue is empty\n");
+	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+						   tx_fifo_id);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	if (ret)
+		return ret;
+
+	ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+
+	return 0;
+}
+EXPORT_SYMBOL(iwl_tx_agg_stop);
+
+int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
+{
+	struct iwl_queue *q = &priv->txq[txq_id].q;
+	u8 *addr = priv->stations[sta_id].sta.sta.addr;
+	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
+
+	switch (priv->stations[sta_id].tid[tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if (txq_id == tid_data->agg.txq_id &&
+		    q->read_ptr == q->write_ptr) {
+			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = default_tid_to_tx_fifo[tid];
+			IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
+			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
+							     ssn, tx_fifo);
+			tid_data->agg.state = IWL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+		}
+		break;
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IWL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
+		}
+		break;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(iwl_txq_check_empty);
+#endif /* CONFIG_IWL4965_HT */
+
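Taken together, iwl_tx_agg_start, iwl_tx_agg_stop and iwl_txq_check_empty drive a small per-<sta,tid> aggregation state machine; the two EMPTYING states exist so a BA session only switches over once the hardware queue has drained. The transitions implemented above:

	/*   IWL_AGG_OFF --agg_start, queue busy-----> IWL_EMPTYING_HW_QUEUE_ADDBA
	 *   IWL_AGG_OFF --agg_start, queue empty----> IWL_AGG_ON
	 *   IWL_EMPTYING_HW_QUEUE_ADDBA --drained (check_empty)--> IWL_AGG_ON
	 *   IWL_AGG_ON  --agg_stop, queue busy------> IWL_EMPTYING_HW_QUEUE_DELBA
	 *   IWL_AGG_ON  --agg_stop, queue empty-----> IWL_AGG_OFF
	 *   IWL_EMPTYING_HW_QUEUE_DELBA --drained (check_empty)--> IWL_AGG_OFF
	 */
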
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+
+const char *iwl_get_tx_fail_reason(u32 status)
+{
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+		TX_STATUS_ENTRY(SHORT_LIMIT);
+		TX_STATUS_ENTRY(LONG_LIMIT);
+		TX_STATUS_ENTRY(FIFO_UNDERRUN);
+		TX_STATUS_ENTRY(MGMNT_ABORT);
+		TX_STATUS_ENTRY(NEXT_FRAG);
+		TX_STATUS_ENTRY(LIFE_EXPIRE);
+		TX_STATUS_ENTRY(DEST_PS);
+		TX_STATUS_ENTRY(ABORTED);
+		TX_STATUS_ENTRY(BT_RETRY);
+		TX_STATUS_ENTRY(STA_INVALID);
+		TX_STATUS_ENTRY(FRAG_DROPPED);
+		TX_STATUS_ENTRY(TID_DISABLE);
+		TX_STATUS_ENTRY(FRAME_FLUSHED);
+		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+		TX_STATUS_ENTRY(TX_LOCKED);
+		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+}
+EXPORT_SYMBOL(iwl_get_tx_fail_reason);
+#endif /* CONFIG_IWLWIFI_DEBUG */