diff options
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-tx.c | 662 |
1 files changed, 662 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index 870d257bded3..a37ced58c661 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * | 27 | * |
28 | *****************************************************************************/ | 28 | *****************************************************************************/ |
29 | 29 | ||
30 | #include <linux/etherdevice.h> | ||
30 | #include <net/mac80211.h> | 31 | #include <net/mac80211.h> |
31 | #include "iwl-eeprom.h" | 32 | #include "iwl-eeprom.h" |
32 | #include "iwl-dev.h" | 33 | #include "iwl-dev.h" |
@@ -95,6 +96,89 @@ int iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |||
95 | } | 96 | } |
96 | EXPORT_SYMBOL(iwl_hw_txq_free_tfd); | 97 | EXPORT_SYMBOL(iwl_hw_txq_free_tfd); |
97 | 98 | ||
99 | |||
100 | int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr, | ||
101 | dma_addr_t addr, u16 len) | ||
102 | { | ||
103 | int index, is_odd; | ||
104 | struct iwl_tfd_frame *tfd = ptr; | ||
105 | u32 num_tbs = IWL_GET_BITS(*tfd, num_tbs); | ||
106 | |||
107 | /* Each TFD can point to a maximum 20 Tx buffers */ | ||
108 | if ((num_tbs >= MAX_NUM_OF_TBS) || (num_tbs < 0)) { | ||
109 | IWL_ERROR("Error can not send more than %d chunks\n", | ||
110 | MAX_NUM_OF_TBS); | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | |||
114 | index = num_tbs / 2; | ||
115 | is_odd = num_tbs & 0x1; | ||
116 | |||
117 | if (!is_odd) { | ||
118 | tfd->pa[index].tb1_addr = cpu_to_le32(addr); | ||
119 | IWL_SET_BITS(tfd->pa[index], tb1_addr_hi, | ||
120 | iwl_get_dma_hi_address(addr)); | ||
121 | IWL_SET_BITS(tfd->pa[index], tb1_len, len); | ||
122 | } else { | ||
123 | IWL_SET_BITS(tfd->pa[index], tb2_addr_lo16, | ||
124 | (u32) (addr & 0xffff)); | ||
125 | IWL_SET_BITS(tfd->pa[index], tb2_addr_hi20, addr >> 16); | ||
126 | IWL_SET_BITS(tfd->pa[index], tb2_len, len); | ||
127 | } | ||
128 | |||
129 | IWL_SET_BITS(*tfd, num_tbs, num_tbs + 1); | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | EXPORT_SYMBOL(iwl_hw_txq_attach_buf_to_tfd); | ||
134 | |||
/**
 * iwl_txq_update_write_ptr - Send new write index to hardware
 * @priv: driver private data
 * @txq:  Tx queue whose write pointer should be pushed to the device
 *
 * Tells the device the index just past the latest filled TFD so it can
 * start DMA on the new entries.  No-op unless txq->need_update is set.
 * Returns 0 on success, or the error from iwl_grab_nic_access().
 */
int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	u32 reg = 0;
	int ret = 0;
	int txq_id = txq->q.id;

	/* nothing queued since the last update — nothing to tell the NIC */
	if (txq->need_update == 0)
		return ret;

	/* if we're trying to save power */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part. */
		reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			/* request wakeup and bail; need_update stays set so
			 * the write is retried after the wake interrupt */
			IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
			iwl_set_bit(priv, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			return ret;
		}

		/* restore this queue's parameters in nic hardware. */
		ret = iwl_grab_nic_access(priv);
		if (ret)
			return ret;
		/* write index goes in the low byte, queue id in bits 8+ */
		iwl_write_direct32(priv, HBUS_TARG_WRPTR,
				   txq->q.write_ptr | (txq_id << 8));
		iwl_release_nic_access(priv);

	/* else not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx). */
	} else
		iwl_write32(priv, HBUS_TARG_WRPTR,
			    txq->q.write_ptr | (txq_id << 8));

	txq->need_update = 0;

	return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
180 | |||
181 | |||
98 | /** | 182 | /** |
99 | * iwl_tx_queue_free - Deallocate DMA queue. | 183 | * iwl_tx_queue_free - Deallocate DMA queue. |
100 | * @txq: Transmit queue to deallocate. | 184 | * @txq: Transmit queue to deallocate. |
@@ -137,6 +221,47 @@ static void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) | |||
137 | memset(txq, 0, sizeof(*txq)); | 221 | memset(txq, 0, sizeof(*txq)); |
138 | } | 222 | } |
139 | 223 | ||
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low-mark and high-mark limits.  If, after queuing
 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped.
 * When reclaiming packets (on a 'tx done' IRQ), if free space becomes
 * > high mark, the Tx queue is resumed.
 *
 * See more detailed info in iwl-4965-hw.h.
 ***************************************************/
246 | |||
247 | int iwl_queue_space(const struct iwl_queue *q) | ||
248 | { | ||
249 | int s = q->read_ptr - q->write_ptr; | ||
250 | |||
251 | if (q->read_ptr > q->write_ptr) | ||
252 | s -= q->n_bd; | ||
253 | |||
254 | if (s <= 0) | ||
255 | s += q->n_window; | ||
256 | /* keep some reserve to not confuse empty and full situations */ | ||
257 | s -= 2; | ||
258 | if (s < 0) | ||
259 | s = 0; | ||
260 | return s; | ||
261 | } | ||
262 | EXPORT_SYMBOL(iwl_queue_space); | ||
263 | |||
264 | |||
140 | /** | 265 | /** |
141 | * iwl_hw_txq_ctx_free - Free TXQ Context | 266 | * iwl_hw_txq_ctx_free - Free TXQ Context |
142 | * | 267 | * |
@@ -371,3 +496,540 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv) | |||
371 | error_kw: | 496 | error_kw: |
372 | return ret; | 497 | return ret; |
373 | } | 498 | } |
499 | |||
/*
 * iwl_tx_cmd_build_basic - fill in the fixed part of a REPLY_TX command
 *
 * Sets lifetime, ACK/sequence-control policy, station id, QoS TID,
 * RTS/CTS protection flags and the PM frame timeout, based on the
 * outgoing frame's 802.11 header and mac80211 tx control info.
 */
static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
				   struct iwl_tx_cmd *tx_cmd,
				   struct ieee80211_tx_control *ctrl,
				   struct ieee80211_hdr *hdr,
				   int is_unicast, u8 std_id)
{
	u16 fc = le16_to_cpu(hdr->frame_control);
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		/* only the first fragment of a probe response gets the TSF
		 * flag (fragment number in low 4 bits of seq_ctrl is 0) */
		if (ieee80211_is_probe_response(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_request(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;


	tx_cmd->sta_id = std_id;
	if (ieee80211_get_morefrag(hdr))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_qos_data(fc)) {
		/* QoS data: copy the TID; uCode manages the sequence number */
		u8 *qc = ieee80211_get_qos_ctrl(hdr, ieee80211_get_hdrlen(fc));
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	/* RTS/CTS and CTS-to-self are mutually exclusive */
	if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
		tx_flags |= TX_CMD_FLG_RTS_MSK;
		tx_flags &= ~TX_CMD_FLG_CTS_MSK;
	} else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
		tx_flags &= ~TX_CMD_FLG_RTS_MSK;
		tx_flags |= TX_CMD_FLG_CTS_MSK;
	}

	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
		/* (re)assoc requests get a slightly longer PM frame timeout */
		if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ ||
		    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}
567 | |||
/* RTS retry limits: HCCA uses a short limit, everything else the default */
#define RTS_HCCA_RETRY_LIMIT 3
#define RTS_DFAULT_RETRY_LIMIT 60

/*
 * iwl_tx_cmd_build_rate - choose rate, retry limits and antenna flags
 *
 * Data frames are handed to uCode rate scaling (STA_RATE flag); other
 * frames use the fixed rate from mac80211's tx control, and management
 * frames alternate the transmit antenna between successive frames.
 */
static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
				  struct iwl_tx_cmd *tx_cmd,
				  struct ieee80211_tx_control *ctrl,
				  u16 fc, int sta_id,
				  int is_hcca)
{
	u8 rts_retry_limit = 0;
	u8 data_retry_limit = 0;
	u8 rate_plcp;
	u16 rate_flags = 0;
	/* clamp the mac80211 rate index into the driver's rate table */
	int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);

	rate_plcp = iwl_rates[rate_idx].plcp;

	rts_retry_limit = (is_hcca) ?
	    RTS_HCCA_RETRY_LIMIT : RTS_DFAULT_RETRY_LIMIT;

	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;


	if (ieee80211_is_probe_response(fc)) {
		/* probe responses retry only a few times, and RTS retries
		 * never exceed data retries */
		data_retry_limit = 3;
		if (data_retry_limit < rts_retry_limit)
			rts_retry_limit = data_retry_limit;
	} else
		data_retry_limit = IWL_DEFAULT_TX_RETRY;

	/* a configured retry limit (-1 = unset) overrides the default */
	if (priv->data_retry_limit != -1)
		data_retry_limit = priv->data_retry_limit;


	if (ieee80211_is_data(fc)) {
		/* uCode rate scaling picks the actual rate for data frames */
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
	} else {
		switch (fc & IEEE80211_FCTL_STYPE) {
		case IEEE80211_STYPE_AUTH:
		case IEEE80211_STYPE_DEAUTH:
		case IEEE80211_STYPE_ASSOC_REQ:
		case IEEE80211_STYPE_REASSOC_REQ:
			/* prefer CTS-to-self over RTS for these frames */
			if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
				tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
				tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
			}
			break;
		default:
			break;
		}

		/* Alternate between antenna A and B for successive frames */
		if (priv->use_ant_b_for_management_frame) {
			priv->use_ant_b_for_management_frame = 0;
			rate_flags |= RATE_MCS_ANT_B_MSK;
		} else {
			priv->use_ant_b_for_management_frame = 1;
			rate_flags |= RATE_MCS_ANT_A_MSK;
		}
	}

	tx_cmd->rts_retry_limit = rts_retry_limit;
	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
}
635 | |||
/*
 * iwl_tx_cmd_build_hwcrypto - fill in the security fields of a Tx command
 *
 * Copies key material for the station's configured algorithm (CCMP,
 * TKIP or WEP) into the Tx command so uCode can encrypt the frame in
 * hardware.
 */
static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
				      struct ieee80211_tx_control *ctl,
				      struct iwl_tx_cmd *tx_cmd,
				      struct sk_buff *skb_frag,
				      int sta_id)
{
	struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
	struct iwl_wep_key *wepkey;
	int keyidx = 0;

	/* hardware exposes only 4 key slots */
	BUG_ON(ctl->hw_key->hw_key_idx > 3);

	switch (keyinfo->alg) {
	case ALG_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
		if (ctl->flags & IEEE80211_TXCTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
		break;

	case ALG_TKIP:
		/* hardware does phase-2 key mixing; derive it per-packet */
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
			IEEE80211_TKIP_P2_KEY, tx_cmd->key);
		IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
		break;

	case ALG_WEP:
		wepkey = &priv->wep_keys[ctl->hw_key->hw_key_idx];
		tx_cmd->sec_ctl = 0;
		if (priv->default_wep_key) {
			/* the WEP key was sent as static */
			keyidx = ctl->hw_key->hw_key_idx;
			/* WEP key material starts at offset 3 in tx_cmd->key */
			memcpy(&tx_cmd->key[3], wepkey->key,
						wepkey->key_size);
			if (wepkey->key_size == WEP_KEY_LEN_128)
				tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		} else {
			/* the WEP key was sent as dynamic */
			keyidx = keyinfo->keyidx;
			memcpy(&tx_cmd->key[3], keyinfo->key,
				keyinfo->keylen);
			if (keyinfo->keylen == WEP_KEY_LEN_128)
				tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		}

		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		IWL_DEBUG_TX("Configuring packet for WEP encryption "
			     "with key %d\n", keyidx);
		break;

	default:
		printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg);
		break;
	}
}
695 | |||
696 | static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len) | ||
697 | { | ||
698 | /* 0 - mgmt, 1 - cnt, 2 - data */ | ||
699 | int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2; | ||
700 | priv->tx_stats[idx].cnt++; | ||
701 | priv->tx_stats[idx].bytes += len; | ||
702 | } | ||
703 | |||
/*
 * iwl_tx_skb - start the REPLY_TX command process for one outgoing frame
 *
 * Validates driver/association state, picks a Tx queue and station,
 * builds the Tx command (header copy, crypto, rate, flags), attaches
 * the command and the skb payload to a TFD, and pushes the queue's
 * write pointer to the device.  Returns 0 on success, negative on
 * failure (frame dropped).
 */
int iwl_tx_skb(struct iwl_priv *priv,
	       struct sk_buff *skb, struct ieee80211_tx_control *ctl)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	int txq_id = ctl->queue;
	struct iwl_tx_queue *txq = NULL;
	struct iwl_queue *q = NULL;
	dma_addr_t phys_addr;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	struct iwl_cmd *out_cmd = NULL;
	struct iwl_tx_cmd *tx_cmd;
	u16 len, idx, len_org;
	u16 seq_number = 0;
	u8 id, hdr_len, unicast;
	u8 sta_id;
	u16 fc;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);
	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP("Dropping - RF KILL\n");
		goto drop_unlock;
	}

	if (!priv->vif) {
		IWL_DEBUG_DROP("Dropping - !priv->vif\n");
		goto drop_unlock;
	}

	if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
		IWL_ERROR("ERROR: No TX rate available.\n");
		goto drop_unlock;
	}

	unicast = !is_multicast_ether_addr(hdr->addr1);
	id = 0;

	fc = le16_to_cpu(hdr->frame_control);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX("Sending AUTH frame\n");
	else if (ieee80211_is_assoc_request(fc))
		IWL_DEBUG_TX("Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_request(fc))
		IWL_DEBUG_TX("Sending REASSOC frame\n");
#endif

	/* drop all data frame if we are not associated */
	if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
	    (!iwl_is_associated(priv) ||
	     ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
	     !priv->assoc_station_added)) {
		IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
		goto drop_unlock;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	hdr_len = ieee80211_get_hdrlen(fc);

	/* Find (or create) index into station table for destination station */
	sta_id = iwl_get_sta_id(priv, hdr);
	if (sta_id == IWL_INVALID_STATION) {
		DECLARE_MAC_BUF(mac);

		IWL_DEBUG_DROP("Dropping - INVALID STATION: %s\n",
			       print_mac(mac, hdr->addr1));
		goto drop;
	}

	IWL_DEBUG_TX("station Id %d\n", sta_id);

	if (ieee80211_is_qos_data(fc)) {
		/* driver manages per-<sta,tid> sequence numbers for QoS data;
		 * keep the fragment bits from the original seq_ctrl */
		qc = ieee80211_get_qos_ctrl(hdr, hdr_len);
		tid = qc[0] & 0xf;
		seq_number = priv->stations[sta_id].tid[tid].seq_number &
				IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl = cpu_to_le16(seq_number) |
				(hdr->seq_ctrl &
					__constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
		seq_number += 0x10;
#ifdef CONFIG_IWL4965_HT
		/* aggregation is on for this <sta,tid> */
		if (ctl->flags & IEEE80211_TXCTL_AMPDU)
			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
#endif /* CONFIG_IWL4965_HT */
	}

	/* Descriptor for chosen Tx queue */
	txq = &priv->txq[txq_id];
	q = &txq->q;

	spin_lock_irqsave(&priv->lock, flags);

	/* Set up first empty TFD within this queue's circular TFD buffer */
	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));
	control_flags = (u32 *) tfd;
	idx = get_cmd_index(q, q->write_ptr, 0);

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb[0] = skb;
	memcpy(&(txq->txb[q->write_ptr].status.control),
	       ctl, sizeof(struct ieee80211_tx_control));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = &txq->cmd[idx];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;

	len_org = len;
	len = (len + 3) & ~3;

	/* NOTE: len_org is reused below as a boolean flag —
	 * 1 means padding was added, 0 means none */
	if (len_org != len)
		len_org = 1;
	else
		len_org = 0;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
		     offsetof(struct iwl_cmd, hdr);

	/* Add buffer containing Tx command and MAC(!) header to TFD's
	 * first entry */
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);

	if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
		iwl_tx_cmd_build_hwcrypto(priv, ctl, tx_cmd, skb, sta_id);

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	len = skb->len - hdr_len;
	if (len) {
		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
					   len, PCI_DMA_TODEVICE);
		iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len);
	}

	/* Tell NIC about any 2-byte padding after MAC header */
	if (len_org)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
	/* TODO need this for burst mode later on */
	iwl_tx_cmd_build_basic(priv, tx_cmd, ctl, hdr, unicast, sta_id);

	/* set is_hcca to 0; it probably will never be implemented */
	iwl_tx_cmd_build_rate(priv, tx_cmd, ctl, fc, sta_id, 0);

	iwl_update_tx_stats(priv, fc, len);

	/* scratch area lets uCode report progress within the command */
	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
		offsetof(struct iwl_tx_cmd, scratch);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);

	if (!ieee80211_get_morefrag(hdr)) {
		/* last (or only) fragment: push write ptr now and commit
		 * the advanced sequence number for this <sta,tid> */
		txq->need_update = 1;
		if (qc)
			priv->stations[sta_id].tid[tid].seq_number = seq_number;
	} else {
		/* more fragments follow; defer the write-pointer update */
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));

	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret)
		return ret;

	/* queue is nearly full: flush any deferred write ptr and ask
	 * mac80211 to stop feeding us frames on this queue */
	if ((iwl_queue_space(q) < q->high_mark)
	    && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		}

		ieee80211_stop_queue(priv->hw, ctl->queue);
	}

	return 0;

drop_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
drop:
	return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
946 | |||
/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/**
 * iwl_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (> 0) of the command in the
 * command queue.
 */
int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
{
	struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd_frame *tfd;
	u32 *control_flags;
	struct iwl_cmd *out_cmd;
	u32 idx;
	u16 fix_size;
	dma_addr_t phys_addr;
	int ret;
	unsigned long flags;

	/* total size on the wire = payload + command header */
	cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
	fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));

	/* If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
	 * we will need to increase the size of the TFD entries */
	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
	       !(cmd->meta.flags & CMD_SIZE_HUGE));

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_INFO("Not sending command - RF KILL");
		return -EIO;
	}

	/* async commands need an extra free slot for their callback */
	if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) {
		IWL_ERROR("No space for Tx\n");
		return -ENOSPC;
	}

	spin_lock_irqsave(&priv->hcmd_lock, flags);

	tfd = &txq->bd[q->write_ptr];
	memset(tfd, 0, sizeof(*tfd));

	control_flags = (u32 *) tfd;

	idx = get_cmd_index(q, q->write_ptr, cmd->meta.flags & CMD_SIZE_HUGE);
	out_cmd = &txq->cmd[idx];

	out_cmd->hdr.cmd = cmd->id;
	memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta));
	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

	/* At this point, the out_cmd now has all of the incoming cmd
	 * information */

	out_cmd->hdr.flags = 0;
	/* sequence encodes queue and index so the response can be matched */
	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) |
			INDEX_TO_SEQ(q->write_ptr));
	if (out_cmd->meta.flags & CMD_SIZE_HUGE)
		out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);

	/* DMA address of the command header within the cmd buffer array */
	phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
			offsetof(struct iwl_cmd, hdr);
	iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);

	IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
		     "%d bytes at %d[%d]:%d\n",
		     get_cmd_string(out_cmd->hdr.cmd),
		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
		     fix_size, q->write_ptr, idx, IWL_CMD_QUEUE_NUM);

	txq->need_update = 1;

	/* Set up entry in queue's byte count circular buffer */
	priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);

	/* Increment and update queue's write index */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	ret = iwl_txq_update_write_ptr(priv, txq);

	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
	return ret ? ret : idx;
}
1035 | |||