author		John W. Linville <linville@tuxdriver.com>	2010-04-15 16:21:34 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-04-15 16:21:34 -0400
commit		5c01d5669356e13f0fb468944c1dd4c6a7e978ad (patch)
tree		fa43345288d7b25fac92b3b35360a177c4947313 /drivers/net/wireless/iwlwifi/iwl-tx.c
parent		fea069152614cdeefba4b2bf80afcddb9c217fc8 (diff)
parent		a5e944f1d955f3819503348426763e21e0413ba6 (diff)

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/net/wireless/ath/ath5k/phy.c
	drivers/net/wireless/wl12xx/wl1271_main.c

Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-tx.c'):

 drivers/net/wireless/iwlwifi/iwl-tx.c | 1091 +--------------------------------
 1 file changed, 25 insertions(+), 1066 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index b798fbabc3b6..1ece2ea09773 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,84 +38,6 @@
 #include "iwl-io.h"
 #include "iwl-helpers.h"
 
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get as from it
- * by way of skb_get_queue_mapping(skb):
- *
- *	VO	0
- *	VI	1
- *	BE	2
- *	BK	3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
-	/* this matches the mac80211 numbers */
-	2, 3, 3, 2, 1, 1, 0, 0
-};
-
-static const u8 ac_to_fifo[] = {
-	IWL_TX_FIFO_VO,
-	IWL_TX_FIFO_VI,
-	IWL_TX_FIFO_BE,
-	IWL_TX_FIFO_BK,
-};
-
-static inline int get_fifo_from_ac(u8 ac)
-{
-	return ac_to_fifo[ac];
-}
-
-static inline int get_queue_from_ac(u16 ac)
-{
-	return ac;
-}
-
-static inline int get_fifo_from_tid(u16 tid)
-{
-	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
-		return get_fifo_from_ac(tid_to_ac[tid]);
-
-	/* no support for TIDs 8-15 yet */
-	return -EINVAL;
-}
-
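
For reference, the TID handling removed above boils down to two table lookups. A minimal user-space sketch of the same logic (standalone C; the FIFO numbers are illustrative placeholders, not the driver's IWL_TX_FIFO_* values):

	#include <stdio.h>

	/* same table as the patch: TIDs 0-7 -> access categories */
	static const unsigned char tid_to_ac[] = { 2, 3, 3, 2, 1, 1, 0, 0 };
	/* placeholder FIFO ids for the VO, VI, BE, BK categories */
	static const unsigned char ac_to_fifo[] = { 3, 2, 1, 0 };

	static int get_fifo_from_tid(unsigned int tid)
	{
		if (tid < sizeof(tid_to_ac))
			return ac_to_fifo[tid_to_ac[tid]];
		return -1;	/* TIDs 8-15 unsupported, like -EINVAL above */
	}

	int main(void)
	{
		unsigned int tid;

		for (tid = 0; tid < 10; tid++)
			printf("tid %u -> fifo %d\n", tid, get_fifo_from_tid(tid));
		return 0;
	}
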
-static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
-				    struct iwl_dma_ptr *ptr, size_t size)
-{
-	ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
-				       GFP_KERNEL);
-	if (!ptr->addr)
-		return -ENOMEM;
-	ptr->size = size;
-	return 0;
-}
-
-static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
-				    struct iwl_dma_ptr *ptr)
-{
-	if (unlikely(!ptr->addr))
-		return;
-
-	dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
-	memset(ptr, 0, sizeof(*ptr));
-}
-
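
The helper pair above tracks a buffer together with its size; a user-space analogue of the pattern (malloc stands in for dma_alloc_coherent, and the final memset makes a repeated free harmless, which the error paths further down rely on):

	#include <stdlib.h>
	#include <string.h>

	struct sized_buf { void *addr; size_t size; };

	static int alloc_sized(struct sized_buf *p, size_t size)
	{
		p->addr = malloc(size);
		if (!p->addr)
			return -1;
		p->size = size;
		return 0;
	}

	static void free_sized(struct sized_buf *p)
	{
		if (!p->addr)
			return;		/* safe to call again after freeing */
		free(p->addr);
		memset(p, 0, sizeof(*p));
	}

	int main(void)
	{
		struct sized_buf buf;

		if (alloc_sized(&buf, 64) == 0) {
			free_sized(&buf);
			free_sized(&buf);	/* no-op the second time */
		}
		return 0;
	}
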
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
@@ -493,598 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 }
 EXPORT_SYMBOL(iwl_tx_queue_reset);
 
-/**
- * iwl_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	/* Tx queues */
-	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-			if (txq_id == IWL_CMD_QUEUE_NUM)
-				iwl_cmd_queue_free(priv);
-			else
-				iwl_tx_queue_free(priv, txq_id);
-	}
-	iwl_free_dma_ptr(priv, &priv->kw);
-
-	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
-	/* free tx queue structure */
-	iwl_free_txq_mem(priv);
-}
-EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
-
-/**
- * iwl_txq_ctx_alloc - allocate TX queue context
- * Allocate all Tx DMA structures and initialize them
- *
- * @param priv
- * @return error code
- */
-int iwl_txq_ctx_alloc(struct iwl_priv *priv)
-{
-	int ret;
-	int txq_id, slots_num;
-	unsigned long flags;
-
-	/* Free all tx/cmd queues and keep-warm buffer */
-	iwl_hw_txq_ctx_free(priv);
-
-	ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
-				priv->hw_params.scd_bc_tbls_size);
-	if (ret) {
-		IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
-		goto error_bc_tbls;
-	}
-	/* Alloc keep-warm buffer */
-	ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
-	if (ret) {
-		IWL_ERR(priv, "Keep Warm allocation failed\n");
-		goto error_kw;
-	}
-
-	/* allocate tx queue structure */
-	ret = iwl_alloc_txq_mem(priv);
-	if (ret)
-		goto error;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Turn off all Tx DMA fifos */
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
-	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Alloc and init all Tx queues, including the command queue (#4) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
-					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
-					txq_id);
-		if (ret) {
-			IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
-			goto error;
-		}
-	}
-
-	return ret;
-
- error:
-	iwl_hw_txq_ctx_free(priv);
-	iwl_free_dma_ptr(priv, &priv->kw);
- error_kw:
-	iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
-	return ret;
-}
-
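
iwl_txq_ctx_alloc above unwinds its error path through labels in reverse allocation order; a user-space sketch of the same goto-unwind idiom (hypothetical buffers, not the driver's structures):

	#include <stdlib.h>

	static int setup(void **a, void **b)
	{
		*a = malloc(128);		/* first resource */
		if (!*a)
			goto err;
		*b = malloc(256);		/* second resource */
		if (!*b)
			goto err_free_a;
		return 0;

	err_free_a:
		free(*a);			/* undo only what succeeded */
	err:
		return -1;
	}

	int main(void)
	{
		void *a, *b;

		if (setup(&a, &b) == 0) {
			free(b);
			free(a);
		}
		return 0;
	}
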
-void iwl_txq_ctx_reset(struct iwl_priv *priv)
-{
-	int txq_id, slots_num;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-
-	/* Turn off all Tx DMA fifos */
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
-	/* Tell NIC where to find the "keep warm" buffer */
-	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Alloc and init all Tx queues, including the command queue (#4) */
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
-		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
-			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
-		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
-	}
-}
-
-/**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl_txq_ctx_stop(struct iwl_priv *priv)
-{
-	int ch;
-	unsigned long flags;
-
-	/* Turn off all Tx DMA fifos */
-	spin_lock_irqsave(&priv->lock, flags);
-
-	priv->cfg->ops->lib->txq_set_sched(priv, 0);
-
-	/* Stop each Tx DMA channel, and wait for it to be idle */
-	for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
-		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
-		iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
-				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
-				    1000);
-	}
-	spin_unlock_irqrestore(&priv->lock, flags);
-}
-EXPORT_SYMBOL(iwl_txq_ctx_stop);
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
-				   struct iwl_tx_cmd *tx_cmd,
-				   struct ieee80211_tx_info *info,
-				   struct ieee80211_hdr *hdr,
-				   u8 std_id)
-{
-	__le16 fc = hdr->frame_control;
-	__le32 tx_flags = tx_cmd->tx_flags;
-
-	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
-		tx_flags |= TX_CMD_FLG_ACK_MSK;
-		if (ieee80211_is_mgmt(fc))
-			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-		if (ieee80211_is_probe_resp(fc) &&
-		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
-			tx_flags |= TX_CMD_FLG_TSF_MSK;
-	} else {
-		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	if (ieee80211_is_back_req(fc))
-		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
-
-	tx_cmd->sta_id = std_id;
-	if (ieee80211_has_morefrags(fc))
-		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
-	if (ieee80211_is_data_qos(fc)) {
-		u8 *qc = ieee80211_get_qos_ctl(hdr);
-		tx_cmd->tid_tspec = qc[0] & 0xf;
-		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
-	} else {
-		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
-	}
-
-	priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
-
-	if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
-		tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
-	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
-	if (ieee80211_is_mgmt(fc)) {
-		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
-		else
-			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
-	} else {
-		tx_cmd->timeout.pm_frame_timeout = 0;
-	}
-
-	tx_cmd->driver_txop = 0;
-	tx_cmd->tx_flags = tx_flags;
-	tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
-				  struct iwl_tx_cmd *tx_cmd,
-				  struct ieee80211_tx_info *info,
-				  __le16 fc)
-{
-	u32 rate_flags;
-	int rate_idx;
-	u8 rts_retry_limit;
-	u8 data_retry_limit;
-	u8 rate_plcp;
-
-	/* Set retry limit on DATA packets and Probe Responses*/
-	if (ieee80211_is_probe_resp(fc))
-		data_retry_limit = 3;
-	else
-		data_retry_limit = IWL_DEFAULT_TX_RETRY;
-	tx_cmd->data_retry_limit = data_retry_limit;
-
-	/* Set retry limit on RTS packets */
-	rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
-	if (data_retry_limit < rts_retry_limit)
-		rts_retry_limit = data_retry_limit;
-	tx_cmd->rts_retry_limit = rts_retry_limit;
-
-	/* DATA packets will use the uCode station table for rate/antenna
-	 * selection */
-	if (ieee80211_is_data(fc)) {
-		tx_cmd->initial_rate_index = 0;
-		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-		return;
-	}
-
-	/**
-	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
-	 * not really a TX rate. Thus, we use the lowest supported rate for
-	 * this band. Also use the lowest supported rate if the stored rate
-	 * index is invalid.
-	 */
-	rate_idx = info->control.rates[0].idx;
-	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
-	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
-		rate_idx = rate_lowest_index(&priv->bands[info->band],
-					     info->control.sta);
-	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
-	if (info->band == IEEE80211_BAND_5GHZ)
-		rate_idx += IWL_FIRST_OFDM_RATE;
-	/* Get PLCP rate for tx_cmd->rate_n_flags */
-	rate_plcp = iwl_rates[rate_idx].plcp;
-	/* Zero out flags for this packet */
-	rate_flags = 0;
-
-	/* Set CCK flag as needed */
-	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
-		rate_flags |= RATE_MCS_CCK_MSK;
-
-	/* Set up RTS and CTS flags for certain packets */
-	switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
-	case cpu_to_le16(IEEE80211_STYPE_AUTH):
-	case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
-	case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
-	case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
-		if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
-			tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
-			tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
-		}
-		break;
-	default:
-		break;
-	}
-
-	/* Set up antennas */
-	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
-	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
-
-	/* Set the rate in the TX cmd */
-	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
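
The legacy-rate fallback above replaces MCS-flagged or out-of-range indices with the lowest supported rate and shifts 5 GHz indices past the CCK rates; a standalone sketch with illustrative constants (placeholders for IWL_RATE_COUNT_LEGACY and IWL_FIRST_OFDM_RATE):

	#include <stdio.h>

	#define RATE_COUNT_LEGACY	12	/* placeholder value */
	#define FIRST_OFDM_RATE		4	/* placeholder value */

	static int pick_rate(int idx, int is_mcs, int is_5ghz)
	{
		if (is_mcs || idx < 0 || idx > RATE_COUNT_LEGACY)
			idx = 0;			/* lowest supported rate */
		if (is_5ghz)
			idx += FIRST_OFDM_RATE;		/* 5 GHz has no CCK rates */
		return idx;
	}

	int main(void)
	{
		printf("%d %d %d\n", pick_rate(7, 0, 0),
		       pick_rate(-1, 0, 0), pick_rate(2, 0, 1));
		return 0;
	}
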
-static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
-				      struct ieee80211_tx_info *info,
-				      struct iwl_tx_cmd *tx_cmd,
-				      struct sk_buff *skb_frag,
-				      int sta_id)
-{
-	struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
-	switch (keyconf->alg) {
-	case ALG_CCMP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
-		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
-		if (info->flags & IEEE80211_TX_CTL_AMPDU)
-			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
-		IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
-		break;
-
-	case ALG_TKIP:
-		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
-		ieee80211_get_tkip_key(keyconf, skb_frag,
-				       IEEE80211_TKIP_P2_KEY, tx_cmd->key);
-		IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
-		break;
-
-	case ALG_WEP:
-		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
-			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
-		if (keyconf->keylen == WEP_KEY_LEN_128)
-			tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
-
-		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
-		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
-			     "with key %d\n", keyconf->keyidx);
-		break;
-
-	default:
-		IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
-		break;
-	}
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-	struct ieee80211_sta *sta = info->control.sta;
-	struct iwl_station_priv *sta_priv = NULL;
-	struct iwl_tx_queue *txq;
-	struct iwl_queue *q;
-	struct iwl_device_cmd *out_cmd;
-	struct iwl_cmd_meta *out_meta;
-	struct iwl_tx_cmd *tx_cmd;
-	int swq_id, txq_id;
-	dma_addr_t phys_addr;
-	dma_addr_t txcmd_phys;
-	dma_addr_t scratch_phys;
-	u16 len, len_org, firstlen, secondlen;
-	u16 seq_number = 0;
-	__le16 fc;
-	u8 hdr_len;
-	u8 sta_id;
-	u8 wait_write_ptr = 0;
-	u8 tid = 0;
-	u8 *qc = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	if (iwl_is_rfkill(priv)) {
-		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-		goto drop_unlock;
-	}
-
-	fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
-	if (ieee80211_is_auth(fc))
-		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
-	else if (ieee80211_is_assoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
-	else if (ieee80211_is_reassoc_req(fc))
-		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
-	hdr_len = ieee80211_hdrlen(fc);
-
-	/* Find (or create) index into station table for destination station */
-	if (info->flags & IEEE80211_TX_CTL_INJECTED)
-		sta_id = priv->hw_params.bcast_sta_id;
-	else
-		sta_id = iwl_get_sta_id(priv, hdr);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-			       hdr->addr1);
-		goto drop_unlock;
-	}
-
-	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
-	if (sta)
-		sta_priv = (void *)sta->drv_priv;
-
-	if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
-	    sta_priv->asleep) {
-		WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
-		/*
-		 * This sends an asynchronous command to the device,
-		 * but we can rely on it being processed before the
-		 * next frame is processed -- and the next frame to
-		 * this station is the one that will consume this
-		 * counter.
-		 * For now set the counter to just 1 since we do not
-		 * support uAPSD yet.
-		 */
-		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
-	}
-
-	txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
-	if (ieee80211_is_data_qos(fc)) {
-		qc = ieee80211_get_qos_ctl(hdr);
-		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (unlikely(tid >= MAX_TID_COUNT))
-			goto drop_unlock;
-		seq_number = priv->stations[sta_id].tid[tid].seq_number;
-		seq_number &= IEEE80211_SCTL_SEQ;
-		hdr->seq_ctrl = hdr->seq_ctrl &
-				cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(seq_number);
-		seq_number += 0x10;
-		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
-		    priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
-			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-		}
-	}
-
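
The sequence-number arithmetic above relies on the 802.11 seq_ctrl layout: the low 4 bits hold the fragment number (IEEE80211_SCTL_FRAG, 0x000f) and the upper 12 bits the sequence number (IEEE80211_SCTL_SEQ, 0xfff0), so adding 0x10 advances the sequence by one. A standalone sketch of the masking:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t seq_number = 0x0ff0;	/* driver's per-TID counter */
		uint16_t seq_ctrl = 0x0003;	/* header field: fragment 3 */

		/* keep the fragment bits, install the sequence bits */
		seq_ctrl = (seq_ctrl & 0x000f) | (seq_number & 0xfff0);
		seq_number += 0x10;		/* next frame: sequence + 1 */

		printf("seq_ctrl=0x%04x next=0x%04x\n", seq_ctrl, seq_number);
		return 0;
	}
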
-	txq = &priv->txq[txq_id];
-	swq_id = txq->swq_id;
-	q = &txq->q;
-
-	if (unlikely(iwl_queue_space(q) < q->high_mark))
-		goto drop_unlock;
-
-	if (ieee80211_is_data_qos(fc))
-		priv->stations[sta_id].tid[tid].tfds_in_queue++;
-
-	/* Set up driver data for this TFD */
-	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
-	txq->txb[q->write_ptr].skb[0] = skb;
-
-	/* Set up first empty entry in queue's array of Tx/cmd buffers */
-	out_cmd = txq->cmd[q->write_ptr];
-	out_meta = &txq->meta[q->write_ptr];
-	tx_cmd = &out_cmd->cmd.tx;
-	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
-	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
-	/*
-	 * Set up the Tx-command (not MAC!) header.
-	 * Store the chosen Tx queue and TFD index within the sequence field;
-	 * after Tx, uCode's Tx response will return this value so driver can
-	 * locate the frame within the tx queue and do post-tx processing.
-	 */
-	out_cmd->hdr.cmd = REPLY_TX;
-	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-				INDEX_TO_SEQ(q->write_ptr)));
-
-	/* Copy MAC header from skb into command buffer */
-	memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
-	/* Total # bytes to be transmitted */
-	len = (u16)skb->len;
-	tx_cmd->len = cpu_to_le16(len);
-
-	if (info->control.hw_key)
-		iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
-	/* TODO need this for burst mode later on */
-	iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
-	iwl_dbg_log_tx_data_frame(priv, len, hdr);
-
-	iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
-	iwl_update_stats(priv, true, fc, len);
-	/*
-	 * Use the first empty entry in this queue's command buffer array
-	 * to contain the Tx command and MAC header concatenated together
-	 * (payload data will be in another buffer).
-	 * Size of this varies, due to varying MAC header length.
-	 * If end is not dword aligned, we'll have 2 extra bytes at the end
-	 * of the MAC header (device reads on dword boundaries).
-	 * We'll tell device about this padding later.
-	 */
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-
-	len_org = len;
-	firstlen = len = (len + 3) & ~3;
-
-	if (len_org != len)
-		len_org = 1;
-	else
-		len_org = 0;
-
-	/* Tell NIC about any 2-byte padding after MAC header */
-	if (len_org)
-		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
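
The padding logic above rounds the concatenated command header, Tx command and MAC header up to the next dword boundary, and flags the frame when pad bytes were added; the arithmetic in isolation:

	#include <stdio.h>

	int main(void)
	{
		unsigned int len = 46;			/* illustrative length */
		unsigned int firstlen = (len + 3) & ~3u;/* round up to dword */
		int padded = (firstlen != len);		/* sets TX_CMD_FLG_MH_PAD_MSK */

		printf("len=%u firstlen=%u padded=%d\n", len, firstlen, padded);
		return 0;
	}
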
-	/* Physical address of this Tx command's header (not MAC header!),
-	 * within command buffer array. */
-	txcmd_phys = pci_map_single(priv->pci_dev,
-				    &out_cmd->hdr, len,
-				    PCI_DMA_BIDIRECTIONAL);
-	pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
-	pci_unmap_len_set(out_meta, len, len);
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   txcmd_phys, len, 1, 0);
-
-	if (!ieee80211_has_morefrags(hdr->frame_control)) {
-		txq->need_update = 1;
-		if (qc)
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	} else {
-		wait_write_ptr = 1;
-		txq->need_update = 0;
-	}
-
-	/* Set up TFD's 2nd entry to point directly to remainder of skb,
-	 * if any (802.11 null frames have no payload). */
-	secondlen = len = skb->len - hdr_len;
-	if (len) {
-		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
-					   len, PCI_DMA_TODEVICE);
-		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-							   phys_addr, len,
-							   0, 0);
-	}
-
-	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
-				offsetof(struct iwl_tx_cmd, scratch);
-
-	len = sizeof(struct iwl_tx_cmd) +
-		sizeof(struct iwl_cmd_header) + hdr_len;
-	/* take back ownership of DMA buffer to enable update */
-	pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
-				    len, PCI_DMA_BIDIRECTIONAL);
-	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
-	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
-
-	IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
-		     le16_to_cpu(out_cmd->hdr.sequence));
-	IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
-	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
-	/* Set up entry for this TFD in Tx byte-count array */
-	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
-						le16_to_cpu(tx_cmd->len));
-
-	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
-				       len, PCI_DMA_BIDIRECTIONAL);
-
-	trace_iwlwifi_dev_tx(priv,
-			     &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
-			     sizeof(struct iwl_tfd),
-			     &out_cmd->hdr, firstlen,
-			     skb->data + hdr_len, secondlen);
-
-	/* Tell device the write index *just past* this latest filled TFD */
-	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(priv, txq);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/*
-	 * At this point the frame is "transmitted" successfully
-	 * and we will get a TX status notification eventually,
-	 * regardless of the value of ret. "ret" only indicates
-	 * whether or not we should update the write pointer.
-	 */
-
-	/* avoid atomic ops if it isn't an associated client */
-	if (sta_priv && sta_priv->client)
-		atomic_inc(&sta_priv->pending_frames);
-
-	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
-		if (wait_write_ptr) {
-			spin_lock_irqsave(&priv->lock, flags);
-			txq->need_update = 1;
-			iwl_txq_update_write_ptr(priv, txq);
-			spin_unlock_irqrestore(&priv->lock, flags);
-		} else {
-			iwl_stop_queue(priv, txq->swq_id);
-		}
-	}
-
-	return 0;
-
-drop_unlock:
-	spin_unlock_irqrestore(&priv->lock, flags);
-	return -1;
-}
-EXPORT_SYMBOL(iwl_tx_skb);
-
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
 /**
@@ -1218,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	return idx;
 }
 
-static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct ieee80211_sta *sta;
-	struct iwl_station_priv *sta_priv;
-
-	sta = ieee80211_find_sta(priv->vif, hdr->addr1);
-	if (sta) {
-		sta_priv = (void *)sta->drv_priv;
-		/* avoid atomic ops if this isn't a client */
-		if (sta_priv->client &&
-		    atomic_dec_return(&sta_priv->pending_frames) == 0)
-			ieee80211_sta_block_awake(priv->hw, sta, false);
-	}
-
-	ieee80211_tx_status_irqsafe(priv->hw, skb);
-}
-
-int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
-	struct iwl_queue *q = &txq->q;
-	struct iwl_tx_info *tx_info;
-	int nfreed = 0;
-	struct ieee80211_hdr *hdr;
-
-	if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
-		IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
-			  "is out of range [0-%d] %d %d.\n", txq_id,
-			  index, q->n_bd, q->write_ptr, q->read_ptr);
-		return 0;
-	}
-
-	for (index = iwl_queue_inc_wrap(index, q->n_bd);
-	     q->read_ptr != index;
-	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
-		tx_info = &txq->txb[txq->q.read_ptr];
-		iwl_tx_status(priv, tx_info->skb[0]);
-
-		hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
-		if (hdr && ieee80211_is_data_qos(hdr->frame_control))
-			nfreed++;
-		tx_info->skb[0] = NULL;
-
-		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
-			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
-
-		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
-	}
-	return nfreed;
-}
-EXPORT_SYMBOL(iwl_tx_queue_reclaim);
-
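
The reclaim loop above advances read_ptr around the ring until it reaches the slot just past the last completed TFD; a standalone sketch of the walk (plain modulo stands in for iwl_queue_inc_wrap):

	#include <stdio.h>

	static int inc_wrap(int index, int n_bd)
	{
		return (index + 1) % n_bd;
	}

	int main(void)
	{
		int n_bd = 256;		/* ring size */
		int read_ptr = 254;	/* oldest outstanding TFD */
		int last_done = 1;	/* last index reported complete */
		int stop = inc_wrap(last_done, n_bd);
		int freed = 0;

		for (; read_ptr != stop; read_ptr = inc_wrap(read_ptr, n_bd))
			freed++;	/* one TFD would be released here */

		printf("freed=%d read_ptr=%d\n", freed, read_ptr); /* 4, 2 */
		return 0;
	}
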
-
 /**
  * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
  *
@@ -1366,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 
 	if (!(meta->flags & CMD_ASYNC)) {
 		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
 			       get_cmd_string(cmd->hdr.cmd));
 		wake_up_interruptible(&priv->wait_command_queue);
 	}
@@ -1374,353 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 }
 EXPORT_SYMBOL(iwl_tx_cmd_complete);
 
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because they should already
- * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
- */
-static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
-{
-	int txq_id;
-
-	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
-		if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
-			return txq_id;
-	return -1;
-}
-
-int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
-{
-	int sta_id;
-	int tx_fifo;
-	int txq_id;
-	int ret;
-	unsigned long flags;
-	struct iwl_tid_data *tid_data;
-
-	tx_fifo = get_fifo_from_tid(tid);
-	if (unlikely(tx_fifo < 0))
-		return tx_fifo;
-
-	IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
-			__func__, ra, tid);
-
-	sta_id = iwl_find_station(priv, ra);
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Start AGG on invalid station\n");
-		return -ENXIO;
-	}
-	if (unlikely(tid >= MAX_TID_COUNT))
-		return -EINVAL;
-
-	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
-		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
-		return -ENXIO;
-	}
-
-	txq_id = iwl_txq_ctx_activate_free(priv);
-	if (txq_id == -1) {
-		IWL_ERR(priv, "No free aggregation queue available\n");
-		return -ENXIO;
-	}
-
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	tid_data = &priv->stations[sta_id].tid[tid];
-	*ssn = SEQ_TO_SN(tid_data->seq_number);
-	tid_data->agg.txq_id = txq_id;
-	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
-						  sta_id, tid, *ssn);
-	if (ret)
-		return ret;
-
-	if (tid_data->tfds_in_queue == 0) {
-		IWL_DEBUG_HT(priv, "HW queue is empty\n");
-		tid_data->agg.state = IWL_AGG_ON;
-		ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-	} else {
-		IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
-			     tid_data->tfds_in_queue);
-		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
-	}
-	return ret;
-}
-EXPORT_SYMBOL(iwl_tx_agg_start);
-
-int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
-{
-	int tx_fifo_id, txq_id, sta_id, ssn = -1;
-	struct iwl_tid_data *tid_data;
-	int write_ptr, read_ptr;
-	unsigned long flags;
-
-	if (!ra) {
-		IWL_ERR(priv, "ra = NULL\n");
-		return -EINVAL;
-	}
-
-	tx_fifo_id = get_fifo_from_tid(tid);
-	if (unlikely(tx_fifo_id < 0))
-		return tx_fifo_id;
-
-	sta_id = iwl_find_station(priv, ra);
-
-	if (sta_id == IWL_INVALID_STATION) {
-		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
-		return -ENXIO;
-	}
-
-	if (priv->stations[sta_id].tid[tid].agg.state ==
-				IWL_EMPTYING_HW_QUEUE_ADDBA) {
-		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
-		ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-		priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-		return 0;
-	}
-
-	if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-		IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
-
-	tid_data = &priv->stations[sta_id].tid[tid];
-	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
-	txq_id = tid_data->agg.txq_id;
-	write_ptr = priv->txq[txq_id].q.write_ptr;
-	read_ptr = priv->txq[txq_id].q.read_ptr;
-
-	/* The queue is not empty */
-	if (write_ptr != read_ptr) {
-		IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
-		priv->stations[sta_id].tid[tid].agg.state =
-				IWL_EMPTYING_HW_QUEUE_DELBA;
-		return 0;
-	}
-
-	IWL_DEBUG_HT(priv, "HW queue is empty\n");
-	priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
-	spin_lock_irqsave(&priv->lock, flags);
-	/*
-	 * the only reason this call can fail is queue number out of range,
-	 * which can happen if uCode is reloaded and all the station
-	 * information are lost. if it is outside the range, there is no need
-	 * to deactivate the uCode queue, just return "success" to allow
-	 * mac80211 to clean up it own data.
-	 */
-	priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
-					     tx_fifo_id);
-	spin_unlock_irqrestore(&priv->lock, flags);
-
-	ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
-
-	return 0;
-}
-EXPORT_SYMBOL(iwl_tx_agg_stop);
-
-int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
-{
-	struct iwl_queue *q = &priv->txq[txq_id].q;
-	u8 *addr = priv->stations[sta_id].sta.sta.addr;
-	struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
-
-	switch (priv->stations[sta_id].tid[tid].agg.state) {
-	case IWL_EMPTYING_HW_QUEUE_DELBA:
-		/* We are reclaiming the last packet of the */
-		/* aggregated HW queue */
-		if ((txq_id == tid_data->agg.txq_id) &&
-		    (q->read_ptr == q->write_ptr)) {
-			u16 ssn = SEQ_TO_SN(tid_data->seq_number);
-			int tx_fifo = get_fifo_from_tid(tid);
-			IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
-			priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
-							     ssn, tx_fifo);
-			tid_data->agg.state = IWL_AGG_OFF;
-			ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
-		}
-		break;
-	case IWL_EMPTYING_HW_QUEUE_ADDBA:
-		/* We are reclaiming the last packet of the queue */
-		if (tid_data->tfds_in_queue == 0) {
-			IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
-			tid_data->agg.state = IWL_AGG_ON;
-			ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
-		}
-		break;
-	}
-	return 0;
-}
-EXPORT_SYMBOL(iwl_txq_check_empty);
-
-/**
- * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
-					     struct iwl_ht_agg *agg,
-					     struct iwl_compressed_ba_resp *ba_resp)
-
-{
-	int i, sh, ack;
-	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
-	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-	u64 bitmap;
-	int successes = 0;
-	struct ieee80211_tx_info *info;
-
-	if (unlikely(!agg->wait_for_ba)) {
-		IWL_ERR(priv, "Received BA when not expected\n");
-		return -EINVAL;
-	}
-
-	/* Mark that the expected block-ack response arrived */
-	agg->wait_for_ba = 0;
-	IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
-
-	/* Calculate shift to align block-ack bits with our Tx window bits */
-	sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
-	if (sh < 0) /* tbw something is wrong with indices */
-		sh += 0x100;
-
-	/* don't use 64-bit values for now */
-	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
-	if (agg->frame_count > (64 - sh)) {
-		IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
-		return -1;
-	}
-
-	/* check for success or failure according to the
-	 * transmitted bitmap and block-ack bitmap */
-	bitmap &= agg->bitmap;
-
-	/* For each frame attempted in aggregation,
-	 * update driver's record of tx frame's status. */
-	for (i = 0; i < agg->frame_count ; i++) {
-		ack = bitmap & (1ULL << i);
-		successes += !!ack;
-		IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
-			ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
-			agg->start_idx + i);
-	}
-
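
The loop above counts ACKs after shifting the device's block-ack bitmap into alignment with the driver's Tx window and masking it with the frames actually sent; the shift-mask-count sequence with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int start_idx = 5;		/* driver's Tx window start */
		int ba_seq_idx = 3;		/* window start per the BA frame */
		int frame_count = 4;		/* frames in the aggregate */
		uint64_t ba_bitmap = 0x3c;	/* ACK bits from the device */
		uint64_t sent_bitmap = 0xf;	/* frames actually transmitted */
		int sh, i, successes = 0;
		uint64_t acked;

		sh = start_idx - ba_seq_idx;	/* align the two windows */
		if (sh < 0)
			sh += 0x100;		/* sequence space wrapped */

		acked = (ba_bitmap >> sh) & sent_bitmap;
		for (i = 0; i < frame_count; i++)
			successes += (acked >> i) & 1;

		printf("successes=%d of %d\n", successes, frame_count); /* 4 of 4 */
		return 0;
	}
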
-	info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
-	memset(&info->status, 0, sizeof(info->status));
-	info->flags |= IEEE80211_TX_STAT_ACK;
-	info->flags |= IEEE80211_TX_STAT_AMPDU;
-	info->status.ampdu_ack_map = successes;
-	info->status.ampdu_ack_len = agg->frame_count;
-	iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
-	IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
-
-	return 0;
-}
-
-/**
- * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
-				struct iwl_rx_mem_buffer *rxb)
-{
-	struct iwl_rx_packet *pkt = rxb_addr(rxb);
-	struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
-	struct iwl_tx_queue *txq = NULL;
-	struct iwl_ht_agg *agg;
-	int index;
-	int sta_id;
-	int tid;
-
-	/* "flow" corresponds to Tx queue */
-	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
-	/* "ssn" is start of block-ack Tx window, corresponds to index
-	 * (in Tx queue's circular buffer) of first TFD/frame in window */
-	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
-	if (scd_flow >= priv->hw_params.max_txq_num) {
-		IWL_ERR(priv,
-			"BUG_ON scd_flow is bigger than number of queues\n");
-		return;
-	}
-
-	txq = &priv->txq[scd_flow];
-	sta_id = ba_resp->sta_id;
-	tid = ba_resp->tid;
-	agg = &priv->stations[sta_id].tid[tid].agg;
-
-	/* Find index just before block-ack window */
-	index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
-	/* TODO: Need to get this copy more safely - now good for debug */
-
-	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
-			   "sta_id = %d\n",
-			   agg->wait_for_ba,
-			   (u8 *) &ba_resp->sta_addr_lo32,
-			   ba_resp->sta_id);
-	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
-			   "%d, scd_ssn = %d\n",
-			   ba_resp->tid,
-			   ba_resp->seq_ctl,
-			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
-			   ba_resp->scd_flow,
-			   ba_resp->scd_ssn);
-	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
-			   agg->start_idx,
-			   (unsigned long long)agg->bitmap);
-
-	/* Update driver's record of ACK vs. not for each frame in window */
-	iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
-	/* Release all TFDs before the SSN, i.e. all TFDs in front of
-	 * block-ack window (we assume that they've been successfully
-	 * transmitted ... if not, it's too late anyway). */
-	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
-		/* calculate mac80211 ampdu sw queue to wake */
-		int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
-		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
-
-		if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
-		    priv->mac80211_registered &&
-		    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
-			iwl_wake_queue(priv, txq->swq_id);
-
-		iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
-	}
-}
-EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
-
 #ifdef CONFIG_IWLWIFI_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
 
 const char *iwl_get_tx_fail_reason(u32 status)
 {
 	switch (status & TX_STATUS_MSK) {
 	case TX_STATUS_SUCCESS:
 		return "SUCCESS";
-		TX_STATUS_ENTRY(SHORT_LIMIT);
-		TX_STATUS_ENTRY(LONG_LIMIT);
-		TX_STATUS_ENTRY(FIFO_UNDERRUN);
-		TX_STATUS_ENTRY(MGMNT_ABORT);
-		TX_STATUS_ENTRY(NEXT_FRAG);
-		TX_STATUS_ENTRY(LIFE_EXPIRE);
-		TX_STATUS_ENTRY(DEST_PS);
-		TX_STATUS_ENTRY(ABORTED);
-		TX_STATUS_ENTRY(BT_RETRY);
-		TX_STATUS_ENTRY(STA_INVALID);
-		TX_STATUS_ENTRY(FRAG_DROPPED);
-		TX_STATUS_ENTRY(TID_DISABLE);
-		TX_STATUS_ENTRY(FRAME_FLUSHED);
-		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
-		TX_STATUS_ENTRY(TX_LOCKED);
-		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+		TX_STATUS_POSTPONE(DELAY);
+		TX_STATUS_POSTPONE(FEW_BYTES);
+		TX_STATUS_POSTPONE(BT_PRIO);
+		TX_STATUS_POSTPONE(QUIET_PERIOD);
+		TX_STATUS_POSTPONE(CALC_TTAK);
+		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+		TX_STATUS_FAIL(SHORT_LIMIT);
+		TX_STATUS_FAIL(LONG_LIMIT);
+		TX_STATUS_FAIL(FIFO_UNDERRUN);
+		TX_STATUS_FAIL(DRAIN_FLOW);
+		TX_STATUS_FAIL(RFKILL_FLUSH);
+		TX_STATUS_FAIL(LIFE_EXPIRE);
+		TX_STATUS_FAIL(DEST_PS);
+		TX_STATUS_FAIL(HOST_ABORTED);
+		TX_STATUS_FAIL(BT_RETRY);
+		TX_STATUS_FAIL(STA_INVALID);
+		TX_STATUS_FAIL(FRAG_DROPPED);
+		TX_STATUS_FAIL(TID_DISABLE);
+		TX_STATUS_FAIL(FIFO_FLUSHED);
+		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+		TX_STATUS_FAIL(FW_DROP);
+		TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
 	}
 
 	return "UNKNOWN";