| author | John W. Linville <linville@tuxdriver.com> | 2011-05-16 14:55:42 -0400 |
| --- | --- | --- |
| committer | John W. Linville <linville@tuxdriver.com> | 2011-05-16 19:32:19 -0400 |
| commit | e00cf3b9eb7839b952e434a75bff6b99e47337ac (patch) | |
| tree | ef583ab8ac09bf703026650d4bc7777e6a3864d3 /drivers/net/wireless/iwlwifi/iwl-agn-tx.c | |
| parent | 1a8218e96271790a07dd7065a2ef173e0f67e328 (diff) | |
| parent | 3b8ab88acaceb505aa06ef3bbf3a73b92470ae78 (diff) | |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-agn-tx.c
	net/mac80211/sta_info.h
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-tx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-agn-tx.c | 105 |
1 file changed, 56 insertions, 49 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 4afae1446582..342de780a366 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -98,9 +98,9 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
 /**
  * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
-void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
-				    struct iwl_tx_queue *txq,
-				    u16 byte_cnt)
+static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+					   struct iwl_tx_queue *txq,
+					   u16 byte_cnt)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
 	int write_ptr = txq->q.write_ptr;
@@ -112,21 +112,19 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != priv->cmd_queue) {
-		sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
-		sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
-
-		switch (sec_ctl & TX_CMD_SEC_MSK) {
-		case TX_CMD_SEC_CCM:
-			len += CCMP_MIC_LEN;
-			break;
-		case TX_CMD_SEC_TKIP:
-			len += TKIP_ICV_LEN;
-			break;
-		case TX_CMD_SEC_WEP:
-			len += WEP_IV_LEN + WEP_ICV_LEN;
-			break;
-		}
+	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
+	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
+
+	switch (sec_ctl & TX_CMD_SEC_MSK) {
+	case TX_CMD_SEC_CCM:
+		len += CCMP_MIC_LEN;
+		break;
+	case TX_CMD_SEC_TKIP:
+		len += TKIP_ICV_LEN;
+		break;
+	case TX_CMD_SEC_WEP:
+		len += WEP_IV_LEN + WEP_ICV_LEN;
+		break;
 	}
 
 	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
@@ -138,8 +136,8 @@ void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
 }
 
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
-				   struct iwl_tx_queue *txq)
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+					  struct iwl_tx_queue *txq)
 {
 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
 	int txq_id = txq->q.id;
@@ -539,7 +537,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	struct iwl_tx_cmd *tx_cmd;
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
 	int txq_id;
-	dma_addr_t phys_addr;
+	dma_addr_t phys_addr = 0;
 	dma_addr_t txcmd_phys;
 	dma_addr_t scratch_phys;
 	u16 len, firstlen, secondlen;
@@ -566,7 +564,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	spin_lock_irqsave(&priv->lock, flags);
 	if (iwl_is_rfkill(priv)) {
 		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
-		goto drop_unlock;
+		goto drop_unlock_priv;
 	}
 
 	fc = hdr->frame_control;
@@ -591,7 +589,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 		if (sta_id == IWL_INVALID_STATION) {
 			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
 				       hdr->addr1);
-			goto drop_unlock;
+			goto drop_unlock_priv;
 		}
 	}
 
@@ -635,10 +633,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
-		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
-			spin_unlock(&priv->sta_lock);
-			goto drop_unlock;
-		}
+
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT))
+			goto drop_unlock_sta;
+
 		seq_number = priv->stations[sta_id].tid[tid].seq_number;
 		seq_number &= IEEE80211_SCTL_SEQ;
 		hdr->seq_ctrl = hdr->seq_ctrl &
@@ -656,18 +654,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	txq = &priv->txq[txq_id];
 	q = &txq->q;
 
-	if (unlikely(iwl_queue_space(q) < q->high_mark)) {
-		spin_unlock(&priv->sta_lock);
-		goto drop_unlock;
-	}
-
-	if (ieee80211_is_data_qos(fc)) {
-		priv->stations[sta_id].tid[tid].tfds_in_queue++;
-		if (!ieee80211_has_morefrags(fc))
-			priv->stations[sta_id].tid[tid].seq_number = seq_number;
-	}
-
-	spin_unlock(&priv->sta_lock);
+	if (unlikely(iwl_queue_space(q) < q->high_mark))
+		goto drop_unlock_sta;
 
 	/* Set up driver data for this TFD */
 	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@@ -731,12 +719,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	txcmd_phys = pci_map_single(priv->pci_dev,
 				    &out_cmd->hdr, firstlen,
 				    PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(priv->pci_dev, txcmd_phys)))
+		goto drop_unlock_sta;
 	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
 	dma_unmap_len_set(out_meta, len, firstlen);
-	/* Add buffer containing Tx command and MAC(!) header to TFD's
-	 * first entry */
-	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
-						   txcmd_phys, firstlen, 1, 0);
 
 	if (!ieee80211_has_morefrags(hdr->frame_control)) {
 		txq->need_update = 1;
@@ -751,10 +737,30 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 	if (secondlen > 0) {
 		phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
 					   secondlen, PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
+			pci_unmap_single(priv->pci_dev,
+					 dma_unmap_addr(out_meta, mapping),
+					 dma_unmap_len(out_meta, len),
+					 PCI_DMA_BIDIRECTIONAL);
+			goto drop_unlock_sta;
+		}
+	}
+
+	if (ieee80211_is_data_qos(fc)) {
+		priv->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			priv->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&priv->sta_lock);
+
+	/* Attach buffers to TFD */
+	priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
+						   txcmd_phys, firstlen, 1, 0);
+	if (secondlen > 0)
 		priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
 							   phys_addr, secondlen,
 							   0, 0);
-	}
 
 	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
 			offsetof(struct iwl_tx_cmd, scratch);
@@ -773,8 +779,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	/* Set up entry for this TFD in Tx byte-count array */
 	if (info->flags & IEEE80211_TX_CTL_AMPDU)
-		priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
-						     le16_to_cpu(tx_cmd->len));
+		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
+					       le16_to_cpu(tx_cmd->len));
 
 	pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
 				       firstlen, PCI_DMA_BIDIRECTIONAL);
@@ -820,7 +826,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	return 0;
 
-drop_unlock:
+drop_unlock_sta:
+	spin_unlock(&priv->sta_lock);
+drop_unlock_priv:
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return -1;
 }
@@ -1253,8 +1261,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
 				 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
 		tx_info->skb = NULL;
 
-		if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
-			priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
+		iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
 
 		priv->cfg->ops->lib->txq_free_tfd(priv, txq);
 	}