Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
 drivers/net/wireless/ath/ath9k/xmit.c | 60
 1 file changed, 29 insertions(+), 31 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 859aa4ab0769..7547c8f9a584 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -984,32 +984,6 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 	return qnum;
 }
 
-struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
-{
-	struct ath_txq *txq = NULL;
-	u16 skb_queue = skb_get_queue_mapping(skb);
-	int qnum;
-
-	qnum = ath_get_hal_qnum(skb_queue, sc);
-	txq = &sc->tx.txq[qnum];
-
-	spin_lock_bh(&txq->axq_lock);
-
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
-		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
-			  "TX queue: %d is full, depth: %d\n",
-			  qnum, txq->axq_depth);
-		ath_mac80211_stop_queue(sc, skb_queue);
-		txq->stopped = 1;
-		spin_unlock_bh(&txq->axq_lock);
-		return NULL;
-	}
-
-	spin_unlock_bh(&txq->axq_lock);
-
-	return txq;
-}
-
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *qinfo)
 {
@@ -1809,6 +1783,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_txq *txq = txctl->txq;
 	struct ath_buf *bf;
 	int r;
 
@@ -1818,10 +1793,16 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		return -1;
 	}
 
+	bf->txq = txctl->txq;
+	spin_lock_bh(&bf->txq->axq_lock);
+	if (++bf->txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+		txq->stopped = 1;
+	}
+	spin_unlock_bh(&bf->txq->axq_lock);
+
 	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
 	if (unlikely(r)) {
-		struct ath_txq *txq = txctl->txq;
-
 		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
 
 		/* upon ath_tx_processq() this TX queue will be resumed, we
@@ -1829,7 +1810,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		 * we will at least have to run TX completionon one buffer
 		 * on the queue */
 		spin_lock_bh(&txq->axq_lock);
-		if (sc->tx.txq[txq->axq_qnum].axq_depth > 1) {
+		if (!txq->stopped && txq->axq_depth > 1) {
 			ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
 			txq->stopped = 1;
 		}
@@ -1970,6 +1951,13 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 		tx_flags |= ATH_TX_XRETRY;
 	}
 
+	if (bf->txq) {
+		spin_lock_bh(&bf->txq->axq_lock);
+		bf->txq->pending_frames--;
+		spin_unlock_bh(&bf->txq->axq_lock);
+		bf->txq = NULL;
+	}
+
 	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
 	ath_tx_complete(sc, skb, bf->aphy, tx_flags);
 	ath_debug_stat_tx(sc, txq, bf, ts);
@@ -2058,8 +2046,7 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
 	int qnum;
 
 	spin_lock_bh(&txq->axq_lock);
-	if (txq->stopped &&
-	    sc->tx.txq[txq->axq_qnum].axq_depth <= (ATH_TXBUF - 20)) {
+	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 		qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc);
 		if (qnum != -1) {
 			ath_mac80211_start_queue(sc, qnum);
@@ -2279,6 +2266,17 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
 
 		txok = !(txs.ts_status & ATH9K_TXERR_MASK);
 
+		/*
+		 * Make sure null func frame is acked before configuring
+		 * hw into ps mode.
+		 */
+		if (bf->bf_isnullfunc && txok) {
+			if ((sc->ps_flags & PS_ENABLED))
+				ath9k_enable_ps(sc);
+			else
+				sc->ps_flags |= PS_NULLFUNC_COMPLETED;
+		}
+
 		if (!bf_isampdu(bf)) {
 			bf->bf_retries = txs.ts_longretry;
 			if (txs.ts_status & ATH9K_TXERR_XRETRY)