author    | Felix Fietkau <nbd@openwrt.org>           | 2014-06-11 06:47:51 -0400
committer | John W. Linville <linville@tuxdriver.com> | 2014-06-19 15:49:16 -0400
commit    | 0453531e2eae61c5c0a2af7b67cdafd19c0dce68
tree      | 47e1997836b90775ce3ccafa6c2e36d0cac18827 /drivers/net/wireless/ath/ath9k/xmit.c
parent    | bc7e1be70c9f1c6de622aa14baa62003342034bb
ath9k: Move acq to channel context
Add support for maintaining per-channel AC lists.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
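
The xmit.c hunks below only consume the new per-channel list; the field itself is added to struct ath_chanctx elsewhere in this patch. As a minimal sketch of the layout the code below assumes (the acq field name is taken from the diff; the rest of the struct is elided):

```c
/* Sketch only: struct ath_chanctx as the xmit.c hunks see it.
 * The real definition lives in ath9k.h; everything except acq
 * is elided here. */
struct ath_chanctx {
	/* ... channel state, elided ... */

	/* one schedulable-AC list per WME access category,
	 * replacing the old per-hardware-queue txq->axq_acq */
	struct list_head acq[IEEE80211_NUM_ACS];
};
```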
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r-- | drivers/net/wireless/ath/ath9k/xmit.c | 58
1 file changed, 43 insertions(+), 15 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 66acb2cbd9df..b2e66d21af1c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -103,9 +103,16 @@ void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
 	ieee80211_tx_status(sc->hw, skb);
 }
 
-static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
+static void ath_tx_queue_tid(struct ath_softc *sc, struct ath_txq *txq,
+			     struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
+	struct list_head *list;
+	struct ath_vif *avp = (struct ath_vif *) tid->an->vif->drv_priv;
+	struct ath_chanctx *ctx = avp->chanctx;
+
+	if (!ctx)
+		return;
 
 	if (tid->sched)
 		return;
@@ -117,7 +124,9 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 		return;
 
 	ac->sched = true;
-	list_add_tail(&ac->list, &txq->axq_acq);
+
+	list = &ctx->acq[TID_TO_WME_AC(tid->tidno)];
+	list_add_tail(&ac->list, list);
 }
 
 static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
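
ath_tx_queue_tid() now selects the per-channel list by mapping the TID number to a WME access category. For reference, the TID_TO_WME_AC() macro from ath9k.h implements the usual 802.1d mapping; an equivalent inline function would be:

```c
/* Equivalent of the ath9k.h TID_TO_WME_AC() macro, written as a
 * function for readability: 802.11 TID -> mac80211 AC index. */
static inline int tid_to_wme_ac(int tidno)
{
	switch (tidno) {
	case 0: case 3:
		return IEEE80211_AC_BE;	/* best effort */
	case 1: case 2:
		return IEEE80211_AC_BK;	/* background */
	case 4: case 5:
		return IEEE80211_AC_VI;	/* video */
	default:			/* TIDs 6 and 7 */
		return IEEE80211_AC_VO;	/* voice */
	}
}
```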
@@ -626,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 	skb_queue_splice_tail(&bf_pending, &tid->retry_q);
 	if (!an->sleeping) {
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(sc, txq, tid);
 
 		if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
 			tid->ac->clear_ps_filter = true;
@@ -1483,7 +1492,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 		ac->clear_ps_filter = true;
 
 		if (ath_tid_has_buffered(tid)) {
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
 
@@ -1507,7 +1516,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
 
 	if (ath_tid_has_buffered(tid)) {
-		ath_tx_queue_tid(txq, tid);
+		ath_tx_queue_tid(sc, txq, tid);
 		ath_txq_schedule(sc, txq);
 	}
 
@@ -1642,7 +1651,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		txq->axq_link = NULL;
 		__skb_queue_head_init(&txq->complete_q);
 		INIT_LIST_HEAD(&txq->axq_q);
-		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
 		txq->axq_ampdu_depth = 0;
@@ -1804,7 +1812,7 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
-/* For each axq_acq entry, for each tid, try to schedule packets
+/* For each acq entry, for each tid, try to schedule packets
  * for transmit until ampdu_depth has reached min Q depth.
  */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
@@ -1812,19 +1820,25 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_atx_ac *ac, *last_ac;
 	struct ath_atx_tid *tid, *last_tid;
+	struct list_head *ac_list;
 	bool sent = false;
 
+	if (txq->mac80211_qnum < 0)
+		return;
+
+	ac_list = &sc->cur_chan->acq[txq->mac80211_qnum];
+
 	if (test_bit(ATH_OP_HW_RESET, &common->op_flags) ||
-	    list_empty(&txq->axq_acq))
+	    list_empty(ac_list))
 		return;
 
 	rcu_read_lock();
 
-	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
-	while (!list_empty(&txq->axq_acq)) {
+	last_ac = list_entry(ac_list->prev, struct ath_atx_ac, list);
+	while (!list_empty(ac_list)) {
 		bool stop = false;
 
-		ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
+		ac = list_first_entry(ac_list, struct ath_atx_ac, list);
 		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
 		list_del(&ac->list);
 		ac->sched = false;
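
Two early-outs are added here: hardware queues without a mac80211 AC mapping (mac80211_qnum < 0, i.e. internal queues such as beacon/CAB) are never scheduled from an acq list, and every other queue now drains the current channel context rather than its own axq_acq. Factored out as a hypothetical helper, the lookup this hunk performs is:

```c
/* Hypothetical refactoring of the lookup added above; not part
 * of the patch. Returns NULL for queues that have no mac80211
 * AC mapping and hence nothing on any acq list. */
static struct list_head *ath_txq_ac_list(struct ath_softc *sc,
					 struct ath_txq *txq)
{
	if (txq->mac80211_qnum < 0)
		return NULL;

	return &sc->cur_chan->acq[txq->mac80211_qnum];
}
```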
@@ -1844,7 +1858,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 			 * are pending for the tid
 			 */
 			if (ath_tid_has_buffered(tid))
-				ath_tx_queue_tid(txq, tid);
+				ath_tx_queue_tid(sc, txq, tid);
 
 			if (stop || tid == last_tid)
 				break;
@@ -1852,7 +1866,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 
 		if (!list_empty(&ac->tid_q) && !ac->sched) {
 			ac->sched = true;
-			list_add_tail(&ac->list, &txq->axq_acq);
+			list_add_tail(&ac->list, ac_list);
 		}
 
 		if (stop)
@@ -1863,7 +1877,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 			break;
 
 		sent = false;
-		last_ac = list_entry(txq->axq_acq.prev,
+		last_ac = list_entry(ac_list->prev,
 				     struct ath_atx_ac, list);
 	}
 }
@@ -1871,6 +1885,20 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	rcu_read_unlock();
 }
 
+void ath_txq_schedule_all(struct ath_softc *sc)
+{
+	struct ath_txq *txq;
+	int i;
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		txq = sc->tx.txq_map[i];
+
+		spin_lock_bh(&txq->axq_lock);
+		ath_txq_schedule(sc, txq);
+		spin_unlock_bh(&txq->axq_lock);
+	}
+}
+
 /***********/
 /* TX, DMA */
 /***********/
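
The new ath_txq_schedule_all() walks the four AC-mapped data queues and kicks the scheduler on each under its queue lock. A plausible caller, once the active channel context changes, would look like this (hypothetical call site; the real callers land elsewhere in the channel-context series):

```c
/* Hypothetical call site, not part of this patch: after the
 * driver switches sc->cur_chan to a new context, restart tx
 * from that context's acq lists. */
static void example_chanctx_switch_done(struct ath_softc *sc,
					struct ath_chanctx *new_ctx)
{
	sc->cur_chan = new_ctx;
	ath_txq_schedule_all(sc);
}
```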
@@ -2198,7 +2226,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		TX_STAT_INC(txq->axq_qnum, a_queued_sw);
 		__skb_queue_tail(&tid->buf_q, skb);
 		if (!txctl->an->sleeping)
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_queue_tid(sc, txq, tid);
 
 		ath_txq_schedule(sc, txq);
 		goto out;
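
Taken together, the software-queue path in ath_tx_start() now runs: __skb_queue_tail() buffers the frame on the TID, ath_tx_queue_tid() links the TID's AC onto the acq list of the vif's channel context (bailing out if the vif has no context yet), and ath_txq_schedule() drains the list belonging to sc->cur_chan, so only the active channel's traffic gets scheduled.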