author     Felix Fietkau <nbd@openwrt.org>            2011-08-27 18:32:25 -0400
committer  John W. Linville <linville@tuxdriver.com>  2011-08-30 15:38:53 -0400
commit     44f1d26c16d5806f23f8d7ce5b85362a1897fbef (patch)
tree       ea36e26cd67be99ac706a48d0a22c6840e91e761
parent     fa05f87ad4213a3e99bea6f5e73611dc27b4304a (diff)
ath9k: defer ath_tx_setup_buffer to the first tx attempt during aggr
With sequence number and buffer allocation deferred to when they're
needed for the first time, it becomes much easier to start dropping
packets from the tid queue if necessary, e.g. when latency suddenly
increases. This can lead to some future improvements in buffer
management for better latency.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 105
1 file changed, 66 insertions(+), 39 deletions(-)
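The core of the change shows up in ath_tx_form_aggr(), ath_tx_send_ampdu()
and ath_tx_send_normal() below: a frame now sits in the tid software queue
as a bare skb, and its ath_buf (descriptor, DMA mapping, sequence number)
is only assigned on the first transmit attempt, so frames that have never
entered the block-ack window can still be dropped cheaply. A minimal
standalone sketch of that lazy-setup pattern follows; the tx_buf, frame
and sw_queue types and the setup_buffer()/pull_for_tx() helpers are
hypothetical stand-ins for illustration, not ath9k code:

#include <stdio.h>
#include <stdlib.h>

struct tx_buf { int seqno; };                       /* stand-in for struct ath_buf */
struct frame { struct tx_buf *bf; struct frame *next; };
struct sw_queue { struct frame *head; int next_seqno; };

/* Expensive per-frame setup, performed lazily; NULL on failure.
 * The sequence number is also assigned late, as in the patch. */
static struct tx_buf *setup_buffer(struct sw_queue *q)
{
	struct tx_buf *bf = malloc(sizeof(*bf));
	if (bf)
		bf->seqno = q->next_seqno++;
	return bf;
}

static void pull_for_tx(struct sw_queue *q)
{
	struct frame *f;

	while ((f = q->head) != NULL) {
		if (!f->bf)                         /* first tx attempt: set up now */
			f->bf = setup_buffer(q);

		if (!f->bf) {                       /* setup failed: the frame never
						     * entered the BAW, safe to drop */
			q->head = f->next;
			free(f);
			continue;
		}

		printf("tx frame, seqno=%d\n", f->bf->seqno);
		q->head = f->next;
		free(f->bf);
		free(f);
	}
}

int main(void)
{
	struct sw_queue q = { NULL, 100 };
	struct frame *a = calloc(1, sizeof(*a));
	struct frame *b = calloc(1, sizeof(*b));

	if (!a || !b)
		return 1;
	a->next = b;
	q.head = a;
	pull_for_tx(&q);
	return 0;
}

Because a queued frame owns no buffer and no BAW slot until this point,
trimming the queue when latency spikes needs no unwinding, which is
exactly the future improvement the commit message alludes to.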
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index bd523619e7b7..68066c56e4e5 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,8 +48,9 @@ static u16 bits_per_symbol[][2] = {
 #define IS_HT_RATE(_rate)     ((_rate) & 0x80)
 
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct ath_atx_tid *tid,
-			       struct list_head *bf_head);
+			       struct ath_atx_tid *tid, struct sk_buff *skb);
+static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
+			    int tx_flags, struct ath_txq *txq);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 				struct ath_txq *txq, struct list_head *bf_q,
 				struct ath_tx_status *ts, int txok, int sendbar);
@@ -61,6 +62,10 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 			     int txok, bool update_rc);
 static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 			      int seqno);
+static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
+					   struct ath_txq *txq,
+					   struct ath_atx_tid *tid,
+					   struct sk_buff *skb);
 
 enum {
 	MCS_HT20,
@@ -164,14 +169,13 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 		fi = get_frame_info(skb);
 		bf = fi->bf;
 
-		list_add_tail(&bf->list, &bf_head);
-
 		spin_unlock_bh(&txq->axq_lock);
-		if (fi->retries) {
+		if (bf && fi->retries) {
+			list_add_tail(&bf->list, &bf_head);
 			ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
 		} else {
-			ath_tx_send_normal(sc, txq, NULL, &bf_head);
+			ath_tx_send_normal(sc, txq, NULL, skb);
 		}
 		spin_lock_bh(&txq->axq_lock);
 	}
@@ -234,6 +238,13 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 		fi = get_frame_info(skb);
 		bf = fi->bf;
 
+		if (!bf) {
+			spin_unlock(&txq->axq_lock);
+			ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
+			spin_lock(&txq->axq_lock);
+			continue;
+		}
+
 		list_add_tail(&bf->list, &bf_head);
 
 		if (fi->retries)
@@ -760,8 +771,14 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 		skb = skb_peek(&tid->buf_q);
 		fi = get_frame_info(skb);
 		bf = fi->bf;
-		seqno = bf->bf_state.seqno;
+		if (!fi->bf)
+			bf = ath_tx_setup_buffer(sc, txq, tid, skb);
 
+		if (!bf)
+			continue;
+
+		bf->bf_state.bf_type |= BUF_AMPDU;
+		seqno = bf->bf_state.seqno;
 		if (!bf_first)
 			bf_first = bf;
 
@@ -1434,13 +1451,11 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 }
 
 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
-			      struct ath_buf *bf, struct ath_tx_control *txctl)
+			      struct sk_buff *skb, struct ath_tx_control *txctl)
 {
-	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+	struct ath_frame_info *fi = get_frame_info(skb);
 	struct list_head bf_head;
-	u16 seqno = bf->bf_state.seqno;
-
-	bf->bf_state.bf_type |= BUF_AMPDU;
+	struct ath_buf *bf;
 
 	/*
 	 * Do not queue to h/w when any of the following conditions is true:
@@ -1450,25 +1465,29 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 *  - h/w queue depth exceeds low water mark
 	 */
 	if (!skb_queue_empty(&tid->buf_q) || tid->paused ||
-	    !BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+	    !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
 	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
 		/*
 		 * Add this frame to software queue for scheduling later
 		 * for aggregation.
 		 */
 		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
-		__skb_queue_tail(&tid->buf_q, bf->bf_mpdu);
+		__skb_queue_tail(&tid->buf_q, skb);
 		if (!txctl->an || !txctl->an->sleeping)
 			ath_tx_queue_tid(txctl->txq, tid);
 		return;
 	}
 
+	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+	if (!bf)
+		return;
+
+	bf->bf_state.bf_type |= BUF_AMPDU;
 	INIT_LIST_HEAD(&bf_head);
 	list_add(&bf->list, &bf_head);
 
 	/* Add sub-frame to BAW */
-	if (!fi->retries)
-		ath_tx_addto_baw(sc, tid, seqno);
+	ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
 
 	/* Queue to h/w without aggregation */
 	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
@@ -1478,13 +1497,21 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 }
 
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct ath_atx_tid *tid,
-			       struct list_head *bf_head)
+			       struct ath_atx_tid *tid, struct sk_buff *skb)
 {
-	struct ath_frame_info *fi;
+	struct ath_frame_info *fi = get_frame_info(skb);
+	struct list_head bf_head;
 	struct ath_buf *bf;
 
-	bf = list_first_entry(bf_head, struct ath_buf, list);
+	bf = fi->bf;
+	if (!bf)
+		bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+
+	if (!bf)
+		return;
+
+	INIT_LIST_HEAD(&bf_head);
+	list_add_tail(&bf->list, &bf_head);
 	bf->bf_state.bf_type &= ~BUF_AMPDU;
 
 	/* update starting sequence number for subsequent ADDBA request */
@@ -1492,9 +1519,8 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 
 	bf->bf_lastbf = bf;
-	fi = get_frame_info(bf->bf_mpdu);
 	ath_buf_set_rate(sc, bf, fi->framelen);
-	ath_tx_txqaddbuf(sc, txq, bf_head, false);
+	ath_tx_txqaddbuf(sc, txq, &bf_head, false);
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
@@ -1717,6 +1743,10 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
 
 }
 
+/*
+ * Assign a descriptor (and sequence number if necessary), and map the
+ * buffer for DMA. Frees the skb on error.
+ */
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 					   struct ath_txq *txq,
 					   struct ath_atx_tid *tid,
@@ -1734,7 +1764,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 	bf = ath_tx_get_buffer(sc);
 	if (!bf) {
 		ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
-		return NULL;
+		goto error;
 	}
 
 	ATH_TXBUF_RESET(bf);
@@ -1757,7 +1787,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 		ath_err(ath9k_hw_common(sc->sc_ah),
 			"dma_mapping_error() on TX\n");
 		ath_tx_return_buffer(sc, bf);
-		return NULL;
+		goto error;
 	}
 
 	frm_type = get_hw_packet_type(skb);
@@ -1779,18 +1809,20 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 	fi->bf = bf;
 
 	return bf;
+
+error:
+	dev_kfree_skb_any(skb);
+	return NULL;
 }
 
 /* FIXME: tx power */
-static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
-			    struct ath_tx_control *txctl)
+static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
+			     struct ath_tx_control *txctl)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct list_head bf_head;
 	struct ath_atx_tid *tid = NULL;
 	struct ath_buf *bf;
-	int ret = 0;
 	u8 tidno;
 
 	spin_lock_bh(&txctl->txq->axq_lock);
@@ -1803,21 +1835,16 @@ static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
 		WARN_ON(tid->ac->txq != txctl->txq);
 	}
 
-	bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
-	if (unlikely(!bf)) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
 		/*
 		 * Try aggregation if it's a unicast data frame
 		 * and the destination is HT capable.
 		 */
-		ath_tx_send_ampdu(sc, tid, bf, txctl);
+		ath_tx_send_ampdu(sc, tid, skb, txctl);
 	} else {
-		INIT_LIST_HEAD(&bf_head);
-		list_add_tail(&bf->list, &bf_head);
+		bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
+		if (!bf)
+			goto out;
 
 		bf->bf_state.bfs_paprd = txctl->paprd;
 
@@ -1831,12 +1858,11 @@ static int ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
 		if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
 			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
 
-		ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
+		ath_tx_send_normal(sc, txctl->txq, tid, skb);
 	}
 
 out:
 	spin_unlock_bh(&txctl->txq->axq_lock);
-	return ret;
 }
 
 /* Upon failure caller should free skb */
@@ -1904,7 +1930,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 	}
 	spin_unlock_bh(&txq->axq_lock);
 
-	return ath_tx_start_dma(sc, skb, txctl);
+	ath_tx_start_dma(sc, skb, txctl);
+	return 0;
 }
 
 /*****************/
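
One consequence of the reshuffle worth noting: ath_tx_setup_buffer() now
frees the skb itself on failure (the new error: label above), so every
caller can simply bail out when it returns NULL, and ath_tx_start_dma()
no longer needs a return value. A small standalone sketch of that
goto-based single-cleanup idiom; pkt, acquire_desc(), map_for_dma() and
this setup_buffer() are hypothetical stand-ins, not the driver's API:

#include <stdlib.h>

struct pkt { void *data; };

/* Stubs standing in for descriptor allocation and DMA mapping. */
static struct pkt *acquire_desc(void) { return malloc(sizeof(struct pkt)); }
static int map_for_dma(struct pkt *p) { (void)p; return 0; /* 0 == success */ }

/*
 * Every failure path funnels through one label that consumes (frees)
 * the input buffer, so a caller that sees NULL has nothing to clean up.
 */
static struct pkt *setup_buffer(void *data)
{
	struct pkt *p = acquire_desc();
	if (!p)
		goto error;

	p->data = data;
	if (map_for_dma(p)) {
		free(p);                /* mirrors ath_tx_return_buffer() */
		goto error;
	}
	return p;

error:
	free(data);                     /* mirrors dev_kfree_skb_any(skb) */
	return NULL;
}

int main(void)
{
	struct pkt *p = setup_buffer(malloc(64));
	if (p) {
		free(p->data);
		free(p);
	}
	return 0;
}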