author	Felix Fietkau <nbd@openwrt.org>	2010-11-10 21:18:37 -0500
committer	John W. Linville <linville@tuxdriver.com>	2010-11-16 16:37:08 -0500
commit	82b873afe83c81d9b1273a816bbdacb266f71a52 (patch)
tree	cbe6e4032c45774b5fb53a189c0665abdd1b2eb4
parent	61117f01e79f7c0da86c23535bed757370f5885f (diff)
ath9k: clean up tx buffer setup
Merge ath_tx_send_normal and ath_tx_send_ht_normal. Move the paprd state
initialization and sequence number assignment to reduce the number of
redundant checks. This not only simplifies buffer allocation error
handling, but also removes a small inconsistency in the buffer HT flag.
This flag should only be set if the frame is also a QoS data frame.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
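
For reference, the merged helper should read roughly as follows after this
patch. This is a sketch reconstructed from the new side of the diff below,
not a verbatim quote: the list_first_entry() call is unchanged context that
the hunks elide (restored here from the deleted duplicate of
ath_tx_send_normal), and the second comment line is added for illustration.

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request */
	/* tid is NULL when the frame is not QoS data, so there is no
	 * block-ack window to track */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}

Callers that used the old two-argument ath_tx_send_normal() now pass tid as
NULL to skip the sequence number update; ath_tx_setup_buffer() likewise now
allocates and returns the ath_buf itself (NULL on failure), so its callers
only need a NULL check instead of the old stop-queue recovery path.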
-rw-r--r--	drivers/net/wireless/ath/ath9k/xmit.c	117
1 file changed, 39 insertions(+), 78 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 32e22677953d..8ba0e2d86c1f 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,9 +48,9 @@ static u16 bits_per_symbol[][2] = {
 
 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
-				  struct ath_atx_tid *tid,
-				  struct list_head *bf_head);
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+			       struct ath_atx_tid *tid,
+			       struct list_head *bf_head);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 				struct ath_txq *txq, struct list_head *bf_q,
 				struct ath_tx_status *ts, int txok, int sendbar);
@@ -160,7 +160,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 			ath_tx_update_baw(sc, tid, bf->bf_seqno);
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
 		} else {
-			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+			ath_tx_send_normal(sc, txq, tid, &bf_head);
 		}
 	}
 
@@ -1322,9 +1322,9 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
 }
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
-				  struct ath_atx_tid *tid,
-				  struct list_head *bf_head)
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+			       struct ath_atx_tid *tid,
+			       struct list_head *bf_head)
 {
 	struct ath_buf *bf;
 
@@ -1332,7 +1332,8 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 	bf->bf_state.bf_type &= ~BUF_AMPDU;
 
 	/* update starting sequence number for subsequent ADDBA request */
-	INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+	if (tid)
+		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 
 	bf->bf_nframes = 1;
 	bf->bf_lastbf = bf;
@@ -1341,20 +1342,6 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct list_head *bf_head)
-{
-	struct ath_buf *bf;
-
-	bf = list_first_entry(bf_head, struct ath_buf, list);
-
-	bf->bf_lastbf = bf;
-	bf->bf_nframes = 1;
-	ath_buf_set_rate(sc, bf);
-	ath_tx_txqaddbuf(sc, txq, bf_head);
-	TX_STAT_INC(txq->axq_qnum, queued);
-}
-
 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
@@ -1411,7 +1398,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
 	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
 }
 
-static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
+static int setup_tx_flags(struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	int flags = 0;
@@ -1422,7 +1409,7 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		flags |= ATH9K_TXDESC_NOACK;
 
-	if (use_ldpc)
+	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
 		flags |= ATH9K_TXDESC_LDPC;
 
 	return flags;
@@ -1567,18 +1554,24 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
 }
 
-static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
-			       struct sk_buff *skb,
-			       struct ath_tx_control *txctl)
+static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
+					   struct sk_buff *skb)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath_buf *bf;
 	int hdrlen;
 	__le16 fc;
 	int padpos, padsize;
-	bool use_ldpc = false;
+
+	bf = ath_tx_get_buffer(sc);
+	if (!bf) {
+		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
+		return NULL;
+	}
 
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 	fc = hdr->frame_control;
@@ -1594,16 +1587,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_frmlen -= padsize;
 	}
 
-	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
+	if (ieee80211_is_data_qos(fc) && conf_is_ht(&hw->conf)) {
 		bf->bf_state.bf_type |= BUF_HT;
-		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
-			use_ldpc = true;
+		if (sc->sc_flags & SC_OP_TXAGGR)
+			assign_aggr_tid_seqno(skb, bf);
 	}
 
-	bf->bf_state.bfs_paprd = txctl->paprd;
-	if (txctl->paprd)
-		bf->bf_state.bfs_paprd_timestamp = jiffies;
-	bf->bf_flags = setup_tx_flags(skb, use_ldpc);
+	bf->bf_flags = setup_tx_flags(skb);
 
 	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1613,10 +1603,6 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
 	}
 
-	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
-	    (sc->sc_flags & SC_OP_TXAGGR))
-		assign_aggr_tid_seqno(skb, bf);
-
 	bf->bf_mpdu = skb;
 
 	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1626,12 +1612,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_buf_addr = 0;
 		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
 			  "dma_mapping_error() on TX\n");
-		return -ENOMEM;
+		ath_tx_return_buffer(sc, bf);
+		return NULL;
 	}
 
 	bf->bf_tx_aborted = false;
 
-	return 0;
+	return bf;
 }
 
 /* FIXME: tx power */
@@ -1679,11 +1666,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 		an = (struct ath_node *)tx_info->control.sta->drv_priv;
 		tid = ATH_AN_2_TID(an, bf->bf_tidno);
 
-		if (!ieee80211_is_data_qos(fc)) {
-			ath_tx_send_normal(sc, txctl->txq, &bf_head);
-			goto tx_done;
-		}
-
 		WARN_ON(tid->ac->txq != txctl->txq);
 		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 			/*
@@ -1696,15 +1678,18 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 			 * Send this frame as regular when ADDBA
 			 * exchange is neither complete nor pending.
 			 */
-			ath_tx_send_ht_normal(sc, txctl->txq,
-					      tid, &bf_head);
+			ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
 		}
 	} else {
 		bf->bf_state.bfs_ftype = txctl->frame_type;
-		ath_tx_send_normal(sc, txctl->txq, &bf_head);
+		bf->bf_state.bfs_paprd = txctl->paprd;
+
+		if (txctl->paprd)
+			bf->bf_state.bfs_paprd_timestamp = jiffies;
+
+		ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
 	}
 
-tx_done:
 	spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
@@ -1714,39 +1699,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_txq *txq = txctl->txq;
 	struct ath_buf *bf;
-	int q, r;
+	int q;
 
-	bf = ath_tx_get_buffer(sc);
-	if (!bf) {
-		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
-		return -1;
-	}
+	bf = ath_tx_setup_buffer(hw, skb);
+	if (unlikely(!bf))
+		return -ENOMEM;
 
 	q = skb_get_queue_mapping(skb);
-	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
-	if (unlikely(r)) {
-		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
-
-		/* upon ath_tx_processq() this TX queue will be resumed, we
-		 * guarantee this will happen by knowing beforehand that
-		 * we will at least have to run TX completion on one buffer
-		 * on the queue */
-		spin_lock_bh(&txq->axq_lock);
-		if (txq == sc->tx.txq_map[q] && !txq->stopped &&
-		    txq->axq_depth > 1) {
-			ath_mac80211_stop_queue(sc, q);
-			txq->stopped = 1;
-		}
-		spin_unlock_bh(&txq->axq_lock);
-
-		ath_tx_return_buffer(sc, bf);
-
-		return r;
-	}
-
 	spin_lock_bh(&txq->axq_lock);
 	if (txq == sc->tx.txq_map[q] &&
 	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {