diff options
author | Felix Fietkau <nbd@openwrt.org> | 2011-12-14 16:08:07 -0500 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2011-12-15 14:46:36 -0500 |
commit | 3ad2952998b08442044690fa9b4ec38c6c3fc4a9 (patch) | |
tree | 3853bcba02617a5077f7f68bdd5e914958965eb0 | |
parent | 6ee8284edb9be5cd567ff3f772de3bf55c73fc7a (diff) |
ath9k: simplify tx locking
Instead of releasing and taking back the lock over and over again in the
tx path, hold the lock a bit longer, requiring far fewer lock/unlock pairs.
This makes locking much easier to review and should not have any noticeable
performance/latency impact.
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r-- | drivers/net/wireless/ath/ath9k/xmit.c | 39 |
1 file changed, 5 insertions, 34 deletions
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 8f38efbea5dc..8766796b48b8 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c | |||
@@ -169,13 +169,11 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |||
169 | INIT_LIST_HEAD(&bf_head); | 169 | INIT_LIST_HEAD(&bf_head); |
170 | 170 | ||
171 | memset(&ts, 0, sizeof(ts)); | 171 | memset(&ts, 0, sizeof(ts)); |
172 | spin_lock_bh(&txq->axq_lock); | ||
173 | 172 | ||
174 | while ((skb = __skb_dequeue(&tid->buf_q))) { | 173 | while ((skb = __skb_dequeue(&tid->buf_q))) { |
175 | fi = get_frame_info(skb); | 174 | fi = get_frame_info(skb); |
176 | bf = fi->bf; | 175 | bf = fi->bf; |
177 | 176 | ||
178 | spin_unlock_bh(&txq->axq_lock); | ||
179 | if (bf && fi->retries) { | 177 | if (bf && fi->retries) { |
180 | list_add_tail(&bf->list, &bf_head); | 178 | list_add_tail(&bf->list, &bf_head); |
181 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); | 179 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); |
@@ -184,7 +182,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |||
184 | } else { | 182 | } else { |
185 | ath_tx_send_normal(sc, txq, NULL, skb); | 183 | ath_tx_send_normal(sc, txq, NULL, skb); |
186 | } | 184 | } |
187 | spin_lock_bh(&txq->axq_lock); | ||
188 | } | 185 | } |
189 | 186 | ||
190 | if (tid->baw_head == tid->baw_tail) { | 187 | if (tid->baw_head == tid->baw_tail) { |
@@ -192,8 +189,6 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | |||
192 | tid->state &= ~AGGR_CLEANUP; | 189 | tid->state &= ~AGGR_CLEANUP; |
193 | } | 190 | } |
194 | 191 | ||
195 | spin_unlock_bh(&txq->axq_lock); | ||
196 | |||
197 | if (sendbar) | 192 | if (sendbar) |
198 | ath_send_bar(tid, tid->seq_start); | 193 | ath_send_bar(tid, tid->seq_start); |
199 | } | 194 | } |
@@ -254,9 +249,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, | |||
254 | bf = fi->bf; | 249 | bf = fi->bf; |
255 | 250 | ||
256 | if (!bf) { | 251 | if (!bf) { |
257 | spin_unlock(&txq->axq_lock); | ||
258 | ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); | 252 | ath_tx_complete(sc, skb, ATH_TX_ERROR, txq); |
259 | spin_lock(&txq->axq_lock); | ||
260 | continue; | 253 | continue; |
261 | } | 254 | } |
262 | 255 | ||
@@ -265,9 +258,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, | |||
265 | if (fi->retries) | 258 | if (fi->retries) |
266 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); | 259 | ath_tx_update_baw(sc, tid, bf->bf_state.seqno); |
267 | 260 | ||
268 | spin_unlock(&txq->axq_lock); | ||
269 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); | 261 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); |
270 | spin_lock(&txq->axq_lock); | ||
271 | } | 262 | } |
272 | 263 | ||
273 | tid->seq_next = tid->seq_start; | 264 | tid->seq_next = tid->seq_start; |
@@ -515,9 +506,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | |||
515 | * complete the acked-ones/xretried ones; update | 506 | * complete the acked-ones/xretried ones; update |
516 | * block-ack window | 507 | * block-ack window |
517 | */ | 508 | */ |
518 | spin_lock_bh(&txq->axq_lock); | ||
519 | ath_tx_update_baw(sc, tid, seqno); | 509 | ath_tx_update_baw(sc, tid, seqno); |
520 | spin_unlock_bh(&txq->axq_lock); | ||
521 | 510 | ||
522 | if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { | 511 | if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { |
523 | memcpy(tx_info->control.rates, rates, sizeof(rates)); | 512 | memcpy(tx_info->control.rates, rates, sizeof(rates)); |
@@ -540,9 +529,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | |||
540 | * run out of tx buf. | 529 | * run out of tx buf. |
541 | */ | 530 | */ |
542 | if (!tbf) { | 531 | if (!tbf) { |
543 | spin_lock_bh(&txq->axq_lock); | ||
544 | ath_tx_update_baw(sc, tid, seqno); | 532 | ath_tx_update_baw(sc, tid, seqno); |
545 | spin_unlock_bh(&txq->axq_lock); | ||
546 | 533 | ||
547 | ath_tx_complete_buf(sc, bf, txq, | 534 | ath_tx_complete_buf(sc, bf, txq, |
548 | &bf_head, ts, 0); | 535 | &bf_head, ts, 0); |
@@ -572,7 +559,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | |||
572 | if (an->sleeping) | 559 | if (an->sleeping) |
573 | ieee80211_sta_set_buffered(sta, tid->tidno, true); | 560 | ieee80211_sta_set_buffered(sta, tid->tidno, true); |
574 | 561 | ||
575 | spin_lock_bh(&txq->axq_lock); | ||
576 | skb_queue_splice(&bf_pending, &tid->buf_q); | 562 | skb_queue_splice(&bf_pending, &tid->buf_q); |
577 | if (!an->sleeping) { | 563 | if (!an->sleeping) { |
578 | ath_tx_queue_tid(txq, tid); | 564 | ath_tx_queue_tid(txq, tid); |
@@ -580,7 +566,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | |||
580 | if (ts->ts_status & ATH9K_TXERR_FILT) | 566 | if (ts->ts_status & ATH9K_TXERR_FILT) |
581 | tid->ac->clear_ps_filter = true; | 567 | tid->ac->clear_ps_filter = true; |
582 | } | 568 | } |
583 | spin_unlock_bh(&txq->axq_lock); | ||
584 | } | 569 | } |
585 | 570 | ||
586 | if (tid->state & AGGR_CLEANUP) | 571 | if (tid->state & AGGR_CLEANUP) |
@@ -1179,9 +1164,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) | |||
1179 | txtid->state |= AGGR_CLEANUP; | 1164 | txtid->state |= AGGR_CLEANUP; |
1180 | else | 1165 | else |
1181 | txtid->state &= ~AGGR_ADDBA_COMPLETE; | 1166 | txtid->state &= ~AGGR_ADDBA_COMPLETE; |
1182 | spin_unlock_bh(&txq->axq_lock); | ||
1183 | 1167 | ||
1184 | ath_tx_flush_tid(sc, txtid); | 1168 | ath_tx_flush_tid(sc, txtid); |
1169 | spin_unlock_bh(&txq->axq_lock); | ||
1185 | } | 1170 | } |
1186 | 1171 | ||
1187 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, | 1172 | void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, |
@@ -1423,8 +1408,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf) | |||
1423 | 1408 | ||
1424 | static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, | 1409 | static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, |
1425 | struct list_head *list, bool retry_tx) | 1410 | struct list_head *list, bool retry_tx) |
1426 | __releases(txq->axq_lock) | ||
1427 | __acquires(txq->axq_lock) | ||
1428 | { | 1411 | { |
1429 | struct ath_buf *bf, *lastbf; | 1412 | struct ath_buf *bf, *lastbf; |
1430 | struct list_head bf_head; | 1413 | struct list_head bf_head; |
@@ -1451,13 +1434,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq, | |||
1451 | if (bf_is_ampdu_not_probing(bf)) | 1434 | if (bf_is_ampdu_not_probing(bf)) |
1452 | txq->axq_ampdu_depth--; | 1435 | txq->axq_ampdu_depth--; |
1453 | 1436 | ||
1454 | spin_unlock_bh(&txq->axq_lock); | ||
1455 | if (bf_isampdu(bf)) | 1437 | if (bf_isampdu(bf)) |
1456 | ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, | 1438 | ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0, |
1457 | retry_tx); | 1439 | retry_tx); |
1458 | else | 1440 | else |
1459 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); | 1441 | ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); |
1460 | spin_lock_bh(&txq->axq_lock); | ||
1461 | } | 1442 | } |
1462 | } | 1443 | } |
1463 | 1444 | ||
@@ -1836,7 +1817,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, | |||
1836 | struct ath_buf *bf; | 1817 | struct ath_buf *bf; |
1837 | u8 tidno; | 1818 | u8 tidno; |
1838 | 1819 | ||
1839 | spin_lock_bh(&txctl->txq->axq_lock); | ||
1840 | if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && | 1820 | if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an && |
1841 | ieee80211_is_data_qos(hdr->frame_control)) { | 1821 | ieee80211_is_data_qos(hdr->frame_control)) { |
1842 | tidno = ieee80211_get_qos_ctl(hdr)[0] & | 1822 | tidno = ieee80211_get_qos_ctl(hdr)[0] & |
@@ -1855,7 +1835,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, | |||
1855 | } else { | 1835 | } else { |
1856 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); | 1836 | bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); |
1857 | if (!bf) | 1837 | if (!bf) |
1858 | goto out; | 1838 | return; |
1859 | 1839 | ||
1860 | bf->bf_state.bfs_paprd = txctl->paprd; | 1840 | bf->bf_state.bfs_paprd = txctl->paprd; |
1861 | 1841 | ||
@@ -1864,9 +1844,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb, | |||
1864 | 1844 | ||
1865 | ath_tx_send_normal(sc, txctl->txq, tid, skb); | 1845 | ath_tx_send_normal(sc, txctl->txq, tid, skb); |
1866 | } | 1846 | } |
1867 | |||
1868 | out: | ||
1869 | spin_unlock_bh(&txctl->txq->axq_lock); | ||
1870 | } | 1847 | } |
1871 | 1848 | ||
1872 | /* Upon failure caller should free skb */ | 1849 | /* Upon failure caller should free skb */ |
@@ -1933,9 +1910,11 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
1933 | ieee80211_stop_queue(sc->hw, q); | 1910 | ieee80211_stop_queue(sc->hw, q); |
1934 | txq->stopped = 1; | 1911 | txq->stopped = 1; |
1935 | } | 1912 | } |
1936 | spin_unlock_bh(&txq->axq_lock); | ||
1937 | 1913 | ||
1938 | ath_tx_start_dma(sc, skb, txctl); | 1914 | ath_tx_start_dma(sc, skb, txctl); |
1915 | |||
1916 | spin_unlock_bh(&txq->axq_lock); | ||
1917 | |||
1939 | return 0; | 1918 | return 0; |
1940 | } | 1919 | } |
1941 | 1920 | ||
@@ -1981,7 +1960,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
1981 | 1960 | ||
1982 | q = skb_get_queue_mapping(skb); | 1961 | q = skb_get_queue_mapping(skb); |
1983 | if (txq == sc->tx.txq_map[q]) { | 1962 | if (txq == sc->tx.txq_map[q]) { |
1984 | spin_lock_bh(&txq->axq_lock); | ||
1985 | if (WARN_ON(--txq->pending_frames < 0)) | 1963 | if (WARN_ON(--txq->pending_frames < 0)) |
1986 | txq->pending_frames = 0; | 1964 | txq->pending_frames = 0; |
1987 | 1965 | ||
@@ -1989,7 +1967,6 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | |||
1989 | ieee80211_wake_queue(sc->hw, q); | 1967 | ieee80211_wake_queue(sc->hw, q); |
1990 | txq->stopped = 0; | 1968 | txq->stopped = 0; |
1991 | } | 1969 | } |
1992 | spin_unlock_bh(&txq->axq_lock); | ||
1993 | } | 1970 | } |
1994 | 1971 | ||
1995 | ieee80211_tx_status(hw, skb); | 1972 | ieee80211_tx_status(hw, skb); |
@@ -2095,8 +2072,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf, | |||
2095 | static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, | 2072 | static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, |
2096 | struct ath_tx_status *ts, struct ath_buf *bf, | 2073 | struct ath_tx_status *ts, struct ath_buf *bf, |
2097 | struct list_head *bf_head) | 2074 | struct list_head *bf_head) |
2098 | __releases(txq->axq_lock) | ||
2099 | __acquires(txq->axq_lock) | ||
2100 | { | 2075 | { |
2101 | int txok; | 2076 | int txok; |
2102 | 2077 | ||
@@ -2106,16 +2081,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, | |||
2106 | if (bf_is_ampdu_not_probing(bf)) | 2081 | if (bf_is_ampdu_not_probing(bf)) |
2107 | txq->axq_ampdu_depth--; | 2082 | txq->axq_ampdu_depth--; |
2108 | 2083 | ||
2109 | spin_unlock_bh(&txq->axq_lock); | ||
2110 | |||
2111 | if (!bf_isampdu(bf)) { | 2084 | if (!bf_isampdu(bf)) { |
2112 | ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); | 2085 | ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); |
2113 | ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); | 2086 | ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); |
2114 | } else | 2087 | } else |
2115 | ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); | 2088 | ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true); |
2116 | 2089 | ||
2117 | spin_lock_bh(&txq->axq_lock); | ||
2118 | |||
2119 | if (sc->sc_flags & SC_OP_TXAGGR) | 2090 | if (sc->sc_flags & SC_OP_TXAGGR) |
2120 | ath_txq_schedule(sc, txq); | 2091 | ath_txq_schedule(sc, txq); |
2121 | } | 2092 | } |