author     Vasanthakumar Thiagarajan <vasanth@atheros.com>   2010-04-15 17:39:36 -0400
committer  John W. Linville <linville@tuxdriver.com>         2010-04-16 15:43:48 -0400
commit     e5003249ae7165654e13fd4809caa346820547f5 (patch)
tree       c3bad98d5e100df82486c5c277dd3b3c48221207 /drivers/net/wireless/ath/ath9k/xmit.c
parent     eb8232535bbeaf51f7c4826265ad96bf966829f3 (diff)
ath9k: Add Tx EDMA support
Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  264
1 file changed, 206 insertions(+), 58 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f9f7445f6652..c2b45030d0b4 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -92,7 +92,6 @@ static int ath_max_4ms_framelen[3][16] = {
 	}
 };
 
-
 /*********************/
 /* Aggregation logic */
 /*********************/
@@ -379,7 +378,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 			}
 		}
 
-		if (bf_next == NULL) {
+		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+		    bf_next == NULL) {
 			/*
 			 * Make sure the last desc is reclaimed if it
 			 * not a holding desc.
@@ -413,36 +413,43 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 					    !txfail, sendbar);
 		} else {
 			/* retry the un-acked ones */
-			if (bf->bf_next == NULL && bf_last->bf_stale) {
-				struct ath_buf *tbf;
-
-				tbf = ath_clone_txbuf(sc, bf_last);
-				/*
-				 * Update tx baw and complete the frame with
-				 * failed status if we run out of tx buf
-				 */
-				if (!tbf) {
-					spin_lock_bh(&txq->axq_lock);
-					ath_tx_update_baw(sc, tid,
-							  bf->bf_seqno);
-					spin_unlock_bh(&txq->axq_lock);
-
-					bf->bf_state.bf_type |= BUF_XRETRY;
-					ath_tx_rc_status(bf, ts, nbad,
-							 0, false);
-					ath_tx_complete_buf(sc, bf, txq,
-							    &bf_head, ts, 0, 0);
-					break;
+			if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
+				if (bf->bf_next == NULL && bf_last->bf_stale) {
+					struct ath_buf *tbf;
+
+					tbf = ath_clone_txbuf(sc, bf_last);
+					/*
+					 * Update tx baw and complete the
+					 * frame with failed status if we
+					 * run out of tx buf.
+					 */
+					if (!tbf) {
+						spin_lock_bh(&txq->axq_lock);
+						ath_tx_update_baw(sc, tid,
+								bf->bf_seqno);
+						spin_unlock_bh(&txq->axq_lock);
+
+						bf->bf_state.bf_type |=
+							BUF_XRETRY;
+						ath_tx_rc_status(bf, ts, nbad,
+								0, false);
+						ath_tx_complete_buf(sc, bf, txq,
+								    &bf_head,
+								    ts, 0, 0);
+						break;
+					}
+
+					ath9k_hw_cleartxdesc(sc->sc_ah,
+							     tbf->bf_desc);
+					list_add_tail(&tbf->list, &bf_head);
+				} else {
+					/*
+					 * Clear descriptor status words for
+					 * software retry
+					 */
+					ath9k_hw_cleartxdesc(sc->sc_ah,
+							     bf->bf_desc);
 				}
-
-				ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
-				list_add_tail(&tbf->list, &bf_head);
-			} else {
-				/*
-				 * Clear descriptor status words for
-				 * software retry
-				 */
-				ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
 			}
 
 			/*
@@ -855,7 +862,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info qi;
-	int qnum;
+	int qnum, i;
 
 	memset(&qi, 0, sizeof(qi));
 	qi.tqi_subtype = subtype;
@@ -910,6 +917,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		txq->axq_depth = 0;
 		txq->axq_tx_inprogress = false;
 		sc->tx.txqsetup |= 1<<qnum;
+
+		txq->txq_headidx = txq->txq_tailidx = 0;
+		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
+			INIT_LIST_HEAD(&txq->txq_fifo[i]);
+		INIT_LIST_HEAD(&txq->txq_fifo_pending);
 	}
 	return &sc->tx.txq[qnum];
 }
@@ -1042,30 +1054,49 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	for (;;) {
 		spin_lock_bh(&txq->axq_lock);
 
-		if (list_empty(&txq->axq_q)) {
-			txq->axq_link = NULL;
-			spin_unlock_bh(&txq->axq_lock);
-			break;
-		}
-
-		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
+		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+			if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+				txq->txq_headidx = txq->txq_tailidx = 0;
+				spin_unlock_bh(&txq->axq_lock);
+				break;
+			} else {
+				bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+						      struct ath_buf, list);
+			}
+		} else {
+			if (list_empty(&txq->axq_q)) {
+				txq->axq_link = NULL;
+				spin_unlock_bh(&txq->axq_lock);
+				break;
+			}
+			bf = list_first_entry(&txq->axq_q, struct ath_buf,
+					      list);
 
-		if (bf->bf_stale) {
-			list_del(&bf->list);
-			spin_unlock_bh(&txq->axq_lock);
-
-			spin_lock_bh(&sc->tx.txbuflock);
-			list_add_tail(&bf->list, &sc->tx.txbuf);
-			spin_unlock_bh(&sc->tx.txbuflock);
-			continue;
+			if (bf->bf_stale) {
+				list_del(&bf->list);
+				spin_unlock_bh(&txq->axq_lock);
+
+				spin_lock_bh(&sc->tx.txbuflock);
+				list_add_tail(&bf->list, &sc->tx.txbuf);
+				spin_unlock_bh(&sc->tx.txbuflock);
+				continue;
+			}
 		}
 
 		lastbf = bf->bf_lastbf;
 		if (!retry_tx)
 			lastbf->bf_tx_aborted = true;
 
-		/* remove ath_buf's of the same mpdu from txq */
-		list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+			list_cut_position(&bf_head,
+					  &txq->txq_fifo[txq->txq_tailidx],
+					  &lastbf->list);
+			INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+		} else {
+			/* remove ath_buf's of the same mpdu from txq */
+			list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
+		}
+
 		txq->axq_depth--;
 
 		spin_unlock_bh(&txq->axq_lock);
@@ -1088,6 +1119,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 			spin_unlock_bh(&txq->axq_lock);
 		}
 	}
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		spin_lock_bh(&txq->axq_lock);
+		while (!list_empty(&txq->txq_fifo_pending)) {
+			bf = list_first_entry(&txq->txq_fifo_pending,
+					      struct ath_buf, list);
+			list_cut_position(&bf_head,
+					  &txq->txq_fifo_pending,
+					  &bf->bf_lastbf->list);
+			spin_unlock_bh(&txq->axq_lock);
+
+			if (bf_isampdu(bf))
+				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
+						     &ts, 0);
+			else
+				ath_tx_complete_buf(sc, bf, txq, &bf_head,
+						    &ts, 0, 0);
+			spin_lock_bh(&txq->axq_lock);
+		}
+		spin_unlock_bh(&txq->axq_lock);
+	}
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1225,25 +1277,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 
 	bf = list_first_entry(head, struct ath_buf, list);
 
-	list_splice_tail_init(head, &txq->axq_q);
-	txq->axq_depth++;
-
 	ath_print(common, ATH_DBG_QUEUE,
 		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
 
-	if (txq->axq_link == NULL) {
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
+			list_splice_tail_init(head, &txq->txq_fifo_pending);
+			return;
+		}
+		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
+			ath_print(common, ATH_DBG_XMIT,
+				  "Initializing tx fifo %d which "
+				  "is non-empty\n",
+				  txq->txq_headidx);
+		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
+		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
+		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
 		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
 		ath_print(common, ATH_DBG_XMIT,
 			  "TXDP[%u] = %llx (%p)\n",
 			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
 	} else {
-		*txq->axq_link = bf->bf_daddr;
-		ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
-			  txq->axq_qnum, txq->axq_link,
-			  ito64(bf->bf_daddr), bf->bf_desc);
+		list_splice_tail_init(head, &txq->axq_q);
+
+		if (txq->axq_link == NULL) {
+			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
+			ath_print(common, ATH_DBG_XMIT,
+				  "TXDP[%u] = %llx (%p)\n",
+				  txq->axq_qnum, ito64(bf->bf_daddr),
+				  bf->bf_desc);
+		} else {
+			*txq->axq_link = bf->bf_daddr;
+			ath_print(common, ATH_DBG_XMIT,
+				  "link[%u] (%p)=%llx (%p)\n",
+				  txq->axq_qnum, txq->axq_link,
+				  ito64(bf->bf_daddr), bf->bf_desc);
+		}
+		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
+				       &txq->axq_link);
+		ath9k_hw_txstart(ah, txq->axq_qnum);
 	}
-	ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, &txq->axq_link);
-	ath9k_hw_txstart(ah, txq->axq_qnum);
+	txq->axq_depth++;
 }
 
 static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
@@ -2140,6 +2214,80 @@ void ath_tx_tasklet(struct ath_softc *sc)
 	}
 }
 
+void ath_tx_edma_tasklet(struct ath_softc *sc)
+{
+	struct ath_tx_status txs;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_txq *txq;
+	struct ath_buf *bf, *lastbf;
+	struct list_head bf_head;
+	int status;
+	int txok;
+
+	for (;;) {
+		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
+		if (status == -EINPROGRESS)
+			break;
+		if (status == -EIO) {
+			ath_print(common, ATH_DBG_XMIT,
+				  "Error processing tx status\n");
+			break;
+		}
+
+		/* Skip beacon completions */
+		if (txs.qid == sc->beacon.beaconq)
+			continue;
+
+		txq = &sc->tx.txq[txs.qid];
+
+		spin_lock_bh(&txq->axq_lock);
+		if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
+			spin_unlock_bh(&txq->axq_lock);
+			return;
+		}
+
+		bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
+				      struct ath_buf, list);
+		lastbf = bf->bf_lastbf;
+
+		INIT_LIST_HEAD(&bf_head);
+		list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
+				  &lastbf->list);
+		INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
+		txq->axq_depth--;
+		txq->axq_tx_inprogress = false;
+		spin_unlock_bh(&txq->axq_lock);
+
+		txok = !(txs.ts_status & ATH9K_TXERR_MASK);
+
+		if (!bf_isampdu(bf)) {
+			bf->bf_retries = txs.ts_longretry;
+			if (txs.ts_status & ATH9K_TXERR_XRETRY)
+				bf->bf_state.bf_type |= BUF_XRETRY;
+			ath_tx_rc_status(bf, &txs, 0, txok, true);
+		}
+
+		if (bf_isampdu(bf))
+			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
+		else
+			ath_tx_complete_buf(sc, bf, txq, &bf_head,
+					    &txs, txok, 0);
+
+		spin_lock_bh(&txq->axq_lock);
+		if (!list_empty(&txq->txq_fifo_pending)) {
+			INIT_LIST_HEAD(&bf_head);
+			bf = list_first_entry(&txq->txq_fifo_pending,
+					      struct ath_buf, list);
+			list_cut_position(&bf_head, &txq->txq_fifo_pending,
+					  &bf->bf_lastbf->list);
+			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		} else if (sc->sc_flags & SC_OP_TXAGGR)
+			ath_txq_schedule(sc, txq);
+		spin_unlock_bh(&txq->axq_lock);
+	}
+}
+
 /*****************/
 /* Init, Cleanup */
 /*****************/
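
Not part of the commit: a minimal, self-contained sketch of the head/tail ring indexing this patch introduces for the EDMA tx FIFO. txq_headidx is advanced when a frame set is submitted (ath_tx_txqaddbuf), txq_tailidx when its completion is processed (ath_tx_edma_tasklet), both wrapping at ATH_TXFIFO_DEPTH; overflow is parked on txq_fifo_pending. The depth of 8 and the INCR() definition below are assumptions made for illustration, not taken from the driver headers.

/*
 * Illustrative sketch only -- not from the patch.  Models the
 * producer/consumer indexing of the EDMA tx FIFO ring shown above.
 * ATH_TXFIFO_DEPTH and INCR() are assumed definitions for this example.
 */
#include <stdio.h>

#define ATH_TXFIFO_DEPTH	8	/* assumed power-of-two ring size */
/* advance an index and wrap it at the (power-of-two) ring size */
#define INCR(_l, _sz)		do { (_l)++; (_l) &= ((_sz) - 1); } while (0)

int main(void)
{
	int headidx = 0;	/* cf. txq->txq_headidx, bumped on submit   */
	int tailidx = 0;	/* cf. txq->txq_tailidx, bumped on complete */
	int depth = 0;		/* cf. txq->axq_depth                       */
	int i;

	/* submit five frame sets, as ath_tx_txqaddbuf() would */
	for (i = 0; i < 5; i++) {
		if (depth >= ATH_TXFIFO_DEPTH) {
			/* ring full: the driver parks these on txq_fifo_pending */
			printf("FIFO full, frame set %d deferred\n", i);
			continue;
		}
		printf("submit into txq_fifo[%d]\n", headidx);
		INCR(headidx, ATH_TXFIFO_DEPTH);
		depth++;
	}

	/* drain completions in order, as ath_tx_edma_tasklet() would */
	while (depth > 0) {
		printf("complete txq_fifo[%d]\n", tailidx);
		INCR(tailidx, ATH_TXFIFO_DEPTH);
		depth--;
	}

	return 0;
}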