Diffstat (limited to 'drivers/net/wireless/ath/ath9k/recv.c')
 drivers/net/wireless/ath/ath9k/recv.c | 518 +++++++++++++++++++++++++++------
 1 file changed, 412 insertions(+), 106 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 94560e2fe376..ac60c4ee62d3 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
  */
 
 #include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
 
 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 					     struct ieee80211_hdr *hdr)
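Note on the new SKB_CB_ATHBUF() macro: sk_buff::cb is a 48-byte scratch area that belongs to whoever currently owns the skb, so the driver can stash the owning struct ath_buf pointer there while the buffer sits in a hardware RX FIFO and recover it when the completed frame is dequeued. A minimal sketch of the idiom (the helper names here are hypothetical; the patch open-codes both directions through the macro):

	/* store the back-pointer before handing the skb to the hw FIFO */
	static inline void ath_skb_set_buf(struct sk_buff *skb, struct ath_buf *bf)
	{
		*((struct ath_buf **)skb->cb) = bf;
	}

	/* recover the owning ath_buf when the skb comes back */
	static inline struct ath_buf *ath_skb_get_buf(struct sk_buff *skb)
	{
		return *((struct ath_buf **)skb->cb);
	}

In the patch itself these appear as "SKB_CB_ATHBUF(skb) = bf;" and "bf = SKB_CB_ATHBUF(skb);".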
@@ -115,56 +118,246 @@ static void ath_opmode_init(struct ath_softc *sc)
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
 }
 
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_rx_edma *rx_edma;
 	struct sk_buff *skb;
 	struct ath_buf *bf;
-	int error = 0;
 
-	spin_lock_init(&sc->rx.rxflushlock);
-	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_lock_init(&sc->rx.rxbuflock);
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+		return false;
 
-	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				     min(common->cachelsz, (u16)64));
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	list_del_init(&bf->list);
 
-	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, common->rx_bufsize);
+	skb = bf->bf_mpdu;
+
+	ATH_RXBUF_RESET(bf);
+	memset(skb->data, 0, ah->caps.rx_status_len);
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				   ah->caps.rx_status_len, DMA_TO_DEVICE);
 
-	/* Initialize rx descriptors */
+	SKB_CB_ATHBUF(skb) = bf;
+	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+	skb_queue_tail(&rx_edma->rx_fifo, skb);
 
-	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-				  "rx", nbufs, 1);
-	if (error != 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "failed to allocate rx descriptors: %d\n", error);
-		goto err;
+	return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+				  enum ath9k_rx_qtype qtype, int size)
+{
+	struct ath_rx_edma *rx_edma;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 nbuf = 0;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (list_empty(&sc->rx.rxbuf)) {
+		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+		return;
 	}
 
+	while (!list_empty(&sc->rx.rxbuf)) {
+		nbuf++;
+
+		if (!ath_rx_edma_buf_link(sc, qtype))
+			break;
+
+		if (nbuf >= size)
+			break;
+	}
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
+{
+	struct ath_buf *bf;
+	struct ath_rx_edma *rx_edma;
+	struct sk_buff *skb;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+
+	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+		if (bf->bf_mpdu)
+			dev_kfree_skb_any(bf->bf_mpdu);
+	}
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+	kfree(sc->rx.rx_bufptr);
+	sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+	skb_queue_head_init(&rx_edma->rx_fifo);
+	skb_queue_head_init(&rx_edma->rx_buffers);
+	rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0, i;
+	u32 size;
+
+
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+				     ah->caps.rx_status_len,
+				     min(common->cachelsz, (u16)64));
+
+	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+				ah->caps.rx_status_len);
+
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+			       ah->caps.rx_lp_qdepth);
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+			       ah->caps.rx_hp_qdepth);
+
+	size = sizeof(struct ath_buf) * nbufs;
+	bf = kzalloc(size, GFP_KERNEL);
+	if (!bf)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+	sc->rx.rx_bufptr = bf;
+
+	for (i = 0; i < nbufs; i++, bf++) {
 		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
-		if (skb == NULL) {
+		if (!skb) {
 			error = -ENOMEM;
-			goto err;
+			goto rx_init_fail;
 		}
 
+		memset(skb->data, 0, common->rx_bufsize);
 		bf->bf_mpdu = skb;
+
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
 						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
+						 DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX init\n");
+			error = -ENOMEM;
+			goto rx_init_fail;
+		}
+
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+
+	return 0;
+
+rx_init_fail:
+	ath_rx_edma_cleanup(sc);
+	return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+
+	ath9k_hw_rxena(sc->sc_ah);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
+	ath_opmode_init(sc);
+
+	ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0;
+
+	spin_lock_init(&sc->rx.rxflushlock);
+	sc->sc_flags &= ~SC_OP_RXFLUSH;
+	spin_lock_init(&sc->rx.rxbuflock);
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		return ath_rx_edma_init(sc, nbufs);
+	} else {
+		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+					     min(common->cachelsz, (u16)64));
+
+		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+			  common->cachelsz, common->rx_bufsize);
+
+		/* Initialize rx descriptors */
+
+		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+					  "rx", nbufs, 1, 0);
+		if (error != 0) {
 			ath_print(common, ATH_DBG_FATAL,
-				  "dma_mapping_error() on RX init\n");
-			error = -ENOMEM;
+				  "failed to allocate rx descriptors: %d\n",
+				  error);
 			goto err;
 		}
-		bf->bf_dmacontext = bf->bf_buf_addr;
+
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+					      GFP_KERNEL);
+			if (skb == NULL) {
+				error = -ENOMEM;
+				goto err;
+			}
+
+			bf->bf_mpdu = skb;
+			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+							 common->rx_bufsize,
+							 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(sc->dev,
+						       bf->bf_buf_addr))) {
+				dev_kfree_skb_any(skb);
+				bf->bf_mpdu = NULL;
+				ath_print(common, ATH_DBG_FATAL,
+					  "dma_mapping_error() on RX init\n");
+				error = -ENOMEM;
+				goto err;
+			}
+			bf->bf_dmacontext = bf->bf_buf_addr;
+		}
+		sc->rx.rxlink = NULL;
 	}
-	sc->rx.rxlink = NULL;
 
 err:
 	if (error)
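Note on the EDMA receive model introduced above: the AR9003 family this series targets has no linked RX descriptor chain. The driver instead pushes buffer addresses into two hardware FIFOs (high- and low-priority), and the chip writes an in-band RX status block of rx_status_len bytes into the head of each buffer when a frame completes. The per-queue bookkeeping this code relies on lives in ath9k.h; judging purely from the fields used here it is, roughly:

	/* sketch inferred from usage in this file, not a verbatim copy */
	struct ath_rx_edma {
		struct sk_buff_head rx_fifo;	/* buffers currently queued to the hw FIFO */
		struct sk_buff_head rx_buffers;	/* completed frames awaiting the rx tasklet */
		u32 rx_fifo_hwsize;		/* hw FIFO depth (rx_hp_qdepth / rx_lp_qdepth) */
	};

This also explains why ath_rx_edma_init() maps buffers DMA_BIDIRECTIONAL: on EDMA the driver zeroes the status area before handing a buffer back to the hardware, while the legacy path only ever reads from the buffer, hence DMA_FROM_DEVICE.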
@@ -180,17 +373,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
-	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = bf->bf_mpdu;
-		if (skb) {
-			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					common->rx_bufsize, DMA_FROM_DEVICE);
-			dev_kfree_skb(skb);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_rx_edma_cleanup(sc);
+		return;
+	} else {
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = bf->bf_mpdu;
+			if (skb) {
+				dma_unmap_single(sc->dev, bf->bf_buf_addr,
+						common->rx_bufsize,
+						DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+			}
 		}
-	}
 
-	if (sc->rx.rxdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+		if (sc->rx.rxdma.dd_desc_len != 0)
+			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+	}
 }
 
 /*
@@ -273,6 +472,11 @@ int ath_startrecv(struct ath_softc *sc)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_buf *bf, *tbf;
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_edma_start_recv(sc);
+		return 0;
+	}
+
 	spin_lock_bh(&sc->rx.rxbuflock);
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
@@ -306,7 +510,11 @@ bool ath_stoprecv(struct ath_softc *sc)
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
-	sc->rx.rxlink = NULL;
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_edma_stop_recv(sc);
+	else
+		sc->rx.rxlink = NULL;
 
 	return stopped;
 }
@@ -315,7 +523,9 @@ void ath_flushrecv(struct ath_softc *sc)
 {
 	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
-	ath_rx_tasklet(sc, 1);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_rx_tasklet(sc, 1, true);
+	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_unlock_bh(&sc->rx.rxflushlock);
 }
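Note on ath_flushrecv(): ath_rx_tasklet() takes a third argument now, and on EDMA hardware a flush has to drain both hardware queues, so the tasklet runs once per queue. The flag is translated inside the tasklet, ahead of the processing loop, as

	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;

On non-EDMA hardware only the second call does any work, and the flag is effectively ignored there.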
@@ -469,14 +679,147 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
 	ieee80211_rx(hw, skb);
 }
 
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-#define PA2DESC(_sc, _pa) \
-	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
-		((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int ret;
+
+	skb = skb_peek(&rx_edma->rx_fifo);
+	if (!skb)
+		return false;
+
+	bf = SKB_CB_ATHBUF(skb);
+	BUG_ON(!bf);
+
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				   common->rx_bufsize, DMA_FROM_DEVICE);
+
+	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+	if (ret == -EINPROGRESS)
+		return false;
+
+	__skb_unlink(skb, &rx_edma->rx_fifo);
+	if (ret == -EINVAL) {
+		/* corrupt descriptor, skip this one and the following one */
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+		skb = skb_peek(&rx_edma->rx_fifo);
+		if (!skb)
+			return true;
+
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+
+		__skb_unlink(skb, &rx_edma->rx_fifo);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+	}
+	skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+	return true;
+}
 
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+						struct ath_rx_status *rs,
+						enum ath9k_rx_qtype qtype)
+{
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct sk_buff *skb;
 	struct ath_buf *bf;
+
+	while (ath_edma_get_buffers(sc, qtype));
+	skb = __skb_dequeue(&rx_edma->rx_buffers);
+	if (!skb)
+		return NULL;
+
+	bf = SKB_CB_ATHBUF(skb);
+	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+	return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+					   struct ath_rx_status *rs)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
+	struct ath_buf *bf;
+	int ret;
+
+	if (list_empty(&sc->rx.rxbuf)) {
+		sc->rx.rxlink = NULL;
+		return NULL;
+	}
+
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	ds = bf->bf_desc;
+
+	/*
+	 * Must provide the virtual address of the current
+	 * descriptor, the physical address, and the virtual
+	 * address of the next descriptor in the h/w chain.
+	 * This allows the HAL to look ahead to see if the
+	 * hardware is done with a descriptor by checking the
+	 * done bit in the following descriptor and the address
+	 * of the current descriptor the DMA engine is working
+	 * on. All this is necessary because of our use of
+	 * a self-linked list to avoid rx overruns.
+	 */
+	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+	if (ret == -EINPROGRESS) {
+		struct ath_rx_status trs;
+		struct ath_buf *tbf;
+		struct ath_desc *tds;
+
+		memset(&trs, 0, sizeof(trs));
+		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+			sc->rx.rxlink = NULL;
+			return NULL;
+		}
+
+		tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+		/*
+		 * On some hardware the descriptor status words could
+		 * get corrupted, including the done bit. Because of
+		 * this, check if the next descriptor's done bit is
+		 * set or not.
+		 *
+		 * If the next descriptor's done bit is set, the current
+		 * descriptor has been corrupted. Force s/w to discard
+		 * this descriptor and continue...
+		 */
+
+		tds = tbf->bf_desc;
+		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+		if (ret == -EINPROGRESS)
+			return NULL;
+	}
+
+	if (!bf->bf_mpdu)
+		return bf;
+
+	/*
+	 * Synchronize the DMA transfer with CPU before
+	 * 1. accessing the frame
+	 * 2. requeueing the same buffer to h/w
+	 */
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+			common->rx_bufsize,
+			DMA_FROM_DEVICE);
+
+	return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+	struct ath_buf *bf;
 	struct sk_buff *skb = NULL, *requeue_skb;
 	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
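A note on the drain loop "while (ath_edma_get_buffers(sc, qtype));" added above: the empty statement body is intentional. Each call tries to move the frame at the head of the hardware FIFO onto rx_edma->rx_buffers (recycling corrupt descriptors back to the hardware along the way) and returns false once the head buffer is still owned by the hardware or the FIFO is empty. An equivalent, more explicit spelling:

	/* drain every completed buffer out of the hw FIFO */
	for (;;) {
		if (!ath_edma_get_buffers(sc, qtype))
			break;	/* head still in progress, or FIFO empty */
	}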
@@ -491,7 +834,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 	int retval;
 	bool decrypt_error = false;
 	struct ath_rx_status rs;
+	enum ath9k_rx_qtype qtype;
+	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+	int dma_type;
 
+	if (edma)
+		dma_type = DMA_FROM_DEVICE;
+	else
+		dma_type = DMA_BIDIRECTIONAL;
+
+	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 	spin_lock_bh(&sc->rx.rxbuflock);
 
 	do {
@@ -499,71 +851,19 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
-		if (list_empty(&sc->rx.rxbuf)) {
-			sc->rx.rxlink = NULL;
-			break;
-		}
-
-		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
-		ds = bf->bf_desc;
-
-		/*
-		 * Must provide the virtual address of the current
-		 * descriptor, the physical address, and the virtual
-		 * address of the next descriptor in the h/w chain.
-		 * This allows the HAL to look ahead to see if the
-		 * hardware is done with a descriptor by checking the
-		 * done bit in the following descriptor and the address
-		 * of the current descriptor the DMA engine is working
-		 * on. All this is necessary because of our use of
-		 * a self-linked list to avoid rx overruns.
-		 */
 		memset(&rs, 0, sizeof(rs));
-		retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
-		if (retval == -EINPROGRESS) {
-			struct ath_rx_status trs;
-			struct ath_buf *tbf;
-			struct ath_desc *tds;
-
-			memset(&trs, 0, sizeof(trs));
-			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
-				sc->rx.rxlink = NULL;
-				break;
-			}
+		if (edma)
+			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+		else
+			bf = ath_get_next_rx_buf(sc, &rs);
 
-			tbf = list_entry(bf->list.next, struct ath_buf, list);
-
-			/*
-			 * On some hardware the descriptor status words could
-			 * get corrupted, including the done bit. Because of
-			 * this, check if the next descriptor's done bit is
-			 * set or not.
-			 *
-			 * If the next descriptor's done bit is set, the current
-			 * descriptor has been corrupted. Force s/w to discard
-			 * this descriptor and continue...
-			 */
-
-			tds = tbf->bf_desc;
-			retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
-			if (retval == -EINPROGRESS) {
-				break;
-			}
-		}
+		if (!bf)
+			break;
 
 		skb = bf->bf_mpdu;
 		if (!skb)
 			continue;
 
-		/*
-		 * Synchronize the DMA transfer with CPU before
-		 * 1. accessing the frame
-		 * 2. requeueing the same buffer to h/w
-		 */
-		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-					common->rx_bufsize,
-					DMA_FROM_DEVICE);
-
 		hdr = (struct ieee80211_hdr *) skb->data;
 		rxs = IEEE80211_SKB_RXCB(skb);
 
@@ -597,9 +897,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
 				 common->rx_bufsize,
-				 DMA_FROM_DEVICE);
+				 dma_type);
 
-		skb_put(skb, rs.rs_datalen);
+		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+		if (ah->caps.rx_status_len)
+			skb_pull(skb, ah->caps.rx_status_len);
 
 		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
 					     rxs, decrypt_error);
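Note on the skb_put()/skb_pull() pair above: on EDMA hardware the RX status block and the frame share one buffer, status first, so the total valid length is rs_datalen plus rx_status_len and the status bytes are stripped before the frame reaches mac80211. The assumed buffer layout:

	/*
	 * bf_buf_addr -> +----------------------------+
	 *                | rx status (rx_status_len)  |  parsed by ath9k_hw_process_rxdesc_edma()
	 *                +----------------------------+
	 *                | frame data (rs_datalen)    |  delivered to mac80211
	 *                +----------------------------+
	 */

On pre-EDMA chips rx_status_len is 0, so both the extra skb_put() length and the skb_pull() collapse to no-ops.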
@@ -608,7 +910,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			bf->bf_mpdu = requeue_skb;
 			bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
 							 common->rx_bufsize,
-							 DMA_FROM_DEVICE);
+							 dma_type);
 			if (unlikely(dma_mapping_error(sc->dev,
 				bf->bf_buf_addr))) {
 				dev_kfree_skb_any(requeue_skb);
@@ -639,12 +941,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 
 requeue:
-		list_move_tail(&bf->list, &sc->rx.rxbuf);
-		ath_rx_buf_link(sc, bf);
+		if (edma) {
+			list_add_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_edma_buf_link(sc, qtype);
+		} else {
+			list_move_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_buf_link(sc, bf);
+		}
 	} while (1);
 
 	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return 0;
-#undef PA2DESC
 }