Diffstat (limited to 'drivers/net/wireless/ath/ath9k/recv.c')
-rw-r--r--	drivers/net/wireless/ath/ath9k/recv.c	533
1 file changed, 418 insertions, 115 deletions
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5148c8..ba139132c85f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
  */
 
 #include "ath9k.h"
+#include "ar9003_mac.h"
+
+#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
 
 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 					     struct ieee80211_hdr *hdr)
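The new SKB_CB_ATHBUF() macro is what lets the EDMA receive path hand whole skbs to the hardware FIFO and still find the owning ath_buf on completion: the pointer is stashed in the skb's 48-byte cb[] scratch area, which the driver owns for as long as it owns the skb. A minimal standalone sketch of that pattern, using stand-in types rather than the real ath9k structures:

/*
 * Stand-alone illustration of the skb->cb stash pattern behind
 * SKB_CB_ATHBUF(): a driver-private pointer is written into the skb's
 * control-buffer scratch area and read back later.  fake_skb/fake_buf
 * are illustrative types, not the real sk_buff/ath_buf (whose cb[] is
 * 8-byte aligned in the kernel).
 */
#include <assert.h>
#include <string.h>

struct fake_skb { _Alignas(void *) char cb[48]; };
struct fake_buf { int id; };

#define SKB_CB_BUF(__skb) (*((struct fake_buf **)(__skb)->cb))

int main(void)
{
	struct fake_skb skb;
	struct fake_buf buf = { .id = 42 };

	memset(skb.cb, 0, sizeof(skb.cb));
	SKB_CB_BUF(&skb) = &buf;		/* stash on enqueue */
	assert(SKB_CB_BUF(&skb)->id == 42);	/* recover on completion */
	return 0;
}

In the patch below, ath_rx_edma_buf_link() stores the pointer just before queueing the skb to the RX FIFO, and ath_edma_get_buffers() reads it back once the hardware reports the buffer as done.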
@@ -115,56 +118,244 @@ static void ath_opmode_init(struct ath_softc *sc)
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
 }
 
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_rx_edma *rx_edma;
 	struct sk_buff *skb;
 	struct ath_buf *bf;
-	int error = 0;
 
-	spin_lock_init(&sc->rx.rxflushlock);
-	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_lock_init(&sc->rx.rxbuflock);
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+		return false;
 
-	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				     min(common->cachelsz, (u16)64));
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	list_del_init(&bf->list);
 
-	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, common->rx_bufsize);
+	skb = bf->bf_mpdu;
+
+	ATH_RXBUF_RESET(bf);
+	memset(skb->data, 0, ah->caps.rx_status_len);
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				ah->caps.rx_status_len, DMA_TO_DEVICE);
 
-	/* Initialize rx descriptors */
+	SKB_CB_ATHBUF(skb) = bf;
+	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+	skb_queue_tail(&rx_edma->rx_fifo, skb);
 
-	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-				  "rx", nbufs, 1);
-	if (error != 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "failed to allocate rx descriptors: %d\n", error);
-		goto err;
+	return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+				  enum ath9k_rx_qtype qtype, int size)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 nbuf = 0;
+
+	if (list_empty(&sc->rx.rxbuf)) {
+		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+		return;
 	}
 
+	while (!list_empty(&sc->rx.rxbuf)) {
+		nbuf++;
+
+		if (!ath_rx_edma_buf_link(sc, qtype))
+			break;
+
+		if (nbuf >= size)
+			break;
+	}
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
+{
+	struct ath_buf *bf;
+	struct ath_rx_edma *rx_edma;
+	struct sk_buff *skb;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+
+	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+		if (bf->bf_mpdu)
+			dev_kfree_skb_any(bf->bf_mpdu);
+	}
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+	kfree(sc->rx.rx_bufptr);
+	sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+	skb_queue_head_init(&rx_edma->rx_fifo);
+	skb_queue_head_init(&rx_edma->rx_buffers);
+	rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0, i;
+	u32 size;
+
+
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+				     ah->caps.rx_status_len,
+				     min(common->cachelsz, (u16)64));
+
+	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+				    ah->caps.rx_status_len);
+
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+			       ah->caps.rx_lp_qdepth);
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+			       ah->caps.rx_hp_qdepth);
+
+	size = sizeof(struct ath_buf) * nbufs;
+	bf = kzalloc(size, GFP_KERNEL);
+	if (!bf)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+	sc->rx.rx_bufptr = bf;
+
+	for (i = 0; i < nbufs; i++, bf++) {
 		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
-		if (skb == NULL) {
+		if (!skb) {
 			error = -ENOMEM;
-			goto err;
+			goto rx_init_fail;
 		}
 
+		memset(skb->data, 0, common->rx_bufsize);
 		bf->bf_mpdu = skb;
+
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
 						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
+						 DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(sc->dev,
 						bf->bf_buf_addr))) {
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX init\n");
+			error = -ENOMEM;
+			goto rx_init_fail;
+		}
+
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+
+	return 0;
+
+rx_init_fail:
+	ath_rx_edma_cleanup(sc);
+	return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+
+	ath9k_hw_rxena(sc->sc_ah);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
+	ath_opmode_init(sc);
+
+	ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0;
+
+	spin_lock_init(&sc->rx.rxflushlock);
+	sc->sc_flags &= ~SC_OP_RXFLUSH;
+	spin_lock_init(&sc->rx.rxbuflock);
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		return ath_rx_edma_init(sc, nbufs);
+	} else {
+		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				min(common->cachelsz, (u16)64));
+
+		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+			  common->cachelsz, common->rx_bufsize);
+
+		/* Initialize rx descriptors */
+
+		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+				"rx", nbufs, 1, 0);
+		if (error != 0) {
 			ath_print(common, ATH_DBG_FATAL,
-				  "dma_mapping_error() on RX init\n");
-			error = -ENOMEM;
+				  "failed to allocate rx descriptors: %d\n",
+				  error);
 			goto err;
 		}
-		bf->bf_dmacontext = bf->bf_buf_addr;
+
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+					      GFP_KERNEL);
+			if (skb == NULL) {
+				error = -ENOMEM;
+				goto err;
+			}
+
+			bf->bf_mpdu = skb;
+			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+					common->rx_bufsize,
+					DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(sc->dev,
+							bf->bf_buf_addr))) {
+				dev_kfree_skb_any(skb);
+				bf->bf_mpdu = NULL;
+				ath_print(common, ATH_DBG_FATAL,
+					  "dma_mapping_error() on RX init\n");
+				error = -ENOMEM;
+				goto err;
+			}
+			bf->bf_dmacontext = bf->bf_buf_addr;
+		}
+		sc->rx.rxlink = NULL;
 	}
-	sc->rx.rxlink = NULL;
 
 err:
 	if (error)
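ath_rx_addbuffer_edma() above is a bounded producer: it moves buffers from the driver's free list into a hardware FIFO whose depth is fixed per queue (rx_lp_qdepth/rx_hp_qdepth), stopping as soon as the list runs dry, ath_rx_edma_buf_link() reports the FIFO full, or size entries have been queued. A standalone sketch of that control flow, with plain counters standing in for the real lists and skb queues:

/*
 * Bounded fill loop in the spirit of ath_rx_addbuffer_edma(): move buffers
 * from a free pool into a fixed-depth "hardware" FIFO and stop when the pool
 * is empty, the FIFO is full, or 'size' entries were queued.  The counters
 * are stand-ins for sc->rx.rxbuf and rx_edma->rx_fifo.
 */
#include <stdbool.h>
#include <stdio.h>

#define FIFO_HWSIZE 8		/* stand-in for rx_edma->rx_fifo_hwsize */

static int pool = 20;		/* free buffers, i.e. the rxbuf list */
static int fifo_len;		/* buffers currently queued to "hardware" */

static bool buf_link(void)
{
	if (fifo_len >= FIFO_HWSIZE)	/* mirrors ath_rx_edma_buf_link() */
		return false;
	pool--;
	fifo_len++;
	return true;
}

static void addbuffer(int size)
{
	int nbuf = 0;

	while (pool > 0) {
		nbuf++;
		if (!buf_link())
			break;
		if (nbuf >= size)
			break;
	}
}

int main(void)
{
	addbuffer(FIFO_HWSIZE);
	printf("queued %d, %d left in the pool\n", fifo_len, pool);	/* 8, 12 */
	return 0;
}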
@@ -180,17 +371,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
-	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = bf->bf_mpdu;
-		if (skb) {
-			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 common->rx_bufsize, DMA_FROM_DEVICE);
-			dev_kfree_skb(skb);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_rx_edma_cleanup(sc);
+		return;
+	} else {
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = bf->bf_mpdu;
+			if (skb) {
+				dma_unmap_single(sc->dev, bf->bf_buf_addr,
+						common->rx_bufsize,
+						DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+			}
 		}
-	}
 
-	if (sc->rx.rxdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+		if (sc->rx.rxdma.dd_desc_len != 0)
+			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+	}
 }
 
 /*
@@ -273,6 +470,11 @@ int ath_startrecv(struct ath_softc *sc)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_buf *bf, *tbf;
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_edma_start_recv(sc);
+		return 0;
+	}
+
 	spin_lock_bh(&sc->rx.rxbuflock);
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
@@ -306,7 +508,11 @@ bool ath_stoprecv(struct ath_softc *sc)
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
-	sc->rx.rxlink = NULL;
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_edma_stop_recv(sc);
+	else
+		sc->rx.rxlink = NULL;
 
 	return stopped;
 }
@@ -315,7 +521,9 @@ void ath_flushrecv(struct ath_softc *sc)
 {
 	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
-	ath_rx_tasklet(sc, 1);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_rx_tasklet(sc, 1, true);
+	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_unlock_bh(&sc->rx.rxflushlock);
 }
@@ -469,15 +677,148 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
 	ieee80211_rx(hw, skb);
 }
 
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-#define PA2DESC(_sc, _pa) \
-	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \
-			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int ret;
+
+	skb = skb_peek(&rx_edma->rx_fifo);
+	if (!skb)
+		return false;
+
+	bf = SKB_CB_ATHBUF(skb);
+	BUG_ON(!bf);
+
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				common->rx_bufsize, DMA_FROM_DEVICE);
+
+	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+	if (ret == -EINPROGRESS)
+		return false;
+
+	__skb_unlink(skb, &rx_edma->rx_fifo);
+	if (ret == -EINVAL) {
+		/* corrupt descriptor, skip this one and the following one */
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+		skb = skb_peek(&rx_edma->rx_fifo);
+		if (!skb)
+			return true;
+
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+
+		__skb_unlink(skb, &rx_edma->rx_fifo);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+		return true;
+	}
+	skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+	return true;
+}
 
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+						struct ath_rx_status *rs,
+						enum ath9k_rx_qtype qtype)
+{
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct sk_buff *skb;
 	struct ath_buf *bf;
+
+	while (ath_edma_get_buffers(sc, qtype));
+	skb = __skb_dequeue(&rx_edma->rx_buffers);
+	if (!skb)
+		return NULL;
+
+	bf = SKB_CB_ATHBUF(skb);
+	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+	return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+					   struct ath_rx_status *rs)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
-	struct ath_rx_status *rx_stats;
+	struct ath_buf *bf;
+	int ret;
+
+	if (list_empty(&sc->rx.rxbuf)) {
+		sc->rx.rxlink = NULL;
+		return NULL;
+	}
+
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	ds = bf->bf_desc;
+
+	/*
+	 * Must provide the virtual address of the current
+	 * descriptor, the physical address, and the virtual
+	 * address of the next descriptor in the h/w chain.
+	 * This allows the HAL to look ahead to see if the
+	 * hardware is done with a descriptor by checking the
+	 * done bit in the following descriptor and the address
+	 * of the current descriptor the DMA engine is working
+	 * on. All this is necessary because of our use of
+	 * a self-linked list to avoid rx overruns.
+	 */
+	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+	if (ret == -EINPROGRESS) {
+		struct ath_rx_status trs;
+		struct ath_buf *tbf;
+		struct ath_desc *tds;
+
+		memset(&trs, 0, sizeof(trs));
+		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+			sc->rx.rxlink = NULL;
+			return NULL;
+		}
+
+		tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+		/*
+		 * On some hardware the descriptor status words could
+		 * get corrupted, including the done bit. Because of
+		 * this, check if the next descriptor's done bit is
+		 * set or not.
+		 *
+		 * If the next descriptor's done bit is set, the current
+		 * descriptor has been corrupted. Force s/w to discard
+		 * this descriptor and continue...
+		 */
+
+		tds = tbf->bf_desc;
+		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+		if (ret == -EINPROGRESS)
+			return NULL;
+	}
+
+	if (!bf->bf_mpdu)
+		return bf;
+
+	/*
+	 * Synchronize the DMA transfer with CPU before
+	 * 1. accessing the frame
+	 * 2. requeueing the same buffer to h/w
+	 */
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+			common->rx_bufsize,
+			DMA_FROM_DEVICE);
+
+	return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+	struct ath_buf *bf;
 	struct sk_buff *skb = NULL, *requeue_skb;
 	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
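ath_edma_get_next_rx_buf() above drains the FIFO with `while (ath_edma_get_buffers(sc, qtype));`, relying on ath_edma_get_buffers() to return false as soon as the head buffer is still owned by hardware (-EINPROGRESS), and to recycle both the corrupt buffer and the one queued behind it on -EINVAL. A standalone sketch of that loop, with an array of stand-in status codes in place of ath9k_hw_process_rxdesc_edma():

/*
 * Drain loop in the spirit of ath_edma_get_buffers(): completed entries are
 * staged for the tasklet until the head of the FIFO is still in progress;
 * a corrupt head entry is recycled together with the entry behind it
 * ("skip this one and the following one").  The array and enum are
 * illustrative stand-ins, not the driver's real FIFO or return codes.
 */
#include <stdbool.h>
#include <stdio.h>

enum status { DONE, CORRUPT, IN_PROGRESS };

#define LEN 6
static enum status fifo[LEN] = {
	DONE, DONE, CORRUPT, DONE, IN_PROGRESS, IN_PROGRESS
};
static int head, staged, recycled;

static bool get_buffers(void)
{
	if (head >= LEN || fifo[head] == IN_PROGRESS)
		return false;		/* hardware still owns the head entry */

	if (fifo[head] == CORRUPT) {	/* drop it and the entry after it */
		head++;
		recycled++;
		if (head < LEN) {
			head++;
			recycled++;
		}
		return true;
	}

	head++;
	staged++;			/* completed frame, stage it for processing */
	return true;
}

int main(void)
{
	while (get_buffers())
		;
	printf("staged %d, recycled %d\n", staged, recycled);	/* 2, 2 */
	return 0;
}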
@@ -491,7 +832,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 	struct ieee80211_hdr *hdr;
 	int retval;
 	bool decrypt_error = false;
+	struct ath_rx_status rs;
+	enum ath9k_rx_qtype qtype;
+	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+	int dma_type;
 
+	if (edma)
+		dma_type = DMA_FROM_DEVICE;
+	else
+		dma_type = DMA_BIDIRECTIONAL;
+
+	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 	spin_lock_bh(&sc->rx.rxbuflock);
 
 	do {
@@ -499,79 +850,25 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
-		if (list_empty(&sc->rx.rxbuf)) {
-			sc->rx.rxlink = NULL;
-			break;
-		}
-
-		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
-		ds = bf->bf_desc;
-
-		/*
-		 * Must provide the virtual address of the current
-		 * descriptor, the physical address, and the virtual
-		 * address of the next descriptor in the h/w chain.
-		 * This allows the HAL to look ahead to see if the
-		 * hardware is done with a descriptor by checking the
-		 * done bit in the following descriptor and the address
-		 * of the current descriptor the DMA engine is working
-		 * on. All this is necessary because of our use of
-		 * a self-linked list to avoid rx overruns.
-		 */
-		retval = ath9k_hw_rxprocdesc(ah, ds,
-					     bf->bf_daddr,
-					     PA2DESC(sc, ds->ds_link),
-					     0);
-		if (retval == -EINPROGRESS) {
-			struct ath_buf *tbf;
-			struct ath_desc *tds;
-
-			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
-				sc->rx.rxlink = NULL;
-				break;
-			}
+		memset(&rs, 0, sizeof(rs));
+		if (edma)
+			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+		else
+			bf = ath_get_next_rx_buf(sc, &rs);
 
-			tbf = list_entry(bf->list.next, struct ath_buf, list);
-
-			/*
-			 * On some hardware the descriptor status words could
-			 * get corrupted, including the done bit. Because of
-			 * this, check if the next descriptor's done bit is
-			 * set or not.
-			 *
-			 * If the next descriptor's done bit is set, the current
-			 * descriptor has been corrupted. Force s/w to discard
-			 * this descriptor and continue...
-			 */
-
-			tds = tbf->bf_desc;
-			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
-					     PA2DESC(sc, tds->ds_link), 0);
-			if (retval == -EINPROGRESS) {
-				break;
-			}
-		}
+		if (!bf)
+			break;
 
 		skb = bf->bf_mpdu;
 		if (!skb)
 			continue;
 
-		/*
-		 * Synchronize the DMA transfer with CPU before
-		 * 1. accessing the frame
-		 * 2. requeueing the same buffer to h/w
-		 */
-		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-				common->rx_bufsize,
-				DMA_FROM_DEVICE);
-
 		hdr = (struct ieee80211_hdr *) skb->data;
 		rxs = IEEE80211_SKB_RXCB(skb);
 
 		hw = ath_get_virt_hw(sc, hdr);
-		rx_stats = &ds->ds_rxstat;
 
-		ath_debug_stat_rx(sc, bf);
+		ath_debug_stat_rx(sc, &rs);
 
 		/*
 		 * If we're asked to flush receive queue, directly
@@ -580,7 +877,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		if (flush)
 			goto requeue;
 
-		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats,
+		retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
 						     rxs, &decrypt_error);
 		if (retval)
 			goto requeue;
@@ -599,18 +896,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
 				 common->rx_bufsize,
-				 DMA_FROM_DEVICE);
+				 dma_type);
 
-		skb_put(skb, rx_stats->rs_datalen);
+		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+		if (ah->caps.rx_status_len)
+			skb_pull(skb, ah->caps.rx_status_len);
 
-		ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats,
+		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
 					     rxs, decrypt_error);
 
 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
 						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
+						 dma_type);
 		if (unlikely(dma_mapping_error(sc->dev,
 						bf->bf_buf_addr))) {
 			dev_kfree_skb_any(requeue_skb);
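The skb_put()/skb_pull() pair above matches the buffer layout set up in ath_rx_edma_init(): on EDMA hardware the rx status block (ah->caps.rx_status_len bytes) lands at the head of the buffer, followed by the frame, so the data area is first extended over status plus frame and the status prefix is then stripped off. A standalone sketch of the offset arithmetic, with made-up lengths:

/*
 * Offset arithmetic behind the new skb_put() + skb_pull() calls.  The numbers
 * are illustrative; rx_status_len stands in for ah->caps.rx_status_len and
 * rs_datalen for the frame length reported in struct ath_rx_status.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rx_status_len = 48;	/* per-frame status block size */
	unsigned int rs_datalen = 1200;		/* frame length from the status */
	unsigned int data_off = 0, len = 0;	/* skb->data offset and skb->len */

	/* skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len) */
	len += rs_datalen + rx_status_len;

	/* skb_pull(skb, ah->caps.rx_status_len) */
	data_off += rx_status_len;
	len -= rx_status_len;

	printf("data starts at %u, len %u\n", data_off, len);	/* 48, 1200 */
	return 0;
}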
@@ -626,9 +925,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		 * change the default rx antenna if rx diversity chooses the
 		 * other antenna 3 times in a row.
 		 */
-		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+		if (sc->rx.defant != rs.rs_antenna) {
 			if (++sc->rx.rxotherant >= 3)
-				ath_setdefantenna(sc, rx_stats->rs_antenna);
+				ath_setdefantenna(sc, rs.rs_antenna);
 		} else {
 			sc->rx.rxotherant = 0;
 		}
@@ -641,12 +940,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 
 requeue:
-		list_move_tail(&bf->list, &sc->rx.rxbuf);
-		ath_rx_buf_link(sc, bf);
+		if (edma) {
+			list_add_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_edma_buf_link(sc, qtype);
+		} else {
+			list_move_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_buf_link(sc, bf);
+		}
 	} while (1);
 
 	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return 0;
-#undef PA2DESC
 }