author     Felix Fietkau <nbd@openwrt.org>            2010-04-15 17:38:48 -0400
committer  John W. Linville <linville@tuxdriver.com>  2010-04-16 15:43:26 -0400
commit     b5c80475abaad015699384ca64ef8229fdd88758 (patch)
tree       68323f0a04427973085153304167678681f5459b /drivers/net/wireless/ath
parent     c38d4d2eb988717f7a8be24faeada648b5dac52a (diff)
ath9k: Add Rx EDMA support
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath')
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c |   3
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h      |  10
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.h         |   2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c       |  40
-rw-r--r--  drivers/net/wireless/ath/ath9k/recv.c       | 517
5 files changed, 459 insertions(+), 113 deletions(-)
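In brief, this patch teaches the ath9k Rx path about the enhanced DMA (EDMA) engine on AR9003-family hardware: instead of a single self-linked descriptor chain, the chip exposes two Rx FIFOs (high and low priority), signals them through the new ATH9K_INT_RXHP/ATH9K_INT_RXLP interrupt bits, and prepends a status area of rx_status_len bytes to every buffer. The standalone sketch below is not part of the patch; it only mirrors the constants from hw.h and the dispatch flow added to ath9k_tasklet() in main.c, where the high-priority queue is drained before the low-priority one:

#include <stdbool.h>
#include <stdio.h>

#define ATH9K_INT_RX     0x00000001
#define ATH9K_INT_RXHP   0x00000001	/* EDMA chips: high priority Rx */
#define ATH9K_INT_RXLP   0x00000002	/* EDMA chips: low priority Rx */
#define ATH9K_INT_RXEOL  0x00000010
#define ATH9K_INT_RXORN  0x00000020

/* stand-in for ath_rx_tasklet(sc, flush, hp) */
static void rx_tasklet(int flush, bool hp)
{
	printf("drain %s-priority Rx queue (flush=%d)\n",
	       hp ? "high" : "low", flush);
}

static void dispatch_rx(unsigned int status, bool edma)
{
	unsigned int rxmask;

	if (edma)
		rxmask = ATH9K_INT_RXHP | ATH9K_INT_RXLP |
			 ATH9K_INT_RXEOL | ATH9K_INT_RXORN;
	else
		rxmask = ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN;

	if (status & rxmask) {
		/* check for high priority Rx first, as the patch does */
		if (edma && (status & ATH9K_INT_RXHP))
			rx_tasklet(0, true);
		rx_tasklet(0, false);
	}
}

int main(void)
{
	dispatch_rx(ATH9K_INT_RXHP | ATH9K_INT_RXLP, true);	/* EDMA chip */
	dispatch_rx(ATH9K_INT_RX, false);			/* legacy chip */
	return 0;
}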
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index f28adb23ac83..b229597e54cd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -72,6 +72,9 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
 	if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
 		return -EINPROGRESS;
 
+	if (!rxs)
+		return 0;
+
 	rxs->rs_status = 0;
 	rxs->rs_flags = 0;
 
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index bdcd257ca7a4..a11d830d76a9 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -223,6 +223,12 @@ struct ath_tx {
 	struct ath_descdma txdma;
 };
 
+struct ath_rx_edma {
+	struct sk_buff_head rx_fifo;
+	struct sk_buff_head rx_buffers;
+	u32 rx_fifo_hwsize;
+};
+
 struct ath_rx {
 	u8 defant;
 	u8 rxotherant;
@@ -232,6 +238,8 @@ struct ath_rx {
 	spinlock_t rxbuflock;
 	struct list_head rxbuf;
 	struct ath_descdma rxdma;
+	struct ath_buf *rx_bufptr;
+	struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
 };
 
 int ath_startrecv(struct ath_softc *sc);
@@ -240,7 +248,7 @@ void ath_flushrecv(struct ath_softc *sc);
 u32 ath_calcrxfilter(struct ath_softc *sc);
 int ath_rx_init(struct ath_softc *sc, int nbufs);
 void ath_rx_cleanup(struct ath_softc *sc);
-int ath_rx_tasklet(struct ath_softc *sc, int flush);
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
 struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_setup(struct ath_softc *sc, int haltype);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index c02f46e4a9b8..1eceda22ae5f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -246,6 +246,8 @@ struct ath9k_ops_config {
 enum ath9k_int {
 	ATH9K_INT_RX = 0x00000001,
 	ATH9K_INT_RXDESC = 0x00000002,
+	ATH9K_INT_RXHP = 0x00000001,
+	ATH9K_INT_RXLP = 0x00000002,
 	ATH9K_INT_RXNOFRM = 0x00000008,
 	ATH9K_INT_RXEOL = 0x00000010,
 	ATH9K_INT_RXORN = 0x00000020,
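A note on the two enum values just added: ATH9K_INT_RXHP deliberately reuses the bit value of ATH9K_INT_RX (0x00000001) and ATH9K_INT_RXLP that of ATH9K_INT_RXDESC (0x00000002). A mask is only ever interpreted against one chip family, since ath9k_start() below enables either the RXHP/RXLP pair (EDMA) or plain RX (legacy), never both. A minimal compile-and-run check of that aliasing (hypothetical, not in the patch; duplicate enumerator values are legal C):

#include <assert.h>

enum ath9k_int_sketch {
	INT_RX     = 0x00000001,
	INT_RXDESC = 0x00000002,
	INT_RXHP   = 0x00000001,	/* aliases INT_RX */
	INT_RXLP   = 0x00000002,	/* aliases INT_RXDESC */
};

int main(void)
{
	assert(INT_RXHP == INT_RX);
	assert(INT_RXLP == INT_RXDESC);
	return 0;
}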
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 494456cd1daa..92f6fdc30076 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -401,6 +401,7 @@ void ath9k_tasklet(unsigned long data)
 	struct ath_common *common = ath9k_hw_common(ah);
 
 	u32 status = sc->intrstatus;
+	u32 rxmask;
 
 	ath9k_ps_wakeup(sc);
 
@@ -410,9 +411,21 @@ void ath9k_tasklet(unsigned long data)
 		return;
 	}
 
-	if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
+			  ATH9K_INT_RXORN);
+	else
+		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+
+	if (status & rxmask) {
 		spin_lock_bh(&sc->rx.rxflushlock);
-		ath_rx_tasklet(sc, 0);
+
+		/* Check for high priority Rx first */
+		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+		    (status & ATH9K_INT_RXHP))
+			ath_rx_tasklet(sc, 0, true);
+
+		ath_rx_tasklet(sc, 0, false);
 		spin_unlock_bh(&sc->rx.rxflushlock);
 	}
 
@@ -445,6 +458,8 @@ irqreturn_t ath_isr(int irq, void *dev)
 		ATH9K_INT_RXORN |		\
 		ATH9K_INT_RXEOL |		\
 		ATH9K_INT_RX |			\
+		ATH9K_INT_RXLP |		\
+		ATH9K_INT_RXHP |		\
 		ATH9K_INT_TX |			\
 		ATH9K_INT_BMISS |		\
 		ATH9K_INT_CST |			\
@@ -496,7 +511,8 @@ irqreturn_t ath_isr(int irq, void *dev)
 	 * If a FATAL or RXORN interrupt is received, we have to reset the
 	 * chip immediately.
 	 */
-	if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
+	if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
+	    !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
 		goto chip_reset;
 
 	if (status & ATH9K_INT_SWBA)
@@ -505,6 +521,13 @@ irqreturn_t ath_isr(int irq, void *dev)
 	if (status & ATH9K_INT_TXURN)
 		ath9k_hw_updatetxtriglevel(ah, true);
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		if (status & ATH9K_INT_RXEOL) {
+			ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
+			ath9k_hw_set_interrupts(ah, ah->imask);
+		}
+	}
+
 	if (status & ATH9K_INT_MIB) {
 		/*
 		 * Disable interrupts until we service the MIB
@@ -1162,9 +1185,14 @@ static int ath9k_start(struct ieee80211_hw *hw)
 	}
 
 	/* Setup our intr mask. */
-	ah->imask = ATH9K_INT_RX | ATH9K_INT_TX
-		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
-		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
+	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
+		    ATH9K_INT_RXORN | ATH9K_INT_FATAL |
+		    ATH9K_INT_GLOBAL;
+
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
+	else
+		ah->imask |= ATH9K_INT_RX;
 
 	if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
 		ah->imask |= ATH9K_INT_GTT;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 94560e2fe376..ffb599c49185 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -16,6 +16,8 @@
 
 #include "ath9k.h"
 
+#define SKB_CB_ATHBUF(__skb)	(*((struct ath_buf **)__skb->cb))
+
 static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
 					     struct ieee80211_hdr *hdr)
 {
@@ -115,56 +117,246 @@ static void ath_opmode_init(struct ath_softc *sc)
 	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
 }
 
-int ath_rx_init(struct ath_softc *sc, int nbufs)
+static bool ath_rx_edma_buf_link(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_rx_edma *rx_edma;
 	struct sk_buff *skb;
 	struct ath_buf *bf;
-	int error = 0;
 
-	spin_lock_init(&sc->rx.rxflushlock);
-	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_lock_init(&sc->rx.rxbuflock);
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
+		return false;
 
-	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-				     min(common->cachelsz, (u16)64));
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	list_del_init(&bf->list);
 
-	ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-		  common->cachelsz, common->rx_bufsize);
+	skb = bf->bf_mpdu;
+
+	ATH_RXBUF_RESET(bf);
+	memset(skb->data, 0, ah->caps.rx_status_len);
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				   ah->caps.rx_status_len, DMA_TO_DEVICE);
 
-	/* Initialize rx descriptors */
+	SKB_CB_ATHBUF(skb) = bf;
+	ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
+	skb_queue_tail(&rx_edma->rx_fifo, skb);
 
-	error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-				  "rx", nbufs, 1);
-	if (error != 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "failed to allocate rx descriptors: %d\n", error);
-		goto err;
+	return true;
+}
+
+static void ath_rx_addbuffer_edma(struct ath_softc *sc,
+				  enum ath9k_rx_qtype qtype, int size)
+{
+	struct ath_rx_edma *rx_edma;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 nbuf = 0;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+	if (list_empty(&sc->rx.rxbuf)) {
+		ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+		return;
 	}
 
+	while (!list_empty(&sc->rx.rxbuf)) {
+		nbuf++;
+
+		if (!ath_rx_edma_buf_link(sc, qtype))
+			break;
+
+		if (nbuf >= size)
+			break;
+	}
+}
+
+static void ath_rx_remove_buffer(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
+{
+	struct ath_buf *bf;
+	struct ath_rx_edma *rx_edma;
+	struct sk_buff *skb;
+
+	rx_edma = &sc->rx.rx_edma[qtype];
+
+	while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+}
+
+static void ath_rx_edma_cleanup(struct ath_softc *sc)
+{
+	struct ath_buf *bf;
+
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+
 	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+		if (bf->bf_mpdu)
+			dev_kfree_skb_any(bf->bf_mpdu);
+	}
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+
+	kfree(sc->rx.rx_bufptr);
+	sc->rx.rx_bufptr = NULL;
+}
+
+static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
+{
+	skb_queue_head_init(&rx_edma->rx_fifo);
+	skb_queue_head_init(&rx_edma->rx_buffers);
+	rx_edma->rx_fifo_hwsize = size;
+}
+
+static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct ath_hw *ah = sc->sc_ah;
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0, i;
+	u32 size;
+
+
+	common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
+				     ah->caps.rx_status_len,
+				     min(common->cachelsz, (u16)64));
+
+	ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
+				    ah->caps.rx_status_len);
+
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
+			       ah->caps.rx_lp_qdepth);
+	ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
+			       ah->caps.rx_hp_qdepth);
+
+	size = sizeof(struct ath_buf) * nbufs;
+	bf = kzalloc(size, GFP_KERNEL);
+	if (!bf)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&sc->rx.rxbuf);
+	sc->rx.rx_bufptr = bf;
+
+	for (i = 0; i < nbufs; i++, bf++) {
 		skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
-		if (skb == NULL) {
+		if (!skb) {
 			error = -ENOMEM;
-			goto err;
+			goto rx_init_fail;
 		}
 
+		memset(skb->data, 0, common->rx_bufsize);
 		bf->bf_mpdu = skb;
+
 		bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
 						 common->rx_bufsize,
-						 DMA_FROM_DEVICE);
+						 DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(sc->dev,
 					       bf->bf_buf_addr))) {
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
+			ath_print(common, ATH_DBG_FATAL,
+				  "dma_mapping_error() on RX init\n");
+			error = -ENOMEM;
+			goto rx_init_fail;
+		}
+
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+	}
+
+	return 0;
+
+rx_init_fail:
+	ath_rx_edma_cleanup(sc);
+	return error;
+}
+
+static void ath_edma_start_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+
+	ath9k_hw_rxena(sc->sc_ah);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
+
+	ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
+			      sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
+
+	spin_unlock_bh(&sc->rx.rxbuflock);
+
+	ath_opmode_init(sc);
+
+	ath9k_hw_startpcureceive(sc->sc_ah);
+}
+
+static void ath_edma_stop_recv(struct ath_softc *sc)
+{
+	spin_lock_bh(&sc->rx.rxbuflock);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
+	ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
+	spin_unlock_bh(&sc->rx.rxbuflock);
+}
+
+int ath_rx_init(struct ath_softc *sc, int nbufs)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int error = 0;
+
+	spin_lock_init(&sc->rx.rxflushlock);
+	sc->sc_flags &= ~SC_OP_RXFLUSH;
+	spin_lock_init(&sc->rx.rxbuflock);
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		return ath_rx_edma_init(sc, nbufs);
+	} else {
+		common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+				min(common->cachelsz, (u16)64));
+
+		ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+				common->cachelsz, common->rx_bufsize);
+
+		/* Initialize rx descriptors */
+
+		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+				"rx", nbufs, 1);
+		if (error != 0) {
 			ath_print(common, ATH_DBG_FATAL,
-				  "dma_mapping_error() on RX init\n");
-			error = -ENOMEM;
-			goto err;
-		}
-		bf->bf_dmacontext = bf->bf_buf_addr;
+				  "failed to allocate rx descriptors: %d\n",
+				  error);
+			goto err;
+		}
+
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = ath_rxbuf_alloc(common, common->rx_bufsize,
+					      GFP_KERNEL);
+			if (skb == NULL) {
+				error = -ENOMEM;
+				goto err;
+			}
+
+			bf->bf_mpdu = skb;
+			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+					common->rx_bufsize,
+					DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(sc->dev,
+							bf->bf_buf_addr))) {
+				dev_kfree_skb_any(skb);
+				bf->bf_mpdu = NULL;
+				ath_print(common, ATH_DBG_FATAL,
+					  "dma_mapping_error() on RX init\n");
+				error = -ENOMEM;
+				goto err;
+			}
+			bf->bf_dmacontext = bf->bf_buf_addr;
+		}
+		sc->rx.rxlink = NULL;
 	}
-	sc->rx.rxlink = NULL;
 
 err:
 	if (error)
@@ -180,17 +372,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
-	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-		skb = bf->bf_mpdu;
-		if (skb) {
-			dma_unmap_single(sc->dev, bf->bf_buf_addr,
-					 common->rx_bufsize, DMA_FROM_DEVICE);
-			dev_kfree_skb(skb);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_rx_edma_cleanup(sc);
+		return;
+	} else {
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = bf->bf_mpdu;
+			if (skb) {
+				dma_unmap_single(sc->dev, bf->bf_buf_addr,
+						 common->rx_bufsize,
+						 DMA_FROM_DEVICE);
+				dev_kfree_skb(skb);
+			}
 		}
-	}
 
-	if (sc->rx.rxdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+		if (sc->rx.rxdma.dd_desc_len != 0)
+			ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
+	}
 }
 
 /*
@@ -273,6 +471,11 @@ int ath_startrecv(struct ath_softc *sc)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_buf *bf, *tbf;
 
+	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+		ath_edma_start_recv(sc);
+		return 0;
+	}
+
 	spin_lock_bh(&sc->rx.rxbuflock);
 	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
@@ -306,7 +509,11 @@ bool ath_stoprecv(struct ath_softc *sc)
 	ath9k_hw_stoppcurecv(ah);
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
-	sc->rx.rxlink = NULL;
+
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_edma_stop_recv(sc);
+	else
+		sc->rx.rxlink = NULL;
 
 	return stopped;
 }
@@ -315,7 +522,9 @@ void ath_flushrecv(struct ath_softc *sc)
 {
 	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
-	ath_rx_tasklet(sc, 1);
+	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
+		ath_rx_tasklet(sc, 1, true);
+	ath_rx_tasklet(sc, 1, false);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
 	spin_unlock_bh(&sc->rx.rxflushlock);
 }
@@ -469,14 +678,147 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
 	ieee80211_rx(hw, skb);
 }
 
-int ath_rx_tasklet(struct ath_softc *sc, int flush)
+static bool ath_edma_get_buffers(struct ath_softc *sc,
+				 enum ath9k_rx_qtype qtype)
 {
-#define PA2DESC(_sc, _pa)						\
-	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +	\
-			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
+	struct sk_buff *skb;
+	struct ath_buf *bf;
+	int ret;
+
+	skb = skb_peek(&rx_edma->rx_fifo);
+	if (!skb)
+		return false;
+
+	bf = SKB_CB_ATHBUF(skb);
+	BUG_ON(!bf);
+
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+				   common->rx_bufsize, DMA_FROM_DEVICE);
+
+	ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
+	if (ret == -EINPROGRESS)
+		return false;
+
+	__skb_unlink(skb, &rx_edma->rx_fifo);
+	if (ret == -EINVAL) {
+		/* corrupt descriptor, skip this one and the following one */
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+		skb = skb_peek(&rx_edma->rx_fifo);
+		if (!skb)
+			return true;
+
+		bf = SKB_CB_ATHBUF(skb);
+		BUG_ON(!bf);
+
+		__skb_unlink(skb, &rx_edma->rx_fifo);
+		list_add_tail(&bf->list, &sc->rx.rxbuf);
+		ath_rx_edma_buf_link(sc, qtype);
+	}
+	skb_queue_tail(&rx_edma->rx_buffers, skb);
+
+	return true;
+}
 
+static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
+						struct ath_rx_status *rs,
+						enum ath9k_rx_qtype qtype)
+{
+	struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
+	struct sk_buff *skb;
 	struct ath_buf *bf;
+
+	while (ath_edma_get_buffers(sc, qtype));
+	skb = __skb_dequeue(&rx_edma->rx_buffers);
+	if (!skb)
+		return NULL;
+
+	bf = SKB_CB_ATHBUF(skb);
+	ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
+	return bf;
+}
+
+static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
+					   struct ath_rx_status *rs)
+{
+	struct ath_hw *ah = sc->sc_ah;
+	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_desc *ds;
+	struct ath_buf *bf;
+	int ret;
+
+	if (list_empty(&sc->rx.rxbuf)) {
+		sc->rx.rxlink = NULL;
+		return NULL;
+	}
+
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+	ds = bf->bf_desc;
+
+	/*
+	 * Must provide the virtual address of the current
+	 * descriptor, the physical address, and the virtual
+	 * address of the next descriptor in the h/w chain.
+	 * This allows the HAL to look ahead to see if the
+	 * hardware is done with a descriptor by checking the
+	 * done bit in the following descriptor and the address
+	 * of the current descriptor the DMA engine is working
+	 * on. All this is necessary because of our use of
+	 * a self-linked list to avoid rx overruns.
+	 */
+	ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
+	if (ret == -EINPROGRESS) {
+		struct ath_rx_status trs;
+		struct ath_buf *tbf;
+		struct ath_desc *tds;
+
+		memset(&trs, 0, sizeof(trs));
+		if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+			sc->rx.rxlink = NULL;
+			return NULL;
+		}
+
+		tbf = list_entry(bf->list.next, struct ath_buf, list);
+
+		/*
+		 * On some hardware the descriptor status words could
+		 * get corrupted, including the done bit. Because of
+		 * this, check if the next descriptor's done bit is
+		 * set or not.
+		 *
+		 * If the next descriptor's done bit is set, the current
+		 * descriptor has been corrupted. Force s/w to discard
+		 * this descriptor and continue...
+		 */
+
+		tds = tbf->bf_desc;
+		ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
+		if (ret == -EINPROGRESS)
+			return NULL;
+	}
+
+	if (!bf->bf_mpdu)
+		return bf;
+
+	/*
+	 * Synchronize the DMA transfer with CPU before
+	 * 1. accessing the frame
+	 * 2. requeueing the same buffer to h/w
+	 */
+	dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
+			common->rx_bufsize,
+			DMA_FROM_DEVICE);
+
+	return bf;
+}
+
+
+int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
+{
+	struct ath_buf *bf;
 	struct sk_buff *skb = NULL, *requeue_skb;
 	struct ieee80211_rx_status *rxs;
 	struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +833,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 	int retval;
 	bool decrypt_error = false;
 	struct ath_rx_status rs;
+	enum ath9k_rx_qtype qtype;
+	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
+	int dma_type;
 
+	if (edma)
+		dma_type = DMA_FROM_DEVICE;
+	else
+		dma_type = DMA_BIDIRECTIONAL;
+
+	qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
 	spin_lock_bh(&sc->rx.rxbuflock);
 
 	do {
@@ -499,71 +850,19 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
-		if (list_empty(&sc->rx.rxbuf)) {
-			sc->rx.rxlink = NULL;
-			break;
-		}
-
-		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
-		ds = bf->bf_desc;
-
-		/*
-		 * Must provide the virtual address of the current
-		 * descriptor, the physical address, and the virtual
-		 * address of the next descriptor in the h/w chain.
-		 * This allows the HAL to look ahead to see if the
-		 * hardware is done with a descriptor by checking the
-		 * done bit in the following descriptor and the address
-		 * of the current descriptor the DMA engine is working
-		 * on. All this is necessary because of our use of
-		 * a self-linked list to avoid rx overruns.
-		 */
 		memset(&rs, 0, sizeof(rs));
-		retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
-		if (retval == -EINPROGRESS) {
-			struct ath_rx_status trs;
-			struct ath_buf *tbf;
-			struct ath_desc *tds;
-
-			memset(&trs, 0, sizeof(trs));
-			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
-				sc->rx.rxlink = NULL;
-				break;
-			}
+		if (edma)
+			bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
+		else
+			bf = ath_get_next_rx_buf(sc, &rs);
 
-			tbf = list_entry(bf->list.next, struct ath_buf, list);
-
-			/*
-			 * On some hardware the descriptor status words could
-			 * get corrupted, including the done bit. Because of
-			 * this, check if the next descriptor's done bit is
-			 * set or not.
-			 *
-			 * If the next descriptor's done bit is set, the current
-			 * descriptor has been corrupted. Force s/w to discard
-			 * this descriptor and continue...
-			 */
-
-			tds = tbf->bf_desc;
-			retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
-			if (retval == -EINPROGRESS) {
-				break;
-			}
-		}
+		if (!bf)
+			break;
 
 		skb = bf->bf_mpdu;
 		if (!skb)
 			continue;
 
-		/*
-		 * Synchronize the DMA transfer with CPU before
-		 * 1. accessing the frame
-		 * 2. requeueing the same buffer to h/w
-		 */
-		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
-				common->rx_bufsize,
-				DMA_FROM_DEVICE);
-
 		hdr = (struct ieee80211_hdr *) skb->data;
 		rxs = IEEE80211_SKB_RXCB(skb);
 
@@ -597,9 +896,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* Unmap the frame */
 		dma_unmap_single(sc->dev, bf->bf_buf_addr,
 				 common->rx_bufsize,
-				 DMA_FROM_DEVICE);
+				 dma_type);
 
-		skb_put(skb, rs.rs_datalen);
+		skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
+		if (ah->caps.rx_status_len)
+			skb_pull(skb, ah->caps.rx_status_len);
 
 		ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
 					     rxs, decrypt_error);
@@ -608,7 +909,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			bf->bf_mpdu = requeue_skb;
 			bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
 							 common->rx_bufsize,
-							 DMA_FROM_DEVICE);
+							 dma_type);
 			if (unlikely(dma_mapping_error(sc->dev,
 				bf->bf_buf_addr))) {
 				dev_kfree_skb_any(requeue_skb);
@@ -639,12 +940,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			ath_rx_send_to_mac80211(hw, sc, skb, rxs);
 
 requeue:
-		list_move_tail(&bf->list, &sc->rx.rxbuf);
-		ath_rx_buf_link(sc, bf);
+		if (edma) {
+			list_add_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_edma_buf_link(sc, qtype);
+		} else {
+			list_move_tail(&bf->list, &sc->rx.rxbuf);
+			ath_rx_buf_link(sc, bf);
+		}
 	} while (1);
 
 	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return 0;
-#undef PA2DESC
 }
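One pattern worth calling out in recv.c above: EDMA Rx buffers travel through sk_buff FIFOs rather than the descriptor-linked ath_buf list, so the patch stashes a back-pointer to the owning ath_buf in the skb's 48-byte control buffer via the SKB_CB_ATHBUF() macro. Below is a self-contained sketch of that back-pointer trick, with plain-C stand-ins for the kernel types (nothing here is driver API):

#include <assert.h>
#include <string.h>

struct ath_buf { int id; };		/* stand-in for the driver's ath_buf */
struct sk_buff { char cb[48]; };	/* control buffer, as in the kernel's sk_buff */

/* same trick as the patch's SKB_CB_ATHBUF() macro */
#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)(__skb)->cb))

int main(void)
{
	struct ath_buf bf = { .id = 42 };
	struct sk_buff skb;

	memset(skb.cb, 0, sizeof(skb.cb));
	SKB_CB_ATHBUF(&skb) = &bf;		/* stored when the skb enters the Rx FIFO */
	assert(SKB_CB_ATHBUF(&skb)->id == 42);	/* recovered when the frame completes */
	return 0;
}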