Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r-- | drivers/net/wireless/ath9k/recv.c | 600
1 file changed, 228 insertions(+), 372 deletions(-)
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 2d72ac19fada..743ad228b833 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -14,10 +14,6 @@
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | 14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | /* | ||
18 | * Implementation of receive path. | ||
19 | */ | ||
20 | |||
21 | #include "core.h" | 17 | #include "core.h" |
22 | 18 | ||
23 | /* | 19 | /* |
@@ -27,10 +23,7 @@
27 | * MAC acknowledges BA status as long as it copies frames to host | 23 | * MAC acknowledges BA status as long as it copies frames to host |
28 | * buffer (or rx fifo). This can incorrectly acknowledge packets | 24 | * buffer (or rx fifo). This can incorrectly acknowledge packets |
29 | * to a sender if last desc is self-linked. | 25 | * to a sender if last desc is self-linked. |
30 | * | ||
31 | * NOTE: Caller should hold the rxbuf lock. | ||
32 | */ | 26 | */ |
33 | |||
34 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) | 27 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) |
35 | { | 28 | { |
36 | struct ath_hal *ah = sc->sc_ah; | 29 | struct ath_hal *ah = sc->sc_ah; |
@@ -40,19 +33,17 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
40 | ATH_RXBUF_RESET(bf); | 33 | ATH_RXBUF_RESET(bf); |
41 | 34 | ||
42 | ds = bf->bf_desc; | 35 | ds = bf->bf_desc; |
43 | ds->ds_link = 0; /* link to null */ | 36 | ds->ds_link = 0; /* link to null */ |
44 | ds->ds_data = bf->bf_buf_addr; | 37 | ds->ds_data = bf->bf_buf_addr; |
45 | 38 | ||
46 | /* XXX For RADAR? | 39 | /* virtual addr of the beginning of the buffer. */ |
47 | * virtual addr of the beginning of the buffer. */ | ||
48 | skb = bf->bf_mpdu; | 40 | skb = bf->bf_mpdu; |
49 | ASSERT(skb != NULL); | 41 | ASSERT(skb != NULL); |
50 | ds->ds_vdata = skb->data; | 42 | ds->ds_vdata = skb->data; |
51 | 43 | ||
52 | /* setup rx descriptors */ | 44 | /* setup rx descriptors */ |
53 | ath9k_hw_setuprxdesc(ah, | 45 | ath9k_hw_setuprxdesc(ah, ds, |
54 | ds, | 46 | skb_tailroom(skb), /* buffer size */ |
55 | skb_tailroom(skb), /* buffer size */ | ||
56 | 0); | 47 | 0); |
57 | 48 | ||
58 | if (sc->sc_rxlink == NULL) | 49 | if (sc->sc_rxlink == NULL) |
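The chaining convention in ath_rx_buf_link is worth spelling out: sc_rxlink caches a pointer to the previous tail descriptor's ds_link word, so appending a buffer is O(1) and the list stays terminated until the old tail is patched. A minimal sketch of the pattern, with simplified types and hypothetical names rather than the driver's real structures (the driver additionally records the queue head when the list was empty):

    typedef unsigned int u32;

    struct desc {
            u32 ds_link;    /* bus address of the next desc; 0 terminates */
    };

    /* rxlink points at the previous tail's ds_link word, or NULL */
    static void chain_rx_desc(u32 **rxlink, struct desc *ds, u32 ds_paddr)
    {
            ds->ds_link = 0;                /* new tail: terminate here */
            if (*rxlink != NULL)
                    **rxlink = ds_paddr;    /* old tail now points at us */
            *rxlink = &ds->ds_link;         /* remember the new tail word */
    }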
@@ -64,8 +55,29 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
64 | ath9k_hw_rxena(ah); | 55 | ath9k_hw_rxena(ah); |
65 | } | 56 | } |
66 | 57 | ||
67 | static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, | 58 | static void ath_setdefantenna(struct ath_softc *sc, u32 antenna) |
68 | u32 len) | 59 | { |
60 | /* XXX block beacon interrupts */ | ||
61 | ath9k_hw_setantenna(sc->sc_ah, antenna); | ||
62 | sc->sc_defant = antenna; | ||
63 | sc->sc_rxotherant = 0; | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Extend 15-bit time stamp from rx descriptor to | ||
68 | * a full 64-bit TSF using the current h/w TSF. | ||
69 | */ | ||
70 | static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp) | ||
71 | { | ||
72 | u64 tsf; | ||
73 | |||
74 | tsf = ath9k_hw_gettsf64(sc->sc_ah); | ||
75 | if ((tsf & 0x7fff) < rstamp) | ||
76 | tsf -= 0x8000; | ||
77 | return (tsf & ~0x7fff) | rstamp; | ||
78 | } | ||
79 | |||
80 | static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len) | ||
69 | { | 81 | { |
70 | struct sk_buff *skb; | 82 | struct sk_buff *skb; |
71 | u32 off; | 83 | u32 off; |
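ath_extend_tsf, added above, recovers a full 64-bit timestamp from the 15 bits the rx descriptor carries: splice the stamp into the current hardware TSF, and borrow one 0x8000-tick window when the counter has already wrapped past the stamp. A self-contained check of the arithmetic (standalone sketch, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t extend_tsf(uint64_t tsf_now, uint32_t rstamp)
    {
            /* low 15 bits already below the stamp: the counter wrapped
             * after the frame arrived, so step back one window */
            if ((tsf_now & 0x7fff) < rstamp)
                    tsf_now -= 0x8000;
            return (tsf_now & ~0x7fffULL) | rstamp;
    }

    int main(void)
    {
            /* frame stamped at 0x7ff0; the TSF has since wrapped to ...8002 */
            uint64_t now = 0x123456788002ULL;
            printf("0x%llx\n", (unsigned long long)extend_tsf(now, 0x7ff0));
            /* prints 0x123456787ff0, i.e. 0x12 ticks before the current TSF */
            return 0;
    }

The scheme inherits the usual caveat: the result is only right if the frame is processed within one 0x8000-microsecond (about 32.8 ms) window of its arrival.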
@@ -91,59 +103,133 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
91 | return skb; | 103 | return skb; |
92 | } | 104 | } |
93 | 105 | ||
94 | static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) | 106 | static int ath_rate2idx(struct ath_softc *sc, int rate) |
95 | { | 107 | { |
96 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | 108 | int i = 0, cur_band, n_rates; |
109 | struct ieee80211_hw *hw = sc->hw; | ||
97 | 110 | ||
98 | ASSERT(bf != NULL); | 111 | cur_band = hw->conf.channel->band; |
112 | n_rates = sc->sbands[cur_band].n_bitrates; | ||
99 | 113 | ||
100 | spin_lock_bh(&sc->sc_rxbuflock); | 114 | for (i = 0; i < n_rates; i++) { |
101 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | 115 | if (sc->sbands[cur_band].bitrates[i].bitrate == rate) |
102 | /* | 116 | break; |
103 | * This buffer is still held for hw access. ||
104 | * Mark it as free to be re-queued later. ||
105 | */ | ||
106 | bf->bf_status |= ATH_BUFSTATUS_FREE; | ||
107 | } else { | ||
108 | /* XXX: we probably never enter here, remove after | ||
109 | * verification */ | ||
110 | list_add_tail(&bf->list, &sc->sc_rxbuf); | ||
111 | ath_rx_buf_link(sc, bf); | ||
112 | } | 117 | } |
113 | spin_unlock_bh(&sc->sc_rxbuflock); | 118 | |
119 | /* | ||
120 | * NB:mac80211 validates rx rate index against the supported legacy rate | ||
121 | * index only (should be done against ht rates also), return the highest | ||
122 | * legacy rate index for rx rate which does not match any one of the | ||
123 | * supported basic and extended rates to make mac80211 happy. | ||
124 | * The following hack will be cleaned up once the issue with | ||
125 | * the rx rate index validation in mac80211 is fixed. | ||
126 | */ | ||
127 | if (i == n_rates) | ||
128 | return n_rates - 1; | ||
129 | |||
130 | return i; | ||
114 | } | 131 | } |
115 | 132 | ||
116 | /* | 133 | /* |
117 | * The skb indicated to upper stack won't be returned to us. | 134 | * For Decrypt or Demic errors, we only mark packet status here and always push |
118 | * So we have to allocate a new one and queue it by ourselves. | 135 | * the frame up to let mac80211 handle the actual error case, be it no |
121 | 136 | * decryption key or a real decryption error. This lets us keep statistics there. ||
119 | */ | 137 | */ |
120 | static int ath_rx_indicate(struct ath_softc *sc, | 138 | static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds, |
121 | struct sk_buff *skb, | 139 | struct ieee80211_rx_status *rx_status, bool *decrypt_error, |
122 | struct ath_recv_status *status, | 140 | struct ath_softc *sc) |
123 | u16 keyix) | ||
124 | { | 141 | { |
125 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | 142 | struct ath_rate_table *rate_table = sc->hw_rate_table[sc->sc_curmode]; |
126 | struct sk_buff *nskb; | 143 | struct ieee80211_hdr *hdr; |
127 | int type; | 144 | int ratekbps, rix; |
128 | 145 | u8 ratecode; | |
129 | /* indicate frame to the stack, which will free the old skb. */ | 146 | __le16 fc; |
130 | type = _ath_rx_indicate(sc, skb, status, keyix); | 147 | |
131 | 148 | hdr = (struct ieee80211_hdr *)skb->data; | |
132 | /* allocate a new skb and queue it to for H/W processing */ | 149 | fc = hdr->frame_control; |
133 | nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | 150 | memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); |
134 | if (nskb != NULL) { | ||
135 | bf->bf_mpdu = nskb; | ||
136 | bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, | ||
137 | skb_end_pointer(nskb) - nskb->head, | ||
138 | PCI_DMA_FROMDEVICE); | ||
139 | bf->bf_dmacontext = bf->bf_buf_addr; | ||
140 | ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; | ||
141 | 151 | ||
142 | /* queue the new wbuf to H/W */ | 152 | if (ds->ds_rxstat.rs_more) { |
143 | ath_rx_requeue(sc, nskb); | 153 | /* |
154 | * Frame spans multiple descriptors; this cannot happen yet | ||
155 | * as we don't support jumbograms. If not in monitor mode, | ||
156 | * discard the frame. Enable this if you want to see | ||
157 | * error frames in Monitor mode. | ||
158 | */ | ||
159 | if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR) | ||
160 | goto rx_next; | ||
161 | } else if (ds->ds_rxstat.rs_status != 0) { | ||
162 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) | ||
163 | rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; | ||
164 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) | ||
165 | goto rx_next; | ||
166 | |||
167 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { | ||
168 | *decrypt_error = true; | ||
169 | } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { | ||
170 | if (ieee80211_is_ctl(fc)) | ||
171 | /* | ||
172 | * Sometimes, we get invalid | ||
173 | * MIC failures on valid control frames. | ||
174 | * Remove these mic errors. | ||
175 | */ | ||
176 | ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC; | ||
177 | else | ||
178 | rx_status->flag |= RX_FLAG_MMIC_ERROR; | ||
179 | } | ||
180 | /* | ||
181 | * Reject error frames with the exception of | ||
182 | * decryption and MIC failures. For monitor mode, | ||
183 | * we also ignore the CRC error. | ||
184 | */ | ||
185 | if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) { | ||
186 | if (ds->ds_rxstat.rs_status & | ||
187 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | ||
188 | ATH9K_RXERR_CRC)) | ||
189 | goto rx_next; | ||
190 | } else { | ||
191 | if (ds->ds_rxstat.rs_status & | ||
192 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { | ||
193 | goto rx_next; | ||
194 | } | ||
195 | } | ||
144 | } | 196 | } |
145 | 197 | ||
146 | return type; | 198 | ratecode = ds->ds_rxstat.rs_rate; |
199 | rix = rate_table->rateCodeToIndex[ratecode]; | ||
200 | ratekbps = rate_table->info[rix].ratekbps; | ||
201 | |||
202 | /* HT rate */ | ||
203 | if (ratecode & 0x80) { | ||
204 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) | ||
205 | ratekbps = (ratekbps * 27) / 13; | ||
206 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) | ||
207 | ratekbps = (ratekbps * 10) / 9; | ||
208 | } | ||
209 | |||
210 | rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); | ||
211 | rx_status->band = sc->hw->conf.channel->band; | ||
212 | rx_status->freq = sc->hw->conf.channel->center_freq; | ||
213 | rx_status->noise = sc->sc_ani.sc_noise_floor; | ||
214 | rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi; | ||
215 | rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100)); | ||
216 | rx_status->antenna = ds->ds_rxstat.rs_antenna; | ||
217 | |||
218 | /* at 45 you will be able to use MCS 15 reliably. A more elaborate | ||
219 | * scheme can be used here but it requires tables of SNR/throughput for | ||
220 | * each possible mode used. */ | ||
221 | rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45; | ||
222 | |||
223 | /* rssi can be more than 45 though, anything above that | ||
224 | * should be considered at 100% */ | ||
225 | if (rx_status->qual > 100) | ||
226 | rx_status->qual = 100; | ||
227 | |||
228 | rx_status->flag |= RX_FLAG_TSFT; | ||
229 | |||
230 | return 1; | ||
231 | rx_next: | ||
232 | return 0; | ||
147 | } | 233 | } |
148 | 234 | ||
149 | static void ath_opmode_init(struct ath_softc *sc) | 235 | static void ath_opmode_init(struct ath_softc *sc) |
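Two constants in the HT branch of ath_rx_prepare are easy to sanity-check against 802.11n rates: 27/13 is the 40 MHz to 20 MHz data-rate ratio (135 Mbps vs 65 Mbps at MCS 7), and 10/9 is the short-GI speedup (3.6 us vs 4.0 us symbols). Together with the qual clamp, a standalone check (plain C sketch):

    #include <stdio.h>

    int main(void)
    {
            int ratekbps = 65000;   /* MCS 7, 20 MHz, long GI */
            int rssi = 30, qual;

            ratekbps = (ratekbps * 27) / 13;        /* 40 MHz: 135000 */
            ratekbps = (ratekbps * 10) / 9;         /* short GI: 150000 */
            printf("%d kbps\n", ratekbps);          /* 150 Mbps, as expected */

            /* an RSSI of 45 maps to 100 percent; higher values are clamped */
            qual = rssi * 100 / 45;
            if (qual > 100)
                    qual = 100;
            printf("qual = %d%%\n", qual);          /* 66 */
            return 0;
    }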
@@ -185,12 +271,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
185 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 271 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
186 | spin_lock_init(&sc->sc_rxbuflock); | 272 | spin_lock_init(&sc->sc_rxbuflock); |
187 | 273 | ||
188 | /* | ||
189 | * Cisco's VPN software requires that drivers be able to | ||
190 | * receive encapsulated frames that are larger than the MTU. | ||
191 | * Since we can't be sure how large a frame we'll get, set up ||
192 | * to handle the largest one possible. ||
193 | */ | ||
194 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, | 274 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, |
195 | min(sc->sc_cachelsz, | 275 | min(sc->sc_cachelsz, |
196 | (u16)64)); | 276 | (u16)64)); |
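The sizing above pads the maximum MPDU length to a multiple of the cache line size (capped at 64 bytes) so DMA'd rx buffers do not share cache lines with unrelated data. The roundup arithmetic, checked standalone; the 3839-byte MPDU figure is only illustrative, not the header's actual constant:

    #include <stdio.h>

    /* same shape as the kernel's roundup() helper */
    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            printf("%d\n", roundup(3839, 32));      /* 3840 */
            printf("%d\n", roundup(3839, 64));      /* 3840 */
            return 0;
    }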
@@ -209,8 +289,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
209 | break; | 289 | break; |
210 | } | 290 | } |
211 | 291 | ||
212 | /* Pre-allocate a wbuf for each rx buffer */ | ||
213 | |||
214 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | 292 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { |
215 | skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | 293 | skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); |
216 | if (skb == NULL) { | 294 | if (skb == NULL) { |
@@ -223,7 +301,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
223 | skb_end_pointer(skb) - skb->head, | 301 | skb_end_pointer(skb) - skb->head, |
224 | PCI_DMA_FROMDEVICE); | 302 | PCI_DMA_FROMDEVICE); |
225 | bf->bf_dmacontext = bf->bf_buf_addr; | 303 | bf->bf_dmacontext = bf->bf_buf_addr; |
226 | ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; | ||
227 | } | 304 | } |
228 | sc->sc_rxlink = NULL; | 305 | sc->sc_rxlink = NULL; |
229 | 306 | ||
@@ -235,8 +312,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
235 | return error; | 312 | return error; |
236 | } | 313 | } |
237 | 314 | ||
238 | /* Reclaim all rx queue resources */ | ||
239 | |||
240 | void ath_rx_cleanup(struct ath_softc *sc) | 315 | void ath_rx_cleanup(struct ath_softc *sc) |
241 | { | 316 | { |
242 | struct sk_buff *skb; | 317 | struct sk_buff *skb; |
@@ -248,8 +323,6 @@ void ath_rx_cleanup(struct ath_softc *sc)
248 | dev_kfree_skb(skb); | 323 | dev_kfree_skb(skb); |
249 | } | 324 | } |
250 | 325 | ||
251 | /* cleanup rx descriptors */ | ||
252 | |||
253 | if (sc->sc_rxdma.dd_desc_len != 0) | 326 | if (sc->sc_rxdma.dd_desc_len != 0) |
254 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); | 327 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); |
255 | } | 328 | } |
@@ -297,20 +370,19 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
297 | } | 370 | } |
298 | 371 | ||
299 | if (sc->sc_ah->ah_opmode == ATH9K_M_STA || | 372 | if (sc->sc_ah->ah_opmode == ATH9K_M_STA || |
300 | sc->sc_ah->ah_opmode == ATH9K_M_IBSS) | 373 | sc->sc_ah->ah_opmode == ATH9K_M_IBSS) |
301 | rfilt |= ATH9K_RX_FILTER_BEACON; | 374 | rfilt |= ATH9K_RX_FILTER_BEACON; |
302 | 375 | ||
303 | /* If in HOSTAP mode, want to enable reception of PSPOLL frames | 376 | /* If in HOSTAP mode, want to enable reception of PSPOLL frames |
304 | & beacon frames */ | 377 | & beacon frames */ |
305 | if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) | 378 | if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) |
306 | rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); | 379 | rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); |
380 | |||
307 | return rfilt; | 381 | return rfilt; |
308 | 382 | ||
309 | #undef RX_FILTER_PRESERVE | 383 | #undef RX_FILTER_PRESERVE |
310 | } | 384 | } |
311 | 385 | ||
312 | /* Enable the receive h/w following a reset. */ | ||
313 | |||
314 | int ath_startrecv(struct ath_softc *sc) | 386 | int ath_startrecv(struct ath_softc *sc) |
315 | { | 387 | { |
316 | struct ath_hal *ah = sc->sc_ah; | 388 | struct ath_hal *ah = sc->sc_ah; |
@@ -322,21 +394,6 @@ int ath_startrecv(struct ath_softc *sc)
322 | 394 | ||
323 | sc->sc_rxlink = NULL; | 395 | sc->sc_rxlink = NULL; |
324 | list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { | 396 | list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { |
325 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
326 | /* restarting h/w, no need for holding descriptors */ | ||
327 | bf->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
328 | /* | ||
329 | * Upper layer may not be done with the frame yet so | ||
330 | * we can't just re-queue it to hardware. Remove it | ||
331 | * from h/w queue. It'll be re-queued when upper layer | ||
332 | * returns the frame and ath_rx_requeue_mpdu is called. | ||
333 | */ | ||
334 | if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) { | ||
335 | list_del(&bf->list); | ||
336 | continue; | ||
337 | } | ||
338 | } | ||
339 | /* chain descriptors */ | ||
340 | ath_rx_buf_link(sc, bf); | 397 | ath_rx_buf_link(sc, bf); |
341 | } | 398 | } |
342 | 399 | ||
@@ -346,120 +403,69 @@
346 | 403 | ||
347 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 404 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); |
348 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | 405 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); |
349 | ath9k_hw_rxena(ah); /* enable recv descriptors */ | 406 | ath9k_hw_rxena(ah); |
350 | 407 | ||
351 | start_recv: | 408 | start_recv: |
352 | spin_unlock_bh(&sc->sc_rxbuflock); | 409 | spin_unlock_bh(&sc->sc_rxbuflock); |
353 | ath_opmode_init(sc); /* set filters, etc. */ | 410 | ath_opmode_init(sc); |
354 | ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ | 411 | ath9k_hw_startpcureceive(ah); |
412 | |||
355 | return 0; | 413 | return 0; |
356 | } | 414 | } |
357 | 415 | ||
358 | /* Disable the receive h/w in preparation for a reset. */ | ||
359 | |||
360 | bool ath_stoprecv(struct ath_softc *sc) | 416 | bool ath_stoprecv(struct ath_softc *sc) |
361 | { | 417 | { |
362 | struct ath_hal *ah = sc->sc_ah; | 418 | struct ath_hal *ah = sc->sc_ah; |
363 | u64 tsf; | ||
364 | bool stopped; | 419 | bool stopped; |
365 | 420 | ||
366 | ath9k_hw_stoppcurecv(ah); /* disable PCU */ | 421 | ath9k_hw_stoppcurecv(ah); |
367 | ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ | 422 | ath9k_hw_setrxfilter(ah, 0); |
368 | stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ | 423 | stopped = ath9k_hw_stopdmarecv(ah); |
369 | mdelay(3); /* 3ms is long enough for 1 frame */ | 424 | mdelay(3); /* 3ms is long enough for 1 frame */ |
370 | tsf = ath9k_hw_gettsf64(ah); | 425 | sc->sc_rxlink = NULL; |
371 | sc->sc_rxlink = NULL; /* just in case */ | 426 | |
372 | return stopped; | 427 | return stopped; |
373 | } | 428 | } |
374 | 429 | ||
375 | /* Flush receive queue */ | ||
376 | |||
377 | void ath_flushrecv(struct ath_softc *sc) | 430 | void ath_flushrecv(struct ath_softc *sc) |
378 | { | 431 | { |
379 | /* | ||
380 | * ath_rx_tasklet may be used to handle rx interrupt and flush receive | ||
381 | * queue at the same time. Use a lock to serialize the access of rx | ||
382 | * queue. | ||
383 | * ath_rx_tasklet cannot hold the spinlock while indicating packets. | ||
384 | * Instead, do not claim the spinlock but check for a flush in | ||
385 | * progress (see references to sc_rxflush) | ||
386 | */ | ||
387 | spin_lock_bh(&sc->sc_rxflushlock); | 432 | spin_lock_bh(&sc->sc_rxflushlock); |
388 | sc->sc_flags |= SC_OP_RXFLUSH; | 433 | sc->sc_flags |= SC_OP_RXFLUSH; |
389 | |||
390 | ath_rx_tasklet(sc, 1); | 434 | ath_rx_tasklet(sc, 1); |
391 | |||
392 | sc->sc_flags &= ~SC_OP_RXFLUSH; | 435 | sc->sc_flags &= ~SC_OP_RXFLUSH; |
393 | spin_unlock_bh(&sc->sc_rxflushlock); | 436 | spin_unlock_bh(&sc->sc_rxflushlock); |
394 | } | 437 | } |
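Note how flushing avoids holding sc_rxbuflock for the whole drain: ath_flushrecv serializes flushers with sc_rxflushlock and raises SC_OP_RXFLUSH, while an interrupt-driven pass through the rx loop sees the flag with flush == 0 and backs off. The handshake, gathered in one place (a restatement of the code above and below, not new logic):

    /* flusher side (ath_flushrecv) */
    spin_lock_bh(&sc->sc_rxflushlock);
    sc->sc_flags |= SC_OP_RXFLUSH;  /* fence off interrupt-path passes */
    ath_rx_tasklet(sc, 1);          /* drain with flush == 1 */
    sc->sc_flags &= ~SC_OP_RXFLUSH;
    spin_unlock_bh(&sc->sc_rxflushlock);

    /* consumer side (ath_rx_tasklet loop) */
    if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
            break;                  /* a flush owns the queue; back off */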
395 | 438 | ||
396 | /* Process receive queue, as well as LED, etc. */ | ||
397 | |||
398 | int ath_rx_tasklet(struct ath_softc *sc, int flush) | 439 | int ath_rx_tasklet(struct ath_softc *sc, int flush) |
399 | { | 440 | { |
400 | #define PA2DESC(_sc, _pa) \ | 441 | #define PA2DESC(_sc, _pa) \ |
401 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ | 442 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ |
402 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) | 443 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) |
403 | 444 | ||
404 | struct ath_buf *bf, *bf_held = NULL; | 445 | struct ath_buf *bf; |
405 | struct ath_desc *ds; | 446 | struct ath_desc *ds; |
406 | struct ieee80211_hdr *hdr; | 447 | struct sk_buff *skb = NULL, *requeue_skb; |
407 | struct sk_buff *skb = NULL; | 448 | struct ieee80211_rx_status rx_status; |
408 | struct ath_recv_status rx_status; | ||
409 | struct ath_hal *ah = sc->sc_ah; | 449 | struct ath_hal *ah = sc->sc_ah; |
410 | int type, rx_processed = 0; | 450 | struct ieee80211_hdr *hdr; |
411 | u32 phyerr; | 451 | int hdrlen, padsize, retval; |
412 | u8 chainreset = 0; | 452 | bool decrypt_error = false; |
413 | int retval; | 453 | u8 keyix; |
414 | __le16 fc; | 454 | |
455 | spin_lock_bh(&sc->sc_rxbuflock); | ||
415 | 456 | ||
416 | do { | 457 | do { |
417 | /* If handling rx interrupt and flush is in progress => exit */ | 458 | /* If handling rx interrupt and flush is in progress => exit */ |
418 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) | 459 | if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) |
419 | break; | 460 | break; |
420 | 461 | ||
421 | spin_lock_bh(&sc->sc_rxbuflock); | ||
422 | if (list_empty(&sc->sc_rxbuf)) { | 462 | if (list_empty(&sc->sc_rxbuf)) { |
423 | sc->sc_rxlink = NULL; | 463 | sc->sc_rxlink = NULL; |
424 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
425 | break; | 464 | break; |
426 | } | 465 | } |
427 | 466 | ||
428 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | 467 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); |
429 | |||
430 | /* | ||
431 | * There is a race condition that BH gets scheduled after sw | ||
432 | * writes RxE and before hw re-load the last descriptor to get | ||
433 | * the newly chained one. Software must keep the last DONE | ||
434 | * descriptor as a holding descriptor - software does so by | ||
435 | * marking it with the STALE flag. | ||
436 | */ | ||
437 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
438 | bf_held = bf; | ||
439 | if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) { | ||
440 | /* | ||
441 | * The holding descriptor is the last | ||
442 | * descriptor in queue. It's safe to | ||
443 | * remove the last holding descriptor | ||
444 | * in BH context. | ||
445 | */ | ||
446 | list_del(&bf_held->list); | ||
447 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
448 | sc->sc_rxlink = NULL; | ||
449 | |||
450 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
451 | list_add_tail(&bf_held->list, | ||
452 | &sc->sc_rxbuf); | ||
453 | ath_rx_buf_link(sc, bf_held); | ||
454 | } | ||
455 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
456 | break; | ||
457 | } | ||
458 | bf = list_entry(bf->list.next, struct ath_buf, list); | ||
459 | } | ||
460 | |||
461 | ds = bf->bf_desc; | 468 | ds = bf->bf_desc; |
462 | ++rx_processed; | ||
463 | 469 | ||
464 | /* | 470 | /* |
465 | * Must provide the virtual address of the current | 471 | * Must provide the virtual address of the current |
@@ -472,8 +478,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
472 | * on. All this is necessary because of our use of | 478 | * on. All this is necessary because of our use of |
473 | * a self-linked list to avoid rx overruns. | 479 | * a self-linked list to avoid rx overruns. |
474 | */ | 480 | */ |
475 | retval = ath9k_hw_rxprocdesc(ah, | 481 | retval = ath9k_hw_rxprocdesc(ah, ds, |
476 | ds, | ||
477 | bf->bf_daddr, | 482 | bf->bf_daddr, |
478 | PA2DESC(sc, ds->ds_link), | 483 | PA2DESC(sc, ds->ds_link), |
479 | 0); | 484 | 0); |
@@ -482,7 +487,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
482 | struct ath_desc *tds; | 487 | struct ath_desc *tds; |
483 | 488 | ||
484 | if (list_is_last(&bf->list, &sc->sc_rxbuf)) { | 489 | if (list_is_last(&bf->list, &sc->sc_rxbuf)) { |
485 | spin_unlock_bh(&sc->sc_rxbuflock); | 490 | sc->sc_rxlink = NULL; |
486 | break; | 491 | break; |
487 | } | 492 | } |
488 | 493 | ||
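PA2DESC, defined at the top of ath_rx_tasklet, is the standard translation for descriptors carved out of one coherent DMA allocation: a bus address read back from ds_link becomes a CPU pointer by offsetting from the pool's virtual base. Illustrated with hypothetical addresses (standalone sketch, assumes a 64-bit host):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uintptr_t dd_desc       = 0xffff880012340000UL; /* CPU base */
            uint32_t  dd_desc_paddr = 0x12340000;   /* bus (physical) base */
            uint32_t  ds_link       = 0x123400c0;   /* read from hardware */

            /* PA2DESC: vaddr = virtual base + (pa - bus base) */
            uintptr_t vaddr = dd_desc + (ds_link - dd_desc_paddr);
            printf("%#lx\n", (unsigned long)vaddr); /* 0xffff8800123400c0 */
            return 0;
    }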
@@ -500,215 +505,87 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
500 | */ | 505 | */ |
501 | 506 | ||
502 | tds = tbf->bf_desc; | 507 | tds = tbf->bf_desc; |
503 | retval = ath9k_hw_rxprocdesc(ah, | 508 | retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr, |
504 | tds, tbf->bf_daddr, | 509 | PA2DESC(sc, tds->ds_link), 0); |
505 | PA2DESC(sc, tds->ds_link), 0); | ||
506 | if (retval == -EINPROGRESS) { | 510 | if (retval == -EINPROGRESS) { |
507 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
508 | break; | 511 | break; |
509 | } | 512 | } |
510 | } | 513 | } |
511 | 514 | ||
512 | /* XXX: we do not support frames spanning | ||
513 | * multiple descriptors */ | ||
514 | bf->bf_status |= ATH_BUFSTATUS_DONE; | ||
515 | |||
516 | skb = bf->bf_mpdu; | 515 | skb = bf->bf_mpdu; |
517 | if (skb == NULL) { /* XXX ??? can this happen */ | 516 | if (!skb) |
518 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
519 | continue; | 517 | continue; |
520 | } | ||
521 | /* | ||
522 | * Now we know it's a completed frame, we can indicate the | ||
523 | * frame. Remove the previous holding descriptor and leave | ||
524 | * this one in the queue as the new holding descriptor. | ||
525 | */ | ||
526 | if (bf_held) { | ||
527 | list_del(&bf_held->list); | ||
528 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
529 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
530 | list_add_tail(&bf_held->list, &sc->sc_rxbuf); | ||
531 | /* try to requeue this descriptor */ | ||
532 | ath_rx_buf_link(sc, bf_held); | ||
533 | } | ||
534 | } | ||
535 | 518 | ||
536 | bf->bf_status |= ATH_BUFSTATUS_STALE; | ||
537 | bf_held = bf; | ||
538 | /* | 519 | /* |
539 | * Release the lock here in case ieee80211_input() returns | 520 | * If we're asked to flush the receive queue, directly |
540 | * the frame immediately by calling ath_rx_mpdu_requeue(). | 521 | * chain it back on the queue without processing it. |
541 | */ | 522 | */ |
542 | spin_unlock_bh(&sc->sc_rxbuflock); | 523 | if (flush) |
524 | goto requeue; | ||
543 | 525 | ||
544 | if (flush) { | 526 | if (!ds->ds_rxstat.rs_datalen) |
545 | /* | 527 | goto requeue; |
546 | * If we're asked to flush receive queue, directly | ||
547 | * chain it back at the queue without processing it. | ||
548 | */ | ||
549 | goto rx_next; | ||
550 | } | ||
551 | 528 | ||
552 | hdr = (struct ieee80211_hdr *)skb->data; | 529 | /* The status portion of the descriptor could get corrupted. */ |
553 | fc = hdr->frame_control; | 530 | if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) |
554 | memset(&rx_status, 0, sizeof(struct ath_recv_status)); | 531 | goto requeue; |
555 | 532 | ||
556 | if (ds->ds_rxstat.rs_more) { | 533 | if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc)) |
557 | /* | 534 | goto requeue; |
558 | * Frame spans multiple descriptors; this | ||
559 | * cannot happen yet as we don't support | ||
560 | * jumbograms. If not in monitor mode, | ||
561 | * discard the frame. | ||
562 | */ | ||
563 | #ifndef ERROR_FRAMES | ||
564 | /* | ||
565 | * Enable this if you want to see | ||
566 | * error frames in Monitor mode. | ||
567 | */ | ||
568 | if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR) | ||
569 | goto rx_next; | ||
570 | #endif | ||
571 | /* fall thru for monitor mode handling... */ | ||
572 | } else if (ds->ds_rxstat.rs_status != 0) { | ||
573 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) | ||
574 | rx_status.flags |= ATH_RX_FCS_ERROR; | ||
575 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { | ||
576 | phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; | ||
577 | goto rx_next; | ||
578 | } | ||
579 | 535 | ||
580 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { | 536 | /* Ensure we always have an skb to requeue once we are done |
581 | /* | 537 | * processing the current buffer's skb */ |
582 | * Decrypt error. We only mark packet status | 538 | requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); |
583 | * here and always push up the frame up to let | ||
584 | * mac80211 handle the actual error case, be | ||
585 | * it no decryption key or real decryption | ||
586 | * error. This let us keep statistics there. | ||
587 | */ | ||
588 | rx_status.flags |= ATH_RX_DECRYPT_ERROR; | ||
589 | } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { | ||
590 | /* | ||
591 | * Demic error. We only mark frame status here | ||
592 | * and always push up the frame up to let | ||
593 | * mac80211 handle the actual error case. This | ||
594 | * let us keep statistics there. Hardware may | ||
595 | * post a false-positive MIC error. | ||
596 | */ | ||
597 | if (ieee80211_is_ctl(fc)) | ||
598 | /* | ||
599 | * Sometimes, we get invalid | ||
600 | * MIC failures on valid control frames. | ||
601 | * Remove these mic errors. | ||
602 | */ | ||
603 | ds->ds_rxstat.rs_status &= | ||
604 | ~ATH9K_RXERR_MIC; | ||
605 | else | ||
606 | rx_status.flags |= ATH_RX_MIC_ERROR; | ||
607 | } | ||
608 | /* | ||
609 | * Reject error frames with the exception of | ||
610 | * decryption and MIC failures. For monitor mode, | ||
611 | * we also ignore the CRC error. | ||
612 | */ | ||
613 | if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) { | ||
614 | if (ds->ds_rxstat.rs_status & | ||
615 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | ||
616 | ATH9K_RXERR_CRC)) | ||
617 | goto rx_next; | ||
618 | } else { | ||
619 | if (ds->ds_rxstat.rs_status & | ||
620 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { | ||
621 | goto rx_next; | ||
622 | } | ||
623 | } | ||
624 | } | ||
625 | /* | ||
626 | * The status portion of the descriptor could get corrupted. | ||
627 | */ | ||
628 | if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) | ||
629 | goto rx_next; | ||
630 | /* | ||
631 | * Sync and unmap the frame. At this point we're | ||
632 | * committed to passing the sk_buff somewhere so | ||
633 | * clear buf_skb; this means a new sk_buff must be | ||
634 | * allocated when the rx descriptor is setup again | ||
635 | * to receive another frame. | ||
636 | */ | ||
637 | skb_put(skb, ds->ds_rxstat.rs_datalen); | ||
638 | skb->protocol = cpu_to_be16(ETH_P_CONTROL); | ||
639 | rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); | ||
640 | rx_status.rateieee = | ||
641 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate; | ||
642 | rx_status.rateKbps = | ||
643 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps; | ||
644 | rx_status.ratecode = ds->ds_rxstat.rs_rate; | ||
645 | |||
646 | /* HT rate */ | ||
647 | if (rx_status.ratecode & 0x80) { | ||
648 | /* TODO - add table to avoid division */ | ||
649 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | ||
650 | rx_status.flags |= ATH_RX_40MHZ; | ||
651 | rx_status.rateKbps = | ||
652 | (rx_status.rateKbps * 27) / 13; | ||
653 | } | ||
654 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) | ||
655 | rx_status.rateKbps = | ||
656 | (rx_status.rateKbps * 10) / 9; | ||
657 | else | ||
658 | rx_status.flags |= ATH_RX_SHORT_GI; | ||
659 | } | ||
660 | 539 | ||
661 | /* sc_noise_floor is only available when the station | 540 | /* If there is no memory we ignore the current RX'd frame, |
662 | attaches to an AP, so we use a default value | 541 | * tell hardware it can give us a new frame using the old |
663 | if we are not yet attached. */ | 542 | * skb and put it at the tail of the sc->sc_rxbuf list for |
664 | rx_status.abs_rssi = | 543 | * processing. */ |
665 | ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor; | 544 | if (!requeue_skb) |
545 | goto requeue; | ||
666 | 546 | ||
667 | pci_dma_sync_single_for_cpu(sc->pdev, | 547 | /* Sync and unmap the frame */ |
668 | bf->bf_buf_addr, | 548 | pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr, |
669 | skb_tailroom(skb), | 549 | skb_tailroom(skb), |
670 | PCI_DMA_FROMDEVICE); | 550 | PCI_DMA_FROMDEVICE); |
671 | pci_unmap_single(sc->pdev, | 551 | pci_unmap_single(sc->pdev, bf->bf_buf_addr, |
672 | bf->bf_buf_addr, | ||
673 | sc->sc_rxbufsize, | 552 | sc->sc_rxbufsize, |
674 | PCI_DMA_FROMDEVICE); | 553 | PCI_DMA_FROMDEVICE); |
675 | 554 | ||
676 | /* XXX: Ah! make me more readable, use a helper */ | 555 | skb_put(skb, ds->ds_rxstat.rs_datalen); |
677 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { | 556 | skb->protocol = cpu_to_be16(ETH_P_CONTROL); |
678 | if (ds->ds_rxstat.rs_moreaggr == 0) { | 557 | |
679 | rx_status.rssictl[0] = | 558 | /* see if any padding is done by the hw and remove it */ |
680 | ds->ds_rxstat.rs_rssi_ctl0; | 559 | hdr = (struct ieee80211_hdr *)skb->data; |
681 | rx_status.rssictl[1] = | 560 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); |
682 | ds->ds_rxstat.rs_rssi_ctl1; | 561 | |
683 | rx_status.rssictl[2] = | 562 | if (hdrlen & 3) { |
684 | ds->ds_rxstat.rs_rssi_ctl2; | 563 | padsize = hdrlen % 4; |
685 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | 564 | memmove(skb->data + padsize, skb->data, hdrlen); |
686 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | 565 | skb_pull(skb, padsize); |
687 | rx_status.rssiextn[0] = | ||
688 | ds->ds_rxstat.rs_rssi_ext0; | ||
689 | rx_status.rssiextn[1] = | ||
690 | ds->ds_rxstat.rs_rssi_ext1; | ||
691 | rx_status.rssiextn[2] = | ||
692 | ds->ds_rxstat.rs_rssi_ext2; | ||
693 | rx_status.flags |= | ||
694 | ATH_RX_RSSI_EXTN_VALID; | ||
695 | } | ||
696 | rx_status.flags |= ATH_RX_RSSI_VALID | | ||
697 | ATH_RX_CHAIN_RSSI_VALID; | ||
698 | } | ||
699 | } else { | ||
700 | /* | ||
701 | * Need to insert the "combined" rssi into the | ||
702 | * status structure for upper layer processing | ||
703 | */ | ||
704 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | ||
705 | rx_status.flags |= ATH_RX_RSSI_VALID; | ||
706 | } | 566 | } |
707 | 567 | ||
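The padsize dance above undoes the hardware's rx alignment: the MAC inserts pad bytes after the 802.11 header so the frame body lands on a 4-byte boundary, while mac80211 expects header and body to be contiguous. Sliding the short header forward and pulling the pad is cheaper than moving the body. A worked standalone example for a 26-byte QoS header (sketch; skb calls replaced with plain memory ops):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char buf[64];
            int hdrlen = 26, padsize;

            memset(buf, 'H', 26);           /* 26-byte QoS data header */
            memcpy(buf + 26, "PPbody", 7);  /* 2 hw pad bytes, then body */

            if (hdrlen & 3) {
                    padsize = hdrlen % 4;                   /* 2 */
                    memmove(buf + padsize, buf, hdrlen);    /* header over pad */
                    /* skb_pull(skb, padsize): frame starts at buf + padsize */
                    printf("%s\n", (char *)buf + padsize);  /* 26 H's, then "body" */
            }
            return 0;
    }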
708 | /* Pass frames up to the stack. */ | 568 | keyix = ds->ds_rxstat.rs_keyix; |
709 | 569 | ||
710 | type = ath_rx_indicate(sc, skb, | 570 | if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { |
711 | &rx_status, ds->ds_rxstat.rs_keyix); | 571 | rx_status.flag |= RX_FLAG_DECRYPTED; |
572 | } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) | ||
573 | && !decrypt_error && skb->len >= hdrlen + 4) { | ||
574 | keyix = skb->data[hdrlen + 3] >> 6; | ||
575 | |||
576 | if (test_bit(keyix, sc->sc_keymap)) | ||
577 | rx_status.flag |= RX_FLAG_DECRYPTED; | ||
578 | } | ||
579 | |||
580 | /* Send the frame to mac80211 */ | ||
581 | __ieee80211_rx(sc->hw, skb, &rx_status); | ||
582 | |||
583 | /* We will now give hardware our shiny new allocated skb */ | ||
584 | bf->bf_mpdu = requeue_skb; | ||
585 | bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data, | ||
586 | sc->sc_rxbufsize, | ||
587 | PCI_DMA_FROMDEVICE); | ||
588 | bf->bf_dmacontext = bf->bf_buf_addr; | ||
712 | 589 | ||
713 | /* | 590 | /* |
714 | * change the default rx antenna if rx diversity chooses the | 591 | * change the default rx antenna if rx diversity chooses the |
@@ -716,37 +593,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
716 | */ | 593 | */ |
717 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { | 594 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { |
718 | if (++sc->sc_rxotherant >= 3) | 595 | if (++sc->sc_rxotherant >= 3) |
719 | ath_setdefantenna(sc, | 596 | ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); |
720 | ds->ds_rxstat.rs_antenna); | ||
721 | } else { | 597 | } else { |
722 | sc->sc_rxotherant = 0; | 598 | sc->sc_rxotherant = 0; |
723 | } | 599 | } |
600 | requeue: | ||
601 | list_move_tail(&bf->list, &sc->sc_rxbuf); | ||
602 | ath_rx_buf_link(sc, bf); | ||
603 | } while (1); | ||
724 | 604 | ||
725 | #ifdef CONFIG_SLOW_ANT_DIV | 605 | spin_unlock_bh(&sc->sc_rxbuflock); |
726 | if ((rx_status.flags & ATH_RX_RSSI_VALID) && | ||
727 | ieee80211_is_beacon(fc)) { | ||
728 | ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat); | ||
729 | } | ||
730 | #endif | ||
731 | /* | ||
732 | * For frames successfully indicated, the buffer will be | ||
733 | * returned to us by upper layers by calling | ||
734 | * ath_rx_mpdu_requeue, either synchronusly or asynchronously. | ||
735 | * So we don't want to do it here in this loop. | ||
736 | */ | ||
737 | continue; | ||
738 | |||
739 | rx_next: | ||
740 | bf->bf_status |= ATH_BUFSTATUS_FREE; | ||
741 | } while (TRUE); | ||
742 | |||
743 | if (chainreset) { | ||
744 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
745 | "%s: Reset rx chain mask. " | ||
746 | "Do internal reset\n", __func__); | ||
747 | ASSERT(flush == 0); | ||
748 | ath_reset(sc, false); | ||
749 | } | ||
750 | 606 | ||
751 | return 0; | 607 | return 0; |
752 | #undef PA2DESC | 608 | #undef PA2DESC |
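One detail of the new decryption-status logic deserves a standalone check: when the descriptor reports no valid hardware key index but the frame had the PROTECTED bit set and no decrypt error was flagged, the key ID is recovered from the security header itself via skb->data[hdrlen + 3] >> 6, since the fourth IV byte carries the 2-bit key ID in its top bits:

    #include <stdio.h>

    int main(void)
    {
            /* the WEP/TKIP/CCMP header follows the 802.11 header;
             * its fourth byte holds KeyID in bits 7:6 */
            unsigned char iv3 = 0x40;               /* key ID 1 */
            printf("keyix = %d\n", iv3 >> 6);       /* prints 1 */
            return 0;
    }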