author     Sujith <Sujith.Manoharan@atheros.com>      2008-11-17 22:35:55 -0500
committer  John W. Linville <linville@tuxdriver.com>  2008-11-26 09:47:26 -0500
commit     be0418ada3fad110977a9d5fa16d4907d4e7d726 (patch)
tree       8a2f3f2d0a3a8680716746e719cccfb755c1fcbd /drivers/net/wireless/ath9k/recv.c
parent     2b406f1e68dd5348384fd166ac532af46bfc87fc (diff)
ath9k: Revamp RX handling
Remove a lot of old, crufty code and make RX status reporting a bit sane and clean.

Do not do anything to the RX skb before unmapping. So in ath_rx_tasklet(), move the skb_put() after PCI unmap.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 575
1 file changed, 206 insertions(+), 369 deletions(-)
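As a rough illustration of the ordering change described in the commit message (unmap the RX buffer first, only then call skb_put()), here is a minimal, hypothetical sketch. The helper name and flattened parameter list are invented for illustration; only the kernel PCI and skb calls mirror the ones used in ath_rx_tasklet() in the diff below, and all error handling and driver state are omitted.

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical helper, not part of the patch: shows the ordering only. */
static void rx_complete_sketch(struct pci_dev *pdev, struct sk_buff *skb,
			       dma_addr_t buf_addr, u32 bufsize, u32 datalen)
{
	/* Hand the DMA buffer back to the CPU before touching its contents. */
	pci_dma_sync_single_for_cpu(pdev, buf_addr, skb_tailroom(skb),
				    PCI_DMA_FROMDEVICE);
	pci_unmap_single(pdev, buf_addr, bufsize, PCI_DMA_FROMDEVICE);

	/* Only after the unmap is it safe to adjust the skb itself. */
	skb_put(skb, datalen);
}

In the patch itself the same sync/unmap pair appears directly in ath_rx_tasklet(), with skb_put(skb, ds->ds_rxstat.rs_datalen) moved to after the unmap.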
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 2d72ac19fada..000e189e104a 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -14,10 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/*
18 * Implementation of receive path.
19 */
20
21#include "core.h" 17#include "core.h"
22 18
23/* 19/*
@@ -27,10 +23,7 @@
27 * MAC acknowledges BA status as long as it copies frames to host 23 * MAC acknowledges BA status as long as it copies frames to host
28 * buffer (or rx fifo). This can incorrectly acknowledge packets 24 * buffer (or rx fifo). This can incorrectly acknowledge packets
29 * to a sender if last desc is self-linked. 25 * to a sender if last desc is self-linked.
30 *
31 * NOTE: Caller should hold the rxbuf lock.
32 */ 26 */
33
34static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) 27static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
35{ 28{
36 struct ath_hal *ah = sc->sc_ah; 29 struct ath_hal *ah = sc->sc_ah;
@@ -40,19 +33,17 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
40 ATH_RXBUF_RESET(bf); 33 ATH_RXBUF_RESET(bf);
41 34
42 ds = bf->bf_desc; 35 ds = bf->bf_desc;
43 ds->ds_link = 0; /* link to null */ 36 ds->ds_link = 0; /* link to null */
44 ds->ds_data = bf->bf_buf_addr; 37 ds->ds_data = bf->bf_buf_addr;
45 38
46 /* XXX For RADAR? 39 /* virtual addr of the beginning of the buffer. */
47 * virtual addr of the beginning of the buffer. */
48 skb = bf->bf_mpdu; 40 skb = bf->bf_mpdu;
49 ASSERT(skb != NULL); 41 ASSERT(skb != NULL);
50 ds->ds_vdata = skb->data; 42 ds->ds_vdata = skb->data;
51 43
52 /* setup rx descriptors */ 44 /* setup rx descriptors */
53 ath9k_hw_setuprxdesc(ah, 45 ath9k_hw_setuprxdesc(ah, ds,
54 ds, 46 skb_tailroom(skb), /* buffer size */
55 skb_tailroom(skb), /* buffer size */
56 0); 47 0);
57 48
58 if (sc->sc_rxlink == NULL) 49 if (sc->sc_rxlink == NULL)
@@ -64,8 +55,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
64 ath9k_hw_rxena(ah); 55 ath9k_hw_rxena(ah);
65} 56}
66 57
67static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, 58static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
68 u32 len)
69{ 59{
70 struct sk_buff *skb; 60 struct sk_buff *skb;
71 u32 off; 61 u32 off;
@@ -91,59 +81,154 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
91 return skb; 81 return skb;
92} 82}
93 83
94static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) 84static void ath_rx_requeue(struct ath_softc *sc, struct ath_buf *bf)
95{ 85{
96 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; 86 struct sk_buff *skb;
97 87
98 ASSERT(bf != NULL); 88 ASSERT(bf != NULL);
99 89
100 spin_lock_bh(&sc->sc_rxbuflock); 90 if (bf->bf_mpdu == NULL) {
101 if (bf->bf_status & ATH_BUFSTATUS_STALE) { 91 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
102 /* 92 if (skb != NULL) {
103 * This buffer is still held for hw acess. 93 bf->bf_mpdu = skb;
104 * Mark it as free to be re-queued it later. 94 bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
105 */ 95 skb_end_pointer(skb) - skb->head,
106 bf->bf_status |= ATH_BUFSTATUS_FREE; 96 PCI_DMA_FROMDEVICE);
107 } else { 97 bf->bf_dmacontext = bf->bf_buf_addr;
108 /* XXX: we probably never enter here, remove after 98
109 * verification */ 99 }
110 list_add_tail(&bf->list, &sc->sc_rxbuf);
111 ath_rx_buf_link(sc, bf);
112 } 100 }
113 spin_unlock_bh(&sc->sc_rxbuflock); 101
102 list_move_tail(&bf->list, &sc->sc_rxbuf);
103 ath_rx_buf_link(sc, bf);
104}
105
106
107static int ath_rate2idx(struct ath_softc *sc, int rate)
108{
109 int i = 0, cur_band, n_rates;
110 struct ieee80211_hw *hw = sc->hw;
111
112 cur_band = hw->conf.channel->band;
113 n_rates = sc->sbands[cur_band].n_bitrates;
114
115 for (i = 0; i < n_rates; i++) {
116 if (sc->sbands[cur_band].bitrates[i].bitrate == rate)
117 break;
118 }
119
120 /*
121 * NB:mac80211 validates rx rate index against the supported legacy rate
122 * index only (should be done against ht rates also), return the highest
123 * legacy rate index for rx rate which does not match any one of the
124 * supported basic and extended rates to make mac80211 happy.
125 * The following hack will be cleaned up once the issue with
126 * the rx rate index validation in mac80211 is fixed.
127 */
128 if (i == n_rates)
129 return n_rates - 1;
130
131 return i;
114} 132}
115 133
116/* 134/*
117 * The skb indicated to upper stack won't be returned to us. 135 * For Decrypt or Demic errors, we only mark packet status here and always push
118 * So we have to allocate a new one and queue it by ourselves. 136 * up the frame up to let mac80211 handle the actual error case, be it no
137 * decryption key or real decryption error. This let us keep statistics there.
119 */ 138 */
120static int ath_rx_indicate(struct ath_softc *sc, 139static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
121 struct sk_buff *skb, 140 struct ieee80211_rx_status *rx_status, bool *decrypt_error,
122 struct ath_recv_status *status, 141 struct ath_softc *sc)
123 u16 keyix)
124{ 142{
125 struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; 143 struct ieee80211_hdr *hdr;
126 struct sk_buff *nskb; 144 int ratekbps;
127 int type; 145 u8 ratecode;
128 146 __le16 fc;
129 /* indicate frame to the stack, which will free the old skb. */ 147
130 type = _ath_rx_indicate(sc, skb, status, keyix); 148 hdr = (struct ieee80211_hdr *)skb->data;
131 149 fc = hdr->frame_control;
132 /* allocate a new skb and queue it to for H/W processing */ 150 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
133 nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); 151
134 if (nskb != NULL) { 152 if (ds->ds_rxstat.rs_more) {
135 bf->bf_mpdu = nskb; 153 /*
136 bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data, 154 * Frame spans multiple descriptors; this cannot happen yet
137 skb_end_pointer(nskb) - nskb->head, 155 * as we don't support jumbograms. If not in monitor mode,
138 PCI_DMA_FROMDEVICE); 156 * discard the frame. Enable this if you want to see
139 bf->bf_dmacontext = bf->bf_buf_addr; 157 * error frames in Monitor mode.
140 ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; 158 */
159 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
160 goto rx_next;
161 } else if (ds->ds_rxstat.rs_status != 0) {
162 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
163 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
164 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
165 goto rx_next;
141 166
142 /* queue the new wbuf to H/W */ 167 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
143 ath_rx_requeue(sc, nskb); 168 *decrypt_error = true;
169 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
170 if (ieee80211_is_ctl(fc))
171 /*
172 * Sometimes, we get invalid
173 * MIC failures on valid control frames.
174 * Remove these mic errors.
175 */
176 ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
177 else
178 rx_status->flag |= RX_FLAG_MMIC_ERROR;
179 }
180 /*
181 * Reject error frames with the exception of
182 * decryption and MIC failures. For monitor mode,
183 * we also ignore the CRC error.
184 */
185 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
186 if (ds->ds_rxstat.rs_status &
187 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
188 ATH9K_RXERR_CRC))
189 goto rx_next;
190 } else {
191 if (ds->ds_rxstat.rs_status &
192 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
193 goto rx_next;
194 }
195 }
144 } 196 }
145 197
146 return type; 198 ratecode = ds->ds_rxstat.rs_rate;
199 ratekbps = sc->sc_hwmap[ratecode].rateKbps;
200
201 /* HT rate */
202 if (ratecode & 0x80) {
203 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
204 ratekbps = (ratekbps * 27) / 13;
205 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
206 ratekbps = (ratekbps * 10) / 9;
207 }
208
209 rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
210 rx_status->band = sc->hw->conf.channel->band;
211 rx_status->freq = sc->hw->conf.channel->center_freq;
212 rx_status->noise = sc->sc_ani.sc_noise_floor;
213 rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
214 rx_status->rate_idx = ath_rate2idx(sc, (ratekbps / 100));
215 rx_status->antenna = ds->ds_rxstat.rs_antenna;
216
217 /* at 45 you will be able to use MCS 15 reliably. A more elaborate
218 * scheme can be used here but it requires tables of SNR/throughput for
219 * each possible mode used. */
220 rx_status->qual = ds->ds_rxstat.rs_rssi * 100 / 45;
221
222 /* rssi can be more than 45 though, anything above that
223 * should be considered at 100% */
224 if (rx_status->qual > 100)
225 rx_status->qual = 100;
226
227 rx_status->flag |= RX_FLAG_TSFT;
228
229 return 1;
230rx_next:
231 return 0;
147} 232}
148 233
149static void ath_opmode_init(struct ath_softc *sc) 234static void ath_opmode_init(struct ath_softc *sc)
@@ -185,12 +270,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
185 sc->sc_flags &= ~SC_OP_RXFLUSH; 270 sc->sc_flags &= ~SC_OP_RXFLUSH;
186 spin_lock_init(&sc->sc_rxbuflock); 271 spin_lock_init(&sc->sc_rxbuflock);
187 272
188 /*
189 * Cisco's VPN software requires that drivers be able to
190 * receive encapsulated frames that are larger than the MTU.
191 * Since we can't be sure how large a frame we'll get, setup
192 * to handle the larges on possible.
193 */
194 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, 273 sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
195 min(sc->sc_cachelsz, 274 min(sc->sc_cachelsz,
196 (u16)64)); 275 (u16)64));
@@ -209,8 +288,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
209 break; 288 break;
210 } 289 }
211 290
212 /* Pre-allocate a wbuf for each rx buffer */
213
214 list_for_each_entry(bf, &sc->sc_rxbuf, list) { 291 list_for_each_entry(bf, &sc->sc_rxbuf, list) {
215 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); 292 skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
216 if (skb == NULL) { 293 if (skb == NULL) {
@@ -223,7 +300,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
223 skb_end_pointer(skb) - skb->head, 300 skb_end_pointer(skb) - skb->head,
224 PCI_DMA_FROMDEVICE); 301 PCI_DMA_FROMDEVICE);
225 bf->bf_dmacontext = bf->bf_buf_addr; 302 bf->bf_dmacontext = bf->bf_buf_addr;
226 ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
227 } 303 }
228 sc->sc_rxlink = NULL; 304 sc->sc_rxlink = NULL;
229 305
@@ -235,8 +311,6 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
235 return error; 311 return error;
236} 312}
237 313
238/* Reclaim all rx queue resources */
239
240void ath_rx_cleanup(struct ath_softc *sc) 314void ath_rx_cleanup(struct ath_softc *sc)
241{ 315{
242 struct sk_buff *skb; 316 struct sk_buff *skb;
@@ -248,8 +322,6 @@ void ath_rx_cleanup(struct ath_softc *sc)
248 dev_kfree_skb(skb); 322 dev_kfree_skb(skb);
249 } 323 }
250 324
251 /* cleanup rx descriptors */
252
253 if (sc->sc_rxdma.dd_desc_len != 0) 325 if (sc->sc_rxdma.dd_desc_len != 0)
254 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); 326 ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
255} 327}
@@ -297,20 +369,19 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
297 } 369 }
298 370
299 if (sc->sc_ah->ah_opmode == ATH9K_M_STA || 371 if (sc->sc_ah->ah_opmode == ATH9K_M_STA ||
300 sc->sc_ah->ah_opmode == ATH9K_M_IBSS) 372 sc->sc_ah->ah_opmode == ATH9K_M_IBSS)
301 rfilt |= ATH9K_RX_FILTER_BEACON; 373 rfilt |= ATH9K_RX_FILTER_BEACON;
302 374
303 /* If in HOSTAP mode, want to enable reception of PSPOLL frames 375 /* If in HOSTAP mode, want to enable reception of PSPOLL frames
304 & beacon frames */ 376 & beacon frames */
305 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP) 377 if (sc->sc_ah->ah_opmode == ATH9K_M_HOSTAP)
306 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); 378 rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL);
379
307 return rfilt; 380 return rfilt;
308 381
309#undef RX_FILTER_PRESERVE 382#undef RX_FILTER_PRESERVE
310} 383}
311 384
312/* Enable the receive h/w following a reset. */
313
314int ath_startrecv(struct ath_softc *sc) 385int ath_startrecv(struct ath_softc *sc)
315{ 386{
316 struct ath_hal *ah = sc->sc_ah; 387 struct ath_hal *ah = sc->sc_ah;
@@ -322,21 +393,6 @@ int ath_startrecv(struct ath_softc *sc)
322 393
323 sc->sc_rxlink = NULL; 394 sc->sc_rxlink = NULL;
324 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { 395 list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
325 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
326 /* restarting h/w, no need for holding descriptors */
327 bf->bf_status &= ~ATH_BUFSTATUS_STALE;
328 /*
329 * Upper layer may not be done with the frame yet so
330 * we can't just re-queue it to hardware. Remove it
331 * from h/w queue. It'll be re-queued when upper layer
332 * returns the frame and ath_rx_requeue_mpdu is called.
333 */
334 if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) {
335 list_del(&bf->list);
336 continue;
337 }
338 }
339 /* chain descriptors */
340 ath_rx_buf_link(sc, bf); 396 ath_rx_buf_link(sc, bf);
341 } 397 }
342 398
@@ -346,120 +402,69 @@ int ath_startrecv(struct ath_softc *sc)
346 402
347 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); 403 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
348 ath9k_hw_putrxbuf(ah, bf->bf_daddr); 404 ath9k_hw_putrxbuf(ah, bf->bf_daddr);
349 ath9k_hw_rxena(ah); /* enable recv descriptors */ 405 ath9k_hw_rxena(ah);
350 406
351start_recv: 407start_recv:
352 spin_unlock_bh(&sc->sc_rxbuflock); 408 spin_unlock_bh(&sc->sc_rxbuflock);
353 ath_opmode_init(sc); /* set filters, etc. */ 409 ath_opmode_init(sc);
354 ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ 410 ath9k_hw_startpcureceive(ah);
411
355 return 0; 412 return 0;
356} 413}
357 414
358/* Disable the receive h/w in preparation for a reset. */
359
360bool ath_stoprecv(struct ath_softc *sc) 415bool ath_stoprecv(struct ath_softc *sc)
361{ 416{
362 struct ath_hal *ah = sc->sc_ah; 417 struct ath_hal *ah = sc->sc_ah;
363 u64 tsf;
364 bool stopped; 418 bool stopped;
365 419
366 ath9k_hw_stoppcurecv(ah); /* disable PCU */ 420 ath9k_hw_stoppcurecv(ah);
367 ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ 421 ath9k_hw_setrxfilter(ah, 0);
368 stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ 422 stopped = ath9k_hw_stopdmarecv(ah);
369 mdelay(3); /* 3ms is long enough for 1 frame */ 423 mdelay(3); /* 3ms is long enough for 1 frame */
370 tsf = ath9k_hw_gettsf64(ah); 424 sc->sc_rxlink = NULL;
371 sc->sc_rxlink = NULL; /* just in case */ 425
372 return stopped; 426 return stopped;
373} 427}
374 428
375/* Flush receive queue */
376
377void ath_flushrecv(struct ath_softc *sc) 429void ath_flushrecv(struct ath_softc *sc)
378{ 430{
379 /*
380 * ath_rx_tasklet may be used to handle rx interrupt and flush receive
381 * queue at the same time. Use a lock to serialize the access of rx
382 * queue.
383 * ath_rx_tasklet cannot hold the spinlock while indicating packets.
384 * Instead, do not claim the spinlock but check for a flush in
385 * progress (see references to sc_rxflush)
386 */
387 spin_lock_bh(&sc->sc_rxflushlock); 431 spin_lock_bh(&sc->sc_rxflushlock);
388 sc->sc_flags |= SC_OP_RXFLUSH; 432 sc->sc_flags |= SC_OP_RXFLUSH;
389
390 ath_rx_tasklet(sc, 1); 433 ath_rx_tasklet(sc, 1);
391
392 sc->sc_flags &= ~SC_OP_RXFLUSH; 434 sc->sc_flags &= ~SC_OP_RXFLUSH;
393 spin_unlock_bh(&sc->sc_rxflushlock); 435 spin_unlock_bh(&sc->sc_rxflushlock);
394} 436}
395 437
396/* Process receive queue, as well as LED, etc. */
397
398int ath_rx_tasklet(struct ath_softc *sc, int flush) 438int ath_rx_tasklet(struct ath_softc *sc, int flush)
399{ 439{
400#define PA2DESC(_sc, _pa) \ 440#define PA2DESC(_sc, _pa) \
401 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ 441 ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
402 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) 442 ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
403 443
404 struct ath_buf *bf, *bf_held = NULL; 444 struct ath_buf *bf;
405 struct ath_desc *ds; 445 struct ath_desc *ds;
406 struct ieee80211_hdr *hdr;
407 struct sk_buff *skb = NULL; 446 struct sk_buff *skb = NULL;
408 struct ath_recv_status rx_status; 447 struct ieee80211_rx_status rx_status;
409 struct ath_hal *ah = sc->sc_ah; 448 struct ath_hal *ah = sc->sc_ah;
410 int type, rx_processed = 0; 449 struct ieee80211_hdr *hdr;
411 u32 phyerr; 450 int hdrlen, padsize, retval;
412 u8 chainreset = 0; 451 bool decrypt_error = false;
413 int retval; 452 u8 keyix;
414 __le16 fc; 453
454 spin_lock_bh(&sc->sc_rxbuflock);
415 455
416 do { 456 do {
417 /* If handling rx interrupt and flush is in progress => exit */ 457 /* If handling rx interrupt and flush is in progress => exit */
418 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 458 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
419 break; 459 break;
420 460
421 spin_lock_bh(&sc->sc_rxbuflock);
422 if (list_empty(&sc->sc_rxbuf)) { 461 if (list_empty(&sc->sc_rxbuf)) {
423 sc->sc_rxlink = NULL; 462 sc->sc_rxlink = NULL;
424 spin_unlock_bh(&sc->sc_rxbuflock);
425 break; 463 break;
426 } 464 }
427 465
428 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); 466 bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
429
430 /*
431 * There is a race condition that BH gets scheduled after sw
432 * writes RxE and before hw re-load the last descriptor to get
433 * the newly chained one. Software must keep the last DONE
434 * descriptor as a holding descriptor - software does so by
435 * marking it with the STALE flag.
436 */
437 if (bf->bf_status & ATH_BUFSTATUS_STALE) {
438 bf_held = bf;
439 if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) {
440 /*
441 * The holding descriptor is the last
442 * descriptor in queue. It's safe to
443 * remove the last holding descriptor
444 * in BH context.
445 */
446 list_del(&bf_held->list);
447 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
448 sc->sc_rxlink = NULL;
449
450 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
451 list_add_tail(&bf_held->list,
452 &sc->sc_rxbuf);
453 ath_rx_buf_link(sc, bf_held);
454 }
455 spin_unlock_bh(&sc->sc_rxbuflock);
456 break;
457 }
458 bf = list_entry(bf->list.next, struct ath_buf, list);
459 }
460
461 ds = bf->bf_desc; 467 ds = bf->bf_desc;
462 ++rx_processed;
463 468
464 /* 469 /*
465 * Must provide the virtual address of the current 470 * Must provide the virtual address of the current
@@ -472,8 +477,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
472 * on. All this is necessary because of our use of 477 * on. All this is necessary because of our use of
473 * a self-linked list to avoid rx overruns. 478 * a self-linked list to avoid rx overruns.
474 */ 479 */
475 retval = ath9k_hw_rxprocdesc(ah, 480 retval = ath9k_hw_rxprocdesc(ah, ds,
476 ds,
477 bf->bf_daddr, 481 bf->bf_daddr,
478 PA2DESC(sc, ds->ds_link), 482 PA2DESC(sc, ds->ds_link),
479 0); 483 0);
@@ -482,7 +486,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
482 struct ath_desc *tds; 486 struct ath_desc *tds;
483 487
484 if (list_is_last(&bf->list, &sc->sc_rxbuf)) { 488 if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
485 spin_unlock_bh(&sc->sc_rxbuflock); 489 sc->sc_rxlink = NULL;
486 break; 490 break;
487 } 491 }
488 492
@@ -500,215 +504,70 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
500 */ 504 */
501 505
502 tds = tbf->bf_desc; 506 tds = tbf->bf_desc;
503 retval = ath9k_hw_rxprocdesc(ah, 507 retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
504 tds, tbf->bf_daddr, 508 PA2DESC(sc, tds->ds_link), 0);
505 PA2DESC(sc, tds->ds_link), 0);
506 if (retval == -EINPROGRESS) { 509 if (retval == -EINPROGRESS) {
507 spin_unlock_bh(&sc->sc_rxbuflock);
508 break; 510 break;
509 } 511 }
510 } 512 }
511 513
512 /* XXX: we do not support frames spanning
513 * multiple descriptors */
514 bf->bf_status |= ATH_BUFSTATUS_DONE;
515
516 skb = bf->bf_mpdu; 514 skb = bf->bf_mpdu;
517 if (skb == NULL) { /* XXX ??? can this happen */ 515 if (!skb)
518 spin_unlock_bh(&sc->sc_rxbuflock);
519 continue; 516 continue;
520 }
521 /*
522 * Now we know it's a completed frame, we can indicate the
523 * frame. Remove the previous holding descriptor and leave
524 * this one in the queue as the new holding descriptor.
525 */
526 if (bf_held) {
527 list_del(&bf_held->list);
528 bf_held->bf_status &= ~ATH_BUFSTATUS_STALE;
529 if (bf_held->bf_status & ATH_BUFSTATUS_FREE) {
530 list_add_tail(&bf_held->list, &sc->sc_rxbuf);
531 /* try to requeue this descriptor */
532 ath_rx_buf_link(sc, bf_held);
533 }
534 }
535 517
536 bf->bf_status |= ATH_BUFSTATUS_STALE;
537 bf_held = bf;
538 /* 518 /*
539 * Release the lock here in case ieee80211_input() return 519 * If we're asked to flush receive queue, directly
540 * the frame immediately by calling ath_rx_mpdu_requeue(). 520 * chain it back at the queue without processing it.
541 */ 521 */
542 spin_unlock_bh(&sc->sc_rxbuflock); 522 if (flush)
543
544 if (flush) {
545 /*
546 * If we're asked to flush receive queue, directly
547 * chain it back at the queue without processing it.
548 */
549 goto rx_next; 523 goto rx_next;
550 }
551 524
552 hdr = (struct ieee80211_hdr *)skb->data; 525 if (!ds->ds_rxstat.rs_datalen)
553 fc = hdr->frame_control; 526 goto rx_next;
554 memset(&rx_status, 0, sizeof(struct ath_recv_status));
555
556 if (ds->ds_rxstat.rs_more) {
557 /*
558 * Frame spans multiple descriptors; this
559 * cannot happen yet as we don't support
560 * jumbograms. If not in monitor mode,
561 * discard the frame.
562 */
563#ifndef ERROR_FRAMES
564 /*
565 * Enable this if you want to see
566 * error frames in Monitor mode.
567 */
568 if (sc->sc_ah->ah_opmode != ATH9K_M_MONITOR)
569 goto rx_next;
570#endif
571 /* fall thru for monitor mode handling... */
572 } else if (ds->ds_rxstat.rs_status != 0) {
573 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
574 rx_status.flags |= ATH_RX_FCS_ERROR;
575 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
576 phyerr = ds->ds_rxstat.rs_phyerr & 0x1f;
577 goto rx_next;
578 }
579 527
580 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { 528 /* The status portion of the descriptor could get corrupted. */
581 /*
582 * Decrypt error. We only mark packet status
583 * here and always push up the frame up to let
584 * mac80211 handle the actual error case, be
585 * it no decryption key or real decryption
586 * error. This let us keep statistics there.
587 */
588 rx_status.flags |= ATH_RX_DECRYPT_ERROR;
589 } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
590 /*
591 * Demic error. We only mark frame status here
592 * and always push up the frame up to let
593 * mac80211 handle the actual error case. This
594 * let us keep statistics there. Hardware may
595 * post a false-positive MIC error.
596 */
597 if (ieee80211_is_ctl(fc))
598 /*
599 * Sometimes, we get invalid
600 * MIC failures on valid control frames.
601 * Remove these mic errors.
602 */
603 ds->ds_rxstat.rs_status &=
604 ~ATH9K_RXERR_MIC;
605 else
606 rx_status.flags |= ATH_RX_MIC_ERROR;
607 }
608 /*
609 * Reject error frames with the exception of
610 * decryption and MIC failures. For monitor mode,
611 * we also ignore the CRC error.
612 */
613 if (sc->sc_ah->ah_opmode == ATH9K_M_MONITOR) {
614 if (ds->ds_rxstat.rs_status &
615 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
616 ATH9K_RXERR_CRC))
617 goto rx_next;
618 } else {
619 if (ds->ds_rxstat.rs_status &
620 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
621 goto rx_next;
622 }
623 }
624 }
625 /*
626 * The status portion of the descriptor could get corrupted.
627 */
628 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) 529 if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
629 goto rx_next; 530 goto rx_next;
630 /*
631 * Sync and unmap the frame. At this point we're
632 * committed to passing the sk_buff somewhere so
633 * clear buf_skb; this means a new sk_buff must be
634 * allocated when the rx descriptor is setup again
635 * to receive another frame.
636 */
637 skb_put(skb, ds->ds_rxstat.rs_datalen);
638 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
639 rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
640 rx_status.rateieee =
641 sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate;
642 rx_status.rateKbps =
643 sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps;
644 rx_status.ratecode = ds->ds_rxstat.rs_rate;
645
646 /* HT rate */
647 if (rx_status.ratecode & 0x80) {
648 /* TODO - add table to avoid division */
649 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) {
650 rx_status.flags |= ATH_RX_40MHZ;
651 rx_status.rateKbps =
652 (rx_status.rateKbps * 27) / 13;
653 }
654 if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
655 rx_status.rateKbps =
656 (rx_status.rateKbps * 10) / 9;
657 else
658 rx_status.flags |= ATH_RX_SHORT_GI;
659 }
660 531
661 /* sc_noise_floor is only available when the station 532 if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
662 attaches to an AP, so we use a default value 533 goto rx_next;
663 if we are not yet attached. */
664 rx_status.abs_rssi =
665 ds->ds_rxstat.rs_rssi + sc->sc_ani.sc_noise_floor;
666 534
667 pci_dma_sync_single_for_cpu(sc->pdev, 535 /* Sync and unmap the frame */
668 bf->bf_buf_addr, 536 pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
669 skb_tailroom(skb), 537 skb_tailroom(skb),
670 PCI_DMA_FROMDEVICE); 538 PCI_DMA_FROMDEVICE);
671 pci_unmap_single(sc->pdev, 539 pci_unmap_single(sc->pdev, bf->bf_buf_addr,
672 bf->bf_buf_addr,
673 sc->sc_rxbufsize, 540 sc->sc_rxbufsize,
674 PCI_DMA_FROMDEVICE); 541 PCI_DMA_FROMDEVICE);
675 542
676 /* XXX: Ah! make me more readable, use a helper */ 543 skb_put(skb, ds->ds_rxstat.rs_datalen);
677 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { 544 skb->protocol = cpu_to_be16(ETH_P_CONTROL);
678 if (ds->ds_rxstat.rs_moreaggr == 0) { 545
679 rx_status.rssictl[0] = 546 /* see if any padding is done by the hw and remove it */
680 ds->ds_rxstat.rs_rssi_ctl0; 547 hdr = (struct ieee80211_hdr *)skb->data;
681 rx_status.rssictl[1] = 548 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
682 ds->ds_rxstat.rs_rssi_ctl1; 549
683 rx_status.rssictl[2] = 550 if (hdrlen & 3) {
684 ds->ds_rxstat.rs_rssi_ctl2; 551 padsize = hdrlen % 4;
685 rx_status.rssi = ds->ds_rxstat.rs_rssi; 552 memmove(skb->data + padsize, skb->data, hdrlen);
686 if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { 553 skb_pull(skb, padsize);
687 rx_status.rssiextn[0] =
688 ds->ds_rxstat.rs_rssi_ext0;
689 rx_status.rssiextn[1] =
690 ds->ds_rxstat.rs_rssi_ext1;
691 rx_status.rssiextn[2] =
692 ds->ds_rxstat.rs_rssi_ext2;
693 rx_status.flags |=
694 ATH_RX_RSSI_EXTN_VALID;
695 }
696 rx_status.flags |= ATH_RX_RSSI_VALID |
697 ATH_RX_CHAIN_RSSI_VALID;
698 }
699 } else {
700 /*
701 * Need to insert the "combined" rssi into the
702 * status structure for upper layer processing
703 */
704 rx_status.rssi = ds->ds_rxstat.rs_rssi;
705 rx_status.flags |= ATH_RX_RSSI_VALID;
706 } 554 }
707 555
708 /* Pass frames up to the stack. */ 556 keyix = ds->ds_rxstat.rs_keyix;
709 557
710 type = ath_rx_indicate(sc, skb, 558 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
711 &rx_status, ds->ds_rxstat.rs_keyix); 559 rx_status.flag |= RX_FLAG_DECRYPTED;
560 } else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
561 && !decrypt_error && skb->len >= hdrlen + 4) {
562 keyix = skb->data[hdrlen + 3] >> 6;
563
564 if (test_bit(keyix, sc->sc_keymap))
565 rx_status.flag |= RX_FLAG_DECRYPTED;
566 }
567
568 /* Send the frame to mac80211 */
569 __ieee80211_rx(sc->hw, skb, &rx_status);
570 bf->bf_mpdu = NULL;
712 571
713 /* 572 /*
714 * change the default rx antenna if rx diversity chooses the 573 * change the default rx antenna if rx diversity chooses the
@@ -716,37 +575,15 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
716 */ 575 */
717 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { 576 if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
718 if (++sc->sc_rxotherant >= 3) 577 if (++sc->sc_rxotherant >= 3)
719 ath_setdefantenna(sc, 578 ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
720 ds->ds_rxstat.rs_antenna);
721 } else { 579 } else {
722 sc->sc_rxotherant = 0; 580 sc->sc_rxotherant = 0;
723 } 581 }
724
725#ifdef CONFIG_SLOW_ANT_DIV
726 if ((rx_status.flags & ATH_RX_RSSI_VALID) &&
727 ieee80211_is_beacon(fc)) {
728 ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat);
729 }
730#endif
731 /*
732 * For frames successfully indicated, the buffer will be
733 * returned to us by upper layers by calling
734 * ath_rx_mpdu_requeue, either synchronusly or asynchronously.
735 * So we don't want to do it here in this loop.
736 */
737 continue;
738
739rx_next: 582rx_next:
740 bf->bf_status |= ATH_BUFSTATUS_FREE; 583 ath_rx_requeue(sc, bf);
741 } while (TRUE); 584 } while (1);
742 585
743 if (chainreset) { 586 spin_unlock_bh(&sc->sc_rxbuflock);
744 DPRINTF(sc, ATH_DBG_CONFIG,
745 "%s: Reset rx chain mask. "
746 "Do internal reset\n", __func__);
747 ASSERT(flush == 0);
748 ath_reset(sc, false);
749 }
750 587
751 return 0; 588 return 0;
752#undef PA2DESC 589#undef PA2DESC