Diffstat (limited to 'drivers/net/wireless/ath9k/recv.c')
-rw-r--r-- | drivers/net/wireless/ath9k/recv.c | 1318 |
1 files changed, 1318 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
new file mode 100644
index 000000000000..2fe806175c01
--- /dev/null
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -0,0 +1,1318 @@
1 | /* | ||
2 | * Copyright (c) 2008 Atheros Communications Inc. | ||
3 | * | ||
4 | * Permission to use, copy, modify, and/or distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * Implementation of receive path. | ||
19 | */ | ||
20 | |||
21 | #include "core.h" | ||
22 | |||
23 | /* | ||
24 | * Setup and link descriptors. | ||
25 | * | ||
26 | * 11N: we can no longer afford to self link the last descriptor. | ||
27 | * MAC acknowledges BA status as long as it copies frames to host | ||
28 | * buffer (or rx fifo). This can incorrectly acknowledge packets | ||
29 | * to a sender if last desc is self-linked. | ||
30 | * | ||
31 | * NOTE: Caller should hold the rxbuf lock. | ||
32 | */ | ||
33 | |||
34 | static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf) | ||
35 | { | ||
36 | struct ath_hal *ah = sc->sc_ah; | ||
37 | struct ath_desc *ds; | ||
38 | struct sk_buff *skb; | ||
39 | |||
40 | ATH_RXBUF_RESET(bf); | ||
41 | |||
42 | ds = bf->bf_desc; | ||
43 | ds->ds_link = 0; /* link to null */ | ||
44 | ds->ds_data = bf->bf_buf_addr; | ||
45 | |||
46 | /* XXX For RADAR? | ||
47 | * virtual addr of the beginning of the buffer. */ | ||
48 | skb = bf->bf_mpdu; | ||
49 | ASSERT(skb != NULL); | ||
50 | ds->ds_vdata = skb->data; | ||
51 | |||
52 | /* setup rx descriptors */ | ||
53 | ath9k_hw_setuprxdesc(ah, | ||
54 | ds, | ||
55 | skb_tailroom(skb), /* buffer size */ | ||
56 | 0); | ||
57 | |||
58 | if (sc->sc_rxlink == NULL) | ||
59 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | ||
60 | else | ||
61 | *sc->sc_rxlink = bf->bf_daddr; | ||
62 | |||
63 | sc->sc_rxlink = &ds->ds_link; | ||
64 | ath9k_hw_rxena(ah); | ||
65 | } | ||
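/*
 * Illustration of the chaining done above (a sketch, not part of the
 * descriptor setup itself): queueing buffer A on an empty chain hands A's
 * DMA address to the hardware via ath9k_hw_putrxbuf() and leaves
 * sc_rxlink = &A.ds_link; queueing B afterwards stores B's DMA address
 * through that pointer (A.ds_link = B.bf_daddr) and moves sc_rxlink to
 * &B.ds_link, so the chain always ends with ds_link = 0 rather than a
 * self-link.
 */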
66 | |||
67 | /* Process received BAR frame */ | ||
68 | |||
69 | static int ath_bar_rx(struct ath_softc *sc, | ||
70 | struct ath_node *an, | ||
71 | struct sk_buff *skb) | ||
72 | { | ||
73 | struct ieee80211_bar *bar; | ||
74 | struct ath_arx_tid *rxtid; | ||
75 | struct sk_buff *tskb; | ||
76 | struct ath_recv_status *rx_status; | ||
77 | int tidno, index, cindex; | ||
78 | u16 seqno; | ||
79 | |||
80 | /* look at BAR contents */ | ||
81 | |||
82 | bar = (struct ieee80211_bar *)skb->data; | ||
83 | tidno = (le16_to_cpu(bar->control) & IEEE80211_BAR_CTL_TID_M) | ||
84 | >> IEEE80211_BAR_CTL_TID_S; | ||
85 | seqno = le16_to_cpu(bar->start_seq_num) >> IEEE80211_SEQ_SEQ_SHIFT; | ||
86 | |||
87 | /* process BAR - indicate all pending RX frames till the BAR seqno */ | ||
88 | |||
89 | rxtid = &an->an_aggr.rx.tid[tidno]; | ||
90 | |||
91 | spin_lock_bh(&rxtid->tidlock); | ||
92 | |||
93 | /* get relative index */ | ||
94 | |||
95 | index = ATH_BA_INDEX(rxtid->seq_next, seqno); | ||
96 | |||
97 | /* drop BAR if old sequence (index is too large) */ | ||
98 | |||
99 | if ((index > rxtid->baw_size) && | ||
100 | (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2)))) | ||
101 | /* discard frame, ieee layer may not treat frame as a dup */ | ||
102 | goto unlock_and_free; | ||
103 | |||
104 | /* complete receive processing for all pending frames up to BAR seqno */ | ||
105 | |||
106 | cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | ||
107 | while ((rxtid->baw_head != rxtid->baw_tail) && | ||
108 | (rxtid->baw_head != cindex)) { | ||
109 | tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; | ||
110 | rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; | ||
111 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; | ||
112 | |||
113 | if (tskb != NULL) | ||
114 | ath_rx_subframe(an, tskb, rx_status); | ||
115 | |||
116 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
117 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
118 | } | ||
119 | |||
120 | /* ... and indicate rest of the frames in-order */ | ||
121 | |||
122 | while (rxtid->baw_head != rxtid->baw_tail && | ||
123 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL) { | ||
124 | tskb = rxtid->rxbuf[rxtid->baw_head].rx_wbuf; | ||
125 | rx_status = &rxtid->rxbuf[rxtid->baw_head].rx_status; | ||
126 | rxtid->rxbuf[rxtid->baw_head].rx_wbuf = NULL; | ||
127 | |||
128 | ath_rx_subframe(an, tskb, rx_status); | ||
129 | |||
130 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
131 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
132 | } | ||
133 | |||
134 | unlock_and_free: | ||
135 | spin_unlock_bh(&rxtid->tidlock); | ||
136 | /* free bar itself */ | ||
137 | dev_kfree_skb(skb); | ||
138 | return IEEE80211_FTYPE_CTL; | ||
139 | } | ||
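/*
 * Worked example for the window arithmetic above, assuming ATH_BA_INDEX()
 * is the usual ((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1) over the 4096
 * entry sequence space: with rxtid->seq_next = 4090 and a BAR start
 * sequence of 5, index = (5 - 4090) & 4095 = 11, so the first loop walks
 * the eleven slots in front of the BAR start sequence and hands up any
 * buffered sub-frames in order; a stale BAR just behind seq_next would
 * instead give an index close to 4095 and be discarded by the
 * "old sequence" check.
 */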
140 | |||
141 | /* Function to handle a subframe of aggregation when HT is enabled */ | ||
142 | |||
143 | static int ath_ampdu_input(struct ath_softc *sc, | ||
144 | struct ath_node *an, | ||
145 | struct sk_buff *skb, | ||
146 | struct ath_recv_status *rx_status) | ||
147 | { | ||
148 | struct ieee80211_hdr *hdr; | ||
149 | struct ath_arx_tid *rxtid; | ||
150 | struct ath_rxbuf *rxbuf; | ||
151 | u8 type, subtype; | ||
152 | u16 rxseq; | ||
153 | int tid = 0, index, cindex, rxdiff; | ||
154 | __le16 fc; | ||
155 | u8 *qc; | ||
156 | |||
157 | hdr = (struct ieee80211_hdr *)skb->data; | ||
158 | fc = hdr->frame_control; | ||
159 | |||
160 | /* drop frames with a non-zero 802.11 protocol version */ | ||
161 | |||
162 | if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_VERS) != 0) { | ||
163 | dev_kfree_skb(skb); | ||
164 | return -1; | ||
165 | } | ||
166 | |||
167 | type = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_FTYPE; | ||
168 | subtype = le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE; | ||
169 | |||
170 | if (ieee80211_is_back_req(fc)) | ||
171 | return ath_bar_rx(sc, an, skb); | ||
172 | |||
173 | /* special aggregate processing only for qos unicast data frames */ | ||
174 | |||
175 | if (!ieee80211_is_data(fc) || | ||
176 | !ieee80211_is_data_qos(fc) || | ||
177 | is_multicast_ether_addr(hdr->addr1)) | ||
178 | return ath_rx_subframe(an, skb, rx_status); | ||
179 | |||
180 | /* lookup rx tid state */ | ||
181 | |||
182 | if (ieee80211_is_data_qos(fc)) { | ||
183 | qc = ieee80211_get_qos_ctl(hdr); | ||
184 | tid = qc[0] & 0xf; | ||
185 | } | ||
186 | |||
187 | if (sc->sc_opmode == ATH9K_M_STA) { | ||
188 | /* Drop frames that are not addressed to us. */ | ||
189 | if (memcmp(hdr->addr1, sc->sc_myaddr, ETH_ALEN)) { | ||
190 | dev_kfree_skb(skb); | ||
191 | return -1; | ||
192 | } | ||
193 | } | ||
194 | |||
195 | rxtid = &an->an_aggr.rx.tid[tid]; | ||
196 | |||
197 | spin_lock(&rxtid->tidlock); | ||
198 | |||
199 | rxdiff = (rxtid->baw_tail - rxtid->baw_head) & | ||
200 | (ATH_TID_MAX_BUFS - 1); | ||
201 | |||
202 | /* | ||
203 | * If the ADDBA exchange has not been completed by the source, | ||
204 | * process via legacy path (i.e. no reordering buffer is needed) | ||
205 | */ | ||
206 | if (!rxtid->addba_exchangecomplete) { | ||
207 | spin_unlock(&rxtid->tidlock); | ||
208 | return ath_rx_subframe(an, skb, rx_status); | ||
209 | } | ||
210 | |||
211 | /* extract sequence number from recvd frame */ | ||
212 | |||
213 | rxseq = le16_to_cpu(hdr->seq_ctrl) >> IEEE80211_SEQ_SEQ_SHIFT; | ||
214 | |||
215 | if (rxtid->seq_reset) { | ||
216 | rxtid->seq_reset = 0; | ||
217 | rxtid->seq_next = rxseq; | ||
218 | } | ||
219 | |||
220 | index = ATH_BA_INDEX(rxtid->seq_next, rxseq); | ||
221 | |||
222 | /* drop frame if old sequence (index is too large) */ | ||
223 | |||
224 | if (index > (IEEE80211_SEQ_MAX - (rxtid->baw_size << 2))) { | ||
225 | /* discard frame, ieee layer may not treat frame as a dup */ | ||
226 | spin_unlock(&rxtid->tidlock); | ||
227 | dev_kfree_skb(skb); | ||
228 | return IEEE80211_FTYPE_DATA; | ||
229 | } | ||
230 | |||
231 | /* sequence number is beyond block-ack window */ | ||
232 | |||
233 | if (index >= rxtid->baw_size) { | ||
234 | |||
235 | /* complete receive processing for all pending frames */ | ||
236 | |||
237 | while (index >= rxtid->baw_size) { | ||
238 | |||
239 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
240 | |||
241 | if (rxbuf->rx_wbuf != NULL) { | ||
242 | ath_rx_subframe(an, rxbuf->rx_wbuf, | ||
243 | &rxbuf->rx_status); | ||
244 | rxbuf->rx_wbuf = NULL; | ||
245 | } | ||
246 | |||
247 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
248 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
249 | |||
250 | index--; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | /* add buffer to the recv ba window */ | ||
255 | |||
256 | cindex = (rxtid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | ||
257 | rxbuf = rxtid->rxbuf + cindex; | ||
258 | |||
259 | if (rxbuf->rx_wbuf != NULL) { | ||
260 | spin_unlock(&rxtid->tidlock); | ||
261 | /* duplicate frame */ | ||
262 | dev_kfree_skb(skb); | ||
263 | return IEEE80211_FTYPE_DATA; | ||
264 | } | ||
265 | |||
266 | rxbuf->rx_wbuf = skb; | ||
267 | rxbuf->rx_time = get_timestamp(); | ||
268 | rxbuf->rx_status = *rx_status; | ||
269 | |||
270 | /* advance tail if sequence received is newer | ||
271 | * than any received so far */ | ||
272 | |||
273 | if (index >= rxdiff) { | ||
274 | rxtid->baw_tail = cindex; | ||
275 | INCR(rxtid->baw_tail, ATH_TID_MAX_BUFS); | ||
276 | } | ||
277 | |||
278 | /* indicate all in-order received frames */ | ||
279 | |||
280 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
281 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
282 | if (!rxbuf->rx_wbuf) | ||
283 | break; | ||
284 | |||
285 | ath_rx_subframe(an, rxbuf->rx_wbuf, &rxbuf->rx_status); | ||
286 | rxbuf->rx_wbuf = NULL; | ||
287 | |||
288 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
289 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * start a timer to flush all received frames if there are pending | ||
294 | * receive frames | ||
295 | */ | ||
296 | if (rxtid->baw_head != rxtid->baw_tail) | ||
297 | mod_timer(&rxtid->timer, ATH_RX_TIMEOUT); | ||
298 | else | ||
299 | del_timer_sync(&rxtid->timer); | ||
300 | |||
301 | spin_unlock(&rxtid->tidlock); | ||
302 | return IEEE80211_FTYPE_DATA; | ||
303 | } | ||
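/*
 * A minimal model of the in-order release rule used above (hypothetical
 * helper shown for illustration only; it is not called anywhere): a slot
 * can be handed up from the head of the block-ack window only while the
 * window is non-empty and the head slot actually holds a buffered
 * sub-frame. The first hole stops the walk, and the per-TID flush timer
 * deals with it later.
 */
static inline int ath_baw_head_ready(struct ath_arx_tid *rxtid)
{
	return (rxtid->baw_head != rxtid->baw_tail) &&
		(rxtid->rxbuf[rxtid->baw_head].rx_wbuf != NULL);
}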
304 | |||
305 | /* Timer to flush all received sub-frames */ | ||
306 | |||
307 | static void ath_rx_timer(unsigned long data) | ||
308 | { | ||
309 | struct ath_arx_tid *rxtid = (struct ath_arx_tid *)data; | ||
310 | struct ath_node *an = rxtid->an; | ||
311 | struct ath_rxbuf *rxbuf; | ||
312 | int nosched; | ||
313 | |||
314 | spin_lock_bh(&rxtid->tidlock); | ||
315 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
316 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
317 | if (!rxbuf->rx_wbuf) { | ||
318 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
319 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
320 | continue; | ||
321 | } | ||
322 | |||
323 | /* | ||
324 | * Stop if the next one is a very recent frame. | ||
325 | * | ||
326 | * Call get_timestamp in every iteration to protect against the | ||
327 | * case in which a new frame is received while we are executing | ||
328 | * this function. Using a timestamp obtained before entering | ||
329 | * the loop could lead to a very large time interval | ||
330 | * (a negative value typecast to unsigned), breaking the | ||
331 | * function's logic. | ||
332 | */ | ||
333 | if ((get_timestamp() - rxbuf->rx_time) < | ||
334 | (ATH_RX_TIMEOUT * HZ / 1000)) | ||
335 | break; | ||
336 | |||
337 | ath_rx_subframe(an, rxbuf->rx_wbuf, | ||
338 | &rxbuf->rx_status); | ||
339 | rxbuf->rx_wbuf = NULL; | ||
340 | |||
341 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
342 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
343 | } | ||
344 | |||
345 | /* | ||
346 | * start a timer to flush all received frames if there are pending | ||
347 | * receive frames | ||
348 | */ | ||
349 | if (rxtid->baw_head != rxtid->baw_tail) | ||
350 | nosched = 0; | ||
351 | else | ||
352 | nosched = 1; /* no need to re-arm the timer again */ | ||
353 | |||
354 | spin_unlock_bh(&rxtid->tidlock); | ||
355 | } | ||
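/*
 * Worked example of the wrap hazard the comment above guards against,
 * assuming 32-bit timestamps: if 'now' were sampled once before the loop
 * at 1000 and a frame were then stamped at rx_time = 1002, the unsigned
 * difference (1000 - 1002) = 0xfffffffe would never be below
 * ATH_RX_TIMEOUT * HZ / 1000, so the freshly queued frame would be
 * flushed immediately instead of being left for the next timer run.
 * Re-reading get_timestamp() in each iteration keeps the difference
 * small and positive.
 */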
356 | |||
357 | /* Free all pending sub-frames in the re-ordering buffer */ | ||
358 | |||
359 | static void ath_rx_flush_tid(struct ath_softc *sc, | ||
360 | struct ath_arx_tid *rxtid, int drop) | ||
361 | { | ||
362 | struct ath_rxbuf *rxbuf; | ||
363 | |||
364 | spin_lock_bh(&rxtid->tidlock); | ||
365 | while (rxtid->baw_head != rxtid->baw_tail) { | ||
366 | rxbuf = rxtid->rxbuf + rxtid->baw_head; | ||
367 | if (!rxbuf->rx_wbuf) { | ||
368 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
369 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
370 | continue; | ||
371 | } | ||
372 | |||
373 | if (drop) | ||
374 | dev_kfree_skb(rxbuf->rx_wbuf); | ||
375 | else | ||
376 | ath_rx_subframe(rxtid->an, | ||
377 | rxbuf->rx_wbuf, | ||
378 | &rxbuf->rx_status); | ||
379 | |||
380 | rxbuf->rx_wbuf = NULL; | ||
381 | |||
382 | INCR(rxtid->baw_head, ATH_TID_MAX_BUFS); | ||
383 | INCR(rxtid->seq_next, IEEE80211_SEQ_MAX); | ||
384 | } | ||
385 | spin_unlock_bh(&rxtid->tidlock); | ||
386 | } | ||
387 | |||
388 | static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, | ||
389 | u32 len) | ||
390 | { | ||
391 | struct sk_buff *skb; | ||
392 | u32 off; | ||
393 | |||
394 | /* | ||
395 | * Cache-line-align. This is important (for the | ||
396 | * 5210 at least) as not doing so causes bogus data | ||
397 | * in rx'd frames. | ||
398 | */ | ||
399 | |||
400 | skb = dev_alloc_skb(len + sc->sc_cachelsz - 1); | ||
401 | if (skb != NULL) { | ||
402 | off = ((unsigned long) skb->data) % sc->sc_cachelsz; | ||
403 | if (off != 0) | ||
404 | skb_reserve(skb, sc->sc_cachelsz - off); | ||
405 | } else { | ||
406 | DPRINTF(sc, ATH_DBG_FATAL, | ||
407 | "%s: skbuff alloc of size %u failed\n", | ||
408 | __func__, len); | ||
409 | return NULL; | ||
410 | } | ||
411 | |||
412 | return skb; | ||
413 | } | ||
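/*
 * The alignment math above in a nutshell (hypothetical helper, shown only
 * for illustration): with a 32-byte cache line and skb->data ending in
 * 0x14, off = 20 and skb_reserve(skb, 12) moves the data pointer to the
 * next 32-byte boundary; the extra sc_cachelsz - 1 bytes requested from
 * dev_alloc_skb() guarantee there is room to do so.
 */
static inline u32 ath_cacheline_pad(unsigned long addr, u32 cachelsz)
{
	u32 off = addr % cachelsz;		/* e.g. 0x14 % 32 = 20 */

	return off ? cachelsz - off : 0;	/* e.g. 32 - 20 = 12 */
}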
414 | |||
415 | static void ath_rx_requeue(struct ath_softc *sc, struct sk_buff *skb) | ||
416 | { | ||
417 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | ||
418 | |||
419 | ASSERT(bf != NULL); | ||
420 | |||
421 | spin_lock_bh(&sc->sc_rxbuflock); | ||
422 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
423 | /* | ||
424 | * This buffer is still held for hw access. | ||
425 | * Mark it as free so it can be re-queued later. | ||
426 | */ | ||
427 | bf->bf_status |= ATH_BUFSTATUS_FREE; | ||
428 | } else { | ||
429 | /* XXX: we probably never enter here, remove after | ||
430 | * verification */ | ||
431 | list_add_tail(&bf->list, &sc->sc_rxbuf); | ||
432 | ath_rx_buf_link(sc, bf); | ||
433 | } | ||
434 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
435 | } | ||
436 | |||
437 | /* | ||
438 | * The skb indicated to the upper stack won't be returned to us. | ||
439 | * So we have to allocate a new one and queue it by ourselves. | ||
440 | */ | ||
441 | static int ath_rx_indicate(struct ath_softc *sc, | ||
442 | struct sk_buff *skb, | ||
443 | struct ath_recv_status *status, | ||
444 | u16 keyix) | ||
445 | { | ||
446 | struct ath_buf *bf = ATH_RX_CONTEXT(skb)->ctx_rxbuf; | ||
447 | struct sk_buff *nskb; | ||
448 | int type; | ||
449 | |||
450 | /* indicate frame to the stack, which will free the old skb. */ | ||
451 | type = ath__rx_indicate(sc, skb, status, keyix); | ||
452 | |||
453 | /* allocate a new skb and queue it for H/W processing */ | ||
454 | nskb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | ||
455 | if (nskb != NULL) { | ||
456 | bf->bf_mpdu = nskb; | ||
457 | bf->bf_buf_addr = ath_skb_map_single(sc, | ||
458 | nskb, | ||
459 | PCI_DMA_FROMDEVICE, | ||
460 | /* XXX: Remove get_dma_mem_context() */ | ||
461 | get_dma_mem_context(bf, bf_dmacontext)); | ||
462 | ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf; | ||
463 | |||
464 | /* queue the new wbuf to H/W */ | ||
465 | ath_rx_requeue(sc, nskb); | ||
466 | } | ||
467 | |||
468 | return type; | ||
469 | } | ||
470 | |||
471 | static void ath_opmode_init(struct ath_softc *sc) | ||
472 | { | ||
473 | struct ath_hal *ah = sc->sc_ah; | ||
474 | u32 rfilt, mfilt[2]; | ||
475 | |||
476 | /* configure rx filter */ | ||
477 | rfilt = ath_calcrxfilter(sc); | ||
478 | ath9k_hw_setrxfilter(ah, rfilt); | ||
479 | |||
480 | /* configure bssid mask */ | ||
481 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) | ||
482 | ath9k_hw_setbssidmask(ah, sc->sc_bssidmask); | ||
483 | |||
484 | /* configure operational mode */ | ||
485 | ath9k_hw_setopmode(ah); | ||
486 | |||
487 | /* Handle any link-level address change. */ | ||
488 | ath9k_hw_setmac(ah, sc->sc_myaddr); | ||
489 | |||
490 | /* calculate and install multicast filter */ | ||
491 | mfilt[0] = mfilt[1] = ~0; | ||
492 | |||
493 | ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); | ||
494 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
495 | "%s: RX filter 0x%x, MC filter %08x:%08x\n", | ||
496 | __func__, rfilt, mfilt[0], mfilt[1]); | ||
497 | } | ||
498 | |||
499 | int ath_rx_init(struct ath_softc *sc, int nbufs) | ||
500 | { | ||
501 | struct sk_buff *skb; | ||
502 | struct ath_buf *bf; | ||
503 | int error = 0; | ||
504 | |||
505 | do { | ||
506 | spin_lock_init(&sc->sc_rxflushlock); | ||
507 | sc->sc_rxflush = 0; | ||
508 | spin_lock_init(&sc->sc_rxbuflock); | ||
509 | |||
510 | /* | ||
511 | * Cisco's VPN software requires that drivers be able to | ||
512 | * receive encapsulated frames that are larger than the MTU. | ||
513 | * Since we can't be sure how large a frame we'll get, set up | ||
514 | * to handle the largest one possible. | ||
515 | */ | ||
516 | sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN, | ||
517 | min(sc->sc_cachelsz, | ||
518 | (u16)64)); | ||
519 | |||
520 | DPRINTF(sc, ATH_DBG_CONFIG, "%s: cachelsz %u rxbufsize %u\n", | ||
521 | __func__, sc->sc_cachelsz, sc->sc_rxbufsize); | ||
522 | |||
523 | /* Initialize rx descriptors */ | ||
524 | |||
525 | error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, | ||
526 | "rx", nbufs, 1); | ||
527 | if (error != 0) { | ||
528 | DPRINTF(sc, ATH_DBG_FATAL, | ||
529 | "%s: failed to allocate rx descriptors: %d\n", | ||
530 | __func__, error); | ||
531 | break; | ||
532 | } | ||
533 | |||
534 | /* Pre-allocate a wbuf for each rx buffer */ | ||
535 | |||
536 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | ||
537 | skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize); | ||
538 | if (skb == NULL) { | ||
539 | error = -ENOMEM; | ||
540 | break; | ||
541 | } | ||
542 | |||
543 | bf->bf_mpdu = skb; | ||
544 | bf->bf_buf_addr = | ||
545 | ath_skb_map_single(sc, skb, PCI_DMA_FROMDEVICE, | ||
546 | get_dma_mem_context(bf, bf_dmacontext)); | ||
547 | ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf; | ||
548 | } | ||
549 | sc->sc_rxlink = NULL; | ||
550 | |||
551 | } while (0); | ||
552 | |||
553 | if (error) | ||
554 | ath_rx_cleanup(sc); | ||
555 | |||
556 | return error; | ||
557 | } | ||
558 | |||
559 | /* Reclaim all rx queue resources */ | ||
560 | |||
561 | void ath_rx_cleanup(struct ath_softc *sc) | ||
562 | { | ||
563 | struct sk_buff *skb; | ||
564 | struct ath_buf *bf; | ||
565 | |||
566 | list_for_each_entry(bf, &sc->sc_rxbuf, list) { | ||
567 | skb = bf->bf_mpdu; | ||
568 | if (skb) | ||
569 | dev_kfree_skb(skb); | ||
570 | } | ||
571 | |||
572 | /* cleanup rx descriptors */ | ||
573 | |||
574 | if (sc->sc_rxdma.dd_desc_len != 0) | ||
575 | ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Calculate the receive filter according to the | ||
580 | * operating mode and state: | ||
581 | * | ||
582 | * o always accept unicast, broadcast, and multicast traffic | ||
583 | * o maintain current state of phy error reception (the hal | ||
584 | * may enable phy error frames for noise immunity work) | ||
585 | * o probe request frames are accepted only when operating in | ||
586 | * hostap, adhoc, or monitor modes | ||
587 | * o enable promiscuous mode according to the interface state | ||
588 | * o accept beacons: | ||
589 | * - when operating in adhoc mode so the 802.11 layer creates | ||
590 | * node table entries for peers, | ||
591 | * - when operating in station mode for collecting rssi data when | ||
592 | * the station is otherwise quiet, or | ||
593 | * - when operating as a repeater so we see repeater-sta beacons | ||
594 | * - when scanning | ||
595 | */ | ||
596 | |||
597 | u32 ath_calcrxfilter(struct ath_softc *sc) | ||
598 | { | ||
599 | #define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR) | ||
600 | u32 rfilt; | ||
601 | |||
602 | rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE) | ||
603 | | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST | ||
604 | | ATH9K_RX_FILTER_MCAST; | ||
605 | |||
606 | /* If not a STA, enable processing of Probe Requests */ | ||
607 | if (sc->sc_opmode != ATH9K_M_STA) | ||
608 | rfilt |= ATH9K_RX_FILTER_PROBEREQ; | ||
609 | |||
610 | /* Can't set HOSTAP into promiscuous mode */ | ||
611 | if (sc->sc_opmode == ATH9K_M_MONITOR) { | ||
612 | rfilt |= ATH9K_RX_FILTER_PROM; | ||
613 | /* ??? To prevent sending ACKs */ | ||
614 | rfilt &= ~ATH9K_RX_FILTER_UCAST; | ||
615 | } | ||
616 | |||
617 | if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS || | ||
618 | sc->sc_scanning) | ||
619 | rfilt |= ATH9K_RX_FILTER_BEACON; | ||
620 | |||
621 | /* If in HOSTAP mode, want to enable reception of PSPOLL frames | ||
622 | & beacon frames */ | ||
623 | if (sc->sc_opmode == ATH9K_M_HOSTAP) | ||
624 | rfilt |= (ATH9K_RX_FILTER_BEACON | ATH9K_RX_FILTER_PSPOLL); | ||
625 | return rfilt; | ||
626 | #undef RX_FILTER_PRESERVE | ||
627 | } | ||
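/*
 * Example of how the branches above compose: a scanning station ends up
 * with the preserved PHY error bits plus UCAST | BCAST | MCAST | BEACON,
 * while PROBEREQ, PROM and PSPOLL are only added for non-STA, monitor and
 * HOSTAP operation respectively.
 */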
628 | |||
629 | /* Enable the receive h/w following a reset. */ | ||
630 | |||
631 | int ath_startrecv(struct ath_softc *sc) | ||
632 | { | ||
633 | struct ath_hal *ah = sc->sc_ah; | ||
634 | struct ath_buf *bf, *tbf; | ||
635 | |||
636 | spin_lock_bh(&sc->sc_rxbuflock); | ||
637 | if (list_empty(&sc->sc_rxbuf)) | ||
638 | goto start_recv; | ||
639 | |||
640 | sc->sc_rxlink = NULL; | ||
641 | list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) { | ||
642 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
643 | /* restarting h/w, no need for holding descriptors */ | ||
644 | bf->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
645 | /* | ||
646 | * Upper layer may not be done with the frame yet so | ||
647 | * we can't just re-queue it to hardware. Remove it | ||
648 | * from h/w queue. It'll be re-queued when upper layer | ||
649 | * returns the frame and ath_rx_requeue_mpdu is called. | ||
650 | */ | ||
651 | if (!(bf->bf_status & ATH_BUFSTATUS_FREE)) { | ||
652 | list_del(&bf->list); | ||
653 | continue; | ||
654 | } | ||
655 | } | ||
656 | /* chain descriptors */ | ||
657 | ath_rx_buf_link(sc, bf); | ||
658 | } | ||
659 | |||
660 | /* We could have deleted elements so the list may be empty now */ | ||
661 | if (list_empty(&sc->sc_rxbuf)) | ||
662 | goto start_recv; | ||
663 | |||
664 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | ||
665 | ath9k_hw_putrxbuf(ah, bf->bf_daddr); | ||
666 | ath9k_hw_rxena(ah); /* enable recv descriptors */ | ||
667 | |||
668 | start_recv: | ||
669 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
670 | ath_opmode_init(sc); /* set filters, etc. */ | ||
671 | ath9k_hw_startpcureceive(ah); /* re-enable PCU/DMA engine */ | ||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | /* Disable the receive h/w in preparation for a reset. */ | ||
676 | |||
677 | bool ath_stoprecv(struct ath_softc *sc) | ||
678 | { | ||
679 | struct ath_hal *ah = sc->sc_ah; | ||
680 | u64 tsf; | ||
681 | bool stopped; | ||
682 | |||
683 | ath9k_hw_stoppcurecv(ah); /* disable PCU */ | ||
684 | ath9k_hw_setrxfilter(ah, 0); /* clear recv filter */ | ||
685 | stopped = ath9k_hw_stopdmarecv(ah); /* disable DMA engine */ | ||
686 | mdelay(3); /* 3ms is long enough for 1 frame */ | ||
687 | tsf = ath9k_hw_gettsf64(ah); | ||
688 | sc->sc_rxlink = NULL; /* just in case */ | ||
689 | return stopped; | ||
690 | } | ||
691 | |||
692 | /* Flush receive queue */ | ||
693 | |||
694 | void ath_flushrecv(struct ath_softc *sc) | ||
695 | { | ||
696 | /* | ||
697 | * ath_rx_tasklet may be used to handle rx interrupt and flush receive | ||
698 | * queue at the same time. Use a lock to serialize access to the rx | ||
699 | * queue. | ||
700 | * ath_rx_tasklet cannot hold the spinlock while indicating packets. | ||
701 | * Instead, do not claim the spinlock but check for a flush in | ||
702 | * progress (see references to sc_rxflush) | ||
703 | */ | ||
704 | spin_lock_bh(&sc->sc_rxflushlock); | ||
705 | sc->sc_rxflush = 1; | ||
706 | |||
707 | ath_rx_tasklet(sc, 1); | ||
708 | |||
709 | sc->sc_rxflush = 0; | ||
710 | spin_unlock_bh(&sc->sc_rxflushlock); | ||
711 | } | ||
712 | |||
713 | /* Process an individual frame */ | ||
714 | |||
715 | int ath_rx_input(struct ath_softc *sc, | ||
716 | struct ath_node *an, | ||
717 | int is_ampdu, | ||
718 | struct sk_buff *skb, | ||
719 | struct ath_recv_status *rx_status, | ||
720 | enum ATH_RX_TYPE *status) | ||
721 | { | ||
722 | if (is_ampdu && sc->sc_rxaggr) { | ||
723 | *status = ATH_RX_CONSUMED; | ||
724 | return ath_ampdu_input(sc, an, skb, rx_status); | ||
725 | } else { | ||
726 | *status = ATH_RX_NON_CONSUMED; | ||
727 | return -1; | ||
728 | } | ||
729 | } | ||
730 | |||
731 | /* Process receive queue, as well as LED, etc. */ | ||
732 | |||
733 | int ath_rx_tasklet(struct ath_softc *sc, int flush) | ||
734 | { | ||
735 | #define PA2DESC(_sc, _pa) \ | ||
736 | ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ | ||
737 | ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) | ||
738 | |||
739 | struct ath_buf *bf, *bf_held = NULL; | ||
740 | struct ath_desc *ds; | ||
741 | struct ieee80211_hdr *hdr; | ||
742 | struct sk_buff *skb = NULL; | ||
743 | struct ath_recv_status rx_status; | ||
744 | struct ath_hal *ah = sc->sc_ah; | ||
745 | int type, rx_processed = 0; | ||
746 | u32 phyerr; | ||
747 | u8 chainreset = 0; | ||
748 | int retval; | ||
749 | __le16 fc; | ||
750 | |||
751 | do { | ||
752 | /* If handling rx interrupt and flush is in progress => exit */ | ||
753 | if (sc->sc_rxflush && (flush == 0)) | ||
754 | break; | ||
755 | |||
756 | spin_lock_bh(&sc->sc_rxbuflock); | ||
757 | if (list_empty(&sc->sc_rxbuf)) { | ||
758 | sc->sc_rxlink = NULL; | ||
759 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
760 | break; | ||
761 | } | ||
762 | |||
763 | bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list); | ||
764 | |||
765 | /* | ||
766 | * There is a race condition where BH gets scheduled after sw | ||
767 | * writes RxE and before hw re-loads the last descriptor to get | ||
768 | * the newly chained one. Software must keep the last DONE | ||
769 | * descriptor as a holding descriptor - software does so by | ||
770 | * marking it with the STALE flag. | ||
771 | */ | ||
772 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
773 | bf_held = bf; | ||
774 | if (list_is_last(&bf_held->list, &sc->sc_rxbuf)) { | ||
775 | /* | ||
776 | * The holding descriptor is the last | ||
777 | * descriptor in queue. It's safe to | ||
778 | * remove the last holding descriptor | ||
779 | * in BH context. | ||
780 | */ | ||
781 | list_del(&bf_held->list); | ||
782 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
783 | sc->sc_rxlink = NULL; | ||
784 | |||
785 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
786 | list_add_tail(&bf_held->list, | ||
787 | &sc->sc_rxbuf); | ||
788 | ath_rx_buf_link(sc, bf_held); | ||
789 | } | ||
790 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
791 | break; | ||
792 | } | ||
793 | bf = list_entry(bf->list.next, struct ath_buf, list); | ||
794 | } | ||
795 | |||
796 | ds = bf->bf_desc; | ||
797 | ++rx_processed; | ||
798 | |||
799 | /* | ||
800 | * Must provide the virtual address of the current | ||
801 | * descriptor, the physical address, and the virtual | ||
802 | * address of the next descriptor in the h/w chain. | ||
803 | * This allows the HAL to look ahead to see if the | ||
804 | * hardware is done with a descriptor by checking the | ||
805 | * done bit in the following descriptor and the address | ||
806 | * of the current descriptor the DMA engine is working | ||
807 | * on. All this is necessary because of our use of | ||
808 | * a self-linked list to avoid rx overruns. | ||
809 | */ | ||
810 | retval = ath9k_hw_rxprocdesc(ah, | ||
811 | ds, | ||
812 | bf->bf_daddr, | ||
813 | PA2DESC(sc, ds->ds_link), | ||
814 | 0); | ||
815 | if (retval == -EINPROGRESS) { | ||
816 | struct ath_buf *tbf; | ||
817 | struct ath_desc *tds; | ||
818 | |||
819 | if (list_is_last(&bf->list, &sc->sc_rxbuf)) { | ||
820 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
821 | break; | ||
822 | } | ||
823 | |||
824 | tbf = list_entry(bf->list.next, struct ath_buf, list); | ||
825 | |||
826 | /* | ||
827 | * On some hardware the descriptor status words could | ||
828 | * get corrupted, including the done bit. Because of | ||
829 | * this, check if the next descriptor's done bit is | ||
830 | * set or not. | ||
831 | * | ||
832 | * If the next descriptor's done bit is set, the current | ||
833 | * descriptor has been corrupted. Force s/w to discard | ||
834 | * this descriptor and continue... | ||
835 | */ | ||
836 | |||
837 | tds = tbf->bf_desc; | ||
838 | retval = ath9k_hw_rxprocdesc(ah, | ||
839 | tds, tbf->bf_daddr, | ||
840 | PA2DESC(sc, tds->ds_link), 0); | ||
841 | if (retval == -EINPROGRESS) { | ||
842 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
843 | break; | ||
844 | } | ||
845 | } | ||
846 | |||
847 | /* XXX: we do not support frames spanning | ||
848 | * multiple descriptors */ | ||
849 | bf->bf_status |= ATH_BUFSTATUS_DONE; | ||
850 | |||
851 | skb = bf->bf_mpdu; | ||
852 | if (skb == NULL) { /* XXX ??? can this happen */ | ||
853 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
854 | continue; | ||
855 | } | ||
856 | /* | ||
857 | * Now we know it's a completed frame, we can indicate the | ||
858 | * frame. Remove the previous holding descriptor and leave | ||
859 | * this one in the queue as the new holding descriptor. | ||
860 | */ | ||
861 | if (bf_held) { | ||
862 | list_del(&bf_held->list); | ||
863 | bf_held->bf_status &= ~ATH_BUFSTATUS_STALE; | ||
864 | if (bf_held->bf_status & ATH_BUFSTATUS_FREE) { | ||
865 | list_add_tail(&bf_held->list, &sc->sc_rxbuf); | ||
866 | /* try to requeue this descriptor */ | ||
867 | ath_rx_buf_link(sc, bf_held); | ||
868 | } | ||
869 | } | ||
870 | |||
871 | bf->bf_status |= ATH_BUFSTATUS_STALE; | ||
872 | bf_held = bf; | ||
873 | /* | ||
874 | * Release the lock here in case ieee80211_input() returns | ||
875 | * the frame immediately by calling ath_rx_mpdu_requeue(). | ||
876 | */ | ||
877 | spin_unlock_bh(&sc->sc_rxbuflock); | ||
878 | |||
879 | if (flush) { | ||
880 | /* | ||
881 | * If we're asked to flush receive queue, directly | ||
882 | * chain it back at the queue without processing it. | ||
883 | */ | ||
884 | goto rx_next; | ||
885 | } | ||
886 | |||
887 | hdr = (struct ieee80211_hdr *)skb->data; | ||
888 | fc = hdr->frame_control; | ||
889 | memzero(&rx_status, sizeof(struct ath_recv_status)); | ||
890 | |||
891 | if (ds->ds_rxstat.rs_more) { | ||
892 | /* | ||
893 | * Frame spans multiple descriptors; this | ||
894 | * cannot happen yet as we don't support | ||
895 | * jumbograms. If not in monitor mode, | ||
896 | * discard the frame. | ||
897 | */ | ||
898 | #ifndef ERROR_FRAMES | ||
899 | /* | ||
900 | * Enable this if you want to see | ||
901 | * error frames in Monitor mode. | ||
902 | */ | ||
903 | if (sc->sc_opmode != ATH9K_M_MONITOR) | ||
904 | goto rx_next; | ||
905 | #endif | ||
906 | /* fall thru for monitor mode handling... */ | ||
907 | } else if (ds->ds_rxstat.rs_status != 0) { | ||
908 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) | ||
909 | rx_status.flags |= ATH_RX_FCS_ERROR; | ||
910 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { | ||
911 | phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; | ||
912 | goto rx_next; | ||
913 | } | ||
914 | |||
915 | if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) { | ||
916 | /* | ||
917 | * Decrypt error. We only mark the packet status | ||
918 | * here and always push the frame up to let | ||
919 | * mac80211 handle the actual error case, be | ||
920 | * it a missing decryption key or a real decryption | ||
921 | * error. This lets us keep statistics there. | ||
922 | */ | ||
923 | rx_status.flags |= ATH_RX_DECRYPT_ERROR; | ||
924 | } else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) { | ||
925 | /* | ||
926 | * Demic error. We only mark the frame status here | ||
927 | * and always push the frame up to let | ||
928 | * mac80211 handle the actual error case. This | ||
929 | * lets us keep statistics there. Hardware may | ||
930 | * post a false-positive MIC error. | ||
931 | */ | ||
932 | if (ieee80211_is_ctl(fc)) | ||
933 | /* | ||
934 | * Sometimes, we get invalid | ||
935 | * MIC failures on valid control frames. | ||
936 | * Remove these mic errors. | ||
937 | */ | ||
938 | ds->ds_rxstat.rs_status &= | ||
939 | ~ATH9K_RXERR_MIC; | ||
940 | else | ||
941 | rx_status.flags |= ATH_RX_MIC_ERROR; | ||
942 | } | ||
943 | /* | ||
944 | * Reject error frames with the exception of | ||
945 | * decryption and MIC failures. For monitor mode, | ||
946 | * we also ignore the CRC error. | ||
947 | */ | ||
948 | if (sc->sc_opmode == ATH9K_M_MONITOR) { | ||
949 | if (ds->ds_rxstat.rs_status & | ||
950 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | | ||
951 | ATH9K_RXERR_CRC)) | ||
952 | goto rx_next; | ||
953 | } else { | ||
954 | if (ds->ds_rxstat.rs_status & | ||
955 | ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) { | ||
956 | goto rx_next; | ||
957 | } | ||
958 | } | ||
959 | } | ||
960 | /* | ||
961 | * The status portion of the descriptor could get corrupted. | ||
962 | */ | ||
963 | if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen) | ||
964 | goto rx_next; | ||
965 | /* | ||
966 | * Sync and unmap the frame. At this point we're | ||
967 | * committed to passing the sk_buff somewhere so | ||
968 | * clear buf_skb; this means a new sk_buff must be | ||
969 | * allocated when the rx descriptor is setup again | ||
970 | * to receive another frame. | ||
971 | */ | ||
972 | skb_put(skb, ds->ds_rxstat.rs_datalen); | ||
973 | skb->protocol = cpu_to_be16(ETH_P_CONTROL); | ||
974 | rx_status.tsf = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp); | ||
975 | rx_status.rateieee = | ||
976 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate; | ||
977 | rx_status.rateKbps = | ||
978 | sc->sc_hwmap[ds->ds_rxstat.rs_rate].rateKbps; | ||
979 | rx_status.ratecode = ds->ds_rxstat.rs_rate; | ||
980 | |||
981 | /* HT rate */ | ||
982 | if (rx_status.ratecode & 0x80) { | ||
983 | /* TODO - add table to avoid division */ | ||
984 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | ||
985 | rx_status.flags |= ATH_RX_40MHZ; | ||
986 | rx_status.rateKbps = | ||
987 | (rx_status.rateKbps * 27) / 13; | ||
988 | } | ||
989 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI) { | ||
990 | rx_status.flags |= ATH_RX_SHORT_GI; | ||
991 | rx_status.rateKbps = | ||
992 | (rx_status.rateKbps * 10) / 9; | ||
993 | } | ||
994 | } | ||
995 | |||
996 | /* sc->sc_noise_floor is only available when the station | ||
997 | attaches to an AP, so we use a default value | ||
998 | if we are not yet attached. */ | ||
999 | |||
1000 | /* XXX we should use either sc->sc_noise_floor or | ||
1001 | * ath_hal_getChanNoise(ah, &sc->sc_curchan) | ||
1002 | * to calculate the noise floor. | ||
1003 | * However, the value returned by ath_hal_getChanNoise | ||
1004 | * seems to be incorrect (-31dBm on the last test), | ||
1005 | * so we will use a hard-coded value until we | ||
1006 | * figure out what is going on. | ||
1007 | */ | ||
1008 | rx_status.abs_rssi = | ||
1009 | ds->ds_rxstat.rs_rssi + ATH_DEFAULT_NOISE_FLOOR; | ||
1010 | |||
1011 | pci_dma_sync_single_for_cpu(sc->pdev, | ||
1012 | bf->bf_buf_addr, | ||
1013 | skb_tailroom(skb), | ||
1014 | PCI_DMA_FROMDEVICE); | ||
1015 | pci_unmap_single(sc->pdev, | ||
1016 | bf->bf_buf_addr, | ||
1017 | sc->sc_rxbufsize, | ||
1018 | PCI_DMA_FROMDEVICE); | ||
1019 | |||
1020 | /* XXX: Ah! make me more readable, use a helper */ | ||
1021 | if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) { | ||
1022 | if (ds->ds_rxstat.rs_moreaggr == 0) { | ||
1023 | rx_status.rssictl[0] = | ||
1024 | ds->ds_rxstat.rs_rssi_ctl0; | ||
1025 | rx_status.rssictl[1] = | ||
1026 | ds->ds_rxstat.rs_rssi_ctl1; | ||
1027 | rx_status.rssictl[2] = | ||
1028 | ds->ds_rxstat.rs_rssi_ctl2; | ||
1029 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | ||
1030 | if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040) { | ||
1031 | rx_status.rssiextn[0] = | ||
1032 | ds->ds_rxstat.rs_rssi_ext0; | ||
1033 | rx_status.rssiextn[1] = | ||
1034 | ds->ds_rxstat.rs_rssi_ext1; | ||
1035 | rx_status.rssiextn[2] = | ||
1036 | ds->ds_rxstat.rs_rssi_ext2; | ||
1037 | rx_status.flags |= | ||
1038 | ATH_RX_RSSI_EXTN_VALID; | ||
1039 | } | ||
1040 | rx_status.flags |= ATH_RX_RSSI_VALID | | ||
1041 | ATH_RX_CHAIN_RSSI_VALID; | ||
1042 | } | ||
1043 | } else { | ||
1044 | /* | ||
1045 | * Need to insert the "combined" rssi into the | ||
1046 | * status structure for upper layer processing | ||
1047 | */ | ||
1048 | rx_status.rssi = ds->ds_rxstat.rs_rssi; | ||
1049 | rx_status.flags |= ATH_RX_RSSI_VALID; | ||
1050 | } | ||
1051 | |||
1052 | /* Pass frames up to the stack. */ | ||
1053 | |||
1054 | type = ath_rx_indicate(sc, skb, | ||
1055 | &rx_status, ds->ds_rxstat.rs_keyix); | ||
1056 | |||
1057 | /* | ||
1058 | * change the default rx antenna if rx diversity chooses the | ||
1059 | * other antenna 3 times in a row. | ||
1060 | */ | ||
1061 | if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { | ||
1062 | if (++sc->sc_rxotherant >= 3) | ||
1063 | ath_setdefantenna(sc, | ||
1064 | ds->ds_rxstat.rs_antenna); | ||
1065 | } else { | ||
1066 | sc->sc_rxotherant = 0; | ||
1067 | } | ||
1068 | |||
1069 | #ifdef CONFIG_SLOW_ANT_DIV | ||
1070 | if ((rx_status.flags & ATH_RX_RSSI_VALID) && | ||
1071 | ieee80211_is_beacon(fc)) { | ||
1072 | ath_slow_ant_div(&sc->sc_antdiv, hdr, &ds->ds_rxstat); | ||
1073 | } | ||
1074 | #endif | ||
1075 | /* | ||
1076 | * For frames successfully indicated, the buffer will be | ||
1077 | * returned to us by upper layers by calling | ||
1078 | * ath_rx_mpdu_requeue, either synchronously or asynchronously. | ||
1079 | * So we don't want to do it here in this loop. | ||
1080 | */ | ||
1081 | continue; | ||
1082 | |||
1083 | rx_next: | ||
1084 | bf->bf_status |= ATH_BUFSTATUS_FREE; | ||
1085 | } while (TRUE); | ||
1086 | |||
1087 | if (chainreset) { | ||
1088 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
1089 | "%s: Reset rx chain mask. " | ||
1090 | "Do internal reset\n", __func__); | ||
1091 | ASSERT(flush == 0); | ||
1092 | ath_internal_reset(sc); | ||
1093 | } | ||
1094 | |||
1095 | return 0; | ||
1096 | #undef PA2DESC | ||
1097 | } | ||
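/*
 * Note on PA2DESC() above: the rx descriptors live in one contiguous
 * coherent allocation (sc_rxdma), so a physical address reported in
 * ds_link is translated back to a driver pointer with plain offset
 * arithmetic, virt = dd_desc + (pa - dd_desc_paddr). For example, with
 * dd_desc_paddr = 0x1f00000 and ds_link = 0x1f00080 the descriptor sits
 * 0x80 bytes into the block.
 */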
1098 | |||
1099 | /* Process ADDBA request in per-TID data structure */ | ||
1100 | |||
1101 | int ath_rx_aggr_start(struct ath_softc *sc, | ||
1102 | const u8 *addr, | ||
1103 | u16 tid, | ||
1104 | u16 *ssn) | ||
1105 | { | ||
1106 | struct ath_arx_tid *rxtid; | ||
1107 | struct ath_node *an; | ||
1108 | struct ieee80211_hw *hw = sc->hw; | ||
1109 | struct ieee80211_supported_band *sband; | ||
1110 | u16 buffersize = 0; | ||
1111 | |||
1112 | spin_lock_bh(&sc->node_lock); | ||
1113 | an = ath_node_find(sc, (u8 *) addr); | ||
1114 | spin_unlock_bh(&sc->node_lock); | ||
1115 | |||
1116 | if (!an) { | ||
1117 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1118 | "%s: Node not found to initialize RX aggregation\n", | ||
1119 | __func__); | ||
1120 | return -1; | ||
1121 | } | ||
1122 | |||
1123 | sband = hw->wiphy->bands[hw->conf.channel->band]; | ||
1124 | buffersize = IEEE80211_MIN_AMPDU_BUF << | ||
1125 | sband->ht_info.ampdu_factor; /* FIXME */ | ||
1126 | |||
1127 | rxtid = &an->an_aggr.rx.tid[tid]; | ||
1128 | |||
1129 | spin_lock_bh(&rxtid->tidlock); | ||
1130 | if (sc->sc_rxaggr) { | ||
1131 | /* Allow aggregation reception | ||
1132 | * Adjust rx BA window size. Peer might indicate a | ||
1133 | * zero buffer size for a _dont_care_ condition. | ||
1134 | */ | ||
1135 | if (buffersize) | ||
1136 | rxtid->baw_size = min(buffersize, rxtid->baw_size); | ||
1137 | |||
1138 | /* set rx sequence number */ | ||
1139 | rxtid->seq_next = *ssn; | ||
1140 | |||
1141 | /* Allocate the receive buffers for this TID */ | ||
1142 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1143 | "%s: Allocating rxbuffer for TID %d\n", __func__, tid); | ||
1144 | |||
1145 | if (rxtid->rxbuf == NULL) { | ||
1146 | /* | ||
1147 | * If rxbuf is not NULL at this point, we *probably* | ||
1148 | * already allocated the buffer on a previous ADDBA, | ||
1149 | * and this is a subsequent ADDBA that got through. | ||
1150 | * Don't re-allocate, but use the existing pointer; | ||
1151 | * it is set to NULL when we de-allocate. | ||
1152 | */ | ||
1153 | rxtid->rxbuf = kmalloc(ATH_TID_MAX_BUFS * | ||
1154 | sizeof(struct ath_rxbuf), GFP_ATOMIC); | ||
1155 | } | ||
1156 | if (rxtid->rxbuf == NULL) { | ||
1157 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1158 | "%s: Unable to allocate RX buffer, " | ||
1159 | "refusing ADDBA\n", __func__); | ||
1160 | } else { | ||
1161 | /* Ensure the memory is zeroed out (all internal | ||
1162 | * pointers are null) */ | ||
1163 | memzero(rxtid->rxbuf, ATH_TID_MAX_BUFS * | ||
1164 | sizeof(struct ath_rxbuf)); | ||
1165 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1166 | "%s: Allocated @%p\n", __func__, rxtid->rxbuf); | ||
1167 | |||
1168 | /* Allow aggregation reception */ | ||
1169 | rxtid->addba_exchangecomplete = 1; | ||
1170 | } | ||
1171 | } | ||
1172 | spin_unlock_bh(&rxtid->tidlock); | ||
1173 | |||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1177 | /* Process DELBA */ | ||
1178 | |||
1179 | int ath_rx_aggr_stop(struct ath_softc *sc, | ||
1180 | const u8 *addr, | ||
1181 | u16 tid) | ||
1182 | { | ||
1183 | struct ath_node *an; | ||
1184 | |||
1185 | spin_lock_bh(&sc->node_lock); | ||
1186 | an = ath_node_find(sc, (u8 *) addr); | ||
1187 | spin_unlock_bh(&sc->node_lock); | ||
1188 | |||
1189 | if (!an) { | ||
1190 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1191 | "%s: RX aggr stop for non-existent node\n", __func__); | ||
1192 | return -1; | ||
1193 | } | ||
1194 | |||
1195 | ath_rx_aggr_teardown(sc, an, tid); | ||
1196 | return 0; | ||
1197 | } | ||
1198 | |||
1199 | /* Rx aggregation tear down */ | ||
1200 | |||
1201 | void ath_rx_aggr_teardown(struct ath_softc *sc, | ||
1202 | struct ath_node *an, u8 tid) | ||
1203 | { | ||
1204 | struct ath_arx_tid *rxtid = &an->an_aggr.rx.tid[tid]; | ||
1205 | |||
1206 | if (!rxtid->addba_exchangecomplete) | ||
1207 | return; | ||
1208 | |||
1209 | del_timer_sync(&rxtid->timer); | ||
1210 | ath_rx_flush_tid(sc, rxtid, 0); | ||
1211 | rxtid->addba_exchangecomplete = 0; | ||
1212 | |||
1213 | /* De-allocate the receive buffer array allocated when addba started */ | ||
1214 | |||
1215 | if (rxtid->rxbuf) { | ||
1216 | DPRINTF(sc, ATH_DBG_AGGR, | ||
1217 | "%s: Deallocating TID %d rxbuff @%p\n", | ||
1218 | __func__, tid, rxtid->rxbuf); | ||
1219 | kfree(rxtid->rxbuf); | ||
1220 | |||
1221 | /* Set pointer to NULL to avoid reuse */ | ||
1222 | rxtid->rxbuf = NULL; | ||
1223 | } | ||
1224 | } | ||
1225 | |||
1226 | /* Initialize per-node receive state */ | ||
1227 | |||
1228 | void ath_rx_node_init(struct ath_softc *sc, struct ath_node *an) | ||
1229 | { | ||
1230 | if (sc->sc_rxaggr) { | ||
1231 | struct ath_arx_tid *rxtid; | ||
1232 | int tidno; | ||
1233 | |||
1234 | /* Init per tid rx state */ | ||
1235 | for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; | ||
1236 | tidno < WME_NUM_TID; | ||
1237 | tidno++, rxtid++) { | ||
1238 | rxtid->an = an; | ||
1239 | rxtid->seq_reset = 1; | ||
1240 | rxtid->seq_next = 0; | ||
1241 | rxtid->baw_size = WME_MAX_BA; | ||
1242 | rxtid->baw_head = rxtid->baw_tail = 0; | ||
1243 | |||
1244 | /* | ||
1245 | * Ensure the buffer pointer is null at this point | ||
1246 | * (needs to be allocated when addba is received) | ||
1247 | */ | ||
1248 | |||
1249 | rxtid->rxbuf = NULL; | ||
1250 | setup_timer(&rxtid->timer, ath_rx_timer, | ||
1251 | (unsigned long)rxtid); | ||
1252 | spin_lock_init(&rxtid->tidlock); | ||
1253 | |||
1254 | /* ADDBA state */ | ||
1255 | rxtid->addba_exchangecomplete = 0; | ||
1256 | } | ||
1257 | } | ||
1258 | } | ||
1259 | |||
1260 | void ath_rx_node_cleanup(struct ath_softc *sc, struct ath_node *an) | ||
1261 | { | ||
1262 | if (sc->sc_rxaggr) { | ||
1263 | struct ath_arx_tid *rxtid; | ||
1264 | int tidno, i; | ||
1265 | |||
1266 | /* Cleanup per tid rx state */ | ||
1267 | for (tidno = 0, rxtid = &an->an_aggr.rx.tid[tidno]; | ||
1268 | tidno < WME_NUM_TID; | ||
1269 | tidno++, rxtid++) { | ||
1270 | |||
1271 | if (!rxtid->addba_exchangecomplete) | ||
1272 | continue; | ||
1273 | |||
1274 | /* must cancel timer first */ | ||
1275 | del_timer_sync(&rxtid->timer); | ||
1276 | |||
1277 | /* drop any pending sub-frames */ | ||
1278 | ath_rx_flush_tid(sc, rxtid, 1); | ||
1279 | |||
1280 | for (i = 0; i < ATH_TID_MAX_BUFS; i++) | ||
1281 | ASSERT(rxtid->rxbuf[i].rx_wbuf == NULL); | ||
1282 | |||
1283 | rxtid->addba_exchangecomplete = 0; | ||
1284 | } | ||
1285 | } | ||
1286 | |||
1287 | } | ||
1288 | |||
1289 | /* Cleanup per-node receive state */ | ||
1290 | |||
1291 | void ath_rx_node_free(struct ath_softc *sc, struct ath_node *an) | ||
1292 | { | ||
1293 | ath_rx_node_cleanup(sc, an); | ||
1294 | } | ||
1295 | |||
1296 | dma_addr_t ath_skb_map_single(struct ath_softc *sc, | ||
1297 | struct sk_buff *skb, | ||
1298 | int direction, | ||
1299 | dma_addr_t *pa) | ||
1300 | { | ||
1301 | /* | ||
1302 | * NB: do NOT use skb->len, which is 0 on initialization. | ||
1303 | * Use skb's entire data area instead. | ||
1304 | */ | ||
1305 | *pa = pci_map_single(sc->pdev, skb->data, | ||
1306 | skb_end_pointer(skb) - skb->head, direction); | ||
1307 | return *pa; | ||
1308 | } | ||
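/*
 * In other words, the mapping length is the whole data area of the skb
 * (skb_end_pointer(skb) - skb->head), which for the rx buffers allocated
 * by ath_rxbuf_alloc() is at least sc_rxbufsize, not just the bytes
 * currently occupied by a frame (skb->len is still 0 at this point).
 */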
1309 | |||
1310 | void ath_skb_unmap_single(struct ath_softc *sc, | ||
1311 | struct sk_buff *skb, | ||
1312 | int direction, | ||
1313 | dma_addr_t *pa) | ||
1314 | { | ||
1315 | /* Unmap skb's entire data area */ | ||
1316 | pci_unmap_single(sc->pdev, *pa, | ||
1317 | skb_end_pointer(skb) - skb->head, direction); | ||
1318 | } | ||